base,os: Coding-style unification

Fixes #1432
Norman Feske 2015-03-04 21:12:14 +01:00 committed by Christian Helmuth
parent 56ed7addbc
commit e8336acafc
227 changed files with 19073 additions and 18833 deletions


@@ -21,63 +21,68 @@
namespace Genode {
/**
* Basic CPU state
*/
struct Cpu_state
{
/**
* Native exception types
*/
enum Cpu_exception {
RESET = 1,
UNDEFINED_INSTRUCTION = 2,
SUPERVISOR_CALL = 3,
PREFETCH_ABORT = 4,
DATA_ABORT = 5,
INTERRUPT_REQUEST = 6,
FAST_INTERRUPT_REQUEST = 7,
};
/**
* Registers
*/
addr_t r0, r1, r2, r3, r4, r5, r6,
r7, r8, r9, r10, r11, r12; /* general purpose register 0..12 */
addr_t sp; /* stack pointer */
addr_t lr; /* link register */
addr_t ip; /* instruction pointer */
addr_t cpsr; /* current program status register */
addr_t cpu_exception; /* last hardware exception */
};
/**
* Extend CPU state by banked registers
*/
struct Cpu_state_modes : Cpu_state
{
/**
* Common banked registers for exception modes
*/
struct Mode_state {
enum Mode {
UND, /* Undefined */
SVC, /* Supervisor */
ABORT, /* Abort */
IRQ, /* Interrupt */
FIQ, /* Fast Interrupt */
MAX
};
addr_t spsr; /* saved program status register */
addr_t sp; /* banked stack pointer */
addr_t lr; /* banked link register */
};
Mode_state mode[Mode_state::MAX]; /* exception mode registers */
addr_t fiq_r[5]; /* fast-interrupt mode r8-r12 */
};
struct Cpu_state;
struct Cpu_state_modes;
}
/**
* Basic CPU state
*/
struct Genode::Cpu_state
{
/**
* Native exception types
*/
enum Cpu_exception {
RESET = 1,
UNDEFINED_INSTRUCTION = 2,
SUPERVISOR_CALL = 3,
PREFETCH_ABORT = 4,
DATA_ABORT = 5,
INTERRUPT_REQUEST = 6,
FAST_INTERRUPT_REQUEST = 7,
};
/**
* Registers
*/
addr_t r0, r1, r2, r3, r4, r5, r6,
r7, r8, r9, r10, r11, r12; /* general purpose register 0..12 */
addr_t sp; /* stack pointer */
addr_t lr; /* link register */
addr_t ip; /* instruction pointer */
addr_t cpsr; /* current program status register */
addr_t cpu_exception; /* last hardware exception */
};
/**
* Extend CPU state by banked registers
*/
struct Genode::Cpu_state_modes : Cpu_state
{
/**
* Common banked registers for exception modes
*/
struct Mode_state {
enum Mode {
UND, /* Undefined */
SVC, /* Supervisor */
ABORT, /* Abort */
IRQ, /* Interrupt */
FIQ, /* Fast Interrupt */
MAX
};
addr_t spsr; /* saved program status register */
addr_t sp; /* banked stack pointer */
addr_t lr; /* banked link register */
};
Mode_state mode[Mode_state::MAX]; /* exception mode registers */
addr_t fiq_r[5]; /* fast-interrupt mode r8-r12 */
};
#endif /* _INCLUDE__ARM__CPU__CPU_STATE_H_ */
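
For orientation, here is a brief usage sketch of the relocated 'Genode::Cpu_state' type. The dump helper and the use of <base/printf.h> are illustrative assumptions; only the struct members and exception codes stem from the header above.

#include <base/printf.h>   /* for Genode::printf, assumed to be available here */

/* hypothetical helper that prints a few fields of a captured CPU state */
static void dump_cpu_state(Genode::Cpu_state const &state)
{
	Genode::printf("ip=%lx sp=%lx lr=%lx cpsr=%lx\n",
	               (unsigned long)state.ip, (unsigned long)state.sp,
	               (unsigned long)state.lr, (unsigned long)state.cpsr);

	if (state.cpu_exception == Genode::Cpu_state::DATA_ABORT)
		Genode::printf("last exception was a data abort\n");
}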


@@ -15,8 +15,8 @@
#ifndef _INCLUDE__ARM__CPU__STRING_H_
#define _INCLUDE__ARM__CPU__STRING_H_
namespace Genode
{
namespace Genode {
/**
* Copy memory block
*


@@ -16,8 +16,8 @@
#ifndef _INCLUDE__ARM__VFP__STRING_H_
#define _INCLUDE__ARM__VFP__STRING_H_
namespace Genode
{
namespace Genode {
/**
* Copy memory block
*


@@ -14,178 +14,178 @@
#ifndef _INCLUDE__BASE__AFFINITY_H_
#define _INCLUDE__BASE__AFFINITY_H_
namespace Genode {
/**
* Affinity to CPU nodes
*
* The entity of CPU nodes is expected to form a grid where the Euclidean
* distance between nodes roughly correlates to the locality of their
* respective resources. Closely interacting processes are supposed to
* perform best when using nodes close to each other. To allow a relatively
* simple specification of such constraints, the affinity of a subsystem
* (e.g., a process) to CPU nodes is expressed as a rectangle within the
* grid of available CPU nodes. The dimensions of the grid are represented
* by 'Affinity::Space'. The rectangle within the grid is represented by
* 'Affinity::Location'.
*/
class Affinity
{
public:
class Location;
/**
* Bounds of the affinity name space
*
* An 'Affinity::Space' defines the bounds of a Cartesian
* coordinate space that expresses the entity of available CPU
* nodes. The dimension values do not necessarily correspond to
* physical CPU numbers. They solely represent the range the
* 'Affinity::Location' is relative to.
*/
class Space
{
private:
unsigned _width, _height;
public:
Space() : _width(0), _height(0) { }
/**
* Construct a two-dimensional affinity space
*/
Space(unsigned width, unsigned height)
: _width(width), _height(height) { }
/**
* Construct one-dimensional affinity space
*/
Space(unsigned size) : _width(size), _height(1) { }
unsigned width() const { return _width; }
unsigned height() const { return _height; }
unsigned total() const { return _width*_height; }
Space multiply(Space const &other) const
{
return Space(_width*other.width(), _height*other.height());
}
/**
* Return the location of the Nth CPU within the affinity
* space
*
* This function returns a valid location even if the index
* is larger than the number of CPUs in the space. In this
* case, the x and y coordinates are wrapped by the bounds
* of the space.
*/
inline Location location_of_index(int index);
};
namespace Genode { class Affinity; }
/**
* Location within 'Space'
*/
class Location
{
private:
/**
* Affinity to CPU nodes
*
* The entity of CPU nodes is expected to form a grid where the Euclidean
* distance between nodes roughly correlates to the locality of their
* respective resources. Closely interacting processes are supposed to
* perform best when using nodes close to each other. To allow a relatively
* simple specification of such constraints, the affinity of a subsystem
* (e.g., a process) to CPU nodes is expressed as a rectangle within the
* grid of available CPU nodes. The dimensions of the grid are represented
* by 'Affinity::Space'. The rectangle within the grid is represented by
* 'Affinity::Location'.
*/
class Genode::Affinity
{
public:
int _xpos, _ypos;
unsigned _width, _height;
class Location;
public:
/**
* Bounds of the affinity name space
*
* An 'Affinity::Space' defines the bounds of a Cartesian
* coordinate space that expresses the entity of available CPU
* nodes. The dimension values do not necessarily correspond to
* physical CPU numbers. They solely represent the range the
* 'Affinity::Location' is relative to.
*/
class Space
{
private:
/**
* Default constructor creates invalid location
*/
Location() : _xpos(0), _ypos(0), _width(0), _height(0) { }
unsigned _width, _height;
/**
* Constructor to express the affinity to a single CPU
*/
Location(int xpos, unsigned ypos)
: _xpos(xpos), _ypos(ypos), _width(1), _height(1) { }
public:
/**
* Constructor to express the affinity to a set of CPUs
*/
Location(int xpos, int ypos, unsigned width, unsigned height)
: _xpos(xpos), _ypos(ypos), _width(width), _height(height) { }
Space() : _width(0), _height(0) { }
int xpos() const { return _xpos; }
int ypos() const { return _ypos; }
unsigned width() const { return _width; }
unsigned height() const { return _height; }
bool valid() const { return _width*_height > 0; }
Location multiply_position(Space const &space) const
{
return Location(_xpos*space.width(), _ypos*space.height(),
_width, _height);
}
Location transpose(int dx, int dy) const
{
return Location(_xpos + dx, _ypos + dy, _width, _height);
}
};
private:
Space _space;
Location _location;
public:
Affinity(Space const &space, Location const &location)
: _space(space), _location(location) { }
Affinity() { }
Space space() const { return _space; }
Location location() const { return _location; }
/**
* Return location scaled to specified affinity space
*/
Location scale_to(Space const &space) const
{
if (_space.total() == 0)
return Location();
/*
* Calculate coordinates of rectangle corners
*
* P1 is the upper left corner, inside the rectangle.
* P2 is the lower right corner, outside the rectangle.
/**
* Construct a two-dimensional affinity space
*/
int const x1 = _location.xpos(),
y1 = _location.ypos(),
x2 = _location.width() + x1,
y2 = _location.height() + y1;
Space(unsigned width, unsigned height)
: _width(width), _height(height) { }
/* scale corner positions */
int const scaled_x1 = (x1*space.width()) / _space.width(),
scaled_y1 = (y1*space.height()) / _space.height(),
scaled_x2 = (x2*space.width()) / _space.width(),
scaled_y2 = (y2*space.height()) / _space.height();
/**
* Construct one-dimensional affinity space
*/
Space(unsigned size) : _width(size), _height(1) { }
/* make sure to not scale the location size to zero */
return Location(scaled_x1, scaled_y1,
max(scaled_x2 - scaled_x1, 1),
max(scaled_y2 - scaled_y1, 1));
}
};
unsigned width() const { return _width; }
unsigned height() const { return _height; }
unsigned total() const { return _width*_height; }
Space multiply(Space const &other) const
{
return Space(_width*other.width(), _height*other.height());
}
/**
* Return the location of the Nth CPU within the affinity
* space
*
* This function returns a valid location even if the index
* is larger than the number of CPUs in the space. In this
* case, the x and y coordinates are wrapped by the bounds
* of the space.
*/
inline Location location_of_index(int index);
};
Affinity::Location Affinity::Space::location_of_index(int index)
{
return Location(index % _width, (index / _width) % _height, 1, 1);
}
/**
* Location within 'Space'
*/
class Location
{
private:
int _xpos, _ypos;
unsigned _width, _height;
public:
/**
* Default constructor creates invalid location
*/
Location() : _xpos(0), _ypos(0), _width(0), _height(0) { }
/**
* Constructor to express the affinity to a single CPU
*/
Location(int xpos, unsigned ypos)
: _xpos(xpos), _ypos(ypos), _width(1), _height(1) { }
/**
* Constructor to express the affinity to a set of CPUs
*/
Location(int xpos, int ypos, unsigned width, unsigned height)
: _xpos(xpos), _ypos(ypos), _width(width), _height(height) { }
int xpos() const { return _xpos; }
int ypos() const { return _ypos; }
unsigned width() const { return _width; }
unsigned height() const { return _height; }
bool valid() const { return _width*_height > 0; }
Location multiply_position(Space const &space) const
{
return Location(_xpos*space.width(), _ypos*space.height(),
_width, _height);
}
Location transpose(int dx, int dy) const
{
return Location(_xpos + dx, _ypos + dy, _width, _height);
}
};
private:
Space _space;
Location _location;
public:
Affinity(Space const &space, Location const &location)
: _space(space), _location(location) { }
Affinity() { }
Space space() const { return _space; }
Location location() const { return _location; }
/**
* Return location scaled to specified affinity space
*/
Location scale_to(Space const &space) const
{
if (_space.total() == 0)
return Location();
/*
* Calculate coordinates of rectangle corners
*
* P1 is the upper left corner, inside the rectangle.
* P2 is the lower right corner, outside the rectangle.
*/
int const x1 = _location.xpos(),
y1 = _location.ypos(),
x2 = _location.width() + x1,
y2 = _location.height() + y1;
/* scale corner positions */
int const scaled_x1 = (x1*space.width()) / _space.width(),
scaled_y1 = (y1*space.height()) / _space.height(),
scaled_x2 = (x2*space.width()) / _space.width(),
scaled_y2 = (y2*space.height()) / _space.height();
/* make sure to not scale the location size to zero */
return Location(scaled_x1, scaled_y1,
max(scaled_x2 - scaled_x1, 1),
max(scaled_y2 - scaled_y1, 1));
}
};
Genode::Affinity::Location Genode::Affinity::Space::location_of_index(int index)
{
return Location(index % _width, (index / _width) % _height, 1, 1);
}
#endif /* _INCLUDE__BASE__AFFINITY_H_ */
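
As a usage illustration of the interface above, the following is a sketch only; the concrete grid dimensions are made up.

#include <base/affinity.h>

static void affinity_example()
{
	using namespace Genode;

	/* a 4x2 grid of CPU nodes */
	Affinity::Space space(4, 2);

	/* index 5 is wrapped into the bounds of the space */
	Affinity::Location loc = space.location_of_index(5);

	/* subsystem constrained to the upper-left 2x2 rectangle */
	Affinity affinity(space, Affinity::Location(0, 0, 2, 2));

	/* re-express the location within a larger 8x4 space */
	Affinity::Location scaled = affinity.scale_to(Affinity::Space(8, 4));

	(void)loc; (void)scaled;
}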


@@ -19,187 +19,9 @@
namespace Genode {
struct Deallocator
{
/**
* Free a previously allocated block
*/
virtual void free(void *addr, size_t size) = 0;
/**
* Return true if the size argument of 'free' is required
*
* The generic 'Allocator' interface requires the caller of 'free'
* to supply a valid size argument but not all implementations make
* use of this argument. If this function returns false, it is safe
* to call 'free' with an invalid size.
*
* Allocators that rely on the size argument must not be used for
* constructing objects whose constructors may throw exceptions.
* See the documentation of 'operator delete(void *, Allocator *)'
* below for more details.
*/
virtual bool need_size_for_free() const = 0;
};
struct Allocator : Deallocator
{
/**
* Exception type
*/
class Out_of_memory : public Exception { };
/**
* Destructor
*/
virtual ~Allocator() { }
/**
* Allocate block
*
* \param size block size to allocate
* \param out_addr resulting pointer to the new block,
* undefined in the error case
* \return true on success
*/
virtual bool alloc(size_t size, void **out_addr) = 0;
/**
* Allocate typed block
*
* This template allocates a typed block returned as a pointer to
* a non-void type. By providing this function, we prevent the
* compiler from warning us about "dereferencing type-punned
* pointer will break strict-aliasing rules".
*/
template <typename T> bool alloc(size_t size, T **out_addr)
{
void *addr = 0;
bool ret = alloc(size, &addr);
*out_addr = (T *)addr;
return ret;
}
/**
* Return total amount of backing store consumed by the allocator
*/
virtual size_t consumed() { return 0; }
/**
* Return meta-data overhead per block
*/
virtual size_t overhead(size_t size) = 0;
/***************************
** Convenience functions **
***************************/
/**
* Allocate block and signal error as an exception
*
* \param size block size to allocate
* \return pointer to the new block
* \throw Out_of_memory
*/
void *alloc(size_t size)
{
void *result = 0;
if (!alloc(size, &result))
throw Out_of_memory();
return result;
}
};
struct Range_allocator : Allocator
{
/**
* Destructor
*/
virtual ~Range_allocator() { }
/**
* Add free address range to allocator
*/
virtual int add_range(addr_t base, size_t size) = 0;
/**
* Remove address range from allocator
*/
virtual int remove_range(addr_t base, size_t size) = 0;
/**
* Return value of allocation functions
*
* 'OK' on success, or
* 'OUT_OF_METADATA' if meta-data allocation failed, or
* 'RANGE_CONFLICT' if no fitting address range is found
*/
struct Alloc_return
{
enum Value { OK = 0, OUT_OF_METADATA = -1, RANGE_CONFLICT = -2 };
Value const value;
Alloc_return(Value value) : value(value) { }
bool is_ok() const { return value == OK; }
bool is_error() const { return !is_ok(); }
};
/**
* Allocate block
*
* \param size size of new block
* \param out_addr start address of new block,
* undefined in the error case
* \param align alignment of new block specified
* as the power of two
*/
virtual Alloc_return alloc_aligned(size_t size, void **out_addr, int align = 0, addr_t from=0, addr_t to = ~0UL) = 0;
/**
* Allocate block at address
*
* \param size size of new block
* \param addr desired address of block
*
* \return 'ALLOC_OK' on success, or
* 'OUT_OF_METADATA' if meta-data allocation failed, or
* 'RANGE_CONFLICT' if specified range is occupied
*/
virtual Alloc_return alloc_addr(size_t size, addr_t addr) = 0;
/**
* Free a previously allocated block
*
* NOTE: We have to declare the 'Allocator::free(void *)' function
* here as well to make the compiler happy. Otherwise the C++
* overload resolution would not find 'Allocator::free(void *)'.
*/
virtual void free(void *addr) = 0;
virtual void free(void *addr, size_t size) = 0;
/**
* Return the sum of available memory
*
* Note that the returned value is not necessarily allocatable
* because the memory may be fragmented.
*/
virtual size_t avail() = 0;
/**
* Check if address is inside an allocated block
*
* \param addr address to check
*
* \return true if address is inside an allocated block, false
* otherwise
*/
virtual bool valid_addr(addr_t addr) = 0;
};
struct Deallocator;
struct Allocator;
struct Range_allocator;
/**
* Destroy object
@@ -221,6 +43,189 @@ namespace Genode {
template <typename T, typename DEALLOC> void destroy(DEALLOC && dealloc, T *obj);
}
struct Genode::Deallocator
{
/**
* Free a previously allocated block
*/
virtual void free(void *addr, size_t size) = 0;
/**
* Return true if the size argument of 'free' is required
*
* The generic 'Allocator' interface requires the caller of 'free'
* to supply a valid size argument but not all implementations make
* use of this argument. If this function returns false, it is safe
* to call 'free' with an invalid size.
*
* Allocators that rely on the size argument must not be used for
* constructing objects whose constructors may throw exceptions.
* See the documentation of 'operator delete(void *, Allocator *)'
* below for more details.
*/
virtual bool need_size_for_free() const = 0;
};
struct Genode::Allocator : Deallocator
{
/**
* Exception type
*/
class Out_of_memory : public Exception { };
/**
* Destructor
*/
virtual ~Allocator() { }
/**
* Allocate block
*
* \param size block size to allocate
* \param out_addr resulting pointer to the new block,
* undefined in the error case
* \return true on success
*/
virtual bool alloc(size_t size, void **out_addr) = 0;
/**
* Allocate typed block
*
* This template allocates a typed block returned as a pointer to
* a non-void type. By providing this function, we prevent the
* compiler from warning us about "dereferencing type-punned
* pointer will break strict-aliasing rules".
*/
template <typename T> bool alloc(size_t size, T **out_addr)
{
void *addr = 0;
bool ret = alloc(size, &addr);
*out_addr = (T *)addr;
return ret;
}
/**
* Return total amount of backing store consumed by the allocator
*/
virtual size_t consumed() { return 0; }
/**
* Return meta-data overhead per block
*/
virtual size_t overhead(size_t size) = 0;
/***************************
** Convenience functions **
***************************/
/**
* Allocate block and signal error as an exception
*
* \param size block size to allocate
* \return pointer to the new block
* \throw Out_of_memory
*/
void *alloc(size_t size)
{
void *result = 0;
if (!alloc(size, &result))
throw Out_of_memory();
return result;
}
};
struct Genode::Range_allocator : Allocator
{
/**
* Destructor
*/
virtual ~Range_allocator() { }
/**
* Add free address range to allocator
*/
virtual int add_range(addr_t base, size_t size) = 0;
/**
* Remove address range from allocator
*/
virtual int remove_range(addr_t base, size_t size) = 0;
/**
* Return value of allocation functions
*
* 'OK' on success, or
* 'OUT_OF_METADATA' if meta-data allocation failed, or
* 'RANGE_CONFLICT' if no fitting address range is found
*/
struct Alloc_return
{
enum Value { OK = 0, OUT_OF_METADATA = -1, RANGE_CONFLICT = -2 };
Value const value;
Alloc_return(Value value) : value(value) { }
bool is_ok() const { return value == OK; }
bool is_error() const { return !is_ok(); }
};
/**
* Allocate block
*
* \param size size of new block
* \param out_addr start address of new block,
* undefined in the error case
* \param align alignment of new block specified
* as the power of two
*/
virtual Alloc_return alloc_aligned(size_t size, void **out_addr, int align = 0, addr_t from=0, addr_t to = ~0UL) = 0;
/**
* Allocate block at address
*
* \param size size of new block
* \param addr desired address of block
*
* \return 'ALLOC_OK' on success, or
* 'OUT_OF_METADATA' if meta-data allocation failed, or
* 'RANGE_CONFLICT' if specified range is occupied
*/
virtual Alloc_return alloc_addr(size_t size, addr_t addr) = 0;
/**
* Free a previously allocated block
*
* NOTE: We have to declare the 'Allocator::free(void *)' function
* here as well to make the compiler happy. Otherwise the C++
* overload resolution would not find 'Allocator::free(void *)'.
*/
virtual void free(void *addr) = 0;
virtual void free(void *addr, size_t size) = 0;
/**
* Return the sum of available memory
*
* Note that the returned value is not necessarily allocatable
* because the memory may be fragmented.
*/
virtual size_t avail() = 0;
/**
* Check if address is inside an allocated block
*
* \param addr address to check
*
* \return true if address is inside an allocated block, false
* otherwise
*/
virtual bool valid_addr(addr_t addr) = 0;
};
void *operator new (Genode::size_t, Genode::Allocator *);
void *operator new [] (Genode::size_t, Genode::Allocator *);
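
A short sketch of how the 'Allocator' interface above is typically used; the 16-integer buffer is arbitrary and the include path is an assumption.

#include <base/allocator.h>

static void allocator_example(Genode::Allocator &alloc)
{
	/* typed allocation via the 'alloc' template, returns false on failure */
	int *buf = 0;
	if (!alloc.alloc(16*sizeof(int), &buf))
		return;

	/* ... use 'buf' ... */

	/* pass the size back unless 'need_size_for_free()' returns false */
	alloc.free(buf, 16*sizeof(int));
}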


@@ -25,309 +25,10 @@
namespace Genode {
class Allocator_avl_base : public Range_allocator
{
private:
static bool _sum_in_range(addr_t addr, addr_t offset) {
return (~0UL - addr > offset); }
protected:
class Block : public Avl_node<Block>
{
private:
addr_t _addr; /* base address */
size_t _size; /* size of block */
bool _used; /* block is in use */
short _id; /* for debugging */
size_t _max_avail; /* biggest free block size of subtree */
/**
* Request max_avail value of subtree
*/
inline size_t _child_max_avail(bool side) {
return child(side) ? child(side)->max_avail() : 0; }
/**
* Query if block can hold a specified subblock
*
* \param n number of bytes
* \param from minimum start address of subblock
* \param to maximum end address of subblock
* \param align alignment (power of two)
* \return true if block fits
*/
inline bool _fits(size_t n, unsigned align,
addr_t from, addr_t to)
{
addr_t a = align_addr(addr() < from ? from : addr(),
align);
return (a >= addr()) && _sum_in_range(a, n) &&
(a - addr() + n <= avail()) && (a + n - 1 <= to);
}
public:
/**
* Avl_node interface: compare two nodes
*/
bool higher(Block *a) {
return a->_addr >= _addr; }
/**
* Avl_node interface: update meta data on node rearrangement
*/
void recompute();
/**
* Accessor functions
*/
inline int id() { return _id; }
inline addr_t addr() { return _addr; }
inline size_t avail() { return _used ? 0 : _size; }
inline size_t size() { return _size; }
inline bool used() { return _used; }
inline size_t max_avail() { return _max_avail; }
inline void used(bool used) { _used = used; }
enum { FREE = false, USED = true };
/**
* Constructor
*
* This constructor is called from the meta-data allocator during
* initialization of new meta-data blocks.
*/
Block() : _addr(0), _size(0), _used(0), _max_avail(0) { }
/**
* Constructor
*/
Block(addr_t addr, size_t size, bool used)
: _addr(addr), _size(size), _used(used),
_max_avail(used ? 0 : size)
{
static int num_blocks;
_id = ++num_blocks;
}
/**
* Find best-fitting block
*/
Block *find_best_fit(size_t size, unsigned align = 1,
addr_t from = 0UL, addr_t to = ~0UL);
/**
* Find block that contains the specified address range
*/
Block *find_by_address(addr_t addr, size_t size = 0,
bool check_overlap = 0);
/**
* Return sum of available memory in subtree
*/
size_t avail_in_subtree(void);
/**
* Debug hooks
*/
void dump();
void dump_dot(int indent = 0);
};
private:
Avl_tree<Block> _addr_tree; /* blocks sorted by base address */
Allocator *_md_alloc; /* meta-data allocator */
size_t _md_entry_size; /* size of block meta-data entry */
/**
* Alloc meta-data block
*/
Block *_alloc_block_metadata();
/**
* Alloc two meta-data blocks in a transactional way
*/
bool _alloc_two_blocks_metadata(Block **dst1, Block **dst2);
/**
* Create new block
*/
int _add_block(Block *block_metadata,
addr_t base, size_t size, bool used);
/**
* Destroy block
*/
void _destroy_block(Block *b);
/**
* Cut specified area from block
*
* The original block gets replaced by (up to) two smaller blocks
* with remaining space.
*/
void _cut_from_block(Block *b, addr_t cut_addr, size_t cut_size,
Block *dst1, Block *dst2);
protected:
/**
* Find block by specified address
*/
Block *_find_by_address(addr_t addr, size_t size = 0,
bool check_overlap = 0) const
{
Block *b = static_cast<Block *>(_addr_tree.first());
/* if the tree has one or more nodes, start search */
return b ? b->find_by_address(addr, size, check_overlap) : 0;
}
/**
* Constructor
*
* This constructor can only be called from a derived class that
* provides an allocator for block meta-data entries. This way,
* we can attach custom information to block meta data.
*/
Allocator_avl_base(Allocator *md_alloc, size_t md_entry_size) :
_md_alloc(md_alloc), _md_entry_size(md_entry_size) { }
public:
/**
* Return address of any block of the allocator
*
* \param out_addr result that contains address of block
* \return true if block was found or
* false if there is no block available
*
* If no block was found, out_addr is set to zero.
*/
bool any_block_addr(addr_t *out_addr);
/**
* Debug hook
*/
void dump_addr_tree(Block *addr_node = 0);
/*******************************
** Range allocator interface **
*******************************/
int add_range(addr_t base, size_t size);
int remove_range(addr_t base, size_t size);
Alloc_return alloc_aligned(size_t size, void **out_addr, int align = 0, addr_t from = 0, addr_t to = ~0UL);
Alloc_return alloc_addr(size_t size, addr_t addr);
void free(void *addr);
size_t avail();
bool valid_addr(addr_t addr);
/*************************
** Allocator interface **
*************************/
bool alloc(size_t size, void **out_addr) {
return (Allocator_avl_base::alloc_aligned(size, out_addr).is_ok()); }
void free(void *addr, size_t) { free(addr); }
/**
* Return size of block at specified address
*/
size_t size_at(void const *addr) const;
/**
* Return the memory overhead per Block
*
* The overhead is a rough estimation. If a block is somewhere
* in the middle of a free area, we could consider the meta data
* for the two free subareas when calculating the overhead.
*
* The 'sizeof(umword_t)' represents the overhead of the meta-data
* slab allocator.
*/
size_t overhead(size_t) { return sizeof(Block) + sizeof(umword_t); }
bool need_size_for_free() const override { return false; }
};
/**
* AVL-based allocator with custom meta data attached to each block.
*
* \param BMDT block meta-data type
*/
template <typename BMDT, unsigned SLAB_BLOCK_SIZE = 256 * sizeof(addr_t)>
class Allocator_avl_tpl : public Allocator_avl_base
{
protected:
/*
* Pump up the Block class with custom meta-data type
*/
class Block : public Allocator_avl_base::Block, public BMDT { };
Tslab<Block,SLAB_BLOCK_SIZE> _metadata; /* meta-data allocator */
char _initial_md_block[SLAB_BLOCK_SIZE]; /* first (static) meta-data block */
public:
/**
* Constructor
*
* \param metadata_chunk_alloc pointer to allocator used to allocate
* meta-data blocks. If set to 0,
* use ourself for allocating our
* meta-data blocks. This works only
* if the managed memory is completely
* accessible by the allocator.
*/
explicit Allocator_avl_tpl(Allocator *metadata_chunk_alloc) :
Allocator_avl_base(&_metadata, sizeof(Block)),
_metadata((metadata_chunk_alloc) ? metadata_chunk_alloc : this,
(Slab_block *)&_initial_md_block) { }
/**
* Assign custom meta data to block at specified address
*/
void metadata(void *addr, BMDT bmd) const
{
Block *b = static_cast<Block *>(_find_by_address((addr_t)addr));
if (b) *static_cast<BMDT *>(b) = bmd;
}
/**
* Return meta data that was attached to block at specified address
*/
BMDT* metadata(void *addr) const
{
Block *b = static_cast<Block *>(_find_by_address((addr_t)addr));
return b && b->used() ? b : 0;
}
int add_range(addr_t base, size_t size)
{
/*
* We disable slab-block allocation while processing
* add_range to prevent avalanche effects when the slab
* tries to make an allocation at an Allocator_avl that
* is still empty.
*/
Allocator *md_bs = _metadata.backing_store();
_metadata.backing_store(0);
int ret = Allocator_avl_base::add_range(base, size);
_metadata.backing_store(md_bs);
return ret;
}
};
class Allocator_avl_base;
template <typename, unsigned SLAB_BLOCK_SIZE = 256*sizeof(addr_t)>
class Allocator_avl_tpl;
/**
* Define AVL-based allocator without any meta data attached to each block
@@ -336,4 +37,308 @@ namespace Genode {
typedef Allocator_avl_tpl<Empty> Allocator_avl;
}
class Genode::Allocator_avl_base : public Range_allocator
{
private:
static bool _sum_in_range(addr_t addr, addr_t offset) {
return (~0UL - addr > offset); }
protected:
class Block : public Avl_node<Block>
{
private:
addr_t _addr; /* base address */
size_t _size; /* size of block */
bool _used; /* block is in use */
short _id; /* for debugging */
size_t _max_avail; /* biggest free block size of subtree */
/**
* Request max_avail value of subtree
*/
inline size_t _child_max_avail(bool side) {
return child(side) ? child(side)->max_avail() : 0; }
/**
* Query if block can hold a specified subblock
*
* \param n number of bytes
* \param from minimum start address of subblock
* \param to maximum end address of subblock
* \param align alignment (power of two)
* \return true if block fits
*/
inline bool _fits(size_t n, unsigned align,
addr_t from, addr_t to)
{
addr_t a = align_addr(addr() < from ? from : addr(),
align);
return (a >= addr()) && _sum_in_range(a, n) &&
(a - addr() + n <= avail()) && (a + n - 1 <= to);
}
public:
/**
* Avl_node interface: compare two nodes
*/
bool higher(Block *a) {
return a->_addr >= _addr; }
/**
* Avl_node interface: update meta data on node rearrangement
*/
void recompute();
/**
* Accessor functions
*/
inline int id() { return _id; }
inline addr_t addr() { return _addr; }
inline size_t avail() { return _used ? 0 : _size; }
inline size_t size() { return _size; }
inline bool used() { return _used; }
inline size_t max_avail() { return _max_avail; }
inline void used(bool used) { _used = used; }
enum { FREE = false, USED = true };
/**
* Constructor
*
* This constructor is called from the meta-data allocator during
* initialization of new meta-data blocks.
*/
Block() : _addr(0), _size(0), _used(0), _max_avail(0) { }
/**
* Constructor
*/
Block(addr_t addr, size_t size, bool used)
: _addr(addr), _size(size), _used(used),
_max_avail(used ? 0 : size)
{
static int num_blocks;
_id = ++num_blocks;
}
/**
* Find best-fitting block
*/
Block *find_best_fit(size_t size, unsigned align = 1,
addr_t from = 0UL, addr_t to = ~0UL);
/**
* Find block that contains the specified address range
*/
Block *find_by_address(addr_t addr, size_t size = 0,
bool check_overlap = 0);
/**
* Return sum of available memory in subtree
*/
size_t avail_in_subtree(void);
/**
* Debug hooks
*/
void dump();
void dump_dot(int indent = 0);
};
private:
Avl_tree<Block> _addr_tree; /* blocks sorted by base address */
Allocator *_md_alloc; /* meta-data allocator */
size_t _md_entry_size; /* size of block meta-data entry */
/**
* Alloc meta-data block
*/
Block *_alloc_block_metadata();
/**
* Alloc two meta-data blocks in a transactional way
*/
bool _alloc_two_blocks_metadata(Block **dst1, Block **dst2);
/**
* Create new block
*/
int _add_block(Block *block_metadata,
addr_t base, size_t size, bool used);
/**
* Destroy block
*/
void _destroy_block(Block *b);
/**
* Cut specified area from block
*
* The original block gets replaced by (up to) two smaller blocks
* with remaining space.
*/
void _cut_from_block(Block *b, addr_t cut_addr, size_t cut_size,
Block *dst1, Block *dst2);
protected:
/**
* Find block by specified address
*/
Block *_find_by_address(addr_t addr, size_t size = 0,
bool check_overlap = 0) const
{
Block *b = static_cast<Block *>(_addr_tree.first());
/* if the tree has one or more nodes, start search */
return b ? b->find_by_address(addr, size, check_overlap) : 0;
}
/**
* Constructor
*
* This constructor can only be called from a derived class that
* provides an allocator for block meta-data entries. This way,
* we can attach custom information to block meta data.
*/
Allocator_avl_base(Allocator *md_alloc, size_t md_entry_size) :
_md_alloc(md_alloc), _md_entry_size(md_entry_size) { }
public:
/**
* Return address of any block of the allocator
*
* \param out_addr result that contains address of block
* \return true if block was found or
* false if there is no block available
*
* If no block was found, out_addr is set to zero.
*/
bool any_block_addr(addr_t *out_addr);
/**
* Debug hook
*/
void dump_addr_tree(Block *addr_node = 0);
/*******************************
** Range allocator interface **
*******************************/
int add_range(addr_t base, size_t size);
int remove_range(addr_t base, size_t size);
Alloc_return alloc_aligned(size_t size, void **out_addr, int align = 0, addr_t from = 0, addr_t to = ~0UL);
Alloc_return alloc_addr(size_t size, addr_t addr);
void free(void *addr);
size_t avail();
bool valid_addr(addr_t addr);
/*************************
** Allocator interface **
*************************/
bool alloc(size_t size, void **out_addr) {
return (Allocator_avl_base::alloc_aligned(size, out_addr).is_ok()); }
void free(void *addr, size_t) { free(addr); }
/**
* Return size of block at specified address
*/
size_t size_at(void const *addr) const;
/**
* Return the memory overhead per Block
*
* The overhead is a rough estimation. If a block is somewhere
* in the middle of a free area, we could consider the meta data
* for the two free subareas when calculating the overhead.
*
* The 'sizeof(umword_t)' represents the overhead of the meta-data
* slab allocator.
*/
size_t overhead(size_t) { return sizeof(Block) + sizeof(umword_t); }
bool need_size_for_free() const override { return false; }
};
/**
* AVL-based allocator with custom meta data attached to each block.
*
* \param BMDT block meta-data type
*/
template <typename BMDT, unsigned SLAB_BLOCK_SIZE>
class Genode::Allocator_avl_tpl : public Allocator_avl_base
{
protected:
/*
* Pump up the Block class with custom meta-data type
*/
class Block : public Allocator_avl_base::Block, public BMDT { };
Tslab<Block,SLAB_BLOCK_SIZE> _metadata; /* meta-data allocator */
char _initial_md_block[SLAB_BLOCK_SIZE]; /* first (static) meta-data block */
public:
/**
* Constructor
*
* \param metadata_chunk_alloc pointer to allocator used to allocate
* meta-data blocks. If set to 0,
* use ourself for allocating our
* meta-data blocks. This works only
* if the managed memory is completely
* accessible by the allocator.
*/
explicit Allocator_avl_tpl(Allocator *metadata_chunk_alloc) :
Allocator_avl_base(&_metadata, sizeof(Block)),
_metadata((metadata_chunk_alloc) ? metadata_chunk_alloc : this,
(Slab_block *)&_initial_md_block) { }
/**
* Assign custom meta data to block at specified address
*/
void metadata(void *addr, BMDT bmd) const
{
Block *b = static_cast<Block *>(_find_by_address((addr_t)addr));
if (b) *static_cast<BMDT *>(b) = bmd;
}
/**
* Return meta data that was attached to block at specified address
*/
BMDT* metadata(void *addr) const
{
Block *b = static_cast<Block *>(_find_by_address((addr_t)addr));
return b && b->used() ? b : 0;
}
int add_range(addr_t base, size_t size)
{
/*
* We disable slab-block allocation while processing
* add_range to prevent avalanche effects when the slab
* tries to make an allocation at an Allocator_avl that
* is still empty.
*/
Allocator *md_bs = _metadata.backing_store();
_metadata.backing_store(0);
int ret = Allocator_avl_base::add_range(base, size);
_metadata.backing_store(md_bs);
return ret;
}
};
#endif /* _INCLUDE__BASE__ALLOCATOR_AVL_H_ */
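
To illustrate the range-allocator interface implemented by 'Allocator_avl_base', here is a small sketch using the 'Allocator_avl' typedef; the managed range and block sizes are arbitrary.

#include <base/allocator_avl.h>

static void allocator_avl_example(Genode::Allocator *md_alloc)
{
	using namespace Genode;

	/* meta data for the AVL nodes is taken from 'md_alloc' */
	Allocator_avl avl(md_alloc);

	/* hand a free address range to the allocator */
	avl.add_range(0x100000, 0x10000);

	/* allocate a 4 KiB block aligned to 2^12 */
	void *addr = 0;
	if (avl.alloc_aligned(4096, &addr, 12).is_ok())
		avl.free(addr, 4096);
}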


@@ -18,95 +18,95 @@
#include <base/printf.h>
#include <base/stdint.h>
namespace Genode {
namespace Genode { class Allocator_guard; }
/**
* This class acts as a guard for arbitrary allocators to limit
* memory exhaustion
*/
class Allocator_guard : public Allocator
{
private:
Allocator *_allocator; /* allocator to guard */
size_t _amount; /* total amount */
size_t _consumed; /* already consumed bytes */
/**
* This class acts as a guard for arbitrary allocators to limit
* memory exhaustion
*/
class Genode::Allocator_guard : public Allocator
{
private:
public:
Allocator *_allocator; /* allocator to guard */
size_t _amount; /* total amount */
size_t _consumed; /* already consumed bytes */
Allocator_guard(Allocator *allocator, size_t amount)
: _allocator(allocator), _amount(amount), _consumed(0) { }
public:
/**
* Extend allocation limit
*/
void upgrade(size_t additional_amount) {
_amount += additional_amount; }
Allocator_guard(Allocator *allocator, size_t amount)
: _allocator(allocator), _amount(amount), _consumed(0) { }
/**
* Consume bytes without actually allocating them
*/
bool withdraw(size_t size)
{
if ((_amount - _consumed) < size)
return false;
/**
* Extend allocation limit
*/
void upgrade(size_t additional_amount) {
_amount += additional_amount; }
_consumed += size;
return true;
/**
* Consume bytes without actually allocating them
*/
bool withdraw(size_t size)
{
if ((_amount - _consumed) < size)
return false;
_consumed += size;
return true;
}
/*************************
** Allocator interface **
*************************/
/**
* Allocate block
*
* \param size block size to allocate
* \param out_addr resulting pointer to the new block,
* undefined in the error case
* \return true on success
*/
bool alloc(size_t size, void **out_addr) override
{
if ((_amount - _consumed) < (size + _allocator->overhead(size))) {
PWRN("Quota exceeded! amount=%zu, size=%zu, consumed=%zu",
_amount, (size + _allocator->overhead(size)), _consumed);
return false;
}
bool b = _allocator->alloc(size, out_addr);
if (b)
_consumed += size + _allocator->overhead(size);
return b;
}
/*************************
** Allocator interface **
*************************/
/**
* Free a previously allocated block
*/
void free(void *addr, size_t size) override
{
_allocator->free(addr, size);
_consumed -= size + _allocator->overhead(size);
}
/**
* Allocate block
*
* \param size block size to allocate
* \param out_addr resulting pointer to the new block,
* undefined in the error case
* \return true on success
*/
bool alloc(size_t size, void **out_addr)
{
if ((_amount - _consumed) < (size + _allocator->overhead(size))) {
PWRN("Quota exceeded! amount=%zu, size=%zu, consumed=%zu",
_amount, (size + _allocator->overhead(size)), _consumed);
return false;
}
bool b = _allocator->alloc(size, out_addr);
if (b)
_consumed += size + _allocator->overhead(size);
return b;
}
/**
* Return amount of backing store consumed by the allocator
*/
size_t consumed() override { return _consumed; }
/**
* Free a previously allocated block
*/
void free(void *addr, size_t size)
{
_allocator->free(addr, size);
_consumed -= size + _allocator->overhead(size);
}
/**
* Return allocation limit
*/
size_t quota() const { return _amount; }
/**
* Return amount of backing store consumed by the allocator
*/
size_t consumed() { return _consumed; }
/**
* Return meta-data overhead per block
*/
size_t overhead(size_t size) override { return _allocator->overhead(size); }
/**
* Return allocation limit
*/
size_t quota() const { return _amount; }
/**
* Return meta-data overhead per block
*/
size_t overhead(size_t size) { return _allocator->overhead(size); }
bool need_size_for_free() const override {
return _allocator->need_size_for_free(); }
};
}
bool need_size_for_free() const override {
return _allocator->need_size_for_free(); }
};
#endif /* _ALLOCATOR_GUARD_H_ */
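
A minimal sketch of guarding an existing allocator with a quota follows; the quota value and sizes are made up, and the include path is assumed from the guard's name.

#include <base/allocator_guard.h>   /* assumed location of the header */

static void guard_example(Genode::Allocator *backing)
{
	/* allow at most 64 KiB to be drawn from 'backing' */
	Genode::Allocator_guard guard(backing, 64*1024);

	void *addr = 0;
	if (!guard.alloc(1024, &addr))
		return;                      /* quota exceeded or backing failure */

	guard.free(addr, 1024);

	/* the limit can be raised later on */
	guard.upgrade(16*1024);
}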


@@ -23,6 +23,8 @@
#include <base/exception.h>
namespace Genode { class Blocking_canceled : public Exception { }; }
namespace Genode { class Blocking_canceled; }
class Genode::Blocking_canceled : public Exception { };
#endif /* _INCLUDE__BASE__BLOCKING_H_ */


@@ -15,6 +15,8 @@
#define _INCLUDE__BASE__CACHE_H_
namespace Genode {
enum Cache_attribute { UNCACHED, WRITE_COMBINED, CACHED };
}
#endif /* _INCLUDE__BASE__CACHE_H_ */


@@ -20,76 +20,78 @@
namespace Genode {
class Thread_base;
class Cancelable_lock
{
private:
class Applicant
{
private:
Thread_base *_thread_base;
Applicant *_to_wake_up;
public:
explicit Applicant(Thread_base *thread_base)
: _thread_base(thread_base), _to_wake_up(0) { }
void applicant_to_wake_up(Applicant *to_wake_up) {
_to_wake_up = to_wake_up; }
Applicant *applicant_to_wake_up() { return _to_wake_up; }
Thread_base *thread_base() { return _thread_base; }
/**
* Called from previous lock owner
*/
void wake_up();
bool operator == (Applicant &a) { return _thread_base == a.thread_base(); }
bool operator != (Applicant &a) { return _thread_base != a.thread_base(); }
};
/*
* Note that modifications of the applicants queue must be performed
* atomically. Hence, we use the additional spinlock here.
*/
volatile int _spinlock_state;
volatile int _state;
Applicant* volatile _last_applicant;
Applicant _owner;
public:
enum State { LOCKED, UNLOCKED };
/**
* Constructor
*/
explicit Cancelable_lock(State initial = UNLOCKED);
/**
* Try to acquire the lock and block while the lock is not free
*
* This function may throw a Genode::Blocking_canceled exception.
*/
void lock();
/**
* Release lock
*/
void unlock();
/**
* Lock guard
*/
typedef Genode::Lock_guard<Cancelable_lock> Guard;
};
class Cancelable_lock;
}
class Genode::Cancelable_lock
{
private:
class Applicant
{
private:
Thread_base *_thread_base;
Applicant *_to_wake_up;
public:
explicit Applicant(Thread_base *thread_base)
: _thread_base(thread_base), _to_wake_up(0) { }
void applicant_to_wake_up(Applicant *to_wake_up) {
_to_wake_up = to_wake_up; }
Applicant *applicant_to_wake_up() { return _to_wake_up; }
Thread_base *thread_base() { return _thread_base; }
/**
* Called from previous lock owner
*/
void wake_up();
bool operator == (Applicant &a) { return _thread_base == a.thread_base(); }
bool operator != (Applicant &a) { return _thread_base != a.thread_base(); }
};
/*
* Note that modifications of the applicants queue must be performed
* atomically. Hence, we use the additional spinlock here.
*/
volatile int _spinlock_state;
volatile int _state;
Applicant* volatile _last_applicant;
Applicant _owner;
public:
enum State { LOCKED, UNLOCKED };
/**
* Constructor
*/
explicit Cancelable_lock(State initial = UNLOCKED);
/**
* Try to acquire the lock and block while the lock is not free
*
* This function may throw a Genode::Blocking_canceled exception.
*/
void lock();
/**
* Release lock
*/
void unlock();
/**
* Lock guard
*/
typedef Genode::Lock_guard<Cancelable_lock> Guard;
};
#endif /* _INCLUDE__BASE__CANCELABLE_LOCK_H_ */
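
For context, the typical use of the lock declared above is via its 'Guard' type, shown in the following sketch; the include path and the shared-state access are assumptions.

#include <base/cancelable_lock.h>   /* assumed include path of the header */

static Genode::Cancelable_lock _lock;

static void critical_section()
{
	/* acquiring the lock may throw 'Genode::Blocking_canceled' */
	Genode::Cancelable_lock::Guard guard(_lock);

	/* ... access shared state, the lock is released when 'guard' leaves scope ... */
}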


@@ -33,299 +33,307 @@ namespace Genode {
*/
typedef Native_capability Untyped_capability;
template <typename> class Capability;
/**
* Capability referring to a specific RPC interface
*
* \param RPC_INTERFACE class containing the RPC interface declaration
*/
template <typename RPC_INTERFACE>
class Capability : public Untyped_capability
{
private:
Capability<RPC_INTERFACE> reinterpret_cap_cast(Untyped_capability const &);
/**
* Insert RPC arguments into the message buffer
*/
template <typename ATL>
void _marshal_args(Ipc_client &ipc_client, ATL &args) const;
void _marshal_args(Ipc_client &, Meta::Empty &) const { }
/**
* Unmarshal single RPC argument from the message buffer
*/
template <typename T>
void _unmarshal_result(Ipc_client &ipc_client, T &arg,
Meta::Overload_selector<Rpc_arg_out>) const;
template <typename T>
void _unmarshal_result(Ipc_client &ipc_client, T &arg,
Meta::Overload_selector<Rpc_arg_inout>) const;
template <typename T>
void _unmarshal_result(Ipc_client &, T &,
Meta::Overload_selector<Rpc_arg_in>) const { }
/**
* Read RPC results from the message buffer
*/
template <typename ATL>
void _unmarshal_results(Ipc_client &ipc_client, ATL &args) const;
void _unmarshal_results(Ipc_client &, Meta::Empty &) const { }
/**
* Check RPC return code for the occurrence of exceptions
*
* A server-side exception is indicated by a non-zero exception
* code. Each exception code corresponds to an entry in the
* exception type list specified in the RPC function declaration.
* The '_check_for_exception' function template throws the
* exception type belonging to the received exception code.
*/
template <typename EXC_TL>
void _check_for_exceptions(Rpc_exception_code const exc_code,
Meta::Overload_selector<EXC_TL>) const
{
enum { EXCEPTION_CODE = RPC_EXCEPTION_BASE - Meta::Length<EXC_TL>::Value };
if (exc_code == EXCEPTION_CODE)
throw typename EXC_TL::Head();
_check_for_exceptions(exc_code, Meta::Overload_selector<typename EXC_TL::Tail>());
}
void _check_for_exceptions(Rpc_exception_code const,
Meta::Overload_selector<Meta::Empty>) const
{ }
/**
* Perform RPC call, arguments passed as a nested 'Ref_tuple' object
*/
template <typename IF>
void _call(typename IF::Client_args &args,
typename IF::Ret_type &ret) const;
/**
* Shortcut for querying argument types used in 'call' functions
*/
template <typename IF, unsigned I>
struct Arg
{
typedef typename Meta::Type_at<typename IF::Client_args, I>::Type Type;
};
template <typename FROM_RPC_INTERFACE>
Untyped_capability
_check_compatibility(Capability<FROM_RPC_INTERFACE> const &cap) const
{
FROM_RPC_INTERFACE *from = 0;
RPC_INTERFACE *to = from;
(void)to;
return cap;
}
/**
* Private constructor, should be used by the local-capability
* factory method only.
*
* \param ptr pointer to the local object this capability represents.
*/
Capability(void *ptr) : Untyped_capability(ptr) {}
/**
* Wrapper for the return type instantiated by 'call' overloads
*
* Each 'call' overload creates an instance of the return value
* type as local variable. A reference to this variable is passed
* to the '_call' function, which will assign its value. Even
* though the variable does not need to be initialized prior the
* call of '_call', the GCC will still complain "warning: ret may
* be used uninitialized in this function". Wrapping the return
* value in a struct silences the compiler.
*/
template <typename IF>
struct Return
{
typedef typename Trait::Call_return<typename IF::Ret_type>::Type
Return_type;
volatile Return_type _value;
Return_type &value() { return *(Return_type *)(&_value); }
};
public:
typedef RPC_INTERFACE Rpc_interface;
/**
* Constructor
*
* This implicit constructor checks at compile time for the
* compatibility of the source and target capability types. The
* construction is performed only if the target capability type is
* identical to or a base type of the source capability type.
*/
template <typename FROM_RPC_INTERFACE>
Capability(Capability<FROM_RPC_INTERFACE> const &cap)
: Untyped_capability(_check_compatibility(cap))
{ }
/**
* Default constructor creates invalid capability
*/
Capability() { }
/**
* Factory method to construct a local-capability.
*
* Local capabilities can be used only within the local protection
* domain. They simply incorporate a pointer to some process-local
* object.
*
* \param ptr pointer to the corresponding local object.
* \return a capability that represents the local object.
*/
static Capability<RPC_INTERFACE> local_cap(RPC_INTERFACE* ptr) {
return Capability<RPC_INTERFACE>((void*)ptr); }
/**
* Dereference a local-capability.
*
* \param c the local-capability.
* \return pointer to the corresponding local object.
*/
static RPC_INTERFACE* deref(Capability<RPC_INTERFACE> c) {
return reinterpret_cast<RPC_INTERFACE*>(c.local()); }
template <typename IF>
typename Trait::Call_return<typename IF::Ret_type>::Type
call() const
{
Meta::Empty e;
Return<IF> ret;
_call<IF>(e, ret.value());
return ret.value();
}
template <typename IF>
typename Trait::Call_return<typename IF::Ret_type>::Type
call(typename Arg<IF, 0>::Type v1) const
{
Meta::Empty e;
typename IF::Client_args args(v1, e);
Return<IF> ret;
_call<IF>(args, ret.value());
return ret.value();
}
template <typename IF>
typename Trait::Call_return<typename IF::Ret_type>::Type
call(typename Arg<IF, 0>::Type v1, typename Arg<IF, 1>::Type v2) const
{
Meta::Empty e;
typename IF::Client_args args(v1, v2, e);
Return<IF> ret;
_call<IF>(args, ret.value());
return ret.value();
}
template <typename IF>
typename Trait::Call_return<typename IF::Ret_type>::Type
call(typename Arg<IF, 0>::Type v1, typename Arg<IF, 1>::Type v2,
typename Arg<IF, 2>::Type v3) const
{
Meta::Empty e;
typename IF::Client_args args(v1, v2, v3, e);
Return<IF> ret;
_call<IF>(args, ret.value());
return ret.value();
}
template <typename IF>
typename Trait::Call_return<typename IF::Ret_type>::Type
call(typename Arg<IF, 0>::Type v1, typename Arg<IF, 1>::Type v2,
typename Arg<IF, 2>::Type v3, typename Arg<IF, 3>::Type v4) const
{
Meta::Empty e;
typename IF::Client_args args(v1, v2, v3, v4, e);
Return<IF> ret;
_call<IF>(args, ret.value());
return ret.value();
}
template <typename IF>
typename Trait::Call_return<typename IF::Ret_type>::Type
call(typename Arg<IF, 0>::Type v1, typename Arg<IF, 1>::Type v2,
typename Arg<IF, 2>::Type v3, typename Arg<IF, 3>::Type v4,
typename Arg<IF, 4>::Type v5) const
{
Meta::Empty e;
typename IF::Client_args args(v1, v2, v3, v4, v5, e);
Return<IF> ret;
_call<IF>(args, ret.value());
return ret.value();
}
template <typename IF>
typename Trait::Call_return<typename IF::Ret_type>::Type
call(typename Arg<IF, 0>::Type v1, typename Arg<IF, 1>::Type v2,
typename Arg<IF, 2>::Type v3, typename Arg<IF, 3>::Type v4,
typename Arg<IF, 4>::Type v5, typename Arg<IF, 5>::Type v6) const
{
Meta::Empty e;
typename IF::Client_args args(v1, v2, v3, v4, v5, v6, e);
Return<IF> ret;
_call<IF>(args, ret.value());
return ret.value();
}
template <typename IF>
typename Trait::Call_return<typename IF::Ret_type>::Type
call(typename Arg<IF, 0>::Type v1, typename Arg<IF, 1>::Type v2,
typename Arg<IF, 2>::Type v3, typename Arg<IF, 3>::Type v4,
typename Arg<IF, 4>::Type v5, typename Arg<IF, 5>::Type v6,
typename Arg<IF, 6>::Type v7) const
{
Meta::Empty e;
typename IF::Client_args args(v1, v2, v3, v4, v5, v6, v7, e);
Return<IF> ret;
_call<IF>(args, ret.value());
return ret.value();
}
};
/**
* Convert an untyped capability to a typed capability
*/
template <typename RPC_INTERFACE>
Capability<RPC_INTERFACE>
reinterpret_cap_cast(Untyped_capability const &untyped_cap)
{
/*
* The object layout of untyped and typed capabilities is identical.
* Hence we can just use its copy constructors.
*/
Untyped_capability *ptr = const_cast<Untyped_capability*>(&untyped_cap);
return *static_cast<Capability<RPC_INTERFACE>*>(ptr);
}
/**
* Convert capability type from an interface base type to an inherited
* interface type
*/
template <typename TO_RPC_INTERFACE, typename FROM_RPC_INTERFACE>
Capability<TO_RPC_INTERFACE>
static_cap_cast(Capability<FROM_RPC_INTERFACE> cap)
{
/* check interface compatibility */
(void)static_cast<TO_RPC_INTERFACE *>((FROM_RPC_INTERFACE *)0);
Capability<TO_RPC_INTERFACE> static_cap_cast(Capability<FROM_RPC_INTERFACE>);
}
return reinterpret_cap_cast<TO_RPC_INTERFACE>(cap);
}
/**
* Capability referring to a specific RPC interface
*
* \param RPC_INTERFACE class containing the RPC interface declaration
*/
template <typename RPC_INTERFACE>
class Genode::Capability : public Untyped_capability
{
private:
/**
* Insert RPC arguments into the message buffer
*/
template <typename ATL>
void _marshal_args(Ipc_client &ipc_client, ATL &args) const;
void _marshal_args(Ipc_client &, Meta::Empty &) const { }
/**
* Unmarshal single RPC argument from the message buffer
*/
template <typename T>
void _unmarshal_result(Ipc_client &ipc_client, T &arg,
Meta::Overload_selector<Rpc_arg_out>) const;
template <typename T>
void _unmarshal_result(Ipc_client &ipc_client, T &arg,
Meta::Overload_selector<Rpc_arg_inout>) const;
template <typename T>
void _unmarshal_result(Ipc_client &, T &,
Meta::Overload_selector<Rpc_arg_in>) const { }
/**
* Read RPC results from the message buffer
*/
template <typename ATL>
void _unmarshal_results(Ipc_client &ipc_client, ATL &args) const;
void _unmarshal_results(Ipc_client &, Meta::Empty &) const { }
/**
* Check RPC return code for the occurrence of exceptions
*
* A server-side exception is indicated by a non-zero exception
* code. Each exception code corresponds to an entry in the
* exception type list specified in the RPC function declaration.
* The '_check_for_exception' function template throws the
* exception type belonging to the received exception code.
*/
template <typename EXC_TL>
void _check_for_exceptions(Rpc_exception_code const exc_code,
Meta::Overload_selector<EXC_TL>) const
{
enum { EXCEPTION_CODE = RPC_EXCEPTION_BASE - Meta::Length<EXC_TL>::Value };
if (exc_code == EXCEPTION_CODE)
throw typename EXC_TL::Head();
_check_for_exceptions(exc_code, Meta::Overload_selector<typename EXC_TL::Tail>());
}
void _check_for_exceptions(Rpc_exception_code const,
Meta::Overload_selector<Meta::Empty>) const
{ }
/**
* Perform RPC call, arguments passed as a nested 'Ref_tuple' object
*/
template <typename IF>
void _call(typename IF::Client_args &args,
typename IF::Ret_type &ret) const;
/**
* Shortcut for querying argument types used in 'call' functions
*/
template <typename IF, unsigned I>
struct Arg
{
typedef typename Meta::Type_at<typename IF::Client_args, I>::Type Type;
};
template <typename FROM_RPC_INTERFACE>
Untyped_capability
_check_compatibility(Capability<FROM_RPC_INTERFACE> const &cap) const
{
FROM_RPC_INTERFACE *from = 0;
RPC_INTERFACE *to = from;
(void)to;
return cap;
}
/**
* Private constructor, should be used by the local-capability
* factory method only.
*
* \param ptr pointer to the local object this capability represents.
*/
Capability(void *ptr) : Untyped_capability(ptr) {}
/**
* Wrapper for the return type instantiated by 'call' overloads
*
* Each 'call' overload creates an instance of the return value
* type as local variable. A reference to this variable is passed
* to the '_call' function, which will assign its value. Even
* though the variable does not need to be initialized prior the
* call of '_call', the GCC will still complain "warning: ret may
* be used uninitialized in this function". Wrapping the return
* value in a struct silences the compiler.
*/
template <typename IF>
struct Return
{
typedef typename Trait::Call_return<typename IF::Ret_type>::Type
Return_type;
volatile Return_type _value;
Return_type &value() { return *(Return_type *)(&_value); }
};
public:
typedef RPC_INTERFACE Rpc_interface;
/**
* Constructor
*
* This implicit constructor checks at compile time for the
* compatibility of the source and target capability types. The
* construction is performed only if the target capability type is
* identical to or a base type of the source capability type.
*/
template <typename FROM_RPC_INTERFACE>
Capability(Capability<FROM_RPC_INTERFACE> const &cap)
: Untyped_capability(_check_compatibility(cap))
{ }
/**
* Default constructor creates invalid capability
*/
Capability() { }
/**
* Factory method to construct a local-capability.
*
* Local capabilities can be used only within the local protection
* domain. They simply incorporate a pointer to some process-local
* object.
*
* \param ptr pointer to the corresponding local object.
* \return a capability that represents the local object.
*/
static Capability<RPC_INTERFACE> local_cap(RPC_INTERFACE* ptr) {
return Capability<RPC_INTERFACE>((void*)ptr); }
/**
* Dereference a local-capability.
*
* \param c the local-capability.
* \return pointer to the corresponding local object.
*/
static RPC_INTERFACE* deref(Capability<RPC_INTERFACE> c) {
return reinterpret_cast<RPC_INTERFACE*>(c.local()); }
template <typename IF>
typename Trait::Call_return<typename IF::Ret_type>::Type
call() const
{
Meta::Empty e;
Return<IF> ret;
_call<IF>(e, ret.value());
return ret.value();
}
template <typename IF>
typename Trait::Call_return<typename IF::Ret_type>::Type
call(typename Arg<IF, 0>::Type v1) const
{
Meta::Empty e;
typename IF::Client_args args(v1, e);
Return<IF> ret;
_call<IF>(args, ret.value());
return ret.value();
}
template <typename IF>
typename Trait::Call_return<typename IF::Ret_type>::Type
call(typename Arg<IF, 0>::Type v1, typename Arg<IF, 1>::Type v2) const
{
Meta::Empty e;
typename IF::Client_args args(v1, v2, e);
Return<IF> ret;
_call<IF>(args, ret.value());
return ret.value();
}
template <typename IF>
typename Trait::Call_return<typename IF::Ret_type>::Type
call(typename Arg<IF, 0>::Type v1, typename Arg<IF, 1>::Type v2,
typename Arg<IF, 2>::Type v3) const
{
Meta::Empty e;
typename IF::Client_args args(v1, v2, v3, e);
Return<IF> ret;
_call<IF>(args, ret.value());
return ret.value();
}
template <typename IF>
typename Trait::Call_return<typename IF::Ret_type>::Type
call(typename Arg<IF, 0>::Type v1, typename Arg<IF, 1>::Type v2,
typename Arg<IF, 2>::Type v3, typename Arg<IF, 3>::Type v4) const
{
Meta::Empty e;
typename IF::Client_args args(v1, v2, v3, v4, e);
Return<IF> ret;
_call<IF>(args, ret.value());
return ret.value();
}
template <typename IF>
typename Trait::Call_return<typename IF::Ret_type>::Type
call(typename Arg<IF, 0>::Type v1, typename Arg<IF, 1>::Type v2,
typename Arg<IF, 2>::Type v3, typename Arg<IF, 3>::Type v4,
typename Arg<IF, 4>::Type v5) const
{
Meta::Empty e;
typename IF::Client_args args(v1, v2, v3, v4, v5, e);
Return<IF> ret;
_call<IF>(args, ret.value());
return ret.value();
}
template <typename IF>
typename Trait::Call_return<typename IF::Ret_type>::Type
call(typename Arg<IF, 0>::Type v1, typename Arg<IF, 1>::Type v2,
typename Arg<IF, 2>::Type v3, typename Arg<IF, 3>::Type v4,
typename Arg<IF, 4>::Type v5, typename Arg<IF, 5>::Type v6) const
{
Meta::Empty e;
typename IF::Client_args args(v1, v2, v3, v4, v5, v6, e);
Return<IF> ret;
_call<IF>(args, ret.value());
return ret.value();
}
template <typename IF>
typename Trait::Call_return<typename IF::Ret_type>::Type
call(typename Arg<IF, 0>::Type v1, typename Arg<IF, 1>::Type v2,
typename Arg<IF, 2>::Type v3, typename Arg<IF, 3>::Type v4,
typename Arg<IF, 4>::Type v5, typename Arg<IF, 5>::Type v6,
typename Arg<IF, 6>::Type v7) const
{
Meta::Empty e;
typename IF::Client_args args(v1, v2, v3, v4, v5, v6, v7, e);
Return<IF> ret;
_call<IF>(args, ret.value());
return ret.value();
}
};
/**
* Convert an untyped capability to a typed capability
*/
template <typename RPC_INTERFACE>
Genode::Capability<RPC_INTERFACE>
Genode::reinterpret_cap_cast(Untyped_capability const &untyped_cap)
{
/*
* The object layout of untyped and typed capabilities is identical.
* Hence, we can just use its copy constructor.
*/
Untyped_capability *ptr = const_cast<Untyped_capability*>(&untyped_cap);
return *static_cast<Capability<RPC_INTERFACE>*>(ptr);
}
/**
* Convert capability type from an interface base type to an inherited
* interface type
*/
template <typename TO_RPC_INTERFACE, typename FROM_RPC_INTERFACE>
Genode::Capability<TO_RPC_INTERFACE>
Genode::static_cap_cast(Capability<FROM_RPC_INTERFACE> cap)
{
/* check interface compatibility */
(void)static_cast<TO_RPC_INTERFACE *>((FROM_RPC_INTERFACE *)0);
return reinterpret_cap_cast<TO_RPC_INTERFACE>(cap);
}
#endif /* _INCLUDE__BASE__CAPABILITY_H_ */
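Usage sketch (illustrative only, not part of this patch): 'Session' below is a
hypothetical RPC interface introduced just to exercise the cast helpers and the
local-capability factory declared above.

#include <base/capability.h>

struct Session { };   /* hypothetical RPC interface, for illustration only */

/* recover a typed capability from an untyped one (identical object layout) */
static Genode::Capability<Session> to_session_cap(Genode::Untyped_capability untyped)
{
	return Genode::reinterpret_cap_cast<Session>(untyped);
}

/* wrap a process-local object into a capability and unwrap it again */
static Session *local_roundtrip(Session &obj)
{
	Genode::Capability<Session> cap = Genode::Capability<Session>::local_cap(&obj);
	return Genode::Capability<Session>::deref(cap);
}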


@ -24,308 +24,312 @@
namespace Genode {
/**
* Child policy interface
*
* A child-policy object is an argument to a 'Child'. It is responsible for
* taking policy decisions regarding the parent interface. Most importantly,
* it defines how session requests are resolved and how session arguments
* are passed to servers when creating sessions.
*/
struct Child_policy
{
virtual ~Child_policy() { }
/**
* Return process name of the child
*/
virtual const char *name() const = 0;
/**
* Determine service to provide a session request
*
* \return Service to be contacted for the new session, or
* 0 if session request could not be resolved
*/
virtual Service *resolve_session_request(const char * /*service_name*/,
const char * /*args*/)
{ return 0; }
/**
* Apply transformations to session arguments
*/
virtual void filter_session_args(const char * /*service*/,
char * /*args*/, size_t /*args_len*/) { }
/**
* Register a service provided by the child
*
* \param name service name
* \param root interface for creating sessions for the service
* \param alloc allocator to be used for child-specific
* meta-data allocations
* \return true if announcement succeeded, or false if
* child is not permitted to announce service
*/
virtual bool announce_service(const char * /*name*/,
Root_capability /*root*/,
Allocator * /*alloc*/,
Server * /*server*/)
{ return false; }
/**
* Apply session affinity policy
*
* \param affinity affinity passed along with a session request
* \return affinity subordinated to the child policy
*/
virtual Affinity filter_session_affinity(Affinity const &affinity)
{
return affinity;
}
/**
* Unregister services that had been provided by the child
*/
virtual void unregister_services() { }
/**
* Exit child
*/
virtual void exit(int exit_value)
{
PDBG("child \"%s\" exited with exit value %d", name(), exit_value);
}
/**
* Reference RAM session
*
* The RAM session returned by this function is used for session-quota
* transfers.
*/
virtual Ram_session *ref_ram_session() { return env()->ram_session(); }
/**
* Return platform-specific PD-session arguments
*
* This function is used on Linux to supply additional PD-session
* arguments to core, i.e., the chroot path, the UID, and the GID.
*/
virtual Native_pd_args const *pd_args() const { return 0; }
/**
* Respond to the release of resources by the child
*
* This function is called when the child confirms the release of
* resources in response to a yield request.
*/
virtual void yield_response() { }
/**
* Take action on additional resource needs by the child
*/
virtual void resource_request(Parent::Resource_args const &) { }
};
/**
* Implementation of the parent interface that supports resource trading
*
* There are three possible cases of how a session can be provided to
* a child:
*
* # The service is implemented locally
* # The session was obtained by asking our parent
* # The session is provided by one of our children
*
* These types must be differentiated for the quota management when a child
* issues the closing of a session or transfers quota via our parent
* interface.
*
* If we close a session to a local service, we transfer the session quota
* from our own account to the client.
*
* If we close a parent session, we receive the session quota on our own
* account and must transfer this amount to the session-closing child.
*
* If we close a session provided by a server child, we close the session
* at the server, transfer the session quota from the server's ram session
* to our account, and subsequently transfer the same amount from our
* account to the client.
*/
class Child : protected Rpc_object<Parent>
{
private:
class Session;
/* RAM session that contains the quota of the child */
Ram_session_capability _ram;
Ram_session_client _ram_session_client;
/* CPU session that contains the quota of the child */
Cpu_session_capability _cpu;
/* RM session representing the address space of the child */
Rm_session_capability _rm;
/* Services where the RAM, CPU, and RM resources come from */
Service &_ram_service;
Service &_cpu_service;
Service &_rm_service;
/* heap for child-specific allocations using the child's quota */
Heap _heap;
Rpc_entrypoint *_entrypoint;
Parent_capability _parent_cap;
/* child policy */
Child_policy *_policy;
/* sessions opened by the child */
Lock _lock; /* protect list manipulation */
Object_pool<Session> _session_pool;
List<Session> _session_list;
/* server role */
Server _server;
/* session-argument buffer */
char _args[Parent::Session_args::MAX_SIZE];
/* signal handlers registered by the child */
Signal_context_capability _resource_avail_sigh;
Signal_context_capability _yield_sigh;
/* arguments fetched by the child in response to a yield signal */
Lock _yield_request_lock;
Resource_args _yield_request_args;
Process _process;
/**
* Attach session information to a child
*
* \throw Ram_session::Quota_exceeded the child's heap partition cannot
* hold the session meta data
*/
void _add_session(const Session &s);
/**
* Close session and revert quota donation associated with it
*/
void _remove_session(Session *s);
/**
* Return service interface targeting the parent
*
* The service returned by this function is used as default
* provider for the RAM, CPU, and RM resources of the child. It is
* solely used for targeting resource donations during
* 'Parent::upgrade_quota()' calls.
*/
static Service *_parent_service();
public:
/**
* Constructor
*
* \param elf_ds dataspace containing the binary
* \param ram RAM session with the child's quota
* \param cpu CPU session with the child's quota
* \param rm RM session representing the address space
* of the child
* \param entrypoint server entrypoint to serve the parent interface
* \param policy child policy
* \param ram_service provider of the 'ram' session
* \param cpu_service provider of the 'cpu' session
* \param rm_service provider of the 'rm' session
*
* By assigning a separate entry point to each child, the host of
* multiple children is able to handle a blocking invocation of
* the parent interface of one child while still maintaining the
* service to other children, each having an independent entry
* point.
*
* The 'ram_service', 'cpu_service', and 'rm_service' arguments are
* needed to direct quota upgrades referring to the resources of
* the child environment. By default, we expect that these
* resources are provided by the parent.
*/
Child(Dataspace_capability elf_ds,
Ram_session_capability ram,
Cpu_session_capability cpu,
Rm_session_capability rm,
Rpc_entrypoint *entrypoint,
Child_policy *policy,
Service &ram_service = *_parent_service(),
Service &cpu_service = *_parent_service(),
Service &rm_service = *_parent_service());
/**
* Destructor
*
* On destruction of a child, we close all sessions of the child to
* other services.
*/
virtual ~Child();
/**
* Return heap that uses the child's quota
*/
Allocator *heap() { return &_heap; }
Ram_session_capability ram_session_cap() const { return _ram; }
Cpu_session_capability cpu_session_cap() const { return _cpu; }
Rm_session_capability rm_session_cap() const { return _rm; }
Parent_capability parent_cap() const { return cap(); }
/**
* Discard all sessions to specified service
*
* When this function is called, we assume the server protection
* domain to be dead and that all server quota was already
* transferred back to our own 'env()->ram_session()' account. Note
* that the specified server object may not exist anymore. We do
* not de-reference the server argument in here!
*/
void revoke_server(const Server *server);
/**
* Instruct the child to yield resources
*
* By calling this function, the child will be notified about the
* need to release the specified amount of resources. For more
* details about the protocol between a child and its parent,
* refer to the description given in 'parent/parent.h'.
*/
void yield(Resource_args const &args);
/**
* Notify the child about newly available resources
*/
void notify_resource_avail() const;
/**********************
** Parent interface **
**********************/
void announce(Service_name const &, Root_capability);
Session_capability session(Service_name const &, Session_args const &,
Affinity const &);
void upgrade(Session_capability, Upgrade_args const &);
void close(Session_capability);
void exit(int);
Thread_capability main_thread_cap() const;
void resource_avail_sigh(Signal_context_capability);
void resource_request(Resource_args const &);
void yield_sigh(Signal_context_capability);
Resource_args yield_request();
void yield_response();
};
struct Child_policy;
struct Child;
}
/**
* Child policy interface
*
* A child-policy object is an argument to a 'Child'. It is responsible for
* taking policy decisions regarding the parent interface. Most importantly,
* it defines how session requests are resolved and how session arguments
* are passed to servers when creating sessions.
*/
struct Genode::Child_policy
{
virtual ~Child_policy() { }
/**
* Return process name of the child
*/
virtual const char *name() const = 0;
/**
* Determine service to provide a session request
*
* \return Service to be contacted for the new session, or
* 0 if session request could not be resolved
*/
virtual Service *resolve_session_request(const char * /*service_name*/,
const char * /*args*/)
{ return 0; }
/**
* Apply transformations to session arguments
*/
virtual void filter_session_args(const char * /*service*/,
char * /*args*/, size_t /*args_len*/) { }
/**
* Register a service provided by the child
*
* \param name service name
* \param root interface for creating sessions for the service
* \param alloc allocator to be used for child-specific
* meta-data allocations
* \return true if announcement succeeded, or false if
* child is not permitted to announce service
*/
virtual bool announce_service(const char * /*name*/,
Root_capability /*root*/,
Allocator * /*alloc*/,
Server * /*server*/)
{ return false; }
/**
* Apply session affinity policy
*
* \param affinity affinity passed along with a session request
* \return affinity subordinated to the child policy
*/
virtual Affinity filter_session_affinity(Affinity const &affinity)
{
return affinity;
}
/**
* Unregister services that had been provided by the child
*/
virtual void unregister_services() { }
/**
* Exit child
*/
virtual void exit(int exit_value)
{
PDBG("child \"%s\" exited with exit value %d", name(), exit_value);
}
/**
* Reference RAM session
*
* The RAM session returned by this function is used for session-quota
* transfers.
*/
virtual Ram_session *ref_ram_session() { return env()->ram_session(); }
/**
* Return platform-specific PD-session arguments
*
* This function is used on Linux to supply additional PD-session
* arguments to core, i.e., the chroot path, the UID, and the GID.
*/
virtual Native_pd_args const *pd_args() const { return 0; }
/**
* Respond to the release of resources by the child
*
* This function is called when the child confirms the release of
* resources in response to a yield request.
*/
virtual void yield_response() { }
/**
* Take action on additional resource needs by the child
*/
virtual void resource_request(Parent::Resource_args const &) { }
};
/**
* Implementation of the parent interface that supports resource trading
*
* There are three possible cases of how a session can be provided to
* a child:
*
* # The service is implemented locally
* # The session was obtained by asking our parent
* # The session is provided by one of our children
*
* These types must be differentiated for the quota management when a child
* issues the closing of a session or transfers quota via our parent
* interface.
*
* If we close a session to a local service, we transfer the session quota
* from our own account to the client.
*
* If we close a parent session, we receive the session quota on our own
* account and must transfer this amount to the session-closing child.
*
* If we close a session provided by a server child, we close the session
* at the server, transfer the session quota from the server's ram session
* to our account, and subsequently transfer the same amount from our
* account to the client.
*/
class Genode::Child : protected Rpc_object<Parent>
{
private:
class Session;
/* RAM session that contains the quota of the child */
Ram_session_capability _ram;
Ram_session_client _ram_session_client;
/* CPU session that contains the quota of the child */
Cpu_session_capability _cpu;
/* RM session representing the address space of the child */
Rm_session_capability _rm;
/* Services where the RAM, CPU, and RM resources come from */
Service &_ram_service;
Service &_cpu_service;
Service &_rm_service;
/* heap for child-specific allocations using the child's quota */
Heap _heap;
Rpc_entrypoint *_entrypoint;
Parent_capability _parent_cap;
/* child policy */
Child_policy *_policy;
/* sessions opened by the child */
Lock _lock; /* protect list manipulation */
Object_pool<Session> _session_pool;
List<Session> _session_list;
/* server role */
Server _server;
/* session-argument buffer */
char _args[Parent::Session_args::MAX_SIZE];
/* signal handlers registered by the child */
Signal_context_capability _resource_avail_sigh;
Signal_context_capability _yield_sigh;
/* arguments fetched by the child in response to a yield signal */
Lock _yield_request_lock;
Resource_args _yield_request_args;
Process _process;
/**
* Attach session information to a child
*
* \throw Ram_session::Quota_exceeded the child's heap partition cannot
* hold the session meta data
*/
void _add_session(const Session &s);
/**
* Close session and revert quota donation associated with it
*/
void _remove_session(Session *s);
/**
* Return service interface targeting the parent
*
* The service returned by this function is used as default
* provider for the RAM, CPU, and RM resources of the child. It is
* solely used for targeting resource donations during
* 'Parent::upgrade_quota()' calls.
*/
static Service *_parent_service();
public:
/**
* Constructor
*
* \param elf_ds dataspace containing the binary
* \param ram RAM session with the child's quota
* \param cpu CPU session with the child's quota
* \param rm RM session representing the address space
* of the child
* \param entrypoint server entrypoint to serve the parent interface
* \param policy child policy
* \param ram_service provider of the 'ram' session
* \param cpu_service provider of the 'cpu' session
* \param rm_service provider of the 'rm' session
*
* By assigning a separate entry point to each child, the host of
* multiple children is able to handle a blocking invocation of
* the parent interface of one child while still maintaining the
* service to other children, each having an independent entry
* point.
*
* The 'ram_service', 'cpu_service', and 'rm_service' arguments are
* needed to direct quota upgrades referring to the resources of
* the child environment. By default, we expect that these
* resources are provided by the parent.
*/
Child(Dataspace_capability elf_ds,
Ram_session_capability ram,
Cpu_session_capability cpu,
Rm_session_capability rm,
Rpc_entrypoint *entrypoint,
Child_policy *policy,
Service &ram_service = *_parent_service(),
Service &cpu_service = *_parent_service(),
Service &rm_service = *_parent_service());
/**
* Destructor
*
* On destruction of a child, we close all sessions of the child to
* other services.
*/
virtual ~Child();
/**
* Return heap that uses the child's quota
*/
Allocator *heap() { return &_heap; }
Ram_session_capability ram_session_cap() const { return _ram; }
Cpu_session_capability cpu_session_cap() const { return _cpu; }
Rm_session_capability rm_session_cap() const { return _rm; }
Parent_capability parent_cap() const { return cap(); }
/**
* Discard all sessions to specified service
*
* When this function is called, we assume the server protection
* domain to be dead and that all server quota was already
* transferred back to our own 'env()->ram_session()' account. Note
* that the specified server object may not exist anymore. We do
* not de-reference the server argument in here!
*/
void revoke_server(const Server *server);
/**
* Instruct the child to yield resources
*
* By calling this function, the child will be notified about the
* need to release the specified amount of resources. For more
* details about the protocol between a child and its parent,
* refer to the description given in 'parent/parent.h'.
*/
void yield(Resource_args const &args);
/**
* Notify the child about newly available resources
*/
void notify_resource_avail() const;
/**********************
** Parent interface **
**********************/
void announce(Service_name const &, Root_capability) override;
Session_capability session(Service_name const &, Session_args const &,
Affinity const &) override;
void upgrade(Session_capability, Upgrade_args const &) override;
void close(Session_capability) override;
void exit(int) override;
Thread_capability main_thread_cap() const override;
void resource_avail_sigh(Signal_context_capability) override;
void resource_request(Resource_args const &) override;
void yield_sigh(Signal_context_capability) override;
Resource_args yield_request() override;
void yield_response() override;
};
#endif /* _INCLUDE__BASE__CHILD_H_ */
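Usage sketch (illustrative only, not part of this patch): a minimal
'Child_policy' has to provide only the child's name because all other hooks
have default implementations; the request-resolution body below is a mere
placeholder.

#include <base/child.h>

class My_child_policy : public Genode::Child_policy
{
	public:

		const char *name() const override { return "my_child"; }

		Genode::Service *resolve_session_request(const char *service_name,
		                                         const char *args) override
		{
			/* placeholder: a real policy would look up 'service_name' in its
			   registry of known services and return the matching one */
			(void)service_name; (void)args;
			return 0;
		}
};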


@ -17,99 +17,99 @@
#include <base/env.h>
#include <base/capability.h>
namespace Genode {
namespace Genode { template <typename> class Connection; }
/**
* Representation of an open connection to a service
*/
template <typename SESSION_TYPE>
class Connection : public Noncopyable
{
public:
enum On_destruction { CLOSE = false, KEEP_OPEN = true };
/**
* Representation of an open connection to a service
*/
template <typename SESSION_TYPE>
class Genode::Connection : public Noncopyable
{
public:
private:
enum On_destruction { CLOSE = false, KEEP_OPEN = true };
/*
* Because the argument string is used with the parent interface,
* the message-buffer size of the parent-interface provides a
* realistic upper bound for dimensioning the format-string
* buffer.
*/
enum { FORMAT_STRING_SIZE = Parent::Session_args::MAX_SIZE };
private:
Capability<SESSION_TYPE> _cap;
/*
* Because the argument string is used with the parent interface,
* the message-buffer size of the parent-interface provides a
* realistic upper bound for dimensioning the format-string
* buffer.
*/
enum { FORMAT_STRING_SIZE = Parent::Session_args::MAX_SIZE };
On_destruction _on_destruction;
Capability<SESSION_TYPE> _cap;
Capability<SESSION_TYPE> _session(Affinity const &affinity,
const char *format_args, va_list list)
{
char buf[FORMAT_STRING_SIZE];
On_destruction _on_destruction;
String_console sc(buf, FORMAT_STRING_SIZE);
sc.vprintf(format_args, list);
Capability<SESSION_TYPE> _session(Affinity const &affinity,
const char *format_args, va_list list)
{
char buf[FORMAT_STRING_SIZE];
va_end(list);
String_console sc(buf, FORMAT_STRING_SIZE);
sc.vprintf(format_args, list);
/* call parent interface with the resulting argument buffer */
return env()->parent()->session<SESSION_TYPE>(buf, affinity);
}
va_end(list);
public:
/* call parent interface with the resulting argument buffer */
return env()->parent()->session<SESSION_TYPE>(buf, affinity);
}
/**
* Constructor
*
* \param cap session capability
* \param od session policy applied when destructing the connection
*/
Connection(Capability<SESSION_TYPE> cap, On_destruction od = CLOSE):
_cap(cap), _on_destruction(od) { }
public:
/**
* Destructor
*/
~Connection()
{
if (_on_destruction == CLOSE)
env()->parent()->close(_cap);
}
/**
* Constructor
*
* \param cap session capability
* \param od session policy applied when destructing the connection
*/
Connection(Capability<SESSION_TYPE> cap, On_destruction od = CLOSE):
_cap(cap), _on_destruction(od) { }
/**
* Return session capability
*/
Capability<SESSION_TYPE> cap() const { return _cap; }
/**
* Destructor
*/
~Connection()
{
if (_on_destruction == CLOSE)
env()->parent()->close(_cap);
}
/**
* Define session policy
*/
void on_destruction(On_destruction od) { _on_destruction = od; }
/**
* Return session capability
*/
Capability<SESSION_TYPE> cap() const { return _cap; }
/**
* Shortcut for env()->parent()->session()
*/
Capability<SESSION_TYPE> session(const char *format_args, ...)
{
va_list list;
va_start(list, format_args);
/**
* Define session policy
*/
void on_destruction(On_destruction od) { _on_destruction = od; }
return _session(Affinity(), format_args, list);
}
/**
* Shortcut for env()->parent()->session()
*/
Capability<SESSION_TYPE> session(const char *format_args, ...)
{
va_list list;
va_start(list, format_args);
/**
* Shortcut for env()->parent()->session()
*/
Capability<SESSION_TYPE> session(Affinity const &affinity,
char const *format_args, ...)
{
va_list list;
va_start(list, format_args);
return _session(Affinity(), format_args, list);
}
return _session(affinity, format_args, list);
}
};
}
/**
* Shortcut for env()->parent()->session()
*/
Capability<SESSION_TYPE> session(Affinity const &affinity,
char const *format_args, ...)
{
va_list list;
va_start(list, format_args);
return _session(affinity, format_args, list);
}
};
#endif /* _INCLUDE__BASE__CONNECTION_H_ */
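Usage sketch (illustrative only, not part of this patch): connections are
meant to be sub-classed once per session interface. 'My_session', its
'service_name()' hook, and the quota value are assumptions made for this
example only.

#include <base/connection.h>

struct My_session
{
	static const char *service_name() { return "My"; }   /* assumed hook */
};

struct My_connection : Genode::Connection<My_session>
{
	My_connection()
	:
		/* request the session at the parent and hand the resulting capability
		   to the connection, which closes the session on destruction */
		Genode::Connection<My_session>(session("ram_quota=4K"))
	{ }
};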


@ -16,45 +16,45 @@
#include <stdarg.h>
namespace Genode {
namespace Genode { class Console; }
class Console
{
public:
virtual ~Console() {}
class Genode::Console
{
public:
/**
* Print format string
*/
void printf(const char *format, ...) __attribute__((format(printf, 2, 3)));
void vprintf(const char *format, va_list) __attribute__((format(printf, 2, 0)));
virtual ~Console() {}
protected:
/**
* Print format string
*/
void printf(const char *format, ...) __attribute__((format(printf, 2, 3)));
void vprintf(const char *format, va_list) __attribute__((format(printf, 2, 0)));
/**
* Backend function for the output of one character
*/
virtual void _out_char(char c) = 0;
protected:
/**
* Backend function for the output of a null-terminated string
*
* The default implementation uses _out_char. This function may
* be overridden by the backend for improving efficiency.
*
* This function is virtual to enable the use of optimized
* string-output functions on some target platforms, e.g.
* a kernel debugger that offers a string-output syscall. The
* default implementation calls '_out_char' for each character.
*/
virtual void _out_string(const char *str);
/**
* Backend function for the output of one character
*/
virtual void _out_char(char c) = 0;
private:
/**
* Backend function for the output of a null-terminated string
*
* The default implementation uses _out_char. This function may
* be overridden by the backend for improving efficiency.
*
* This function is virtual to enable the use of optimized
* string-output functions on some target platforms, e.g.
* a kernel debugger that offers a string-output syscall. The
* default implementation calls '_out_char' for each character.
*/
virtual void _out_string(const char *str);
template <typename T> void _out_unsigned(T value, unsigned base = 10, int pad = 0);
template <typename T> void _out_signed(T value, unsigned base = 10);
};
}
private:
template <typename T> void _out_unsigned(T value, unsigned base = 10, int pad = 0);
template <typename T> void _out_signed(T value, unsigned base = 10);
};
#endif /* _INCLUDE__BASE__CONSOLE_H_ */
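Usage sketch (illustrative only, not part of this patch): a console backend
merely implements '_out_char'. The low-level output function 'raw_putchar' is
assumed to exist elsewhere.

#include <base/console.h>

extern "C" void raw_putchar(char);   /* assumed low-level output function */

struct Raw_console : Genode::Console
{
	void _out_char(char c) override { raw_putchar(c); }
};

static void console_example()
{
	Raw_console console;
	console.printf("free memory: %d KiB\n", 128);
}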


@ -19,139 +19,141 @@
namespace Genode {
class Elf_segment;
class Elf_binary
{
public:
/**
* Default constructor creates invalid object
*/
Elf_binary() : _valid(false) { }
/**
* Constructor
*
* The object is only useful if valid() returns true.
*/
explicit Elf_binary(addr_t start);
/* special types */
struct Flags {
unsigned r:1;
unsigned w:1;
unsigned x:1;
unsigned skip:1;
};
/**
* Read information about program segments
*
* \return properties of the specified program segment
*/
Elf_segment get_segment(unsigned num);
/**
* Check validity
*/
bool valid() { return _valid; }
/**
* Check for dynamic elf
*/
bool is_dynamically_linked() { return (_dynamic && _interp); }
/************************
** Accessor functions **
************************/
addr_t entry() { return valid() ? _entry : 0; }
private:
/* indicates whether the loaded ELF is valid and supported */
bool _valid;
/* dynamically linked */
bool _dynamic;
/* dynamic linker name matches 'genode' */
bool _interp;
/* ELF start pointer in memory */
addr_t _start;
/* ELF entry point */
addr_t _entry;
/* program segments */
addr_t _ph_table;
size_t _phentsize;
unsigned _phnum;
/************
** Helper **
************/
/**
* Check ELF header compatibility
*/
int _ehdr_check_compat();
/**
* Check program header compatibility
*/
int _ph_table_check_compat();
/**
* Check for dynamic program segments
*/
bool _dynamic_check_compat(unsigned type);
};
class Elf_segment
{
public:
/**
* Standard constructor creates invalid object
*/
Elf_segment() : _valid(false) { }
Elf_segment(const Elf_binary *elf, void *start, size_t file_offset,
size_t file_size, size_t mem_size, Elf_binary::Flags flags)
: _elf(elf), _start((unsigned char *)start), _file_offset(file_offset),
_file_size(file_size), _mem_size(mem_size), _flags(flags)
{
_valid = elf ? true : false;
}
const Elf_binary * elf() { return _elf; }
void * start() { return (void *)_start; }
size_t file_offset() { return _file_offset; }
size_t file_size() { return _file_size; }
size_t mem_size() { return _mem_size; }
Elf_binary::Flags flags() { return _flags; }
/**
* Check validity
*/
bool valid() { return _valid; }
private:
const Elf_binary *_elf;
bool _valid; /* validity indicator */
unsigned char *_start;
size_t _file_offset;
size_t _file_size;
size_t _mem_size;
Elf_binary::Flags _flags;
};
class Elf_binary;
}
class Genode::Elf_binary
{
public:
/**
* Default constructor creates invalid object
*/
Elf_binary() : _valid(false) { }
/**
* Constructor
*
* The object is only useful if valid() returns true.
*/
explicit Elf_binary(addr_t start);
/* special types */
struct Flags {
unsigned r:1;
unsigned w:1;
unsigned x:1;
unsigned skip:1;
};
/**
* Read information about program segments
*
* \return properties of the specified program segment
*/
Elf_segment get_segment(unsigned num);
/**
* Check validity
*/
bool valid() { return _valid; }
/**
* Check for dynamic elf
*/
bool is_dynamically_linked() { return (_dynamic && _interp); }
/************************
** Accessor functions **
************************/
addr_t entry() { return valid() ? _entry : 0; }
private:
/* indicates whether the loaded ELF is valid and supported */
bool _valid;
/* dynamically linked */
bool _dynamic;
/* dynamic linker name matches 'genode' */
bool _interp;
/* ELF start pointer in memory */
addr_t _start;
/* ELF entry point */
addr_t _entry;
/* program segments */
addr_t _ph_table;
size_t _phentsize;
unsigned _phnum;
/************
** Helper **
************/
/**
* Check ELF header compatibility
*/
int _ehdr_check_compat();
/**
* Check program header compatibility
*/
int _ph_table_check_compat();
/**
* Check for dynamic program segments
*/
bool _dynamic_check_compat(unsigned type);
};
class Genode::Elf_segment
{
public:
/**
* Standard constructor creates invalid object
*/
Elf_segment() : _valid(false) { }
Elf_segment(const Elf_binary *elf, void *start, size_t file_offset,
size_t file_size, size_t mem_size, Elf_binary::Flags flags)
: _elf(elf), _start((unsigned char *)start), _file_offset(file_offset),
_file_size(file_size), _mem_size(mem_size), _flags(flags)
{
_valid = elf ? true : false;
}
const Elf_binary * elf() { return _elf; }
void * start() { return (void *)_start; }
size_t file_offset() { return _file_offset; }
size_t file_size() { return _file_size; }
size_t mem_size() { return _mem_size; }
Elf_binary::Flags flags() { return _flags; }
/**
* Check validity
*/
bool valid() { return _valid; }
private:
const Elf_binary *_elf;
bool _valid; /* validity indicator */
unsigned char *_start;
size_t _file_offset;
size_t _file_size;
size_t _mem_size;
Elf_binary::Flags _flags;
};
#endif /* _INCLUDE__BASE__ELF_H_ */
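Usage sketch (illustrative only, not part of this patch): walk the program
segments of an ELF image mapped at 'elf_addr'. It assumes that 'get_segment'
returns an invalid segment once the index exceeds the number of program
headers.

#include <base/elf.h>
#include <base/printf.h>

static void dump_segments(Genode::addr_t elf_addr)
{
	using namespace Genode;

	Elf_binary elf(elf_addr);
	if (!elf.valid())
		return;

	for (unsigned i = 0; ; i++) {
		Elf_segment seg = elf.get_segment(i);
		if (!seg.valid())
			break;

		if (seg.flags().skip)
			continue;

		printf("segment %d: file_size=%lu mem_size=%lu\n", (int)i,
		       (unsigned long)seg.file_size(), (unsigned long)seg.mem_size());
	}
}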


@ -30,53 +30,10 @@
namespace Genode {
class Env
{
public:
virtual ~Env() { }
/**
* Communication channel to our parent
*/
virtual Parent *parent() = 0;
/**
* RAM session for the program
*
* The RAM session represents a quota of memory that is
* available to the program. Quota can be used to allocate
* RAM dataspaces.
*/
virtual Ram_session *ram_session() = 0;
virtual Ram_session_capability ram_session_cap() = 0;
/**
* CPU session for the program
*
* This session is used to create threads.
*/
virtual Cpu_session *cpu_session() = 0;
virtual Cpu_session_capability cpu_session_cap() = 0;
/**
* Region manager session of the program
*/
virtual Rm_session *rm_session() = 0;
/**
* Pd session of the program
*/
virtual Pd_session *pd_session() = 0;
/**
* Heap backed by the ram_session of the environment.
*/
virtual Allocator *heap() = 0;
};
struct Env;
extern Env *env();
/**
* Return parent capability
*
@ -86,4 +43,50 @@ namespace Genode {
Parent_capability parent_cap();
}
struct Genode::Env
{
virtual ~Env() { }
/**
* Communication channel to our parent
*/
virtual Parent *parent() = 0;
/**
* RAM session for the program
*
* The RAM session represents a quota of memory that is
* available to the program. Quota can be used to allocate
* RAM dataspaces.
*/
virtual Ram_session *ram_session() = 0;
virtual Ram_session_capability ram_session_cap() = 0;
/**
* CPU session for the program
*
* This session is used to create threads.
*/
virtual Cpu_session *cpu_session() = 0;
virtual Cpu_session_capability cpu_session_cap() = 0;
/**
* Region manager session of the program
*/
virtual Rm_session *rm_session() = 0;
/**
* Pd session of the program
*/
virtual Pd_session *pd_session() = 0;
/**
* Heap backed by the ram_session of the environment.
*/
virtual Allocator *heap() = 0;
};
#endif /* _INCLUDE__BASE__ENV_H_ */
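Usage sketch (illustrative only, not part of this patch): the environment is
the usual way to reach the program's own sessions. The 'quota()' and 'used()'
accessors of the RAM session are assumed here.

#include <base/env.h>
#include <base/printf.h>

static void report_ram_quota()
{
	Genode::Ram_session *ram = Genode::env()->ram_session();

	Genode::printf("RAM quota: %lu bytes, used: %lu bytes\n",
	               (unsigned long)ram->quota(), (unsigned long)ram->used());
}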


@ -14,6 +14,8 @@
#ifndef _INCLUDE__BASE__EXCEPTION_H_
#define _INCLUDE__BASE__EXCEPTION_H_
namespace Genode { class Exception { }; }
namespace Genode { class Exception; }
class Genode::Exception { };
#endif /* _INCLUDE__BASE__EXCEPTION_H_ */


@ -19,95 +19,98 @@
namespace Genode {
class Flexpage
{
public:
addr_t addr;
addr_t hotspot;
size_t log2_order;
Flexpage() : addr(~0UL), hotspot(0), log2_order(0) { }
Flexpage(addr_t a, addr_t h, size_t o)
: addr(a), hotspot(h), log2_order(o) { }
bool valid() { return addr != ~0UL; }
};
class Flexpage_iterator {
private:
addr_t _src_start, _src_size;
addr_t _dst_start, _dst_size;
addr_t _hotspot, _offset;
/**
* Find least significant set bit in value
*/
inline addr_t
lsb_bit(addr_t const scan)
{
if (scan == 0)
return ~0UL;
return __builtin_ctzl(scan);
}
public:
Flexpage_iterator() { }
Flexpage_iterator(addr_t src_start, size_t src_size,
addr_t dst_start, size_t dst_size,
addr_t hotspot) :
_src_start(src_start), _src_size(src_size),
_dst_start(dst_start), _dst_size(dst_size),
_hotspot(hotspot), _offset(0)
{ }
Flexpage page()
{
size_t const size = min (_src_size, _dst_size);
addr_t const from_end = _src_start + size;
addr_t const to_end = _dst_start + size;
if (_offset >= size)
return Flexpage();
addr_t const from_curr = _src_start + _offset;
addr_t const to_curr = _dst_start + _offset;
/*
* The common alignment corresponds to the number of least
* significant zero bits in both addresses.
*/
addr_t const common_bits = from_curr | to_curr;
/* find least set bit in common bits */
size_t order = lsb_bit(common_bits);
size_t max = (order == ~0UL) ? ~0UL : (1UL << order);
/* look if it still fits into both 'src' and 'dst' ranges */
if ((from_end - from_curr) < max) {
order = log2(from_end - from_curr);
order = (order == ~0UL) ? 12 : order;
max = 1UL << order;
}
if ((to_end - to_curr) < max) {
order = log2(to_end - to_curr);
order = (order == ~0UL) ? 12 : order;
}
/* advance offset by current flexpage size */
_offset += (1UL << order);
return Flexpage(from_curr, _hotspot + _offset - (1UL << order), order);
}
};
struct Flexpage;
class Flexpage_iterator;
}
struct Genode::Flexpage
{
addr_t addr;
addr_t hotspot;
size_t log2_order;
Flexpage() : addr(~0UL), hotspot(0), log2_order(0) { }
Flexpage(addr_t a, addr_t h, size_t o)
: addr(a), hotspot(h), log2_order(o) { }
bool valid() { return addr != ~0UL; }
};
class Genode::Flexpage_iterator
{
private:
addr_t _src_start, _src_size;
addr_t _dst_start, _dst_size;
addr_t _hotspot, _offset;
/**
* Find least significant set bit in value
*/
inline addr_t
lsb_bit(addr_t const scan)
{
if (scan == 0)
return ~0UL;
return __builtin_ctzl(scan);
}
public:
Flexpage_iterator() { }
Flexpage_iterator(addr_t src_start, size_t src_size,
addr_t dst_start, size_t dst_size,
addr_t hotspot)
:
_src_start(src_start), _src_size(src_size),
_dst_start(dst_start), _dst_size(dst_size),
_hotspot(hotspot), _offset(0)
{ }
Flexpage page()
{
size_t const size = min (_src_size, _dst_size);
addr_t const from_end = _src_start + size;
addr_t const to_end = _dst_start + size;
if (_offset >= size)
return Flexpage();
addr_t const from_curr = _src_start + _offset;
addr_t const to_curr = _dst_start + _offset;
/*
* The common alignment corresponds to the number of least
* significant zero bits in both addresses.
*/
addr_t const common_bits = from_curr | to_curr;
/* find least set bit in common bits */
size_t order = lsb_bit(common_bits);
size_t max = (order == ~0UL) ? ~0UL : (1UL << order);
/* look if it still fits into both 'src' and 'dst' ranges */
if ((from_end - from_curr) < max) {
order = log2(from_end - from_curr);
order = (order == ~0UL) ? 12 : order;
max = 1UL << order;
}
if ((to_end - to_curr) < max) {
order = log2(to_end - to_curr);
order = (order == ~0UL) ? 12 : order;
}
/* advance offset by current flexpage size */
_offset += (1UL << order);
return Flexpage(from_curr, _hotspot + _offset - (1UL << order), order);
}
};
#endif /* _INCLUDE__BASE__FLEX_ITERATOR_H */
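Usage sketch (illustrative only, not part of this patch): enumerate the
flexpages needed to map a source range onto a destination range, using only
the interface shown above.

#include <base/flex_iterator.h>
#include <base/printf.h>

static void map_range(Genode::addr_t src, Genode::addr_t dst, Genode::size_t size)
{
	using namespace Genode;

	/* the hotspot equals the destination start in this simple case */
	Flexpage_iterator it(src, size, dst, size, dst);

	for (Flexpage page = it.page(); page.valid(); page = it.page())
		printf("map %lx at hotspot %lx, order %lu\n",
		       (unsigned long)page.addr, (unsigned long)page.hotspot,
		       (unsigned long)page.log2_order);
}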


@ -21,194 +21,198 @@
#include <base/lock.h>
namespace Genode {
class Heap;
class Sliced_heap;
}
/**
* Heap that uses dataspaces as backing store
*
* The heap class provides an allocator that uses a list of dataspaces of a ram
* session as backing store. One dataspace may be used for holding multiple blocks.
*/
class Heap : public Allocator
{
private:
enum {
MIN_CHUNK_SIZE = 4*1024, /* in machine words */
MAX_CHUNK_SIZE = 256*1024,
/*
* Meta data includes the Dataspace structure and meta data of
* the AVL allocator.
*/
META_DATA_SIZE = 1024, /* in bytes */
/*
* Allocation sizes >= this value are considered as big
* allocations, which get their own dataspace. In contrast
* to smaller allocations, this memory is released to
* the RAM session when 'free()' is called.
*/
BIG_ALLOCATION_THRESHOLD = 64*1024 /* in bytes */
};
class Dataspace : public List<Dataspace>::Element
{
public:
Ram_dataspace_capability cap;
void *local_addr;
size_t size;
Dataspace(Ram_dataspace_capability c, void *local_addr, size_t size)
: cap(c), local_addr(local_addr), size(size) { }
inline void * operator new(Genode::size_t, void* addr) {
return addr; }
inline void operator delete(void*) { }
};
/**
* Heap that uses dataspaces as backing store
*
* The heap class provides an allocator that uses a list of dataspaces of a ram
* session as backing store. One dataspace may be used for holding multiple blocks.
*/
class Genode::Heap : public Allocator
{
private:
enum {
MIN_CHUNK_SIZE = 4*1024, /* in machine words */
MAX_CHUNK_SIZE = 256*1024,
/*
* This structure exists only to make sure that the dataspaces are
* destroyed after the AVL allocator.
* Meta data includes the Dataspace structure and meta data of
* the AVL allocator.
*/
struct Dataspace_pool : public List<Dataspace>
{
Ram_session *ram_session; /* ram session for backing store */
Rm_session *rm_session; /* region manager */
Dataspace_pool(Ram_session *ram_session, Rm_session *rm_session)
: ram_session(ram_session), rm_session(rm_session) { }
/**
* Destructor
*/
~Dataspace_pool();
void reassign_resources(Ram_session *ram, Rm_session *rm) {
ram_session = ram, rm_session = rm; }
};
META_DATA_SIZE = 1024, /* in bytes */
/*
* NOTE: The order of the member variables is important for
* the calling order of the destructors!
* Allocation sizes >= this value are considered as big
* allocations, which get their own dataspace. In contrast
* to smaller allocations, this memory is released to
* the RAM session when 'free()' is called.
*/
BIG_ALLOCATION_THRESHOLD = 64*1024 /* in bytes */
};
Lock _lock;
Dataspace_pool _ds_pool; /* list of dataspaces */
Allocator_avl _alloc; /* local allocator */
size_t _quota_limit;
size_t _quota_used;
size_t _chunk_size;
class Dataspace : public List<Dataspace>::Element
{
public:
/**
* Allocate a new dataspace of the specified size
*
* \param size number of bytes to allocate
* \param enforce_separate_metadata if true, the new dataspace
* will not contain any meta data
* \throw Rm_session::Invalid_dataspace,
* Rm_session::Region_conflict
* \return 0 on success or negative error code
*/
Heap::Dataspace *_allocate_dataspace(size_t size, bool enforce_separate_metadata);
Ram_dataspace_capability cap;
void *local_addr;
size_t size;
/**
* Try to allocate block at our local allocator
*
* \return true on success
*
* This function is a utility used by '_unsynchronized_alloc' to
* avoid code duplication.
*/
bool _try_local_alloc(size_t size, void **out_addr);
Dataspace(Ram_dataspace_capability c, void *local_addr, size_t size)
: cap(c), local_addr(local_addr), size(size) { }
/**
* Unsynchronized implementation of 'alloc'
*/
bool _unsynchronized_alloc(size_t size, void **out_addr);
inline void * operator new(Genode::size_t, void* addr) {
return addr; }
inline void operator delete(void*) { }
};
public:
/*
* This structure exists only to make sure that the dataspaces are
* destroyed after the AVL allocator.
*/
struct Dataspace_pool : public List<Dataspace>
{
Ram_session *ram_session; /* ram session for backing store */
Rm_session *rm_session; /* region manager */
enum { UNLIMITED = ~0 };
Heap(Ram_session *ram_session,
Rm_session *rm_session,
size_t quota_limit = UNLIMITED,
void *static_addr = 0,
size_t static_size = 0)
:
_ds_pool(ram_session, rm_session),
_alloc(0),
_quota_limit(quota_limit), _quota_used(0),
_chunk_size(MIN_CHUNK_SIZE)
{
if (static_addr)
_alloc.add_range((addr_t)static_addr, static_size);
}
/**
* Reconfigure quota limit
*
* \return negative error code if new quota limit is higher than
* currently used quota.
*/
int quota_limit(size_t new_quota_limit);
/**
* Re-assign RAM and RM sessions
*/
void reassign_resources(Ram_session *ram, Rm_session *rm) {
_ds_pool.reassign_resources(ram, rm); }
/*************************
** Allocator interface **
*************************/
bool alloc(size_t, void **);
void free(void *, size_t);
size_t consumed() { return _quota_used; }
size_t overhead(size_t size) { return _alloc.overhead(size); }
bool need_size_for_free() const override { return false; }
};
/**
* Heap that allocates each block at a separate dataspace
*/
class Sliced_heap : public Allocator
{
private:
class Block;
Ram_session *_ram_session; /* ram session for backing store */
Rm_session *_rm_session; /* region manager */
size_t _consumed; /* number of allocated bytes */
List<Block> _block_list; /* list of allocated blocks */
Lock _lock; /* serialize allocations */
public:
/**
* Constructor
*/
Sliced_heap(Ram_session *ram_session, Rm_session *rm_session);
Dataspace_pool(Ram_session *ram_session, Rm_session *rm_session)
: ram_session(ram_session), rm_session(rm_session) { }
/**
* Destructor
*/
~Sliced_heap();
~Dataspace_pool();
void reassign_resources(Ram_session *ram, Rm_session *rm) {
ram_session = ram, rm_session = rm; }
};
/*
* NOTE: The order of the member variables is important for
* the calling order of the destructors!
*/
Lock _lock;
Dataspace_pool _ds_pool; /* list of dataspaces */
Allocator_avl _alloc; /* local allocator */
size_t _quota_limit;
size_t _quota_used;
size_t _chunk_size;
/**
* Allocate a new dataspace of the specified size
*
* \param size number of bytes to allocate
* \param enforce_separate_metadata if true, the new dataspace
* will not contain any meta data
* \throw Rm_session::Invalid_dataspace,
* Rm_session::Region_conflict
* \return 0 on success or negative error code
*/
Heap::Dataspace *_allocate_dataspace(size_t size, bool enforce_separate_metadata);
/**
* Try to allocate block at our local allocator
*
* \return true on success
*
* This function is a utility used by '_unsynchronized_alloc' to
* avoid code duplication.
*/
bool _try_local_alloc(size_t size, void **out_addr);
/**
* Unsynchronized implementation of 'alloc'
*/
bool _unsynchronized_alloc(size_t size, void **out_addr);
public:
enum { UNLIMITED = ~0 };
Heap(Ram_session *ram_session,
Rm_session *rm_session,
size_t quota_limit = UNLIMITED,
void *static_addr = 0,
size_t static_size = 0)
:
_ds_pool(ram_session, rm_session),
_alloc(0),
_quota_limit(quota_limit), _quota_used(0),
_chunk_size(MIN_CHUNK_SIZE)
{
if (static_addr)
_alloc.add_range((addr_t)static_addr, static_size);
}
/**
* Reconfigure quota limit
*
* \return negative error code if new quota limit is higher than
* currently used quota.
*/
int quota_limit(size_t new_quota_limit);
/**
* Re-assign RAM and RM sessions
*/
void reassign_resources(Ram_session *ram, Rm_session *rm) {
_ds_pool.reassign_resources(ram, rm); }
/*************************
** Allocator interface **
*************************/
/*************************
** Allocator interface **
*************************/
bool alloc(size_t, void **);
void free(void *, size_t);
size_t consumed() { return _consumed; }
size_t overhead(size_t size);
bool need_size_for_free() const override { return false; }
};
}
bool alloc(size_t, void **) override;
void free(void *, size_t) override;
size_t consumed() override { return _quota_used; }
size_t overhead(size_t size) override { return _alloc.overhead(size); }
bool need_size_for_free() const override { return false; }
};
/**
* Heap that allocates each block at a separate dataspace
*/
class Genode::Sliced_heap : public Allocator
{
private:
class Block;
Ram_session *_ram_session; /* ram session for backing store */
Rm_session *_rm_session; /* region manager */
size_t _consumed; /* number of allocated bytes */
List<Block> _block_list; /* list of allocated blocks */
Lock _lock; /* serialize allocations */
public:
/**
* Constructor
*/
Sliced_heap(Ram_session *ram_session, Rm_session *rm_session);
/**
* Destructor
*/
~Sliced_heap();
/*************************
** Allocator interface **
*************************/
bool alloc(size_t, void **);
void free(void *, size_t);
size_t consumed() { return _consumed; }
size_t overhead(size_t size);
bool need_size_for_free() const override { return false; }
};
#endif /* _INCLUDE__BASE__HEAP_H_ */
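Usage sketch (illustrative only, not part of this patch): back a heap with the
environment's RAM and RM sessions and perform one allocation.

#include <base/env.h>
#include <base/heap.h>

static void heap_example()
{
	using namespace Genode;

	Heap heap(env()->ram_session(), env()->rm_session());

	void *ptr = 0;
	if (heap.alloc(1024, &ptr)) {
		/* ... use the 1 KiB block ... */
		heap.free(ptr, 1024);
	}
}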

File diff suppressed because it is too large


@ -16,32 +16,32 @@
#include <base/cancelable_lock.h>
namespace Genode {
namespace Genode { class Lock; }
class Lock : public Cancelable_lock
{
public:
/**
* Constructor
*/
explicit Lock(State initial = UNLOCKED)
: Cancelable_lock(initial) { }
class Genode::Lock : public Cancelable_lock
{
public:
void lock()
{
while (1)
try {
Cancelable_lock::lock();
return;
} catch (Blocking_canceled) { }
}
/**
* Constructor
*/
explicit Lock(State initial = UNLOCKED)
: Cancelable_lock(initial) { }
/**
* Lock guard
*/
typedef Lock_guard<Lock> Guard;
};
}
void lock()
{
while (1)
try {
Cancelable_lock::lock();
return;
} catch (Blocking_canceled) { }
}
/**
* Lock guard
*/
typedef Lock_guard<Lock> Guard;
};
#endif /* _INCLUDE__BASE__LOCK_H_ */


@ -21,26 +21,26 @@
#ifndef _INCLUDE__BASE__LOCK_GUARD_H_
#define _INCLUDE__BASE__LOCK_GUARD_H_
namespace Genode {
namespace Genode { template <typename> class Lock_guard; }
/**
* Lock guard template
*
* \param LT lock type
*/
template <typename LT>
class Lock_guard
{
private:
LT &_lock;
/**
* Lock guard template
*
* \param LT lock type
*/
template <typename LT>
class Genode::Lock_guard
{
private:
public:
LT &_lock;
explicit Lock_guard(LT &lock) : _lock(lock) { _lock.lock(); }
public:
~Lock_guard() { _lock.unlock(); }
};
}
explicit Lock_guard(LT &lock) : _lock(lock) { _lock.lock(); }
~Lock_guard() { _lock.unlock(); }
};
#endif /* _INCLUDE__BASE__LOCK_GUARD_H_ */
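Usage sketch (illustrative only, not part of this patch): protect a counter
with a 'Lock'; the guard releases the lock on every exit path.

#include <base/lock.h>
#include <base/lock_guard.h>

static Genode::Lock _counter_lock;
static unsigned     _counter;

static unsigned increment_counter()
{
	/* the lock is acquired here and released when the guard leaves scope */
	Genode::Lock::Guard guard(_counter_lock);
	return ++_counter;
}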


@ -18,102 +18,102 @@
#ifndef _INCLUDE__BASE__NATIVE_CAPABILITY_H_
#define _INCLUDE__BASE__NATIVE_CAPABILITY_H_
namespace Genode {
namespace Genode { template <typename> class Native_capability_tpl; }
/**
* Generic parts of the platform-specific 'Native_capability'
*
* \param POLICY policy class that provides the type used as capability
* destination and functions for checking the validity of
* the platform-specific destination type and for
* invalid destinations.
*
* The struct passed as 'POLICY' argument must have the following
* interface:
*
* ! typedef Dst;
* ! static bool valid(Dst dst);
* ! static Dst invalid();
*
* The 'Dst' type is the platform-specific destination type (e.g., the ID
* of the destination thread targeted by the capability). The 'valid'
* function returns true if the specified destination is valid. The
* 'invalid' function produces an invalid destination.
*/
template <typename POLICY>
class Native_capability_tpl
{
public:
typedef typename POLICY::Dst Dst;
/**
* Generic parts of the platform-specific 'Native_capability'
*
* \param POLICY policy class that provides the type used as capability
* destination and functions for checking the validity of
* the platform-specific destination type and for
* invalid destinations.
*
* The struct passed as 'POLICY' argument must have the following
* interface:
*
* ! typedef Dst;
* ! static bool valid(Dst dst);
* ! static Dst invalid();
*
* The 'Dst' type is the platform-specific destination type (e.g., the ID
* of the destination thread targeted by the capability). The 'valid'
* function returns true if the specified destination is valid. The
* 'invalid' function produces an invalid destination.
*/
template <typename POLICY>
class Genode::Native_capability_tpl
{
public:
/**
* Compound object used to copy raw capability members
*
* This type is a utility solely used to communicate the
* information about the parent capability from the parent to the
* new process.
*/
struct Raw { Dst dst; long local_name; };
typedef typename POLICY::Dst Dst;
private:
/**
* Compound object used to copy raw capability members
*
* This type is a utility solely used to communicate the
* information about the parent capability from the parent to the
* new process.
*/
struct Raw { Dst dst; long local_name; };
Dst _dst;
long _local_name;
private:
protected:
Dst _dst;
long _local_name;
/**
* Constructor for a local capability
*
* A local capability just encapsulates a pointer to some
* local object. This constructor is only used by a factory
* method for local-capabilities in the generic Capability
* class.
*
* \param ptr address of the local object
*/
Native_capability_tpl(void* ptr)
: _dst(POLICY::invalid()), _local_name((long)ptr) { }
protected:
public:
/**
* Constructor for a local capability
*
* A local capability just encapsulates a pointer to some
* local object. This constructor is only used by a factory
* method for local-capabilities in the generic Capability
* class.
*
* \param ptr address of the local object
*/
Native_capability_tpl(void* ptr)
: _dst(POLICY::invalid()), _local_name((long)ptr) { }
/**
* Constructor for an invalid capability
*/
Native_capability_tpl() : _dst(POLICY::invalid()), _local_name(0) { }
public:
/**
* Publicly available constructor
*
* \param tid kernel-specific thread id
* \param local_name ID used as key to lookup the 'Rpc_object'
* that corresponds to the capability.
*/
Native_capability_tpl(Dst tid, long local_name)
: _dst(tid), _local_name(local_name) { }
/**
* Constructor for an invalid capability
*/
Native_capability_tpl() : _dst(POLICY::invalid()), _local_name(0) { }
/**
* Return true when the capability is valid
*/
bool valid() const { return POLICY::valid(_dst); }
/**
* Publicly available constructor
*
* \param tid kernel-specific thread id
* \param local_name ID used as key to lookup the 'Rpc_object'
* that corresponds to the capability.
*/
Native_capability_tpl(Dst tid, long local_name)
: _dst(tid), _local_name(local_name) { }
/**
* Return ID used to lookup the 'Rpc_object' by its capability
*/
long local_name() const { return _local_name; }
/**
* Return true when the capability is valid
*/
bool valid() const { return POLICY::valid(_dst); }
/**
* Return pointer to object referenced by a local-capability
*/
void* local() const { return (void*)_local_name; }
/**
* Return ID used to lookup the 'Rpc_object' by its capability
*/
long local_name() const { return _local_name; }
/**
* Return capability destination
*/
Dst dst() const { return _dst; }
};
}
/**
* Return pointer to object referenced by a local-capability
*/
void* local() const { return (void*)_local_name; }
/**
* Return capability destination
*/
Dst dst() const { return _dst; }
};
#endif /* _INCLUDE__BASE__NATIVE_CAPABILITY_H_ */
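Usage sketch (illustrative only, not part of this patch): a POLICY argument as
described above. The 'Dst' type and the invalid-destination value are entirely
platform-specific, so the concrete choices below are placeholders.

#include <base/native_capability.h>

struct Example_cap_policy
{
	typedef int Dst;                                   /* e.g., a thread ID */

	static bool valid(Dst dst) { return dst != -1; }   /* placeholder check */
	static Dst  invalid()      { return -1; }          /* placeholder value */
};

typedef Genode::Native_capability_tpl<Example_cap_policy> Example_native_capability;

static Example_native_capability make_cap(int thread_id, long local_name)
{
	return Example_native_capability(thread_id, local_name);
}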


@ -19,198 +19,198 @@
#include <base/capability.h>
#include <base/lock.h>
namespace Genode {
namespace Genode { template <typename> class Object_pool; }
/**
* Map object ids to local objects
*
* \param OBJ_TYPE object type (must be inherited from Object_pool::Entry)
*
* The local names of capabilities are used to differentiate multiple server
* objects managed by one and the same object pool.
*/
template <typename OBJ_TYPE>
class Object_pool
{
public:
class Guard
{
private:
/**
* Map object ids to local objects
*
* \param OBJ_TYPE object type (must be inherited from Object_pool::Entry)
*
* The local names of capabilities are used to differentiate multiple server
* objects managed by one and the same object pool.
*/
template <typename OBJ_TYPE>
class Genode::Object_pool
{
public:
OBJ_TYPE * _object;
class Guard
{
private:
public:
operator OBJ_TYPE*() const { return _object; }
OBJ_TYPE * operator->() const { return _object; }
OBJ_TYPE * object() const { return _object; }
OBJ_TYPE * _object;
template <class X>
explicit Guard(X * object) {
_object = dynamic_cast<OBJ_TYPE *>(object); }
public:
operator OBJ_TYPE*() const { return _object; }
OBJ_TYPE * operator->() const { return _object; }
OBJ_TYPE * object() const { return _object; }
~Guard()
{
if (!_object) return;
template <class X>
explicit Guard(X * object) {
_object = dynamic_cast<OBJ_TYPE *>(object); }
_object->release();
}
};
~Guard()
{
if (!_object) return;
class Entry : public Avl_node<Entry>
{
private:
Untyped_capability _cap;
short int _ref;
bool _dead;
Lock _entry_lock;
inline unsigned long _obj_id() { return _cap.local_name(); }
friend class Object_pool;
friend class Avl_tree<Entry>;
/**
* Support functions for atomic lookup and lock
* functionality of class Object_pool.
*/
void lock() { _entry_lock.lock(); };
void unlock() { _entry_lock.unlock(); };
void add_ref() { _ref += 1; }
void del_ref() { _ref -= 1; }
bool is_dead(bool set_dead = false) {
return (set_dead ? (_dead = true) : _dead); }
bool is_ref_zero() { return _ref <= 0; }
public:
enum { OBJ_ID_INVALID = 0 };
/**
* Constructors
*/
Entry() : _ref(0), _dead(false) { }
Entry(Untyped_capability cap) : _cap(cap), _ref(0), _dead(false) { }
/**
* Avl_node interface
*/
bool higher(Entry *e) { return e->_obj_id() > _obj_id(); }
void recompute() { } /* for gcc-3.4 compatibility */
/**
* Support for object pool
*/
Entry *find_by_obj_id(unsigned long obj_id)
{
if (obj_id == _obj_id()) return this;
Entry *obj = this->child(obj_id > _obj_id());
return obj ? obj->find_by_obj_id(obj_id) : 0;
}
/**
* Assign capability to object pool entry
*/
void cap(Untyped_capability c) { _cap = c; }
Untyped_capability const cap() const { return _cap; }
/**
* Function used - ideally - solely by the Guard.
*/
void release() { del_ref(); unlock(); }
void acquire() { lock(); add_ref(); }
};
private:
Avl_tree<Entry> _tree;
Lock _lock;
public:
void insert(OBJ_TYPE *obj)
{
Lock::Guard lock_guard(_lock);
_tree.insert(obj);
}
void remove_locked(OBJ_TYPE *obj)
{
obj->is_dead(true);
obj->del_ref();
while (true) {
obj->unlock();
{
Lock::Guard lock_guard(_lock);
if (obj->is_ref_zero()) {
_tree.remove(obj);
return;
}
}
obj->lock();
_object->release();
}
}
};
/**
* Lookup object
*/
OBJ_TYPE *lookup_and_lock(addr_t obj_id)
{
OBJ_TYPE * obj_typed;
class Entry : public Avl_node<Entry>
{
private:
Untyped_capability _cap;
short int _ref;
bool _dead;
Lock _entry_lock;
inline unsigned long _obj_id() { return _cap.local_name(); }
friend class Object_pool;
friend class Avl_tree<Entry>;
/**
* Support functions for atomic lookup and lock
* functionality of class Object_pool.
*/
void lock() { _entry_lock.lock(); };
void unlock() { _entry_lock.unlock(); };
void add_ref() { _ref += 1; }
void del_ref() { _ref -= 1; }
bool is_dead(bool set_dead = false) {
return (set_dead ? (_dead = true) : _dead); }
bool is_ref_zero() { return _ref <= 0; }
public:
enum { OBJ_ID_INVALID = 0 };
/**
* Constructors
*/
Entry() : _ref(0), _dead(false) { }
Entry(Untyped_capability cap) : _cap(cap), _ref(0), _dead(false) { }
/**
* Avl_node interface
*/
bool higher(Entry *e) { return e->_obj_id() > _obj_id(); }
void recompute() { } /* for gcc-3.4 compatibility */
/**
* Support for object pool
*/
Entry *find_by_obj_id(unsigned long obj_id)
{
if (obj_id == _obj_id()) return this;
Entry *obj = this->child(obj_id > _obj_id());
return obj ? obj->find_by_obj_id(obj_id) : 0;
}
/**
* Assign capability to object pool entry
*/
void cap(Untyped_capability c) { _cap = c; }
Untyped_capability const cap() const { return _cap; }
/**
* Function used - ideally - solely by the Guard.
*/
void release() { del_ref(); unlock(); }
void acquire() { lock(); add_ref(); }
};
private:
Avl_tree<Entry> _tree;
Lock _lock;
public:
void insert(OBJ_TYPE *obj)
{
Lock::Guard lock_guard(_lock);
_tree.insert(obj);
}
void remove_locked(OBJ_TYPE *obj)
{
obj->is_dead(true);
obj->del_ref();
while (true) {
obj->unlock();
{
Lock::Guard lock_guard(_lock);
Entry *obj = _tree.first();
if (!obj) return 0;
obj_typed = (OBJ_TYPE *)obj->find_by_obj_id(obj_id);
if (!obj_typed || obj_typed->is_dead())
return 0;
obj_typed->add_ref();
if (obj->is_ref_zero()) {
_tree.remove(obj);
return;
}
}
obj_typed->lock();
return obj_typed;
obj->lock();
}
}
OBJ_TYPE *lookup_and_lock(Untyped_capability cap)
{
return lookup_and_lock(cap.local_name());
}
/**
* Return first element of tree
*
* This function is used for removing tree elements step by step.
*/
OBJ_TYPE *first()
/**
* Lookup object
*/
OBJ_TYPE *lookup_and_lock(addr_t obj_id)
{
OBJ_TYPE * obj_typed;
{
Lock::Guard lock_guard(_lock);
return (OBJ_TYPE *)_tree.first();
Entry *obj = _tree.first();
if (!obj) return 0;
obj_typed = (OBJ_TYPE *)obj->find_by_obj_id(obj_id);
if (!obj_typed || obj_typed->is_dead())
return 0;
obj_typed->add_ref();
}
/**
* Return first element of tree locked
*
* This function is used for removing tree elements step by step.
*/
OBJ_TYPE *first_locked()
{
Lock::Guard lock_guard(_lock);
OBJ_TYPE * const obj_typed = (OBJ_TYPE *)_tree.first();
if (!obj_typed) { return 0; }
obj_typed->lock();
return obj_typed;
}
};
}
obj_typed->lock();
return obj_typed;
}
OBJ_TYPE *lookup_and_lock(Untyped_capability cap)
{
return lookup_and_lock(cap.local_name());
}
/**
* Return first element of tree
*
* This function is used for removing tree elements step by step.
*/
OBJ_TYPE *first()
{
Lock::Guard lock_guard(_lock);
return (OBJ_TYPE *)_tree.first();
}
/**
* Return first element of tree locked
*
* This function is used for removing tree elements step by step.
*/
OBJ_TYPE *first_locked()
{
Lock::Guard lock_guard(_lock);
OBJ_TYPE * const obj_typed = (OBJ_TYPE *)_tree.first();
if (!obj_typed) { return 0; }
obj_typed->lock();
return obj_typed;
}
};
#endif /* _INCLUDE__BASE__OBJECT_POOL_H_ */
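Usage sketch (illustrative only, not part of this patch): manage server-side
objects keyed by the local names of their capabilities; the 'Guard' drops the
reference and unlocks the entry when leaving scope.

#include <base/object_pool.h>

struct My_object : Genode::Object_pool<My_object>::Entry
{
	My_object(Genode::Untyped_capability cap)
	: Genode::Object_pool<My_object>::Entry(cap) { }
};

static Genode::Object_pool<My_object> _pool;

static void handle_request(Genode::Untyped_capability cap)
{
	Genode::Object_pool<My_object>::Guard obj(_pool.lookup_and_lock(cap));
	if (!obj.object())
		return;

	/* ... operate on the locked object ... */
}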


@ -27,191 +27,198 @@
namespace Genode {
/**
* Special server object for paging
*
* A 'Pager_object' is very similar to a 'Rpc_object'. It is just a
* special implementation for page-fault handling, which does not allow
* defining a "badge" for pager capabilities.
*/
class Pager_object : public Object_pool<Pager_object>::Entry
{
protected:
/**
* Local name for this pager object
*/
unsigned long _badge;
Thread_capability _thread_cap;
/**
* User-level signal handler registered for this pager object via
* 'Cpu_session::exception_handler()'.
*/
Signal_context_capability _exception_sigh;
public:
/**
* Contains information about exception state of corresponding thread.
*/
Thread_state state;
/**
* Constructor
*
* \param location affinity of paged thread to physical CPU
*/
Pager_object(unsigned long badge, Affinity::Location location)
: _badge(badge) { }
virtual ~Pager_object() { }
unsigned long badge() const { return _badge; }
/**
* Interface to be implemented by a derived class
*
* \param ps 'Ipc_pager' stream
*
* Returns !0 on error, in which case the page fault is not answered.
*/
virtual int pager(Ipc_pager &ps) = 0;
/**
* Wake up the faulter
*/
void wake_up();
/**
* Assign user-level exception handler for the pager object
*/
void exception_handler(Signal_context_capability sigh)
{
_exception_sigh = sigh;
}
/**
* Notify exception handler about the occurrence of an exception
*/
void submit_exception_signal()
{
if (!_exception_sigh.valid()) return;
Signal_transmitter transmitter(_exception_sigh);
transmitter.submit();
}
/**
* Remember thread cap so that rm_session can tell thread that
* rm_client is gone.
*/
Thread_capability thread_cap() const { return _thread_cap; }
void thread_cap(Thread_capability cap) { _thread_cap = cap; }
/*
* Note in the thread state that an unresolved page
* fault occurred.
*/
void unresolved_page_fault_occurred();
};
class Pager_object;
class Pager_entrypoint;
/**
 * A 'Pager_activation' processes one page fault of a 'Pager_object' at a time.
 */
class Pager_activation_base: public Thread_base
{
private:
Native_capability _cap;
Pager_entrypoint *_ep; /* entry point to which the
activation belongs */
/**
* Lock used for blocking until '_cap' is initialized
*/
Lock _cap_valid;
public:
Pager_activation_base(const char *name, size_t stack_size) :
Thread_base(0, name, stack_size),
_cap(Native_capability()), _ep(0), _cap_valid(Lock::LOCKED) { }
/**
* Set entry point, which the activation serves
*
* This function is only called by the 'Pager_entrypoint'
* constructor.
*/
void ep(Pager_entrypoint *ep) { _ep = ep; }
/**
* Thread interface
*/
void entry();
/**
* Return capability to this activation
*
* This function should only be called from 'Pager_entrypoint'
*/
Native_capability cap()
{
/* ensure that the initialization of our 'Ipc_pager' is done */
if (!_cap.valid())
_cap_valid.lock();
return _cap;
}
};
/**
* Paging entry point
*
* A paging entry point can hold only one activation. Thus, paging is
* strictly serialized per entry point.
*/
class Pager_entrypoint : public Object_pool<Pager_object>
{
private:
Pager_activation_base *_activation;
Cap_session *_cap_session;
public:
/**
* Constructor
*
* \param cap_session Cap_session for creating capabilities
* for the pager objects managed by this
* entry point
* \param a initial activation
*/
Pager_entrypoint(Cap_session *cap_session, Pager_activation_base *a = 0);
/**
* Associate Pager_object with the entry point
*/
Pager_capability manage(Pager_object *obj);
/**
* Dissolve Pager_object from entry point
*/
void dissolve(Pager_object *obj);
};
template <int STACK_SIZE>
class Pager_activation : public Pager_activation_base
{
public:
Pager_activation() : Pager_activation_base("pager", STACK_SIZE)
{ start(); }
};
class Pager_activation_base;
template <int> class Pager_activation;
}
/**
* Special server object for paging
*
* A 'Pager_object' is very similar to a 'Rpc_object'. It is just a
* special implementation for page-fault handling, which does not allow
* the definition of a "badge" for pager capabilities.
*/
class Genode::Pager_object : public Object_pool<Pager_object>::Entry
{
protected:
/**
* Local name for this pager object
*/
unsigned long _badge;
Thread_capability _thread_cap;
/**
* User-level signal handler registered for this pager object via
* 'Cpu_session::exception_handler()'.
*/
Signal_context_capability _exception_sigh;
public:
/**
* Contains information about exception state of corresponding thread.
*/
Thread_state state;
/**
* Constructor
*
* \param location affinity of paged thread to physical CPU
*/
Pager_object(unsigned long badge, Affinity::Location location)
: _badge(badge) { }
virtual ~Pager_object() { }
unsigned long badge() const { return _badge; }
/**
* Interface to be implemented by a derived class
*
* \param ps 'Ipc_pager' stream
*
* Returns !0 on error, in which case the page fault is not answered.
*/
virtual int pager(Ipc_pager &ps) = 0;
/**
* Wake up the faulter
*/
void wake_up();
/**
* Assign user-level exception handler for the pager object
*/
void exception_handler(Signal_context_capability sigh)
{
_exception_sigh = sigh;
}
/**
* Notify exception handler about the occurrence of an exception
*/
void submit_exception_signal()
{
if (!_exception_sigh.valid()) return;
Signal_transmitter transmitter(_exception_sigh);
transmitter.submit();
}
/**
* Remember thread cap so that rm_session can tell thread that
* rm_client is gone.
*/
Thread_capability thread_cap() const { return _thread_cap; }
void thread_cap(Thread_capability cap) { _thread_cap = cap; }
/*
* Note in the thread state that an unresolved page
* fault occurred.
*/
void unresolved_page_fault_occurred();
};
/**
* A 'Pager_activation' processes one page fault of a 'Pager_object' at a time.
*/
class Genode::Pager_activation_base: public Thread_base
{
private:
Native_capability _cap;
Pager_entrypoint *_ep; /* entry point to which the
activation belongs */
/**
* Lock used for blocking until '_cap' is initialized
*/
Lock _cap_valid;
public:
Pager_activation_base(const char *name, size_t stack_size) :
Thread_base(0, name, stack_size),
_cap(Native_capability()), _ep(0), _cap_valid(Lock::LOCKED) { }
/**
* Set entry point, which the activation serves
*
* This function is only called by the 'Pager_entrypoint'
* constructor.
*/
void ep(Pager_entrypoint *ep) { _ep = ep; }
/**
* Thread interface
*/
void entry();
/**
* Return capability to this activation
*
* This function should only be called from 'Pager_entrypoint'
*/
Native_capability cap()
{
/* ensure that the initialization of our 'Ipc_pager' is done */
if (!_cap.valid())
_cap_valid.lock();
return _cap;
}
};
/**
* Paging entry point
*
* A paging entry point can hold only one activation. Thus, paging is
* strictly serialized per entry point.
*/
class Genode::Pager_entrypoint : public Object_pool<Pager_object>
{
private:
Pager_activation_base *_activation;
Cap_session *_cap_session;
public:
/**
* Constructor
*
* \param cap_session Cap_session for creating capabilities
* for the pager objects managed by this
* entry point
* \param a initial activation
*/
Pager_entrypoint(Cap_session *cap_session, Pager_activation_base *a = 0);
/**
* Associate Pager_object with the entry point
*/
Pager_capability manage(Pager_object *obj);
/**
* Dissolve Pager_object from entry point
*/
void dissolve(Pager_object *obj);
};
template <int STACK_SIZE>
class Genode::Pager_activation : public Pager_activation_base
{
public:
Pager_activation() : Pager_activation_base("pager", STACK_SIZE)
{ start(); }
};
#endif /* _INCLUDE__BASE__PAGER_H_ */
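A schematic sketch of how these core-internal classes fit together (not part of this patch; 'My_pager', the badge value, and the way the 'Cap_session' pointer is obtained are hypothetical):

#include <base/pager.h>
#include <cap_session/cap_session.h>

/* hypothetical pager resolving the faults of one paged thread */
struct My_pager : Genode::Pager_object
{
    My_pager(unsigned long badge)
    : Genode::Pager_object(badge, Genode::Affinity::Location()) { }

    int pager(Genode::Ipc_pager &ps)
    {
        /* inspect the fault via 'ps' and install a mapping ... */
        return 0;   /* 0 means the page fault was answered */
    }
};

void example(Genode::Cap_session *cap_session)   /* core's Cap_session */
{
    enum { STACK_SIZE = 8 * 1024 };
    static Genode::Pager_activation<STACK_SIZE> activation;
    static Genode::Pager_entrypoint pager_ep(cap_session, &activation);

    static My_pager pager(1 /* badge, hypothetical */);
    Genode::Pager_capability cap = pager_ep.manage(&pager);

    /* 'cap' would now be installed as the fault handler of a thread */

    pager_ep.dissolve(&pager);
}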

View File

@ -20,76 +20,76 @@
#include <cpu_session/client.h>
#include <parent/capability.h>
namespace Genode { class Process; }


class Genode::Process
{
private:

static Dataspace_capability _dynamic_linker_cap;

Pd_connection _pd;
Thread_capability _thread0_cap;
Cpu_session_client _cpu_session_client;
Rm_session_client _rm_session_client;

/*
 * Hook for passing additional platform-specific session
 * arguments to the PD session. For example, on Linux a new
 * process is created locally via 'fork' and the new PID gets
 * then communicated to core via a PD-session argument.
 */
enum { PRIV_ARGBUF_LEN = 32 };
char _priv_pd_argbuf[PRIV_ARGBUF_LEN];
const char *_priv_pd_args(Parent_capability parent_cap,
Dataspace_capability elf_data_ds,
const char *name, char *const argv[]);

public:

/**
 * Constructor
 *
 * \param elf_data_ds  dataspace that contains the elf binary
 * \param ram_session  RAM session providing the BSS for the
 *                     new protection domain
 * \param cpu_session  CPU session for the new protection domain
 * \param rm_session   RM session for the new protection domain
 * \param parent       parent of the new protection domain
 * \param name         name of protection domain (can be used
 *                     for debugging)
 * \param pd_args      platform-specific arguments supplied to
 *                     the PD session of the process
 *
 * The dataspace 'elf_data_ds' can be read-only.
 *
 * On construction of a protection domain, execution of the initial
 * thread is started immediately.
 */
Process(Dataspace_capability elf_data_ds,
Ram_session_capability ram_session,
Cpu_session_capability cpu_session,
Rm_session_capability rm_session,
Parent_capability parent,
char const *name,
Native_pd_args const *args = 0);

/**
 * Destructor
 *
 * When called, the protection domain gets killed.
 */
~Process();

static void dynamic_linker(Dataspace_capability dynamic_linker_cap)
{
_dynamic_linker_cap = dynamic_linker_cap;
}

Pd_session_capability pd_session_cap() const { return _pd.cap(); }

Thread_capability main_thread_cap() const { return _thread0_cap; }
};
#endif /* _INCLUDE__BASE__PROCESS_H_ */
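A rough sketch of how the constructor arguments are typically obtained (not part of this patch). In practice this plumbing is wrapped by the 'Child' framework; the sketch omits RAM-quota transfers and the creation of the parent capability, and the ROM module name is hypothetical.

#include <base/process.h>
#include <rom_session/connection.h>
#include <ram_session/connection.h>
#include <cpu_session/connection.h>
#include <rm_session/connection.h>

void start_child(Genode::Parent_capability parent_cap)
{
    using namespace Genode;

    Rom_connection elf_rom("child_binary");   /* binary name is hypothetical */
    Ram_connection ram;                       /* quota transfer omitted      */
    Cpu_connection cpu;
    Rm_connection  rm;

    /* the child's initial thread starts as soon as the object is constructed */
    static Process child(elf_rom.dataspace(), ram.cap(), cpu.cap(), rm.cap(),
                         parent_cap, "child", 0);
}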

View File

@ -19,103 +19,107 @@
namespace Genode {
/**
* Base class of 'Rpc_in_buffer'
*/
class Rpc_in_buffer_base
{
protected:
const char *_base;
size_t _size;
/**
* Construct buffer from null-terminated string
*/
explicit Rpc_in_buffer_base(const char *str)
: _base(str), _size(strlen(str) + 1) { }
/**
* Construct an empty buffer by default
*/
Rpc_in_buffer_base(): _base(0), _size(0) { }
public:
/**
* Construct buffer
*/
Rpc_in_buffer_base(const char *base, size_t size)
: _base(base), _size(size) { }
const char *base() const { return _base; }
size_t size() const { return _size; }
};
/**
* Buffer with size constraint
*/
template <size_t MAX>
class Rpc_in_buffer : public Rpc_in_buffer_base
{
private:
/*
* This member is only there to pump up the size of the object such
* that 'sizeof()' returns the maximum buffer size when queried by
* the RPC framework.
*/
char _balloon[MAX];
public:
enum { MAX_SIZE = MAX };
/**
* Construct buffer
*/
Rpc_in_buffer(const char *base, size_t size)
: Rpc_in_buffer_base(base, min(size, (size_t)MAX_SIZE)) { }
/**
* Construct buffer from null-terminated string
*/
Rpc_in_buffer(const char *str) : Rpc_in_buffer_base(str)
{
if (_size >= MAX_SIZE - 1)
_size = MAX_SIZE - 1;
}
/**
* Default constructor creates invalid buffer
*/
Rpc_in_buffer() { }
void operator = (Rpc_in_buffer<MAX_SIZE> const &from)
{
_base = from.base();
_size = from.size();
}
/**
* Return true if buffer contains a valid null-terminated string
*/
bool is_valid_string() const {
return (_size < MAX_SIZE) && (_size > 0) && (_base[_size - 1] == '\0'); }
/**
* Return buffer content as null-terminated string
*
* \return pointer to null-terminated string
*
* The function returns an empty string if the buffer does not hold
* a valid null-terminated string. To distinguish a buffer holding
* an invalid string from a buffer holding a valid empty string,
* the function 'is_valid_string' can be used.
*/
char const *string() const { return is_valid_string() ? base() : ""; }
};
class Rpc_in_buffer_base;
template <size_t> class Rpc_in_buffer;
}
/**
* Base class of 'Rpc_in_buffer'
*/
class Genode::Rpc_in_buffer_base
{
protected:
const char *_base;
size_t _size;
/**
* Construct buffer from null-terminated string
*/
explicit Rpc_in_buffer_base(const char *str)
: _base(str), _size(strlen(str) + 1) { }
/**
* Construct an empty buffer by default
*/
Rpc_in_buffer_base(): _base(0), _size(0) { }
public:
/**
* Construct buffer
*/
Rpc_in_buffer_base(const char *base, size_t size)
: _base(base), _size(size) { }
const char *base() const { return _base; }
size_t size() const { return _size; }
};
/**
* Buffer with size constraint
*/
template <Genode::size_t MAX>
class Genode::Rpc_in_buffer : public Rpc_in_buffer_base
{
private:
/*
* This member is only there to pump up the size of the object such
* that 'sizeof()' returns the maximum buffer size when queried by
* the RPC framework.
*/
char _balloon[MAX];
public:
enum { MAX_SIZE = MAX };
/**
* Construct buffer
*/
Rpc_in_buffer(const char *base, size_t size)
: Rpc_in_buffer_base(base, min(size, (size_t)MAX_SIZE)) { }
/**
* Construct buffer from null-terminated string
*/
Rpc_in_buffer(const char *str) : Rpc_in_buffer_base(str)
{
if (_size >= MAX_SIZE - 1)
_size = MAX_SIZE - 1;
}
/**
* Default constructor creates invalid buffer
*/
Rpc_in_buffer() { }
void operator = (Rpc_in_buffer<MAX_SIZE> const &from)
{
_base = from.base();
_size = from.size();
}
/**
* Return true if buffer contains a valid null-terminated string
*/
bool is_valid_string() const {
return (_size < MAX_SIZE) && (_size > 0) && (_base[_size - 1] == '\0'); }
/**
* Return buffer content as null-terminated string
*
* \return pointer to null-terminated string
*
* The function returns an empty string if the buffer does not hold
* a valid null-terminated string. To distinguish a buffer holding
* an invalid string from a buffer holding a valid empty string,
* the function 'is_valid_string' can be used.
*/
char const *string() const { return is_valid_string() ? base() : ""; }
};
#endif /* _INCLUDE__BASE__RPC_ARGS_H_ */
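The typical use of 'Rpc_in_buffer' is as a bounded string argument in an RPC interface declaration, as sketched below (the 'Log' interface is hypothetical; the real LOG session follows the same pattern):

#include <base/rpc_args.h>
#include <base/rpc.h>

/* hypothetical interface taking a bounded string argument */
struct Log
{
    /* at most 128 bytes are copied across the RPC boundary */
    typedef Genode::Rpc_in_buffer<128> Message;

    virtual void print(Message const &msg) = 0;

    GENODE_RPC(Rpc_print, void, print, Message const &);
    GENODE_RPC_INTERFACE(Rpc_print);
};

On the server side, 'msg.string()' yields the null-terminated content, or an empty string if the buffer does not hold a valid string.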

View File

@ -24,370 +24,375 @@
#include <cap_session/cap_session.h>
namespace Genode {
template <typename, typename> class Rpc_dispatcher;
class Rpc_object_base;
template <typename, typename> struct Rpc_object;
class Rpc_entrypoint;
}
/**
* RPC dispatcher implementing the specified RPC interface
*
* \param RPC_INTERFACE class providing the RPC interface description
* \param SERVER class to invoke for the server-side RPC functions
*
* This class is the base class of each server-side RPC implementation. It
* contains the logic for dispatching incoming RPC requests and calls the
* server functions according to the RPC declarations in 'RPC_INTERFACE'.
*
* If using the default argument for 'SERVER', the 'RPC_INTERFACE' is expected
* to contain the abstract interface for all RPC functions. So virtual functions
* must be declared in 'RPC_INTERFACE'. In contrast, by explicitly specifying
* the 'SERVER' argument, the server-side dispatching performs direct function
* calls to the respective member functions of the 'SERVER' class and thereby
* omits virtual function calls.
*/
template <typename RPC_INTERFACE, typename SERVER = RPC_INTERFACE>
class Genode::Rpc_dispatcher : public RPC_INTERFACE
{
/**
 * Shortcut for the type list of RPC functions provided by this server
 * component
 */
typedef typename RPC_INTERFACE::Rpc_functions Rpc_functions;

protected:

template <typename ARG_LIST>
void _read_args(Ipc_istream &is, ARG_LIST &args)
{
if (Trait::Rpc_direction<typename ARG_LIST::Head>::Type::IN)
is >> args._1;
_read_args(is, args._2);
}

void _read_args(Ipc_istream &, Meta::Empty) { }

template <typename ARG_LIST>
void _write_results(Ipc_ostream &os, ARG_LIST &args)
{
if (Trait::Rpc_direction<typename ARG_LIST::Head>::Type::OUT)
os << args._1;
_write_results(os, args._2);
}

void _write_results(Ipc_ostream &, Meta::Empty) { }

template <typename RPC_FUNCTION, typename EXC_TL>
Rpc_exception_code _do_serve(typename RPC_FUNCTION::Server_args &args,
typename RPC_FUNCTION::Ret_type &ret,
Meta::Overload_selector<RPC_FUNCTION, EXC_TL>)
{
enum { EXCEPTION_CODE = RPC_EXCEPTION_BASE - Meta::Length<EXC_TL>::Value };
try {
typedef typename EXC_TL::Tail Exc_tail;
return _do_serve(args, ret,
Meta::Overload_selector<RPC_FUNCTION, Exc_tail>());
} catch (typename EXC_TL::Head) { return EXCEPTION_CODE; }
}

template <typename RPC_FUNCTION>
Rpc_exception_code _do_serve(typename RPC_FUNCTION::Server_args &args,
typename RPC_FUNCTION::Ret_type &ret,
Meta::Overload_selector<RPC_FUNCTION, Meta::Empty>)
{
RPC_FUNCTION::serve(*static_cast<SERVER *>(this), args, ret);
return 0;
}

template <typename RPC_FUNCTIONS_TO_CHECK>
Rpc_exception_code _do_dispatch(Rpc_opcode opcode, Ipc_istream &is, Ipc_ostream &os,
Meta::Overload_selector<RPC_FUNCTIONS_TO_CHECK>)
{
using namespace Meta;

typedef typename RPC_FUNCTIONS_TO_CHECK::Head This_rpc_function;

if (opcode == Index_of<Rpc_functions, This_rpc_function>::Value) {

typename This_rpc_function::Server_args args{};

/* read arguments from istream */
_read_args(is, args);

{
Trace::Rpc_dispatch trace_event(This_rpc_function::name());
}

/*
 * Dispatch call to matching RPC base class, using
 * 'This_rpc_function' and the list of its exceptions to
 * select the overload.
 */
typedef typename This_rpc_function::Exceptions Exceptions;

typename This_rpc_function::Ret_type ret;
Rpc_exception_code exc;
exc = _do_serve(args, ret, Overload_selector<This_rpc_function, Exceptions>());

os << ret;

{
Trace::Rpc_reply trace_event(This_rpc_function::name());
}

/* write results to ostream 'os' */
_write_results(os, args);

return exc;
}

typedef typename RPC_FUNCTIONS_TO_CHECK::Tail Tail;
return _do_dispatch(opcode, is, os, Overload_selector<Tail>());
}

int _do_dispatch(int opcode, Ipc_istream &, Ipc_ostream &,
Meta::Overload_selector<Meta::Empty>)
{
PERR("invalid opcode %d\n", opcode);
return RPC_INVALID_OPCODE;
}

/**
 * Handle corner case of having an RPC interface with no RPC functions
 */
Rpc_exception_code _do_dispatch(int opcode, Ipc_istream &, Ipc_ostream &,
Meta::Overload_selector<Meta::Type_list<> >)
{
return 0;
}

/**
 * Protected constructor
 *
 * This class is only usable as base class.
 */
Rpc_dispatcher() { }

public:

Rpc_exception_code dispatch(int opcode, Ipc_istream &is, Ipc_ostream &os)
{
return _do_dispatch(opcode, is, os,
Meta::Overload_selector<Rpc_functions>());
}
};


class Genode::Rpc_object_base : public Object_pool<Rpc_object_base>::Entry
{
public:

virtual ~Rpc_object_base() { }

/**
 * Interface to be implemented by a derived class
 *
 * \param op  opcode of invoked method
 * \param is  Ipc_input stream with method arguments
 * \param os  Ipc_output stream for storing method results
 */
virtual int dispatch(int op, Ipc_istream &is, Ipc_ostream &os) = 0;
};


/**
 * Object that is accessible from remote protection domains
 *
 * A 'Rpc_object' is a locally implemented object that can be referenced
 * from the outer world using a capability. The capability gets created
 * when attaching a 'Rpc_object' to a 'Rpc_entrypoint'.
 */
template <typename RPC_INTERFACE, typename SERVER = RPC_INTERFACE>
struct Genode::Rpc_object : Rpc_object_base, Rpc_dispatcher<RPC_INTERFACE, SERVER>
{
/*****************************
 ** Server-object interface **
 *****************************/

Rpc_exception_code dispatch(int opcode, Ipc_istream &is, Ipc_ostream &os)
{
return Rpc_dispatcher<RPC_INTERFACE, SERVER>::dispatch(opcode, is, os);
}

Capability<RPC_INTERFACE> const cap() const
{
return reinterpret_cap_cast<RPC_INTERFACE>(Rpc_object_base::cap());
}
};


/**
 * RPC entrypoint serving RPC objects
 *
 * The entrypoint's thread will initialize its capability but will not
 * immediately enable the processing of requests. This way, the
 * activation-using server can ensure that it gets initialized completely
 * before the first capability invocations come in. Once the server is
 * ready, it must enable the entrypoint explicitly by calling the
 * 'activate()' function. The 'start_on_construction' argument is a
 * shortcut for the common case where the server's capability is handed
 * over to other parties _after_ the server is completely initialized.
 */
class Genode::Rpc_entrypoint : Thread_base, public Object_pool<Rpc_object_base>
{
private:

/**
 * Prototype capability to derive capabilities for RPC objects
 * from.
 */
Untyped_capability _cap;

enum { SND_BUF_SIZE = 1024, RCV_BUF_SIZE = 1024 };
Msgbuf<SND_BUF_SIZE> _snd_buf;
Msgbuf<RCV_BUF_SIZE> _rcv_buf;

/**
 * Hook to let low-level thread init code access private members
 *
 * This function is only used on NOVA.
 */
static void _activation_entry();

struct Exit
{
GENODE_RPC(Rpc_exit, void, _exit);
GENODE_RPC_INTERFACE(Rpc_exit);
};

struct Exit_handler : Rpc_object<Exit, Exit_handler>
{
int exit;

Exit_handler() : exit(false) { }

void _exit() { exit = true; }
};

protected:

Ipc_server *_ipc_server;
Rpc_object_base *_curr_obj; /* currently dispatched RPC object */
Lock _curr_obj_lock; /* for the protection of '_curr_obj' */
Lock _cap_valid; /* thread startup synchronization */
Lock _delay_start; /* delay start of request dispatching */
Lock _delay_exit; /* delay destructor until server settled */
Cap_session *_cap_session; /* for creating capabilities */
Exit_handler _exit_handler;
Capability<Exit> _exit_cap;

/**
 * Back-end function to associate RPC object with the entry point
 */
Untyped_capability _manage(Rpc_object_base *obj);

/**
 * Back-end function to dissolve RPC object from entry point
 */
void _dissolve(Rpc_object_base *obj);

/**
 * Force activation to cancel dispatching the specified server object
 */
void _leave_server_object(Rpc_object_base *obj);

/**
 * Wait until the entrypoint activation is initialized
 */
void _block_until_cap_valid();

/**
 * Thread interface
 */
void entry();

public:

/**
 * Constructor
 *
 * \param cap_session  'Cap_session' for creating capabilities
 *                     for the RPC objects managed by this entry
 *                     point
 * \param stack_size   stack size of entrypoint thread
 * \param name         name of entrypoint thread
 * \param location     CPU affinity
 */
Rpc_entrypoint(Cap_session *cap_session, size_t stack_size,
char const *name, bool start_on_construction = true,
Affinity::Location location = Affinity::Location());

~Rpc_entrypoint();

/**
 * Associate RPC object with the entry point
 */
template <typename RPC_INTERFACE, typename RPC_SERVER>
Capability<RPC_INTERFACE>
manage(Rpc_object<RPC_INTERFACE, RPC_SERVER> *obj)
{
return reinterpret_cap_cast<RPC_INTERFACE>(_manage(obj));
}

/**
 * Dissolve RPC object from entry point
 */
template <typename RPC_INTERFACE, typename RPC_SERVER>
void dissolve(Rpc_object<RPC_INTERFACE, RPC_SERVER> *obj)
{
_dissolve(obj);
}

/**
 * Activate entrypoint, start processing RPC requests
 */
void activate();

/**
 * Request reply capability for current call
 *
 * Note: This is a temporary API function, which is going to be
 * removed. Please do not use this function.
 *
 * Typically, a capability obtained via this function is used as
 * argument of 'intermediate_reply'.
 */
Untyped_capability reply_dst();

/**
 * Prevent reply of current request
 *
 * Note: This is a temporary API function, which is going to be
 * removed. Please do not use this function.
 *
 * This function can be used to keep the calling client blocked
 * after the server has finished the processing of the client's
 * request. At a later time, the server may choose to unblock the
 * client via the 'intermediate_reply' function.
 */
void omit_reply();

/**
 * Send a reply out of the normal call-reply order
 *
 * Note: This is a temporary API function, which is going to be
 * removed. Please do not use this function.
 *
 * In combination with the 'reply_dst' accessor functions, this
 * function can be used to implement services that dispatch client
 * requests out of order. In such cases, the server activation may
 * send reply messages to multiple blocking clients before
 * answering the original call.
 */
void explicit_reply(Untyped_capability reply_cap, int return_value);

/**
 * Return true if the caller corresponds to the entrypoint called
 */
bool is_myself() const;
};
#endif /* _INCLUDE__BASE__RPC_SERVER_H_ */
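A minimal server sketch wiring these classes together (not part of this patch; the 'Adder' interface and all names are illustrative, following the common Genode server pattern):

#include <base/rpc_server.h>
#include <cap_session/connection.h>

/* hypothetical RPC interface */
struct Adder
{
    virtual int add(int a, int b) = 0;

    GENODE_RPC(Rpc_add, int, add, int, int);
    GENODE_RPC_INTERFACE(Rpc_add);
};

/* server-side implementation, remotely callable via a capability */
struct Adder_component : Genode::Rpc_object<Adder>
{
    int add(int a, int b) { return a + b; }
};

void example()
{
    using namespace Genode;

    static Cap_connection cap;   /* Cap_session used for capability creation */
    enum { STACK_SIZE = 8 * 1024 };
    static Rpc_entrypoint ep(&cap, STACK_SIZE, "adder_ep");

    static Adder_component adder;
    Capability<Adder> adder_cap = ep.manage(&adder);

    /* 'adder_cap' can now be announced to the parent or passed to clients */

    ep.dissolve(&adder);
}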

View File

@ -21,150 +21,157 @@
namespace Genode {
struct Semaphore_queue;
class Fifo_semaphore_queue;
template <typename, typename> class Semaphore_template;
}
/**
 * Semaphore queue interface
 */
struct Genode::Semaphore_queue
{
/**
 * Semaphore-queue elements
 *
 * A queue element represents a thread blocking on the
 * semaphore.
 */
class Element : Lock
{
public:

/**
 * Constructor
 */
Element() : Lock(LOCKED) { }

void block() { lock(); }
void wake_up() { unlock(); }
};

/**
 * Add new queue member that is going to block
 */
void enqueue(Element *e);

/**
 * Dequeue queue member to wake up next
 */
Element *dequeue();
};
/**
* First-in-first-out variant of the semaphore-queue interface
*/
class Genode::Fifo_semaphore_queue : public Semaphore_queue
{
public:
class Element : public Semaphore_queue::Element,
public Fifo<Element>::Element { };
private:
Fifo<Element> _fifo;
public:
void enqueue(Element *e) { _fifo.enqueue(e); }
Element *dequeue() { return _fifo.dequeue(); }
};
/**
* Semaphore base template
*
* \param QT semaphore wait queue type implementing the
* 'Semaphore_queue' interface
* \param QTE wait-queue element type implementing the
* 'Semaphore_queue::Element' interface
*
* The queuing policy is defined via the QT and QTE types.
* This way, the platform-specific semaphore-queueing policies
* such as priority-sorted queueing can be easily supported.
*/
template <typename QT, typename QTE>
class Genode::Semaphore_template
{
protected:
int _cnt;
Lock _meta_lock;
QT _queue;
public:
/**
* Constructor
*
* \param n  initial counter value of the semaphore
*/
Semaphore_template(int n = 0) : _cnt(n) { }
~Semaphore_template()
{
/* synchronize destruction with unfinished 'up()' */
try { _meta_lock.lock(); } catch (...) { }
}
void up()
{
Lock::Guard lock_guard(_meta_lock);
if (++_cnt > 0)
return;
/*
* Remove element from queue and wake up the corresponding
* blocking thread
*/
Semaphore_queue::Element * element = _queue.dequeue();
if (element)
element->wake_up();
}
void down()
{
_meta_lock.lock();
if (--_cnt < 0) {
/*
* Create semaphore queue element representing the thread
* in the wait queue.
*/
QTE queue_element;
_queue.enqueue(&queue_element);
_meta_lock.unlock();
/*
* The thread is going to block on a local lock now,
* waiting for getting waked from another thread
* calling 'up()'
* */
queue_element.block();
} else {
_meta_lock.unlock();
}
}
/**
* Return current semaphore counter
*/
int cnt() { return _cnt; }
};
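A usage sketch of the default FIFO-queued 'Genode::Semaphore' built from this template (not part of this patch):

#include <base/semaphore.h>

static Genode::Semaphore items(0);   /* counts produced items */

void producer()
{
    /* produce one item ... */
    items.up();     /* wake up one blocked consumer, if any */
}

void consumer()
{
    items.down();   /* blocks until at least one item is available */
    /* consume the item ... */
}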
namespace Genode {
/**
* Semaphore with default behaviour

View File

@ -22,424 +22,433 @@
namespace Genode {
/**
* Client role
*
* A client is someone who applies for a service. If the service is not
* available yet, we enqueue the client into a wait queue and wake him up
* as soon as the requested service gets available.
*/
class Client : public List<Client>::Element
{
private:
Cancelable_lock _service_apply_lock;
const char *_apply_for;
public:
/**
* Constructor
*/
Client(): _service_apply_lock(Lock::LOCKED), _apply_for(0) { }
virtual ~Client() { }
/**
* Set/Request service name that we are currently applying for
*/
void apply_for(const char *apply_for) { _apply_for = apply_for; }
const char *apply_for() { return _apply_for; }
/**
* Service wait queue support
*/
void sleep() { _service_apply_lock.lock(); }
void wakeup() { _service_apply_lock.unlock(); }
};
/**
* Server role
*
* A server is a process that provides one or multiple services. For the
* most part, this class is used as an opaque key to represent the server
* role.
*/
class Server
{
private:
Ram_session_capability _ram;
public:
/**
* Constructor
*
* \param ram RAM session capability of the server process used,
* for quota transfers from/to the server
*/
Server(Ram_session_capability ram): _ram(ram) { }
/**
* Return RAM session capability of the server process
*/
Ram_session_capability ram_session_cap() const { return _ram; }
};
class Service : public List<Service>::Element
{
public:
enum { MAX_NAME_LEN = 32 };
private:
char _name[MAX_NAME_LEN];
public:
/*********************
** Exception types **
*********************/
class Invalid_args { };
class Unavailable { };
class Quota_exceeded { };
/**
* Constructor
*
* \param name service name
*/
Service(const char *name) { strncpy(_name, name, sizeof(_name)); }
virtual ~Service() { }
/**
* Return service name
*/
const char *name() const { return _name; }
/**
* Create session
*
* \param args session-construction arguments
* \param affinity preferred CPU affinity of session
*
* \throw Invalid_args
* \throw Unavailable
* \throw Quota_exceeded
*/
virtual Session_capability session(char const *args,
Affinity const &affinity) = 0;
/**
* Extend resource donation to an existing session
*/
virtual void upgrade(Session_capability session, const char *args) = 0;
/**
* Close session
*/
virtual void close(Session_capability /*session*/) { }
/**
* Return server providing the service
*/
virtual Server *server() const { return 0; }
/**
* Return the RAM session to be used for trading resources
*/
Ram_session_capability ram_session_cap()
{
if (server())
return server()->ram_session_cap();
return Ram_session_capability();
}
};
/**
* Representation of a locally implemented service
*/
class Local_service : public Service
{
private:
Root *_root;
public:
Local_service(const char *name, Root *root)
: Service(name), _root(root) { }
Session_capability session(const char *args, Affinity const &affinity)
{
try { return _root->session(args, affinity); }
catch (Root::Invalid_args) { throw Invalid_args(); }
catch (Root::Unavailable) { throw Unavailable(); }
catch (Root::Quota_exceeded) { throw Quota_exceeded(); }
catch (Genode::Ipc_error) { throw Unavailable(); }
}
void upgrade(Session_capability session, const char *args)
{
try { _root->upgrade(session, args); }
catch (Genode::Ipc_error) { throw Unavailable(); }
}
void close(Session_capability session)
{
try { _root->close(session); }
catch (Genode::Ipc_error) { throw Blocking_canceled(); }
}
};
/**
* Representation of a service provided by our parent
*/
class Parent_service : public Service
{
public:
Parent_service(const char *name) : Service(name) { }
Session_capability session(const char *args, Affinity const &affinity)
{
try { return env()->parent()->session(name(), args, affinity); }
catch (Parent::Unavailable) {
PWRN("parent has no service \"%s\"", name());
throw Unavailable();
}
catch (Parent::Quota_exceeded) { throw Quota_exceeded(); }
catch (Genode::Ipc_error) { throw Unavailable(); }
}
void upgrade(Session_capability session, const char *args)
{
try { env()->parent()->upgrade(session, args); }
catch (Genode::Ipc_error) { throw Unavailable(); }
}
void close(Session_capability session)
{
try { env()->parent()->close(session); }
catch (Genode::Ipc_error) { throw Blocking_canceled(); }
}
};
/**
* Representation of a service that is implemented in a child
*/
class Child_service : public Service
{
private:
Root_capability _root_cap;
Root_client _root;
Server *_server;
public:
/**
* Constructor
*
* \param name name of service
* \param root capability to root interface
* \param server server process providing the service
*/
Child_service(const char *name,
Root_capability root,
Server *server)
: Service(name), _root_cap(root), _root(root), _server(server) { }
Server *server() const { return _server; }
Session_capability session(const char *args, Affinity const &affinity)
{
if (!_root_cap.valid())
throw Unavailable();
try { return _root.session(args, affinity); }
catch (Root::Invalid_args) { throw Invalid_args(); }
catch (Root::Unavailable) { throw Unavailable(); }
catch (Root::Quota_exceeded) { throw Quota_exceeded(); }
catch (Genode::Ipc_error) { throw Unavailable(); }
}
void upgrade(Session_capability sc, const char *args)
{
if (!_root_cap.valid())
throw Unavailable();
try { _root.upgrade(sc, args); }
catch (Root::Invalid_args) { throw Invalid_args(); }
catch (Root::Unavailable) { throw Unavailable(); }
catch (Root::Quota_exceeded) { throw Quota_exceeded(); }
catch (Genode::Ipc_error) { throw Unavailable(); }
}
void close(Session_capability sc)
{
try { _root.close(sc); }
catch (Genode::Ipc_error) { throw Blocking_canceled(); }
}
};
/**
* Container for holding service representations
*/
class Service_registry
{
protected:
Lock _service_wait_queue_lock;
List<Client> _service_wait_queue;
List<Service> _services;
public:
/**
* Probe for service with specified name
*
* \param name service name
* \param server server providing the service,
* default (0) for any server
*/
Service *find(const char *name, Server *server = 0)
{
if (!name) return 0;
Lock::Guard lock_guard(_service_wait_queue_lock);
for (Service *s = _services.first(); s; s = s->next())
if (strcmp(s->name(), name) == 0
&& (!server || s->server() == server)) return s;
return 0;
}
/**
* Check if service name is ambiguous
*
* \return true if the same service is provided multiple
* times
*/
bool is_ambiguous(const char *name)
{
Lock::Guard lock_guard(_service_wait_queue_lock);
/* count number of services with the specified name */
unsigned cnt = 0;
for (Service *s = _services.first(); s; s = s->next())
cnt += (strcmp(s->name(), name) == 0);
return cnt > 1;
}
/**
* Return first service provided by specified server
*/
Service *find_by_server(Server *server)
{
Lock::Guard lock_guard(_service_wait_queue_lock);
for (Service *s = _services.first(); s; s = s->next())
if (s->server() == server)
return s;
return 0;
}
/**
* Wait for service
*
* This function is called by the client's thread
* when requesting a session creation. It blocks
* if the requested service is not available.
*
* \return service structure that matches the request or
* 0 if the waiting was canceled.
*/
Service *wait_for_service(const char *name, Client *client, const char *client_name)
{
Service *service;
client->apply_for(name);
_service_wait_queue_lock.lock();
_service_wait_queue.insert(client);
_service_wait_queue_lock.unlock();
do {
service = find(name);
/*
* The service that we are seeking is not available today.
* Let's sleep a night over it.
*/
if (!service) {
printf("%s: service %s not yet available - sleeping\n",
client_name, name);
try {
client->sleep();
printf("%s: service %s got available\n", client_name, name);
} catch (Blocking_canceled) {
printf("%s: cancel waiting for service\n", client_name);
break;
}
}
} while (!service);
/* we got what we needed, stop applying */
_service_wait_queue_lock.lock();
_service_wait_queue.remove(client);
_service_wait_queue_lock.unlock();
client->apply_for(0);
return service;
}
/**
* Register service
*
* This function is called by the server's thread.
*/
void insert(Service *service)
{
/* make new service known */
_services.insert(service);
/* wake up applicants waiting for the service */
Lock::Guard lock_guard(_service_wait_queue_lock);
for (Client *c = _service_wait_queue.first(); c; c = c->next())
if (strcmp(service->name(), c->apply_for()) == 0)
c->wakeup();
}
/**
* Unregister service
*/
void remove(Service *service) { _services.remove(service); }
/**
* Unregister all services
*/
void remove_all()
{
while (_services.first())
remove(_services.first());
}
};
class Client;
class Server;
class Service;
class Local_service;
class Parent_service;
class Child_service;
class Service_registry;
}
/**
* Client role
*
* A client is someone who applies for a service. If the service is not
* available yet, we enqueue the client into a wait queue and wake him up
* as soon as the requested service gets available.
*/
class Genode::Client : public List<Client>::Element
{
private:
Cancelable_lock _service_apply_lock;
const char *_apply_for;
public:
/**
* Constructor
*/
Client(): _service_apply_lock(Lock::LOCKED), _apply_for(0) { }
virtual ~Client() { }
/**
* Set/Request service name that we are currently applying for
*/
void apply_for(const char *apply_for) { _apply_for = apply_for; }
const char *apply_for() { return _apply_for; }
/**
* Service wait queue support
*/
void sleep() { _service_apply_lock.lock(); }
void wakeup() { _service_apply_lock.unlock(); }
};
/**
* Server role
*
* A server is a process that provides one or multiple services. For the
* most part, this class is used as an opaque key to represent the server
* role.
*/
class Genode::Server
{
private:
Ram_session_capability _ram;
public:
/**
* Constructor
*
* \param ram RAM session capability of the server process used,
* for quota transfers from/to the server
*/
Server(Ram_session_capability ram): _ram(ram) { }
/**
* Return RAM session capability of the server process
*/
Ram_session_capability ram_session_cap() const { return _ram; }
};
class Genode::Service : public List<Service>::Element
{
public:
enum { MAX_NAME_LEN = 32 };
private:
char _name[MAX_NAME_LEN];
public:
/*********************
** Exception types **
*********************/
class Invalid_args { };
class Unavailable { };
class Quota_exceeded { };
/**
* Constructor
*
* \param name service name
*/
Service(const char *name) { strncpy(_name, name, sizeof(_name)); }
virtual ~Service() { }
/**
* Return service name
*/
const char *name() const { return _name; }
/**
* Create session
*
* \param args session-construction arguments
* \param affinity preferred CPU affinity of session
*
* \throw Invalid_args
* \throw Unavailable
* \throw Quota_exceeded
*/
virtual Session_capability session(char const *args,
Affinity const &affinity) = 0;
/**
* Extend resource donation to an existing session
*/
virtual void upgrade(Session_capability session, const char *args) = 0;
/**
* Close session
*/
virtual void close(Session_capability /*session*/) { }
/**
* Return server providing the service
*/
virtual Server *server() const { return 0; }
/**
* Return the RAM session to be used for trading resources
*/
Ram_session_capability ram_session_cap()
{
if (server())
return server()->ram_session_cap();
return Ram_session_capability();
}
};
/**
* Representation of a locally implemented service
*/
class Genode::Local_service : public Service
{
private:
Root *_root;
public:
Local_service(const char *name, Root *root)
: Service(name), _root(root) { }
Session_capability session(const char *args, Affinity const &affinity) override
{
try { return _root->session(args, affinity); }
catch (Root::Invalid_args) { throw Invalid_args(); }
catch (Root::Unavailable) { throw Unavailable(); }
catch (Root::Quota_exceeded) { throw Quota_exceeded(); }
catch (Genode::Ipc_error) { throw Unavailable(); }
}
void upgrade(Session_capability session, const char *args) override
{
try { _root->upgrade(session, args); }
catch (Genode::Ipc_error) { throw Unavailable(); }
}
void close(Session_capability session) override
{
try { _root->close(session); }
catch (Genode::Ipc_error) { throw Blocking_canceled(); }
}
};
/**
* Representation of a service provided by our parent
*/
class Genode::Parent_service : public Service
{
public:
Parent_service(const char *name) : Service(name) { }
Session_capability session(const char *args, Affinity const &affinity) override
{
try { return env()->parent()->session(name(), args, affinity); }
catch (Parent::Unavailable) {
PWRN("parent has no service \"%s\"", name());
throw Unavailable();
}
catch (Parent::Quota_exceeded) { throw Quota_exceeded(); }
catch (Genode::Ipc_error) { throw Unavailable(); }
}
void upgrade(Session_capability session, const char *args) override
{
try { env()->parent()->upgrade(session, args); }
catch (Genode::Ipc_error) { throw Unavailable(); }
}
void close(Session_capability session) override
{
try { env()->parent()->close(session); }
catch (Genode::Ipc_error) { throw Blocking_canceled(); }
}
};
/**
* Representation of a service that is implemented in a child
*/
class Genode::Child_service : public Service
{
private:
Root_capability _root_cap;
Root_client _root;
Server *_server;
public:
/**
* Constructor
*
* \param name name of service
* \param root capability to root interface
* \param server server process providing the service
*/
Child_service(const char *name,
Root_capability root,
Server *server)
: Service(name), _root_cap(root), _root(root), _server(server) { }
Server *server() const override { return _server; }
Session_capability session(const char *args, Affinity const &affinity) override
{
if (!_root_cap.valid())
throw Unavailable();
try { return _root.session(args, affinity); }
catch (Root::Invalid_args) { throw Invalid_args(); }
catch (Root::Unavailable) { throw Unavailable(); }
catch (Root::Quota_exceeded) { throw Quota_exceeded(); }
catch (Genode::Ipc_error) { throw Unavailable(); }
}
void upgrade(Session_capability sc, const char *args) override
{
if (!_root_cap.valid())
throw Unavailable();
try { _root.upgrade(sc, args); }
catch (Root::Invalid_args) { throw Invalid_args(); }
catch (Root::Unavailable) { throw Unavailable(); }
catch (Root::Quota_exceeded) { throw Quota_exceeded(); }
catch (Genode::Ipc_error) { throw Unavailable(); }
}
void close(Session_capability sc) override
{
try { _root.close(sc); }
catch (Genode::Ipc_error) { throw Blocking_canceled(); }
}
};
/**
* Container for holding service representations
*/
class Genode::Service_registry
{
protected:
Lock _service_wait_queue_lock;
List<Client> _service_wait_queue;
List<Service> _services;
public:
/**
* Probe for service with specified name
*
* \param name service name
* \param server server providing the service,
* default (0) for any server
*/
Service *find(const char *name, Server *server = 0)
{
if (!name) return 0;
Lock::Guard lock_guard(_service_wait_queue_lock);
for (Service *s = _services.first(); s; s = s->next())
if (strcmp(s->name(), name) == 0
&& (!server || s->server() == server)) return s;
return 0;
}
/**
* Check if service name is ambiguous
*
* \return true if the same service is provided multiple
* times
*/
bool is_ambiguous(const char *name)
{
Lock::Guard lock_guard(_service_wait_queue_lock);
/* count number of services with the specified name */
unsigned cnt = 0;
for (Service *s = _services.first(); s; s = s->next())
cnt += (strcmp(s->name(), name) == 0);
return cnt > 1;
}
/**
* Return first service provided by specified server
*/
Service *find_by_server(Server *server)
{
Lock::Guard lock_guard(_service_wait_queue_lock);
for (Service *s = _services.first(); s; s = s->next())
if (s->server() == server)
return s;
return 0;
}
/**
* Wait for service
*
* This function is called by the client's thread
* when requesting a session creation. It blocks
* if the requested service is not available.
*
* \return service structure that matches the request or
* 0 if the waiting was canceled.
*/
Service *wait_for_service(const char *name, Client *client, const char *client_name)
{
Service *service;
client->apply_for(name);
_service_wait_queue_lock.lock();
_service_wait_queue.insert(client);
_service_wait_queue_lock.unlock();
do {
service = find(name);
/*
* The service that we are seeking is not available today.
* Let's sleep a night over it.
*/
if (!service) {
printf("%s: service %s not yet available - sleeping\n",
client_name, name);
try {
client->sleep();
printf("%s: service %s got available\n", client_name, name);
} catch (Blocking_canceled) {
printf("%s: cancel waiting for service\n", client_name);
break;
}
}
} while (!service);
/* we got what we needed, stop applying */
_service_wait_queue_lock.lock();
_service_wait_queue.remove(client);
_service_wait_queue_lock.unlock();
client->apply_for(0);
return service;
}
/**
* Register service
*
* This function is called by the server's thread.
*/
void insert(Service *service)
{
/* make new service known */
_services.insert(service);
/* wake up applicants waiting for the service */
Lock::Guard lock_guard(_service_wait_queue_lock);
for (Client *c = _service_wait_queue.first(); c; c = c->next())
if (strcmp(service->name(), c->apply_for()) == 0)
c->wakeup();
}
/**
* Unregister service
*/
void remove(Service *service) { _services.remove(service); }
/**
* Unregister all services
*/
void remove_all()
{
while (_services.first())
remove(_services.first());
}
};
#endif /* _INCLUDE__BASE__SERVICE_H_ */
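A sketch of announcing a locally implemented service through a registry (not part of this patch; 'my_root' and the service name "Hello" are hypothetical):

#include <base/service.h>

void announce_hello(Genode::Service_registry &registry, Genode::Root &my_root)
{
    /* wrap the locally implemented Root interface as a service */
    static Genode::Local_service service("Hello", &my_root);

    /* make it known and wake up clients already waiting for "Hello" */
    registry.insert(&service);

    /* later, a session request would locate the service by name */
    Genode::Service *s = registry.find("Hello");
    if (s) { /* s->session(args, affinity) would create a session here */ }
}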

View File

@ -11,10 +11,12 @@
#include <base/stdint.h>
namespace Genode {
class Shared_object;
struct Address_info;
};
class Genode::Shared_object
{
private:

View File

@ -30,368 +30,370 @@ namespace Genode {
class Signal_receiver;
class Signal_context;
class Signal_context_registry;
/**
* Signal
*
* A signal represents a number of asynchronous notifications produced by
* one transmitter. If notifications are generated at a higher rate than
* they can be processed at the receiver, the transmitter counts the
* notifications and delivers the total amount with the next signal
* transmission. This way, the total number of notifications gets properly
* communicated to the receiver even if the receiver is not highly
* responsive.
*
* Asynchronous notifications do not carry any payload because this payload
* would need to be queued at the transmitter. However, each transmitter
* imprints a signal-context reference into each signal. This context
* can be used by the receiver to distinguish signals coming from different
* transmitters.
*/
class Signal
{
private:
struct Data
{
Signal_context *context;
unsigned num;
/**
* Constructor
*
* \param context signal context specific for the
* signal-receiver capability used for signal
* transmission
* \param num number of signals received from the same
* transmitter
*/
Data(Signal_context *context, unsigned num)
: context(context), num(num) { }
/**
* Default constructor, representing an invalid signal
*/
Data() : context(0), num(0) { }
} _data;
/**
* Constructor
*
* Signal objects are constructed by signal receivers only.
*/
Signal(Data data);
friend class Kernel::Signal_receiver;
friend class Signal_receiver;
friend class Signal_context;
void _dec_ref_and_unlock();
void _inc_ref();
public:
Signal(Signal const &other);
Signal &operator=(Signal const &other);
~Signal();
Signal_context *context() { return _data.context; }
unsigned num() const { return _data.num; }
};
/**
* Signal transmitter
*
* Each signal-transmitter instance acts on behalf of the context specified
* as constructor argument. Therefore, the resources needed for the
* transmitter such as the consumed memory 'sizeof(Signal_transmitter)'
* should be accounted to the owner of the context.
*/
class Signal_transmitter
{
private:
Signal_context_capability _context; /* destination */
public:
/**
* Constructor
*
* \param context capability to signal context that is going to
* receive signals produced by the transmitter
*/
Signal_transmitter(Signal_context_capability context = Signal_context_capability());
/**
* Set signal context
*/
void context(Signal_context_capability context);
/**
* Return signal context
*/
Signal_context_capability context();
/**
* Trigger signal submission to context
*
* \param cnt number of signals to submit at once
*/
void submit(unsigned cnt = 1);
};
/**
* Signal receiver
*/
class Signal_receiver : Noncopyable
{
private:
/**
* Semaphore used to indicate that signal(s) are ready to be picked
* up. This is needed for platforms other than 'base-hw' only.
*/
Semaphore _signal_available;
/**
* Provides the kernel-object name via the 'dst' method. This is
* needed for 'base-hw' only.
*/
Signal_receiver_capability _cap;
/**
* List of associated contexts
*/
Lock _contexts_lock;
List<List_element<Signal_context> > _contexts;
/**
* Helper to dissolve given context
*
* This function prevents duplicated code in '~Signal_receiver'
* and 'dissolve'. Note that '_contexts_lock' must be held when
* calling this function.
*/
void _unsynchronized_dissolve(Signal_context *context);
/**
* Hook to platform specific destructor parts
*/
void _platform_destructor();
public:
/**
* Exception class
*/
class Context_already_in_use { };
class Context_not_associated { };
/**
* Constructor
*/
Signal_receiver();
/**
* Destructor
*/
~Signal_receiver();
/**
* Manage signal context and return new signal-context capability
*
* \param context context associated with signals delivered to the
* receiver
* \throw 'Context_already_in_use'
* \return new signal-context capability that can be
* passed to a signal transmitter
*/
Signal_context_capability manage(Signal_context *context);
/**
* Dissolve signal context from receiver
*
* \param context context to remove from receiver
* \throw 'Context_not_associated'
*/
void dissolve(Signal_context *context);
/**
* Return true if signal was received
*/
bool pending();
/**
* Block until a signal is received
*
* \return received signal
*/
Signal wait_for_signal();
/**
* Locally submit signal to the receiver
*/
void local_submit(Signal::Data signal);
/**
* Framework-internal signal-dispatcher
*
* This function is called from the thread that monitors the signal
* source associated with the process. It must not be used for other
* purposes.
*/
static void dispatch_signals(Signal_source *signal_source);
};
/**
* Signal context
*
* A signal context is a destination for signals. One receiver can listen
* to multiple contexts. If a signal arrives, the context is provided with
* the signal. This enables the receiver to distinguish different signal
* sources and dispatch incoming signals in a context-specific way.
*/
class Signal_context
{
private:
/**
* List element in 'Signal_receiver'
*/
List_element<Signal_context> _receiver_le;
/**
* List element in process-global registry
*/
List_element<Signal_context> _registry_le;
/**
* Receiver with which the context is associated
*
* This member is initialized by the receiver when associating
* the context with the receiver via the 'cap' function.
*/
Signal_receiver *_receiver;
Lock _lock; /* protect '_curr_signal' */
Signal::Data _curr_signal; /* most recently received signal */
bool _pending; /* current signal is valid */
unsigned int _ref_cnt; /* number of references to this context */
Lock _destroy_lock; /* prevent destruction while the
context is in use */
/**
* Capability assigned to this context after being associated with
* a 'Signal_receiver' via the 'manage' function. We store this
* capability in the 'Signal_context' for the mere reason to
* properly destruct the context (see '_unsynchronized_dissolve').
*/
Signal_context_capability _cap;
friend class Signal;
friend class Signal_receiver;
friend class Signal_context_registry;
public:
/**
* Constructor
*/
Signal_context()
: _receiver_le(this), _registry_le(this),
_receiver(0), _pending(0), _ref_cnt(0) { }
/**
* Destructor
*
* The virtual destructor is just there to generate a vtable for
* signal-context objects such that signal contexts can be
* dynamically cast.
*/
virtual ~Signal_context();
/**
* Local signal submission (DEPRECATED)
*
* Trigger local signal submission (within the same address space). The
* context has to be bound to a signal receiver beforehand.
*
* \param num number of pending signals
*/
void submit(unsigned num);
/*
* Signal contexts are never invoked but only used as arguments for
* 'Signal_session' functions. Hence, there exists a capability
* type for it but no real RPC interface.
*/
GENODE_RPC_INTERFACE();
};
/**
* Abstract interface to be implemented by signal dispatchers
*/
struct Signal_dispatcher_base : Signal_context
{
virtual void dispatch(unsigned num) = 0;
};
/**
* Adapter for directing signals to member functions
*
* This utility associates member functions with signals. It is intended to
* be used as a member variable of the class that handles incoming signals
* of a certain type. The constructor takes a pointer-to-member to the
* signal handling function as argument. If a signal is received at the
* common signal reception code, this function will be invoked by calling
* 'Signal_dispatcher_base::dispatch'.
*
* \param T type of signal-handling class
*/
template <typename T>
class Signal_dispatcher : public Signal_dispatcher_base,
public Signal_context_capability
{
private:
T &obj;
void (T::*member) (unsigned);
Signal_receiver &sig_rec;
public:
/**
* Constructor
*
* \param sig_rec signal receiver to associate the signal
* handler with
* \param obj,member object and member function to call when
* the signal occurs
*/
Signal_dispatcher(Signal_receiver &sig_rec,
T &obj, void (T::*member)(unsigned))
:
Signal_context_capability(sig_rec.manage(this)),
obj(obj), member(member),
sig_rec(sig_rec)
{ }
~Signal_dispatcher() { sig_rec.dissolve(this); }
void dispatch(unsigned num) { (obj.*member)(num); }
};
class Signal_transmitter;
class Signal;
class Signal_dispatcher_base;
template <typename> class Signal_dispatcher;
}
/**
* Signal
*
* A signal represents a number of asynchronous notifications produced by
* one transmitter. If notifications are generated at a higher rate than
* they can be processed at the receiver, the transmitter counts the
* notifications and delivers the total amount with the next signal
* transmission. This way, the total number of notifications gets properly
* communicated to the receiver even if the receiver is not highly
* responsive.
*
* Asynchronous notifications do not carry any payload because this payload
* would need to be queued at the transmitter. However, each transmitter
* imprints a signal-context reference into each signal. This context
* can be used by the receiver to distinguish signals coming from different
* transmitters.
*/
class Genode::Signal
{
private:
struct Data
{
Signal_context *context;
unsigned num;
/**
* Constructor
*
* \param context signal context specific for the
* signal-receiver capability used for signal
* transmission
* \param num number of signals received from the same
* transmitter
*/
Data(Signal_context *context, unsigned num)
: context(context), num(num) { }
/**
* Default constructor, representing an invalid signal
*/
Data() : context(0), num(0) { }
} _data;
/**
* Constructor
*
* Signal objects are constructed by signal receivers only.
*/
Signal(Data data);
friend class Kernel::Signal_receiver;
friend class Signal_receiver;
friend class Signal_context;
void _dec_ref_and_unlock();
void _inc_ref();
public:
Signal(Signal const &other);
Signal &operator=(Signal const &other);
~Signal();
Signal_context *context() { return _data.context; }
unsigned num() const { return _data.num; }
};
/**
* Signal transmitter
*
* Each signal-transmitter instance acts on behalf of the context specified
* as constructor argument. Therefore, the resources needed for the
* transmitter such as the consumed memory 'sizeof(Signal_transmitter)'
* should be accounted to the owner of the context.
*/
class Genode::Signal_transmitter
{
private:
Signal_context_capability _context; /* destination */
public:
/**
* Constructor
*
* \param context capability to signal context that is going to
* receive signals produced by the transmitter
*/
Signal_transmitter(Signal_context_capability context = Signal_context_capability());
/**
* Set signal context
*/
void context(Signal_context_capability context);
/**
* Return signal context
*/
Signal_context_capability context();
/**
* Trigger signal submission to context
*
* \param cnt number of signals to submit at once
*/
void submit(unsigned cnt = 1);
};
/**
* Signal receiver
*/
class Genode::Signal_receiver : Noncopyable
{
private:
/**
* Semaphore used to indicate that signal(s) are ready to be picked
* up. This is needed for platforms other than 'base-hw' only.
*/
Semaphore _signal_available;
/**
* Provides the kernel-object name via the 'dst' method. This is
* needed for 'base-hw' only.
*/
Signal_receiver_capability _cap;
/**
* List of associated contexts
*/
Lock _contexts_lock;
List<List_element<Signal_context> > _contexts;
/**
* Helper to dissolve given context
*
* This function prevents duplicated code in '~Signal_receiver'
* and 'dissolve'. Note that '_contexts_lock' must be held when
* calling this function.
*/
void _unsynchronized_dissolve(Signal_context *context);
/**
* Hook to platform specific destructor parts
*/
void _platform_destructor();
public:
/**
* Exception class
*/
class Context_already_in_use { };
class Context_not_associated { };
/**
* Constructor
*/
Signal_receiver();
/**
* Destructor
*/
~Signal_receiver();
/**
* Manage signal context and return new signal-context capability
*
* \param context context associated with signals delivered to the
* receiver
* \throw 'Context_already_in_use'
* \return new signal-context capability that can be
* passed to a signal transmitter
*/
Signal_context_capability manage(Signal_context *context);
/**
* Dissolve signal context from receiver
*
* \param context context to remove from receiver
* \throw 'Context_not_associated'
*/
void dissolve(Signal_context *context);
/**
* Return true if signal was received
*/
bool pending();
/**
* Block until a signal is received
*
* \return received signal
*/
Signal wait_for_signal();
/**
* Locally submit signal to the receiver
*/
void local_submit(Signal::Data signal);
/**
* Framework-internal signal-dispatcher
*
* This function is called from the thread that monitors the signal
* source associated with the process. It must not be used for other
* purposes.
*/
static void dispatch_signals(Signal_source *signal_source);
};
/**
* Signal context
*
* A signal context is a destination for signals. One receiver can listen
* to multiple contexts. If a signal arrives, the context is provided with
* the signal. This enables the receiver to distinguish different signal
* sources and dispatch incoming signals in a context-specific way.
*/
class Genode::Signal_context
{
private:
/**
* List element in 'Signal_receiver'
*/
List_element<Signal_context> _receiver_le;
/**
* List element in process-global registry
*/
List_element<Signal_context> _registry_le;
/**
* Receiver with which the context is associated
*
* This member is initialized by the receiver when associating
* the context with the receiver via the 'cap' function.
*/
Signal_receiver *_receiver;
Lock _lock; /* protect '_curr_signal' */
Signal::Data _curr_signal; /* most recently received signal */
bool _pending; /* current signal is valid */
unsigned int _ref_cnt; /* number of references to this context */
Lock _destroy_lock; /* prevent destruction while the
context is in use */
/**
* Capability assigned to this context after being associated with
* a 'Signal_receiver' via the 'manage' function. We store this
* capability in the 'Signal_context' for the mere reason to
* properly destruct the context (see '_unsynchronized_dissolve').
*/
Signal_context_capability _cap;
friend class Signal;
friend class Signal_receiver;
friend class Signal_context_registry;
public:
/**
* Constructor
*/
Signal_context()
: _receiver_le(this), _registry_le(this),
_receiver(0), _pending(0), _ref_cnt(0) { }
/**
* Destructor
*
* The virtual destructor is just there to generate a vtable for
* signal-context objects such that signal contexts can be
* dynamically cast.
*/
virtual ~Signal_context();
/**
* Local signal submission (DEPRECATED)
*
* Trigger local signal submission (within the same address space). The
* context has to be bound to a signal receiver beforehand.
*
* \param num number of pending signals
*/
void submit(unsigned num);
/*
* Signal contexts are never invoked but only used as arguments for
* 'Signal_session' functions. Hence, there exists a capability
* type for it but no real RPC interface.
*/
GENODE_RPC_INTERFACE();
};
/**
* Abstract interface to be implemented by signal dispatchers
*/
struct Genode::Signal_dispatcher_base : Signal_context
{
virtual void dispatch(unsigned num) = 0;
};
/**
* Adapter for directing signals to member functions
*
* This utility associates member functions with signals. It is intended to
* be used as a member variable of the class that handles incoming signals
* of a certain type. The constructor takes a pointer-to-member to the
* signal handling function as argument. If a signal is received at the
* common signal reception code, this function will be invoked by calling
* 'Signal_dispatcher_base::dispatch'.
*
* \param T type of signal-handling class
*/
template <typename T>
class Genode::Signal_dispatcher : public Signal_dispatcher_base,
public Signal_context_capability
{
private:
T &obj;
void (T::*member) (unsigned);
Signal_receiver &sig_rec;
public:
/**
* Constructor
*
* \param sig_rec signal receiver to associate the signal
* handler with
* \param obj,member object and member function to call when
* the signal occurs
*/
Signal_dispatcher(Signal_receiver &sig_rec,
T &obj, void (T::*member)(unsigned))
:
Signal_context_capability(sig_rec.manage(this)),
obj(obj), member(member),
sig_rec(sig_rec)
{ }
~Signal_dispatcher() { sig_rec.dissolve(this); }
void dispatch(unsigned num) { (obj.*member)(num); }
};
#endif /* _INCLUDE__BASE__SIGNAL_H__ */
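A hedged usage sketch of the signal API above, not part of the commit. In a real system, the transmitter typically runs in another component that obtained the context capability through a session interface; here everything happens locally to keep the example short.

void signal_roundtrip()
{
	Genode::Signal_receiver receiver;
	Genode::Signal_context  context;

	/* associate the context and obtain a capability for transmitters */
	Genode::Signal_context_capability cap = receiver.manage(&context);

	Genode::Signal_transmitter transmitter(cap);
	transmitter.submit(3);   /* three notifications, delivered batched */

	Genode::Signal signal = receiver.wait_for_signal();
	/* signal.context() == &context, signal.num() holds the batched count */

	receiver.dissolve(&context);
}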

View File

@ -20,239 +20,240 @@
namespace Genode {
class Slab;
class Slab_block;
class Slab_entry;
class Allocator;
/**
* A slab block holds an array of slab entries.
*/
class Slab_block
{
public:
Slab_block *next; /* next block */
Slab_block *prev; /* previous block */
private:
enum { FREE, USED };
Slab *_slab; /* back reference to slab allocator */
unsigned _avail; /* free entries of this block */
/*
* Each slab block consists of three areas, a fixed-size header
* that contains the member variables declared above, a byte array
* called state table that holds the allocation state for each slab
* entry, and an area holding the actual slab entries. The number
* of state-table elements corresponds to the maximum number of slab
* entries per slab block (the '_num_elem' member variable of the
* Slab allocator).
*/
char _data[]; /* dynamic data (state table and slab entries) */
/*
* Caution! no member variables allowed below this line!
*/
/**
* Accessor functions to allocation state
*
* \param idx index of slab entry
*/
inline bool state(int idx) { return _data[idx]; }
inline void state(int idx, bool state) { _data[idx] = state; }
/**
* Request address of slab entry by its index
*/
Slab_entry *slab_entry(int idx);
/**
* Determine block index of specified slab entry
*/
int slab_entry_idx(Slab_entry *e);
public:
/**
* Constructor
*
* Normally, Slab_blocks are constructed by a Slab allocator
* that specifies itself as constructor argument.
*/
explicit Slab_block(Slab *s = 0) { if (s) slab(s); }
/**
* Configure block to be managed by the specified slab allocator
*/
void slab(Slab *slab);
/**
* Request number of available entries in block
*/
unsigned avail() { return _avail; }
/**
* Allocate slab entry from block
*/
void *alloc();
/**
* Return a used slab block entry
*/
Slab_entry *first_used_entry();
/**
* These functions are called by Slab_entry.
*/
void inc_avail(Slab_entry *e);
void dec_avail();
/**
* Debug and test hooks
*/
void dump();
int check_bounds();
};
class Slab_entry
{
private:
Slab_block *_sb;
char _data[];
/*
* Caution! no member variables allowed below this line!
*/
public:
void init() { _sb = 0; }
void occupy(Slab_block *sb)
{
_sb = sb;
_sb->dec_avail();
}
void free()
{
_sb->inc_avail(this);
_sb = 0;
}
void *addr() { return _data; }
/**
* Lookup Slab_entry by given address
*
* The specified address is supposed to point to _data[0].
*/
static Slab_entry *slab_entry(void *addr) {
return (Slab_entry *)((addr_t)addr - sizeof(Slab_entry)); }
};
/**
* Slab allocator
*/
class Slab : public Allocator
{
private:
size_t _slab_size; /* size of one slab entry */
size_t _block_size; /* size of slab block */
size_t _num_elem; /* number of slab entries per block */
Slab_block *_first_sb; /* first slab block */
Slab_block *_initial_sb; /* initial (static) slab block */
bool _alloc_state; /* indicator for 'currently in service' */
Allocator *_backing_store;
/**
* Allocate and initialize new slab block
*/
Slab_block *_new_slab_block();
public:
inline size_t slab_size() { return _slab_size; }
inline size_t block_size() { return _block_size; }
inline size_t num_elem() { return _num_elem; }
inline size_t entry_size() { return sizeof(Slab_entry) + _slab_size; }
/**
* Constructor
*
* At construction time, there exists one initial slab
* block that is used for the first couple of allocations,
* especially for the allocation of the second slab
* block.
*/
Slab(size_t slab_size, size_t block_size, Slab_block *initial_sb,
Allocator *backing_store = 0);
/**
* Destructor
*/
~Slab();
/**
* Debug function for dumping the current slab block list
*/
void dump_sb_list();
/**
* Remove block from slab block list
*/
void remove_sb(Slab_block *sb);
/**
* Insert block into slab block list
*/
void insert_sb(Slab_block *sb, Slab_block *at = 0);
/**
* Free slab entry
*/
static void free(void *addr);
/**
* Return a used slab element
*/
void *first_used_elem();
/**
* Return true if number of free slab entries is higher than n
*/
bool num_free_entries_higher_than(int n);
/**
* Define/request backing-store allocator
*/
void backing_store(Allocator *bs) { _backing_store = bs; }
Allocator *backing_store() { return _backing_store; }
/*************************
** Allocator interface **
*************************/
/**
* Allocate slab entry
*
* The 'size' parameter is ignored as only slab entries with
* preconfigured slab-entry size are allocated.
*/
bool alloc(size_t size, void **addr);
void free(void *addr, size_t) { free(addr); }
size_t consumed();
size_t overhead(size_t) { return _block_size/_num_elem; }
bool need_size_for_free() const override { return false; }
};
}
/**
* A slab block holds an array of slab entries.
*/
class Genode::Slab_block
{
public:
Slab_block *next; /* next block */
Slab_block *prev; /* previous block */
private:
enum { FREE, USED };
Slab *_slab; /* back reference to slab allocator */
unsigned _avail; /* free entries of this block */
/*
* Each slab block consists of three areas, a fixed-size header
* that contains the member variables declared above, a byte array
* called state table that holds the allocation state for each slab
* entry, and an area holding the actual slab entries. The number
* of state-table elements corresponds to the maximum number of slab
* entries per slab block (the '_num_elem' member variable of the
* Slab allocator).
*/
char _data[]; /* dynamic data (state table and slab entries) */
/*
* Caution! no member variables allowed below this line!
*/
/**
* Accessor functions to allocation state
*
* \param idx index of slab entry
*/
inline bool state(int idx) { return _data[idx]; }
inline void state(int idx, bool state) { _data[idx] = state; }
/**
* Request address of slab entry by its index
*/
Slab_entry *slab_entry(int idx);
/**
* Determine block index of specified slab entry
*/
int slab_entry_idx(Slab_entry *e);
public:
/**
* Constructor
*
* Normally, Slab_blocks are constructed by a Slab allocator
* that specifies itself as constructor argument.
*/
explicit Slab_block(Slab *s = 0) { if (s) slab(s); }
/**
* Configure block to be managed by the specified slab allocator
*/
void slab(Slab *slab);
/**
* Request number of available entries in block
*/
unsigned avail() { return _avail; }
/**
* Allocate slab entry from block
*/
void *alloc();
/**
* Return a used slab block entry
*/
Slab_entry *first_used_entry();
/**
* These functions are called by Slab_entry.
*/
void inc_avail(Slab_entry *e);
void dec_avail();
/**
* Debug and test hooks
*/
void dump();
int check_bounds();
};
class Genode::Slab_entry
{
private:
Slab_block *_sb;
char _data[];
/*
* Caution! no member variables allowed below this line!
*/
public:
void init() { _sb = 0; }
void occupy(Slab_block *sb)
{
_sb = sb;
_sb->dec_avail();
}
void free()
{
_sb->inc_avail(this);
_sb = 0;
}
void *addr() { return _data; }
/**
* Lookup Slab_entry by given address
*
* The specified address is supposed to point to _data[0].
*/
static Slab_entry *slab_entry(void *addr) {
return (Slab_entry *)((addr_t)addr - sizeof(Slab_entry)); }
};
/**
* Slab allocator
*/
class Genode::Slab : public Allocator
{
private:
size_t _slab_size; /* size of one slab entry */
size_t _block_size; /* size of slab block */
size_t _num_elem; /* number of slab entries per block */
Slab_block *_first_sb; /* first slab block */
Slab_block *_initial_sb; /* initial (static) slab block */
bool _alloc_state; /* indicator for 'currently in service' */
Allocator *_backing_store;
/**
* Allocate and initialize new slab block
*/
Slab_block *_new_slab_block();
public:
inline size_t slab_size() { return _slab_size; }
inline size_t block_size() { return _block_size; }
inline size_t num_elem() { return _num_elem; }
inline size_t entry_size() { return sizeof(Slab_entry) + _slab_size; }
/**
* Constructor
*
* At construction time, there exists one initial slab
* block that is used for the first couple of allocations,
* especially for the allocation of the second slab
* block.
*/
Slab(size_t slab_size, size_t block_size, Slab_block *initial_sb,
Allocator *backing_store = 0);
/**
* Destructor
*/
~Slab();
/**
* Debug function for dumping the current slab block list
*/
void dump_sb_list();
/**
* Remove block from slab block list
*/
void remove_sb(Slab_block *sb);
/**
* Insert block into slab block list
*/
void insert_sb(Slab_block *sb, Slab_block *at = 0);
/**
* Free slab entry
*/
static void free(void *addr);
/**
* Return a used slab element
*/
void *first_used_elem();
/**
* Return true if number of free slab entries is higher than n
*/
bool num_free_entries_higher_than(int n);
/**
* Define/request backing-store allocator
*/
void backing_store(Allocator *bs) { _backing_store = bs; }
Allocator *backing_store() { return _backing_store; }
/*************************
** Allocator interface **
*************************/
/**
* Allocate slab entry
*
* The 'size' parameter is ignored as only slab entries with
* preconfigured slab-entry size are allocated.
*/
bool alloc(size_t size, void **addr);
void free(void *addr, size_t) { free(addr); }
size_t consumed();
size_t overhead(size_t) { return _block_size/_num_elem; }
bool need_size_for_free() const override { return false; }
};
#endif /* _INCLUDE__BASE__SLAB_H_ */
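For illustration, a sketch of instantiating the slab allocator above. The 32-byte entry size, the 4 KiB block size, and the use of 'Genode::env()->heap()' as backing store are assumptions; no initial block is supplied, so the first block is fetched from the backing store.

void slab_example()
{
	enum { OBJ_SIZE = 32, BLOCK_SIZE = 4096 };

	/* entry size, block size, no initial block, heap as backing store */
	Genode::Slab slab(OBJ_SIZE, BLOCK_SIZE, 0, Genode::env()->heap());

	void *obj = 0;
	if (slab.alloc(OBJ_SIZE, &obj)) {
		/* ... use the 32-byte entry ... */
		Genode::Slab::free(obj);   /* the entry locates its slab block itself */
	}
}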

View File

@ -18,65 +18,71 @@
#include <base/stdint.h>
namespace Genode {
class String_console : public Console
{
private:
char *_dst;
size_t _dst_len;
size_t _w_offset;
public:
/**
* Constructor
*
* \param dst destination character buffer
* \param dst_len size of dst
*/
String_console(char *dst, size_t dst_len)
: _dst(dst), _dst_len(dst_len), _w_offset(0)
{ _dst[0] = 0; }
/**
* Return number of characters in destination buffer
*/
size_t len() { return _w_offset; }
/***********************
** Console interface **
***********************/
void _out_char(char c)
{
/* ensure to leave space for null-termination */
if (_w_offset + 2 > _dst_len)
return;
_dst[_w_offset++] = c;
_dst[_w_offset] = 0;
}
};
class String_console;
/**
* Print format string into character buffer
*
* \return number of characters written to destination buffer
*/
inline int snprintf(char *, size_t, const char *, ...) __attribute__((format(printf, 3, 4)));
inline int snprintf(char *dst, size_t dst_len, const char *format, ...)
{
va_list list;
va_start(list, format);
inline int snprintf(char *, size_t, const char *, ...)
__attribute__((format(printf, 3, 4)));
}
String_console sc(dst, dst_len);
sc.vprintf(format, list);
va_end(list);
return sc.len();
}
class Genode::String_console : public Console
{
private:
char *_dst;
size_t _dst_len;
size_t _w_offset;
public:
/**
* Constructor
*
* \param dst destination character buffer
* \param dst_len size of dst
*/
String_console(char *dst, size_t dst_len)
: _dst(dst), _dst_len(dst_len), _w_offset(0)
{ _dst[0] = 0; }
/**
* Return number of characters in destination buffer
*/
size_t len() { return _w_offset; }
/***********************
** Console interface **
***********************/
void _out_char(char c) override
{
/* ensure to leave space for null-termination */
if (_w_offset + 2 > _dst_len)
return;
_dst[_w_offset++] = c;
_dst[_w_offset] = 0;
}
};
inline int Genode::snprintf(char *dst, size_t dst_len, const char *format, ...)
{
va_list list;
va_start(list, format);
String_console sc(dst, dst_len);
sc.vprintf(format, list);
va_end(list);
return sc.len();
}
#endif /* _INCLUDE__BASE__SNPRINTF_H_ */
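A brief, illustrative sketch of 'Genode::snprintf' backed by the 'String_console' above; the buffer size and format arguments are arbitrary.

void format_example()
{
	char buf[32];

	/* returns the number of characters written, excluding the terminator */
	int n = Genode::snprintf(buf, sizeof(buf), "%s:%d", "timeout", 250);

	/* 'buf' now holds "timeout:250" and n == 11; longer output is
	   silently truncated but stays null-terminated */
	(void)n;
}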

View File

@ -18,238 +18,244 @@
#include <base/lock.h>
namespace Genode {
/**
* Lock-guarded allocator
*
* This class wraps the complete 'Allocator' interface while
* preventing concurrent calls to the wrapped allocator implementation.
*
* \param ALLOCATOR_IMPL class implementing the 'Allocator'
* interface
*/
template <typename ALLOCATOR_IMPL>
class Synchronized_allocator : public Allocator
{
private:
Lock _default_lock;
Lock *_lock;
ALLOCATOR_IMPL _alloc;
public:
/**
* Constructor
*
* This constructor uses an embedded lock for synchronizing the
* access to the allocator.
*/
Synchronized_allocator()
: _lock(&_default_lock) { }
/**
* Constructor
*
* This constructor uses an embedded lock for synchronizing the
* access to the allocator.
*/
explicit Synchronized_allocator(Allocator *metadata_alloc)
: _lock(&_default_lock), _alloc(metadata_alloc) { }
/**
* Return reference to wrapped (non-thread-safe) allocator
*
* This is needed, for example, if the wrapped allocator implements
* methods in addition to the Allocator interface.
*/
ALLOCATOR_IMPL *raw() { return &_alloc; }
/*************************
** Allocator interface **
*************************/
bool alloc(size_t size, void **out_addr)
{
Lock::Guard lock_guard(*_lock);
return _alloc.alloc(size, out_addr);
}
void free(void *addr, size_t size)
{
Lock::Guard lock_guard(*_lock);
_alloc.free(addr, size);
}
size_t consumed()
{
Lock::Guard lock_guard(*_lock);
return _alloc.consumed();
}
size_t overhead(size_t size)
{
Lock::Guard lock_guard(*_lock);
return _alloc.overhead(size);
}
bool need_size_for_free() const override
{
Lock::Guard lock_guard(*_lock);
return _alloc.need_size_for_free();
}
};
/**
* Lock-guarded range allocator
*
* This class wraps the complete 'Range_allocator' interface while
* preventing concurrent calls to the wrapped allocator implementation.
*
* \param ALLOCATOR_IMPL class implementing the 'Range_allocator'
* interface
*/
template <typename ALLOCATOR_IMPL>
class Synchronized_range_allocator : public Range_allocator
{
private:
Lock _default_lock;
Lock *_lock;
ALLOCATOR_IMPL _alloc;
public:
/**
* Constructor
*
* This constructor uses an embedded lock for synchronizing the
* access to the allocator.
*/
Synchronized_range_allocator()
: _lock(&_default_lock) { }
/**
* Constructor
*
* This constructor uses an embedded lock for synchronizing the
* access to the allocator.
*/
explicit Synchronized_range_allocator(Allocator *metadata_alloc)
: _lock(&_default_lock), _alloc(metadata_alloc) { }
/**
* Constructor
*
* \param lock use specified lock rather than an embedded lock for
* synchronization
*
* This constructor is useful if multiple allocators must be
* synchronized with each other. In such a case, the shared
* lock can be passed to each 'Synchronized_range_allocator'
* instance.
*/
Synchronized_range_allocator(Lock *lock, Allocator *metadata_alloc)
: _lock(lock), _alloc(metadata_alloc) { }
/**
* Return reference to wrapped (non-thread-safe) allocator
*
* This is needed, for example, if the wrapped allocator implements
* methods in addition to the Range_allocator interface.
*
* NOTE: Synchronize accesses to the raw allocator by means of
* the lock() member function.
*/
ALLOCATOR_IMPL *raw() { return &_alloc; }
/**
* Return reference to synchronization lock
*/
Lock *lock() { return _lock; }
/*************************
** Allocator interface **
*************************/
bool alloc(size_t size, void **out_addr)
{
Lock::Guard lock_guard(*_lock);
return _alloc.alloc(size, out_addr);
}
void free(void *addr, size_t size)
{
Lock::Guard lock_guard(*_lock);
_alloc.free(addr, size);
}
size_t consumed()
{
Lock::Guard lock_guard(*_lock);
return _alloc.consumed();
}
size_t overhead(size_t size)
{
Lock::Guard lock_guard(*_lock);
return _alloc.overhead(size);
}
bool need_size_for_free() const override
{
Lock::Guard lock_guard(*_lock);
return _alloc.need_size_for_free();
}
/*******************************
** Range-allocator interface **
*******************************/
int add_range(addr_t base, size_t size)
{
Lock::Guard lock_guard(*_lock);
return _alloc.add_range(base, size);
}
int remove_range(addr_t base, size_t size)
{
Lock::Guard lock_guard(*_lock);
return _alloc.remove_range(base, size);
}
Alloc_return alloc_aligned(size_t size, void **out_addr, int align = 0, addr_t from = 0, addr_t to = ~0UL)
{
Lock::Guard lock_guard(*_lock);
return _alloc.alloc_aligned(size, out_addr, align, from, to);
}
Alloc_return alloc_addr(size_t size, addr_t addr)
{
Lock::Guard lock_guard(*_lock);
return _alloc.alloc_addr(size, addr);
}
void free(void *addr)
{
Lock::Guard lock_guard(*_lock);
_alloc.free(addr);
}
size_t avail()
{
Lock::Guard lock_guard(*_lock);
return _alloc.avail();
}
bool valid_addr(addr_t addr)
{
Lock::Guard lock_guard(*_lock);
return _alloc.valid_addr(addr);
}
};
template <typename> class Synchronized_allocator;
template <typename> class Synchronized_range_allocator;
}
/**
* Lock-guarded allocator
*
* This class wraps the complete 'Allocator' interface while
* preventing concurrent calls to the wrapped allocator implementation.
*
* \param ALLOCATOR_IMPL class implementing the 'Allocator'
* interface
*/
template <typename ALLOCATOR_IMPL>
class Genode::Synchronized_allocator : public Allocator
{
private:
Lock _default_lock;
Lock *_lock;
ALLOCATOR_IMPL _alloc;
public:
/**
* Constructor
*
* This constructor uses an embedded lock for synchronizing the
* access to the allocator.
*/
Synchronized_allocator()
: _lock(&_default_lock) { }
/**
* Constructor
*
* This constructor uses an embedded lock for synchronizing the
* access to the allocator.
*/
explicit Synchronized_allocator(Allocator *metadata_alloc)
: _lock(&_default_lock), _alloc(metadata_alloc) { }
/**
* Return reference to wrapped (non-thread-safe) allocator
*
* This is needed, for example, if the wrapped allocator implements
* methods in addition to the Allocator interface.
*/
ALLOCATOR_IMPL *raw() { return &_alloc; }
/*************************
** Allocator interface **
*************************/
bool alloc(size_t size, void **out_addr) override
{
Lock::Guard lock_guard(*_lock);
return _alloc.alloc(size, out_addr);
}
void free(void *addr, size_t size) override
{
Lock::Guard lock_guard(*_lock);
_alloc.free(addr, size);
}
size_t consumed() override
{
Lock::Guard lock_guard(*_lock);
return _alloc.consumed();
}
size_t overhead(size_t size) override
{
Lock::Guard lock_guard(*_lock);
return _alloc.overhead(size);
}
bool need_size_for_free() const override
{
Lock::Guard lock_guard(*_lock);
return _alloc.need_size_for_free();
}
};
/**
* Lock-guarded range allocator
*
* This class wraps the complete 'Range_allocator' interface while
* preventing concurrent calls to the wrapped allocator implementation.
*
* \param ALLOCATOR_IMPL class implementing the 'Range_allocator'
* interface
*/
template <typename ALLOCATOR_IMPL>
class Genode::Synchronized_range_allocator : public Range_allocator
{
private:
Lock _default_lock;
Lock *_lock;
ALLOCATOR_IMPL _alloc;
public:
/**
* Constructor
*
* This constructor uses an embedded lock for synchronizing the
* access to the allocator.
*/
Synchronized_range_allocator()
: _lock(&_default_lock) { }
/**
* Constructor
*
* This constructor uses an embedded lock for synchronizing the
* access to the allocator.
*/
explicit Synchronized_range_allocator(Allocator *metadata_alloc)
: _lock(&_default_lock), _alloc(metadata_alloc) { }
/**
* Constructor
*
* \param lock use specified lock rather than an embedded lock for
* synchronization
*
* This constructor is useful if multiple allocators must be
* synchronized with each other. In such a case, the shared
* lock can be passed to each 'Synchronized_range_allocator'
* instance.
*/
Synchronized_range_allocator(Lock *lock, Allocator *metadata_alloc)
: _lock(lock), _alloc(metadata_alloc) { }
/**
* Return reference to wrapped (non-thread-safe) allocator
*
* This is needed, for example, if the wrapped allocator implements
* methods in addition to the Range_allocator interface.
*
* NOTE: Synchronize accesses to the raw allocator by means of
* the lock() member function.
*/
ALLOCATOR_IMPL *raw() { return &_alloc; }
/**
* Return reference to synchronization lock
*/
Lock *lock() { return _lock; }
/*************************
** Allocator interface **
*************************/
bool alloc(size_t size, void **out_addr) override
{
Lock::Guard lock_guard(*_lock);
return _alloc.alloc(size, out_addr);
}
void free(void *addr, size_t size) override
{
Lock::Guard lock_guard(*_lock);
_alloc.free(addr, size);
}
size_t consumed() override
{
Lock::Guard lock_guard(*_lock);
return _alloc.consumed();
}
size_t overhead(size_t size) override
{
Lock::Guard lock_guard(*_lock);
return _alloc.overhead(size);
}
bool need_size_for_free() const override
{
Lock::Guard lock_guard(*_lock);
return _alloc.need_size_for_free();
}
/*******************************
** Range-allocator interface **
*******************************/
int add_range(addr_t base, size_t size) override
{
Lock::Guard lock_guard(*_lock);
return _alloc.add_range(base, size);
}
int remove_range(addr_t base, size_t size) override
{
Lock::Guard lock_guard(*_lock);
return _alloc.remove_range(base, size);
}
Alloc_return alloc_aligned(size_t size, void **out_addr, int align = 0,
addr_t from = 0, addr_t to = ~0UL) override
{
Lock::Guard lock_guard(*_lock);
return _alloc.alloc_aligned(size, out_addr, align, from, to);
}
Alloc_return alloc_addr(size_t size, addr_t addr) override
{
Lock::Guard lock_guard(*_lock);
return _alloc.alloc_addr(size, addr);
}
void free(void *addr) override
{
Lock::Guard lock_guard(*_lock);
_alloc.free(addr);
}
size_t avail() override
{
Lock::Guard lock_guard(*_lock);
return _alloc.avail();
}
bool valid_addr(addr_t addr) override
{
Lock::Guard lock_guard(*_lock);
return _alloc.valid_addr(addr);
}
};
#endif /* _INCLUDE__BASE__SYNC_ALLOCATOR_H_ */
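To show the intended use of the wrappers above, a sketch that guards an AVL-based range allocator. 'Genode::Allocator_avl' is assumed to implement the 'Range_allocator' interface, and the address range as well as 'Genode::env()->heap()' are placeholders.

void range_alloc_example()
{
	static Genode::Synchronized_range_allocator<Genode::Allocator_avl>
		io_mem_alloc(Genode::env()->heap());

	/* every call below holds the embedded lock for its duration */
	io_mem_alloc.add_range(0x40000000, 16*1024*1024);

	void *addr = 0;
	if (io_mem_alloc.alloc(4096, &addr))
		io_mem_alloc.free(addr, 4096);
}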

View File

@ -9,23 +9,23 @@
* area, each thread has a fixed-sized slot, a thread context. The layout of
* each thread context looks as follows
*
* ! lower address
* ! ...
* ! ============================ <- aligned at the virtual context size
* !
* ! empty
* !
* ! ----------------------------
* !
* ! stack
* ! (top) <- initial stack pointer
* ! ---------------------------- <- address of 'Context' object
* ! additional context members
* ! ----------------------------
* ! UTCB
* ! ============================ <- aligned at the virtual context size
* ! ...
* ! higher address
* ; lower address
* ; ...
* ; ============================ <- aligned at the virtual context size
* ;
* ; empty
* ;
* ; ----------------------------
* ;
* ; stack
* ; (top) <- initial stack pointer
* ; ---------------------------- <- address of 'Context' object
* ; additional context members
* ; ----------------------------
* ; UTCB
* ; ============================ <- aligned at the virtual context size
* ; ...
* ; higher address
*
* On some platforms, a user-level thread-control block (UTCB) area contains
* data shared between the user-level thread and the kernel. It is typically
@ -69,484 +69,487 @@
namespace Genode {
class Rm_session;
class Thread_base;
template <unsigned> class Thread;
}
/**
* Concurrent control flow
*
* A 'Thread_base' object corresponds to a physical thread. The execution
* starts at the 'entry()' function as soon as 'start()' is called.
*/
class Thread_base
{
public:
class Context_alloc_failed : public Exception { };
class Stack_too_large : public Exception { };
class Stack_alloc_failed : public Exception { };
/**
* Concurrent control flow
*
* A 'Thread_base' object corresponds to a physical thread. The execution
* starts at the 'entry()' function as soon as 'start()' is called.
*/
class Genode::Thread_base
{
public:
class Context_alloc_failed : public Exception { };
class Stack_too_large : public Exception { };
class Stack_alloc_failed : public Exception { };
/**
* Thread context located within the thread-context area
*
* The end of a thread context is aligned to the virtual context size.
*/
struct Context
{
private:
/**
* Top of the stack is accessible via stack_top()
*/
long _stack[];
public:
/**
* Thread context located within the thread-context area
* Top of stack
*
* The end of a thread context is aligned to the virtual context size.
* The alignment matches an initial stack frame, which is
* sufficient for the AMD64 ABI (stack top + adjustment is 16-byte
* aligned).
*/
struct Context
addr_t stack_top() const
{
private:
/**
* Top of the stack is accessible via stack_top()
*/
long _stack[];
public:
/**
* Top of stack
*
* The alignment matches an initial stack frame, which is
* sufficient for the AMD64 ABI (stack top + adjustment is 16-byte
* aligned).
*/
addr_t stack_top() const
{
return ((addr_t)_stack & ~0xf) - Abi::stack_adjustment();
}
/**
* Ensure that the stack has a given size at the minimum
*
* \param size minimum stack size
*
* \throw Stack_too_large
* \throw Stack_alloc_failed
*/
void stack_size(size_t const size);
/**
* Virtual address of the start of the stack
*
* This address points to the beginning of the dataspace used
* for backing the thread context except for the UTCB (which is
* managed by the kernel).
*/
addr_t stack_base;
/**
* Pointer to corresponding 'Thread_base' object
*/
Thread_base *thread_base;
/**
* Dataspace containing the backing store for the thread context
*
* We keep the dataspace capability to be able to release the
* backing store on thread destruction.
*/
Ram_dataspace_capability ds_cap;
/**
* Maximum length of thread name, including null-termination
*/
enum { NAME_LEN = 64 };
/**
* Thread name, used for debugging
*/
char name[NAME_LEN];
/*
* <- end of regular memory area
*
* The following part of the thread context is backed by
* kernel-managed memory. No member variables are allowed
* beyond this point.
*/
/**
* Kernel-specific user-level thread control block
*/
Native_utcb utcb;
};
private:
/**
* Manage the allocation of thread contexts
*
* There exists only one instance of this class per process.
*/
class Context_allocator
{
private:
static constexpr size_t MAX_THREADS =
Native_config::context_area_virtual_size() /
Native_config::context_virtual_size();
struct Context_bit_allocator : Bit_allocator<MAX_THREADS>
{
Context_bit_allocator()
{
/* the first index is used by main thread */
_reserve(0, 1);
}
} _alloc;
Lock _threads_lock;
public:
/**
* Allocate thread context for specified thread
*
* \param thread thread for which to allocate the new context
* \param main_thread whether to allocate for the main thread
*
* \return virtual address of new thread context, or
* 0 if the allocation failed
*/
Context *alloc(Thread_base *thread, bool main_thread);
/**
* Release thread context
*/
void free(Context *thread);
/**
* Return 'Context' object for a given base address
*/
static Context *base_to_context(addr_t base);
/**
* Return base address of context containing the specified address
*/
static addr_t addr_to_base(void *addr);
/**
* Return index in context area for a given base address
*/
static size_t base_to_idx(addr_t base);
/**
* Return base address of context given index in context area
*/
static addr_t idx_to_base(size_t idx);
};
/**
* Return thread-context allocator
*/
static Context_allocator *_context_allocator();
/**
* Allocate and locally attach a new thread context
*
* \param stack_size size of this thread's stack
* \param main_thread whether this is the main thread
*/
Context *_alloc_context(size_t stack_size, bool main_thread);
/**
* Detach and release thread context of the thread
*/
void _free_context(Context *context);
/**
* Platform-specific thread-startup code
*
* On some platforms, each new thread has to perform a startup
* protocol, e.g., waiting for a message from the kernel. This hook
* function allows for the implementation of such protocols.
*/
void _thread_bootstrap();
/**
* Helper for thread startup
*/
static void _thread_start();
/**
* Hook for platform-specific destructor supplements
*/
void _deinit_platform_thread();
protected:
/**
* Capability for this thread (set by _start())
*
* Used if thread creation involves core's CPU service.
*/
Genode::Thread_capability _thread_cap;
/**
* Capability to pager paging this thread (created by _start())
*/
Genode::Pager_capability _pager_cap;
/**
* Pointer to cpu session used for this thread
*/
Genode::Cpu_session *_cpu_session;
/**
* Pointer to primary thread context
*/
Context *_context;
/**
* Physical thread ID
*/
Native_thread _tid;
/**
* Lock used for synchronizing the finalization of the thread
*/
Genode::Lock _join_lock;
/**
* Thread type
*
* Some threads need special treatment at construction. This enum
* is solely used to distinguish them at construction.
*/
enum Type { NORMAL, MAIN, REINITIALIZED_MAIN };
private:
Trace::Logger _trace_logger;
/**
* Return 'Trace::Logger' instance of calling thread
*
* This function is used by the tracing framework internally.
*/
static Trace::Logger *_logger();
/**
* Hook for platform-specific constructor supplements
*
* \param quota CPU quota that shall be granted to the thread
* \param type enables selection of special initialization
*/
void _init_platform_thread(size_t quota, Type type);
public:
/**
* Constructor
*
* \param quota CPU quota that shall be granted to the thread
* \param name thread name for debugging
* \param stack_size stack size
* \param type enables selection of special construction
*
* \throw Stack_too_large
* \throw Stack_alloc_failed
* \throw Context_alloc_failed
*
* The stack for the new thread will be allocated from the RAM
* session of the process environment. A small portion of the
* stack size is internally used by the framework for storing
* thread-context information such as the thread's name (see
* 'struct Context').
*
* FIXME: With type = Forked_main_thread the whole
* Context::_alloc_context call gets skipped but we should
* at least set Context::ds_cap in a way that it references
* the dataspace of the already attached stack.
*/
Thread_base(size_t quota, const char *name, size_t stack_size,
Type type = NORMAL);
/**
* Constructor
*
* \param quota CPU quota that shall be granted to the thread
* \param name thread name for debugging
* \param stack_size stack size
* \param type enables selection of special construction
* \param cpu_session capability to cpu session used for construction
*
* \throw Stack_too_large
* \throw Stack_alloc_failed
* \throw Context_alloc_failed
*/
Thread_base(size_t quota, const char *name, size_t stack_size,
Type type, Cpu_session *);
/**
* Destructor
*/
virtual ~Thread_base();
/**
* Entry function of the thread
*/
virtual void entry() = 0;
/**
* Start execution of the thread
*
* This function is virtual to enable the customization of threads
* used as server activation.
*/
virtual void start();
/**
* Request name of thread
*/
void name(char *dst, size_t dst_len);
/**
* Add an additional stack to the thread
*
* \throw Stack_too_large
* \throw Stack_alloc_failed
* \throw Context_alloc_failed
*
* The stack for the new thread will be allocated from the RAM
* session of the process environment. A small portion of the
* stack size is internally used by the framework for storing
* thread-context information such as the thread's name (see
* 'struct Context').
*
* \return pointer to the new stack's top
*/
void* alloc_secondary_stack(char const *name, size_t stack_size);
/**
* Remove a secondary stack from the thread
*/
void free_secondary_stack(void* stack_addr);
/**
* Request capability of thread
*/
Genode::Thread_capability cap() const { return _thread_cap; }
/**
* Cancel currently blocking operation
*/
void cancel_blocking();
/**
* Only to be called from platform-specific code
*/
Native_thread & tid() { return _tid; }
/**
* Return top of stack
*
* \return pointer just after first stack element
*/
void *stack_top() const { return (void *)_context->stack_top(); }
/**
* Return base of stack
*
* \return pointer to last stack element
*/
void *stack_base() { return (void*)_context->stack_base; }
/**
* Return 'Thread_base' object corresponding to the calling thread
*
* \return pointer to 'Thread_base' object, or
* 0 if the calling thread is the main thread
*/
static Thread_base *myself();
return ((addr_t)_stack & ~0xf) - Abi::stack_adjustment();
}
/**
* Ensure that the stack has a given size at the minimum
*
* \param size minimum stack size
*
* \throw Context::Stack_too_large
* \throw Context::Stack_alloc_failed
* \throw Stack_too_large
* \throw Stack_alloc_failed
*/
void stack_size(size_t const size) { _context->stack_size(size); }
void stack_size(size_t const size);
/**
* Return user-level thread control block
* Virtual address of the start of the stack
*
* Note that it is safe to call this function on the result of the
* 'myself' function. It handles the special case of 'myself' being
* 0 when called by the main thread.
* This address points to the beginning of the dataspace used
* for backing the thread context except for the UTCB (which is
* managed by the kernel).
*/
Native_utcb *utcb();
addr_t stack_base;
/**
* Block until the thread leaves the 'entry' function
* Pointer to corresponding 'Thread_base' object
*/
Thread_base *thread_base;
/**
* Dataspace containing the backing store for the thread context
*
* Join must not be called more than once. Subsequent calls have
* undefined behaviour.
* We keep the dataspace capability to be able to release the
* backing store on thread destruction.
*/
void join();
Ram_dataspace_capability ds_cap;
/**
* Log null-terminated string as trace event
* Maximum length of thread name, including null-termination
*/
static void trace(char const *cstring)
{
_logger()->log(cstring, strlen(cstring));
}
enum { NAME_LEN = 64 };
/**
* Log binary data as trace event
* Thread name, used for debugging
*/
static void trace(char const *data, size_t len)
{
_logger()->log(data, len);
}
char name[NAME_LEN];
/**
* Log trace event as defined in base/trace.h
*/
template <typename EVENT>
static void trace(EVENT const *event) { _logger()->log(event); }
};
template <unsigned STACK_SIZE>
class Thread : public Thread_base
{
public:
/**
* Constructor
/*
* <- end of regular memory area
*
* \param quota CPU quota that shall be granted to the thread
* \param name thread name (for debugging)
* \param type enables selection of special construction
* The following part of the thread context is backed by
* kernel-managed memory. No member variables are allowed
* beyond this point.
*/
explicit Thread(size_t quota, const char *name, Type type = NORMAL)
: Thread_base(quota, name, STACK_SIZE, type) { }
/**
* Constructor
*
* \param quota CPU quota that shall be granted to the thread
* \param name thread name (for debugging)
* \param cpu_session thread created via specific cpu session
* Kernel-specific user-level thread control block
*/
explicit Thread(size_t quota, const char *name, Cpu_session * cpu_session)
: Thread_base(quota, name, STACK_SIZE, Type::NORMAL, cpu_session)
{ }
Native_utcb utcb;
};
/**
* Shortcut for 'Thread(0, name, type)'
*/
explicit Thread(const char *name, Type type = NORMAL)
: Thread_base(0, name, STACK_SIZE, type) { }
private:
/**
* Shortcut for 'Thread(0, name, cpu_session)'
*/
explicit Thread(const char *name, Cpu_session * cpu_session)
: Thread_base(0, name, STACK_SIZE, Type::NORMAL, cpu_session)
{ }
};
}
/**
* Manage the allocation of thread contexts
*
* There exists only one instance of this class per process.
*/
class Context_allocator
{
private:
static constexpr size_t MAX_THREADS =
Native_config::context_area_virtual_size() /
Native_config::context_virtual_size();
struct Context_bit_allocator : Bit_allocator<MAX_THREADS>
{
Context_bit_allocator()
{
/* the first index is used by main thread */
_reserve(0, 1);
}
} _alloc;
Lock _threads_lock;
public:
/**
* Allocate thread context for specified thread
*
* \param thread thread for which to allocate the new context
* \param main_thread whether to allocate for the main thread
*
* \return virtual address of new thread context, or
* 0 if the allocation failed
*/
Context *alloc(Thread_base *thread, bool main_thread);
/**
* Release thread context
*/
void free(Context *thread);
/**
* Return 'Context' object for a given base address
*/
static Context *base_to_context(addr_t base);
/**
* Return base address of context containing the specified address
*/
static addr_t addr_to_base(void *addr);
/**
* Return index in context area for a given base address
*/
static size_t base_to_idx(addr_t base);
/**
* Return base address of context given index in context area
*/
static addr_t idx_to_base(size_t idx);
};
/**
* Return thread-context allocator
*/
static Context_allocator *_context_allocator();
/**
* Allocate and locally attach a new thread context
*
* \param stack_size size of this thread's stack
* \param main_thread whether this is the main thread
*/
Context *_alloc_context(size_t stack_size, bool main_thread);
/**
* Detach and release thread context of the thread
*/
void _free_context(Context *context);
/**
* Platform-specific thread-startup code
*
* On some platforms, each new thread has to perform a startup
* protocol, e.g., waiting for a message from the kernel. This hook
* function allows for the implementation of such protocols.
*/
void _thread_bootstrap();
/**
* Helper for thread startup
*/
static void _thread_start();
/**
* Hook for platform-specific destructor supplements
*/
void _deinit_platform_thread();
protected:
/**
* Capability for this thread (set by _start())
*
* Used if thread creation involves core's CPU service.
*/
Genode::Thread_capability _thread_cap;
/**
* Capability to pager paging this thread (created by _start())
*/
Genode::Pager_capability _pager_cap;
/**
* Pointer to cpu session used for this thread
*/
Genode::Cpu_session *_cpu_session;
/**
* Pointer to primary thread context
*/
Context *_context;
/**
* Physical thread ID
*/
Native_thread _tid;
/**
* Lock used for synchronizing the finalization of the thread
*/
Genode::Lock _join_lock;
/**
* Thread type
*
* Some threads need special treatment at construction. This enum
* is solely used to distinguish them at construction.
*/
enum Type { NORMAL, MAIN, REINITIALIZED_MAIN };
private:
Trace::Logger _trace_logger;
/**
* Return 'Trace::Logger' instance of calling thread
*
* This function is used by the tracing framework internally.
*/
static Trace::Logger *_logger();
/**
* Hook for platform-specific constructor supplements
*
* \param quota CPU quota that shall be granted to the thread
* \param type enables selection of special initialization
*/
void _init_platform_thread(size_t quota, Type type);
public:
/**
* Constructor
*
* \param quota CPU quota that shall be granted to the thread
* \param name thread name for debugging
* \param stack_size stack size
* \param type enables selection of special construction
*
* \throw Stack_too_large
* \throw Stack_alloc_failed
* \throw Context_alloc_failed
*
* The stack for the new thread will be allocated from the RAM
* session of the process environment. A small portion of the
* stack size is internally used by the framework for storing
* thread-context information such as the thread's name (see
* 'struct Context').
*
* FIXME: With type = Forked_main_thread the whole
* Context::_alloc_context call gets skipped but we should
* at least set Context::ds_cap in a way that it references
* the dataspace of the already attached stack.
*/
Thread_base(size_t quota, const char *name, size_t stack_size,
Type type = NORMAL);
/**
* Constructor
*
* \param quota CPU quota that shall be granted to the thread
* \param name thread name for debugging
* \param stack_size stack size
* \param type enables selection of special construction
* \param cpu_session capability to cpu session used for construction
*
* \throw Stack_too_large
* \throw Stack_alloc_failed
* \throw Context_alloc_failed
*/
Thread_base(size_t quota, const char *name, size_t stack_size,
Type type, Cpu_session *);
/**
* Destructor
*/
virtual ~Thread_base();
/**
* Entry function of the thread
*/
virtual void entry() = 0;
/**
* Start execution of the thread
*
* This function is virtual to enable the customization of threads
* used as server activation.
*/
virtual void start();
/**
* Request name of thread
*/
void name(char *dst, size_t dst_len);
/**
* Add an additional stack to the thread
*
* \throw Stack_too_large
* \throw Stack_alloc_failed
* \throw Context_alloc_failed
*
* The stack for the new thread will be allocated from the RAM
* session of the process environment. A small portion of the
* stack size is internally used by the framework for storing
* thread-context information such as the thread's name (see
* 'struct Context').
*
* \return pointer to the new stack's top
*/
void* alloc_secondary_stack(char const *name, size_t stack_size);
/**
* Remove a secondary stack from the thread
*/
void free_secondary_stack(void* stack_addr);
/**
* Request capability of thread
*/
Genode::Thread_capability cap() const { return _thread_cap; }
/**
* Cancel currently blocking operation
*/
void cancel_blocking();
/**
* Only to be called from platform-specific code
*/
Native_thread & tid() { return _tid; }
/**
* Return top of stack
*
* \return pointer just after first stack element
*/
void *stack_top() const { return (void *)_context->stack_top(); }
/**
* Return base of stack
*
* \return pointer to last stack element
*/
void *stack_base() { return (void*)_context->stack_base; }
/**
* Return 'Thread_base' object corresponding to the calling thread
*
* \return pointer to 'Thread_base' object, or
* 0 if the calling thread is the main thread
*/
static Thread_base *myself();
/**
* Ensure that the stack has a given size at the minimum
*
* \param size minimum stack size
*
* \throw Context::Stack_too_large
* \throw Context::Stack_alloc_failed
*/
void stack_size(size_t const size) { _context->stack_size(size); }
/**
* Return user-level thread control block
*
* Note that it is safe to call this function on the result of the
* 'myself' function. It handles the special case of 'myself' being
* 0 when called by the main thread.
*/
Native_utcb *utcb();
/**
* Block until the thread leaves the 'entry' function
*
* Join must not be called more than once. Subsequent calls have
* undefined behaviour.
*/
void join();
/**
* Log null-terminated string as trace event
*/
static void trace(char const *cstring)
{
_logger()->log(cstring, strlen(cstring));
}
/**
* Log binary data as trace event
*/
static void trace(char const *data, size_t len)
{
_logger()->log(data, len);
}
/**
* Log trace event as defined in base/trace.h
*/
template <typename EVENT>
static void trace(EVENT const *event) { _logger()->log(event); }
};
template <unsigned STACK_SIZE>
class Genode::Thread : public Thread_base
{
public:
/**
* Constructor
*
* \param quota CPU quota that shall be granted to the thread
* \param name thread name (for debugging)
* \param type enables selection of special construction
*/
explicit Thread(size_t quota, const char *name, Type type = NORMAL)
: Thread_base(quota, name, STACK_SIZE, type) { }
/**
* Constructor
*
* \param quota CPU quota that shall be granted to the thread
* \param name thread name (for debugging)
* \param cpu_session thread created via specific cpu session
*/
explicit Thread(size_t quota, const char *name, Cpu_session * cpu_session)
: Thread_base(quota, name, STACK_SIZE, Type::NORMAL, cpu_session)
{ }
/**
* Shortcut for 'Thread(0, name, type)'
*/
explicit Thread(const char *name, Type type = NORMAL)
: Thread_base(0, name, STACK_SIZE, type) { }
/**
* Shortcut for 'Thread(0, name, cpu_session)'
*/
explicit Thread(const char *name, Cpu_session * cpu_session)
: Thread_base(0, name, STACK_SIZE, Type::NORMAL, cpu_session)
{ }
};
#endif /* _INCLUDE__BASE__THREAD_H_ */
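To make the restructured 'Thread' API above easier to follow, here is a minimal usage sketch. The 'Worker' class, its stack size, and the surrounding function are hypothetical illustrations; only members declared in this header ('entry', 'start', 'join', and the 'Thread' constructor) are used.

#include <base/thread.h>

class Worker : public Genode::Thread<8192>
{
	public:

		Worker() : Thread("worker") { }

		void entry() override
		{
			/* work performed in the context of the new thread */
		}
};

void spawn_worker()
{
	static Worker worker;
	worker.start();   /* begin execution of 'entry' */
	worker.join();    /* block until 'entry' has returned */
}

Note that the stack size is a template argument of 'Thread', whereas quota, name, and (optionally) the CPU session are constructor arguments handed on to 'Thread_base'.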


@ -19,9 +19,8 @@
#include <base/thread_state_base.h>
namespace Genode {
namespace Genode { struct Thread_state; }
struct Thread_state : Thread_state_base { };
}
struct Genode::Thread_state : Thread_state_base { };
#endif /* _INCLUDE__BASE__THREAD_STATE_H_ */


@ -18,14 +18,13 @@
#include <cpu/cpu_state.h>
namespace Genode {
namespace Genode { struct Thread_state_base; }
struct Thread_state_base : Cpu_state
{
bool unresolved_page_fault;
struct Genode::Thread_state_base : Cpu_state
{
bool unresolved_page_fault;
Thread_state_base() : unresolved_page_fault(false) { };
};
}
Thread_state_base() : unresolved_page_fault(false) { };
};
#endif /* _INCLUDE__BASE__THREAD_STATE_BASE_H_ */


@ -18,6 +18,7 @@
#include <base/trace/policy.h>
namespace Genode { namespace Trace {
struct Rpc_call;
struct Rpc_returned;
struct Rpc_dispatch;


@ -16,20 +16,18 @@
#include <base/slab.h>
namespace Genode {
namespace Genode { template <typename, size_t> struct Tslab; }
template <typename T, size_t BLOCK_SIZE>
class Tslab : public Slab
{
public:
Tslab(Allocator *backing_store,
Slab_block *initial_sb = 0)
: Slab(sizeof(T), BLOCK_SIZE, initial_sb, backing_store)
{ }
template <typename T, Genode::size_t BLOCK_SIZE>
struct Genode::Tslab : Slab
{
Tslab(Allocator *backing_store,
Slab_block *initial_sb = 0)
: Slab(sizeof(T), BLOCK_SIZE, initial_sb, backing_store)
{ }
T *first_object() { return (T *)Slab::first_used_elem(); }
};
}
T *first_object() { return (T *)Slab::first_used_elem(); }
};
#endif /* _INCLUDE__BASE__TSLAB_H_ */
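For orientation, a small usage sketch of 'Tslab'. The payload type 'Item' and the use of the environment heap as backing store are assumptions for illustration, and the placement-new form relies on the framework's conventional 'operator new (size_t, Allocator *)' overload.

#include <base/env.h>
#include <base/tslab.h>

struct Item { unsigned value; };

void slab_example()
{
	/* each 4-KiB slab block holds several 'Item' objects plus bookkeeping */
	static Genode::Tslab<Item, 4096> slab(Genode::env()->heap());

	Item *item = new (&slab) Item();   /* allocate from the slab */
	item->value = 1;
}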


@ -21,39 +21,39 @@
#include <base/native_types.h>
#include <session/session.h>
namespace Genode {
struct Cap_session : Session
{
static const char *service_name() { return "CAP"; }
virtual ~Cap_session() { }
/**
* Allocate new unique userland capability
*
* \param ep entry point that will use this capability
*
* \return new userland capability
*/
virtual Native_capability alloc(Native_capability ep) = 0;
/**
* Free userland capability
*
* \param cap userland capability to free
*/
virtual void free(Native_capability cap) = 0;
namespace Genode { struct Cap_session; }
/*********************
** RPC declaration **
*********************/
struct Genode::Cap_session : Session
{
static const char *service_name() { return "CAP"; }
GENODE_RPC(Rpc_alloc, Native_capability, alloc, Native_capability);
GENODE_RPC(Rpc_free, void, free, Native_capability);
GENODE_RPC_INTERFACE(Rpc_alloc, Rpc_free);
};
}
virtual ~Cap_session() { }
/**
* Allocate new unique userland capability
*
* \param ep entry point that will use this capability
*
* \return new userland capability
*/
virtual Native_capability alloc(Native_capability ep) = 0;
/**
* Free userland capability
*
* \param cap userland capability to free
*/
virtual void free(Native_capability cap) = 0;
/*********************
** RPC declaration **
*********************/
GENODE_RPC(Rpc_alloc, Native_capability, alloc, Native_capability);
GENODE_RPC(Rpc_free, void, free, Native_capability);
GENODE_RPC_INTERFACE(Rpc_alloc, Rpc_free);
};
#endif /* _INCLUDE__CAP_SESSION__CAP_SESSION_H_ */


@ -18,18 +18,18 @@
#include <cap_session/cap_session.h>
#include <base/rpc_client.h>
namespace Genode {
namespace Genode { struct Cap_session_client; }
struct Cap_session_client : Rpc_client<Cap_session>
{
explicit Cap_session_client(Cap_session_capability session)
: Rpc_client<Cap_session>(session) { }
Native_capability alloc(Native_capability ep) {
return call<Rpc_alloc>(ep); }
struct Genode::Cap_session_client : Rpc_client<Cap_session>
{
explicit Cap_session_client(Cap_session_capability session)
: Rpc_client<Cap_session>(session) { }
void free(Native_capability cap) { call<Rpc_free>(cap); }
};
}
Native_capability alloc(Native_capability ep) override {
return call<Rpc_alloc>(ep); }
void free(Native_capability cap) override { call<Rpc_free>(cap); }
};
#endif /* _INCLUDE__CAP_SESSION__CLIENT_H_ */


@ -17,16 +17,16 @@
#include <cap_session/client.h>
#include <base/connection.h>
namespace Genode {
namespace Genode { struct Cap_connection; }
struct Cap_connection : Connection<Cap_session>, Cap_session_client
{
Cap_connection()
:
Connection<Cap_session>(session("ram_quota=4K")),
Cap_session_client(cap())
{ }
};
}
struct Genode::Cap_connection : Connection<Cap_session>, Cap_session_client
{
Cap_connection()
:
Connection<Cap_session>(session("ram_quota=4K")),
Cap_session_client(cap())
{ }
};
#endif /* _INCLUDE__CAP_SESSION__CONNECTION_H_ */
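A short usage sketch of the CAP session via 'Cap_connection', using only the operations shown above. The entrypoint capability 'ep_cap' is a placeholder obtained elsewhere.

#include <cap_session/connection.h>

void cap_example(Genode::Native_capability ep_cap)
{
	static Genode::Cap_connection cap;

	/* create a new object capability associated with the entrypoint */
	Genode::Native_capability obj_cap = cap.alloc(ep_cap);

	/* ... use 'obj_cap' ... */

	cap.free(obj_cap);
}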


@ -22,7 +22,6 @@ namespace Genode {
* Make D-Cache and I-Cache coherent
*/
void cache_coherent(Genode::addr_t addr, Genode::size_t size);
}
#endif /* _INCLUDE__CPU__CACHE_H_ */


@ -17,78 +17,78 @@
#include <cpu_session/capability.h>
#include <base/rpc_client.h>
namespace Genode {
namespace Genode { struct Cpu_session_client; }
struct Cpu_session_client : Rpc_client<Cpu_session>
{
explicit Cpu_session_client(Cpu_session_capability session)
: Rpc_client<Cpu_session>(session) { }
Thread_capability
create_thread(size_t quota, Name const &name, addr_t utcb = 0) {
return call<Rpc_create_thread>(quota, name, utcb); }
struct Genode::Cpu_session_client : Rpc_client<Cpu_session>
{
explicit Cpu_session_client(Cpu_session_capability session)
: Rpc_client<Cpu_session>(session) { }
Ram_dataspace_capability utcb(Thread_capability thread) {
return call<Rpc_utcb>(thread); }
Thread_capability
create_thread(size_t quota, Name const &name, addr_t utcb = 0) override {
return call<Rpc_create_thread>(quota, name, utcb); }
void kill_thread(Thread_capability thread) {
call<Rpc_kill_thread>(thread); }
Ram_dataspace_capability utcb(Thread_capability thread) override {
return call<Rpc_utcb>(thread); }
int set_pager(Thread_capability thread, Pager_capability pager) {
return call<Rpc_set_pager>(thread, pager); }
void kill_thread(Thread_capability thread) override {
call<Rpc_kill_thread>(thread); }
int start(Thread_capability thread, addr_t ip, addr_t sp) {
return call<Rpc_start>(thread, ip, sp); }
int set_pager(Thread_capability thread, Pager_capability pager) override {
return call<Rpc_set_pager>(thread, pager); }
void pause(Thread_capability thread) {
call<Rpc_pause>(thread); }
int start(Thread_capability thread, addr_t ip, addr_t sp) override {
return call<Rpc_start>(thread, ip, sp); }
void resume(Thread_capability thread) {
call<Rpc_resume>(thread); }
void pause(Thread_capability thread) override {
call<Rpc_pause>(thread); }
void cancel_blocking(Thread_capability thread) {
call<Rpc_cancel_blocking>(thread); }
void resume(Thread_capability thread) override {
call<Rpc_resume>(thread); }
Thread_state state(Thread_capability thread) {
return call<Rpc_get_state>(thread); }
void cancel_blocking(Thread_capability thread) override {
call<Rpc_cancel_blocking>(thread); }
void state(Thread_capability thread, Thread_state const &state) {
call<Rpc_set_state>(thread, state); }
Thread_state state(Thread_capability thread) override {
return call<Rpc_get_state>(thread); }
void exception_handler(Thread_capability thread, Signal_context_capability handler) {
call<Rpc_exception_handler>(thread, handler); }
void state(Thread_capability thread, Thread_state const &state) override {
call<Rpc_set_state>(thread, state); }
void single_step(Thread_capability thread, bool enable) {
call<Rpc_single_step>(thread, enable); }
void exception_handler(Thread_capability thread, Signal_context_capability handler) override {
call<Rpc_exception_handler>(thread, handler); }
Affinity::Space affinity_space() const {
return call<Rpc_affinity_space>(); }
void single_step(Thread_capability thread, bool enable) override {
call<Rpc_single_step>(thread, enable); }
void affinity(Thread_capability thread, Affinity::Location location) {
call<Rpc_affinity>(thread, location); }
Affinity::Space affinity_space() const override {
return call<Rpc_affinity_space>(); }
Dataspace_capability trace_control() {
return call<Rpc_trace_control>(); }
void affinity(Thread_capability thread, Affinity::Location location) override {
call<Rpc_affinity>(thread, location); }
unsigned trace_control_index(Thread_capability thread) {
return call<Rpc_trace_control_index>(thread); }
Dataspace_capability trace_control() override {
return call<Rpc_trace_control>(); }
Dataspace_capability trace_buffer(Thread_capability thread) {
return call<Rpc_trace_buffer>(thread); }
unsigned trace_control_index(Thread_capability thread) override {
return call<Rpc_trace_control_index>(thread); }
Dataspace_capability trace_policy(Thread_capability thread) {
return call<Rpc_trace_policy>(thread); }
Dataspace_capability trace_buffer(Thread_capability thread) override {
return call<Rpc_trace_buffer>(thread); }
int ref_account(Cpu_session_capability session) {
return call<Rpc_ref_account>(session); }
Dataspace_capability trace_policy(Thread_capability thread) override {
return call<Rpc_trace_policy>(thread); }
int transfer_quota(Cpu_session_capability session, size_t amount) {
return call<Rpc_transfer_quota>(session, amount); }
int ref_account(Cpu_session_capability session) override {
return call<Rpc_ref_account>(session); }
size_t quota() { return call<Rpc_quota>(); }
int transfer_quota(Cpu_session_capability session, size_t amount) override {
return call<Rpc_transfer_quota>(session, amount); }
size_t used() { return call<Rpc_used>(); }
};
}
size_t quota() override { return call<Rpc_quota>(); }
size_t used() override { return call<Rpc_used>(); }
};
#endif /* _INCLUDE__CPU_SESSION__CLIENT_H_ */


@ -17,28 +17,28 @@
#include <cpu_session/client.h>
#include <base/connection.h>
namespace Genode {
namespace Genode { struct Cpu_connection; }
struct Cpu_connection : Connection<Cpu_session>, Cpu_session_client
{
enum { RAM_QUOTA = 32*1024 };
/**
* Constructor
*
* \param label initial session label
* \param priority designated priority of all threads created
* with this CPU session
*/
Cpu_connection(char const *label = "",
long priority = DEFAULT_PRIORITY,
Affinity const &affinity = Affinity())
:
Connection<Cpu_session>(
session(affinity, "priority=0x%lx, ram_quota=36K, label=\"%s\"",
priority, label)),
Cpu_session_client(cap()) { }
};
}
struct Genode::Cpu_connection : Connection<Cpu_session>, Cpu_session_client
{
enum { RAM_QUOTA = 32*1024 };
/**
* Constructor
*
* \param label initial session label
* \param priority designated priority of all threads created
* with this CPU session
*/
Cpu_connection(char const *label = "",
long priority = DEFAULT_PRIORITY,
Affinity const &affinity = Affinity())
:
Connection<Cpu_session>(
session(affinity, "priority=0x%lx, ram_quota=36K, label=\"%s\"",
priority, label)),
Cpu_session_client(cap()) { }
};
#endif /* _INCLUDE__CPU_SESSION__CONNECTION_H_ */
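As a sketch of how the connection and session-client pieces above fit together: open a CPU session with a distinct priority and create a thread through it. The label, the use of the default priority, and the 'ip'/'sp' arguments are placeholders for illustration.

#include <cpu_session/connection.h>

void cpu_example(Genode::addr_t ip, Genode::addr_t sp)
{
	static Genode::Cpu_connection cpu("worker-threads",
	                                  Genode::Cpu_session::DEFAULT_PRIORITY);

	Genode::Thread_capability t = cpu.create_thread(0, "worker");

	cpu.start(t, ip, sp);   /* instruction and stack pointer of the new thread */
}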


@ -2,21 +2,6 @@
* \brief CPU (processing time) manager session interface
* \author Christian Helmuth
* \date 2006-06-27
*
* :Question:
*
* Why are thread operations not methods of the thread but
* methods of the CPU session?
*
* :Answer:
*
* This enables the CPU session to impose policies on thread
* operations. These policies are based on the session
* construction arguments. If thread operations were provided
* as thread methods, Thread would need to consult
* its container object (its CPU session) about the authorization
* of each operation and, thereby, would introduce a circular
* dependency between CPU session and Thread.
*/
/*
@ -41,344 +26,344 @@
#include <session/session.h>
#include <ram_session/ram_session.h>
namespace Genode {
namespace Genode { struct Cpu_session; }
struct Cpu_session : Session
struct Genode::Cpu_session : Session
{
/*********************
** Exception types **
*********************/
class Thread_creation_failed : public Exception { };
class State_access_failed : public Exception { };
class Quota_exceeded : public Thread_creation_failed { };
class Out_of_metadata : public Exception { };
static const char *service_name() { return "CPU"; }
enum { THREAD_NAME_LEN = 48 };
enum { PRIORITY_LIMIT = 1 << 16 };
enum { QUOTA_LIMIT_LOG2 = 15 };
enum { QUOTA_LIMIT = 1 << QUOTA_LIMIT_LOG2 };
enum { DEFAULT_PRIORITY = 0 };
typedef Rpc_in_buffer<THREAD_NAME_LEN> Name;
virtual ~Cpu_session() { }
/**
* Create a new thread
*
* \param quota CPU quota that shall be granted to the thread
* \param name name for the thread
* \param utcb Base of the UTCB that will be used by the thread
* \return capability representing the new thread
* \throw Thread_creation_failed
* \throw Out_of_metadata
* \throw Quota_exceeded
*/
virtual Thread_capability create_thread(size_t quota,
Name const &name,
addr_t utcb = 0) = 0;
/**
* Get dataspace of the UTCB that is used by the specified thread
*/
virtual Ram_dataspace_capability utcb(Thread_capability thread) = 0;
/**
* Kill an existing thread
*
* \param thread capability of the thread to kill
*/
virtual void kill_thread(Thread_capability thread) = 0;
/**
* Set paging capabilities for thread
*
* \param thread thread to configure
* \param pager capability used to propagate page faults
*/
virtual int set_pager(Thread_capability thread,
Pager_capability pager) = 0;
/**
* Modify instruction and stack pointer of thread - start the
* thread
*
* \param thread thread to start
* \param ip initial instruction pointer
* \param sp initial stack pointer
*
* \return 0 on success
*/
virtual int start(Thread_capability thread, addr_t ip, addr_t sp) = 0;
/**
* Pause the specified thread
*
* After calling this function, the execution of the thread can be
* continued by calling 'resume'.
*/
virtual void pause(Thread_capability thread) = 0;
/**
* Resume the specified thread
*/
virtual void resume(Thread_capability thread) = 0;
/**
* Cancel a currently blocking operation
*
* \param thread thread to unblock
*/
virtual void cancel_blocking(Thread_capability thread) = 0;
/**
* Get the current state of a specific thread
*
* \param thread targeted thread
* \return state of the targeted thread
* \throw State_access_failed
*/
virtual Thread_state state(Thread_capability thread) = 0;
/**
* Override the current state of a specific thread
*
* \param thread targeted thread
* \param state state that shall be applied
* \throw State_access_failed
*/
virtual void state(Thread_capability thread,
Thread_state const &state) = 0;
/**
* Register signal handler for exceptions of the specified thread
*
* If 'thread' is an invalid capability, the default exception
* handler for the CPU session is set. This handler is used for
* all threads that have no explicitly installed exception handler.
* The new default signal handler will take effect for threads
* created after the call.
*
* On Linux, this exception is delivered when the process triggers
* a SIGCHLD. On other platforms, this exception is delivered on
* the occurrence of CPU exceptions such as division by zero.
*/
virtual void exception_handler(Thread_capability thread,
Signal_context_capability handler) = 0;
/**
* Enable/disable single stepping for specified thread.
*
* Since this function is currently supported by only a small number of
* platforms, we provide a default implementation.
*
* \param thread thread to set into single step mode
* \param enable true = enable single-step mode; false = disable
*/
virtual void single_step(Thread_capability, bool) {}
/**
* Return affinity space of CPU nodes available to the CPU session
*
* The dimension of the affinity space as returned by this function
* represents the physical CPUs that are available.
*/
virtual Affinity::Space affinity_space() const = 0;
/**
* Define affinity of thread to one or multiple CPU nodes
*
* In the normal case, a thread is assigned to a single CPU.
* Specifying more than one CPU node principally allows a CPU
* service to balance the load of threads among multiple CPUs.
*/
virtual void affinity(Thread_capability thread,
Affinity::Location affinity) = 0;
/**
* Translate generic priority value to kernel-specific priority levels
*
* \param pf_prio_limit maximum priority used for the kernel, must
* be a power of 2
* \param prio generic priority value as used by the CPU
* session interface
* \param inverse order of platform priorities, if true
* 'pf_prio_limit' corresponds to the highest
* priority, otherwise it refers to the
* lowest priority.
* \return platform-specific priority value
*/
static unsigned scale_priority(unsigned pf_prio_limit, unsigned prio,
bool inverse = true)
{
/*********************
** Exception types **
*********************/
/*
* Generic priority values range from 0 (highest) to 'PRIORITY_LIMIT'
* (lowest). On platforms where priority levels are defined
* the other way round, we have to invert the priority value.
*/
prio = inverse ? Cpu_session::PRIORITY_LIMIT - prio : prio;
class Thread_creation_failed : public Exception { };
class State_access_failed : public Exception { };
class Quota_exceeded : public Thread_creation_failed { };
class Out_of_metadata : public Exception { };
/* scale value to platform priority range 0..pf_prio_limit */
return (prio*pf_prio_limit)/Cpu_session::PRIORITY_LIMIT;
}
static const char *service_name() { return "CPU"; }
/**
* Request trace control dataspace
*
* The trace-control dataspace is used to propagate tracing
* control information from core to the threads of a CPU session.
*
* The trace-control dataspace is accounted to the CPU session.
*/
virtual Dataspace_capability trace_control() = 0;
enum { THREAD_NAME_LEN = 48 };
enum { PRIORITY_LIMIT = 1 << 16 };
enum { QUOTA_LIMIT_LOG2 = 15 };
enum { QUOTA_LIMIT = 1 << QUOTA_LIMIT_LOG2 };
enum { DEFAULT_PRIORITY = 0 };
/**
* Request index of a trace control block for given thread
*
* The trace control dataspace contains the control blocks for
* all threads of the CPU session. Each thread gets assigned a
* different index by the CPU service.
*/
virtual unsigned trace_control_index(Thread_capability thread) = 0;
typedef Rpc_in_buffer<THREAD_NAME_LEN> Name;
/**
* Request trace buffer for the specified thread
*
* The trace buffer is not accounted to the CPU session. It is
* owned by a TRACE session.
*/
virtual Dataspace_capability trace_buffer(Thread_capability thread) = 0;
virtual ~Cpu_session() { }
/**
* Request trace policy
*
* The trace policy buffer is not accounted to the CPU session. It
* is owned by a TRACE session.
*/
virtual Dataspace_capability trace_policy(Thread_capability thread) = 0;
/**
* Create a new thread
*
* \param quota CPU quota that shall be granted to the thread
* \param name name for the thread
* \param utcb Base of the UTCB that will be used by the thread
* \return capability representing the new thread
* \throw Thread_creation_failed
* \throw Out_of_metadata
* \throw Quota_exceeded
*/
virtual Thread_capability create_thread(size_t quota,
Name const &name,
addr_t utcb = 0) = 0;
/**
* Define reference account for the CPU session
*
* \param cpu_session reference account
*
* \return 0 on success
*
* Each CPU session requires another CPU session as reference
* account to transfer quota to and from. The reference account can
* be defined only once.
*/
virtual int ref_account(Cpu_session_capability cpu_session) = 0;
/**
* Get dataspace of the UTCB that is used by the specified thread
*/
virtual Ram_dataspace_capability utcb(Thread_capability thread) = 0;
/**
* Transfer quota to another CPU session
*
* \param cpu_session receiver of quota donation
* \param amount amount of quota to donate
* \return 0 on success
*
* Quota can only be transferred if the specified CPU session is
* either the reference account for this session or vice versa.
*/
virtual int transfer_quota(Cpu_session_capability cpu_session,
size_t amount) = 0;
/**
* Kill an existing thread
*
* \param thread capability of the thread to kill
*/
virtual void kill_thread(Thread_capability thread) = 0;
/**
* Return current quota limit
*/
virtual size_t quota() = 0;
/**
* Set paging capabilities for thread
*
* \param thread thread to configure
* \param pager capability used to propagate page faults
*/
virtual int set_pager(Thread_capability thread,
Pager_capability pager) = 0;
/**
* Return amount of used quota
*/
virtual size_t used() = 0;
/**
* Modify instruction and stack pointer of thread - start the
* thread
*
* \param thread thread to start
* \param ip initial instruction pointer
* \param sp initial stack pointer
*
* \return 0 on success
*/
virtual int start(Thread_capability thread, addr_t ip, addr_t sp) = 0;
/**
* Return amount of available quota
*/
size_t avail()
{
size_t q = quota(), u = used();
return q > u ? q - u : 0;
}
/**
* Pause the specified thread
*
* After calling this function, the execution of the thread can be
* continued by calling 'resume'.
*/
virtual void pause(Thread_capability thread) = 0;
/**
* Transform percentage of CPU utilization into CPU quota
*/
static size_t pc_to_quota(size_t const pc) {
return (pc << QUOTA_LIMIT_LOG2) / 100; }
/**
* Resume the specified thread
*/
virtual void resume(Thread_capability thread) = 0;
/*********************
** RPC declaration **
*********************/
/**
* Cancel a currently blocking operation
*
* \param thread thread to unblock
*/
virtual void cancel_blocking(Thread_capability thread) = 0;
GENODE_RPC_THROW(Rpc_create_thread, Thread_capability, create_thread,
GENODE_TYPE_LIST(Thread_creation_failed, Out_of_metadata),
size_t, Name const &, addr_t);
GENODE_RPC(Rpc_utcb, Ram_dataspace_capability, utcb, Thread_capability);
GENODE_RPC(Rpc_kill_thread, void, kill_thread, Thread_capability);
GENODE_RPC(Rpc_set_pager, int, set_pager, Thread_capability, Pager_capability);
GENODE_RPC(Rpc_start, int, start, Thread_capability, addr_t, addr_t);
GENODE_RPC(Rpc_pause, void, pause, Thread_capability);
GENODE_RPC(Rpc_resume, void, resume, Thread_capability);
GENODE_RPC(Rpc_cancel_blocking, void, cancel_blocking, Thread_capability);
GENODE_RPC_THROW(Rpc_get_state, Thread_state, state,
GENODE_TYPE_LIST(State_access_failed),
Thread_capability);
GENODE_RPC_THROW(Rpc_set_state, void, state,
GENODE_TYPE_LIST(State_access_failed),
Thread_capability, Thread_state const &);
GENODE_RPC(Rpc_exception_handler, void, exception_handler,
Thread_capability, Signal_context_capability);
GENODE_RPC(Rpc_single_step, void, single_step, Thread_capability, bool);
GENODE_RPC(Rpc_affinity_space, Affinity::Space, affinity_space);
GENODE_RPC(Rpc_affinity, void, affinity, Thread_capability, Affinity::Location);
GENODE_RPC(Rpc_trace_control, Dataspace_capability, trace_control);
GENODE_RPC(Rpc_trace_control_index, unsigned, trace_control_index, Thread_capability);
GENODE_RPC(Rpc_trace_buffer, Dataspace_capability, trace_buffer, Thread_capability);
GENODE_RPC(Rpc_trace_policy, Dataspace_capability, trace_policy, Thread_capability);
GENODE_RPC(Rpc_ref_account, int, ref_account, Cpu_session_capability);
GENODE_RPC(Rpc_transfer_quota, int, transfer_quota, Cpu_session_capability, size_t);
GENODE_RPC(Rpc_quota, size_t, quota);
GENODE_RPC(Rpc_used, size_t, used);
/**
* Get the current state of a specific thread
*
* \param thread targeted thread
* \return state of the targeted thread
* \throw State_access_failed
*/
virtual Thread_state state(Thread_capability thread) = 0;
/**
* Override the current state of a specific thread
*
* \param thread targeted thread
* \param state state that shall be applied
* \throw State_access_failed
*/
virtual void state(Thread_capability thread,
Thread_state const &state) = 0;
/**
* Register signal handler for exceptions of the specified thread
*
* If 'thread' is an invalid capability, the default exception
* handler for the CPU session is set. This handler is used for
* all threads that have no explicitly installed exception handler.
* The new default signal handler will take effect for threads
* created after the call.
*
* On Linux, this exception is delivered when the process triggers
* a SIGCHLD. On other platforms, this exception is delivered on
* the occurrence of CPU exceptions such as division by zero.
*/
virtual void exception_handler(Thread_capability thread,
Signal_context_capability handler) = 0;
/**
* Enable/disable single stepping for specified thread.
*
* Since this function is currently supported by only a small number of
* platforms, we provide a default implementation.
*
* \param thread thread to set into single step mode
* \param enable true = enable single-step mode; false = disable
*/
virtual void single_step(Thread_capability, bool) {}
/**
* Return affinity space of CPU nodes available to the CPU session
*
* The dimension of the affinity space as returned by this function
* represents the physical CPUs that are available.
*/
virtual Affinity::Space affinity_space() const = 0;
/**
* Define affinity of thread to one or multiple CPU nodes
*
* In the normal case, a thread is assigned to a single CPU.
* Specifying more than one CPU node principally allows a CPU
* service to balance the load of threads among multiple CPUs.
*/
virtual void affinity(Thread_capability thread,
Affinity::Location affinity) = 0;
/**
* Translate generic priority value to kernel-specific priority levels
*
* \param pf_prio_limit maximum priority used for the kernel, must
* be a power of 2
* \param prio generic priority value as used by the CPU
* session interface
* \param inverse order of platform priorities, if true
* 'pf_prio_limit' corresponds to the highest
* priority, otherwise it refers to the
* lowest priority.
* \return platform-specific priority value
*/
static unsigned scale_priority(unsigned pf_prio_limit, unsigned prio,
bool inverse = true)
{
/*
* Generic priority values range from 0 (highest) to 'PRIORITY_LIMIT'
* (lowest). On platforms where priority levels are defined
* the other way round, we have to invert the priority value.
*/
prio = inverse ? Cpu_session::PRIORITY_LIMIT - prio : prio;
/* scale value to platform priority range 0..pf_prio_limit */
return (prio*pf_prio_limit)/Cpu_session::PRIORITY_LIMIT;
}
/**
* Request trace control dataspace
*
* The trace-control dataspace is used to propagate tracing
* control information from core to the threads of a CPU session.
*
* The trace-control dataspace is accounted to the CPU session.
*/
virtual Dataspace_capability trace_control() = 0;
/**
* Request index of a trace control block for given thread
*
* The trace control dataspace contains the control blocks for
* all threads of the CPU session. Each thread gets assigned a
* different index by the CPU service.
*/
virtual unsigned trace_control_index(Thread_capability thread) = 0;
/**
* Request trace buffer for the specified thread
*
* The trace buffer is not accounted to the CPU session. It is
* owned by a TRACE session.
*/
virtual Dataspace_capability trace_buffer(Thread_capability thread) = 0;
/**
* Request trace policy
*
* The trace policy buffer is not accounted to the CPU session. It
* is owned by a TRACE session.
*/
virtual Dataspace_capability trace_policy(Thread_capability thread) = 0;
/**
* Define reference account for the CPU session
*
* \param cpu_session reference account
*
* \return 0 on success
*
* Each CPU session requires another CPU session as reference
* account to transfer quota to and from. The reference account can
* be defined only once.
*/
virtual int ref_account(Cpu_session_capability cpu_session) = 0;
/**
* Transfer quota to another CPU session
*
* \param cpu_session receiver of quota donation
* \param amount amount of quota to donate
* \return 0 on success
*
* Quota can only be transferred if the specified CPU session is
* either the reference account for this session or vice versa.
*/
virtual int transfer_quota(Cpu_session_capability cpu_session,
size_t amount) = 0;
/**
* Return current quota limit
*/
virtual size_t quota() = 0;
/**
* Return amount of used quota
*/
virtual size_t used() = 0;
/**
* Return amount of available quota
*/
size_t avail()
{
size_t q = quota(), u = used();
return q > u ? q - u : 0;
}
/**
* Transform percentage of CPU utilization into CPU quota
*/
static size_t pc_to_quota(size_t const pc) {
return (pc << QUOTA_LIMIT_LOG2) / 100; }
/*********************
** RPC declaration **
*********************/
GENODE_RPC_THROW(Rpc_create_thread, Thread_capability, create_thread,
GENODE_TYPE_LIST(Thread_creation_failed, Out_of_metadata),
size_t, Name const &, addr_t);
GENODE_RPC(Rpc_utcb, Ram_dataspace_capability, utcb, Thread_capability);
GENODE_RPC(Rpc_kill_thread, void, kill_thread, Thread_capability);
GENODE_RPC(Rpc_set_pager, int, set_pager, Thread_capability, Pager_capability);
GENODE_RPC(Rpc_start, int, start, Thread_capability, addr_t, addr_t);
GENODE_RPC(Rpc_pause, void, pause, Thread_capability);
GENODE_RPC(Rpc_resume, void, resume, Thread_capability);
GENODE_RPC(Rpc_cancel_blocking, void, cancel_blocking, Thread_capability);
GENODE_RPC_THROW(Rpc_get_state, Thread_state, state,
GENODE_TYPE_LIST(State_access_failed),
Thread_capability);
GENODE_RPC_THROW(Rpc_set_state, void, state,
GENODE_TYPE_LIST(State_access_failed),
Thread_capability, Thread_state const &);
GENODE_RPC(Rpc_exception_handler, void, exception_handler,
Thread_capability, Signal_context_capability);
GENODE_RPC(Rpc_single_step, void, single_step, Thread_capability, bool);
GENODE_RPC(Rpc_affinity_space, Affinity::Space, affinity_space);
GENODE_RPC(Rpc_affinity, void, affinity, Thread_capability, Affinity::Location);
GENODE_RPC(Rpc_trace_control, Dataspace_capability, trace_control);
GENODE_RPC(Rpc_trace_control_index, unsigned, trace_control_index, Thread_capability);
GENODE_RPC(Rpc_trace_buffer, Dataspace_capability, trace_buffer, Thread_capability);
GENODE_RPC(Rpc_trace_policy, Dataspace_capability, trace_policy, Thread_capability);
GENODE_RPC(Rpc_ref_account, int, ref_account, Cpu_session_capability);
GENODE_RPC(Rpc_transfer_quota, int, transfer_quota, Cpu_session_capability, size_t);
GENODE_RPC(Rpc_quota, size_t, quota);
GENODE_RPC(Rpc_used, size_t, used);
/*
* 'GENODE_RPC_INTERFACE' declaration done manually
*
* The number of RPC functions of this interface exceeds the maximum
* number of elements supported by 'Meta::Type_list'. Therefore, we
* construct the type list by hand using nested type tuples instead
* of employing the convenience macro 'GENODE_RPC_INTERFACE'.
*/
typedef Meta::Type_tuple<Rpc_create_thread,
Meta::Type_tuple<Rpc_utcb,
Meta::Type_tuple<Rpc_kill_thread,
Meta::Type_tuple<Rpc_set_pager,
Meta::Type_tuple<Rpc_start,
Meta::Type_tuple<Rpc_pause,
Meta::Type_tuple<Rpc_resume,
Meta::Type_tuple<Rpc_cancel_blocking,
Meta::Type_tuple<Rpc_set_state,
Meta::Type_tuple<Rpc_get_state,
Meta::Type_tuple<Rpc_exception_handler,
Meta::Type_tuple<Rpc_single_step,
Meta::Type_tuple<Rpc_affinity_space,
Meta::Type_tuple<Rpc_affinity,
Meta::Type_tuple<Rpc_trace_control,
Meta::Type_tuple<Rpc_trace_control_index,
Meta::Type_tuple<Rpc_trace_buffer,
Meta::Type_tuple<Rpc_trace_policy,
Meta::Type_tuple<Rpc_ref_account,
Meta::Type_tuple<Rpc_transfer_quota,
Meta::Type_tuple<Rpc_quota,
Meta::Type_tuple<Rpc_used,
Meta::Empty>
> > > > > > > > > > > > > > > > > > > > > Rpc_functions;
};
}
/*
* 'GENODE_RPC_INTERFACE' declaration done manually
*
* The number of RPC functions of this interface exceeds the maximum
* number of elements supported by 'Meta::Type_list'. Therefore, we
* construct the type list by hand using nested type tuples instead
* of employing the convenience macro 'GENODE_RPC_INTERFACE'.
*/
typedef Meta::Type_tuple<Rpc_create_thread,
Meta::Type_tuple<Rpc_utcb,
Meta::Type_tuple<Rpc_kill_thread,
Meta::Type_tuple<Rpc_set_pager,
Meta::Type_tuple<Rpc_start,
Meta::Type_tuple<Rpc_pause,
Meta::Type_tuple<Rpc_resume,
Meta::Type_tuple<Rpc_cancel_blocking,
Meta::Type_tuple<Rpc_set_state,
Meta::Type_tuple<Rpc_get_state,
Meta::Type_tuple<Rpc_exception_handler,
Meta::Type_tuple<Rpc_single_step,
Meta::Type_tuple<Rpc_affinity_space,
Meta::Type_tuple<Rpc_affinity,
Meta::Type_tuple<Rpc_trace_control,
Meta::Type_tuple<Rpc_trace_control_index,
Meta::Type_tuple<Rpc_trace_buffer,
Meta::Type_tuple<Rpc_trace_policy,
Meta::Type_tuple<Rpc_ref_account,
Meta::Type_tuple<Rpc_transfer_quota,
Meta::Type_tuple<Rpc_quota,
Meta::Type_tuple<Rpc_used,
Meta::Empty>
> > > > > > > > > > > > > > > > > > > > > Rpc_functions;
};
#endif /* _INCLUDE__CPU_SESSION__CPU_SESSION_H_ */
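The two static helpers 'scale_priority' and 'pc_to_quota' are plain integer arithmetic on the constants defined above, which a concrete example makes obvious. The kernel limit of 256 priority levels is an arbitrary assumption for illustration.

#include <cpu_session/cpu_session.h>

void quota_example()
{
	/* 50 % of a CPU in quota units: (50 << 15) / 100 = 16384 */
	Genode::size_t const quota = Genode::Cpu_session::pc_to_quota(50);

	/* generic priority 0 (highest) on a kernel with 256 inverse-ordered
	 * levels: ((1 << 16) - 0) * 256 / (1 << 16) = 256 */
	unsigned const prio = Genode::Cpu_session::scale_priority(256, 0);

	(void)quota; (void)prio;
}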


@ -17,17 +17,17 @@
#include <dataspace/capability.h>
#include <base/rpc_client.h>
namespace Genode {
namespace Genode { struct Dataspace_client; }
struct Dataspace_client : Rpc_client<Dataspace>
{
explicit Dataspace_client(Dataspace_capability ds)
: Rpc_client<Dataspace>(ds) { }
size_t size() { return call<Rpc_size>(); }
addr_t phys_addr() { return call<Rpc_phys_addr>(); }
bool writable() { return call<Rpc_writable>(); }
};
}
struct Genode::Dataspace_client : Rpc_client<Dataspace>
{
explicit Dataspace_client(Dataspace_capability ds)
: Rpc_client<Dataspace>(ds) { }
size_t size() override { return call<Rpc_size>(); }
addr_t phys_addr() override { return call<Rpc_phys_addr>(); }
bool writable() override { return call<Rpc_writable>(); }
};
#endif /* _INCLUDE__DATASPACE__CLIENT_H_ */
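The client stub above is used in the usual RPC-client fashion; a minimal sketch, with 'ds_cap' standing in for a capability obtained from a dataspace provider:

#include <dataspace/client.h>

void dataspace_example(Genode::Dataspace_capability ds_cap)
{
	Genode::Dataspace_client ds(ds_cap);

	Genode::size_t const bytes    = ds.size();      /* RPC to the provider */
	bool           const writable = ds.writable();

	(void)bytes; (void)writable;
}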


@ -17,38 +17,38 @@
#include <base/stdint.h>
#include <base/rpc.h>
namespace Genode {
struct Dataspace
{
virtual ~Dataspace() { }
/**
* Request size of dataspace
*/
virtual size_t size() = 0;
/**
* Request base address in physical address space
*/
virtual addr_t phys_addr() = 0;
/**
* Return true if dataspace is writable
*/
virtual bool writable() = 0;
namespace Genode { struct Dataspace; }
/*********************
** RPC declaration **
*********************/
struct Genode::Dataspace
{
virtual ~Dataspace() { }
GENODE_RPC(Rpc_size, size_t, size);
GENODE_RPC(Rpc_phys_addr, addr_t, phys_addr);
GENODE_RPC(Rpc_writable, bool, writable);
/**
* Request size of dataspace
*/
virtual size_t size() = 0;
GENODE_RPC_INTERFACE(Rpc_size, Rpc_phys_addr, Rpc_writable);
};
}
/**
* Request base address in physical address space
*/
virtual addr_t phys_addr() = 0;
/**
* Return true if dataspace is writable
*/
virtual bool writable() = 0;
/*********************
** RPC declaration **
*********************/
GENODE_RPC(Rpc_size, size_t, size);
GENODE_RPC(Rpc_phys_addr, addr_t, phys_addr);
GENODE_RPC(Rpc_writable, bool, writable);
GENODE_RPC_INTERFACE(Rpc_size, Rpc_phys_addr, Rpc_writable);
};
#endif /* _INCLUDE__DATASPACE__DATASPACE_H_ */


@ -19,173 +19,173 @@
/* Genode includes */
#include <util/mmio.h>
namespace Genode
namespace Genode { class Epit_base; }
/**
* Core timer
*/
class Genode::Epit_base : public Mmio
{
/**
* Core timer
*/
class Epit_base : public Mmio
{
protected:
protected:
enum { TICS_PER_MS = 32 };
enum { TICS_PER_MS = 32 };
/**
* Control register
*/
struct Cr : Register<0x0, 32>
/**
* Control register
*/
struct Cr : Register<0x0, 32>
{
struct En : Bitfield<0, 1> { }; /* enable timer */
struct En_mod : Bitfield<1, 1> /* reload on enable */
{
struct En : Bitfield<0, 1> { }; /* enable timer */
enum { RELOAD = 1 };
};
struct En_mod : Bitfield<1, 1> /* reload on enable */
{
enum { RELOAD = 1 };
};
struct Oci_en : Bitfield<2, 1> { }; /* interrupt on compare */
struct Oci_en : Bitfield<2, 1> { }; /* interrupt on compare */
struct Rld : Bitfield<3, 1> /* reload or roll-over */
{
enum { RELOAD_FROM_LR = 1 };
};
struct Rld : Bitfield<3, 1> /* reload or roll-over */
{
enum { RELOAD_FROM_LR = 1 };
};
struct Prescaler : Bitfield<4, 12> /* clock input divisor */
{
enum { DIVIDE_BY_1 = 0 };
};
struct Prescaler : Bitfield<4, 12> /* clock input divisor */
{
enum { DIVIDE_BY_1 = 0 };
};
struct Swr : Bitfield<16, 1> { }; /* software reset bit */
struct Iovw : Bitfield<17, 1> { }; /* enable overwrite */
struct Dbg_en : Bitfield<18, 1> { }; /* enable in debug mode */
struct Wait_en : Bitfield<19, 1> { }; /* enable in wait mode */
struct Doz_en : Bitfield<20, 1> { }; /* enable in doze mode */
struct Stop_en : Bitfield<21, 1> { }; /* enable in stop mode */
struct Swr : Bitfield<16, 1> { }; /* software reset bit */
struct Iovw : Bitfield<17, 1> { }; /* enable overwrite */
struct Dbg_en : Bitfield<18, 1> { }; /* enable in debug mode */
struct Wait_en : Bitfield<19, 1> { }; /* enable in wait mode */
struct Doz_en : Bitfield<20, 1> { }; /* enable in doze mode */
struct Stop_en : Bitfield<21, 1> { }; /* enable in stop mode */
struct Om : Bitfield<22, 2> /* mode of the output pin */
{
enum { DISCONNECTED = 0 };
};
struct Om : Bitfield<22, 2> /* mode of the output pin */
{
enum { DISCONNECTED = 0 };
};
struct Clk_src : Bitfield<24, 2> /* select clock input */
{
enum { IPG_CLK_32K = 3 };
};
/**
* Register value that configures the timer for a one-shot run
*/
static access_t prepare_one_shot()
{
return En::bits(0) |
En_mod::bits(En_mod::RELOAD) |
Oci_en::bits(1) |
Rld::bits(Rld::RELOAD_FROM_LR) |
Prescaler::bits(Prescaler::DIVIDE_BY_1) |
Swr::bits(0) |
Iovw::bits(0) |
Dbg_en::bits(0) |
Wait_en::bits(0) |
Doz_en::bits(0) |
Stop_en::bits(0) |
Om::bits(Om::DISCONNECTED) |
Clk_src::bits(Clk_src::IPG_CLK_32K);
}
struct Clk_src : Bitfield<24, 2> /* select clock input */
{
enum { IPG_CLK_32K = 3 };
};
/**
* Status register
* Register value that configures the timer for a one-shot run
*/
struct Sr : Register<0x4, 32>
static access_t prepare_one_shot()
{
struct Ocif : Bitfield<0, 1> { }; /* IRQ status, write 1 clears */
};
struct Lr : Register<0x8, 32> { }; /* load value register */
struct Cmpr : Register<0xc, 32> { }; /* compare value register */
struct Cnt : Register<0x10, 32> { }; /* counter register */
/**
* Disable timer and clear its interrupt output
*/
void _reset()
{
/* wait until ongoing reset operations are finished */
while (read<Cr::Swr>()) ;
/* disable timer */
write<Cr::En>(0);
/* clear interrupt */
write<Sr::Ocif>(1);
return En::bits(0) |
En_mod::bits(En_mod::RELOAD) |
Oci_en::bits(1) |
Rld::bits(Rld::RELOAD_FROM_LR) |
Prescaler::bits(Prescaler::DIVIDE_BY_1) |
Swr::bits(0) |
Iovw::bits(0) |
Dbg_en::bits(0) |
Wait_en::bits(0) |
Doz_en::bits(0) |
Stop_en::bits(0) |
Om::bits(Om::DISCONNECTED) |
Clk_src::bits(Clk_src::IPG_CLK_32K);
}
};
void _start_one_shot(unsigned const tics)
{
/* stop timer */
_reset();
/**
* Status register
*/
struct Sr : Register<0x4, 32>
{
struct Ocif : Bitfield<0, 1> { }; /* IRQ status, write 1 clears */
};
/* configure timer for a one-shot */
write<Cr>(Cr::prepare_one_shot());
write<Lr>(tics);
write<Cmpr>(0);
struct Lr : Register<0x8, 32> { }; /* load value register */
struct Cmpr : Register<0xc, 32> { }; /* compare value register */
struct Cnt : Register<0x10, 32> { }; /* counter register */
/* start timer */
write<Cr::En>(1);
}
/**
* Disable timer and clear its interrupt output
*/
void _reset()
{
/* wait until ongoing reset operations are finished */
while (read<Cr::Swr>()) ;
public:
/* disable timer */
write<Cr::En>(0);
/**
* Constructor
*/
Epit_base(addr_t base) : Mmio(base) { _reset(); }
/* clear interrupt */
write<Sr::Ocif>(1);
}
/**
* Start single timeout run
*
* \param tics delay of timer interrupt
*/
void start_one_shot(unsigned const tics, unsigned)
{
_start_one_shot(tics);
}
void _start_one_shot(unsigned const tics)
{
/* stop timer */
_reset();
/**
* Stop the timer from a one-shot run
*
* \return last native timer value of the one-shot run
*/
unsigned long stop_one_shot()
{
/* disable timer */
write<Cr::En>(0);
return value(0);
}
/* configure timer for a one-shot */
write<Cr>(Cr::prepare_one_shot());
write<Lr>(tics);
write<Cmpr>(0);
/**
* Translate milliseconds to a native timer value
*/
unsigned ms_to_tics(unsigned const ms)
{
return TICS_PER_MS * ms;
}
/* start timer */
write<Cr::En>(1);
}
/**
* Translate native timer value to milliseconds
*/
unsigned tics_to_ms(unsigned const tics)
{
return tics / TICS_PER_MS;
}
public:
/**
* Return current native timer value
*/
unsigned value(unsigned const)
{
return read<Sr::Ocif>() ? 0 : read<Cnt>();
}
};
}
/**
* Constructor
*/
Epit_base(addr_t base) : Mmio(base) { _reset(); }
/**
* Start single timeout run
*
* \param tics delay of timer interrupt
*/
void start_one_shot(unsigned const tics, unsigned)
{
_start_one_shot(tics);
}
/**
* Stop the timer from a one-shot run
*
* \return last native timer value of the one-shot run
*/
unsigned long stop_one_shot()
{
/* disable timer */
write<Cr::En>(0);
return value(0);
}
/**
* Translate milliseconds to a native timer value
*/
unsigned ms_to_tics(unsigned const ms)
{
return TICS_PER_MS * ms;
}
/**
* Translate native timer value to milliseconds
*/
unsigned tics_to_ms(unsigned const tics)
{
return tics / TICS_PER_MS;
}
/**
* Return current native timer value
*/
unsigned value(unsigned const)
{
return read<Sr::Ocif>() ? 0 : read<Cnt>();
}
};
#endif /* _INCLUDE__DRIVERS__TIMER__EPIT_BASE_H_ */
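To illustrate the one-shot protocol implemented above: the constructor resets the timer, 'ms_to_tics' converts the desired delay, and 'start_one_shot' arms the counter. The MMIO base address below is a hypothetical, platform-specific value that would normally come from the board definition.

#include <drivers/timer/epit_base.h>

void timer_example()
{
	enum { EPIT_MMIO_BASE = 0x53f94000 };  /* hypothetical base address */

	static Genode::Epit_base timer(EPIT_MMIO_BASE);

	/* arm a 10-ms one-shot timeout (IRQ wiring not shown) */
	timer.start_one_shot(timer.ms_to_tics(10), 0);

	/* later, e.g., when handling the timer IRQ */
	unsigned long const counter = timer.stop_one_shot();
	(void)counter;
}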


@ -17,230 +17,230 @@
/* Genode includes */
#include <util/mmio.h>
namespace Genode
namespace Genode { class Exynos_uart_base; }
/**
* Exynos UART driver base
*/
class Genode::Exynos_uart_base : Mmio
{
/**
* Exynos UART driver base
*/
class Exynos_uart_base : Mmio
{
protected:
protected:
/**
* Line control
*/
struct Ulcon : Register<0x0, 32>
{
struct Word_length : Bitfield<0, 2> { enum { _8_BIT = 3 }; };
struct Stop_bits : Bitfield<2, 1> { enum { _1_BIT = 0 }; };
struct Parity_mode : Bitfield<3, 3> { enum { NONE = 0 }; };
struct Infrared_mode : Bitfield<6, 1> { };
/**
* Line control
* Initialization value
*/
struct Ulcon : Register<0x0, 32>
static access_t init_value()
{
struct Word_length : Bitfield<0, 2> { enum { _8_BIT = 3 }; };
struct Stop_bits : Bitfield<2, 1> { enum { _1_BIT = 0 }; };
struct Parity_mode : Bitfield<3, 3> { enum { NONE = 0 }; };
struct Infrared_mode : Bitfield<6, 1> { };
/**
* Initialization value
*/
static access_t init_value()
{
return Word_length::bits(Word_length::_8_BIT) |
Stop_bits::bits(Stop_bits::_1_BIT) |
Parity_mode::bits(Parity_mode::NONE) |
Infrared_mode::bits(0);
}
};
/**
* Control
*/
struct Ucon : Register<0x4, 32>
{
struct Receive_mode : Bitfield<0, 2> { enum { IRQ_POLL = 1 }; };
struct Transmit_mode : Bitfield<2, 2> { enum { IRQ_POLL = 1 }; };
struct Send_brk_signal : Bitfield<4, 1> { };
struct Loop_back_mode : Bitfield<5, 1> { };
struct Rx_err_irq : Bitfield<6, 1> { };
struct Rx_timeout : Bitfield<7, 1> { };
struct Rx_irq_type : Bitfield<8, 1> { enum { LEVEL = 1 }; };
struct Tx_irq_type : Bitfield<9, 1> { enum { LEVEL = 1 }; };
struct Rx_to_dma_susp : Bitfield<10, 1> { };
struct Rx_to_empty_rx : Bitfield<11, 1> { };
struct Rx_to_interval : Bitfield<12, 4> { };
struct Rx_dma_bst_size : Bitfield<16, 3> { };
struct Tx_dma_bst_size : Bitfield<20, 3> { };
/**
* Initialization value
*/
static access_t init_value()
{
return Receive_mode::bits(Receive_mode::IRQ_POLL) |
Transmit_mode::bits(Transmit_mode::IRQ_POLL) |
Rx_timeout::bits(1);
}
};
/**
* FIFO control
*/
struct Ufcon : Register<0x8, 32>
{
struct Fifo_en : Bitfield<0, 1> { };
struct Rx_fifo_rst : Bitfield<1, 1> { };
struct Tx_fifo_rst : Bitfield<2, 1> { };
};
/**
* Modem control
*/
struct Umcon : Register<0xc, 32>
{
struct Send_request : Bitfield<0, 1> { };
struct Modem_irq : Bitfield<3, 1> { };
struct Auto_flow_ctl : Bitfield<4, 1> { };
struct Rts_trigger : Bitfield<5, 3> { };
/**
* Initialization value
*/
static access_t init_value()
{
return Send_request::bits(0) |
Modem_irq::bits(0) |
Auto_flow_ctl::bits(0) |
Rts_trigger::bits(0);
}
};
/**
* FIFO status
*/
struct Ufstat : Register<0x18, 32>
{
struct Rx_fifo_count : Bitfield<0, 8> { };
struct Rx_fifo_full : Bitfield<8, 1> { };
struct Tx_fifo_full : Bitfield<24, 1> { };
};
/**
* Transmit buffer
*/
struct Utxh : Register<0x20, 32>
{
struct Transmit_data : Bitfield<0, 8> { };
};
/**
* Receive buffer
*/
struct Urxh : Register<0x24, 32>
{
struct Receive_data : Bitfield<0, 8> { };
};
/**
* Baud Rate Divisor
*/
struct Ubrdiv : Register<0x28, 32>
{
struct Baud_rate_div : Bitfield<0, 16> { };
};
/**
* Fractional part of Baud Rate Divisor
*/
struct Ufracval : Register<0x2c, 32>
{
struct Baud_rate_frac : Bitfield<0, 4> { };
};
/**
* Interrupt mask register
*/
template <unsigned OFF>
struct Uintx : Register<OFF, 32>
{
struct Rxd : Register<OFF, 32>::template Bitfield<0, 1> { };
struct Error : Register<OFF, 32>::template Bitfield<1, 1> { };
struct Txd : Register<OFF, 32>::template Bitfield<2, 1> { };
struct Modem : Register<OFF, 32>::template Bitfield<3, 1> { };
};
using Uintp = Uintx<0x30>;
using Uintm = Uintx<0x38>;
void _rx_enable()
{
write<Ufcon::Fifo_en>(1);
/* mask all IRQs except receive IRQ */
write<Uintm>(Uintm::Error::bits(1) |
Uintm::Txd::bits(1) |
Uintm::Modem::bits(1));
/* clear pending IRQs */
write<Uintp>(Uintp::Rxd::bits(1) |
Uintp::Error::bits(1) |
Uintp::Txd::bits(1) |
Uintp::Modem::bits(1));
return Word_length::bits(Word_length::_8_BIT) |
Stop_bits::bits(Stop_bits::_1_BIT) |
Parity_mode::bits(Parity_mode::NONE) |
Infrared_mode::bits(0);
}
};
bool _rx_avail() {
return (read<Ufstat>() & (Ufstat::Rx_fifo_count::bits(0xff)
| Ufstat::Rx_fifo_full::bits(1))); }
/**
* Control
*/
struct Ucon : Register<0x4, 32>
{
struct Receive_mode : Bitfield<0, 2> { enum { IRQ_POLL = 1 }; };
struct Transmit_mode : Bitfield<2, 2> { enum { IRQ_POLL = 1 }; };
struct Send_brk_signal : Bitfield<4, 1> { };
struct Loop_back_mode : Bitfield<5, 1> { };
struct Rx_err_irq : Bitfield<6, 1> { };
struct Rx_timeout : Bitfield<7, 1> { };
struct Rx_irq_type : Bitfield<8, 1> { enum { LEVEL = 1 }; };
struct Tx_irq_type : Bitfield<9, 1> { enum { LEVEL = 1 }; };
struct Rx_to_dma_susp : Bitfield<10, 1> { };
struct Rx_to_empty_rx : Bitfield<11, 1> { };
struct Rx_to_interval : Bitfield<12, 4> { };
struct Rx_dma_bst_size : Bitfield<16, 3> { };
struct Tx_dma_bst_size : Bitfield<20, 3> { };
/**
* Return character received via UART
* Initialization value
*/
char _rx_char()
static access_t init_value()
{
read<Ufcon>();
char c = read<Urxh::Receive_data>();
/* clear pending RX IRQ */
write<Uintp>(Uintp::Rxd::bits(1));
return c;
return Receive_mode::bits(Receive_mode::IRQ_POLL) |
Transmit_mode::bits(Transmit_mode::IRQ_POLL) |
Rx_timeout::bits(1);
}
};
public:
/**
* FIFO control
*/
struct Ufcon : Register<0x8, 32>
{
struct Fifo_en : Bitfield<0, 1> { };
struct Rx_fifo_rst : Bitfield<1, 1> { };
struct Tx_fifo_rst : Bitfield<2, 1> { };
};
/**
* Modem control
*/
struct Umcon : Register<0xc, 32>
{
struct Send_request : Bitfield<0, 1> { };
struct Modem_irq : Bitfield<3, 1> { };
struct Auto_flow_ctl : Bitfield<4, 1> { };
struct Rts_trigger : Bitfield<5, 3> { };
/**
* Constructor
*
* \param base MMIO base address
* \param clock reference clock
* \param baud_rate targeted baud rate
* Initialization value
*/
Exynos_uart_base(addr_t const base, unsigned const clock,
unsigned const baud_rate) : Mmio(base)
static access_t init_value()
{
/* RX and TX FIFO reset */
write<Ufcon::Rx_fifo_rst>(1);
write<Ufcon::Tx_fifo_rst>(1);
while (read<Ufcon::Rx_fifo_rst>() || read<Ufcon::Tx_fifo_rst>()) ;
/* init control registers */
write<Ulcon>(Ulcon::init_value());
write<Ucon>(Ucon::init_value());
write<Umcon>(Umcon::init_value());
/* apply baud rate */
float const div_val = ((float)clock / (baud_rate * 16)) - 1;
Ubrdiv::access_t const ubrdiv = div_val;
Ufracval::access_t const ufracval =
((float)div_val - ubrdiv) * 16;
write<Ubrdiv::Baud_rate_div>(ubrdiv);
write<Ufracval::Baud_rate_frac>(ufracval);
return Send_request::bits(0) |
Modem_irq::bits(0) |
Auto_flow_ctl::bits(0) |
Rts_trigger::bits(0);
}
};
/**
* Print character 'c' through the UART
*/
void put_char(char const c)
{
while (read<Ufstat::Tx_fifo_full>()) ;
write<Utxh::Transmit_data>(c);
}
};
}
/**
* FIFO status
*/
struct Ufstat : Register<0x18, 32>
{
struct Rx_fifo_count : Bitfield<0, 8> { };
struct Rx_fifo_full : Bitfield<8, 1> { };
struct Tx_fifo_full : Bitfield<24, 1> { };
};
/**
* Transmit buffer
*/
struct Utxh : Register<0x20, 32>
{
struct Transmit_data : Bitfield<0, 8> { };
};
/**
* Receive buffer
*/
struct Urxh : Register<0x24, 32>
{
struct Receive_data : Bitfield<0, 8> { };
};
/**
* Baud Rate Divisor
*/
struct Ubrdiv : Register<0x28, 32>
{
struct Baud_rate_div : Bitfield<0, 16> { };
};
/**
* Fractional part of Baud Rate Divisor
*/
struct Ufracval : Register<0x2c, 32>
{
struct Baud_rate_frac : Bitfield<0, 4> { };
};
/**
* Interrupt mask register
*/
template <unsigned OFF>
struct Uintx : Register<OFF, 32>
{
struct Rxd : Register<OFF, 32>::template Bitfield<0, 1> { };
struct Error : Register<OFF, 32>::template Bitfield<1, 1> { };
struct Txd : Register<OFF, 32>::template Bitfield<2, 1> { };
struct Modem : Register<OFF, 32>::template Bitfield<3, 1> { };
};
using Uintp = Uintx<0x30>;
using Uintm = Uintx<0x38>;
void _rx_enable()
{
write<Ufcon::Fifo_en>(1);
/* mask all IRQs except receive IRQ */
write<Uintm>(Uintm::Error::bits(1) |
Uintm::Txd::bits(1) |
Uintm::Modem::bits(1));
/* clear pending IRQs */
write<Uintp>(Uintp::Rxd::bits(1) |
Uintp::Error::bits(1) |
Uintp::Txd::bits(1) |
Uintp::Modem::bits(1));
}
bool _rx_avail() {
return (read<Ufstat>() & (Ufstat::Rx_fifo_count::bits(0xff)
| Ufstat::Rx_fifo_full::bits(1))); }
/**
* Return character received via UART
*/
char _rx_char()
{
read<Ufcon>();
char c = read<Urxh::Receive_data>();
/* clear pending RX IRQ */
write<Uintp>(Uintp::Rxd::bits(1));
return c;
}
public:
/**
* Constructor
*
* \param base MMIO base address
* \param clock reference clock
* \param baud_rate targeted baud rate
*/
Exynos_uart_base(addr_t const base, unsigned const clock,
unsigned const baud_rate) : Mmio(base)
{
/* RX and TX FIFO reset */
write<Ufcon::Rx_fifo_rst>(1);
write<Ufcon::Tx_fifo_rst>(1);
while (read<Ufcon::Rx_fifo_rst>() || read<Ufcon::Tx_fifo_rst>()) ;
/* init control registers */
write<Ulcon>(Ulcon::init_value());
write<Ucon>(Ucon::init_value());
write<Umcon>(Umcon::init_value());
/* apply baud rate */
float const div_val = ((float)clock / (baud_rate * 16)) - 1;
Ubrdiv::access_t const ubrdiv = div_val;
Ufracval::access_t const ufracval =
((float)div_val - ubrdiv) * 16;
write<Ubrdiv::Baud_rate_div>(ubrdiv);
write<Ufracval::Baud_rate_frac>(ufracval);
}
/**
* Print character 'c' through the UART
*/
void put_char(char const c)
{
while (read<Ufstat::Tx_fifo_full>()) ;
write<Utxh::Transmit_data>(c);
}
};
#endif /* _INCLUDE__DRIVERS__UART__EXYNOS_UART_BASE_H_ */
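The baud-rate setup in the constructor is easiest to check with concrete numbers. Assuming a hypothetical 100 MHz reference clock and 115200 baud, div_val = 100e6 / (115200 * 16) - 1 ≈ 53.25, so UBRDIV becomes 53 and UFRACVAL becomes 4. The MMIO base below is likewise a placeholder.

#include <drivers/uart/exynos_uart_base.h>

void uart_example()
{
	enum { UART_MMIO_BASE = 0x12c20000,     /* hypothetical base address   */
	       CLOCK          = 100*1000*1000,  /* assumed 100 MHz input clock */
	       BAUD_RATE      = 115200 };

	static Genode::Exynos_uart_base uart(UART_MMIO_BASE, CLOCK, BAUD_RATE);

	uart.put_char('G');
}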


@ -18,257 +18,257 @@
/* Genode includes */
#include <util/mmio.h>
namespace Genode
namespace Genode { class Imx_uart_base; }
/**
* Driver base for i.MX UART-module
*/
class Genode::Imx_uart_base : Mmio
{
/**
* Driver base for i.MX UART-module
* Control register 1
*/
class Imx_uart_base : Mmio
struct Cr1 : Register<0x80, 32>
{
/**
* Control register 1
*/
struct Cr1 : Register<0x80, 32>
struct Uart_en : Bitfield<0, 1> { }; /* enable UART */
struct Doze : Bitfield<1, 1> { }; /* disable on doze */
struct At_dma_en : Bitfield<2, 1> { }; /* aging DMA
* timer on */
struct Tx_dma_en : Bitfield<3, 1> { }; /* TX ready DMA on */
struct Snd_brk : Bitfield<4, 1> { }; /* send breaks */
struct Rtsd_en : Bitfield<5, 1> { }; /* RTS delta IRQ on */
struct Tx_mpty_en : Bitfield<6, 1> { }; /* TX empty IRQ on */
struct Ir_en : Bitfield<7, 1> { }; /* enable infrared */
struct Rx_dma_en : Bitfield<8, 1> { }; /* RX ready DMA on */
struct R_rdy_en : Bitfield<9, 1> { }; /* RX ready IRQ on */
struct Icd : Bitfield<10, 2> /* idle IRQ condition */
{
struct Uart_en : Bitfield<0, 1> { }; /* enable UART */
struct Doze : Bitfield<1, 1> { }; /* disable on doze */
struct At_dma_en : Bitfield<2, 1> { }; /* aging DMA
* timer on */
struct Tx_dma_en : Bitfield<3, 1> { }; /* TX ready DMA on */
struct Snd_brk : Bitfield<4, 1> { }; /* send breaks */
struct Rtsd_en : Bitfield<5, 1> { }; /* RTS delta IRQ on */
struct Tx_mpty_en : Bitfield<6, 1> { }; /* TX empty IRQ on */
struct Ir_en : Bitfield<7, 1> { }; /* enable infrared */
struct Rx_dma_en : Bitfield<8, 1> { }; /* RX ready DMA on */
struct R_rdy_en : Bitfield<9, 1> { }; /* RX ready IRQ on */
struct Icd : Bitfield<10, 2> /* idle IRQ condition */
{
enum { IDLE_4_FRAMES = 0 };
};
struct Id_en : Bitfield<12, 1> { }; /* enable idle IRQ */
struct T_rdy_en : Bitfield<13, 1> { }; /* TX ready IRQ on */
struct Adbr : Bitfield<14, 1> { }; /* enable baud-rate
* auto detect */
struct Ad_en : Bitfield<15, 1> { }; /* enable ADBR IRQ */
/**
 * Initialization value
 */
static access_t init_value()
{
return Uart_en::bits(1) |
Doze::bits(0) |
At_dma_en::bits(0) |
Tx_dma_en::bits(0) |
Snd_brk::bits(0) |
Rtsd_en::bits(0) |
Tx_mpty_en::bits(0) |
Ir_en::bits(0) |
Rx_dma_en::bits(0) |
R_rdy_en::bits(0) |
Id_en::bits(0) |
T_rdy_en::bits(0) |
Adbr::bits(0) |
Ad_en::bits(0);
}
};
/**
* Control register 2
*/
struct Cr2 : Register<0x84, 32>
{
struct S_rst : Bitfield<0, 1> /* SW reset bit */
{
enum { NO_RESET = 1 };
};
struct Rx_en : Bitfield<1, 1> { }; /* enable receiver */
struct Tx_en : Bitfield<2, 1> { }; /* enable transmitter */
struct At_en : Bitfield<3, 1> { }; /* enable aging timer */
struct Rts_en : Bitfield<4, 1> { }; /* send request IRQ on */
struct Ws : Bitfield<5, 1> /* select word size */
{
enum { _8_BITS = 1 };
};
struct Stpb : Bitfield<6, 1> /* number of stop bits */
{
enum { _1_BIT = 0 };
};
struct Pr_en : Bitfield<8, 1> { }; /* enable parity */
struct Esc_en : Bitfield<11, 1> { }; /* escape detection on */
struct Ctsc : Bitfield<13, 1> /* select CTS control */
{
enum { BY_RECEIVER = 1 };
};
struct Irts : Bitfield<14, 1> { }; /* ignore RTS pin */
struct Esci : Bitfield<15, 1> { }; /* enable escape IRQ */
/**
* Initialization value
*/
static access_t init_value()
{
return S_rst::bits(S_rst::NO_RESET) |
Rx_en::bits(0) |
Tx_en::bits(1) |
At_en::bits(0) |
Rts_en::bits(0) |
Ws::bits(Ws::_8_BITS) |
Stpb::bits(Stpb::_1_BIT) |
Pr_en::bits(0) |
Esc_en::bits(0) |
Ctsc::bits(Ctsc::BY_RECEIVER) |
Irts::bits(1) |
Esci::bits(0);
}
};
/**
* Control register 3
*/
struct Cr3 : Register<0x88, 32>
{
struct Rxdmux_sel : Bitfield<2, 1> { }; /* use muxed RXD */
struct Aci_en : Bitfield<0, 1> { }; /* autobaud count IRQ on */
struct Dtrd_en : Bitfield<3, 1> { }; /* data terminal ready
* delta IRQ on */
struct Awak_en : Bitfield<4, 1> { }; /* wake IRQ on */
struct Air_int_en : Bitfield<5, 1> { }; /* IR wake IRQ on */
struct Rx_ds_en : Bitfield<6, 1> { }; /* RX status IRQ on */
struct Ad_nimp : Bitfield<7, 1> { }; /* autobaud detect off */
struct Ri_en : Bitfield<8, 1> { }; /* ring indicator IRQ on */
struct Dcd_en : Bitfield<9, 1> { }; /* data carrier detect
* IRQ on */
struct Dsr : Bitfield<10,1> { }; /* DSR/DTR output */
struct Frame_en : Bitfield<11,1> { }; /* frame error IRQ on */
struct Parity_en : Bitfield<12,1> { }; /* parity error IRQ on */
struct Dtr_en : Bitfield<13,1> { }; /* data terminal ready
* IRQ on */
struct Dpec_ctrl : Bitfield<14,2> { }; /* DTR/DSR IRQ edge
* control */
/**
* Initialization value
*/
static access_t init_value()
{
return Aci_en::bits(0) |
Rxdmux_sel::bits(0) |
Dtrd_en::bits(0) |
Awak_en::bits(0) |
Air_int_en::bits(0) |
Rx_ds_en::bits(0) |
Ad_nimp::bits(1) |
Ri_en::bits(0) |
Dcd_en::bits(0) |
Dsr::bits(0) |
Frame_en::bits(0) |
Parity_en::bits(0) |
Dtr_en::bits(0) |
Dpec_ctrl::bits(0);
}
};
/**
* Control register 4
*/
struct Cr4 : Register<0x8c, 32>
{
struct Dr_en : Bitfield<0, 1> { }; /* RX data ready IRQ on */
struct Or_en : Bitfield<1, 1> { }; /* RX overrun IRQ on */
struct Bk_en : Bitfield<2, 1> { }; /* BREAK IRQ on */
struct Tc_en : Bitfield<3, 1> { }; /* TX complete IRQ on */
struct Lp_dis : Bitfield<4, 1> { }; /* low power off */
struct IR_sc : Bitfield<5, 1> { }; /* use UART ref clock
* for vote logic */
struct Id_dma_en : Bitfield<6, 1> { }; /* idle DMA IRQ on */
struct Wake_en : Bitfield<7, 1> { }; /* WAKE IRQ on */
struct IR_en : Bitfield<8, 1> { }; /* serial IR IRQ on */
struct Cts_level : Bitfield<10,6> { }; /* CTS trigger level*/
/**
* Initialization value
*/
static access_t init_value()
{
return Dr_en::bits(0) |
Or_en::bits(0) |
Bk_en::bits(0) |
Tc_en::bits(0) |
Lp_dis::bits(0) |
IR_sc::bits(0) |
Id_dma_en::bits(0) |
Wake_en::bits(0) |
IR_en::bits(0) |
Cts_level::bits(0);
}
};
/**
* Status register 2
*/
struct Sr2 : Register<0x98, 32>
{
struct Txdc : Bitfield<3, 1> { }; /* transmission complete */
};
/**
* Transmitter register
*/
struct Txd : Register<0x40, 32>
{
struct Tx_data : Bitfield<0, 8> { }; /* transmit data */
};
/**
* Transmit character 'c' without care about its type
*/
inline void _put_char(char const c)
{
while (!read<Sr2::Txdc>()) ;
write<Txd::Tx_data>(c);
}
public:
/**
* Constructor
*
* \param base device MMIO base
*/
explicit Imx_uart_base(addr_t const base) : Mmio(base)
{
write<Cr1>(Cr1::init_value());
write<Cr2>(Cr2::init_value());
write<Cr3>(Cr3::init_value());
write<Cr4>(Cr4::init_value());
}
/**
* Print character 'c' through the UART
*/
inline void put_char(char const c)
{
enum { ASCII_LINE_FEED = 10,
ASCII_CARRIAGE_RETURN = 13 };
/* prepend line feed with carriage return */
if (c == ASCII_LINE_FEED) _put_char(ASCII_CARRIAGE_RETURN);
/* transmit character */
_put_char(c);
}
};
#endif /* _INCLUDE__DRIVERS__UART__IMX_UART_BASE_H_ */
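Editor's note: a minimal usage sketch may help to see how this driver base is meant to be consumed. The derived class, its name, and the MMIO address (UART 1 of the i.MX53 board definitions further below) are illustrative assumptions and not part of the commit; the register window must actually be accessible at that address (e.g., inside core or the kernel).

/* illustrative only: 0x53fbc000 is UART_1_MMIO_BASE of the i.MX53 board header */
struct Debug_uart : Genode::Imx_uart_base
{
	Debug_uart() : Imx_uart_base(0x53fbc000) { }

	void print(char const *s) { while (*s) put_char(*s++); }
};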


@ -17,116 +17,116 @@
/* Genode includes */
#include <util/mmio.h>
namespace Genode { class Pl011_base; }
/**
* Driver base for the PrimeCell UART PL011 Revision r1p3
*/
class Genode::Pl011_base : Mmio
{
protected:
enum { MAX_BAUD_RATE = 0xfffffff };
/**
* Data register
*/
struct Uartdr : public Register<0x00, 16>
{
struct Data : Bitfield<0,8> { };
struct Fe : Bitfield<8,1> { };
struct Pe : Bitfield<9,1> { };
struct Be : Bitfield<10,1> { };
struct Oe : Bitfield<11,1> { };
};
/**
* Flag register
*/
struct Uartfr : public Register<0x18, 16>
{
struct Cts : Bitfield<0,1> { };
struct Dsr : Bitfield<1,1> { };
struct Dcd : Bitfield<2,1> { };
struct Busy : Bitfield<3,1> { };
struct Rxfe : Bitfield<4,1> { };
struct Txff : Bitfield<5,1> { };
struct Rxff : Bitfield<6,1> { };
struct Txfe : Bitfield<7,1> { };
struct Ri : Bitfield<8,1> { };
};
/**
* Integer baud rate register
*/
struct Uartibrd : public Register<0x24, 16>
{
struct Ibrd : Bitfield<0,15> { };
};
/**
* Fractional Baud Rate Register
*/
struct Uartfbrd : public Register<0x28, 8>
{
struct Fbrd : Bitfield<0,6> { };
};
/**
* Line Control Register
*/
struct Uartlcrh : public Register<0x2c, 16>
{
struct Wlen : Bitfield<5,2> {
enum {
WORD_LENGTH_8BITS = 3,
WORD_LENGTH_7BITS = 2,
WORD_LENGTH_6BITS = 1,
WORD_LENGTH_5BITS = 0,
};
};
};
/**
* Control Register
*/
struct Uartcr : public Register<0x30, 16>
{
struct Uarten : Bitfield<0,1> { };
struct Txe : Bitfield<8,1> { };
struct Rxe : Bitfield<9,1> { };
};
/**
* Interrupt Mask Set/Clear
*/
struct Uartimsc : public Register<0x38, 16>
{
struct Imsc : Bitfield<0,11> { };
};
/**
* Idle until the device is ready for action
*/
void _wait_until_ready() { while (read<Uartfr::Busy>()) ; }
public:
/**
* Constructor
* \param base device MMIO base
* \param clock device reference clock frequency
* \param baud_rate targeted UART baud rate
*/
inline Pl011_base(addr_t const base, uint32_t const clock,
uint32_t const baud_rate);
/**
* Send ASCII char 'c' over the UART interface
*/
inline void put_char(char const c);
};
Genode::Pl011_base::Pl011_base(addr_t const base, uint32_t const clock,


@ -17,231 +17,231 @@
/* Genode includes */
#include <util/mmio.h>
namespace Genode { class Tl16c750_base; }
/**
* Base driver for the Texas Instruments TL16C750 UART module
*
* In contrast to the abilities of the TL16C750, this driver targets only
* the basic UART functionalities.
*/
class Genode::Tl16c750_base : public Mmio
{
protected:
/**
* Least significant divisor part
*/
struct Uart_dll : Register<0x0, 32>
{
struct Clock_lsb : Bitfield<0, 8> { };
};
/**
* Transmit holding register
*/
struct Uart_thr : Register<0x0, 32>
{
struct Thr : Bitfield<0, 8> { };
};
/**
* Receiver holding register
*/
struct Uart_rhr : Register<0x0, 32>
{
struct Rhr : Bitfield<0, 8> { };
};
/**
* Most significant divisor part
*/
struct Uart_dlh : Register<0x4, 32>
{
struct Clock_msb : Bitfield<0, 6> { };
};
/**
* Interrupt enable register
*/
struct Uart_ier : Register<0x4, 32>
{
struct Rhr_it : Bitfield<0, 1> { };
struct Thr_it : Bitfield<1, 1> { };
struct Line_sts_it : Bitfield<2, 1> { };
struct Modem_sts_it : Bitfield<3, 1> { };
struct Sleep_mode : Bitfield<4, 1> { };
struct Xoff_it : Bitfield<5, 1> { };
struct Rts_it : Bitfield<6, 1> { };
struct Cts_it : Bitfield<7, 1> { };
};
/**
* Interrupt identification register
*/
struct Uart_iir : Register<0x8, 32>
{
struct It_pending : Bitfield<0, 1> { };
};
/**
* FIFO control register
*/
struct Uart_fcr : Register<0x8, 32>
{
struct Fifo_enable : Bitfield<0, 1> { };
};
/**
* Line control register
*/
struct Uart_lcr : Register<0xc, 32>
{
struct Char_length : Bitfield<0, 2>
{
enum { _8_BIT = 3 };
};
struct Nb_stop : Bitfield<2, 1>
{
enum { _1_STOP_BIT = 0 };
};
struct Parity_en : Bitfield<3, 1> { };
struct Break_en : Bitfield<6, 1> { };
struct Div_en : Bitfield<7, 1> { };
struct Reg_mode : Bitfield<0, 8>
{
enum { OPERATIONAL = 0, CONFIG_A = 0x80, CONFIG_B = 0xbf };
};
};
/**
* Modem control register
*/
struct Uart_mcr : Register<0x10, 32>
{
struct Tcr_tlr : Bitfield<6, 1> { };
};
/**
* Line status register
*/
struct Uart_lsr : Register<0x14, 32>
{
struct Rx_fifo_empty : Bitfield<0, 1> { };
struct Tx_fifo_empty : Bitfield<5, 1> { };
};
/**
* Mode definition register 1
*/
struct Uart_mdr1 : Register<0x20, 32>
{
struct Mode_select : Bitfield<0, 3>
{
enum { UART_16X = 0, DISABLED = 7 };
};
};
/**
* System control register
*/
struct Uart_sysc : Register<0x54, 32>
{
struct Softreset : Bitfield<1, 1> { };
};
/**
* System status register
*/
struct Uart_syss : Register<0x58, 32>
{
struct Resetdone : Bitfield<0, 1> { };
};
void _init(unsigned long const clock, unsigned long const baud_rate)
{
/* disable UART */
write<Uart_mdr1::Mode_select>(Uart_mdr1::Mode_select::DISABLED);
/* enable access to 'Uart_fcr' and 'Uart_ier' */
write<Uart_lcr::Reg_mode>(Uart_lcr::Reg_mode::OPERATIONAL);
/*
* Configure FIFOs, we don't use any interrupts or DMA,
* thus FIFO trigger and DMA configurations are dispensable.
*/
write<Uart_fcr::Fifo_enable>(1);
/* disable interrupts and sleep mode */
write<Uart_ier>(Uart_ier::Rhr_it::bits(0)
| Uart_ier::Thr_it::bits(0)
| Uart_ier::Line_sts_it::bits(0)
| Uart_ier::Modem_sts_it::bits(0)
| Uart_ier::Sleep_mode::bits(0)
| Uart_ier::Xoff_it::bits(0)
| Uart_ier::Rts_it::bits(0)
| Uart_ier::Cts_it::bits(0));
/* enable access to 'Uart_dlh' and 'Uart_dll' */
write<Uart_lcr::Reg_mode>(Uart_lcr::Reg_mode::CONFIG_B);
/*
* Load the new divisor value (this driver solely uses
* 'UART_16X' mode)
*/
enum { UART_16X_DIVIDER_LOG2 = 4 };
unsigned long const adjusted_br = baud_rate << UART_16X_DIVIDER_LOG2;
double const divisor = (double)clock / adjusted_br;
unsigned long const divisor_uint = (unsigned long)divisor;
write<Uart_dll::Clock_lsb>(divisor_uint);
write<Uart_dlh::Clock_msb>(divisor_uint>>Uart_dll::Clock_lsb::WIDTH);
/*
 * Configure protocol formatting and thereby return to
 * operational mode.
 */
write<Uart_lcr>(Uart_lcr::Char_length::bits(Uart_lcr::Char_length::_8_BIT)
| Uart_lcr::Nb_stop::bits(Uart_lcr::Nb_stop::_1_STOP_BIT)
| Uart_lcr::Parity_en::bits(0)
| Uart_lcr::Break_en::bits(0)
| Uart_lcr::Div_en::bits(0));

/*
 * Switch to UART mode, we don't use hardware or software flow
 * control, thus according configurations are dispensable
 */
write<Uart_mdr1::Mode_select>(Uart_mdr1::Mode_select::UART_16X);
}

public:

/**
 * Constructor
 *
 * \param base MMIO base address
 * \param clock reference clock
 * \param baud_rate targeted baud rate
 */
Tl16c750_base(addr_t const base, unsigned long const clock,
unsigned long const baud_rate) : Mmio(base)
{
/* reset and init UART */
write<Uart_sysc::Softreset>(1);
while (!read<Uart_syss::Resetdone>()) ;
_init(clock, baud_rate);
}

/**
 * Transmit ASCII char 'c'
 */
void put_char(char const c)
{
/* wait as long as the transmission buffer is full */
while (!read<Uart_lsr::Tx_fifo_empty>()) ;

/* transmit character */
write<Uart_thr::Thr>(c);
}
};
#endif /* _INCLUDE__DRIVERS__UART__TL16C750_BASE_H_ */
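Editor's note: to make the divisor setup in '_init' concrete, here is a worked example; the reference-clock value is an assumption chosen for illustration.

/* assuming a 48 MHz reference clock and a target of 115200 baud */
enum { UART_16X_DIVIDER_LOG2 = 4 };
unsigned long const clock     = 48000000;
unsigned long const baud_rate = 115200;
unsigned long const divisor   = clock / (baud_rate << UART_16X_DIVIDER_LOG2);
/* divisor == 26, i.e., Uart_dll = 26 and Uart_dlh = 0; the effective rate
   is 48000000 / (26 * 16) = 115385 baud, about 0.16 % above the target */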


@ -17,15 +17,15 @@
#include <io_mem_session/capability.h>
#include <base/rpc_client.h>
namespace Genode { struct Io_mem_session_client; }

struct Genode::Io_mem_session_client : Rpc_client<Io_mem_session>
{
explicit Io_mem_session_client(Io_mem_session_capability session)
: Rpc_client<Io_mem_session>(session) { }
Io_mem_dataspace_capability dataspace() override { return call<Rpc_dataspace>(); }
};
#endif /* _INCLUDE__IO_MEM_SESSION__CLIENT_H_ */


@ -17,26 +17,26 @@
#include <io_mem_session/client.h>
#include <base/connection.h>
namespace Genode { struct Io_mem_connection; }

struct Genode::Io_mem_connection : Connection<Io_mem_session>, Io_mem_session_client
{
/**
* Constructor
*
* \param base physical base address of memory-mapped I/O resource
* \param size size memory-mapped I/O resource
* \param write_combined enable write-combined access to I/O memory
*/
Io_mem_connection(addr_t base, size_t size, bool write_combined = false)
:
Connection<Io_mem_session>(
session("ram_quota=4K, base=0x%p, size=0x%zx, wc=%s",
base, size, write_combined ? "yes" : "no")),
Io_mem_session_client(cap())
{ }
};
#endif /* _INCLUDE__IO_MEM_SESSION__CONNECTION_H_ */
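Editor's note: an illustrative usage sketch of the IO_MEM connection, using the Genode API of this era; the physical address is only an example (GPIO 1 of the i.MX53 board header below), and the function name is made up for illustration.

#include <io_mem_session/connection.h>
#include <base/env.h>

static void map_device_registers()
{
	Genode::Io_mem_connection io_mem(0x53f84000, 0x4000);
	Genode::Io_mem_dataspace_capability ds = io_mem.dataspace();

	/* make the device registers visible in the local address space */
	void *regs = Genode::env()->rm_session()->attach(ds);
	(void)regs;
}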


@ -19,33 +19,37 @@
namespace Genode {

struct Io_mem_dataspace;
struct Io_mem_session;

typedef Capability<Io_mem_dataspace> Io_mem_dataspace_capability;
}

struct Genode::Io_mem_dataspace : Dataspace { };
struct Genode::Io_mem_session : Session
{
static const char *service_name() { return "IO_MEM"; }
virtual ~Io_mem_session() { }
/**
* Request dataspace containing the IO_MEM session data
*
* \return capability to IO_MEM dataspace
* (may be invalid)
*/
virtual Io_mem_dataspace_capability dataspace() = 0;
/*********************
** RPC declaration **
*********************/
GENODE_RPC(Rpc_dataspace, Io_mem_dataspace_capability, dataspace);
GENODE_RPC_INTERFACE(Rpc_dataspace);
};
#endif /* _INCLUDE__IO_MEM_SESSION__IO_MEM_SESSION_H_ */


@ -17,31 +17,31 @@
#include <io_port_session/capability.h>
#include <base/rpc_client.h>
namespace Genode { struct Io_port_session_client; }


struct Genode::Io_port_session_client : Rpc_client<Io_port_session>
{
explicit Io_port_session_client(Io_port_session_capability session)
: Rpc_client<Io_port_session>(session) { }

unsigned char inb(unsigned short address) override {
return call<Rpc_inb>(address); }

unsigned short inw(unsigned short address) override {
return call<Rpc_inw>(address); }

unsigned inl(unsigned short address) override {
return call<Rpc_inl>(address); }

void outb(unsigned short address, unsigned char value) override {
call<Rpc_outb>(address, value); }

void outw(unsigned short address, unsigned short value) override {
call<Rpc_outw>(address, value); }

void outl(unsigned short address, unsigned value) override {
call<Rpc_outl>(address, value); }
};
#endif /* _INCLUDE__IO_PORT_SESSION__CLIENT_H_ */


@ -17,26 +17,26 @@
#include <io_port_session/client.h>
#include <base/connection.h>
namespace Genode { struct Io_port_connection; }

struct Genode::Io_port_connection : Connection<Io_port_session>,
Io_port_session_client
{
/**
* Constructor
*
* \param base base address of port range
* \param size size of port range
*/
Io_port_connection(unsigned base, unsigned size)
:
Connection<Io_port_session>(
session("ram_quota=4K, io_port_base=%u, io_port_size=%u",
base, size)),
Io_port_session_client(cap())
{ }
};
#endif /* _INCLUDE__IO_PORT_SESSION__CONNECTION_H_ */
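Editor's note: a small sketch of how the IO_PORT connection is typically used; the port range (a legacy PC serial port) and the register offsets are illustrative assumptions.

#include <io_port_session/connection.h>

static void poke_com1()
{
	Genode::Io_port_connection com1(0x3f8, 8);

	unsigned char const lsr = com1.inb(0x3f8 + 5);  /* line-status register */
	if (lsr & 0x20)                                 /* transmit buffer empty */
		com1.outb(0x3f8, 'x');
}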


@ -28,89 +28,89 @@
#include <base/capability.h>
#include <session/session.h>
namespace Genode { struct Io_port_session; }
struct Genode::Io_port_session : Session
{
static const char *service_name() { return "IO_PORT"; }
virtual ~Io_port_session() { }
/******************************
** Read value from I/O port **
******************************/
/**
* Read byte (8 bit)
*
* \param address physical I/O port address
*
* \return value read from port
*/
virtual unsigned char inb(unsigned short address) = 0;
/**
* Read word (16 bit)
*
* \param address physical I/O port address
*
* \return value read from port
*/
virtual unsigned short inw(unsigned short address) = 0;
/**
* Read double word (32 bit)
*
* \param address physical I/O port address
*
* \return value read from port
*/
virtual unsigned inl(unsigned short address) = 0;
/*****************************
** Write value to I/O port **
*****************************/
/**
* Write byte (8 bit)
*
* \param address physical I/O port address
* \param value value to write to port
*/
virtual void outb(unsigned short address, unsigned char value) = 0;
/**
* Write word (16 bit)
*
* \param address physical I/O port address
* \param value value to write to port
*/
virtual void outw(unsigned short address, unsigned short value) = 0;
/**
* Write double word (32 bit)
*
* \param address physical I/O port address
* \param value value to write to port
*/
virtual void outl(unsigned short address, unsigned value) = 0;
/*********************
** RPC declaration **
*********************/
GENODE_RPC(Rpc_inb, unsigned char, inb, unsigned short);
GENODE_RPC(Rpc_inw, unsigned short, inw, unsigned short);
GENODE_RPC(Rpc_inl, unsigned, inl, unsigned short);
GENODE_RPC(Rpc_outb, void, outb, unsigned short, unsigned char);
GENODE_RPC(Rpc_outw, void, outw, unsigned short, unsigned short);
GENODE_RPC(Rpc_outl, void, outl, unsigned short, unsigned);
GENODE_RPC_INTERFACE(Rpc_inb, Rpc_inw, Rpc_inl, Rpc_outb, Rpc_outw, Rpc_outl);
};
#endif /* _INCLUDE__IO_PORT_SESSION__IO_PORT_SESSION_H_ */


@ -17,15 +17,15 @@
#include <irq_session/capability.h>
#include <base/rpc_client.h>
namespace Genode { struct Irq_session_client; }

struct Genode::Irq_session_client : Rpc_client<Irq_session>
{
explicit Irq_session_client(Irq_session_capability session)
: Rpc_client<Irq_session>(session) { }
void wait_for_irq() override { call<Rpc_wait_for_irq>(); }
};
#endif /* _INCLUDE__IRQ_SESSION__CLIENT_H_ */


@ -17,27 +17,26 @@
#include <irq_session/client.h>
#include <base/connection.h>
namespace Genode { struct Irq_connection; }

struct Genode::Irq_connection : Connection<Irq_session>, Irq_session_client
{
/**
* Constructor
*
* \param irq physical interrupt number
* \param trigger interrupt trigger (e.g., level/edge)
* \param polarity interrupt trigger polarity (e.g., low/high)
*/
Irq_connection(unsigned irq,
Irq_session::Trigger trigger = Irq_session::TRIGGER_UNCHANGED,
Irq_session::Polarity polarity = Irq_session::POLARITY_UNCHANGED)
:
Connection<Irq_session>(
session("ram_quota=4K, irq_number=%u, irq_trigger=%u, irq_polarity=%u",
irq, trigger, polarity)),
Irq_session_client(cap())
{ }
};
#endif /* _INCLUDE__IRQ_SESSION__CONNECTION_H_ */
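Editor's note: a sketch of the blocking IRQ loop that drivers of this era build on top of the IRQ connection; the interrupt number is an arbitrary example.

#include <irq_session/connection.h>

static void irq_loop()
{
	Genode::Irq_connection irq(64);

	while (true) {
		irq.wait_for_irq();   /* block until the next interrupt occurs */
		/* ... read device status and handle the condition ... */
	}
}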


@ -24,35 +24,34 @@
#include <base/capability.h>
#include <session/session.h>
namespace Genode { struct Irq_session; }

struct Genode::Irq_session : Session
{
/**
* Interrupt trigger
*/
enum Trigger { TRIGGER_UNCHANGED = 0, TRIGGER_LEVEL, TRIGGER_EDGE };
/**
* Interrupt trigger polarity
*/
enum Polarity { POLARITY_UNCHANGED = 0, POLARITY_HIGH, POLARITY_LOW };
static const char *service_name() { return "IRQ"; }
virtual ~Irq_session() { }
virtual void wait_for_irq() = 0;
/*********************
** RPC declaration **
*********************/
GENODE_RPC(Rpc_wait_for_irq, void, wait_for_irq);
GENODE_RPC_INTERFACE(Rpc_wait_for_irq);
};
#endif /* _INCLUDE__IRQ_SESSION__IRQ_SESSION_H_ */


@ -17,16 +17,16 @@
#include <log_session/capability.h>
#include <base/rpc_client.h>
namespace Genode { struct Log_session_client; }

struct Genode::Log_session_client : Rpc_client<Log_session>
{
explicit Log_session_client(Log_session_capability session)
: Rpc_client<Log_session>(session) { }
size_t write(String const &string) override {
return call<Rpc_write>(string); }
};
#endif /* _INCLUDE__LOG_SESSION__CLIENT_H_ */


@ -17,16 +17,16 @@
#include <log_session/client.h>
#include <base/connection.h>
namespace Genode { struct Log_connection; }

struct Genode::Log_connection : Connection<Log_session>, Log_session_client
{
Log_connection()
:
Connection<Log_session>(session("ram_quota=8K")),
Log_session_client(cap())
{ }
};
#endif /* _INCLUDE__LOG_SESSION__CONNECTION_H_ */


@ -19,31 +19,31 @@
#include <base/rpc_args.h>
#include <session/session.h>
namespace Genode { struct Log_session; }

struct Genode::Log_session : Session
{
static const char *service_name() { return "LOG"; }
virtual ~Log_session() { }
typedef Rpc_in_buffer<256> String;
/**
* Output null-terminated string
*
* \return number of written characters
*/
virtual size_t write(String const &string) = 0;
/*********************
** RPC declaration **
*********************/
GENODE_RPC(Rpc_write, size_t, write, String const &);
GENODE_RPC_INTERFACE(Rpc_write);
};
#endif /* _INCLUDE__LOG_SESSION__LOG_SESSION_H_ */
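Editor's note: writing directly to a LOG session looks roughly as sketched below; in practice this happens behind printf and the log macros, so the snippet is only an assumption-laden illustration.

#include <log_session/connection.h>

static void say_hello()
{
	Genode::Log_connection log;
	Genode::Log_session::String const msg("hello world\n");
	log.write(msg);   /* returns the number of written characters */
}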


@ -17,44 +17,44 @@
#include <parent/capability.h>
#include <base/rpc_client.h>
namespace Genode { struct Parent_client; }


struct Genode::Parent_client : Rpc_client<Parent>
{
explicit Parent_client(Parent_capability parent)
: Rpc_client<Parent>(parent) { }

void exit(int exit_value) override { call<Rpc_exit>(exit_value); }

void announce(Service_name const &service, Root_capability root) override {
call<Rpc_announce>(service, root); }

Session_capability session(Service_name const &service,
Session_args const &args,
Affinity const &affinity) override {
return call<Rpc_session>(service, args, affinity); }

void upgrade(Session_capability to_session, Upgrade_args const &args) override {
call<Rpc_upgrade>(to_session, args); }

void close(Session_capability session) override { call<Rpc_close>(session); }

Thread_capability main_thread_cap() const override {
return call<Rpc_main_thread>(); }

void resource_avail_sigh(Signal_context_capability sigh) override {
call<Rpc_resource_avail_sigh>(sigh); }

void resource_request(Resource_args const &args) override {
call<Rpc_resource_request>(args); }

void yield_sigh(Signal_context_capability sigh) override {
call<Rpc_yield_sigh>(sigh); }

Resource_args yield_request() override { return call<Rpc_yield_request>(); }

void yield_response() override { call<Rpc_yield_response>(); }
};
#endif /* _INCLUDE__PARENT__CLIENT_H_ */


@ -21,249 +21,249 @@
#include <session/capability.h>
#include <root/capability.h>
namespace Genode {
namespace Genode { class Parent; }
class Genode::Parent
{
private:
/**
* Recursively announce inherited service interfaces
*
* At compile time, the 'ROOT' type is inspected for the presence
* of the 'Rpc_inherited_interface' type in the corresponding
* session interface. If present, the session type gets announced.
* This works recursively.
*/
template <typename ROOT>
void _announce_base(Capability<ROOT> const &, Meta::Bool_to_type<false> *) { }
/*
* This overload gets selected if the ROOT interface corresponds to
* an inherited session type.
*/
template <typename ROOT>
inline void _announce_base(Capability<ROOT> const &, Meta::Bool_to_type<true> *);
public:
/*********************
** Exception types **
*********************/
class Exception : public ::Genode::Exception { };
class Service_denied : public Exception { };
class Quota_exceeded : public Exception { };
class Unavailable : public Exception { };
typedef Rpc_in_buffer<64> Service_name;
typedef Rpc_in_buffer<160> Session_args;
typedef Rpc_in_buffer<160> Upgrade_args;
/**
* Use 'String' instead of 'Rpc_in_buffer' because 'Resource_args'
* is used as both in and out parameter.
*/
typedef String<160> Resource_args;
virtual ~Parent() { }
/**
* Tell parent to exit the program
*/
virtual void exit(int exit_value) = 0;
/**
* Announce service to the parent
*/
virtual void announce(Service_name const &service_name,
Root_capability service_root) = 0;
/**
 * Announce service to the parent
 *
 * \param service_root  root capability
 *
 * The type of the specified 'service_root' capability must match with
 * an interface that provides a 'Session_type' type (i.e., a
 * 'Typed_root' interface). This 'Session_type' is expected to
 * host a static function called 'service_name' returning the
 * name of the provided interface as null-terminated string.
 */
template <typename ROOT_INTERFACE>
void announce(Capability<ROOT_INTERFACE> const &service_root)
{
typedef typename ROOT_INTERFACE::Session_type Session;
announce(Session::service_name(), service_root);

/*
 * Announce inherited session types
 *
 * Select the overload based on the presence of the type
 * 'Rpc_inherited_interface' within the session type.
 */
_announce_base(service_root,
(Meta::Bool_to_type<Rpc_interface_is_inherited<Session>::VALUE> *)0);
}
/**
* Create session to a service
*
* \param service_name name of the requested interface
* \param args session constructor arguments
* \param affinity preferred CPU affinity for the session
*
* \throw Service_denied parent denies session request
* \throw Quota_exceeded our own quota does not suffice for
* the creation of the new session
* \throw Unavailable
*
* \return untyped capability to new session
*
* The use of this function is discouraged. Please use the type safe
* 'session()' template instead.
*/
virtual Session_capability session(Service_name const &service_name,
Session_args const &args,
Affinity const &affinity = Affinity()) = 0;
/**
* Create session to a service
*
* \param SESSION_TYPE session interface type
* \param args session constructor arguments
* \param affinity preferred CPU affinity for the session
*
* \throw Service_denied parent denies session request
* \throw Quota_exceeded our own quota does not suffice for
* the creation of the new session
* \throw Unavailable
*
* \return capability to new session
*/
template <typename SESSION_TYPE>
Capability<SESSION_TYPE> session(Session_args const &args,
Affinity const &affinity = Affinity())
{
Session_capability cap = session(SESSION_TYPE::service_name(),
args, affinity);
return reinterpret_cap_cast<SESSION_TYPE>(cap);
}
/**
* Transfer our quota to the server that provides the specified session
*
* \param to_session recipient session
* \param args description of the amount of quota to transfer
*
* \throw Quota_exceeded quota could not be transferred
*
* The 'args' argument has the same principle format as the 'args'
* argument of the 'session' function.
* The error case indicates that there is not enough unused quota on
* the source side.
*/
virtual void upgrade(Session_capability to_session,
Upgrade_args const &args) = 0;
/**
* Close session
*/
virtual void close(Session_capability session) = 0;
/**
* Provide thread_cap of main thread
*/
virtual Thread_capability main_thread_cap() const = 0;
/**
* Register signal handler for resource notifications
*/
virtual void resource_avail_sigh(Signal_context_capability sigh) = 0;
/**
* Request additional resources
*
* By invoking this function, a process is able to inform its
* parent about the need for additional resources. The argument
* string contains a resource description in the same format as
* used for session-construction arguments. In particular, for
* requesting additional RAM quota, the argument looks like
* "ram_quota=<amount>" where 'amount' is the amount of additional
* resources expected from the parent. If the parent complies with
* the request, it submits a resource-available signal to the
* handler registered via 'resource_avail_sigh()'. On the reception
* of such a signal, the process can re-evaluate its resource quota
* and resume execution.
*/
virtual void resource_request(Resource_args const &args) = 0;
/**
* Register signal handler for resource yield notifications
*
* Using the yield signal, the parent is able to inform the process
* about its wish to regain resources.
*/
virtual void yield_sigh(Signal_context_capability sigh) = 0;
/**
* Obtain information about the amount of resources to free
*
* The amount of resources returned by this function is the
* goal set by the parent. It is not commanded but merely meant
* as a friendly beg to cooperate. The process is not obligated
* to comply. If the process decides to take action to free
* resources, it can inform its parent about the availability
* of freed up resources by calling 'yield_response()'.
*/
virtual Resource_args yield_request() = 0;
/**
* Notify the parent about a response to a yield request
*/
virtual void yield_response() = 0;
/*********************
** RPC declaration **
*********************/
GENODE_RPC(Rpc_exit, void, exit, int);
GENODE_RPC(Rpc_announce, void, announce,
Service_name const &, Root_capability);
GENODE_RPC_THROW(Rpc_session, Session_capability, session,
GENODE_TYPE_LIST(Service_denied, Quota_exceeded, Unavailable),
Service_name const &, Session_args const &, Affinity const &);
GENODE_RPC_THROW(Rpc_upgrade, void, upgrade,
GENODE_TYPE_LIST(Quota_exceeded),
Session_capability, Upgrade_args const &);
GENODE_RPC(Rpc_close, void, close, Session_capability);
GENODE_RPC(Rpc_main_thread, Thread_capability, main_thread_cap);
GENODE_RPC(Rpc_resource_avail_sigh, void, resource_avail_sigh,
Signal_context_capability);
GENODE_RPC(Rpc_resource_request, void, resource_request,
Resource_args const &);
GENODE_RPC(Rpc_yield_sigh, void, yield_sigh, Signal_context_capability);
GENODE_RPC(Rpc_yield_request, Resource_args, yield_request);
GENODE_RPC(Rpc_yield_response, void, yield_response);
typedef Meta::Type_tuple<Rpc_exit,
Meta::Type_tuple<Rpc_announce,
Meta::Type_tuple<Rpc_session,
Meta::Type_tuple<Rpc_upgrade,
Meta::Type_tuple<Rpc_close,
Meta::Type_tuple<Rpc_main_thread,
Meta::Type_tuple<Rpc_resource_avail_sigh,
Meta::Type_tuple<Rpc_resource_request,
Meta::Type_tuple<Rpc_yield_sigh,
Meta::Type_tuple<Rpc_yield_request,
Meta::Type_tuple<Rpc_yield_response,
Meta::Empty>
> > > > > > > > > > Rpc_functions;
};
template <typename ROOT_INTERFACE>
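Editor's note: the resource-request protocol documented in the interface above can be sketched as follows; the signal-receiver wiring and the quota amount are assumptions about how a client might use it, not prescribed by the commit.

#include <base/env.h>
#include <base/signal.h>

static void request_more_ram()
{
	static Genode::Signal_receiver sig_rec;
	static Genode::Signal_context  sig_ctx;

	Genode::Parent *parent = Genode::env()->parent();

	parent->resource_avail_sigh(sig_rec.manage(&sig_ctx));
	parent->resource_request(Genode::Parent::Resource_args("ram_quota=1M"));

	sig_rec.wait_for_signal();  /* parent granted (a part of) the request */
	/* now re-evaluate the RAM quota and resume normal operation */
}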


@ -17,21 +17,21 @@
#include <pd_session/capability.h>
#include <base/rpc_client.h>
namespace Genode { struct Pd_session_client; }


struct Genode::Pd_session_client : Rpc_client<Pd_session>
{
explicit Pd_session_client(Pd_session_capability session)
: Rpc_client<Pd_session>(session) { }

int bind_thread(Thread_capability thread) override {
return call<Rpc_bind_thread>(thread); }

int assign_parent(Parent_capability parent) override {
return call<Rpc_assign_parent>(parent); }

bool assign_pci(addr_t) { return false; }
};
#endif /* _INCLUDE__PD_SESSION__CLIENT_H_ */


@ -17,21 +17,21 @@
#include <pd_session/client.h>
#include <base/connection.h>
namespace Genode { struct Pd_connection; }

struct Genode::Pd_connection : Connection<Pd_session>, Pd_session_client
{
/**
* Constructor
*
* \param label session label
*/
Pd_connection(char const *label = "", Native_pd_args const *pd_args = 0)
:
Connection<Pd_session>(session("ram_quota=4K, label=\"%s\"", label)),
Pd_session_client(cap())
{ }
};
#endif /* _INCLUDE__PD_SESSION__CONNECTION_H_ */


@ -20,44 +20,44 @@
#include <parent/capability.h>
#include <session/session.h>
namespace Genode { struct Pd_session; }

struct Genode::Pd_session : Session
{
static const char *service_name() { return "PD"; }
virtual ~Pd_session() { }
/**
* Bind thread to protection domain
*
* \param thread capability of thread to bind
*
* \return 0 on success or negative error code
*
* After successful bind, the thread will execute inside this
* protection domain when started.
*/
virtual int bind_thread(Thread_capability thread) = 0;
/**
* Assign parent to protection domain
*
* \param parent capability of parent interface
* \return 0 on success, or negative error code
*/
virtual int assign_parent(Parent_capability parent) = 0;
/*********************
** RPC declaration **
*********************/
GENODE_RPC(Rpc_bind_thread, int, bind_thread, Thread_capability);
GENODE_RPC(Rpc_assign_parent, int, assign_parent, Parent_capability);
GENODE_RPC_INTERFACE(Rpc_bind_thread, Rpc_assign_parent);
};
#endif /* _INCLUDE__PD_SESSION__PD_SESSION_H_ */
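Editor's note: a simplified sketch of how a parent might wire up a new protection domain during process creation; the capability arguments, label, and function name are assumptions for illustration only.

#include <pd_session/connection.h>

static void setup_pd(Genode::Thread_capability thread_cap,
                     Genode::Parent_capability parent_cap)
{
	Genode::Pd_connection pd("my_child");

	pd.bind_thread(thread_cap);     /* thread will execute inside this PD */
	pd.assign_parent(parent_cap);   /* give the child access to its parent */
}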


@ -17,34 +17,34 @@
/* Genode includes */
#include <platform_exynos5/board_base.h>
namespace Genode
namespace Genode { struct Board_base; }
/**
* Board driver base
*/
struct Genode::Board_base : Exynos5
{
/**
* Board driver base
*/
struct Board_base : Exynos5
enum
{
enum
{
/* clock management unit */
CMU_MMIO_BASE = 0x10010000,
CMU_MMIO_SIZE = 0x24000,
/* clock management unit */
CMU_MMIO_BASE = 0x10010000,
CMU_MMIO_SIZE = 0x24000,
/* power management unit */
PMU_MMIO_BASE = 0x10040000,
PMU_MMIO_SIZE = 0x5000,
/* power management unit */
PMU_MMIO_BASE = 0x10040000,
PMU_MMIO_SIZE = 0x5000,
/* USB */
USB_HOST20_IRQ = 103,
USB_DRD30_IRQ = 104,
/* USB */
USB_HOST20_IRQ = 103,
USB_DRD30_IRQ = 104,
/* UART */
UART_2_CLOCK = 100000000,
/* UART */
UART_2_CLOCK = 100000000,
/* whether board provides security extension */
SECURITY_EXTENSION = 1,
};
/* whether board provides security extension */
SECURITY_EXTENSION = 1,
};
}
};
#endif /* _INCLUDE__DRIVERS__BOARD_BASE_H_ */


@ -14,104 +14,104 @@
#ifndef _INCLUDE__PLATFORM__IMX53__DRIVERS__BOARD_BASE_SUPPORT_H_
#define _INCLUDE__PLATFORM__IMX53__DRIVERS__BOARD_BASE_SUPPORT_H_
namespace Imx53
namespace Imx53 { struct Board_base; }
/**
* i.MX53 motherboard
*/
struct Imx53::Board_base
{
/**
* i.MX53 motherboard
*/
struct Board_base
{
enum {
MMIO_BASE = 0x0,
MMIO_SIZE = 0x70000000,
enum {
MMIO_BASE = 0x0,
MMIO_SIZE = 0x70000000,
UART_1_IRQ = 31,
UART_1_MMIO_BASE = 0x53fbc000,
UART_1_MMIO_SIZE = 0x00004000,
UART_1_IRQ = 31,
UART_1_MMIO_BASE = 0x53fbc000,
UART_1_MMIO_SIZE = 0x00004000,
EPIT_1_IRQ = 40,
EPIT_1_MMIO_BASE = 0x53fac000,
EPIT_1_MMIO_SIZE = 0x00004000,
EPIT_1_IRQ = 40,
EPIT_1_MMIO_BASE = 0x53fac000,
EPIT_1_MMIO_SIZE = 0x00004000,
EPIT_2_IRQ = 41,
EPIT_2_MMIO_BASE = 0x53fb0000,
EPIT_2_MMIO_SIZE = 0x00004000,
EPIT_2_IRQ = 41,
EPIT_2_MMIO_BASE = 0x53fb0000,
EPIT_2_MMIO_SIZE = 0x00004000,
GPIO1_MMIO_BASE = 0x53f84000,
GPIO1_MMIO_SIZE = 0x4000,
GPIO2_MMIO_BASE = 0x53f88000,
GPIO2_MMIO_SIZE = 0x4000,
GPIO3_MMIO_BASE = 0x53f8c000,
GPIO3_MMIO_SIZE = 0x4000,
GPIO4_MMIO_BASE = 0x53f90000,
GPIO4_MMIO_SIZE = 0x4000,
GPIO5_MMIO_BASE = 0x53fdc000,
GPIO5_MMIO_SIZE = 0x4000,
GPIO6_MMIO_BASE = 0x53fe0000,
GPIO6_MMIO_SIZE = 0x4000,
GPIO7_MMIO_BASE = 0x53fe4000,
GPIO7_MMIO_SIZE = 0x4000,
GPIO1_IRQL = 50,
GPIO1_IRQH = 51,
GPIO2_IRQL = 52,
GPIO2_IRQH = 53,
GPIO3_IRQL = 54,
GPIO3_IRQH = 55,
GPIO4_IRQL = 56,
GPIO4_IRQH = 57,
GPIO5_IRQL = 103,
GPIO5_IRQH = 104,
GPIO6_IRQL = 105,
GPIO6_IRQH = 106,
GPIO7_IRQL = 107,
GPIO7_IRQH = 108,
GPIO1_MMIO_BASE = 0x53f84000,
GPIO1_MMIO_SIZE = 0x4000,
GPIO2_MMIO_BASE = 0x53f88000,
GPIO2_MMIO_SIZE = 0x4000,
GPIO3_MMIO_BASE = 0x53f8c000,
GPIO3_MMIO_SIZE = 0x4000,
GPIO4_MMIO_BASE = 0x53f90000,
GPIO4_MMIO_SIZE = 0x4000,
GPIO5_MMIO_BASE = 0x53fdc000,
GPIO5_MMIO_SIZE = 0x4000,
GPIO6_MMIO_BASE = 0x53fe0000,
GPIO6_MMIO_SIZE = 0x4000,
GPIO7_MMIO_BASE = 0x53fe4000,
GPIO7_MMIO_SIZE = 0x4000,
GPIO1_IRQL = 50,
GPIO1_IRQH = 51,
GPIO2_IRQL = 52,
GPIO2_IRQH = 53,
GPIO3_IRQL = 54,
GPIO3_IRQH = 55,
GPIO4_IRQL = 56,
GPIO4_IRQH = 57,
GPIO5_IRQL = 103,
GPIO5_IRQH = 104,
GPIO6_IRQL = 105,
GPIO6_IRQH = 106,
GPIO7_IRQL = 107,
GPIO7_IRQH = 108,
IRQ_CONTROLLER_BASE = 0x0fffc000,
IRQ_CONTROLLER_SIZE = 0x00004000,
IRQ_CONTROLLER_BASE = 0x0fffc000,
IRQ_CONTROLLER_SIZE = 0x00004000,
AIPS_1_MMIO_BASE = 0x53f00000,
AIPS_2_MMIO_BASE = 0x63f00000,
AIPS_1_MMIO_BASE = 0x53f00000,
AIPS_2_MMIO_BASE = 0x63f00000,
IOMUXC_BASE = 0x53fa8000,
IOMUXC_SIZE = 0x00004000,
IOMUXC_BASE = 0x53fa8000,
IOMUXC_SIZE = 0x00004000,
PWM2_BASE = 0x53fb8000,
PWM2_SIZE = 0x00004000,
PWM2_BASE = 0x53fb8000,
PWM2_SIZE = 0x00004000,
IPU_BASE = 0x18000000,
IPU_SIZE = 0x08000000,
IPU_BASE = 0x18000000,
IPU_SIZE = 0x08000000,
SRC_BASE = 0x53fd0000,
SRC_SIZE = 0x00004000,
SRC_BASE = 0x53fd0000,
SRC_SIZE = 0x00004000,
CCM_BASE = 0x53FD4000,
CCM_SIZE = 0x00004000,
CCM_BASE = 0x53FD4000,
CCM_SIZE = 0x00004000,
I2C_2_IRQ = 63,
I2C_2_BASE = 0x63fc4000,
I2C_2_SIZE = 0x00004000,
I2C_2_IRQ = 63,
I2C_2_BASE = 0x63fc4000,
I2C_2_SIZE = 0x00004000,
I2C_3_IRQ = 64,
I2C_3_BASE = 0x53fec000,
I2C_3_SIZE = 0x00004000,
I2C_3_IRQ = 64,
I2C_3_BASE = 0x53fec000,
I2C_3_SIZE = 0x00004000,
IIM_BASE = 0x63f98000,
IIM_SIZE = 0x00004000,
IIM_BASE = 0x63f98000,
IIM_SIZE = 0x00004000,
CSU_BASE = 0x63f9c000,
CSU_SIZE = 0x00001000,
CSU_BASE = 0x63f9c000,
CSU_SIZE = 0x00001000,
M4IF_BASE = 0x63fd8000,
M4IF_SIZE = 0x00001000,
M4IF_BASE = 0x63fd8000,
M4IF_SIZE = 0x00001000,
/* whether board provides security extension */
SECURITY_EXTENSION = 1,
/* whether board provides security extension */
SECURITY_EXTENSION = 1,
/* CPU cache */
CACHE_LINE_SIZE_LOG2 = 6,
};
/* CPU cache */
CACHE_LINE_SIZE_LOG2 = 6,
};
}
};
#endif /* _INCLUDE__PLATFORM__IMX53__DRIVERS__BOARD_BASE_SUPPORT_H_ */


@ -17,21 +17,21 @@
/* Genode includes */
#include <platform/imx53/drivers/board_base_support.h>
namespace Genode
namespace Genode { struct Board_base; }
/**
* i.MX53 starter board
*/
struct Genode::Board_base : Imx53::Board_base
{
/**
* i.MX53 starter board
*/
struct Board_base : Imx53::Board_base
{
enum {
RAM0_BASE = 0x70000000,
RAM0_SIZE = 0x20000000,
RAM1_BASE = 0xb0000000,
RAM1_SIZE = 0x20000000,
};
enum {
RAM0_BASE = 0x70000000,
RAM0_SIZE = 0x20000000,
RAM1_BASE = 0xb0000000,
RAM1_SIZE = 0x20000000,
};
}
};
#endif /* _INCLUDE__PLATFORM__IMX53_QSB__DRIVERS__BOARD_BASE_H_ */


@ -17,22 +17,22 @@
/* Genode includes */
#include <platform_exynos5/board_base.h>
namespace Genode
{
/**
* Board driver base
*/
struct Board_base : Exynos5
{
enum
{
/* UART */
UART_2_CLOCK = 62668800,
namespace Genode { struct Board_base; }
/* whether board provides security extension */
SECURITY_EXTENSION = 0,
};
/**
* Board driver base
*/
struct Genode::Board_base : Exynos5
{
enum
{
/* UART */
UART_2_CLOCK = 62668800,
/* whether board provides security extension */
SECURITY_EXTENSION = 0,
};
}
};
#endif /* _INCLUDE__DRIVERS__BOARD_BASE_H_ */


@ -14,95 +14,94 @@
#ifndef _INCLUDE__DRIVERS__BOARD_BASE_H_
#define _INCLUDE__DRIVERS__BOARD_BASE_H_
namespace Genode
namespace Genode { struct Board_base; }
/**
* Driver for the OMAP4 PandaBoard revision A2
*/
struct Genode::Board_base
{
/**
* Driver for the OMAP4 PandaBoard revision A2
*/
struct Board_base
{
enum
{
/* device IO memory */
MMIO_0_BASE = 0x48000000,
MMIO_0_SIZE = 0x01000000,
MMIO_1_BASE = 0x4a000000,
MMIO_1_SIZE = 0x01000000,
enum {
/* normal RAM */
RAM_0_BASE = 0x80000000,
RAM_0_SIZE = 0x40000000,
/* device IO memory */
MMIO_0_BASE = 0x48000000,
MMIO_0_SIZE = 0x01000000,
MMIO_1_BASE = 0x4a000000,
MMIO_1_SIZE = 0x01000000,
/* clocks */
SYS_CLK = 38400000,
/* normal RAM */
RAM_0_BASE = 0x80000000,
RAM_0_SIZE = 0x40000000,
/* UART controllers */
TL16C750_1_MMIO_BASE = MMIO_0_BASE + 0x6a000,
TL16C750_2_MMIO_BASE = MMIO_0_BASE + 0x6c000,
TL16C750_3_MMIO_BASE = MMIO_0_BASE + 0x20000,
TL16C750_4_MMIO_BASE = MMIO_0_BASE + 0x6e000,
TL16C750_MMIO_SIZE = 0x2000,
TL16C750_CLOCK = 48*1000*1000,
TL16C750_1_IRQ = 104,
TL16C750_2_IRQ = 105,
TL16C750_3_IRQ = 106,
TL16C750_4_IRQ = 102,
/* clocks */
SYS_CLK = 38400000,
/* USB */
HSUSB_EHCI_IRQ = 109,
/* UART controllers */
TL16C750_1_MMIO_BASE = MMIO_0_BASE + 0x6a000,
TL16C750_2_MMIO_BASE = MMIO_0_BASE + 0x6c000,
TL16C750_3_MMIO_BASE = MMIO_0_BASE + 0x20000,
TL16C750_4_MMIO_BASE = MMIO_0_BASE + 0x6e000,
TL16C750_MMIO_SIZE = 0x2000,
TL16C750_CLOCK = 48*1000*1000,
TL16C750_1_IRQ = 104,
TL16C750_2_IRQ = 105,
TL16C750_3_IRQ = 106,
TL16C750_4_IRQ = 102,
/* timer modules */
GP_TIMER_3_IRQ = 71,
/* USB */
HSUSB_EHCI_IRQ = 109,
/* CPU */
CORTEX_A9_PRIVATE_MEM_BASE = 0x48240000,
CORTEX_A9_PRIVATE_MEM_SIZE = 0x00002000,
CORTEX_A9_PRIVATE_TIMER_CLK = 350000000,
/* timer modules */
GP_TIMER_3_IRQ = 71,
/* L2 cache */
PL310_MMIO_BASE = 0x48242000,
PL310_MMIO_SIZE = 0x00001000,
/* CPU */
CORTEX_A9_PRIVATE_MEM_BASE = 0x48240000,
CORTEX_A9_PRIVATE_MEM_SIZE = 0x00002000,
CORTEX_A9_PRIVATE_TIMER_CLK = 350000000,
/* display subsystem */
DSS_MMIO_BASE = 0x58000000,
DSS_MMIO_SIZE = 0x00001000,
DISPC_MMIO_BASE = 0x58001000,
DISPC_MMIO_SIZE = 0x00001000,
HDMI_MMIO_BASE = 0x58006000,
HDMI_MMIO_SIZE = 0x00001000,
/* L2 cache */
PL310_MMIO_BASE = 0x48242000,
PL310_MMIO_SIZE = 0x00001000,
/* GPIO */
GPIO1_MMIO_BASE = 0x4a310000,
GPIO1_MMIO_SIZE = 0x1000,
GPIO1_IRQ = 29 + 32,
GPIO2_MMIO_BASE = 0x48055000,
GPIO2_MMIO_SIZE = 0x1000,
GPIO2_IRQ = 30 + 32,
GPIO3_MMIO_BASE = 0x48057000,
GPIO3_MMIO_SIZE = 0x1000,
GPIO3_IRQ = 31 + 32,
GPIO4_MMIO_BASE = 0x48059000,
GPIO4_MMIO_SIZE = 0x1000,
GPIO4_IRQ = 32 + 32,
GPIO5_MMIO_BASE = 0x4805b000,
GPIO5_MMIO_SIZE = 0x1000,
GPIO5_IRQ = 33 + 32,
GPIO6_MMIO_BASE = 0x4805d000,
GPIO6_MMIO_SIZE = 0x1000,
GPIO6_IRQ = 34 + 32,
/* display subsystem */
DSS_MMIO_BASE = 0x58000000,
DSS_MMIO_SIZE = 0x00001000,
DISPC_MMIO_BASE = 0x58001000,
DISPC_MMIO_SIZE = 0x00001000,
HDMI_MMIO_BASE = 0x58006000,
HDMI_MMIO_SIZE = 0x00001000,
/* SD card */
HSMMC_IRQ = 115,
/* GPIO */
GPIO1_MMIO_BASE = 0x4a310000,
GPIO1_MMIO_SIZE = 0x1000,
GPIO1_IRQ = 29 + 32,
GPIO2_MMIO_BASE = 0x48055000,
GPIO2_MMIO_SIZE = 0x1000,
GPIO2_IRQ = 30 + 32,
GPIO3_MMIO_BASE = 0x48057000,
GPIO3_MMIO_SIZE = 0x1000,
GPIO3_IRQ = 31 + 32,
GPIO4_MMIO_BASE = 0x48059000,
GPIO4_MMIO_SIZE = 0x1000,
GPIO4_IRQ = 32 + 32,
GPIO5_MMIO_BASE = 0x4805b000,
GPIO5_MMIO_SIZE = 0x1000,
GPIO5_IRQ = 33 + 32,
GPIO6_MMIO_BASE = 0x4805d000,
GPIO6_MMIO_SIZE = 0x1000,
GPIO6_IRQ = 34 + 32,
/* CPU cache */
CACHE_LINE_SIZE_LOG2 = 2, /* FIXME get correct value from board spec */
/* SD card */
HSMMC_IRQ = 115,
/* whether board provides security extension */
SECURITY_EXTENSION = 0,
/* CPU cache */
CACHE_LINE_SIZE_LOG2 = 2, /* FIXME get correct value from board spec */
};
/* whether board provides security extension */
SECURITY_EXTENSION = 0,
};
}
};
#endif /* _INCLUDE__DRIVERS__BOARD_BASE_H_ */


@ -14,76 +14,76 @@
#ifndef _INCLUDE__DRIVERS__BOARD_BASE_H_
#define _INCLUDE__DRIVERS__BOARD_BASE_H_
namespace Genode
namespace Genode { struct Board_base; }
/**
* Driver for the Realview PBXA9 board
*/
struct Genode::Board_base
{
/**
* Driver for the Realview PBXA9 board
*/
struct Board_base
{
enum
{
/* normal RAM */
RAM_0_BASE = 0x70000000,
RAM_0_SIZE = 0x20000000,
RAM_1_BASE = 0x20000000,
RAM_1_SIZE = 0x10000000,
enum {
/* device IO memory */
MMIO_0_BASE = 0x10000000,
MMIO_0_SIZE = 0x10000000,
MMIO_1_BASE = 0x4e000000,
MMIO_1_SIZE = 0x01000000,
/* normal RAM */
RAM_0_BASE = 0x70000000,
RAM_0_SIZE = 0x20000000,
RAM_1_BASE = 0x20000000,
RAM_1_SIZE = 0x10000000,
NORTHBRIDGE_AHB_BASE = 0x10020000,
NORTHBRIDGE_AHB_SIZE = 768*1024,
/* device IO memory */
MMIO_0_BASE = 0x10000000,
MMIO_0_SIZE = 0x10000000,
MMIO_1_BASE = 0x4e000000,
MMIO_1_SIZE = 0x01000000,
/* southbridge */
SOUTHBRIDGE_APB_BASE = 0x10000000,
SOUTHBRIDGE_APB_SIZE = 128*1024,
NORTHBRIDGE_AHB_BASE = 0x10020000,
NORTHBRIDGE_AHB_SIZE = 768*1024,
/* clocks */
OSC_6_CLOCK = 24*1000*1000,
/* southbridge */
SOUTHBRIDGE_APB_BASE = 0x10000000,
SOUTHBRIDGE_APB_SIZE = 128*1024,
/* CPU */
CORTEX_A9_PRIVATE_TIMER_CLK = 100000000,
CORTEX_A9_PRIVATE_MEM_BASE = 0x1f000000,
CORTEX_A9_PRIVATE_MEM_SIZE = 0x01000000,
/* clocks */
OSC_6_CLOCK = 24*1000*1000,
/* UART */
PL011_0_MMIO_BASE = 0x10009000,
PL011_0_MMIO_SIZE = 0x00001000,
PL011_0_CLOCK = OSC_6_CLOCK,
PL011_0_IRQ = 44,
PL011_1_IRQ = 45,
PL011_2_IRQ = 46,
PL011_3_IRQ = 47,
/* CPU */
CORTEX_A9_PRIVATE_TIMER_CLK = 100000000,
CORTEX_A9_PRIVATE_MEM_BASE = 0x1f000000,
CORTEX_A9_PRIVATE_MEM_SIZE = 0x01000000,
/* timer */
SP804_0_1_MMIO_BASE = 0x10011000,
SP804_0_1_MMIO_SIZE = 0x00001000,
SP804_0_1_IRQ = 36,
SP804_0_1_CLOCK = 1000*1000,
/* UART */
PL011_0_MMIO_BASE = 0x10009000,
PL011_0_MMIO_SIZE = 0x00001000,
PL011_0_CLOCK = OSC_6_CLOCK,
PL011_0_IRQ = 44,
PL011_1_IRQ = 45,
PL011_2_IRQ = 46,
PL011_3_IRQ = 47,
/* keyboard & mouse */
KMI_0_IRQ = 52,
KMI_1_IRQ = 53,
/* timer */
SP804_0_1_MMIO_BASE = 0x10011000,
SP804_0_1_MMIO_SIZE = 0x00001000,
SP804_0_1_IRQ = 36,
SP804_0_1_CLOCK = 1000*1000,
/* LAN */
ETHERNET_IRQ = 60,
/* keyboard & mouse */
KMI_0_IRQ = 52,
KMI_1_IRQ = 53,
/* SD card */
PL180_IRQ_0 = 49,
PL180_IRQ_1 = 50,
/* LAN */
ETHERNET_IRQ = 60,
/* CPU cache */
CACHE_LINE_SIZE_LOG2 = 2, /* FIXME get correct value from board spec */
/* SD card */
PL180_IRQ_0 = 49,
PL180_IRQ_1 = 50,
/* whether board provides security extension */
SECURITY_EXTENSION = 0,
};
/* CPU cache */
CACHE_LINE_SIZE_LOG2 = 2, /* FIXME get correct value from board spec */
/* whether board provides security extension */
SECURITY_EXTENSION = 0,
};
}
};
#endif /* _INCLUDE__DRIVERS__BOARD_BASE_H_ */


@ -17,58 +17,58 @@
/* Genode includes */
#include <util/mmio.h>
namespace Genode
namespace Genode { struct Board_base; }
struct Genode::Board_base
{
struct Board_base
{
enum {
RAM_0_BASE = 0x00000000,
RAM_0_SIZE = 0x10000000, /* XXX ? */
enum {
RAM_0_BASE = 0x00000000,
RAM_0_SIZE = 0x10000000, /* XXX ? */
MMIO_0_BASE = 0x20000000,
MMIO_0_SIZE = 0x02000000,
MMIO_0_BASE = 0x20000000,
MMIO_0_SIZE = 0x02000000,
/*
* IRQ numbers 0..7 refer to the basic IRQs.
* IRQ numbers 8..39 refer to GPU IRQs 0..31.
* IRQ numbers 40..71 refer to GPU IRQs 32..63.
*/
GPU_IRQ_BASE = 8,
/*
* IRQ numbers 0..7 refer to the basic IRQs.
* IRQ numbers 8..39 refer to GPU IRQs 0..31.
* IRQ numbers 40..71 refer to GPU IRQs 32..63.
*/
GPU_IRQ_BASE = 8,
SYSTEM_TIMER_IRQ = GPU_IRQ_BASE + 1,
SYSTEM_TIMER_MMIO_BASE = 0x20003000,
SYSTEM_TIMER_MMIO_SIZE = 0x1000,
SYSTEM_TIMER_CLOCK = 1000000,
SYSTEM_TIMER_IRQ = GPU_IRQ_BASE + 1,
SYSTEM_TIMER_MMIO_BASE = 0x20003000,
SYSTEM_TIMER_MMIO_SIZE = 0x1000,
SYSTEM_TIMER_CLOCK = 1000000,
PL011_0_IRQ = 57,
PL011_0_MMIO_BASE = 0x20201000,
PL011_0_MMIO_SIZE = 0x1000,
PL011_0_CLOCK = 3000000,
PL011_0_IRQ = 57,
PL011_0_MMIO_BASE = 0x20201000,
PL011_0_MMIO_SIZE = 0x1000,
PL011_0_CLOCK = 3000000,
IRQ_CONTROLLER_BASE = 0x2000b200,
IRQ_CONTROLLER_SIZE = 0x100,
IRQ_CONTROLLER_BASE = 0x2000b200,
IRQ_CONTROLLER_SIZE = 0x100,
USB_DWC_OTG_BASE = 0x20980000,
USB_DWC_OTG_SIZE = 0x20000,
USB_DWC_OTG_BASE = 0x20980000,
USB_DWC_OTG_SIZE = 0x20000,
/* timer */
TIMER_IRQ = 0,
/* timer */
TIMER_IRQ = 0,
/* USB host controller */
DWC_IRQ = 17,
/* USB host controller */
DWC_IRQ = 17,
SECURITY_EXTENSION = 0,
SECURITY_EXTENSION = 0,
/* CPU cache */
CACHE_LINE_SIZE_LOG2 = 5,
};
enum Videocore_cache_policy { NON_COHERENT = 0,
COHERENT = 1,
L2_ONLY = 2,
UNCACHED = 3 };
/* CPU cache */
CACHE_LINE_SIZE_LOG2 = 5,
};
}
enum Videocore_cache_policy { NON_COHERENT = 0,
COHERENT = 1,
L2_ONLY = 2,
UNCACHED = 3 };
};
#endif /* _INCLUDE__PLATFORM__BOARD_BASE_H_ */


@ -17,26 +17,26 @@
/* Genode includes */
#include <platform/imx53/drivers/board_base_support.h>
namespace Genode
namespace Genode { struct Board_base; }
/**
* i.MX53 starter board
*/
struct Genode::Board_base : Imx53::Board_base
{
/**
* i.MX53 starter board
*/
struct Board_base : Imx53::Board_base
{
enum {
/*
* These two regions are physically one RAM region but we split it
* to keep the enum names compliant with other i.MX53 boards. This
* way, more files can be shared between the platforms.
*/
RAM0_BASE = 0x70000000,
RAM0_SIZE = 0x10000000,
RAM1_BASE = 0x80000000,
RAM1_SIZE = 0x10000000,
};
enum {
/*
* These two regions are physically one RAM region but we split it
* to keep the enum names compliant with other i.MX53 boards. This
* way, more files can be shared between the platforms.
*/
RAM0_BASE = 0x70000000,
RAM0_SIZE = 0x10000000,
RAM1_BASE = 0x80000000,
RAM1_SIZE = 0x10000000,
};
}
};
#endif /* _INCLUDE__PLATFORM__IMX53_QSB__DRIVERS__BOARD_BASE_H_ */


@ -14,70 +14,70 @@
#ifndef _INCLUDE__PLATFORM__VEA9X4__DRIVERS__BOARD_BASE_H_
#define _INCLUDE__PLATFORM__VEA9X4__DRIVERS__BOARD_BASE_H_
namespace Genode
namespace Genode { struct Board_base; }
/**
* Driver for the Versatile Express A9X4 board
*
* Implies the uATX motherboard and the CoreTile Express A9X4 daughterboard
*/
struct Genode::Board_base
{
/**
* Driver for the Versatile Express A9X4 board
*
* Implies the uATX motherboard and the CoreTile Express A9X4 daughterboard
*/
struct Board_base
enum
{
enum
{
/* MMIO */
MMIO_0_BASE = 0x10000000,
MMIO_0_SIZE = 0x10000000,
MMIO_1_BASE = 0x4C000000,
MMIO_1_SIZE = 0x04000000,
/* MMIO */
MMIO_0_BASE = 0x10000000,
MMIO_0_SIZE = 0x10000000,
MMIO_1_BASE = 0x4C000000,
MMIO_1_SIZE = 0x04000000,
/* RAM */
RAM_0_BASE = 0x60000000,
RAM_0_SIZE = 0x20000000,
RAM_1_BASE = 0x84000000,
RAM_1_SIZE = 0x1c000000,
RAM_2_BASE = 0x48000000,
RAM_2_SIZE = 0x02000000,
/* RAM */
RAM_0_BASE = 0x60000000,
RAM_0_SIZE = 0x20000000,
RAM_1_BASE = 0x84000000,
RAM_1_SIZE = 0x1c000000,
RAM_2_BASE = 0x48000000,
RAM_2_SIZE = 0x02000000,
/* UART */
PL011_0_MMIO_BASE = MMIO_0_BASE + 0x9000,
PL011_0_MMIO_SIZE = 0x1000,
PL011_0_CLOCK = 24*1000*1000,
PL011_0_IRQ = 37,
PL011_1_IRQ = 38,
PL011_2_IRQ = 39,
PL011_3_IRQ = 40,
/* UART */
PL011_0_MMIO_BASE = MMIO_0_BASE + 0x9000,
PL011_0_MMIO_SIZE = 0x1000,
PL011_0_CLOCK = 24*1000*1000,
PL011_0_IRQ = 37,
PL011_1_IRQ = 38,
PL011_2_IRQ = 39,
PL011_3_IRQ = 40,
/* timer/counter */
SP804_0_1_MMIO_BASE = MMIO_0_BASE + 0x11000,
SP804_0_1_MMIO_SIZE = 0x1000,
SP804_0_1_CLOCK = 1000*1000,
SP804_0_1_IRQ = 34,
/* timer/counter */
SP804_0_1_MMIO_BASE = MMIO_0_BASE + 0x11000,
SP804_0_1_MMIO_SIZE = 0x1000,
SP804_0_1_CLOCK = 1000*1000,
SP804_0_1_IRQ = 34,
/* PS2 */
KMI_0_IRQ = 44,
KMI_1_IRQ = 45,
/* PS2 */
KMI_0_IRQ = 44,
KMI_1_IRQ = 45,
/* LAN */
LAN9118_IRQ = 47,
/* LAN */
LAN9118_IRQ = 47,
/* card reader */
PL180_0_IRQ = 9,
PL180_1_IRQ = 10,
/* card reader */
PL180_0_IRQ = 9,
PL180_1_IRQ = 10,
/* CPU */
CORTEX_A9_PRIVATE_MEM_BASE = 0x1e000000,
CORTEX_A9_PRIVATE_MEM_SIZE = 0x2000,
CORTEX_A9_PRIVATE_TIMER_CLK = 200010000,
/* CPU */
CORTEX_A9_PRIVATE_MEM_BASE = 0x1e000000,
CORTEX_A9_PRIVATE_MEM_SIZE = 0x2000,
CORTEX_A9_PRIVATE_TIMER_CLK = 200010000,
/* whether board provides security extension */
SECURITY_EXTENSION = 1,
/* whether board provides security extension */
SECURITY_EXTENSION = 1,
/* CPU cache */
CACHE_LINE_SIZE_LOG2 = 2, /* FIXME get correct value from board spec */
};
/* CPU cache */
CACHE_LINE_SIZE_LOG2 = 2, /* FIXME get correct value from board spec */
};
}
};
#endif /* _INCLUDE__PLATFORM__VEA9X4__DRIVERS__BOARD_BASE_H_ */


@ -14,73 +14,69 @@
#ifndef _EXYNOS5__BOARD_BASE_H_
#define _EXYNOS5__BOARD_BASE_H_
namespace Genode
namespace Genode { struct Exynos5; }
/**
* Board-driver base
*/
struct Genode::Exynos5
{
/**
* Board-driver base
*/
class Exynos5;
}
enum {
/* normal RAM */
RAM_0_BASE = 0x40000000,
RAM_0_SIZE = 0x80000000,
class Genode::Exynos5
{
public:
/* device IO memory */
MMIO_0_BASE = 0x10000000,
MMIO_0_SIZE = 0x10000000,
enum {
/* normal RAM */
RAM_0_BASE = 0x40000000,
RAM_0_SIZE = 0x80000000,
/* interrupt controller */
IRQ_CONTROLLER_BASE = 0x10480000,
IRQ_CONTROLLER_SIZE = 0x00010000,
IRQ_CONTROLLER_VT_CTRL_BASE = 0x10484000,
IRQ_CONTROLLER_VT_CPU_BASE = 0x10486000,
IRQ_CONTROLLER_VT_CPU_SIZE = 0x1000,
/* device IO memory */
MMIO_0_BASE = 0x10000000,
MMIO_0_SIZE = 0x10000000,
/* virtual interrupts */
VT_MAINTAINANCE_IRQ = 25,
VT_TIMER_IRQ = 27,
/* interrupt controller */
IRQ_CONTROLLER_BASE = 0x10480000,
IRQ_CONTROLLER_SIZE = 0x00010000,
IRQ_CONTROLLER_VT_CTRL_BASE = 0x10484000,
IRQ_CONTROLLER_VT_CPU_BASE = 0x10486000,
IRQ_CONTROLLER_VT_CPU_SIZE = 0x1000,
/* UART */
UART_2_MMIO_BASE = 0x12C20000,
UART_2_IRQ = 85,
/* virtual interrupts */
VT_MAINTAINANCE_IRQ = 25,
VT_TIMER_IRQ = 27,
/* pulse-width-modulation timer */
PWM_MMIO_BASE = 0x12dd0000,
PWM_MMIO_SIZE = 0x1000,
PWM_CLOCK = 66000000,
PWM_IRQ_0 = 68,
/* UART */
UART_2_MMIO_BASE = 0x12C20000,
UART_2_IRQ = 85,
/* multicore timer */
MCT_MMIO_BASE = 0x101c0000,
MCT_MMIO_SIZE = 0x1000,
MCT_CLOCK = 24000000,
MCT_IRQ_L0 = 152,
MCT_IRQ_L1 = 153,
/* pulse-width-modulation timer */
PWM_MMIO_BASE = 0x12dd0000,
PWM_MMIO_SIZE = 0x1000,
PWM_CLOCK = 66000000,
PWM_IRQ_0 = 68,
/* CPU cache */
CACHE_LINE_SIZE_LOG2 = 6,
/* multicore timer */
MCT_MMIO_BASE = 0x101c0000,
MCT_MMIO_SIZE = 0x1000,
MCT_CLOCK = 24000000,
MCT_IRQ_L0 = 152,
MCT_IRQ_L1 = 153,
/* IRAM */
IRAM_BASE = 0x02020000,
/* CPU cache */
CACHE_LINE_SIZE_LOG2 = 6,
/* hardware name of the primary processor */
PRIMARY_MPIDR_AFF_0 = 0,
/* IRAM */
IRAM_BASE = 0x02020000,
/* SATA/AHCI */
SATA_IRQ = 147,
/* hardware name of the primary processor */
PRIMARY_MPIDR_AFF_0 = 0,
/* SD card */
SDMMC0_IRQ = 107,
/* SATA/AHCI */
SATA_IRQ = 147,
/* SD card */
SDMMC0_IRQ = 107,
/* I2C */
I2C_HDMI_IRQ = 96,
};
/* I2C */
I2C_HDMI_IRQ = 96,
};
};
#endif /* _EXYNOS5__BOARD_BASE_H_ */


@ -18,29 +18,29 @@
#include <ram_session/ram_session.h>
#include <base/rpc_client.h>
namespace Genode {
namespace Genode { struct Ram_session_client; }
struct Ram_session_client : Rpc_client<Ram_session>
{
explicit Ram_session_client(Ram_session_capability session)
: Rpc_client<Ram_session>(session) { }
Ram_dataspace_capability alloc(size_t size,
Cache_attribute cached = CACHED) {
return call<Rpc_alloc>(size, cached); }
struct Genode::Ram_session_client : Rpc_client<Ram_session>
{
explicit Ram_session_client(Ram_session_capability session)
: Rpc_client<Ram_session>(session) { }
void free(Ram_dataspace_capability ds) { call<Rpc_free>(ds); }
Ram_dataspace_capability alloc(size_t size,
Cache_attribute cached = CACHED) override {
return call<Rpc_alloc>(size, cached); }
int ref_account(Ram_session_capability ram_session) {
return call<Rpc_ref_account>(ram_session); }
void free(Ram_dataspace_capability ds) override { call<Rpc_free>(ds); }
int transfer_quota(Ram_session_capability ram_session, size_t amount) {
return call<Rpc_transfer_quota>(ram_session, amount); }
int ref_account(Ram_session_capability ram_session) override {
return call<Rpc_ref_account>(ram_session); }
size_t quota() { return call<Rpc_quota>(); }
int transfer_quota(Ram_session_capability ram_session, size_t amount) override {
return call<Rpc_transfer_quota>(ram_session, amount); }
size_t used() { return call<Rpc_used>(); }
};
}
size_t quota() override { return call<Rpc_quota>(); }
size_t used() override { return call<Rpc_used>(); }
};
#endif /* _INCLUDE__RAM_SESSION__CLIENT_H_ */


@ -17,9 +17,8 @@
#include <ram_session/client.h>
#include <base/connection.h>
namespace Genode {
struct Ram_connection;
}
namespace Genode { struct Ram_connection; }
struct Genode::Ram_connection : Connection<Ram_session>, Ram_session_client
{


@ -24,109 +24,115 @@
namespace Genode {
struct Ram_dataspace : Dataspace { };
struct Ram_dataspace;
typedef Capability<Ram_dataspace> Ram_dataspace_capability;
struct Ram_session : Session
{
static const char *service_name() { return "RAM"; }
/*********************
** Exception types **
*********************/
class Alloc_failed : public Exception { };
class Quota_exceeded : public Alloc_failed { };
class Out_of_metadata : public Alloc_failed { };
/**
* Destructor
*/
virtual ~Ram_session() { }
/**
* Allocate RAM dataspace
*
* \param size size of RAM dataspace
* \param cached selects cacheability attributes of the memory,
* e.g., uncached memory for DMA buffers
*
* \throw Quota_exceeded
* \throw Out_of_metadata
* \return capability to new RAM dataspace
*/
virtual Ram_dataspace_capability alloc(size_t size,
Cache_attribute cached = CACHED) = 0;
/**
* Free RAM dataspace
*
* \param ds dataspace capability as returned by alloc
*/
virtual void free(Ram_dataspace_capability ds) = 0;
/**
* Define reference account for the RAM session
*
* \param ram_session reference account
*
* \return 0 on success
*
* Each RAM session requires another RAM session as reference
* account to transfer quota to and from. The reference account can
* be defined only once.
*/
virtual int ref_account(Ram_session_capability ram_session) = 0;
/**
* Transfer quota to another RAM session
*
* \param ram_session receiver of quota donation
* \param amount amount of quota to donate
* \return 0 on success
*
* Quota can only be transferred if the specified RAM session is
* either the reference account for this session or vice versa.
*/
virtual int transfer_quota(Ram_session_capability ram_session, size_t amount) = 0;
/**
* Return current quota limit
*/
virtual size_t quota() = 0;
/**
* Return used quota
*/
virtual size_t used() = 0;
/**
* Return amount of available quota
*/
size_t avail()
{
size_t q = quota(), u = used();
return q > u ? q - u : 0;
}
/*********************
** RPC declaration **
*********************/
GENODE_RPC_THROW(Rpc_alloc, Ram_dataspace_capability, alloc,
GENODE_TYPE_LIST(Quota_exceeded, Out_of_metadata),
size_t, Cache_attribute);
GENODE_RPC(Rpc_free, void, free, Ram_dataspace_capability);
GENODE_RPC(Rpc_ref_account, int, ref_account, Ram_session_capability);
GENODE_RPC(Rpc_transfer_quota, int, transfer_quota, Ram_session_capability, size_t);
GENODE_RPC(Rpc_quota, size_t, quota);
GENODE_RPC(Rpc_used, size_t, used);
GENODE_RPC_INTERFACE(Rpc_alloc, Rpc_free, Rpc_ref_account,
Rpc_transfer_quota, Rpc_quota, Rpc_used);
};
struct Ram_session;
}
struct Genode::Ram_dataspace : Dataspace { };
struct Genode::Ram_session : Session
{
static const char *service_name() { return "RAM"; }
/*********************
** Exception types **
*********************/
class Alloc_failed : public Exception { };
class Quota_exceeded : public Alloc_failed { };
class Out_of_metadata : public Alloc_failed { };
/**
* Destructor
*/
virtual ~Ram_session() { }
/**
* Allocate RAM dataspace
*
* \param size size of RAM dataspace
* \param cached selects cacheability attributes of the memory,
* e.g., uncached memory for DMA buffers
*
* \throw Quota_exceeded
* \throw Out_of_metadata
* \return capability to new RAM dataspace
*/
virtual Ram_dataspace_capability alloc(size_t size,
Cache_attribute cached = CACHED) = 0;
/**
* Free RAM dataspace
*
* \param ds dataspace capability as returned by alloc
*/
virtual void free(Ram_dataspace_capability ds) = 0;
/**
* Define reference account for the RAM session
*
* \param ram_session reference account
*
* \return 0 on success
*
* Each RAM session requires another RAM session as reference
* account to transfer quota to and from. The reference account can
* be defined only once.
*/
virtual int ref_account(Ram_session_capability ram_session) = 0;
/**
* Transfer quota to another RAM session
*
* \param ram_session receiver of quota donation
* \param amount amount of quota to donate
* \return 0 on success
*
* Quota can only be transferred if the specified RAM session is
* either the reference account for this session or vice versa.
*/
virtual int transfer_quota(Ram_session_capability ram_session, size_t amount) = 0;
/**
* Return current quota limit
*/
virtual size_t quota() = 0;
/**
* Return used quota
*/
virtual size_t used() = 0;
/**
* Return amount of available quota
*/
size_t avail()
{
size_t q = quota(), u = used();
return q > u ? q - u : 0;
}
/*********************
** RPC declaration **
*********************/
GENODE_RPC_THROW(Rpc_alloc, Ram_dataspace_capability, alloc,
GENODE_TYPE_LIST(Quota_exceeded, Out_of_metadata),
size_t, Cache_attribute);
GENODE_RPC(Rpc_free, void, free, Ram_dataspace_capability);
GENODE_RPC(Rpc_ref_account, int, ref_account, Ram_session_capability);
GENODE_RPC(Rpc_transfer_quota, int, transfer_quota, Ram_session_capability, size_t);
GENODE_RPC(Rpc_quota, size_t, quota);
GENODE_RPC(Rpc_used, size_t, used);
GENODE_RPC_INTERFACE(Rpc_alloc, Rpc_free, Rpc_ref_account,
Rpc_transfer_quota, Rpc_quota, Rpc_used);
};
#endif /* _INCLUDE__RAM_SESSION__RAM_SESSION_H_ */
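
For illustration only, the following sketch exercises the RAM-session interface above: it allocates an uncached dataspace such as one would use for a DMA buffer, prints the quota bookkeeping, and releases the dataspace again. The 'ram' reference is assumed to be a valid RAM session provided by the caller; the allocation size is arbitrary.

#include <ram_session/ram_session.h>
#include <base/printf.h>

static void exercise_ram(Genode::Ram_session &ram)
{
	using namespace Genode;

	try {
		/* 16 KiB of uncached memory, e.g., for a DMA buffer */
		Ram_dataspace_capability ds = ram.alloc(16*1024, UNCACHED);

		PLOG("quota=%zu used=%zu avail=%zu",
		     ram.quota(), ram.used(), ram.avail());

		ram.free(ds);
	} catch (Ram_session::Quota_exceeded) {
		PERR("RAM quota exceeded");
	}
}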


@ -17,41 +17,41 @@
#include <rm_session/capability.h>
#include <base/rpc_client.h>
namespace Genode {
namespace Genode { struct Rm_session_client; }
struct Rm_session_client : Rpc_client<Rm_session>
struct Genode::Rm_session_client : Rpc_client<Rm_session>
{
explicit Rm_session_client(Rm_session_capability session)
: Rpc_client<Rm_session>(session) { }
Local_addr attach(Dataspace_capability ds, size_t size = 0,
off_t offset = 0, bool use_local_addr = false,
Local_addr local_addr = (void *)0,
bool executable = false) override
{
explicit Rm_session_client(Rm_session_capability session)
: Rpc_client<Rm_session>(session) { }
return call<Rpc_attach>(ds, size, offset,
use_local_addr, local_addr,
executable);
}
Local_addr attach(Dataspace_capability ds, size_t size = 0,
off_t offset = 0, bool use_local_addr = false,
Local_addr local_addr = (void *)0,
bool executable = false)
{
return call<Rpc_attach>(ds, size, offset,
use_local_addr, local_addr,
executable);
}
void detach(Local_addr local_addr) override {
call<Rpc_detach>(local_addr); }
void detach(Local_addr local_addr) {
call<Rpc_detach>(local_addr); }
Pager_capability add_client(Thread_capability thread) override {
return call<Rpc_add_client>(thread); }
Pager_capability add_client(Thread_capability thread) {
return call<Rpc_add_client>(thread); }
void remove_client(Pager_capability pager) override {
call<Rpc_remove_client>(pager); }
void remove_client(Pager_capability pager) {
call<Rpc_remove_client>(pager); }
void fault_handler(Signal_context_capability handler) override {
call<Rpc_fault_handler>(handler); }
void fault_handler(Signal_context_capability handler) {
call<Rpc_fault_handler>(handler); }
State state() override {
return call<Rpc_state>(); }
State state() {
return call<Rpc_state>(); }
Dataspace_capability dataspace() {
return call<Rpc_dataspace>(); }
};
}
Dataspace_capability dataspace() override {
return call<Rpc_dataspace>(); }
};
#endif /* _INCLUDE__RM_SESSION__CLIENT_H_ */


@ -17,24 +17,24 @@
#include <rm_session/client.h>
#include <base/connection.h>
namespace Genode {
namespace Genode { struct Rm_connection; }
struct Rm_connection : Connection<Rm_session>, Rm_session_client
{
enum { RAM_QUOTA = 64*1024 };
/**
* Constructor
*
* \param start start of the managed VM-region
* \param size size of the VM-region to manage
*/
Rm_connection(addr_t start = ~0UL, size_t size = 0) :
Connection<Rm_session>(
session("ram_quota=64K, start=0x%p, size=0x%zx",
start, size)),
Rm_session_client(cap()) { }
};
}
struct Genode::Rm_connection : Connection<Rm_session>, Rm_session_client
{
enum { RAM_QUOTA = 64*1024 };
/**
* Constructor
*
* \param start start of the managed VM-region
* \param size size of the VM-region to manage
*/
Rm_connection(addr_t start = ~0UL, size_t size = 0) :
Connection<Rm_session>(
session("ram_quota=64K, start=0x%p, size=0x%zx",
start, size)),
Rm_session_client(cap()) { }
};
#endif /* _INCLUDE__RM_SESSION__CONNECTION_H_ */
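
A typical use of a dedicated RM connection is the creation of a managed dataspace. The sketch below is illustrative only and assumes that passing an explicit start address of 0 together with a size yields a managed region of that size; the dataspace of the region can then be handed out or attached elsewhere.

#include <rm_session/connection.h>

static Genode::Dataspace_capability managed_dataspace()
{
	enum { SIZE = 1024*1024 };

	/* keep the connection alive as long as the dataspace is in use */
	static Genode::Rm_connection rm(0, SIZE);

	return rm.dataspace();
}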


@ -22,199 +22,199 @@
#include <pager/capability.h>
#include <session/session.h>
namespace Genode {
namespace Genode { struct Rm_session; }
struct Rm_session : Session
struct Genode::Rm_session : Session
{
enum Fault_type {
READY = 0, READ_FAULT = 1, WRITE_FAULT = 2, EXEC_FAULT = 3 };
/**
* State of region-manager session
*
* If a client accesses a location outside the regions attached to
* the region-manager session, a fault occurs and gets signalled to
* the registered fault handler. The fault handler, in turn, needs
* the information about the fault address and fault type to
* resolve the fault. This information is represented by this
* structure.
*/
struct State
{
enum Fault_type {
READY = 0, READ_FAULT = 1, WRITE_FAULT = 2, EXEC_FAULT = 3 };
/**
* Type of occurred fault
*/
Fault_type type;
/**
* State of region-manager session
*
* If a client accesses a location outside the regions attached to
* the region-manager session, a fault occurs and gets signalled to
* the registered fault handler. The fault handler, in turn, needs
* the information about the fault address and fault type to
* resolve the fault. This information is represented by this
* structure.
* Fault address
*/
struct State
{
/**
* Type of occurred fault
*/
Fault_type type;
/**
* Fault address
*/
addr_t addr;
/**
* Default constructor
*/
State() : type(READY), addr(0) { }
/**
* Constructor
*/
State(Fault_type fault_type, addr_t fault_addr) :
type(fault_type), addr(fault_addr) { }
};
addr_t addr;
/**
* Helper for transferring the bit representation of a pointer as RPC
* argument.
* Default constructor
*/
class Local_addr
{
private:
void *_ptr;
public:
template <typename T>
Local_addr(T ptr) : _ptr((void *)ptr) { }
Local_addr() : _ptr(0) { }
template <typename T>
operator T () { return (T)_ptr; }
};
static const char *service_name() { return "RM"; }
/*********************
** Exception types **
*********************/
class Attach_failed : public Exception { };
class Invalid_args : public Attach_failed { };
class Invalid_dataspace : public Attach_failed { };
class Region_conflict : public Attach_failed { };
class Out_of_metadata : public Attach_failed { };
class Invalid_thread : public Exception { };
class Unbound_thread : public Exception { };
State() : type(READY), addr(0) { }
/**
* Destructor
* Constructor
*/
virtual ~Rm_session() { }
/**
* Map dataspace into local address space
*
* \param ds capability of dataspace to map
* \param size size of the locally mapped region
* default (0) is the whole dataspace
* \param offset start at offset in dataspace (page-aligned)
* \param use_local_addr if set to true, attach the dataspace at
* the specified 'local_addr'
* \param local_addr local destination address
* \param executable if the mapping should be executable
*
* \throw Attach_failed if dataspace or offset is invalid,
* or on region conflict
* \throw Out_of_metadata if meta-data backing store is exhausted
*
* \return local address of mapped dataspace
*
*/
virtual Local_addr attach(Dataspace_capability ds,
size_t size = 0, off_t offset = 0,
bool use_local_addr = false,
Local_addr local_addr = (void *)0,
bool executable = false) = 0;
/**
* Shortcut for attaching a dataspace at a predefined local address
*/
Local_addr attach_at(Dataspace_capability ds, addr_t local_addr,
size_t size = 0, off_t offset = 0) {
return attach(ds, size, offset, true, local_addr); }
/**
* Shortcut for attaching a dataspace executable at a predefined local address
*/
Local_addr attach_executable(Dataspace_capability ds, addr_t local_addr,
size_t size = 0, off_t offset = 0) {
return attach(ds, size, offset, true, local_addr, true); }
/**
* Remove region from local address space
*/
virtual void detach(Local_addr local_addr) = 0;
/**
* Add client to pager
*
* \param thread thread that will be paged
* \throw Invalid_thread
* \throw Out_of_metadata
* \throw Unbound_thread
* \return capability to be used for handling page faults
*
* This method must be called at least once to establish a valid
* communication channel between the pager part of the region manager
* and the client thread.
*/
virtual Pager_capability add_client(Thread_capability thread) = 0;
/**
* Remove client from pager
*
* \param pager pager capability of client to be removed
*/
virtual void remove_client(Pager_capability) = 0;
/**
* Register signal handler for region-manager faults
*
* On Linux, this signal is never delivered because page-fault handling
* is performed by the Linux kernel. On microkernel platforms,
* unresolvable page faults (traditionally called segmentation fault)
* will result in the delivery of the signal.
*/
virtual void fault_handler(Signal_context_capability handler) = 0;
/**
* Request current state of RM session
*/
virtual State state() = 0;
/**
* Return dataspace representation of region-manager session
*/
virtual Dataspace_capability dataspace() = 0;
/*********************
** RPC declaration **
*********************/
GENODE_RPC_THROW(Rpc_attach, Local_addr, attach,
GENODE_TYPE_LIST(Invalid_dataspace, Region_conflict,
Out_of_metadata, Invalid_args),
Dataspace_capability, size_t, off_t, bool, Local_addr, bool);
GENODE_RPC(Rpc_detach, void, detach, Local_addr);
GENODE_RPC_THROW(Rpc_add_client, Pager_capability, add_client,
GENODE_TYPE_LIST(Invalid_thread, Out_of_metadata),
Thread_capability);
GENODE_RPC(Rpc_remove_client, void, remove_client, Pager_capability);
GENODE_RPC(Rpc_fault_handler, void, fault_handler, Signal_context_capability);
GENODE_RPC(Rpc_state, State, state);
GENODE_RPC(Rpc_dataspace, Dataspace_capability, dataspace);
GENODE_RPC_INTERFACE(Rpc_attach, Rpc_detach, Rpc_add_client,
Rpc_remove_client, Rpc_fault_handler, Rpc_state,
Rpc_dataspace);
State(Fault_type fault_type, addr_t fault_addr) :
type(fault_type), addr(fault_addr) { }
};
}
/**
* Helper for transferring the bit representation of a pointer as RPC
* argument.
*/
class Local_addr
{
private:
void *_ptr;
public:
template <typename T>
Local_addr(T ptr) : _ptr((void *)ptr) { }
Local_addr() : _ptr(0) { }
template <typename T>
operator T () { return (T)_ptr; }
};
static const char *service_name() { return "RM"; }
/*********************
** Exception types **
*********************/
class Attach_failed : public Exception { };
class Invalid_args : public Attach_failed { };
class Invalid_dataspace : public Attach_failed { };
class Region_conflict : public Attach_failed { };
class Out_of_metadata : public Attach_failed { };
class Invalid_thread : public Exception { };
class Unbound_thread : public Exception { };
/**
* Destructor
*/
virtual ~Rm_session() { }
/**
* Map dataspace into local address space
*
* \param ds capability of dataspace to map
* \param size size of the locally mapped region
* default (0) is the whole dataspace
* \param offset start at offset in dataspace (page-aligned)
* \param use_local_addr if set to true, attach the dataspace at
* the specified 'local_addr'
* \param local_addr local destination address
* \param executable if the mapping should be executable
*
* \throw Attach_failed if dataspace or offset is invalid,
* or on region conflict
* \throw Out_of_metadata if meta-data backing store is exhausted
*
* \return local address of mapped dataspace
*
*/
virtual Local_addr attach(Dataspace_capability ds,
size_t size = 0, off_t offset = 0,
bool use_local_addr = false,
Local_addr local_addr = (void *)0,
bool executable = false) = 0;
/**
* Shortcut for attaching a dataspace at a predefined local address
*/
Local_addr attach_at(Dataspace_capability ds, addr_t local_addr,
size_t size = 0, off_t offset = 0) {
return attach(ds, size, offset, true, local_addr); }
/**
* Shortcut for attaching a dataspace executable at a predefined local address
*/
Local_addr attach_executable(Dataspace_capability ds, addr_t local_addr,
size_t size = 0, off_t offset = 0) {
return attach(ds, size, offset, true, local_addr, true); }
/**
* Remove region from local address space
*/
virtual void detach(Local_addr local_addr) = 0;
/**
* Add client to pager
*
* \param thread thread that will be paged
* \throw Invalid_thread
* \throw Out_of_metadata
* \throw Unbound_thread
* \return capability to be used for handling page faults
*
* This method must be called at least once to establish a valid
* communication channel between the pager part of the region manager
* and the client thread.
*/
virtual Pager_capability add_client(Thread_capability thread) = 0;
/**
* Remove client from pager
*
* \param pager pager capability of client to be removed
*/
virtual void remove_client(Pager_capability) = 0;
/**
* Register signal handler for region-manager faults
*
* On Linux, this signal is never delivered because page-fault handling
* is performed by the Linux kernel. On microkernel platforms,
* unresolvable page faults (traditionally called segmentation fault)
* will result in the delivery of the signal.
*/
virtual void fault_handler(Signal_context_capability handler) = 0;
/**
* Request current state of RM session
*/
virtual State state() = 0;
/**
* Return dataspace representation of region-manager session
*/
virtual Dataspace_capability dataspace() = 0;
/*********************
** RPC declaration **
*********************/
GENODE_RPC_THROW(Rpc_attach, Local_addr, attach,
GENODE_TYPE_LIST(Invalid_dataspace, Region_conflict,
Out_of_metadata, Invalid_args),
Dataspace_capability, size_t, off_t, bool, Local_addr, bool);
GENODE_RPC(Rpc_detach, void, detach, Local_addr);
GENODE_RPC_THROW(Rpc_add_client, Pager_capability, add_client,
GENODE_TYPE_LIST(Invalid_thread, Out_of_metadata),
Thread_capability);
GENODE_RPC(Rpc_remove_client, void, remove_client, Pager_capability);
GENODE_RPC(Rpc_fault_handler, void, fault_handler, Signal_context_capability);
GENODE_RPC(Rpc_state, State, state);
GENODE_RPC(Rpc_dataspace, Dataspace_capability, dataspace);
GENODE_RPC_INTERFACE(Rpc_attach, Rpc_detach, Rpc_add_client,
Rpc_remove_client, Rpc_fault_handler, Rpc_state,
Rpc_dataspace);
};
#endif /* _INCLUDE__RM_SESSION__RM_SESSION_H_ */
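
The attach/detach pair above can be exercised as follows. This is a sketch only, assuming 'rm' and 'ds' are valid: the RM session chooses the local address, the returned 'Local_addr' converts to a plain pointer, and the region is removed again afterwards.

#include <rm_session/rm_session.h>
#include <base/printf.h>

static void map_once(Genode::Rm_session &rm, Genode::Dataspace_capability ds)
{
	using namespace Genode;

	try {
		void *local = rm.attach(ds);   /* let the RM session pick the address */

		PLOG("dataspace attached at %p", local);

		rm.detach(local);
	} catch (Rm_session::Attach_failed) {
		PERR("attach failed");
	}
}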


@ -19,6 +19,7 @@
namespace Genode { struct Rom_session_client; }
struct Genode::Rom_session_client : Rpc_client<Rom_session>
{
explicit Rom_session_client(Rom_session_capability session)


@ -18,43 +18,44 @@
#include <base/connection.h>
#include <base/printf.h>
namespace Genode {
namespace Genode { class Rom_connection; }
class Rom_connection : public Connection<Rom_session>,
public Rom_session_client
{
public:
class Rom_connection_failed : public Parent::Exception { };
class Genode::Rom_connection : public Connection<Rom_session>,
public Rom_session_client
{
public:
private:
class Rom_connection_failed : public Parent::Exception { };
Rom_session_capability _create_session(const char *module_name, const char *label)
{
try {
return session("ram_quota=4K, filename=\"%s\", label=\"%s\"",
module_name, label ? label: module_name); }
catch (...) {
PERR("Could not open ROM session for module \"%s\"", module_name);
throw Rom_connection_failed();
}
private:
Rom_session_capability _create_session(const char *module_name, const char *label)
{
try {
return session("ram_quota=4K, filename=\"%s\", label=\"%s\"",
module_name, label ? label: module_name); }
catch (...) {
PERR("Could not open ROM session for module \"%s\"", module_name);
throw Rom_connection_failed();
}
}
public:
public:
/**
* Constructor
*
* \param filename name of ROM file
* \param label initial session label
*
* \throw Rom_connection_failed
*/
Rom_connection(const char *filename, const char *label = 0) :
Connection<Rom_session>(_create_session(filename, label)),
Rom_session_client(cap())
{ }
};
}
/**
* Constructor
*
* \param module_name name of ROM module
* \param label initial session label
*
* \throw Rom_connection_failed
*/
Rom_connection(const char *module_name, const char *label = 0)
:
Connection<Rom_session>(_create_session(module_name, label)),
Rom_session_client(cap())
{ }
};
#endif /* _INCLUDE__ROM_SESSION__CONNECTION_H_ */
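
For illustration, opening a ROM module with the revised constructor looks as follows. The module name "config" and the RM session used for attaching the dataspace are assumptions of this sketch, not part of the patch.

#include <rom_session/connection.h>
#include <rm_session/rm_session.h>
#include <base/printf.h>

static void inspect_rom(Genode::Rm_session &rm)
{
	using namespace Genode;

	try {
		Rom_connection rom("config");

		/* map the ROM dataspace locally and report where it ended up */
		void *content = rm.attach(rom.dataspace());
		PLOG("ROM module attached at %p", content);

		rm.detach(content);
	} catch (Rom_connection::Rom_connection_failed) {
		PERR("ROM module not available");
	}
}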


@ -23,14 +23,16 @@
namespace Genode {
struct Rom_dataspace : Dataspace { };
struct Rom_dataspace;
struct Rom_session;
typedef Capability<Rom_dataspace> Rom_dataspace_capability;
}
struct Genode::Rom_dataspace : Dataspace { };
struct Genode::Rom_session : Session
{
static const char *service_name() { return "ROM"; }


@ -17,22 +17,22 @@
#include <root/capability.h>
#include <base/rpc_client.h>
namespace Genode {
namespace Genode { struct Root_client; }
struct Root_client : Rpc_client<Root>
{
explicit Root_client(Root_capability root)
: Rpc_client<Root>(root) { }
Session_capability session(Session_args const &args, Affinity const &affinity) {
return call<Rpc_session>(args, affinity); }
struct Genode::Root_client : Rpc_client<Root>
{
explicit Root_client(Root_capability root)
: Rpc_client<Root>(root) { }
void upgrade(Session_capability session, Upgrade_args const &args) {
call<Rpc_upgrade>(session, args); }
Session_capability session(Session_args const &args, Affinity const &affinity) override {
return call<Rpc_session>(args, affinity); }
void close(Session_capability session) {
call<Rpc_close>(session); }
};
}
void upgrade(Session_capability session, Upgrade_args const &args) override {
call<Rpc_upgrade>(session, args); }
void close(Session_capability session) override {
call<Rpc_close>(session); }
};
#endif /* _INCLUDE__ROOT__CLIENT_H_ */


@ -26,239 +26,245 @@
namespace Genode {
/**
* Session creation policy for a single-client service
*/
class Single_client
{
private:
bool _used;
public:
Single_client() : _used(0) { }
void aquire(const char *)
{
if (_used)
throw Root::Unavailable();
_used = true;
}
void release() { _used = false; }
};
/**
* Session-creation policy for a multi-client service
*/
struct Multiple_clients
{
void aquire(const char *) { }
void release() { }
};
/**
* Template for implementing the root interface
*
* \param SESSION_TYPE session-component type to manage,
* derived from 'Rpc_object'
* \param POLICY session-creation policy
*
* The 'POLICY' template parameter allows for constraining the session
* creation to only one instance at a time (using the 'Single_client'
* policy) or multiple instances (using the 'Multiple_clients' policy).
*
* The 'POLICY' class must provide the following two functions:
*
* :'aquire(const char *args)': is called with the session arguments
* at creation time of each new session. It can therefore implement
* a session-creation policy taking session arguments into account.
* If the policy denies the creation of a new session, it throws
* one of the exceptions defined in the 'Root' interface.
*
* :'release': is called at the destruction time of a session. It enables
* the policy to keep track of and impose restrictions on the number
* of existing sessions.
*
* The default policy 'Multiple_clients' imposes no restrictions on the
* creation of new sessions.
*/
template <typename SESSION_TYPE, typename POLICY = Multiple_clients>
class Root_component : public Rpc_object<Typed_root<SESSION_TYPE> >, private POLICY
{
private:
/*
* Entry point that manages the session objects
* created by this root interface
*/
Rpc_entrypoint *_ep;
/*
* Allocator for allocating session objects.
* This allocator must be used by the derived
* class when calling the 'new' operator for
* creating a new session.
*/
Allocator *_md_alloc;
protected:
/**
* Create new session (to be implemented by a derived class)
*
* Only a derived class knows the constructor arguments of
* a specific session. Therefore, we cannot unify the call
* of its 'new' operator and must implement the session
* creation at a place where the required knowledge exists.
*
* In the implementation of this function, the heap, provided
* by 'Root_component' must be used for allocating the session
* object.
*
* If the server implementation does not evaluate the session
* affinity, it suffices to override the overload without the
* affinity argument.
*
* \throw Allocator::Out_of_memory typically caused by the
* meta-data allocator
* \throw Root::Invalid_args typically caused by the
* session-component constructor
*/
virtual SESSION_TYPE *_create_session(const char *args,
Affinity const &)
{
return _create_session(args);
}
virtual SESSION_TYPE *_create_session(const char *args)
{
throw Root::Invalid_args();
}
/**
* Inform session about a quota upgrade
*
* Once a session is created, its client can successively extend
* its quota donation via the 'Parent::transfer_quota' function.
* This will result in the invocation of 'Root::upgrade' at the
* root interface the session was created with. The root interface,
* in turn, informs the session about the new resources via the
* '_upgrade_session' function. The default implementation is
* suited for sessions that use a static amount of resources
* accounted for at session-creation time. For such sessions, an
* upgrade is not useful. However, sessions that dynamically
* allocate resources on behalf of their clients should respond to
* quota upgrades by implementing this function.
*
* \param session session to upgrade
* \param args description of additional resources in the
* same format as used at session creation
*/
virtual void _upgrade_session(SESSION_TYPE *, const char *) { }
virtual void _destroy_session(SESSION_TYPE *session) {
destroy(_md_alloc, session); }
/**
* Return allocator to allocate server object in '_create_session()'
*/
Allocator *md_alloc() { return _md_alloc; }
Rpc_entrypoint *ep() { return _ep; }
public:
/**
* Constructor
*
* \param ep entry point that manages the sessions of this
* root interface.
* \param ram_session provider of dataspaces for the backing store
* of session objects and session data
*/
Root_component(Rpc_entrypoint *ep, Allocator *metadata_alloc)
: _ep(ep), _md_alloc(metadata_alloc) { }
/********************
** Root interface **
********************/
Session_capability session(Root::Session_args const &args,
Affinity const &affinity)
{
if (!args.is_valid_string()) throw Root::Invalid_args();
POLICY::aquire(args.string());
/*
* We need to decrease 'ram_quota' by
* the size of the session object.
*/
size_t ram_quota = Arg_string::find_arg(args.string(), "ram_quota").long_value(0);
size_t needed = sizeof(SESSION_TYPE) + md_alloc()->overhead(sizeof(SESSION_TYPE));
if (needed > ram_quota) {
PERR("Insufficient ram quota, provided=%zu, required=%zu",
ram_quota, needed);
throw Root::Quota_exceeded();
}
size_t const remaining_ram_quota = ram_quota - needed;
/*
* Deduct the ram quota needed for allocating the session object from the
* donated ram quota.
*
* XXX the size of the 'adjusted_args' buffer should depend
* on the message-buffer size and stack size.
*/
enum { MAX_ARGS_LEN = 256 };
char adjusted_args[MAX_ARGS_LEN];
strncpy(adjusted_args, args.string(), sizeof(adjusted_args));
char ram_quota_buf[64];
snprintf(ram_quota_buf, sizeof(ram_quota_buf), "%zu",
remaining_ram_quota);
Arg_string::set_arg(adjusted_args, sizeof(adjusted_args),
"ram_quota", ram_quota_buf);
SESSION_TYPE *s = 0;
try { s = _create_session(adjusted_args, affinity); }
catch (Allocator::Out_of_memory) { throw Root::Quota_exceeded(); }
return _ep->manage(s);
}
void upgrade(Session_capability session, Root::Upgrade_args const &args)
{
if (!args.is_valid_string()) throw Root::Invalid_args();
typedef typename Object_pool<SESSION_TYPE>::Guard Object_guard;
Object_guard s(_ep->lookup_and_lock(session));
if (!s) return;
_upgrade_session(s, args.string());
}
void close(Session_capability session)
{
SESSION_TYPE * s =
dynamic_cast<SESSION_TYPE *>(_ep->lookup_and_lock(session));
if (!s) return;
/* let the entry point forget the session object */
_ep->dissolve(s);
_destroy_session(s);
POLICY::release();
return;
}
};
class Single_client;
class Multiple_clients;
template <typename, typename POLICY = Multiple_clients> class Root_component;
}
/**
* Session creation policy for a single-client service
*/
class Genode::Single_client
{
private:
bool _used;
public:
Single_client() : _used(0) { }
void aquire(const char *)
{
if (_used)
throw Root::Unavailable();
_used = true;
}
void release() { _used = false; }
};
/**
* Session-creation policy for a multi-client service
*/
struct Genode::Multiple_clients
{
void aquire(const char *) { }
void release() { }
};
/**
* Template for implementing the root interface
*
* \param SESSION_TYPE session-component type to manage,
* derived from 'Rpc_object'
* \param POLICY session-creation policy
*
* The 'POLICY' template parameter allows for constraining the session
* creation to only one instance at a time (using the 'Single_client'
* policy) or multiple instances (using the 'Multiple_clients' policy).
*
* The 'POLICY' class must provide the following two functions:
*
* :'aquire(const char *args)': is called with the session arguments
* at creation time of each new session. It can therefore implement
* a session-creation policy taking session arguments into account.
* If the policy denies the creation of a new session, it throws
* one of the exceptions defined in the 'Root' interface.
*
* :'release': is called at the destruction time of a session. It enables
* the policy to keep track of and impose restrictions on the number
* of existing sessions.
*
* The default policy 'Multiple_clients' imposes no restrictions on the
* creation of new sessions.
*/
template <typename SESSION_TYPE, typename POLICY>
class Genode::Root_component : public Rpc_object<Typed_root<SESSION_TYPE> >,
private POLICY
{
private:
/*
* Entry point that manages the session objects
* created by this root interface
*/
Rpc_entrypoint *_ep;
/*
* Allocator for allocating session objects.
* This allocator must be used by the derived
* class when calling the 'new' operator for
* creating a new session.
*/
Allocator *_md_alloc;
protected:
/**
* Create new session (to be implemented by a derived class)
*
* Only a derived class knows the constructor arguments of
* a specific session. Therefore, we cannot unify the call
* of its 'new' operator and must implement the session
* creation at a place where the required knowledge exists.
*
* In the implementation of this function, the heap, provided
* by 'Root_component' must be used for allocating the session
* object.
*
* If the server implementation does not evaluate the session
* affinity, it suffices to override the overload without the
* affinity argument.
*
* \throw Allocator::Out_of_memory typically caused by the
* meta-data allocator
* \throw Root::Invalid_args typically caused by the
* session-component constructor
*/
virtual SESSION_TYPE *_create_session(const char *args,
Affinity const &)
{
return _create_session(args);
}
virtual SESSION_TYPE *_create_session(const char *args)
{
throw Root::Invalid_args();
}
/**
* Inform session about a quota upgrade
*
* Once a session is created, its client can successively extend
* its quota donation via the 'Parent::transfer_quota' function.
* This will result in the invocation of 'Root::upgrade' at the
* root interface the session was created with. The root interface,
* in turn, informs the session about the new resources via the
* '_upgrade_session' function. The default implementation is
* suited for sessions that use a static amount of resources
* accounted for at session-creation time. For such sessions, an
* upgrade is not useful. However, sessions that dynamically
* allocate resources on behalf of their clients should respond to
* quota upgrades by implementing this function.
*
* \param session session to upgrade
* \param args description of additional resources in the
* same format as used at session creation
*/
virtual void _upgrade_session(SESSION_TYPE *, const char *) { }
virtual void _destroy_session(SESSION_TYPE *session) {
destroy(_md_alloc, session); }
/**
* Return allocator to allocate server object in '_create_session()'
*/
Allocator *md_alloc() { return _md_alloc; }
Rpc_entrypoint *ep() { return _ep; }
public:
/**
* Constructor
*
* \param ep entry point that manages the sessions of this
* root interface.
* \param ram_session provider of dataspaces for the backing store
* of session objects and session data
*/
Root_component(Rpc_entrypoint *ep, Allocator *metadata_alloc)
: _ep(ep), _md_alloc(metadata_alloc) { }
/********************
** Root interface **
********************/
Session_capability session(Root::Session_args const &args,
Affinity const &affinity) override
{
if (!args.is_valid_string()) throw Root::Invalid_args();
POLICY::aquire(args.string());
/*
* We need to decrease 'ram_quota' by
* the size of the session object.
*/
size_t ram_quota = Arg_string::find_arg(args.string(), "ram_quota").long_value(0);
size_t needed = sizeof(SESSION_TYPE) + md_alloc()->overhead(sizeof(SESSION_TYPE));
if (needed > ram_quota) {
PERR("Insufficient ram quota, provided=%zu, required=%zu",
ram_quota, needed);
throw Root::Quota_exceeded();
}
size_t const remaining_ram_quota = ram_quota - needed;
/*
* Deduct the ram quota needed for allocating the session object from the
* donated ram quota.
*
* XXX the size of the 'adjusted_args' buffer should depend
* on the message-buffer size and stack size.
*/
enum { MAX_ARGS_LEN = 256 };
char adjusted_args[MAX_ARGS_LEN];
strncpy(adjusted_args, args.string(), sizeof(adjusted_args));
char ram_quota_buf[64];
snprintf(ram_quota_buf, sizeof(ram_quota_buf), "%zu",
remaining_ram_quota);
Arg_string::set_arg(adjusted_args, sizeof(adjusted_args),
"ram_quota", ram_quota_buf);
SESSION_TYPE *s = 0;
try { s = _create_session(adjusted_args, affinity); }
catch (Allocator::Out_of_memory) { throw Root::Quota_exceeded(); }
return _ep->manage(s);
}
void upgrade(Session_capability session, Root::Upgrade_args const &args) override
{
if (!args.is_valid_string()) throw Root::Invalid_args();
typedef typename Object_pool<SESSION_TYPE>::Guard Object_guard;
Object_guard s(_ep->lookup_and_lock(session));
if (!s) return;
_upgrade_session(s, args.string());
}
void close(Session_capability session) override
{
SESSION_TYPE * s =
dynamic_cast<SESSION_TYPE *>(_ep->lookup_and_lock(session));
if (!s) return;
/* let the entry point forget the session object */
_ep->dissolve(s);
_destroy_session(s);
POLICY::release();
return;
}
};
#endif /* _INCLUDE__ROOT__COMPONENT_H_ */
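
The 'aquire'/'release' contract spelled out in the documentation above can also be satisfied by custom policies. The following sketch shows a hypothetical 'Limited_clients' policy, not part of the patch, that caps the number of concurrently open sessions and could be supplied as the second template argument of 'Root_component'.

#include <root/component.h>

struct Limited_clients
{
	enum { MAX_SESSIONS = 4 };

	unsigned _open;

	Limited_clients() : _open(0) { }

	/* spelling of 'aquire' follows the POLICY contract used by 'Root_component' */
	void aquire(const char *)
	{
		/* deny the session once the limit is reached */
		if (_open == MAX_SESSIONS)
			throw Genode::Root::Unavailable();

		_open++;
	}

	void release() { if (_open) _open--; }
};

A service would then derive its root from 'Genode::Root_component<Session_component, Limited_clients>' while keeping the remainder of the root implementation unchanged.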


@ -22,74 +22,78 @@
namespace Genode {
struct Root
{
/*********************
** Exception types **
*********************/
class Exception : public ::Genode::Exception { };
class Unavailable : public Exception { };
class Quota_exceeded : public Exception { };
class Invalid_args : public Exception { };
typedef Rpc_in_buffer<160> Session_args;
typedef Rpc_in_buffer<160> Upgrade_args;
virtual ~Root() { }
/**
* Create session
*
* \throw Unavailable
* \throw Quota_exceeded
* \throw Invalid_args
*
* \return capability to new session
*/
virtual Session_capability session(Session_args const &args,
Affinity const &affinity) = 0;
/**
* Extend resource donation to an existing session
*/
virtual void upgrade(Session_capability session, Upgrade_args const &args) = 0;
/**
* Close session
*/
virtual void close(Session_capability session) = 0;
/*********************
** RPC declaration **
*********************/
GENODE_RPC_THROW(Rpc_session, Session_capability, session,
GENODE_TYPE_LIST(Unavailable, Quota_exceeded, Invalid_args),
Session_args const &, Affinity const &);
GENODE_RPC_THROW(Rpc_upgrade, void, upgrade,
GENODE_TYPE_LIST(Invalid_args),
Session_capability, Upgrade_args const &);
GENODE_RPC(Rpc_close, void, close, Session_capability);
GENODE_RPC_INTERFACE(Rpc_session, Rpc_upgrade, Rpc_close);
};
/**
* Root interface supplemented with information about the managed
* session type
*
* This class template is used to automatically propagate the
* correct session type to 'Parent::announce()' when announcing
* a service.
*/
template <typename SESSION_TYPE>
struct Typed_root : Root
{
typedef SESSION_TYPE Session_type;
};
struct Root;
template <typename> struct Typed_root;
}
struct Genode::Root
{
/*********************
** Exception types **
*********************/
class Exception : public ::Genode::Exception { };
class Unavailable : public Exception { };
class Quota_exceeded : public Exception { };
class Invalid_args : public Exception { };
typedef Rpc_in_buffer<160> Session_args;
typedef Rpc_in_buffer<160> Upgrade_args;
virtual ~Root() { }
/**
* Create session
*
* \throw Unavailable
* \throw Quota_exceeded
* \throw Invalid_args
*
* \return capability to new session
*/
virtual Session_capability session(Session_args const &args,
Affinity const &affinity) = 0;
/**
* Extend resource donation to an existing session
*/
virtual void upgrade(Session_capability session, Upgrade_args const &args) = 0;
/**
* Close session
*/
virtual void close(Session_capability session) = 0;
/*********************
** RPC declaration **
*********************/
GENODE_RPC_THROW(Rpc_session, Session_capability, session,
GENODE_TYPE_LIST(Unavailable, Quota_exceeded, Invalid_args),
Session_args const &, Affinity const &);
GENODE_RPC_THROW(Rpc_upgrade, void, upgrade,
GENODE_TYPE_LIST(Invalid_args),
Session_capability, Upgrade_args const &);
GENODE_RPC(Rpc_close, void, close, Session_capability);
GENODE_RPC_INTERFACE(Rpc_session, Rpc_upgrade, Rpc_close);
};
/**
* Root interface supplemented with information about the managed
* session type
*
* This class template is used to automatically propagate the
* correct session type to 'Parent::announce()' when announcing
* a service.
*/
template <typename SESSION_TYPE>
struct Genode::Typed_root : Root
{
typedef SESSION_TYPE Session_type;
};
#endif /* _INCLUDE__ROOT__ROOT_H_ */
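
Editor's note: to make the Root/Typed_root split above concrete, here is a rough end-to-end sketch in the style of the era's hello_tutorial: a session interface, a trivial session component, a root built on the 'Root_component' template, and a main() that announces the service. Every 'Hello::...' name, the "hello_ep" label, the stack size, and the single-argument '_create_session()' hook are illustrative assumptions, not part of this diff; the point is only the announce idiom, where 'Parent::announce()' derives the service name via 'Typed_root::Session_type'.

#include <base/env.h>
#include <base/sleep.h>
#include <base/printf.h>
#include <base/rpc_server.h>
#include <cap_session/connection.h>
#include <root/component.h>
#include <session/session.h>

namespace Hello {

	/* hypothetical session interface */
	struct Session : Genode::Session
	{
		static const char *service_name() { return "Hello"; }

		virtual void say_hello() = 0;

		GENODE_RPC(Rpc_say_hello, void, say_hello);
		GENODE_RPC_INTERFACE(Rpc_say_hello);
	};

	/* trivial server-side implementation */
	struct Session_component : Genode::Rpc_object<Session>
	{
		void say_hello() { PLOG("hello"); }
	};

	/* session factory plugged into the 'Root_component' template */
	class Root : public Genode::Root_component<Session_component>
	{
		protected:

			/* called with the quota-adjusted argument string */
			Session_component *_create_session(const char *)
			{
				return new (md_alloc()) Session_component();
			}

		public:

			Root(Genode::Rpc_entrypoint *ep, Genode::Allocator *md_alloc)
			: Genode::Root_component<Session_component>(ep, md_alloc) { }
	};
}

int main()
{
	using namespace Genode;

	enum { STACK_SIZE = 8*1024 };
	static Cap_connection cap;
	static Rpc_entrypoint ep(&cap, STACK_SIZE, "hello_ep");

	static Hello::Root root(&ep, env()->heap());

	/*
	 * The root ultimately derives from 'Typed_root', so the parent
	 * learns the service name from 'Session_type::service_name()'
	 */
	env()->parent()->announce(ep.manage(&root));

	sleep_forever();
	return 0;
}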

View File

@ -21,18 +21,17 @@
*/
#include <base/rpc.h>
namespace Genode {
namespace Genode { class Session; }
/**
* Base class of session interfaces
*
* Each session interface must implement the function 'service_name'
* ! static const char *service_name();
* This function returns the name of the service provided via the session
* interface.
*/
class Session { };
}
/**
* Base class of session interfaces
*
* Each session interface must implement the function 'service_name'
* ! static const char *service_name();
* This function returns the name of the service provided via the session
* interface.
*/
class Genode::Session { };
#endif /* _INCLUDE__SESSION_H_ */
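
Editor's note: a hedged sketch of what the comment above asks for, i.e., a hypothetical 'Clock' session interface that provides 'service_name()' alongside its RPC declarations. The service name, the 'elapsed_ms()' call, and the assumption that this base class is reachable via <session/session.h> are illustrative, not taken from the commit.

#include <session/session.h>
#include <base/rpc.h>

namespace Clock { struct Session; }

struct Clock::Session : Genode::Session
{
	/* the name under which the service is announced and requested */
	static const char *service_name() { return "Clock"; }

	/* hypothetical session function */
	virtual unsigned long elapsed_ms() = 0;

	GENODE_RPC(Rpc_elapsed_ms, unsigned long, elapsed_ms);
	GENODE_RPC_INTERFACE(Rpc_elapsed_ms);
};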

View File

@ -19,25 +19,25 @@
#include <base/rpc_client.h>
#include <signal_session/source_client.h>
namespace Genode {
namespace Genode { struct Signal_session_client; }
struct Signal_session_client : Rpc_client<Signal_session>
{
explicit Signal_session_client(Signal_session_capability session)
: Rpc_client<Signal_session>(session) { }
Signal_source_capability signal_source() {
return call<Rpc_signal_source>(); }
struct Genode::Signal_session_client : Rpc_client<Signal_session>
{
explicit Signal_session_client(Signal_session_capability session)
: Rpc_client<Signal_session>(session) { }
Signal_context_capability alloc_context(long imprint) {
return call<Rpc_alloc_context>(imprint); }
Signal_source_capability signal_source() override {
return call<Rpc_signal_source>(); }
void free_context(Signal_context_capability cap) {
call<Rpc_free_context>(cap); }
Signal_context_capability alloc_context(long imprint) override {
return call<Rpc_alloc_context>(imprint); }
void submit(Signal_context_capability receiver, unsigned cnt = 1) {
call<Rpc_submit>(receiver, cnt); }
};
}
void free_context(Signal_context_capability cap) override {
call<Rpc_free_context>(cap); }
void submit(Signal_context_capability receiver, unsigned cnt = 1) override {
call<Rpc_submit>(receiver, cnt); }
};
#endif /* _INCLUDE__CAP_SESSION__CLIENT_H_ */

View File

@ -17,16 +17,16 @@
#include <signal_session/client.h>
#include <base/connection.h>
namespace Genode {
namespace Genode { struct Signal_connection; }
struct Signal_connection : Connection<Signal_session>, Signal_session_client
{
Signal_connection()
:
Connection<Signal_session>(session("ram_quota=12K")),
Signal_session_client(cap())
{ }
};
}
struct Genode::Signal_connection : Connection<Signal_session>, Signal_session_client
{
Signal_connection()
:
Connection<Signal_session>(session("ram_quota=12K")),
Signal_session_client(cap())
{ }
};
#endif /* _INCLUDE__CAP_SESSION__CONNECTION_H_ */

View File

@ -24,75 +24,76 @@ namespace Genode {
class Signal_context;
class Signal_receiver;
typedef Capability<Signal_receiver> Signal_receiver_capability;
typedef Capability<Signal_context> Signal_context_capability;
typedef Capability<Signal_source> Signal_source_capability;
struct Signal_session : Session
{
static const char *service_name() { return "SIGNAL"; }
virtual ~Signal_session() { }
class Out_of_metadata : public Exception { };
/**
* Request capability for the signal-source interface
*/
virtual Signal_source_capability signal_source() = 0;
/**
* Allocate signal context
*
* \param imprint opaque value that gets delivered with signals
* originating from the allocated signal-context
* capability
* \return new signal-context capability
* \throw Out_of_metadata
*/
virtual Signal_context_capability alloc_context(long imprint) = 0;
/**
* Free signal-context
*
* \param cap capability of signal-context to release
*/
virtual void free_context(Signal_context_capability cap) = 0;
/**
* Submit signals to the specified signal context
*
* \param context signal destination
* \param cnt number of signals to submit at once
*
* Note that the 'context' argument does not necessarily belong to
* the signal session. Normally, it is a capability obtained from
* a potentially untrusted source. Because we cannot trust this
* capability, signals are not submitted by invoking 'cap' directly
* but by using it as argument to our trusted signal-session
* interface. Otherwise, a potential signal receiver could supply
* a capability with a blocking interface to compromise the
* nonblocking behaviour of the submit function.
*/
virtual void submit(Signal_context_capability context,
unsigned cnt = 1) = 0;
/*********************
** RPC declaration **
*********************/
GENODE_RPC(Rpc_signal_source, Signal_source_capability, signal_source);
GENODE_RPC_THROW(Rpc_alloc_context, Signal_context_capability, alloc_context,
GENODE_TYPE_LIST(Out_of_metadata), long);
GENODE_RPC(Rpc_free_context, void, free_context, Signal_context_capability);
GENODE_RPC(Rpc_submit, void, submit, Signal_context_capability, unsigned);
GENODE_RPC_INTERFACE(Rpc_submit, Rpc_signal_source, Rpc_alloc_context,
Rpc_free_context);
};
struct Signal_session;
}
struct Genode::Signal_session : Session
{
static const char *service_name() { return "SIGNAL"; }
virtual ~Signal_session() { }
class Out_of_metadata : public Exception { };
/**
* Request capability for the signal-source interface
*/
virtual Signal_source_capability signal_source() = 0;
/**
* Allocate signal context
*
* \param imprint opaque value that gets delivered with signals
* originating from the allocated signal-context
* capability
* \return new signal-context capability
* \throw Out_of_metadata
*/
virtual Signal_context_capability alloc_context(long imprint) = 0;
/**
* Free signal-context
*
* \param cap capability of signal-context to release
*/
virtual void free_context(Signal_context_capability cap) = 0;
/**
* Submit signals to the specified signal context
*
* \param context signal destination
* \param cnt number of signals to submit at once
*
* Note that the 'context' argument does not necessarily belong to
* the signal session. Normally, it is a capability obtained from
* a potentially untrusted source. Because we cannot trust this
* capability, signals are not submitted by invoking 'cap' directly
* but by using it as argument to our trusted signal-session
* interface. Otherwise, a potential signal receiver could supply
* a capability with a blocking interface to compromise the
* nonblocking behaviour of the submit function.
*/
virtual void submit(Signal_context_capability context,
unsigned cnt = 1) = 0;
/*********************
** RPC declaration **
*********************/
GENODE_RPC(Rpc_signal_source, Signal_source_capability, signal_source);
GENODE_RPC_THROW(Rpc_alloc_context, Signal_context_capability, alloc_context,
GENODE_TYPE_LIST(Out_of_metadata), long);
GENODE_RPC(Rpc_free_context, void, free_context, Signal_context_capability);
GENODE_RPC(Rpc_submit, void, submit, Signal_context_capability, unsigned);
GENODE_RPC_INTERFACE(Rpc_submit, Rpc_signal_source, Rpc_alloc_context,
Rpc_free_context);
};
#endif /* _INCLUDE__CAP_SESSION__CAP_SESSION_H_ */
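
Editor's note: for the submit path discussed in the comment above, a rough client-side sketch: allocate a context, submit through the trusted session interface, and free the context again. The imprint value and the surrounding function are made up; 'Signal_connection' is the convenience wrapper shown earlier in this diff.

#include <signal_session/connection.h>

static void submit_sketch()
{
	using namespace Genode;

	/* opens a "SIGNAL" session, donating 12K of ram quota */
	static Signal_connection signal;

	/* allocate a context whose signals carry the imprint 42 */
	Signal_context_capability ctx = signal.alloc_context(42);

	/*
	 * Deliver one signal via the trusted session interface instead of
	 * invoking the (potentially untrusted) context capability directly
	 */
	signal.submit(ctx, 1);

	signal.free_context(ctx);
}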

View File

@ -21,55 +21,55 @@
#include <base/rpc.h>
namespace Genode {
namespace Genode { struct Signal_source; }
/**
* Blocking part of the signal-session interface
*
* The blocking 'wait_for_signal()' function cannot be part of the
* signal-session interface because otherwise, context allocations or
* signal submissions would not be possible while blocking for signals.
* Therefore, the blocking part is implemented as a separate interface,
* which can be used by an independent thread.
*/
struct Genode::Signal_source
{
class Signal
{
private:
long _imprint;
int _num;
public:
Signal(long imprint, int num) :
_imprint(imprint),
_num(num)
{ }
Signal() : _imprint(0), _num(0) { }
long imprint() { return _imprint; }
int num() { return _num; }
};
virtual ~Signal_source() { }
/**
* Blocking part of the signal-session interface
*
* The blocking 'wait_for_signal()' function cannot be part of the
* signal-session interface because otherwise, context allocations or
* signal submissions would not be possible while blocking for signals.
* Therefore, the blocking part is implemented as a separate interface,
* which can be used by an independent thread.
* Wait for signal
*/
struct Signal_source
{
class Signal
{
private:
long _imprint;
int _num;
public:
Signal(long imprint, int num) :
_imprint(imprint),
_num(num)
{ }
Signal() : _imprint(0), _num(0) { }
long imprint() { return _imprint; }
int num() { return _num; }
};
virtual ~Signal_source() { }
/**
* Wait for signal
*/
virtual Signal wait_for_signal() = 0;
virtual Signal wait_for_signal() = 0;
/*********************
** RPC declaration **
*********************/
/*********************
** RPC declaration **
*********************/
GENODE_RPC(Rpc_wait_for_signal, Signal, wait_for_signal);
GENODE_RPC_INTERFACE(Rpc_wait_for_signal);
};
}
GENODE_RPC(Rpc_wait_for_signal, Signal, wait_for_signal);
GENODE_RPC_INTERFACE(Rpc_wait_for_signal);
};
#endif /* _INCLUDE__SIGNAL_SESSION__SOURCE_H_ */
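
Editor's note: to illustrate the independent thread mentioned in the comment above, a minimal sketch of a blocking receive loop built on 'Signal_source_client'. The function name is hypothetical, and the source capability is assumed to come from 'Signal_session::signal_source()'.

#include <signal_session/source_client.h>
#include <base/printf.h>

/* hypothetical loop run by a dedicated thread */
static void signal_wait_loop(Genode::Capability<Genode::Signal_source> source_cap)
{
	/* 'source_cap' corresponds to the 'Signal_source_capability' typedef */
	Genode::Signal_source_client source(source_cap);

	for (;;) {

		/* block until the next signal arrives */
		Genode::Signal_source::Signal s = source.wait_for_signal();

		/* the imprint identifies the context, 'num' the batched count */
		PLOG("signal: imprint=%ld cnt=%d", s.imprint(), s.num());
	}
}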

View File

@ -19,15 +19,15 @@
#include <signal_session/source.h>
#include <base/rpc_client.h>
namespace Genode {
namespace Genode { struct Signal_source_client; }
struct Signal_source_client : Rpc_client<Signal_source>
{
Signal_source_client(Signal_source_capability signal_source)
: Rpc_client<Signal_source>(signal_source) { }
Signal wait_for_signal() { return call<Rpc_wait_for_signal>(); }
};
}
struct Genode::Signal_source_client : Rpc_client<Signal_source>
{
Signal_source_client(Signal_source_capability signal_source)
: Rpc_client<Signal_source>(signal_source) { }
Signal wait_for_signal() override { return call<Rpc_wait_for_signal>(); }
};
#endif /* _INCLUDE__SIGNAL_SESSION__SOURCE_CLIENT_H_ */

View File

@ -21,8 +21,8 @@
#include <signal_session/source.h>
#include <base/rpc_server.h>
namespace Genode {
struct Signal_source_rpc_object : Rpc_object<Signal_source> { };
}
namespace Genode { struct Signal_source_rpc_object; }
struct Genode::Signal_source_rpc_object : Rpc_object<Signal_source> { };
#endif /* _INCLUDE__SIGNAL_SESSION__SOURCE_RPC_OBJECT_H_ */

Some files were not shown because too many files have changed in this diff.