mirror of
https://github.com/genodelabs/genode.git
synced 2025-01-13 16:29:54 +00:00
a5ea6765d1
This commit fixes several issues that were triggered e.g. by the 'noux_tool_chain' run-script (fix #208 in part). The following problems are tackled: * Don't reference-count capability selectors within a task that are actually controlled by core (all beneath 0x200000), because it's undecidable which "version" of a capability selector we currently use, e.g. when a thread gets destroyed and a new one gets created immediately, some other thread might have a Native_capability pointing to the already destroyed thread's gate capability-slot, which is now a new valid one (the one of the new thread) * In core we cannot invalidate and remove a capability from the so-called Cap_map before each reference to it is destroyed, so don't do this in Cap_session_component::free, but only reference-decrement within there; the actual removal can only be done in Cap_map::remove. Because core also has to invalidate a capability to be removed in all protection domains, we have to implement a core-specific Cap_map::remove method * When a capability gets inserted into the Cap_map, and we detect an old invalid entry with the same id in the tree, don't just overmap that invalid entry (as there exist remaining references to it), but remove it from the tree and allocate a new entry. * Use the Cap_session_component interface to free a Pager_object when it gets dissolved, as it's also used for allocation
125 lines
3.1 KiB
C++
125 lines
3.1 KiB
C++
/*
|
|
* \brief Capability index allocator for Fiasco.OC.
|
|
* \author Stefan Kalkowski
|
|
* \date 2012-02-16
|
|
*/
|
|
|
|
/*
|
|
* Copyright (C) 2012 Genode Labs GmbH
|
|
*
|
|
* This file is part of the Genode OS framework, which is distributed
|
|
* under the terms of the GNU General Public License version 2.
|
|
*/
|
|
|
|
#ifndef _INCLUDE__BASE__CAP_ALLOC_H_
|
|
#define _INCLUDE__BASE__CAP_ALLOC_H_
|
|
|
|
#include <base/cap_map.h>
|
|
#include <base/native_types.h>
|
|
|
|
namespace Genode {

	/**
	 * Cap_index_allocator_tpl implements the Cap_index_allocator for Fiasco.OC.
	 *
	 * It's designed as a template because we need two distinguished versions
	 * for core and non-core processes with respect to dimensioning. Moreover,
	 * core needs more information within a Cap_index object, than the base
	 * class provides.
	 *
	 * \param T Cap_index specialization to use
	 * \param SZ size of Cap_index array used by the allocator
	 */
	template <typename T, unsigned SZ>
	class Cap_index_allocator_tpl : public Cap_index_allocator
	{
		private:

			Spin_lock _lock; /* used very early in initialization,
			                    where normal lock isn't feasible */

			enum {
				/* everything above START_IDX is managed by core */
				/*
				 * NOTE(review): 'alloc_range' starts allocating at
				 * START_IDX and 'static_idx' reports true for slots
				 * *below* it, so the comment above looks inverted
				 * (slots below START_IDX appear to be the statically
				 * assigned, core-controlled ones) — confirm
				 */
				START_IDX = Fiasco::USER_BASE_CAP >> Fiasco::L4_CAP_SHIFT
			};

		protected:

			/*
			 * Raw backing store for the SZ Cap_index objects; objects are
			 * constructed into it via placement new, never heap-allocated.
			 */
			unsigned char _data[SZ*sizeof(T)];

			/* typed view onto '_data' */
			T* _indices;

		public:

			/**
			 * Constructor
			 *
			 * Zero-initializes the backing store; 'alloc_range' relies on
			 * zeroed slots reporting 'used() == false'.
			 */
			Cap_index_allocator_tpl() : _indices(reinterpret_cast<T*>(&_data)) {
				memset(&_data, 0, sizeof(_data)); }


			/***********************************
			 ** Cap_index_allocator interface **
			 ***********************************/

			/**
			 * Allocate 'cnt' consecutive, unused Cap_index slots
			 *
			 * \param cnt  number of consecutive slots needed
			 * \return     pointer to the first Cap_index of the range,
			 *             or 0 if no sufficiently large hole was found
			 */
			Cap_index* alloc_range(size_t cnt)
			{
				Lock_guard<Spin_lock> guard(_lock);

				/*
				 * iterate through array and find unused, consecutive entries
				 *
				 * The inner loop leaves 'j' at the offset of the first used
				 * slot, so the outer loop can skip past it via 'i+=j+1'.
				 *
				 * NOTE(review): the bound '(i+cnt) < SZ' rejects a hole that
				 * ends exactly at the last slot (i+cnt == SZ would still be
				 * in range) — confirm whether that slot is reserved on
				 * purpose or this is an off-by-one
				 */
				for (unsigned i = START_IDX, j = 0; (i+cnt) < SZ; i+=j+1, j=0) {
					for (; j < cnt; j++)
						if (_indices[i+j].used())
							break;

					/* if we found a fitting hole, initialize the objects */
					if (j == cnt) {
						for (j = 0; j < cnt; j++)
							new (&_indices[i+j]) T();
						return &_indices[i];
					}
				}
				return 0;
			}

			/**
			 * Construct a Cap_index at a caller-chosen capability-space address
			 *
			 * \param addr  kernel capability address selecting the slot
			 * \throw  Index_out_of_bounds  if 'addr' maps outside the array
			 * \return pointer to the freshly constructed Cap_index
			 */
			Cap_index* alloc(addr_t addr)
			{
				Lock_guard<Spin_lock> guard(_lock);

				/*
				 * construct the Cap_index pointer from the given
				 * address in capability space
				 */
				T* obj = reinterpret_cast<T*>(kcap_to_idx(addr));

				if (obj < &_indices[0] || obj >= &_indices[SZ])
					throw Index_out_of_bounds();

				return new (obj) T();
			}

			/**
			 * Destruct 'cnt' consecutive Cap_index objects starting at 'idx'
			 *
			 * \throw Index_out_of_bounds  if any object of the range lies
			 *                             outside the array
			 */
			void free(Cap_index* idx, size_t cnt)
			{
				Lock_guard<Spin_lock> guard(_lock);

				T* obj = static_cast<T*>(idx);
				for (size_t i = 0; i < cnt; obj++, i++) {
					/* range check given pointer address */
					if (obj < &_indices[0] || obj >= &_indices[SZ])
						throw Index_out_of_bounds();

					/*
					 * NOTE(review): the objects live inside '_data' via
					 * placement new, so this 'delete' must resolve to an
					 * operator delete that does not release heap memory —
					 * confirm against the Cap_index declaration
					 */
					delete obj;
				}
			}

			/* translate a Cap_index pointer into its kernel capability address */
			addr_t idx_to_kcap(Cap_index *idx) {
				return ((T*)idx - &_indices[0]) << Fiasco::L4_CAP_SHIFT;
			}

			/* translate a kernel capability address into its Cap_index slot
			   (no bounds check here — callers such as 'alloc' validate) */
			Cap_index* kcap_to_idx(addr_t kcap) {
				return &_indices[kcap >> Fiasco::L4_CAP_SHIFT]; }

			/* true if 'idx' denotes a slot below the dynamically managed range */
			bool static_idx(Cap_index *idx) {
				return ((T*)idx) < &_indices[START_IDX]; }
	};
}
|
|
|
|
#endif /* _INCLUDE__BASE__CAP_ALLOC_H_ */
|