Rework Region_map interface

- Remove exceptions
- Use 'Attr' struct for attach arguments
- Let 'attach' return 'Range' instead of 'Local_addr'
- Renamed 'Region_map::State' to 'Region_map::Fault'

Issue #5245
Fixes #5070
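The hunks below migrate every call site from the old positional 'attach' signature to the new 'Attr' argument struct and the 'Attach_result' return value. As orientation, here is a minimal client-side sketch assembled from the patterns that appear in the diff itself; 'env' and 'ds' stand for an already available Genode::Env and a valid Dataspace_capability, and the interpretation of a zero 'size' (whole dataspace) follows the core implementations shown below.

	/* old interface: positional arguments, errors reported as exceptions */
	// char *ptr = (char *)env.rm().attach(ds);

	/* new interface: 'Attr' aggregate, result inspected via 'with_result' */
	env.rm().attach(ds, Genode::Region_map::Attr {
		.size       = { },   /* 0 selects the whole dataspace */
		.offset     = { },
		.use_at     = { },
		.at         = { },
		.executable = { },
		.writeable  = true
	}).with_result(
		[&] (Genode::Region_map::Range const range) {
			/* range.start and range.num_bytes describe the attached region */
		},
		[&] (Genode::Region_map::Attach_error e) {
			if (e == Genode::Region_map::Attach_error::REGION_CONFLICT)
				Genode::error("attach failed: region conflict");
		});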
@@ -152,10 +152,10 @@ class Core::Platform_thread : Interface
 			Affinity::Location affinity() const;
 
 			/**
-			 * Make thread to vCPU
+			 * Turn thread into vCPU
 			 */
 			Foc::l4_cap_idx_t setup_vcpu(unsigned, Cap_mapping const &,
-			                             Cap_mapping &, Region_map::Local_addr &);
+			                             Cap_mapping &, addr_t &);
 
 
 			/************************

@@ -49,7 +49,7 @@ struct Core::Vcpu : Rpc_object<Vm_session::Native_vcpu, Vcpu>
 		Vcpu_id_allocator &_vcpu_ids;
 		Cap_mapping _recall { true };
 		Foc::l4_cap_idx_t _task_index_client { };
-		Region_map::Local_addr _foc_vcpu_state { };
+		addr_t _foc_vcpu_state { };
 
 	public:
 

@@ -65,7 +65,7 @@ struct Core::Vcpu : Rpc_object<Vm_session::Native_vcpu, Vcpu>
 		*******************************/
 
 		Foc::l4_cap_idx_t task_index() const { return _task_index_client; }
-		Region_map::Local_addr foc_vcpu_state() const { return _foc_vcpu_state; }
+		addr_t foc_vcpu_state() const { return _foc_vcpu_state; }
 };
 
 

@@ -93,7 +93,7 @@ class Core::Vm_session_component
 		/* helpers for vm_session_common.cc */
 		void _attach_vm_memory(Dataspace_component &, addr_t, Attach_attr);
 		void _detach_vm_memory(addr_t, size_t);
-		void _with_region(Region_map::Local_addr, auto const &);
+		void _with_region(addr_t, auto const &);
 
 	protected:
 

@@ -116,9 +116,9 @@ class Core::Vm_session_component
 		*********************************/
 
 		/* used on destruction of attached dataspaces */
-		void detach(Region_map::Local_addr) override; /* vm_session_common.cc */
-		void unmap_region(addr_t, size_t) override; /* vm_session_common.cc */
-		void reserve_and_flush(Region_map::Local_addr) override; /* vm_session_common.cc */
+		void detach_at (addr_t) override;
+		void unmap_region (addr_t, size_t) override;
+		void reserve_and_flush (addr_t) override;
 
 
 		/**************************

@@ -352,7 +352,7 @@ Platform_thread::~Platform_thread()
 Foc::l4_cap_idx_t Platform_thread::setup_vcpu(unsigned const vcpu_id,
                                               Cap_mapping const &task_vcpu,
                                               Cap_mapping &vcpu_irq,
-                                              Region_map::Local_addr &vcpu_state)
+                                              addr_t &vcpu_state)
 {
 	if (!_platform_pd)
 		return Foc::L4_INVALID_CAP;

@@ -361,8 +361,7 @@ Foc::l4_cap_idx_t Platform_thread::setup_vcpu(unsigned const vcpu_id,
 		return Foc::L4_INVALID_CAP;
 
 	/* vCPU state attached by kernel syscall to client PD directly */
-	vcpu_state = Region_map::Local_addr(Platform::VCPU_VIRT_EXT_START +
-	                                    L4_PAGESIZE * vcpu_id);
+	vcpu_state = Platform::VCPU_VIRT_EXT_START + L4_PAGESIZE * vcpu_id;
 
 	l4_fpage_t const vm_page = l4_fpage(vcpu_state, L4_PAGESHIFT, L4_FPAGE_RW);
 

@@ -132,7 +132,7 @@ Vm_session_component::~Vm_session_component()
 		if (!_map.any_block_addr(&out_addr))
 			break;
 
-		detach(out_addr);
+		detach_at(out_addr);
 	}
 }
 

@@ -22,7 +22,7 @@
 
 struct Genode::Vm_session::Native_vcpu : Interface
 {
-	GENODE_RPC(Rpc_foc_vcpu_state, Region_map::Local_addr, foc_vcpu_state);
+	GENODE_RPC(Rpc_foc_vcpu_state, addr_t, foc_vcpu_state);
 	GENODE_RPC(Rpc_task_index, Foc::l4_cap_idx_t, task_index);
 
 	GENODE_RPC_INTERFACE(Rpc_task_index, Rpc_foc_vcpu_state);

@@ -90,7 +90,7 @@ struct Foc_native_vcpu_rpc : Rpc_client<Vm_session::Native_vcpu>, Noncopyable
 	Foc::l4_cap_idx_t task_index() { return call<Rpc_task_index>(); }
 
 	Foc::l4_vcpu_state_t * foc_vcpu_state() {
-		return static_cast<Foc::l4_vcpu_state_t *>(call<Rpc_foc_vcpu_state>()); }
+		return reinterpret_cast<Foc::l4_vcpu_state_t *>(call<Rpc_foc_vcpu_state>()); }
 };
 
 
@@ -23,67 +23,63 @@
 using namespace Core;
 
 
-Region_map::Local_addr
-Core_region_map::attach(Dataspace_capability ds_cap, size_t size,
-                        off_t offset, bool use_local_addr,
-                        Region_map::Local_addr, bool, bool writeable)
+Region_map::Attach_result
+Core_region_map::attach(Dataspace_capability ds_cap, Attr const &attr)
 {
-	return _ep.apply(ds_cap, [&] (Dataspace_component *ds_ptr) -> Local_addr {
+	return _ep.apply(ds_cap, [&] (Dataspace_component *ds_ptr) -> Attach_result {
 
 		if (!ds_ptr)
-			throw Invalid_dataspace();
+			return Attach_error::INVALID_DATASPACE;
 
 		Dataspace_component &ds = *ds_ptr;
 
-		if (size == 0)
-			size = ds.size();
+		size_t const size = (attr.size == 0) ? ds.size() : attr.size;
+		size_t const page_rounded_size = (size + get_page_size() - 1) & get_page_mask();
 
-		size_t page_rounded_size = (size + get_page_size() - 1) & get_page_mask();
-
-		if (use_local_addr) {
-			error("Parameter 'use_local_addr' not supported within core");
-			return nullptr;
-		}
-
-		if (offset) {
-			error("Parameter 'offset' not supported within core");
-			return nullptr;
-		}
+		/* attach attributes 'use_at' and 'offset' not supported within core */
+		if (attr.use_at || attr.offset)
+			return Attach_error::REGION_CONFLICT;
 
 		unsigned const align = get_page_size_log2();
 
 		/* allocate range in core's virtual address space */
-		Allocator::Alloc_result virt =
+		Allocator::Alloc_result const virt =
 			platform().region_alloc().alloc_aligned(page_rounded_size, align);
 
 		if (virt.failed()) {
 			error("could not allocate virtual address range in core of size ",
 			      page_rounded_size);
-			return nullptr;
+			return Attach_error::REGION_CONFLICT;
 		}
 
 		using namespace Hw;
 
 		/* map the dataspace's physical pages to corresponding virtual addresses */
-		unsigned num_pages = (unsigned)(page_rounded_size >> get_page_size_log2());
-		Page_flags const flags { (writeable && ds.writeable()) ? RW : RO,
-		                         NO_EXEC, KERN, GLOBAL,
-		                         ds.io_mem() ? DEVICE : RAM,
-		                         ds.cacheability() };
-
-		return virt.convert<Local_addr>(
-
-			[&] (void *virt_addr) -> void * {
+		unsigned const num_pages = unsigned(page_rounded_size >> get_page_size_log2());
+
+		Page_flags const flags {
+			.writeable  = (attr.writeable && ds.writeable()) ? RW : RO,
+			.executable = NO_EXEC,
+			.privileged = KERN,
+			.global     = GLOBAL,
+			.type       = ds.io_mem() ? DEVICE : RAM,
+			.cacheable  = ds.cacheability()
+		};
+
+		return virt.convert<Attach_result>(
+
+			[&] (void *virt_addr) -> Attach_result {
 				if (map_local(ds.phys_addr(), (addr_t)virt_addr, num_pages, flags))
-					return virt_addr;
+					return Range { .start = addr_t(virt_addr),
+					               .num_bytes = page_rounded_size };
 
 				platform().region_alloc().free(virt_addr, page_rounded_size);
-				return nullptr; },
+				return Attach_error::REGION_CONFLICT; },
 
 			[&] (Allocator::Alloc_error) {
-				return nullptr; });
+				return Attach_error::REGION_CONFLICT; });
 	});
 }
 
 
-void Core_region_map::detach(Local_addr) { }
+void Core_region_map::detach(addr_t) { }
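The hunk above is representative of the implementer side of the rework: core's region map no longer throws but reports failures as 'Attach_error' values and wraps success in a 'Range'. Callers that only need a raw pointer can collapse the Attempt-style result with 'convert', the same idiom this commit uses in 'create_vcpu' and in the tests. A minimal sketch, assuming 'rm', 'ds', and 'attr' are already set up:

	char * const ptr = rm.attach(ds, attr).convert<char *>(
		[&] (Genode::Region_map::Range r)      { return (char *)r.start; },
		[&] (Genode::Region_map::Attach_error) { return nullptr; });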
@@ -108,7 +108,14 @@ Platform_thread::Platform_thread(size_t const quota,
 		error("failed to allocate UTCB");
 		throw Out_of_ram();
 	}
-	_utcb_core_addr = (Native_utcb *)core_env().rm_session()->attach(_utcb);
+
+	Region_map::Attr attr { };
+	attr.writeable = true;
+	core_env().rm_session()->attach(_utcb, attr).with_result(
+		[&] (Region_map::Range range) {
+			_utcb_core_addr = (Native_utcb *)range.start; },
+		[&] (Region_map::Attach_error) {
+			error("failed to attach UTCB of new thread within core"); });
 }
 
 

@@ -144,7 +144,7 @@ Vm_session_component::~Vm_session_component()
 		if (!_map.any_block_addr(&out_addr))
 			break;
 
-		detach(out_addr);
+		detach_at(out_addr);
 	}
 
 	/* free region in allocator */

@@ -92,7 +92,7 @@ Vm_session_component::~Vm_session_component()
 		if (!_map.any_block_addr(&out_addr))
 			break;
 
-		detach(out_addr);
+		detach_at(out_addr);
 	}
 
 	/* free region in allocator */

@@ -174,7 +174,7 @@ Vm_session_component::~Vm_session_component()
 		if (!_map.any_block_addr(&out_addr))
 			break;
 
-		detach(out_addr);
+		detach_at(out_addr);
 	}
 
 	/* free region in allocator */

@@ -42,7 +42,7 @@ void Vm_session_component::Vcpu::exception_handler(Signal_context_capability han
 
 	unsigned const cpu = location.xpos();
 
-	if (!kobj.create(cpu, ds_addr, Capability_space::capid(handler), id))
+	if (!kobj.create(cpu, (void *)ds_addr, Capability_space::capid(handler), id))
 		warning("Cannot instantiate vm kernel object, invalid signal context?");
 }
 

@@ -65,7 +65,18 @@ Capability<Vm_session::Native_vcpu> Vm_session_component::create_vcpu(Thread_cap
 
 	try {
 		vcpu.ds_cap = _constrained_md_ram_alloc.alloc(_ds_size(), Cache::UNCACHED);
-		vcpu.ds_addr = _alloc_vcpu_data(_region_map.attach(vcpu.ds_cap));
+
+		Region_map::Attr attr { };
+		attr.writeable = true;
+		vcpu.ds_addr = _region_map.attach(vcpu.ds_cap, attr).convert<addr_t>(
+			[&] (Region_map::Range range) { return _alloc_vcpu_data(range.start); },
+			[&] (Region_map::Attach_error) -> addr_t {
+				error("failed to attach VCPU data within core");
+				if (vcpu.ds_cap.valid())
+					_constrained_md_ram_alloc.free(vcpu.ds_cap);
+				_vcpus[_vcpu_id_alloc].destruct();
+				return 0;
+			});
 	} catch (...) {
 		if (vcpu.ds_cap.valid())
 			_constrained_md_ram_alloc.free(vcpu.ds_cap);

@@ -55,9 +55,9 @@ class Core::Vm_session_component
 			Kernel::Vm::Identity &id;
 			Rpc_entrypoint &ep;
 			Ram_dataspace_capability ds_cap { };
-			Region_map::Local_addr ds_addr { nullptr };
-			Kernel_object<Kernel::Vm> kobj {};
-			Affinity::Location location {};
+			addr_t ds_addr { };
+			Kernel_object<Kernel::Vm> kobj { };
+			Affinity::Location location { };
 
 			Vcpu(Kernel::Vm::Identity &id, Rpc_entrypoint &ep) : id(id), ep(ep)
 			{

@@ -94,14 +94,13 @@ class Core::Vm_session_component
 		static size_t _ds_size();
 		static size_t _alloc_vcpu_data(Genode::addr_t ds_addr);
 
-		void * _alloc_table();
+		void *_alloc_table();
 		void _attach(addr_t phys_addr, addr_t vm_addr, size_t size);
 
 		/* helpers for vm_session_common.cc */
-		void _attach_vm_memory(Dataspace_component &, addr_t,
-		                       Attach_attr);
+		void _attach_vm_memory(Dataspace_component &, addr_t, Attach_attr);
 		void _detach_vm_memory(addr_t, size_t);
-		void _with_region(Region_map::Local_addr, auto const &);
+		void _with_region(addr_t, auto const &);
 
 	protected:
 

@@ -119,13 +118,15 @@ class Core::Vm_session_component
 		               Trace::Source_registry &);
 		~Vm_session_component();
 
 
 		/*********************************
 		** Region_map_detach interface **
 		*********************************/
 
-		void detach(Region_map::Local_addr) override; /* vm_session_common.cc */
-		void unmap_region(addr_t, size_t) override; /* vm_session_common.cc */
-		void reserve_and_flush(Region_map::Local_addr) override; /* vm_session_common.cc */
+		void detach_at (addr_t) override;
+		void unmap_region (addr_t, size_t) override;
+		void reserve_and_flush (addr_t) override;
 
 
 		/**************************
 		** Vm session interface **

@@ -67,14 +67,18 @@ void Thread::_init_platform_thread(size_t weight, Type type)
 	size_t const utcb_size = sizeof(Native_utcb);
 	addr_t const stack_area = stack_area_virtual_base();
 	addr_t const utcb_new = (addr_t)&_stack->utcb() - stack_area;
-	Region_map * const rm = env_stack_area_region_map;
 
 	/* remap initial main-thread UTCB according to stack-area spec */
-	try { rm->attach_at(Hw::_main_thread_utcb_ds, utcb_new, utcb_size); }
-	catch(...) {
-		error("failed to re-map UTCB");
-		while (1) ;
-	}
+	if (env_stack_area_region_map->attach(Hw::_main_thread_utcb_ds, {
+		.size       = utcb_size,
+		.offset     = { },
+		.use_at     = true,
+		.at         = utcb_new,
+		.executable = { },
+		.writeable  = true
+	}).failed())
+		error("failed to attach UTCB to local address space");
 
 	/* adjust initial object state in case of a main thread */
 	native_thread().cap = Hw::_main_thread_cap;
 	_thread_cap = main_thread_cap();

@@ -108,18 +112,26 @@ Thread::Start_result Thread::start()
 
 			/* attach UTCB at top of stack */
 			size_t const size = sizeof(_stack->utcb());
-			addr_t dst = Stack_allocator::addr_to_base(_stack) +
-			             stack_virtual_size() - size - stack_area_virtual_base();
-			try {
-				env_stack_area_region_map->attach_at(cpu_thread.utcb(), dst, size);
-			} catch (...) {
-				error("failed to attach userland stack");
-				sleep_forever();
-			}
+			return env_stack_area_region_map->attach(cpu_thread.utcb(), {
+				.size       = size,
+				.offset     = { },
+				.use_at     = true,
+				.at         = Stack_allocator::addr_to_base(_stack)
+				            + stack_virtual_size() - size - stack_area_virtual_base(),
+				.executable = { },
+				.writeable  = true
+			}).convert<Start_result>(
+				[&] (Region_map::Range) {
 			/* start execution with initial IP and aligned SP */
 			cpu_thread.start((addr_t)_thread_start, _stack->top());
 			return Start_result::OK;
 			},
+				[&] (Region_map::Attach_error) {
+					error("failed to attach userland stack");
+					return Start_result::DENIED;
+				}
+			);
+		},
 		[&] (Cpu_session::Create_thread_error) { return Start_result::DENIED; }
 	);
 }

@@ -52,15 +52,14 @@ class Core::Region_map_component : public Rpc_object<Region_map>,
 		void add_client(Rm_client &) { }
 		void remove_client(Rm_client &) { }
 
-		Local_addr attach(Dataspace_capability, size_t, off_t, bool,
-		                  Local_addr, bool, bool) override {
-			return (addr_t)0; }
+		Attach_result attach(Dataspace_capability, Attr const &) override {
+			return Attach_error::REGION_CONFLICT; }
 
-		void detach(Local_addr) override { }
+		void detach(addr_t) override { }
 
 		void fault_handler(Signal_context_capability) override { }
 
-		State state() override { return State(); }
+		Fault fault() override { return { }; }
 
 		Dataspace_capability dataspace() override { return Dataspace_capability(); }
 

@@ -42,38 +42,35 @@ class Stack_area_region_map : public Genode::Region_map
 		/**
 		 * Attach backing store to stack area
 		 */
-		Local_addr attach(Genode::Dataspace_capability, Genode::size_t size,
-		                  Genode::off_t, bool, Local_addr local_addr, bool,
-		                  bool) override
+		Attach_result attach(Genode::Dataspace_capability, Attr const &attr) override
 		{
 			using namespace Genode;
 
 			/* convert stack-area-relative to absolute virtual address */
-			addr_t addr = (addr_t)local_addr + stack_area_virtual_base();
+			addr_t const addr = attr.at + stack_area_virtual_base();
 
 			/* use anonymous mmap for allocating stack backing store */
 			int flags = MAP_FIXED | MAP_ANONYMOUS | MAP_PRIVATE;
 			int prot = PROT_READ | PROT_WRITE;
-			void *res = lx_mmap((void*)addr, size, prot, flags, -1, 0);
+			void *res = lx_mmap((void*)addr, attr.size, prot, flags, -1, 0);
 
 			if ((addr_t)res != addr)
-				throw Region_conflict();
+				return Attach_error::REGION_CONFLICT;
 
-			return local_addr;
+			return Range { .start = attr.at, .num_bytes = attr.size };
 		}
 
-		void detach(Local_addr local_addr) override
+		void detach(Genode::addr_t at) override
 		{
-			Genode::warning("stack area detach from ", (void*)local_addr,
+			Genode::warning("stack area detach from ", (void*)at,
 			                " - not implemented");
 		}
 
 		void fault_handler(Genode::Signal_context_capability) override { }
 
-		State state() override { return State(); }
+		Fault fault() override { return { }; }
 
-		Genode::Dataspace_capability dataspace() override {
-			return Genode::Dataspace_capability(); }
+		Genode::Dataspace_capability dataspace() override { return { }; }
 };
 
 

@@ -55,7 +55,7 @@ struct Genode::Local_rm_session : Rm_session, Local_session
 		/* detach sub region map from local address space */
 		Region_map_mmap &rm = static_cast<Region_map_mmap &>(*rm_ptr);
 		rm.with_attached_sub_rm_base_ptr([&] (void *base_ptr) {
-			_local_rm.detach(base_ptr); });
+			_local_rm.detach(addr_t(base_ptr)); });
 
 		Genode::destroy(_md_alloc, &rm);
 	}

@@ -126,54 +126,13 @@ class Genode::Region_map_mmap : public Region_map, public Dataspace
 		** Region map interface **
 		**************************/
 
-		struct Attach_attr
-		{
-			size_t size;
-			off_t offset;
-			bool use_local_addr;
-			void *local_addr;
-			bool executable;
-			bool writeable;
-		};
-
-		enum class Attach_error
-		{
-			INVALID_DATASPACE, REGION_CONFLICT, OUT_OF_RAM, OUT_OF_CAPS
-		};
-
-		using Attach_result = Attempt<Local_addr, Attach_error>;
-
-		Attach_result attach(Dataspace_capability, Attach_attr const &);
-
-		Local_addr attach(Dataspace_capability ds, size_t size, off_t offset,
-		                  bool use_local_addr, Local_addr local_addr,
-		                  bool executable, bool writeable) override
-		{
-			Attach_attr const attr { .size = size,
-			                         .offset = offset,
-			                         .use_local_addr = use_local_addr,
-			                         .local_addr = local_addr,
-			                         .executable = executable,
-			                         .writeable = writeable };
-
-			return attach(ds, attr).convert<Local_addr>(
-				[&] (Local_addr local_addr) { return local_addr; },
-				[&] (Attach_error e) -> Local_addr {
-					switch (e) {
-					case Attach_error::INVALID_DATASPACE: throw Invalid_dataspace();
-					case Attach_error::REGION_CONFLICT:   throw Region_conflict();
-					case Attach_error::OUT_OF_RAM:        throw Out_of_ram();
-					case Attach_error::OUT_OF_CAPS:       throw Out_of_caps();
-					}
-					throw Region_conflict();
-				});
-		}
-
-		void detach(Local_addr) override;
+		Attach_result attach(Dataspace_capability, Attr const &) override;
+
+		void detach(addr_t) override;
 
 		void fault_handler(Signal_context_capability) override { }
 
-		State state() override { return State(); }
+		Fault fault() override { return { }; }
 
 
 		/*************************

@@ -28,7 +28,7 @@ class Genode::Region
 	private:
 
 		addr_t _start { 0 };
-		off_t _offset { 0 };
+		addr_t _offset { 0 };
 		Dataspace_capability _ds { };
 		size_t _size { 0 };
 

@@ -41,7 +41,7 @@ class Genode::Region
 
 		Region() { }
 
-		Region(addr_t start, off_t offset, Dataspace_capability ds, size_t size)
+		Region(addr_t start, addr_t offset, Dataspace_capability ds, size_t size)
 		: _start(start), _offset(offset), _ds(ds), _size(size) { }
 
 		bool used() const { return _size > 0; }

@@ -49,10 +49,8 @@ static inline void flush_stack_area()
 	Genode::size_t const size = stack_area_virtual_size();
 
 	int ret;
-	if ((ret = lx_munmap(base, size)) < 0) {
+	if ((ret = lx_munmap(base, size)) < 0)
 		error(__func__, ": failed ret=", ret);
-		throw Region_map::Region_conflict();
-	}
 }
 
 

@@ -71,10 +69,8 @@ static inline Genode::addr_t reserve_stack_area()
 	if (addr_in != addr_out) {
 		lx_munmap((void *)addr_out, size);
 		error(__func__, ": failed addr_in=", addr_in, " addr_out=", addr_out);
-		throw Region_map::Region_conflict();
 	}
-
-	return (addr_t) addr_out;
+	return (addr_t)addr_out;
 }
 
 #endif /* _INCLUDE__BASE__INTERNAL__STACK_AREA_H_ */

@@ -23,9 +23,14 @@ using namespace Genode;
 
 void Platform::_attach_stack_area()
 {
-	pd._address_space.attach_at(pd._stack_area.dataspace(),
-	                            stack_area_virtual_base(),
-	                            stack_area_virtual_size());
+	pd._address_space.attach(pd._stack_area.dataspace(), Region_map::Attr {
+		.size       = stack_area_virtual_size(),
+		.offset     = { },
+		.use_at     = true,
+		.at         = stack_area_virtual_base(),
+		.executable = { },
+		.writeable  = true
+	});
 
 	env_stack_area_region_map = &pd._stack_area;
 	env_stack_area_ram_allocator = &pd;
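The hunk above expresses a former 'attach_at' call through the new interface by setting 'use_at' and 'at' in the 'Attr' struct; other call sites in this commit check the outcome via 'failed()'. A call site that wants to keep an attach_at-like shape could wrap the pattern in a local helper. The following is a hypothetical sketch only; 'attach_at_compat' is not part of the interface introduced by this commit, and it assumes a writeable, non-executable mapping:

	static bool attach_at_compat(Genode::Region_map &rm,
	                             Genode::Dataspace_capability ds,
	                             Genode::addr_t at, Genode::size_t size = 0)
	{
		return !rm.attach(ds, Genode::Region_map::Attr {
			.size = size, .offset = { },
			.use_at = true, .at = at,
			.executable = { }, .writeable = true
		}).failed();
	}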
@ -40,19 +40,14 @@ Region_map_client::Region_map_client(Capability<Region_map> session)
|
|||||||
: Rpc_client<Region_map>(session) { }
|
: Rpc_client<Region_map>(session) { }
|
||||||
|
|
||||||
|
|
||||||
Region_map::Local_addr
|
Region_map::Attach_result
|
||||||
Region_map_client::attach(Dataspace_capability ds, size_t size,
|
Region_map_client::attach(Dataspace_capability ds, Attr const &attr)
|
||||||
off_t offset, bool use_local_addr,
|
|
||||||
Region_map::Local_addr local_addr,
|
|
||||||
bool executable, bool writeable)
|
|
||||||
{
|
{
|
||||||
return _local(rpc_cap())->attach(ds, size, offset, use_local_addr,
|
return _local(rpc_cap())->attach(ds, attr);
|
||||||
local_addr, executable, writeable);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
void Region_map_client::detach(Local_addr local_addr) {
|
void Region_map_client::detach(addr_t at) { return _local(rpc_cap())->detach(at); }
|
||||||
return _local(rpc_cap())->detach(local_addr); }
|
|
||||||
|
|
||||||
|
|
||||||
void Region_map_client::fault_handler(Signal_context_capability /*handler*/)
|
void Region_map_client::fault_handler(Signal_context_capability /*handler*/)
|
||||||
@ -66,7 +61,7 @@ void Region_map_client::fault_handler(Signal_context_capability /*handler*/)
|
|||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
Region_map::State Region_map_client::state() { return _local(rpc_cap())->state(); }
|
Region_map::Fault Region_map_client::fault() { return _local(rpc_cap())->fault(); }
|
||||||
|
|
||||||
|
|
||||||
Dataspace_capability Region_map_client::dataspace()
|
Dataspace_capability Region_map_client::dataspace()
|
||||||
|
@ -74,11 +74,11 @@ static Mutex &mutex()
|
|||||||
|
|
||||||
|
|
||||||
Region_map_mmap::Reserve_local_result
|
Region_map_mmap::Reserve_local_result
|
||||||
Region_map_mmap::_reserve_local(bool use_local_addr, addr_t local_addr, size_t size)
|
Region_map_mmap::_reserve_local(bool use_at, addr_t at, size_t size)
|
||||||
{
|
{
|
||||||
/* special handling for stack area */
|
/* special handling for stack area */
|
||||||
if (use_local_addr
|
if (use_at
|
||||||
&& local_addr == stack_area_virtual_base()
|
&& at == stack_area_virtual_base()
|
||||||
&& size == stack_area_virtual_size()) {
|
&& size == stack_area_virtual_size()) {
|
||||||
|
|
||||||
/*
|
/*
|
||||||
@ -96,19 +96,19 @@ Region_map_mmap::_reserve_local(bool use_local_addr, addr_t local_addr, size_t s
|
|||||||
}
|
}
|
||||||
} inst;
|
} inst;
|
||||||
|
|
||||||
return local_addr;
|
return at;
|
||||||
}
|
}
|
||||||
|
|
||||||
int const flags = MAP_ANONYMOUS | MAP_PRIVATE;
|
int const flags = MAP_ANONYMOUS | MAP_PRIVATE;
|
||||||
int const prot = PROT_NONE;
|
int const prot = PROT_NONE;
|
||||||
void * const addr_in = use_local_addr ? (void *)local_addr : 0;
|
void * const addr_in = use_at ? (void *)at : 0;
|
||||||
void * const addr_out = lx_mmap(addr_in, size, prot, flags, -1, 0);
|
void * const addr_out = lx_mmap(addr_in, size, prot, flags, -1, 0);
|
||||||
|
|
||||||
/* reserve at local address failed - unmap incorrect mapping */
|
/* reserve at local address failed - unmap incorrect mapping */
|
||||||
if (use_local_addr && addr_in != addr_out)
|
if (use_at && addr_in != addr_out)
|
||||||
lx_munmap((void *)addr_out, size);
|
lx_munmap((void *)addr_out, size);
|
||||||
|
|
||||||
if ((use_local_addr && addr_in != addr_out)
|
if ((use_at && addr_in != addr_out)
|
||||||
|| (((long)addr_out < 0) && ((long)addr_out > -4095))) {
|
|| (((long)addr_out < 0) && ((long)addr_out > -4095))) {
|
||||||
error("_reserve_local: lx_mmap failed "
|
error("_reserve_local: lx_mmap failed "
|
||||||
"(addr_in=", addr_in, ",addr_out=", addr_out, "/", (long)addr_out, ")");
|
"(addr_in=", addr_in, ",addr_out=", addr_out, "/", (long)addr_out, ")");
|
||||||
@ -123,8 +123,8 @@ Region_map_mmap::Map_local_result
|
|||||||
Region_map_mmap::_map_local(Dataspace_capability ds,
|
Region_map_mmap::_map_local(Dataspace_capability ds,
|
||||||
size_t size,
|
size_t size,
|
||||||
addr_t offset,
|
addr_t offset,
|
||||||
bool use_local_addr,
|
bool use_at,
|
||||||
addr_t local_addr,
|
addr_t at,
|
||||||
bool executable,
|
bool executable,
|
||||||
bool overmap,
|
bool overmap,
|
||||||
bool writeable)
|
bool writeable)
|
||||||
@ -136,7 +136,7 @@ Region_map_mmap::_map_local(Dataspace_capability ds,
|
|||||||
int const prot = PROT_READ
|
int const prot = PROT_READ
|
||||||
| (writeable ? PROT_WRITE : 0)
|
| (writeable ? PROT_WRITE : 0)
|
||||||
| (executable ? PROT_EXEC : 0);
|
| (executable ? PROT_EXEC : 0);
|
||||||
void * const addr_in = use_local_addr ? (void*)local_addr : 0;
|
void * const addr_in = use_at ? (void*)at : 0;
|
||||||
void * const addr_out = lx_mmap(addr_in, size, prot, flags, fd, offset);
|
void * const addr_out = lx_mmap(addr_in, size, prot, flags, fd, offset);
|
||||||
|
|
||||||
/*
|
/*
|
||||||
@ -148,10 +148,10 @@ Region_map_mmap::_map_local(Dataspace_capability ds,
|
|||||||
lx_close(fd);
|
lx_close(fd);
|
||||||
|
|
||||||
/* attach at local address failed - unmap incorrect mapping */
|
/* attach at local address failed - unmap incorrect mapping */
|
||||||
if (use_local_addr && addr_in != addr_out)
|
if (use_at && addr_in != addr_out)
|
||||||
lx_munmap((void *)addr_out, size);
|
lx_munmap((void *)addr_out, size);
|
||||||
|
|
||||||
if ((use_local_addr && addr_in != addr_out)
|
if ((use_at && addr_in != addr_out)
|
||||||
|| (((long)addr_out < 0) && ((long)addr_out > -4095))) {
|
|| (((long)addr_out < 0) && ((long)addr_out > -4095))) {
|
||||||
error("_map_local: lx_mmap failed"
|
error("_map_local: lx_mmap failed"
|
||||||
"(addr_in=", addr_in, ", addr_out=", addr_out, "/", (long)addr_out, ") "
|
"(addr_in=", addr_in, ", addr_out=", addr_out, "/", (long)addr_out, ") "
|
||||||
@ -192,28 +192,23 @@ struct Inhibit_tracing_guard
|
|||||||
|
|
||||||
|
|
||||||
Region_map_mmap::Attach_result
|
Region_map_mmap::Attach_result
|
||||||
Region_map_mmap::attach(Dataspace_capability ds, Attach_attr const &attr)
|
Region_map_mmap::attach(Dataspace_capability ds, Attr const &attr)
|
||||||
{
|
{
|
||||||
Mutex::Guard mutex_guard(mutex());
|
Mutex::Guard mutex_guard(mutex());
|
||||||
|
|
||||||
Inhibit_tracing_guard it_guard { };
|
Inhibit_tracing_guard it_guard { };
|
||||||
|
|
||||||
/* only support attach_at for sub RM sessions */
|
/* only support attach_at for sub RM sessions */
|
||||||
if (_sub_rm && !attr.use_local_addr) {
|
if (_sub_rm && !attr.use_at) {
|
||||||
error("Region_map_mmap::attach: attaching w/o local addr not supported");
|
error("Region_map_mmap::attach: attaching w/o local addr not supported");
|
||||||
return Attach_error::REGION_CONFLICT;
|
return Attach_error::REGION_CONFLICT;
|
||||||
}
|
}
|
||||||
|
|
||||||
if (attr.offset < 0) {
|
|
||||||
error("Region_map_mmap::attach: negative offset not supported");
|
|
||||||
return Attach_error::REGION_CONFLICT;
|
|
||||||
}
|
|
||||||
|
|
||||||
if (!ds.valid())
|
if (!ds.valid())
|
||||||
return Attach_error::INVALID_DATASPACE;
|
return Attach_error::INVALID_DATASPACE;
|
||||||
|
|
||||||
size_t const remaining_ds_size = _dataspace_size(ds) > (addr_t)attr.offset
|
size_t const remaining_ds_size = _dataspace_size(ds) > attr.offset
|
||||||
? _dataspace_size(ds) - (addr_t)attr.offset : 0;
|
? _dataspace_size(ds) - attr.offset : 0;
|
||||||
|
|
||||||
/* determine size of virtual address region */
|
/* determine size of virtual address region */
|
||||||
size_t const region_size = attr.size ? min(remaining_ds_size, attr.size)
|
size_t const region_size = attr.size ? min(remaining_ds_size, attr.size)
|
||||||
@ -248,12 +243,12 @@ Region_map_mmap::attach(Dataspace_capability ds, Attach_attr const &attr)
|
|||||||
* Check for the dataspace to not exceed the boundaries of the
|
* Check for the dataspace to not exceed the boundaries of the
|
||||||
* sub RM session
|
* sub RM session
|
||||||
*/
|
*/
|
||||||
if (region_size + (addr_t)attr.local_addr > _size) {
|
if (region_size + attr.at > _size) {
|
||||||
error("Region_map_mmap::attach: dataspace does not fit in sub RM session");
|
error("Region_map_mmap::attach: dataspace does not fit in sub RM session");
|
||||||
return Attach_error::REGION_CONFLICT;
|
return Attach_error::REGION_CONFLICT;
|
||||||
}
|
}
|
||||||
|
|
||||||
if (!_add_to_rmap(Region((addr_t)attr.local_addr, attr.offset, ds, region_size)))
|
if (!_add_to_rmap(Region(attr.at, attr.offset, ds, region_size)))
|
||||||
return Attach_error::REGION_CONFLICT;
|
return Attach_error::REGION_CONFLICT;
|
||||||
|
|
||||||
/*
|
/*
|
||||||
@ -266,10 +261,10 @@ Region_map_mmap::attach(Dataspace_capability ds, Attach_attr const &attr)
|
|||||||
*/
|
*/
|
||||||
if (_is_attached())
|
if (_is_attached())
|
||||||
_map_local(ds, region_size, attr.offset,
|
_map_local(ds, region_size, attr.offset,
|
||||||
true, _base + (addr_t)attr.local_addr,
|
true, _base + attr.at,
|
||||||
attr.executable, true, attr.writeable);
|
attr.executable, true, attr.writeable);
|
||||||
|
|
||||||
return Local_addr(attr.local_addr);
|
return Range { .start = attr.at, .num_bytes = region_size };
|
||||||
|
|
||||||
} else {
|
} else {
|
||||||
|
|
||||||
@ -296,8 +291,7 @@ Region_map_mmap::attach(Dataspace_capability ds, Attach_attr const &attr)
|
|||||||
* Reserve local address range that can hold the entire sub RM
|
* Reserve local address range that can hold the entire sub RM
|
||||||
* session.
|
* session.
|
||||||
*/
|
*/
|
||||||
return _reserve_local(attr.use_local_addr, (addr_t)attr.local_addr,
|
return _reserve_local(attr.use_at, attr.at, region_size)
|
||||||
region_size)
|
|
||||||
.convert<Attach_result>(
|
.convert<Attach_result>(
|
||||||
|
|
||||||
[&] (addr_t base) -> Attach_result
|
[&] (addr_t base) -> Attach_result
|
||||||
@ -328,7 +322,7 @@ Region_map_mmap::attach(Dataspace_capability ds, Attach_attr const &attr)
|
|||||||
attr.executable, true, attr.writeable);
|
attr.executable, true, attr.writeable);
|
||||||
}
|
}
|
||||||
|
|
||||||
return Local_addr(rm->_base);
|
return Range { .start = rm->_base, .num_bytes = region_size };
|
||||||
},
|
},
|
||||||
[&] (Reserve_local_error e) {
|
[&] (Reserve_local_error e) {
|
||||||
switch (e) { case Reserve_local_error::REGION_CONFLICT: break; }
|
switch (e) { case Reserve_local_error::REGION_CONFLICT: break; }
|
||||||
@ -344,14 +338,13 @@ Region_map_mmap::attach(Dataspace_capability ds, Attach_attr const &attr)
|
|||||||
* Boring, a plain dataspace is attached to a root RM session.
|
* Boring, a plain dataspace is attached to a root RM session.
|
||||||
* Note, we do not overmap.
|
* Note, we do not overmap.
|
||||||
*/
|
*/
|
||||||
return _map_local(ds, region_size, attr.offset, attr.use_local_addr,
|
return _map_local(ds, region_size, attr.offset, attr.use_at,
|
||||||
(addr_t)attr.local_addr, attr.executable, false,
|
attr.at, attr.executable, false, attr.writeable)
|
||||||
attr.writeable)
|
|
||||||
.convert<Attach_result>(
|
.convert<Attach_result>(
|
||||||
|
|
||||||
[&] (void *addr) -> Attach_result {
|
[&] (void *addr) -> Attach_result {
|
||||||
if (_add_to_rmap(Region((addr_t)addr, attr.offset, ds, region_size)))
|
if (_add_to_rmap(Region((addr_t)addr, attr.offset, ds, region_size)))
|
||||||
return Local_addr(addr);
|
return Range { .start = (addr_t)addr, .num_bytes = region_size };
|
||||||
|
|
||||||
return Attach_error::REGION_CONFLICT;
|
return Attach_error::REGION_CONFLICT;
|
||||||
},
|
},
|
||||||
@ -366,7 +359,7 @@ Region_map_mmap::attach(Dataspace_capability ds, Attach_attr const &attr)
|
|||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
void Region_map_mmap::detach(Region_map::Local_addr local_addr)
|
void Region_map_mmap::detach(addr_t at)
|
||||||
{
|
{
|
||||||
Mutex::Guard mutex_guard(mutex());
|
Mutex::Guard mutex_guard(mutex());
|
||||||
|
|
||||||
@ -381,14 +374,14 @@ void Region_map_mmap::detach(Region_map::Local_addr local_addr)
|
|||||||
* 2.2 we are attached to a root RM
|
* 2.2 we are attached to a root RM
|
||||||
*/
|
*/
|
||||||
|
|
||||||
Region region = _rmap.lookup(local_addr);
|
Region region = _rmap.lookup(at);
|
||||||
if (!region.used())
|
if (!region.used())
|
||||||
return;
|
return;
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Remove meta data from region map
|
* Remove meta data from region map
|
||||||
*/
|
*/
|
||||||
_rmap.remove_region(local_addr);
|
_rmap.remove_region(at);
|
||||||
|
|
||||||
if (_sub_rm) {
|
if (_sub_rm) {
|
||||||
|
|
||||||
@ -404,8 +397,8 @@ void Region_map_mmap::detach(Region_map::Local_addr local_addr)
|
|||||||
* needed.
|
* needed.
|
||||||
*/
|
*/
|
||||||
if (_is_attached()) {
|
if (_is_attached()) {
|
||||||
lx_munmap((void *)((addr_t)local_addr + _base), region.size());
|
lx_munmap((void *)(at + _base), region.size());
|
||||||
_reserve_local(true, (addr_t)local_addr + _base, region.size());
|
_reserve_local(true, at + _base, region.size());
|
||||||
}
|
}
|
||||||
|
|
||||||
} else {
|
} else {
|
||||||
@ -417,7 +410,7 @@ void Region_map_mmap::detach(Region_map::Local_addr local_addr)
|
|||||||
* sub RM session. In both cases, we simply mark the local address
|
* sub RM session. In both cases, we simply mark the local address
|
||||||
* range as free.
|
* range as free.
|
||||||
*/
|
*/
|
||||||
lx_munmap(local_addr, region.size());
|
lx_munmap((void *)at, region.size());
|
||||||
}
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
|
@ -64,49 +64,64 @@ Main::Main(Env &env) : heap(env.ram(), env.rm())
|
|||||||
log("blob region region ", Hex_range<addr_t>(beg, size), " size=", size);
|
log("blob region region ", Hex_range<addr_t>(beg, size), " size=", size);
|
||||||
|
|
||||||
/* RAM dataspace attachment overlapping binary */
|
/* RAM dataspace attachment overlapping binary */
|
||||||
try {
|
|
||||||
Ram_dataspace_capability ds(env.ram().alloc(size));
|
|
||||||
|
|
||||||
log("before RAM dataspace attach");
|
log("before RAM dataspace attach");
|
||||||
env.rm().attach_at(ds, beg);
|
env.rm().attach(env.ram().alloc(size), {
|
||||||
|
.size = { }, .offset = { },
|
||||||
|
.use_at = true, .at = beg,
|
||||||
|
.executable = { }, .writeable = true
|
||||||
|
}).with_result(
|
||||||
|
[&] (Region_map::Range) {
|
||||||
error("after RAM dataspace attach -- ERROR");
|
error("after RAM dataspace attach -- ERROR");
|
||||||
env.parent().exit(-1);
|
env.parent().exit(-1); },
|
||||||
} catch (Region_map::Region_conflict) {
|
[&] (Region_map::Attach_error e) {
|
||||||
log("OK caught Region_conflict exception");
|
if (e == Region_map::Attach_error::REGION_CONFLICT)
|
||||||
}
|
log("OK caught Region_conflict exception"); }
|
||||||
|
);
|
||||||
|
|
||||||
/* empty managed dataspace overlapping binary */
|
/* empty managed dataspace overlapping binary */
|
||||||
try {
|
{
|
||||||
Rm_connection rm_connection(env);
|
Rm_connection rm_connection(env);
|
||||||
Region_map_client rm(rm_connection.create(size));
|
Region_map_client rm(rm_connection.create(size));
|
||||||
Dataspace_capability ds(rm.dataspace());
|
|
||||||
|
|
||||||
log("before sub-RM dataspace attach");
|
log("before sub-RM dataspace attach");
|
||||||
env.rm().attach_at(ds, beg);
|
env.rm().attach(rm.dataspace(), {
|
||||||
|
.size = { }, .offset = { },
|
||||||
|
.use_at = true, .at = beg,
|
||||||
|
.executable = { }, .writeable = true
|
||||||
|
}).with_result(
|
||||||
|
[&] (Region_map::Range) {
|
||||||
error("after sub-RM dataspace attach -- ERROR");
|
error("after sub-RM dataspace attach -- ERROR");
|
||||||
env.parent().exit(-1);
|
env.parent().exit(-1); },
|
||||||
} catch (Region_map::Region_conflict) {
|
[&] (Region_map::Attach_error e) {
|
||||||
log("OK caught Region_conflict exception");
|
if (e == Region_map::Attach_error::REGION_CONFLICT)
|
||||||
|
log("OK caught Region_conflict exception"); }
|
||||||
|
);
|
||||||
}
|
}
|
||||||
|
|
||||||
/* sparsely populated managed dataspace in free VM area */
|
/* sparsely populated managed dataspace in free VM area */
|
||||||
try {
|
{
|
||||||
Rm_connection rm_connection(env);
|
Rm_connection rm_connection(env);
|
||||||
Region_map_client rm(rm_connection.create(0x100000));
|
Region_map_client rm(rm_connection.create(0x100000));
|
||||||
|
|
||||||
rm.attach_at(env.ram().alloc(0x1000), 0x1000);
|
rm.attach(env.ram().alloc(0x1000), {
|
||||||
|
.size = { }, .offset = { },
|
||||||
Dataspace_capability ds(rm.dataspace());
|
.use_at = true, .at = 0x1000,
|
||||||
|
.executable = { }, .writeable = true
|
||||||
|
});
|
||||||
|
|
||||||
log("before populated sub-RM dataspace attach");
|
log("before populated sub-RM dataspace attach");
|
||||||
char *addr = (char *)env.rm().attach(ds) + 0x1000;
|
char * const addr = env.rm().attach(rm.dataspace(), {
|
||||||
|
.size = { }, .offset = { },
|
||||||
|
.use_at = { }, .at = { },
|
||||||
|
.executable = { }, .writeable = true
|
||||||
|
}).convert<char *>(
|
||||||
|
[&] (Region_map::Range r) { return (char *)r.start + 0x1000; },
|
||||||
|
[&] (Region_map::Attach_error) { return nullptr; }
|
||||||
|
);
|
||||||
log("after populated sub-RM dataspace attach / before touch");
|
log("after populated sub-RM dataspace attach / before touch");
|
||||||
char const val = *addr;
|
char const val = *addr;
|
||||||
*addr = 0x55;
|
*addr = 0x55;
|
||||||
log("after touch (", val, "/", *addr, ")");
|
log("after touch (", val, "/", *addr, ")");
|
||||||
} catch (Region_map::Region_conflict) {
|
|
||||||
error("Caught Region_conflict exception -- ERROR");
|
|
||||||
env.parent().exit(-1);
|
|
||||||
}
|
}
|
||||||
env.parent().exit(0);
|
env.parent().exit(0);
|
||||||
}
|
}
|
||||||
|
@ -49,58 +49,50 @@ static inline void * alloc_region(Dataspace_component &ds, const size_t size)
|
|||||||
return virt_addr;
|
return virt_addr;
|
||||||
}
|
}
|
||||||
|
|
||||||
Region_map::Local_addr
|
Region_map::Attach_result
|
||||||
Core_region_map::attach(Dataspace_capability ds_cap, size_t,
|
Core_region_map::attach(Dataspace_capability ds_cap, Attr const &attr)
|
||||||
off_t offset, bool use_local_addr,
|
|
||||||
Region_map::Local_addr,
|
|
||||||
bool executable, bool writeable)
|
|
||||||
{
|
{
|
||||||
auto lambda = [&] (Dataspace_component *ds_ptr) -> Local_addr {
|
return _ep.apply(ds_cap, [&] (Dataspace_component * const ds_ptr) -> Attach_result {
|
||||||
|
|
||||||
if (!ds_ptr)
|
if (!ds_ptr)
|
||||||
throw Invalid_dataspace();
|
return Attach_error::INVALID_DATASPACE;
|
||||||
|
|
||||||
Dataspace_component &ds = *ds_ptr;
|
Dataspace_component &ds = *ds_ptr;
|
||||||
|
|
||||||
if (use_local_addr) {
|
/* attach attributes 'use_at' and 'offset' not supported within core */
|
||||||
error("Parameter 'use_local_addr' not supported within core");
|
if (attr.use_at || attr.offset)
|
||||||
return nullptr;
|
return Attach_error::REGION_CONFLICT;
|
||||||
}
|
|
||||||
|
|
||||||
if (offset) {
|
|
||||||
error("Parameter 'offset' not supported within core");
|
|
||||||
return nullptr;
|
|
||||||
}
|
|
||||||
|
|
||||||
const size_t page_rounded_size = align_addr(ds.size(), get_page_size_log2());
|
const size_t page_rounded_size = align_addr(ds.size(), get_page_size_log2());
|
||||||
|
|
||||||
/* allocate the virtual region contiguous for the dataspace */
|
/* allocate the virtual region contiguous for the dataspace */
|
||||||
void * virt_ptr = alloc_region(ds, page_rounded_size);
|
void * virt_ptr = alloc_region(ds, page_rounded_size);
|
||||||
if (!virt_ptr)
|
if (!virt_ptr)
|
||||||
throw Out_of_ram();
|
return Attach_error::OUT_OF_RAM;
|
||||||
|
|
||||||
/* map it */
|
/* map it */
|
||||||
Nova::Utcb &utcb = *reinterpret_cast<Nova::Utcb *>(Thread::myself()->utcb());
|
Nova::Utcb &utcb = *reinterpret_cast<Nova::Utcb *>(Thread::myself()->utcb());
|
||||||
const Nova::Rights rights(true, writeable && ds.writeable(), executable);
|
const Nova::Rights rights(true, attr.writeable && ds.writeable(), attr.executable);
|
||||||
|
|
||||||
if (map_local(platform_specific().core_pd_sel(), utcb,
|
if (map_local(platform_specific().core_pd_sel(), utcb,
|
||||||
ds.phys_addr(), reinterpret_cast<addr_t>(virt_ptr),
|
ds.phys_addr(), reinterpret_cast<addr_t>(virt_ptr),
|
||||||
page_rounded_size >> get_page_size_log2(), rights, true)) {
|
page_rounded_size >> get_page_size_log2(), rights, true)) {
|
||||||
platform().region_alloc().free(virt_ptr, page_rounded_size);
|
platform().region_alloc().free(virt_ptr, page_rounded_size);
|
||||||
throw Out_of_ram();
|
|
||||||
|
return Attach_error::OUT_OF_RAM;
|
||||||
}
|
}
|
||||||
|
|
||||||
return virt_ptr;
|
return Range { .start = addr_t(virt_ptr), .num_bytes = page_rounded_size };
|
||||||
};
|
});
|
||||||
return _ep.apply(ds_cap, lambda);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
void Core_region_map::detach(Local_addr core_local_addr)
|
void Core_region_map::detach(addr_t core_local_addr)
|
||||||
{
|
{
|
||||||
size_t size = platform_specific().region_alloc_size_at(core_local_addr);
|
size_t size = platform_specific().region_alloc_size_at((void *)core_local_addr);
|
||||||
|
|
||||||
unmap_local(*reinterpret_cast<Nova::Utcb *>(Thread::myself()->utcb()),
|
unmap_local(*reinterpret_cast<Nova::Utcb *>(Thread::myself()->utcb()),
|
||||||
core_local_addr, size >> get_page_size_log2());
|
core_local_addr, size >> get_page_size_log2());
|
||||||
|
|
||||||
platform().region_alloc().free(core_local_addr);
|
platform().region_alloc().free((void *)core_local_addr);
|
||||||
}
|
}
|
||||||
|
@ -33,7 +33,7 @@ class Core::Vm_session_component
|
|||||||
private Ram_quota_guard,
|
private Ram_quota_guard,
|
||||||
private Cap_quota_guard,
|
private Cap_quota_guard,
|
||||||
public Rpc_object<Vm_session, Vm_session_component>,
|
public Rpc_object<Vm_session, Vm_session_component>,
|
||||||
public Region_map_detach
|
private Region_map_detach
|
||||||
{
|
{
|
||||||
private:
|
private:
|
||||||
|
|
||||||
@ -141,7 +141,7 @@ class Core::Vm_session_component
|
|||||||
/* helpers for vm_session_common.cc */
|
/* helpers for vm_session_common.cc */
|
||||||
void _attach_vm_memory(Dataspace_component &, addr_t, Attach_attr);
|
void _attach_vm_memory(Dataspace_component &, addr_t, Attach_attr);
|
||||||
void _detach_vm_memory(addr_t, size_t);
|
void _detach_vm_memory(addr_t, size_t);
|
||||||
void _with_region(Region_map::Local_addr, auto const &);
|
void _with_region(addr_t, auto const &);
|
||||||
|
|
||||||
protected:
|
protected:
|
||||||
|
|
||||||
@ -158,14 +158,16 @@ class Core::Vm_session_component
|
|||||||
Trace::Source_registry &);
|
Trace::Source_registry &);
|
||||||
~Vm_session_component();
|
~Vm_session_component();
|
||||||
|
|
||||||
|
|
||||||
/*********************************
|
/*********************************
|
||||||
** Region_map_detach interface **
|
** Region_map_detach interface **
|
||||||
*********************************/
|
*********************************/
|
||||||
|
|
||||||
/* used on destruction of attached dataspaces */
|
/* used on destruction of attached dataspaces */
|
||||||
void detach(Region_map::Local_addr) override; /* vm_session_common.cc */
|
void detach_at(addr_t) override;
|
||||||
void unmap_region(addr_t, size_t) override; /* vm_session_common.cc */
|
void unmap_region(addr_t, size_t) override;
|
||||||
void reserve_and_flush(Region_map::Local_addr) override; /* vm_session_common.cc */
|
void reserve_and_flush(addr_t) override;
|
||||||
|
|
||||||
|
|
||||||
/**************************
|
/**************************
|
||||||
** Vm session interface **
|
** Vm session interface **
|
||||||
@ -174,8 +176,8 @@ class Core::Vm_session_component
|
|||||||
Capability<Native_vcpu> create_vcpu(Thread_capability);
|
Capability<Native_vcpu> create_vcpu(Thread_capability);
|
||||||
void attach_pic(addr_t) override { /* unused on NOVA */ }
|
void attach_pic(addr_t) override { /* unused on NOVA */ }
|
||||||
|
|
||||||
void attach(Dataspace_capability, addr_t, Attach_attr) override; /* vm_session_common.cc */
|
void attach(Dataspace_capability, addr_t, Attach_attr) override;
|
||||||
void detach(addr_t, size_t) override; /* vm_session_common.cc */
|
void detach(addr_t, size_t) override;
|
||||||
|
|
||||||
};
|
};
|
||||||
|
|
||||||
|
@ -408,7 +408,7 @@ Vm_session_component::~Vm_session_component()
|
|||||||
if (!_map.any_block_addr(&out_addr))
|
if (!_map.any_block_addr(&out_addr))
|
||||||
break;
|
break;
|
||||||
|
|
||||||
detach(out_addr);
|
detach_at(out_addr);
|
||||||
}
|
}
|
||||||
|
|
||||||
if (_pd_sel && _pd_sel != invalid_sel())
|
if (_pd_sel && _pd_sel != invalid_sel())
|
||||||
|
@ -21,25 +21,22 @@ Region_map_client::Region_map_client(Capability<Region_map> session)
|
|||||||
: Rpc_client<Region_map>(session) { }
|
: Rpc_client<Region_map>(session) { }
|
||||||
|
|
||||||
|
|
||||||
Region_map::Local_addr
|
Region_map::Attach_result
|
||||||
Region_map_client::attach(Dataspace_capability ds, size_t size, off_t offset,
|
Region_map_client::attach(Dataspace_capability ds, Attr const &attr)
|
||||||
bool use_local_addr, Local_addr local_addr,
|
|
||||||
bool executable, bool writeable)
|
|
||||||
{
|
{
|
||||||
return call<Rpc_attach>(ds, size, offset, use_local_addr, local_addr,
|
return call<Rpc_attach>(ds, attr);
|
||||||
executable, writeable);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
void Region_map_client::detach(Local_addr local_addr) {
|
void Region_map_client::detach(addr_t at) {
|
||||||
call<Rpc_detach>(local_addr); }
|
call<Rpc_detach>(at); }
|
||||||
|
|
||||||
|
|
||||||
void Region_map_client::fault_handler(Signal_context_capability cap) {
|
void Region_map_client::fault_handler(Signal_context_capability cap) {
|
||||||
call<Rpc_fault_handler>(cap); }
|
call<Rpc_fault_handler>(cap); }
|
||||||
|
|
||||||
|
|
||||||
Region_map::State Region_map_client::state() { return call<Rpc_state>(); }
|
Region_map::Fault Region_map_client::fault() { return call<Rpc_fault>(); }
|
||||||
|
|
||||||
|
|
||||||
Dataspace_capability Region_map_client::dataspace()
|
Dataspace_capability Region_map_client::dataspace()
|
||||||
|
@ -22,6 +22,7 @@
|
|||||||
#include <region_map/client.h>
|
#include <region_map/client.h>
|
||||||
|
|
||||||
#include <base/attached_rom_dataspace.h>
|
#include <base/attached_rom_dataspace.h>
|
||||||
|
#include <base/attached_ram_dataspace.h>
|
||||||
|
|
||||||
#include <trace/timestamp.h>
|
#include <trace/timestamp.h>
|
||||||
|
|
||||||
@ -298,9 +299,9 @@ void test_pat(Genode::Env &env)
|
|||||||
|
|
||||||
enum { DS_ORDER = 12, PAGE_4K = 12 };
|
enum { DS_ORDER = 12, PAGE_4K = 12 };
|
||||||
|
|
||||||
Ram_dataspace_capability ds = env.ram().alloc (1 << (DS_ORDER + PAGE_4K),
|
Attached_dataspace ds { env.rm(), env.ram().alloc (1 << (DS_ORDER + PAGE_4K),
|
||||||
WRITE_COMBINED);
|
WRITE_COMBINED) };
|
||||||
addr_t map_addr = env.rm().attach(ds);
|
addr_t const map_addr = addr_t(ds.local_addr<void>());
|
||||||
|
|
||||||
enum { STACK_SIZE = 4096 };
|
enum { STACK_SIZE = 4096 };
|
||||||
|
|
||||||
@ -309,7 +310,10 @@ void test_pat(Genode::Env &env)
|
|||||||
|
|
||||||
Genode::Rm_connection rm(env);
|
Genode::Rm_connection rm(env);
|
||||||
Genode::Region_map_client rm_free_area(rm.create(1 << (DS_ORDER + PAGE_4K)));
|
Genode::Region_map_client rm_free_area(rm.create(1 << (DS_ORDER + PAGE_4K)));
|
||||||
addr_t remap_addr = env.rm().attach(rm_free_area.dataspace());
|
|
||||||
|
Attached_dataspace remap { env.rm(), rm_free_area.dataspace() };
|
||||||
|
|
||||||
|
addr_t const remap_addr = addr_t(remap.local_addr<void>());
|
||||||
|
|
||||||
/* trigger mapping of whole area */
|
/* trigger mapping of whole area */
|
||||||
for (addr_t i = map_addr; i < map_addr + (1 << (DS_ORDER + PAGE_4K)); i += (1 << PAGE_4K))
|
for (addr_t i = map_addr; i < map_addr + (1 << (DS_ORDER + PAGE_4K)); i += (1 << PAGE_4K))
|
||||||
@ -435,7 +439,7 @@ class Pager : private Genode::Thread {
|
|||||||
private:
|
private:
|
||||||
|
|
||||||
Native_capability _call_to_map { };
|
Native_capability _call_to_map { };
|
||||||
Ram_dataspace_capability _ds;
|
Attached_ram_dataspace _ds;
|
||||||
static addr_t _ds_mem;
|
static addr_t _ds_mem;
|
||||||
|
|
||||||
void entry() override { }
|
void entry() override { }
|
||||||
@ -468,9 +472,10 @@ class Pager : private Genode::Thread {
|
|||||||
Pager(Genode::Env &env, Location location)
|
Pager(Genode::Env &env, Location location)
|
||||||
:
|
:
|
||||||
Thread(env, "pager", 0x1000, location, Weight(), env.cpu()),
|
Thread(env, "pager", 0x1000, location, Weight(), env.cpu()),
|
||||||
_ds(env.ram().alloc (4096))
|
_ds(env.ram(), env.rm(), 4096)
|
||||||
{
|
{
|
||||||
_ds_mem = env.rm().attach(_ds);
|
_ds_mem = addr_t(_ds.local_addr<void>());
|
||||||
|
|
||||||
touch_read(reinterpret_cast<unsigned char *>(_ds_mem));
|
touch_read(reinterpret_cast<unsigned char *>(_ds_mem));
|
||||||
|
|
||||||
/* request creation of a 'local' EC */
|
/* request creation of a 'local' EC */
|
||||||
@ -503,7 +508,8 @@ class Cause_mapping : public Genode::Thread {
|
|||||||
Native_capability _call_to_map { };
|
Native_capability _call_to_map { };
|
||||||
Rm_connection _rm;
|
Rm_connection _rm;
|
||||||
Region_map_client _sub_rm;
|
Region_map_client _sub_rm;
|
||||||
addr_t _mem_nd;
|
Attached_dataspace _mem_ds;
|
||||||
|
addr_t _mem_nd = addr_t(_mem_ds.local_addr<void>());
|
||||||
addr_t _mem_st;
|
addr_t _mem_st;
|
||||||
Nova::Rights const _mapping_rwx = {true, true, true};
|
Nova::Rights const _mapping_rwx = {true, true, true};
|
||||||
|
|
||||||
@ -518,7 +524,7 @@ class Cause_mapping : public Genode::Thread {
|
|||||||
_call_to_map(call_to_map),
|
_call_to_map(call_to_map),
|
||||||
_rm(env),
|
_rm(env),
|
||||||
_sub_rm(_rm.create(0x2000)),
|
_sub_rm(_rm.create(0x2000)),
|
||||||
_mem_nd(env.rm().attach(_sub_rm.dataspace())),
|
_mem_ds(env.rm(), _sub_rm.dataspace()),
|
||||||
_mem_st(mem_st)
|
_mem_st(mem_st)
|
||||||
{ }
|
{ }
|
||||||
|
|
||||||
@ -606,7 +612,13 @@ class Greedy : public Genode::Thread {
|
|||||||
|
|
||||||
for (unsigned i = 0; i < SUB_RM_SIZE / 4096; i++) {
|
for (unsigned i = 0; i < SUB_RM_SIZE / 4096; i++) {
|
||||||
|
|
||||||
addr_t map_to = _env.rm().attach(ds);
|
addr_t const map_to = _env.rm().attach(ds, { }).convert<addr_t>(
|
||||||
|
[&] (Region_map::Range r) { return r.start; },
|
||||||
|
[&] (Region_map::Attach_error) {
|
||||||
|
error("Greedy: failed to attach RAM dataspace");
|
||||||
|
return 0UL;
|
||||||
|
}
|
||||||
|
);
|
||||||
|
|
||||||
/* check that we really got the mapping */
|
/* check that we really got the mapping */
|
||||||
touch_read(reinterpret_cast<unsigned char *>(map_to));
|
touch_read(reinterpret_cast<unsigned char *>(map_to));
|
||||||
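The 'Greedy' loop uses 'convert<addr_t>' because it needs the attach address as a value. When only side effects are needed, the same 'Attempt' result can be consumed with 'with_result' — a sketch, not part of the test:

  _env.rm().attach(ds, { }).with_result(
    [&] (Region_map::Range const r) { touch_read((unsigned char *)r.start); },
    [&] (Region_map::Attach_error)  { error("attach failed"); });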
|
@ -19,53 +19,42 @@
|
|||||||
using namespace Core;
|
using namespace Core;
|
||||||
|
|
||||||
|
|
||||||
Region_map::Local_addr
|
Region_map::Attach_result
|
||||||
Core_region_map::attach(Dataspace_capability ds_cap, size_t size,
|
Core_region_map::attach(Dataspace_capability ds_cap, Attr const &attr)
|
||||||
off_t offset, bool use_local_addr,
|
|
||||||
Region_map::Local_addr, bool, bool)
|
|
||||||
{
|
{
|
||||||
return _ep.apply(ds_cap, [&] (Dataspace_component *ds) -> void * {
|
return _ep.apply(ds_cap, [&] (Dataspace_component *ds) -> Attach_result {
|
||||||
|
|
||||||
if (!ds)
|
if (!ds)
|
||||||
throw Invalid_dataspace();
|
return Attach_error::INVALID_DATASPACE;
|
||||||
|
|
||||||
if (size == 0)
|
|
||||||
size = ds->size();
|
|
||||||
|
|
||||||
|
size_t const size = (attr.size == 0) ? ds->size() : attr.size;
|
||||||
size_t const page_rounded_size = (size + get_page_size() - 1)
|
size_t const page_rounded_size = (size + get_page_size() - 1)
|
||||||
& get_page_mask();
|
& get_page_mask();
|
||||||
|
|
||||||
if (use_local_addr) {
|
/* attach attributes 'use_at' and 'offset' not supported within core */
|
||||||
error("parameter 'use_local_addr' not supported within core");
|
if (attr.use_at || attr.offset)
|
||||||
return nullptr;
|
return Attach_error::REGION_CONFLICT;
|
||||||
}
|
|
||||||
|
|
||||||
if (offset) {
|
|
||||||
error("parameter 'offset' not supported within core");
|
|
||||||
return nullptr;
|
|
||||||
}
|
|
||||||
|
|
||||||
/* allocate range in core's virtual address space */
|
/* allocate range in core's virtual address space */
|
||||||
Range_allocator &virt_alloc = platform().region_alloc();
|
Range_allocator &virt_alloc = platform().region_alloc();
|
||||||
return virt_alloc.try_alloc(page_rounded_size).convert<void *>(
|
return virt_alloc.try_alloc(page_rounded_size).convert<Attach_result>(
|
||||||
|
|
||||||
[&] (void *virt_addr) -> void * {
|
[&] (void *virt_addr) -> Attach_result {
|
||||||
|
|
||||||
/* map the dataspace's physical pages to virtual memory */
|
/* map the dataspace's physical pages to virtual memory */
|
||||||
unsigned num_pages = page_rounded_size >> get_page_size_log2();
|
unsigned num_pages = page_rounded_size >> get_page_size_log2();
|
||||||
if (!map_local(ds->phys_addr(), (addr_t)virt_addr, num_pages))
|
if (!map_local(ds->phys_addr(), (addr_t)virt_addr, num_pages))
|
||||||
return nullptr;
|
return Attach_error::INVALID_DATASPACE;
|
||||||
|
|
||||||
return virt_addr;
|
return Range { .start = addr_t(virt_addr), .num_bytes = page_rounded_size };
|
||||||
},
|
},
|
||||||
|
|
||||||
[&] (Range_allocator::Alloc_error) -> void * {
|
[&] (Range_allocator::Alloc_error) {
|
||||||
error("could not allocate virtual address range in core of size ",
|
error("could not allocate virtual address range in core of size ",
|
||||||
page_rounded_size);
|
page_rounded_size);
|
||||||
return nullptr;
|
return Attach_error::REGION_CONFLICT;
|
||||||
});
|
});
|
||||||
});
|
});
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
void Core_region_map::detach(Local_addr) { }
|
void Core_region_map::detach(addr_t) { }
|
||||||
|
@ -19,59 +19,48 @@
|
|||||||
using namespace Core;
|
using namespace Core;
|
||||||
|
|
||||||
|
|
||||||
Region_map::Local_addr
|
Region_map::Attach_result
|
||||||
Core_region_map::attach(Dataspace_capability ds_cap, size_t size, off_t offset,
|
Core_region_map::attach(Dataspace_capability ds_cap, Attr const &attr)
|
||||||
bool use_local_addr, Region_map::Local_addr, bool, bool)
|
|
||||||
{
|
{
|
||||||
return _ep.apply(ds_cap, [&] (Dataspace_component *ds) -> Local_addr {
|
return _ep.apply(ds_cap, [&] (Dataspace_component *ds) -> Attach_result {
|
||||||
|
|
||||||
if (!ds)
|
if (!ds)
|
||||||
throw Invalid_dataspace();
|
return Attach_error::INVALID_DATASPACE;
|
||||||
|
|
||||||
if (size == 0)
|
size_t const size = (attr.size == 0) ? ds->size() : attr.size;
|
||||||
size = ds->size();
|
size_t const page_rounded_size = (size + get_page_size() - 1) & get_page_mask();
|
||||||
|
|
||||||
size_t page_rounded_size = (size + get_page_size() - 1) & get_page_mask();
|
/* attach attributes 'use_at' and 'offset' not supported within core */
|
||||||
|
if (attr.use_at || attr.offset)
|
||||||
if (use_local_addr) {
|
return Attach_error::REGION_CONFLICT;
|
||||||
error(__func__, ": 'use_local_addr' not supported within core");
|
|
||||||
return nullptr;
|
|
||||||
}
|
|
||||||
|
|
||||||
if (offset) {
|
|
||||||
error(__func__, ": 'offset' not supported within core");
|
|
||||||
return nullptr;
|
|
||||||
}
|
|
||||||
|
|
||||||
/* allocate range in core's virtual address space */
|
/* allocate range in core's virtual address space */
|
||||||
return platform().region_alloc().try_alloc(page_rounded_size).convert<Local_addr>(
|
return platform().region_alloc().try_alloc(page_rounded_size).convert<Attach_result>(
|
||||||
[&] (void *virt_ptr) {
|
[&] (void *virt_ptr) {
|
||||||
|
|
||||||
/* map the dataspace's physical pages to core-local virtual addresses */
|
/* map the dataspace's physical pages to core-local virtual addresses */
|
||||||
size_t num_pages = page_rounded_size >> get_page_size_log2();
|
size_t num_pages = page_rounded_size >> get_page_size_log2();
|
||||||
map_local(ds->phys_addr(), (addr_t)virt_ptr, num_pages);
|
map_local(ds->phys_addr(), (addr_t)virt_ptr, num_pages);
|
||||||
|
|
||||||
return virt_ptr;
|
return Range { .start = addr_t(virt_ptr), .num_bytes = page_rounded_size };
|
||||||
},
|
},
|
||||||
[&] (Range_allocator::Alloc_error) -> Local_addr {
|
[&] (Range_allocator::Alloc_error) -> Attach_result {
|
||||||
error("could not allocate virtual address range in core of size ",
|
error("could not allocate virtual address range in core of size ",
|
||||||
page_rounded_size);
|
page_rounded_size);
|
||||||
return nullptr;
|
return Attach_error::REGION_CONFLICT;
|
||||||
}
|
}
|
||||||
);
|
);
|
||||||
});
|
});
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
void Core_region_map::detach(Local_addr core_local_addr)
|
void Core_region_map::detach(addr_t const at)
|
||||||
{
|
{
|
||||||
size_t size = platform_specific().region_alloc_size_at(core_local_addr);
|
size_t const size = platform_specific().region_alloc_size_at((void *)at);
|
||||||
|
|
||||||
if (!unmap_local(core_local_addr, size >> get_page_size_log2())) {
|
if (!unmap_local(at, size >> get_page_size_log2())) {
|
||||||
error("could not unmap core virtual address ",
|
error("could not unmap core virtual address ", Hex(at), " in ", __PRETTY_FUNCTION__);
|
||||||
Hex(core_local_addr), " in ", __PRETTY_FUNCTION__);
|
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
platform().region_alloc().free(core_local_addr);
|
platform().region_alloc().free((void *)at);
|
||||||
}
|
}
|
||||||
|
@ -30,7 +30,7 @@ class Core::Vm_session_component
|
|||||||
private Ram_quota_guard,
|
private Ram_quota_guard,
|
||||||
private Cap_quota_guard,
|
private Cap_quota_guard,
|
||||||
public Rpc_object<Vm_session, Vm_session_component>,
|
public Rpc_object<Vm_session, Vm_session_component>,
|
||||||
public Region_map_detach
|
private Region_map_detach
|
||||||
{
|
{
|
||||||
private:
|
private:
|
||||||
|
|
||||||
@ -85,7 +85,7 @@ class Core::Vm_session_component
|
|||||||
/* helpers for vm_session_common.cc */
|
/* helpers for vm_session_common.cc */
|
||||||
void _attach_vm_memory(Dataspace_component &, addr_t, Attach_attr);
|
void _attach_vm_memory(Dataspace_component &, addr_t, Attach_attr);
|
||||||
void _detach_vm_memory(addr_t, size_t);
|
void _detach_vm_memory(addr_t, size_t);
|
||||||
void _with_region(Region_map::Local_addr, auto const &);
|
void _with_region(addr_t, auto const &);
|
||||||
|
|
||||||
protected:
|
protected:
|
||||||
|
|
||||||
@ -102,14 +102,16 @@ class Core::Vm_session_component
|
|||||||
Trace::Source_registry &);
|
Trace::Source_registry &);
|
||||||
~Vm_session_component();
|
~Vm_session_component();
|
||||||
|
|
||||||
|
|
||||||
/*********************************
|
/*********************************
|
||||||
** Region_map_detach interface **
|
** Region_map_detach interface **
|
||||||
*********************************/
|
*********************************/
|
||||||
|
|
||||||
/* used on destruction of attached dataspaces */
|
/* used on destruction of attached dataspaces */
|
||||||
void detach(Region_map::Local_addr) override; /* vm_session_common.cc */
|
void detach_at (addr_t) override;
|
||||||
void unmap_region(addr_t, size_t) override; /* vm_session_common.cc */
|
void unmap_region (addr_t, size_t) override;
|
||||||
void reserve_and_flush(Region_map::Local_addr) override; /* vm_session_common.cc */
|
void reserve_and_flush (addr_t) override;
|
||||||
|
|
||||||
|
|
||||||
/**************************
|
/**************************
|
||||||
** Vm session interface **
|
** Vm session interface **
|
||||||
@ -118,8 +120,8 @@ class Core::Vm_session_component
|
|||||||
Capability<Native_vcpu> create_vcpu(Thread_capability);
|
Capability<Native_vcpu> create_vcpu(Thread_capability);
|
||||||
void attach_pic(addr_t) override { /* unused on seL4 */ }
|
void attach_pic(addr_t) override { /* unused on seL4 */ }
|
||||||
|
|
||||||
void attach(Dataspace_capability, addr_t, Attach_attr) override; /* vm_session_common.cc */
|
void attach(Dataspace_capability, addr_t, Attach_attr) override;
|
||||||
void detach(addr_t, size_t) override; /* vm_session_common.cc */
|
void detach(addr_t, size_t) override;
|
||||||
};
|
};
|
||||||
|
|
||||||
#endif /* _CORE__VM_SESSION_COMPONENT_H_ */
|
#endif /* _CORE__VM_SESSION_COMPONENT_H_ */
|
||||||
|
@ -408,7 +408,7 @@ class Core::Vm_space
|
|||||||
bool ok = true;
|
bool ok = true;
|
||||||
|
|
||||||
for (size_t i = 0; i < num_pages; i++) {
|
for (size_t i = 0; i < num_pages; i++) {
|
||||||
off_t const offset = i << get_page_size_log2();
|
addr_t const offset = i << get_page_size_log2();
|
||||||
|
|
||||||
if (_map_frame(from_phys + offset, to_virt + offset, attr,
|
if (_map_frame(from_phys + offset, to_virt + offset, attr,
|
||||||
false /* host page table */, fn_unmap))
|
false /* host page table */, fn_unmap))
|
||||||
@ -442,7 +442,7 @@ class Core::Vm_space
|
|||||||
Mutex::Guard guard(_mutex);
|
Mutex::Guard guard(_mutex);
|
||||||
|
|
||||||
for (size_t i = 0; i < num_pages; i++) {
|
for (size_t i = 0; i < num_pages; i++) {
|
||||||
off_t const offset = i << get_page_size_log2();
|
addr_t const offset = i << get_page_size_log2();
|
||||||
|
|
||||||
_map_frame(from_phys + offset, guest_phys + offset, attr,
|
_map_frame(from_phys + offset, guest_phys + offset, attr,
|
||||||
true /* guest page table */, fn_unmap);
|
true /* guest page table */, fn_unmap);
|
||||||
@ -457,7 +457,7 @@ class Core::Vm_space
|
|||||||
Mutex::Guard guard(_mutex);
|
Mutex::Guard guard(_mutex);
|
||||||
|
|
||||||
for (size_t i = 0; unmap_success && i < num_pages; i++) {
|
for (size_t i = 0; unmap_success && i < num_pages; i++) {
|
||||||
off_t const offset = i << get_page_size_log2();
|
addr_t const offset = i << get_page_size_log2();
|
||||||
|
|
||||||
_page_table_registry.flush_page(virt + offset, [&] (Cap_sel const &idx, addr_t) {
|
_page_table_registry.flush_page(virt + offset, [&] (Cap_sel const &idx, addr_t) {
|
||||||
|
|
||||||
|
@ -185,7 +185,7 @@ Vm_session_component::~Vm_session_component()
|
|||||||
if (!_map.any_block_addr(&out_addr))
|
if (!_map.any_block_addr(&out_addr))
|
||||||
break;
|
break;
|
||||||
|
|
||||||
detach(out_addr);
|
detach_at(out_addr);
|
||||||
}
|
}
|
||||||
|
|
||||||
if (_vm_page_table.value())
|
if (_vm_page_table.value())
|
||||||
@ -300,7 +300,7 @@ void Vm_session_component::_attach_vm_memory(Dataspace_component &dsc,
|
|||||||
if (!_map.any_block_addr(&out_addr))
|
if (!_map.any_block_addr(&out_addr))
|
||||||
break;
|
break;
|
||||||
|
|
||||||
detach(out_addr);
|
detach_at(out_addr);
|
||||||
}
|
}
|
||||||
|
|
||||||
_vm_space.map_guest(page.addr, page.hotspot,
|
_vm_space.map_guest(page.addr, page.hotspot,
|
||||||
|
@ -56,45 +56,43 @@ class Stack_area_region_map : public Region_map
|
|||||||
/**
|
/**
|
||||||
* Allocate and attach on-the-fly backing store to the stack area
|
* Allocate and attach on-the-fly backing store to the stack area
|
||||||
*/
|
*/
|
||||||
Local_addr attach(Dataspace_capability, size_t size, off_t,
|
Attach_result attach(Dataspace_capability, Attr const &attr) override
|
||||||
bool, Local_addr local_addr, bool, bool) override
|
|
||||||
{
|
{
|
||||||
using namespace Core;
|
using namespace Core;
|
||||||
|
|
||||||
size = round_page(size);
|
size_t const size = round_page(attr.size);
|
||||||
|
size_t const num_pages = size >> get_page_size_log2();
|
||||||
|
|
||||||
/* allocate physical memory */
|
/* allocate physical memory */
|
||||||
Range_allocator &phys_alloc = Core::platform_specific().ram_alloc();
|
Range_allocator &phys_alloc = Core::platform_specific().ram_alloc();
|
||||||
size_t const num_pages = size >> get_page_size_log2();
|
|
||||||
addr_t const phys = Untyped_memory::alloc_pages(phys_alloc, num_pages);
|
addr_t const phys = Untyped_memory::alloc_pages(phys_alloc, num_pages);
|
||||||
Untyped_memory::convert_to_page_frames(phys, num_pages);
|
Untyped_memory::convert_to_page_frames(phys, num_pages);
|
||||||
|
|
||||||
Dataspace_component &ds = *new (&_ds_slab)
|
Dataspace_component &ds = *new (&_ds_slab)
|
||||||
Dataspace_component(size, 0, phys, CACHED, true, 0);
|
Dataspace_component(size, 0, phys, CACHED, true, 0);
|
||||||
|
|
||||||
addr_t const core_local_addr =
|
addr_t const core_local_addr = stack_area_virtual_base() + attr.at;
|
||||||
stack_area_virtual_base() + (addr_t)local_addr;
|
|
||||||
|
|
||||||
if (!map_local(ds.phys_addr(), core_local_addr,
|
if (!map_local(ds.phys_addr(), core_local_addr,
|
||||||
ds.size() >> get_page_size_log2())) {
|
ds.size() >> get_page_size_log2())) {
|
||||||
error(__func__, ": could not map phys ", Hex(ds.phys_addr()), " "
|
error(__func__, ": could not map phys ", Hex(ds.phys_addr()), " "
|
||||||
"at local ", Hex(core_local_addr));
|
"at local ", Hex(core_local_addr));
|
||||||
return (addr_t)0;
|
return Attach_error::INVALID_DATASPACE;
|
||||||
}
|
}
|
||||||
|
|
||||||
ds.assign_core_local_addr((void*)core_local_addr);
|
ds.assign_core_local_addr((void*)core_local_addr);
|
||||||
|
|
||||||
return local_addr;
|
return Range { .start = attr.at, .num_bytes = size };
|
||||||
}
|
}
|
||||||
|
|
||||||
void detach(Local_addr local_addr) override
|
void detach(addr_t at) override
|
||||||
{
|
{
|
||||||
using namespace Core;
|
using namespace Core;
|
||||||
|
|
||||||
if ((addr_t)local_addr >= stack_area_virtual_size())
|
if (at >= stack_area_virtual_size())
|
||||||
return;
|
return;
|
||||||
|
|
||||||
addr_t const detach = stack_area_virtual_base() + (addr_t)local_addr;
|
addr_t const detach = stack_area_virtual_base() + at;
|
||||||
addr_t const stack = stack_virtual_size();
|
addr_t const stack = stack_virtual_size();
|
||||||
addr_t const pages = ((detach & ~(stack - 1)) + stack - detach)
|
addr_t const pages = ((detach & ~(stack - 1)) + stack - detach)
|
||||||
>> get_page_size_log2();
|
>> get_page_size_log2();
|
||||||
@ -107,9 +105,9 @@ class Stack_area_region_map : public Region_map
|
|||||||
|
|
||||||
void fault_handler(Signal_context_capability) override { }
|
void fault_handler(Signal_context_capability) override { }
|
||||||
|
|
||||||
State state() override { return State(); }
|
Fault fault() override { return { }; }
|
||||||
|
|
||||||
Dataspace_capability dataspace() override { return Dataspace_capability(); }
|
Dataspace_capability dataspace() override { return { }; }
|
||||||
};
|
};
|
||||||
|
|
||||||
|
|
||||||
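Note that the stack-area variant above interprets 'attr.at' relative to 'stack_area_virtual_base()'. For an ordinary region map, a fixed attach address — formerly the 'attach_at' shortcut — is requested by setting 'use_at' and 'at'; a hedged sketch with placeholder names 'rm', 'ds', and an arbitrary address:

  rm.attach(ds, {
    .size = { }, .offset = { },
    .use_at = true, .at = 0x40000000,
    .executable = { }, .writeable = true
  }).with_error([&] (Region_map::Attach_error) {
    error("could not attach dataspace at the requested address"); });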
|
@ -24,7 +24,8 @@ class Genode::Attached_dataspace : Noncopyable
|
|||||||
{
|
{
|
||||||
public:
|
public:
|
||||||
|
|
||||||
typedef Region_map::Invalid_dataspace Invalid_dataspace;
|
struct Invalid_dataspace : Exception { };
|
||||||
|
struct Region_conflict : Exception { };
|
||||||
|
|
||||||
private:
|
private:
|
||||||
|
|
||||||
@ -32,16 +33,25 @@ class Genode::Attached_dataspace : Noncopyable
|
|||||||
|
|
||||||
Region_map &_rm;
|
Region_map &_rm;
|
||||||
|
|
||||||
size_t const _size = { Dataspace_client(_ds).size() };
|
|
||||||
|
|
||||||
void * _local_addr = nullptr;
|
|
||||||
|
|
||||||
Dataspace_capability _check(Dataspace_capability ds)
|
Dataspace_capability _check(Dataspace_capability ds)
|
||||||
{
|
{
|
||||||
if (ds.valid())
|
if (ds.valid())
|
||||||
return ds;
|
return ds;
|
||||||
|
|
||||||
throw Region_map::Invalid_dataspace();
|
throw Invalid_dataspace();
|
||||||
|
}
|
||||||
|
|
||||||
|
Region_map::Attach_result _attached = _rm.attach(_ds, {
|
||||||
|
.size = { }, .offset = { },
|
||||||
|
.use_at = { }, .at = { },
|
||||||
|
.executable = { }, .writeable = true });
|
||||||
|
|
||||||
|
template <typename T>
|
||||||
|
T *_ptr() const
|
||||||
|
{
|
||||||
|
return _attached.convert<T *>(
|
||||||
|
[&] (Region_map::Range range) { return (T *)range.start; },
|
||||||
|
[&] (Region_map::Attach_error) { return nullptr; });
|
||||||
}
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
@ -55,21 +65,30 @@ class Genode::Attached_dataspace : Noncopyable
|
|||||||
/**
|
/**
|
||||||
* Constructor
|
* Constructor
|
||||||
*
|
*
|
||||||
* \throw Region_map::Region_conflict
|
* \throw Region_conflict
|
||||||
* \throw Region_map::Invalid_dataspace
|
* \throw Invalid_dataspace
|
||||||
* \throw Out_of_caps
|
* \throw Out_of_caps
|
||||||
* \throw Out_of_ram
|
* \throw Out_of_ram
|
||||||
*/
|
*/
|
||||||
Attached_dataspace(Region_map &rm, Dataspace_capability ds)
|
Attached_dataspace(Region_map &rm, Dataspace_capability ds)
|
||||||
: _ds(_check(ds)), _rm(rm), _local_addr(_rm.attach(_ds)) { }
|
:
|
||||||
|
_ds(_check(ds)), _rm(rm)
|
||||||
|
{
|
||||||
|
_attached.with_error([&] (Region_map::Attach_error e) {
|
||||||
|
if (e == Region_map::Attach_error::OUT_OF_RAM) throw Out_of_ram();
|
||||||
|
if (e == Region_map::Attach_error::OUT_OF_CAPS) throw Out_of_caps();
|
||||||
|
throw Region_conflict();
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Destructor
|
* Destructor
|
||||||
*/
|
*/
|
||||||
~Attached_dataspace()
|
~Attached_dataspace()
|
||||||
{
|
{
|
||||||
if (_local_addr)
|
_attached.with_result(
|
||||||
_rm.detach(_local_addr);
|
[&] (Region_map::Range range) { _rm.detach(range.start); },
|
||||||
|
[&] (Region_map::Attach_error) { });
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
@ -84,15 +103,20 @@ class Genode::Attached_dataspace : Noncopyable
|
|||||||
* A newly attached dataspace is untyped memory anyway.
|
* A newly attached dataspace is untyped memory anyway.
|
||||||
*/
|
*/
|
||||||
template <typename T>
|
template <typename T>
|
||||||
T *local_addr() { return static_cast<T *>(_local_addr); }
|
T *local_addr() { return _ptr<T>(); }
|
||||||
|
|
||||||
template <typename T>
|
template <typename T>
|
||||||
T const *local_addr() const { return static_cast<T const *>(_local_addr); }
|
T const *local_addr() const { return _ptr<T const>(); }
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Return size
|
* Return size
|
||||||
*/
|
*/
|
||||||
size_t size() const { return _size; }
|
size_t size() const
|
||||||
|
{
|
||||||
|
return _attached.convert<size_t>(
|
||||||
|
[&] (Region_map::Range range) { return range.num_bytes; },
|
||||||
|
[&] (Region_map::Attach_error) { return 0UL; });
|
||||||
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Forget dataspace, thereby skipping the detachment on destruction
|
* Forget dataspace, thereby skipping the detachment on destruction
|
||||||
@ -103,7 +127,7 @@ class Genode::Attached_dataspace : Noncopyable
|
|||||||
* removed the memory mappings of the dataspace. So we have to omit the
|
* removed the memory mappings of the dataspace. So we have to omit the
|
||||||
* detach operation in '~Attached_dataspace'.
|
* detach operation in '~Attached_dataspace'.
|
||||||
*/
|
*/
|
||||||
void invalidate() { _local_addr = nullptr; }
|
void invalidate() { _attached = Region_map::Attach_error::INVALID_DATASPACE; }
|
||||||
};
|
};
|
||||||
|
|
||||||
#endif /* _INCLUDE__BASE__ATTACHED_DATASPACE_H_ */
|
#endif /* _INCLUDE__BASE__ATTACHED_DATASPACE_H_ */
|
||||||
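Although 'Region_map::attach' no longer throws, 'Attached_dataspace' retains an exception-based constructor for its users. A minimal usage sketch under that contract, with 'env' and 'ds_cap' assumed:

  try {
    Attached_dataspace ds { env.rm(), ds_cap };
    char * const content = ds.local_addr<char>();
    log("attached ", ds.size(), " bytes at ", (void *)content);
  }
  catch (Attached_dataspace::Invalid_dataspace) { /* capability was invalid */ }
  catch (Attached_dataspace::Region_conflict)   { /* no fitting virtual range */ }
  /* Out_of_ram and Out_of_caps may still propagate from the attach itself */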
|
@ -15,7 +15,7 @@
|
|||||||
#define _INCLUDE__BASE__ATTACHED_IO_MEM_DATASPACE_H_
|
#define _INCLUDE__BASE__ATTACHED_IO_MEM_DATASPACE_H_
|
||||||
|
|
||||||
#include <io_mem_session/connection.h>
|
#include <io_mem_session/connection.h>
|
||||||
#include <base/env.h>
|
#include <base/attached_dataspace.h>
|
||||||
|
|
||||||
namespace Genode { class Attached_io_mem_dataspace; }
|
namespace Genode { class Attached_io_mem_dataspace; }
|
||||||
|
|
||||||
@ -34,11 +34,23 @@ class Genode::Attached_io_mem_dataspace
|
|||||||
Region_map &_env_rm;
|
Region_map &_env_rm;
|
||||||
Io_mem_connection _mmio;
|
Io_mem_connection _mmio;
|
||||||
Io_mem_dataspace_capability _ds;
|
Io_mem_dataspace_capability _ds;
|
||||||
Region_map::Local_addr _local_addr;
|
addr_t const _at;
|
||||||
|
|
||||||
static void *_with_sub_page_offset(void *local, addr_t io_base)
|
static addr_t _with_sub_page_offset(addr_t local, addr_t io_base)
|
||||||
{
|
{
|
||||||
return (void *)((addr_t)local | (io_base & (addr_t)0xfff));
|
return local | (io_base & 0xfffUL);
|
||||||
|
}
|
||||||
|
|
||||||
|
addr_t _attach()
|
||||||
|
{
|
||||||
|
return _env_rm.attach(_ds, {
|
||||||
|
.size = { }, .offset = { },
|
||||||
|
.use_at = { }, .at = { },
|
||||||
|
.executable = { }, .writeable = true
|
||||||
|
}).convert<addr_t>(
|
||||||
|
[&] (Region_map::Range range) { return range.start; },
|
||||||
|
[&] (Region_map::Attach_error) { return 0UL; }
|
||||||
|
);
|
||||||
}
|
}
|
||||||
|
|
||||||
public:
|
public:
|
||||||
@ -55,8 +67,8 @@ class Genode::Attached_io_mem_dataspace
|
|||||||
* \throw Insufficient_cap_quota
|
* \throw Insufficient_cap_quota
|
||||||
* \throw Out_of_ram
|
* \throw Out_of_ram
|
||||||
* \throw Out_of_caps
|
* \throw Out_of_caps
|
||||||
* \throw Region_map::Region_conflict
|
* \throw Attached_dataspace::Region_conflict
|
||||||
* \throw Region_map::Invalid_dataspace
|
* \throw Attached_dataspace::Invalid_dataspace
|
||||||
*/
|
*/
|
||||||
Attached_io_mem_dataspace(Env &env, Genode::addr_t base, Genode::size_t size,
|
Attached_io_mem_dataspace(Env &env, Genode::addr_t base, Genode::size_t size,
|
||||||
bool write_combined = false)
|
bool write_combined = false)
|
||||||
@ -64,13 +76,16 @@ class Genode::Attached_io_mem_dataspace
|
|||||||
_env_rm(env.rm()),
|
_env_rm(env.rm()),
|
||||||
_mmio(env, base, size, write_combined),
|
_mmio(env, base, size, write_combined),
|
||||||
_ds(_mmio.dataspace()),
|
_ds(_mmio.dataspace()),
|
||||||
_local_addr(_with_sub_page_offset(env.rm().attach(_ds), base))
|
_at(_with_sub_page_offset(_attach(), base))
|
||||||
{ }
|
{
|
||||||
|
if (!_ds.valid()) throw Attached_dataspace::Invalid_dataspace();
|
||||||
|
if (!_at) throw Attached_dataspace::Region_conflict();
|
||||||
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Destructor
|
* Destructor
|
||||||
*/
|
*/
|
||||||
~Attached_io_mem_dataspace() { _env_rm.detach(_local_addr); }
|
~Attached_io_mem_dataspace() { if (_at) _env_rm.detach(_at); }
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Return capability of the used RAM dataspace
|
* Return capability of the used RAM dataspace
|
||||||
@ -84,7 +99,7 @@ class Genode::Attached_io_mem_dataspace
|
|||||||
* A newly allocated I/O MEM dataspace is untyped memory anyway.
|
* A newly allocated I/O MEM dataspace is untyped memory anyway.
|
||||||
*/
|
*/
|
||||||
template <typename T>
|
template <typename T>
|
||||||
T *local_addr() { return static_cast<T *>(_local_addr); }
|
T *local_addr() { return reinterpret_cast<T *>(_at); }
|
||||||
};
|
};
|
||||||
|
|
||||||
#endif /* _INCLUDE__BASE__ATTACHED_IO_MEM_DATASPACE_H_ */
|
#endif /* _INCLUDE__BASE__ATTACHED_IO_MEM_DATASPACE_H_ */
|
||||||
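From a client's point of view, 'Attached_io_mem_dataspace' is used as before; only the exception types moved to 'Attached_dataspace'. A short sketch with a made-up MMIO range:

  Attached_io_mem_dataspace mmio { env, 0xfed00000 /* hypothetical base */, 0x1000 };
  uint32_t volatile * const regs = mmio.local_addr<uint32_t volatile>();
  uint32_t const id = regs[0];   /* sub-page offset of the base is preserved by '_with_sub_page_offset' */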
|
@ -17,6 +17,7 @@
|
|||||||
#include <util/touch.h>
|
#include <util/touch.h>
|
||||||
#include <base/ram_allocator.h>
|
#include <base/ram_allocator.h>
|
||||||
#include <base/env.h>
|
#include <base/env.h>
|
||||||
|
#include <base/attached_dataspace.h>
|
||||||
|
|
||||||
namespace Genode { class Attached_ram_dataspace; }
|
namespace Genode { class Attached_ram_dataspace; }
|
||||||
|
|
||||||
@ -38,7 +39,7 @@ class Genode::Attached_ram_dataspace
|
|||||||
Ram_allocator *_ram = nullptr;
|
Ram_allocator *_ram = nullptr;
|
||||||
Region_map *_rm = nullptr;
|
Region_map *_rm = nullptr;
|
||||||
Ram_dataspace_capability _ds { };
|
Ram_dataspace_capability _ds { };
|
||||||
void *_local_addr = nullptr;
|
addr_t _at = 0;
|
||||||
Cache const _cache = CACHED;
|
Cache const _cache = CACHED;
|
||||||
|
|
||||||
template <typename T>
|
template <typename T>
|
||||||
@ -46,8 +47,8 @@ class Genode::Attached_ram_dataspace
|
|||||||
|
|
||||||
void _detach_and_free_dataspace()
|
void _detach_and_free_dataspace()
|
||||||
{
|
{
|
||||||
if (_local_addr)
|
if (_at)
|
||||||
_rm->detach(_local_addr);
|
_rm->detach(_at);
|
||||||
|
|
||||||
if (_ds.valid())
|
if (_ds.valid())
|
||||||
_ram->free(_ds);
|
_ram->free(_ds);
|
||||||
@ -57,13 +58,19 @@ class Genode::Attached_ram_dataspace
|
|||||||
{
|
{
|
||||||
if (!_size) return;
|
if (!_size) return;
|
||||||
|
|
||||||
try {
|
|
||||||
_ds = _ram->alloc(_size, _cache);
|
_ds = _ram->alloc(_size, _cache);
|
||||||
_local_addr = _rm->attach(_ds);
|
|
||||||
}
|
Region_map::Attr attr { };
|
||||||
|
attr.writeable = true;
|
||||||
|
_rm->attach(_ds, attr).with_result(
|
||||||
|
[&] (Region_map::Range range) { _at = range.start; },
|
||||||
|
[&] (Region_map::Attach_error e) {
|
||||||
/* revert allocation if attaching the dataspace failed */
|
/* revert allocation if attaching the dataspace failed */
|
||||||
catch (Region_map::Region_conflict) { _ram->free(_ds); throw; }
|
_ram->free(_ds);
|
||||||
catch (Region_map::Invalid_dataspace) { _ram->free(_ds); throw; }
|
if (e == Region_map::Attach_error::OUT_OF_RAM) throw Out_of_ram();
|
||||||
|
if (e == Region_map::Attach_error::OUT_OF_CAPS) throw Out_of_caps();
|
||||||
|
throw Attached_dataspace::Region_conflict();
|
||||||
|
});
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Eagerly map dataspace if used for DMA
|
* Eagerly map dataspace if used for DMA
|
||||||
@ -77,7 +84,7 @@ class Genode::Attached_ram_dataspace
|
|||||||
*/
|
*/
|
||||||
if (_cache != CACHED) {
|
if (_cache != CACHED) {
|
||||||
enum { PAGE_SIZE = 4096 };
|
enum { PAGE_SIZE = 4096 };
|
||||||
unsigned char volatile *base = (unsigned char volatile *)_local_addr;
|
unsigned char volatile *base = (unsigned char volatile *)_at;
|
||||||
for (size_t i = 0; i < _size; i += PAGE_SIZE)
|
for (size_t i = 0; i < _size; i += PAGE_SIZE)
|
||||||
touch_read_write(base + i);
|
touch_read_write(base + i);
|
||||||
}
|
}
|
||||||
@ -96,8 +103,8 @@ class Genode::Attached_ram_dataspace
|
|||||||
*
|
*
|
||||||
* \throw Out_of_ram
|
* \throw Out_of_ram
|
||||||
* \throw Out_of_caps
|
* \throw Out_of_caps
|
||||||
* \throw Region_map::Region_conflict
|
* \throw Attached_dataspace::Region_conflict
|
||||||
* \throw Region_map::Invalid_dataspace
|
* \throw Attached_dataspace::Invalid_dataspace
|
||||||
*/
|
*/
|
||||||
Attached_ram_dataspace(Ram_allocator &ram, Region_map &rm,
|
Attached_ram_dataspace(Ram_allocator &ram, Region_map &rm,
|
||||||
size_t size, Cache cache = CACHED)
|
size_t size, Cache cache = CACHED)
|
||||||
@ -125,7 +132,7 @@ class Genode::Attached_ram_dataspace
|
|||||||
* untyped memory anyway.
|
* untyped memory anyway.
|
||||||
*/
|
*/
|
||||||
template <typename T>
|
template <typename T>
|
||||||
T *local_addr() const { return static_cast<T *>(_local_addr); }
|
T *local_addr() const { return reinterpret_cast<T *>(_at); }
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Return size
|
* Return size
|
||||||
@ -137,7 +144,7 @@ class Genode::Attached_ram_dataspace
|
|||||||
_swap(_size, other._size);
|
_swap(_size, other._size);
|
||||||
_swap(_ram, other._ram);
|
_swap(_ram, other._ram);
|
||||||
_swap(_ds, other._ds);
|
_swap(_ds, other._ds);
|
||||||
_swap(_local_addr, other._local_addr);
|
_swap(_at, other._at);
|
||||||
}
|
}
|
||||||
|
|
||||||
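Callers of 'Attached_ram_dataspace' are likewise unaffected by the switch from '_local_addr' to '_at'; the typed accessor still yields a pointer. A brief sketch ('env' assumed):

  Attached_ram_dataspace buffer { env.ram(), env.rm(), 8192 };
  memset(buffer.local_addr<void>(), 0, buffer.size());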
/**
|
/**
|
||||||
|
@ -41,15 +41,10 @@ class Genode::Region_map_client : public Rpc_client<Region_map>
|
|||||||
|
|
||||||
explicit Region_map_client(Capability<Region_map>);
|
explicit Region_map_client(Capability<Region_map>);
|
||||||
|
|
||||||
Local_addr attach(Dataspace_capability ds, size_t size = 0,
|
Attach_result attach(Dataspace_capability, Attr const &) override;
|
||||||
off_t offset = 0, bool use_local_addr = false,
|
void detach(addr_t) override;
|
||||||
Local_addr local_addr = (void *)0,
|
|
||||||
bool executable = false,
|
|
||||||
bool writeable = true) override;
|
|
||||||
|
|
||||||
void detach(Local_addr) override;
|
|
||||||
void fault_handler(Signal_context_capability) override;
|
void fault_handler(Signal_context_capability) override;
|
||||||
State state() override;
|
Fault fault() override;
|
||||||
Dataspace_capability dataspace() override;
|
Dataspace_capability dataspace() override;
|
||||||
};
|
};
|
||||||
|
|
||||||
|
@ -27,7 +27,7 @@ namespace Genode { struct Region_map; }
|
|||||||
struct Genode::Region_map : Interface
|
struct Genode::Region_map : Interface
|
||||||
{
|
{
|
||||||
/**
|
/**
|
||||||
* State of region map
|
* Fault state of region map
|
||||||
*
|
*
|
||||||
* If a thread accesses a location outside the regions attached to its
|
* If a thread accesses a location outside the regions attached to its
|
||||||
* address space, a fault occurs and gets signalled to the registered fault
|
* address space, a fault occurs and gets signalled to the registered fault
|
||||||
@ -35,115 +35,46 @@ struct Genode::Region_map : Interface
|
|||||||
* fault address and fault type to resolve the fault. This information is
|
* fault address and fault type to resolve the fault. This information is
|
||||||
* represented by this structure.
|
* represented by this structure.
|
||||||
*/
|
*/
|
||||||
struct State
|
struct Fault
|
||||||
{
|
{
|
||||||
enum Fault_type { READY, READ_FAULT, WRITE_FAULT, EXEC_FAULT };
|
enum class Type { NONE, READ, WRITE, EXEC };
|
||||||
|
|
||||||
/**
|
Type type; /* type of occurred fault */
|
||||||
* Type of occurred fault
|
addr_t addr; /* fault address unless fault is 'NONE' */
|
||||||
*/
|
|
||||||
Fault_type type = READY;
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Fault address
|
|
||||||
*/
|
|
||||||
addr_t addr = 0;
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Default constructor
|
|
||||||
*/
|
|
||||||
State() { }
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Constructor
|
|
||||||
*/
|
|
||||||
State(Fault_type fault_type, addr_t fault_addr)
|
|
||||||
: type(fault_type), addr(fault_addr) { }
|
|
||||||
};
|
};
|
||||||
|
|
||||||
|
struct Range { addr_t start; size_t num_bytes; };
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Helper for tranferring the bit representation of a pointer as RPC
|
* Attributes for 'attach'
|
||||||
* argument.
|
|
||||||
*/
|
*/
|
||||||
class Local_addr
|
struct Attr
|
||||||
{
|
{
|
||||||
private:
|
size_t size; /* size of the mapping, or 0 for the whole dataspace */
|
||||||
|
addr_t offset; /* page-aligned offset in dataspace */
|
||||||
void *_ptr = nullptr;
|
bool use_at;
|
||||||
|
addr_t at; /* designated start of region if 'use_at' is true */
|
||||||
public:
|
bool executable;
|
||||||
|
bool writeable;
|
||||||
Local_addr(auto ptr) : _ptr((void *)ptr) { }
|
|
||||||
|
|
||||||
Local_addr() { }
|
|
||||||
|
|
||||||
template <typename T>
|
|
||||||
operator T () { return (T)_ptr; }
|
|
||||||
};
|
};
|
||||||
|
|
||||||
|
enum class Attach_error { OUT_OF_RAM, OUT_OF_CAPS, REGION_CONFLICT, INVALID_DATASPACE };
|
||||||
|
|
||||||
/*********************
|
using Attach_result = Attempt<Range, Attach_error>;
|
||||||
** Exception types **
|
|
||||||
*********************/
|
|
||||||
|
|
||||||
class Invalid_dataspace : public Exception { };
|
|
||||||
class Region_conflict : public Exception { };
|
|
||||||
|
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Map dataspace into region map
|
* Map dataspace into region map
|
||||||
*
|
*
|
||||||
* \param ds capability of dataspace to map
|
* \param ds capability of dataspace to map
|
||||||
* \param size size of the locally mapped region
|
* \param attr mapping attributes
|
||||||
* default (0) is the whole dataspace
|
* \return address range of mapping within region map
|
||||||
* \param offset start at offset in dataspace (page-aligned)
|
|
||||||
* \param use_local_addr if set to true, attach the dataspace at
|
|
||||||
* the specified 'local_addr'
|
|
||||||
* \param local_addr local destination address
|
|
||||||
* \param executable if the mapping should be executable
|
|
||||||
* \param writeable if the mapping should be writeable
|
|
||||||
*
|
|
||||||
* \throw Invalid_dataspace
|
|
||||||
* \throw Region_conflict
|
|
||||||
* \throw Out_of_ram RAM quota of meta-data backing store is exhausted
|
|
||||||
* \throw Out_of_caps cap quota of meta-data backing store is exhausted
|
|
||||||
*
|
|
||||||
* \return address of mapped dataspace within region map
|
|
||||||
*
|
|
||||||
*/
|
*/
|
||||||
virtual Local_addr attach(Dataspace_capability ds,
|
virtual Attach_result attach(Dataspace_capability ds, Attr const &attr) = 0;
|
||||||
size_t size = 0, off_t offset = 0,
|
|
||||||
bool use_local_addr = false,
|
|
||||||
Local_addr local_addr = (void *)0,
|
|
||||||
bool executable = false,
|
|
||||||
bool writeable = true) = 0;
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Shortcut for attaching a dataspace at a predefined local address
|
|
||||||
*/
|
|
||||||
Local_addr attach_at(Dataspace_capability ds, addr_t local_addr,
|
|
||||||
size_t size = 0, off_t offset = 0) {
|
|
||||||
return attach(ds, size, offset, true, local_addr); }
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Shortcut for attaching a dataspace executable at local address
|
|
||||||
*/
|
|
||||||
Local_addr attach_executable(Dataspace_capability ds, addr_t local_addr,
|
|
||||||
size_t size = 0, off_t offset = 0) {
|
|
||||||
return attach(ds, size, offset, true, local_addr, true, false ); }
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Shortcut for attaching a dataspace will full rights at local address
|
|
||||||
*/
|
|
||||||
Local_addr attach_rwx(Dataspace_capability ds, addr_t local_addr,
|
|
||||||
size_t size = 0, off_t offset = 0) {
|
|
||||||
return attach(ds, size, offset, true, local_addr, true, true ); }
|
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Remove region from local address space
|
* Remove region from local address space
|
||||||
*/
|
*/
|
||||||
virtual void detach(Local_addr local_addr) = 0;
|
virtual void detach(addr_t) = 0;
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Register signal handler for region-manager faults
|
* Register signal handler for region-manager faults
|
||||||
@ -156,9 +87,9 @@ struct Genode::Region_map : Interface
|
|||||||
virtual void fault_handler(Signal_context_capability handler) = 0;
|
virtual void fault_handler(Signal_context_capability handler) = 0;
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Request current state of region map
|
* Request current fault state of region map
|
||||||
*/
|
*/
|
||||||
virtual State state() = 0;
|
virtual Fault fault() = 0;
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Return dataspace representation of region map
|
* Return dataspace representation of region map
|
||||||
@ -170,17 +101,13 @@ struct Genode::Region_map : Interface
|
|||||||
** RPC declaration **
|
** RPC declaration **
|
||||||
*********************/
|
*********************/
|
||||||
|
|
||||||
GENODE_RPC_THROW(Rpc_attach, Local_addr, attach,
|
GENODE_RPC(Rpc_attach, Attach_result, attach, Dataspace_capability, Attr const &);
|
||||||
GENODE_TYPE_LIST(Invalid_dataspace, Region_conflict,
|
GENODE_RPC(Rpc_detach, void, detach, addr_t);
|
||||||
Out_of_ram, Out_of_caps),
|
|
||||||
Dataspace_capability, size_t, off_t, bool, Local_addr,
|
|
||||||
bool, bool);
|
|
||||||
GENODE_RPC(Rpc_detach, void, detach, Local_addr);
|
|
||||||
GENODE_RPC(Rpc_fault_handler, void, fault_handler, Signal_context_capability);
|
GENODE_RPC(Rpc_fault_handler, void, fault_handler, Signal_context_capability);
|
||||||
GENODE_RPC(Rpc_state, State, state);
|
GENODE_RPC(Rpc_fault, Fault, fault);
|
||||||
GENODE_RPC(Rpc_dataspace, Dataspace_capability, dataspace);
|
GENODE_RPC(Rpc_dataspace, Dataspace_capability, dataspace);
|
||||||
|
|
||||||
GENODE_RPC_INTERFACE(Rpc_attach, Rpc_detach, Rpc_fault_handler, Rpc_state,
|
GENODE_RPC_INTERFACE(Rpc_attach, Rpc_detach, Rpc_fault_handler, Rpc_fault,
|
||||||
Rpc_dataspace);
|
Rpc_dataspace);
|
||||||
};
|
};
|
||||||
|
|
||||||
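The former convenience shortcuts 'attach_at', 'attach_executable', and 'attach_rwx' are dropped; callers express the same intent through 'Attr'. For instance, the equivalent of the old 'attach_executable(ds, at)' — read-only, executable, at a fixed address — becomes roughly (names assumed):

  Region_map::Attach_result const result = rm.attach(ds, {
    .size = { }, .offset = { },
    .use_at = true, .at = at,
    .executable = true, .writeable = false
  });
  /* 'result' is an Attempt<Range, Attach_error>, consumed via 'convert' or 'with_result' */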
|
@ -136,9 +136,9 @@ _ZN6Genode17Native_capability4_incEv T
|
|||||||
_ZN6Genode17Native_capabilityC1Ev T
|
_ZN6Genode17Native_capabilityC1Ev T
|
||||||
_ZN6Genode17Native_capabilityC2Ev T
|
_ZN6Genode17Native_capabilityC2Ev T
|
||||||
_ZN6Genode17Region_map_client13fault_handlerENS_10CapabilityINS_14Signal_contextEEE T
|
_ZN6Genode17Region_map_client13fault_handlerENS_10CapabilityINS_14Signal_contextEEE T
|
||||||
_ZN6Genode17Region_map_client5stateEv T
|
_ZN6Genode17Region_map_client5faultEv T
|
||||||
_ZN6Genode17Region_map_client6attachENS_10CapabilityINS_9DataspaceEEEmlbNS_10Region_map10Local_addrEbb T
|
_ZN6Genode17Region_map_client6attachENS_10CapabilityINS_9DataspaceEEERKNS_10Region_map4AttrE T
|
||||||
_ZN6Genode17Region_map_client6detachENS_10Region_map10Local_addrE T
|
_ZN6Genode17Region_map_client6detachEm T
|
||||||
_ZN6Genode17Region_map_client9dataspaceEv T
|
_ZN6Genode17Region_map_client9dataspaceEv T
|
||||||
_ZN6Genode17Region_map_clientC1ENS_10CapabilityINS_10Region_mapEEE T
|
_ZN6Genode17Region_map_clientC1ENS_10CapabilityINS_10Region_mapEEE T
|
||||||
_ZN6Genode17Region_map_clientC2ENS_10CapabilityINS_10Region_mapEEE T
|
_ZN6Genode17Region_map_clientC2ENS_10CapabilityINS_10Region_mapEEE T
|
||||||
|
@ -21,18 +21,16 @@
|
|||||||
using namespace Core;
|
using namespace Core;
|
||||||
|
|
||||||
|
|
||||||
Region_map::Local_addr
|
Region_map::Attach_result
|
||||||
Core_region_map::attach(Dataspace_capability ds_cap, size_t, off_t, bool,
|
Core_region_map::attach(Dataspace_capability ds_cap, Attr const &)
|
||||||
Region_map::Local_addr, bool, bool)
|
|
||||||
{
|
{
|
||||||
auto lambda = [] (Dataspace_component *ds) {
|
return _ep.apply(ds_cap, [] (Dataspace_component *ds) -> Attach_result {
|
||||||
if (!ds)
|
if (!ds)
|
||||||
throw Invalid_dataspace();
|
return Attach_error::INVALID_DATASPACE;
|
||||||
|
|
||||||
return (void *)ds->phys_addr();
|
return Range { .start = ds->phys_addr(), .num_bytes = ds->size() };
|
||||||
};
|
});
|
||||||
return _ep.apply(ds_cap, lambda);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
void Core_region_map::detach(Local_addr) { }
|
void Core_region_map::detach(addr_t) { }
|
||||||
|
@ -31,6 +31,7 @@ void Dataspace_component::detached_from(Rm_region &region)
|
|||||||
_regions.remove(&region);
|
_regions.remove(&region);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
void Dataspace_component::detach_from_rm_sessions()
|
void Dataspace_component::detach_from_rm_sessions()
|
||||||
{
|
{
|
||||||
_mutex.acquire();
|
_mutex.acquire();
|
||||||
@ -44,13 +45,14 @@ void Dataspace_component::detach_from_rm_sessions()
|
|||||||
* removes the current region from the '_regions' list.
|
* removes the current region from the '_regions' list.
|
||||||
*/
|
*/
|
||||||
_mutex.release();
|
_mutex.release();
|
||||||
r->rm().reserve_and_flush((void *)r->base());
|
r->rm().reserve_and_flush(r->base());
|
||||||
_mutex.acquire();
|
_mutex.acquire();
|
||||||
}
|
}
|
||||||
|
|
||||||
_mutex.release();
|
_mutex.release();
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
Dataspace_component::~Dataspace_component()
|
Dataspace_component::~Dataspace_component()
|
||||||
{
|
{
|
||||||
detach_from_rm_sessions();
|
detach_from_rm_sessions();
|
||||||
|
@ -34,18 +34,11 @@ class Core::Core_region_map : public Region_map
|
|||||||
|
|
||||||
Core_region_map(Rpc_entrypoint &ep) : _ep(ep) { }
|
Core_region_map(Rpc_entrypoint &ep) : _ep(ep) { }
|
||||||
|
|
||||||
Local_addr attach(Dataspace_capability, size_t size = 0,
|
Attach_result attach(Dataspace_capability, Attr const &) override;
|
||||||
off_t offset=0, bool use_local_addr = false,
|
void detach(addr_t) override;
|
||||||
Local_addr local_addr = 0,
|
|
||||||
bool executable = false,
|
|
||||||
bool writeable = true) override;
|
|
||||||
|
|
||||||
void detach(Local_addr) override;
|
|
||||||
|
|
||||||
void fault_handler (Signal_context_capability) override { }
|
void fault_handler (Signal_context_capability) override { }
|
||||||
State state () override { return State(); }
|
Fault fault() override { return { }; }
|
||||||
|
Dataspace_capability dataspace() override { return { }; }
|
||||||
Dataspace_capability dataspace() override { return Dataspace_capability(); }
|
|
||||||
};
|
};
|
||||||
|
|
||||||
#endif /* _CORE__INCLUDE__CORE_REGION_MAP_H_ */
|
#endif /* _CORE__INCLUDE__CORE_REGION_MAP_H_ */
|
||||||
|
@ -40,7 +40,7 @@
|
|||||||
#include <base/internal/stack_area.h>
|
#include <base/internal/stack_area.h>
|
||||||
|
|
||||||
namespace Core {
|
namespace Core {
|
||||||
class Region_map_detach;
|
struct Region_map_detach;
|
||||||
class Rm_region;
|
class Rm_region;
|
||||||
struct Fault;
|
struct Fault;
|
||||||
class Cpu_thread_component;
|
class Cpu_thread_component;
|
||||||
@ -52,13 +52,19 @@ namespace Core {
|
|||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
class Core::Region_map_detach : Interface
|
struct Core::Region_map_detach : Interface
|
||||||
{
|
{
|
||||||
public:
|
virtual void detach_at(addr_t) = 0;
|
||||||
|
|
||||||
virtual void detach(Region_map::Local_addr) = 0;
|
/**
|
||||||
|
* Unmap memory area from all address spaces referencing it
|
||||||
|
*
|
||||||
|
* \param base base address of region to unmap
|
||||||
|
* \param size size of region to unmap in bytes
|
||||||
|
*/
|
||||||
virtual void unmap_region(addr_t base, size_t size) = 0;
|
virtual void unmap_region(addr_t base, size_t size) = 0;
|
||||||
virtual void reserve_and_flush(Region_map::Local_addr) = 0;
|
|
||||||
|
virtual void reserve_and_flush(addr_t) = 0;
|
||||||
};
|
};
|
||||||
|
|
||||||
|
|
||||||
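With 'detach' split into 'detach_at' and 'reserve_and_flush', an implementer of 'Region_map_detach' now provides three purely address-based hooks. A skeletal, hypothetical sketch of the interface surface (no real logic):

  struct Detach_backend : Core::Region_map_detach
  {
    void detach_at(addr_t at) override { /* remove the region starting at 'at' */ }
    void unmap_region(addr_t base, size_t size) override { /* flush mappings of the range */ }
    void reserve_and_flush(addr_t at) override { /* drop mappings but keep the region reserved */ }
  };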
@ -81,7 +87,7 @@ class Core::Rm_region : public List<Rm_region>::Element
|
|||||||
size_t size;
|
size_t size;
|
||||||
bool write;
|
bool write;
|
||||||
bool exec;
|
bool exec;
|
||||||
off_t off;
|
addr_t off;
|
||||||
bool dma;
|
bool dma;
|
||||||
|
|
||||||
void print(Output &out) const
|
void print(Output &out) const
|
||||||
@ -110,7 +116,7 @@ class Core::Rm_region : public List<Rm_region>::Element
|
|||||||
size_t size() const { return _attr.size; }
|
size_t size() const { return _attr.size; }
|
||||||
bool write() const { return _attr.write; }
|
bool write() const { return _attr.write; }
|
||||||
bool executable() const { return _attr.exec; }
|
bool executable() const { return _attr.exec; }
|
||||||
off_t offset() const { return _attr.off; }
|
addr_t offset() const { return _attr.off; }
|
||||||
bool dma() const { return _attr.dma; }
|
bool dma() const { return _attr.dma; }
|
||||||
Region_map_detach &rm() const { return _rm; }
|
Region_map_detach &rm() const { return _rm; }
|
||||||
|
|
||||||
@ -213,7 +219,7 @@ class Core::Rm_faulter : Fifo<Rm_faulter>::Element, Interface
|
|||||||
Pager_object &_pager_object;
|
Pager_object &_pager_object;
|
||||||
Mutex _mutex { };
|
Mutex _mutex { };
|
||||||
Weak_ptr<Region_map_component> _faulting_region_map { };
|
Weak_ptr<Region_map_component> _faulting_region_map { };
|
||||||
Region_map::State _fault_state { };
|
Region_map::Fault _fault { };
|
||||||
|
|
||||||
friend class Fifo<Rm_faulter>;
|
friend class Fifo<Rm_faulter>;
|
||||||
|
|
||||||
@ -231,8 +237,7 @@ class Core::Rm_faulter : Fifo<Rm_faulter>::Element, Interface
|
|||||||
/**
|
/**
|
||||||
* Assign fault state
|
* Assign fault state
|
||||||
*/
|
*/
|
||||||
void fault(Region_map_component &faulting_region_map,
|
void fault(Region_map_component &faulting_region_map, Region_map::Fault);
|
||||||
Region_map::State fault_state);
|
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Disassociate faulter from the faulted region map
|
* Disassociate faulter from the faulted region map
|
||||||
@ -246,12 +251,12 @@ class Core::Rm_faulter : Fifo<Rm_faulter>::Element, Interface
|
|||||||
* Return true if page fault occurred in specified address range
|
* Return true if page fault occurred in specified address range
|
||||||
*/
|
*/
|
||||||
bool fault_in_addr_range(addr_t addr, size_t size) {
|
bool fault_in_addr_range(addr_t addr, size_t size) {
|
||||||
return (_fault_state.addr >= addr) && (_fault_state.addr <= addr + size - 1); }
|
return (_fault.addr >= addr) && (_fault.addr <= addr + size - 1); }
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Return fault state as exported via the region-map interface
|
* Return fault state as exported via the region-map interface
|
||||||
*/
|
*/
|
||||||
Region_map::State fault_state() { return _fault_state; }
|
Region_map::Fault fault() { return _fault; }
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Wake up faulter by answering the pending page fault
|
* Wake up faulter by answering the pending page fault
|
||||||
@ -412,7 +417,7 @@ class Core::Region_map_component : private Weak_object<Region_map_component>,
|
|||||||
* Called recursively when resolving a page fault in nested region maps.
|
* Called recursively when resolving a page fault in nested region maps.
|
||||||
*/
|
*/
|
||||||
With_mapping_result _with_region_at_fault(Recursion_limit const recursion_limit,
|
With_mapping_result _with_region_at_fault(Recursion_limit const recursion_limit,
|
||||||
Fault const &fault,
|
Core::Fault const &fault,
|
||||||
auto const &resolved_fn,
|
auto const &resolved_fn,
|
||||||
auto const &reflect_fn)
|
auto const &reflect_fn)
|
||||||
{
|
{
|
||||||
@ -441,7 +446,7 @@ class Core::Region_map_component : private Weak_object<Region_map_component>,
|
|||||||
Rm_region const &region = *region_ptr;
|
Rm_region const &region = *region_ptr;
|
||||||
|
|
||||||
/* fault information relative to 'region' */
|
/* fault information relative to 'region' */
|
||||||
Fault const relative_fault = fault.within_region(region);
|
Core::Fault const relative_fault = fault.within_region(region);
|
||||||
|
|
||||||
Result result = Result::NO_REGION;
|
Result result = Result::NO_REGION;
|
||||||
|
|
||||||
@ -476,7 +481,7 @@ class Core::Region_map_component : private Weak_object<Region_map_component>,
|
|||||||
}
|
}
|
||||||
|
|
||||||
/* traverse into managed dataspace */
|
/* traverse into managed dataspace */
|
||||||
Fault const sub_region_map_relative_fault =
|
Core::Fault const sub_region_map_relative_fault =
|
||||||
relative_fault.within_sub_region_map(region.offset(),
|
relative_fault.within_sub_region_map(region.offset(),
|
||||||
dataspace.size());
|
dataspace.size());
|
||||||
|
|
||||||
@ -497,30 +502,25 @@ class Core::Region_map_component : private Weak_object<Region_map_component>,
|
|||||||
|
|
||||||
struct Attach_attr
|
struct Attach_attr
|
||||||
{
|
{
|
||||||
size_t size;
|
Attr attr;
|
||||||
off_t offset;
|
|
||||||
bool use_local_addr;
|
|
||||||
addr_t local_addr;
|
|
||||||
bool executable;
|
|
||||||
bool writeable;
|
|
||||||
bool dma;
|
bool dma;
|
||||||
};
|
};
|
||||||
|
|
||||||
Local_addr _attach(Dataspace_capability, Attach_attr);
|
Attach_result _attach(Dataspace_capability, Attach_attr);
|
||||||
|
|
||||||
void _with_region(Local_addr local_addr, auto const &fn)
|
void _with_region(addr_t at, auto const &fn)
|
||||||
{
|
{
|
||||||
/* read meta data for address */
|
/* read meta data for address */
|
||||||
Rm_region *region_ptr = _map.metadata(local_addr);
|
Rm_region * const region_ptr = _map.metadata((void *)at);
|
||||||
|
|
||||||
if (!region_ptr) {
|
if (!region_ptr) {
|
||||||
if (_diag.enabled)
|
if (_diag.enabled)
|
||||||
warning("_with_region: no attachment at ", (void *)local_addr);
|
warning("_with_region: no attachment at ", (void *)at);
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
if ((region_ptr->base() != static_cast<addr_t>(local_addr)) && _diag.enabled)
|
if ((region_ptr->base() != static_cast<addr_t>(at)) && _diag.enabled)
|
||||||
warning("_with_region: ", static_cast<void *>(local_addr), " is not "
|
warning("_with_region: ", reinterpret_cast<void *>(at), " is not "
|
||||||
"the beginning of the region ", Hex(region_ptr->base()));
|
"the beginning of the region ", Hex(region_ptr->base()));
|
||||||
|
|
||||||
fn(*region_ptr);
|
fn(*region_ptr);
|
||||||
@ -530,16 +530,6 @@ class Core::Region_map_component : private Weak_object<Region_map_component>,
|
|||||||
|
|
||||||
public:
|
public:
|
||||||
|
|
||||||
/*
|
|
||||||
* Unmaps a memory area from all address spaces referencing it.
|
|
||||||
*
|
|
||||||
* \param base base address of region to unmap
|
|
||||||
* \param size size of region to unmap
|
|
||||||
*/
|
|
||||||
void unmap_region(addr_t base, size_t size) override;
|
|
||||||
|
|
||||||
void reserve_and_flush(Local_addr) override;
|
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Constructor
|
* Constructor
|
||||||
*
|
*
|
||||||
@ -572,11 +562,9 @@ class Core::Region_map_component : private Weak_object<Region_map_component>,
|
|||||||
* for resolution.
|
* for resolution.
|
||||||
*
|
*
|
||||||
* \param faulter faulting region-manager client
|
* \param faulter faulting region-manager client
|
||||||
* \param pf_addr page-fault address
|
* \param fault fault information
|
||||||
* \param pf_type type of page fault (read/write/execute)
|
|
||||||
*/
|
*/
|
||||||
void fault(Rm_faulter &faulter, addr_t pf_addr,
|
void fault(Rm_faulter &faulter, Fault);
|
||||||
Region_map::State::Fault_type pf_type);
|
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Dissolve faulter from region map
|
* Dissolve faulter from region map
|
||||||
@ -596,16 +584,16 @@ class Core::Region_map_component : private Weak_object<Region_map_component>,
|
|||||||
* \param reflect_fn functor called to reflect a missing mapping
|
* \param reflect_fn functor called to reflect a missing mapping
|
||||||
* to user space if a fault handler is registered
|
* to user space if a fault handler is registered
|
||||||
*/
|
*/
|
||||||
With_mapping_result with_mapping_for_fault(Fault const &fault,
|
With_mapping_result with_mapping_for_fault(Core::Fault const &fault,
|
||||||
auto const &apply_fn,
|
auto const &apply_fn,
|
||||||
auto const &reflect_fn)
|
auto const &reflect_fn)
|
||||||
{
|
{
|
||||||
return _with_region_at_fault(Recursion_limit { 5 }, fault,
|
return _with_region_at_fault(Recursion_limit { 5 }, fault,
|
||||||
[&] (Rm_region const &region, Fault const &region_relative_fault)
|
[&] (Rm_region const &region, Core::Fault const &region_relative_fault)
|
||||||
{
|
{
|
||||||
With_mapping_result result = With_mapping_result::NO_REGION;
|
With_mapping_result result = With_mapping_result::NO_REGION;
|
||||||
region.with_dataspace([&] (Dataspace_component &dataspace) {
|
region.with_dataspace([&] (Dataspace_component &dataspace) {
|
||||||
Fault const ram_relative_fault =
|
Core::Fault const ram_relative_fault =
|
||||||
region_relative_fault.within_ram(region.offset(), dataspace.attr());
|
region_relative_fault.within_ram(region.offset(), dataspace.attr());
|
||||||
|
|
||||||
Log2_range src_range { ram_relative_fault.hotspot };
|
Log2_range src_range { ram_relative_fault.hotspot };
|
||||||
@ -661,15 +649,23 @@ class Core::Region_map_component : private Weak_object<Region_map_component>,
|
|||||||
Attach_dma_result attach_dma(Dataspace_capability, addr_t);
|
Attach_dma_result attach_dma(Dataspace_capability, addr_t);
|
||||||
|
|
||||||
|
|
||||||
|
/*********************************
|
||||||
|
** Region_map_detach interface **
|
||||||
|
*********************************/
|
||||||
|
|
||||||
|
void unmap_region (addr_t, size_t) override;
|
||||||
|
void detach_at (addr_t) override;
|
||||||
|
void reserve_and_flush (addr_t) override;
|
||||||
|
|
||||||
|
|
||||||
/**************************
|
/**************************
|
||||||
** Region map interface **
|
** Region map interface **
|
||||||
**************************/
|
**************************/
|
||||||
|
|
||||||
Local_addr attach (Dataspace_capability, size_t, off_t,
|
Attach_result attach (Dataspace_capability, Attr const &) override;
|
||||||
bool, Local_addr, bool, bool) override;
|
void detach (addr_t at) override { detach_at(at); }
|
||||||
void detach (Local_addr) override;
|
void fault_handler (Signal_context_capability) override;
|
||||||
void fault_handler (Signal_context_capability handler) override;
|
Fault fault () override;
|
||||||
State state () override;
|
|
||||||
|
|
||||||
Dataspace_capability dataspace () override { return _ds_cap; }
|
Dataspace_capability dataspace () override { return _ds_cap; }
|
||||||
};
|
};
|
||||||
|
@ -110,13 +110,10 @@ class Core::Trace::Subject
|
|||||||
_size = size;
|
_size = size;
|
||||||
|
|
||||||
/* copy content */
|
/* copy content */
|
||||||
void *src = local_rm.attach(from_ds),
|
Attached_dataspace from { local_rm, from_ds },
|
||||||
*dst = local_rm.attach(_ds);
|
to { local_rm, _ds };
|
||||||
|
|
||||||
Genode::memcpy(dst, src, _size);
|
Genode::memcpy(to.local_addr<char>(), from.local_addr<char const>(), _size);
|
||||||
|
|
||||||
local_rm.detach(src);
|
|
||||||
local_rm.detach(dst);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
|
@ -62,12 +62,12 @@ Pager_object::Pager_result Rm_client::pager(Ipc_pager &pager)
|
|||||||
|
|
||||||
[&] (Region_map_component &rm, Fault const &fault) /* reflect to user space */
|
[&] (Region_map_component &rm, Fault const &fault) /* reflect to user space */
|
||||||
{
|
{
|
||||||
using Type = Region_map::State::Fault_type;
|
using Type = Region_map::Fault::Type;
|
||||||
Type const type = (fault.access == Access::READ) ? Type::READ_FAULT
|
Type const type = (fault.access == Access::READ) ? Type::READ
|
||||||
: (fault.access == Access::WRITE) ? Type::WRITE_FAULT
|
: (fault.access == Access::WRITE) ? Type::WRITE
|
||||||
: Type::EXEC_FAULT;
|
: Type::EXEC;
|
||||||
/* deliver fault info to responsible region map */
|
/* deliver fault info to responsible region map */
|
||||||
rm.fault(*this, fault.hotspot.value, type);
|
rm.fault(*this, { .type = type, .addr = fault.hotspot.value });
|
||||||
}
|
}
|
||||||
);
|
);
|
||||||
|
|
||||||
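On the receiving side, a component managing a nested region map would register a signal handler and query the new 'fault()' accessor (formerly 'state()') before resolving the fault. A sketch with assumed names 'rm_client', 'backing_ds', and 'sigh', and assuming 4-KiB pages:

  rm_client.fault_handler(sigh);

  /* in the signal handler */
  Region_map::Fault const fault = rm_client.fault();
  if (fault.type != Region_map::Fault::Type::NONE)
    rm_client.attach(backing_ds, {
      .size = { }, .offset = { },
      .use_at = true, .at = fault.addr & ~0xfffUL,
      .executable = { }, .writeable = true
    }).with_error([&] (Region_map::Attach_error) {
      error("failed to resolve fault at ", Hex(fault.addr)); });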
@ -118,12 +118,12 @@ Pager_object::Pager_result Rm_client::pager(Ipc_pager &pager)
|
|||||||
*************/
|
*************/
|
||||||
|
|
||||||
void Rm_faulter::fault(Region_map_component &faulting_region_map,
|
void Rm_faulter::fault(Region_map_component &faulting_region_map,
|
||||||
Region_map::State fault_state)
|
Region_map::Fault fault)
|
||||||
{
|
{
|
||||||
Mutex::Guard lock_guard(_mutex);
|
Mutex::Guard lock_guard(_mutex);
|
||||||
|
|
||||||
_faulting_region_map = faulting_region_map.weak_ptr();
|
_faulting_region_map = faulting_region_map.weak_ptr();
|
||||||
_fault_state = fault_state;
|
_fault = fault;
|
||||||
|
|
||||||
_pager_object.unresolved_page_fault_occurred();
|
_pager_object.unresolved_page_fault_occurred();
|
||||||
}
|
}
|
||||||
@ -154,7 +154,7 @@ void Rm_faulter::continue_after_resolved_fault()
|
|||||||
|
|
||||||
_pager_object.wake_up();
|
_pager_object.wake_up();
|
||||||
_faulting_region_map = Weak_ptr<Core::Region_map_component>();
|
_faulting_region_map = Weak_ptr<Core::Region_map_component>();
|
||||||
_fault_state = Region_map::State();
|
_fault = { };
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
@ -162,55 +162,54 @@ void Rm_faulter::continue_after_resolved_fault()
|
|||||||
** Region-map component **
|
** Region-map component **
|
||||||
**************************/
|
**************************/
|
||||||
|
|
||||||
Region_map::Local_addr
|
Region_map::Attach_result
|
||||||
Region_map_component::_attach(Dataspace_capability ds_cap, Attach_attr const attr)
|
Region_map_component::_attach(Dataspace_capability ds_cap, Attach_attr const core_attr)
|
||||||
{
|
{
|
||||||
|
Attr const attr = core_attr.attr;
|
||||||
|
|
||||||
/* serialize access */
|
/* serialize access */
|
||||||
Mutex::Guard lock_guard(_mutex);
|
Mutex::Guard lock_guard(_mutex);
|
||||||
|
|
||||||
/* offset must be positive and page-aligned */
|
/* offset must be page-aligned */
|
||||||
if (attr.offset < 0 || align_addr(attr.offset, get_page_size_log2()) != attr.offset)
|
if (align_addr(attr.offset, get_page_size_log2()) != attr.offset)
|
||||||
throw Region_conflict();
|
return Attach_error::REGION_CONFLICT;
|
||||||
|
|
||||||
auto lambda = [&] (Dataspace_component *dsc) {
|
auto lambda = [&] (Dataspace_component *dsc) -> Attach_result {
|
||||||
|
|
||||||
using Alloc_error = Range_allocator::Alloc_error;
|
using Alloc_error = Range_allocator::Alloc_error;
|
||||||
|
|
||||||
/* check dataspace validity */
|
/* check dataspace validity */
|
||||||
if (!dsc)
|
if (!dsc)
|
||||||
throw Invalid_dataspace();
|
return Attach_error::INVALID_DATASPACE;
|
||||||
|
|
||||||
unsigned const min_align_log2 = get_page_size_log2();
|
unsigned const min_align_log2 = get_page_size_log2();
|
||||||
|
|
||||||
size_t const off = attr.offset;
|
size_t const ds_size = dsc->size();
|
||||||
if (off >= dsc->size())
|
|
||||||
throw Region_conflict();
|
|
||||||
|
|
||||||
size_t size = attr.size;
|
if (attr.offset >= ds_size)
|
||||||
|
return Attach_error::REGION_CONFLICT;
|
||||||
|
|
||||||
if (!size)
|
size_t size = attr.size ? attr.size : ds_size - attr.offset;
|
||||||
size = dsc->size() - attr.offset;
|
|
||||||
|
|
||||||
/* work with page granularity */
|
/* work with page granularity */
|
||||||
size = align_addr(size, min_align_log2);
|
size = align_addr(size, min_align_log2);
|
||||||
|
|
||||||
/* deny creation of regions larger then the actual dataspace */
|
/* deny creation of regions larger then the actual dataspace */
|
||||||
if (dsc->size() < size + attr.offset)
|
if (ds_size < size + attr.offset)
|
||||||
throw Region_conflict();
|
return Attach_error::REGION_CONFLICT;
|
||||||
|
|
||||||
/* allocate region for attachment */
|
/* allocate region for attachment */
|
||||||
void *attach_at = nullptr;
|
bool at_defined = false;
|
||||||
if (attr.use_local_addr) {
|
addr_t at { };
|
||||||
_map.alloc_addr(size, attr.local_addr).with_result(
|
if (attr.use_at) {
|
||||||
[&] (void *ptr) { attach_at = ptr; },
|
Alloc_error error = Alloc_error::DENIED;
|
||||||
[&] (Range_allocator::Alloc_error error) {
|
_map.alloc_addr(size, attr.at).with_result(
|
||||||
switch (error) {
|
[&] (void *ptr) { at = addr_t(ptr); at_defined = true; },
|
||||||
case Alloc_error::OUT_OF_RAM: throw Out_of_ram();
|
[&] (Alloc_error e) { error = e; });
|
||||||
case Alloc_error::OUT_OF_CAPS: throw Out_of_caps();
|
|
||||||
case Alloc_error::DENIED: break;
|
if (error == Alloc_error::OUT_OF_RAM) return Attach_error::OUT_OF_RAM;
|
||||||
}
|
if (error == Alloc_error::OUT_OF_CAPS) return Attach_error::OUT_OF_CAPS;
|
||||||
throw Region_conflict();
|
|
||||||
});
|
|
||||||
} else {
|
} else {
|
||||||
|
|
||||||
/*
|
/*
|
||||||
@ -222,8 +221,7 @@ Region_map_component::_attach(Dataspace_capability ds_cap, Attach_attr const att
|
|||||||
if (align_log2 >= sizeof(void *)*8)
|
if (align_log2 >= sizeof(void *)*8)
|
||||||
align_log2 = min_align_log2;
|
align_log2 = min_align_log2;
|
||||||
|
|
||||||
bool done = false;
|
for (; !at_defined && (align_log2 >= min_align_log2); align_log2--) {
|
||||||
for (; !done && (align_log2 >= min_align_log2); align_log2--) {
|
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Don't use an alignment higher than the alignment of the backing
|
* Don't use an alignment higher than the alignment of the backing
|
||||||
@ -233,60 +231,52 @@ Region_map_component::_attach(Dataspace_capability ds_cap, Attach_attr const att
|
|||||||
if (((dsc->map_src_addr() + attr.offset) & ((1UL << align_log2) - 1)) != 0)
|
if (((dsc->map_src_addr() + attr.offset) & ((1UL << align_log2) - 1)) != 0)
|
||||||
continue;
|
continue;
|
||||||
|
|
||||||
/* try allocating the align region */
|
/* try allocating the aligned region */
|
||||||
_map.alloc_aligned(size, (unsigned)align_log2).with_result(
|
Alloc_error error = Alloc_error::DENIED;
|
||||||
|
_map.alloc_aligned(size, unsigned(align_log2)).with_result(
|
||||||
|
[&] (void *ptr) { at = addr_t(ptr); at_defined = true; },
|
||||||
|
[&] (Alloc_error e) { error = e; });
|
||||||
|
|
||||||
[&] (void *ptr) {
|
if (error == Alloc_error::OUT_OF_RAM) return Attach_error::OUT_OF_RAM;
|
||||||
attach_at = ptr;
|
if (error == Alloc_error::OUT_OF_CAPS) return Attach_error::OUT_OF_CAPS;
|
||||||
done = true; },
|
|
||||||
|
|
||||||
[&] (Range_allocator::Alloc_error error) {
|
|
||||||
switch (error) {
|
|
||||||
case Alloc_error::OUT_OF_RAM: throw Out_of_ram();
|
|
||||||
case Alloc_error::OUT_OF_CAPS: throw Out_of_caps();
|
|
||||||
case Alloc_error::DENIED: break; /* no fit */
|
|
||||||
}
|
}
|
||||||
/* try smaller alignment in next iteration... */
|
|
||||||
});
|
|
||||||
}
|
|
||||||
|
|
||||||
if (!done)
|
|
||||||
throw Region_conflict();
|
|
||||||
}
|
}
|
||||||
|
if (!at_defined)
|
||||||
|
return Attach_error::REGION_CONFLICT;
|
||||||
|
|
||||||
Rm_region::Attr const region_attr
|
Rm_region::Attr const region_attr
|
||||||
{
|
{
|
||||||
.base = (addr_t)attach_at,
|
.base = at,
|
||||||
.size = size,
|
.size = size,
|
||||||
.write = attr.writeable,
|
.write = attr.writeable,
|
||||||
.exec = attr.executable,
|
.exec = attr.executable,
|
||||||
.off = attr.offset,
|
.off = attr.offset,
|
||||||
.dma = attr.dma,
|
.dma = core_attr.dma,
|
||||||
};
|
};
|
||||||
|
|
||||||
/* store attachment info in meta data */
|
/* store attachment info in meta data */
|
||||||
try {
|
try {
|
||||||
_map.construct_metadata(attach_at, *dsc, *this, region_attr);
|
_map.construct_metadata((void *)at, *dsc, *this, region_attr);
|
||||||
}
|
}
|
||||||
catch (Allocator_avl_tpl<Rm_region>::Assign_metadata_failed) {
|
catch (Allocator_avl_tpl<Rm_region>::Assign_metadata_failed) {
|
||||||
error("failed to store attachment info");
|
error("failed to store attachment info");
|
||||||
throw Invalid_dataspace();
|
return Attach_error::INVALID_DATASPACE;
|
||||||
}
|
}
|
||||||
|
|
||||||
/* inform dataspace about attachment */
|
/* inform dataspace about attachment */
|
||||||
Rm_region * const region_ptr = _map.metadata(attach_at);
|
Rm_region * const region_ptr = _map.metadata((void *)at);
|
||||||
if (region_ptr)
|
if (region_ptr)
|
||||||
dsc->attached_to(*region_ptr);
|
dsc->attached_to(*region_ptr);
|
||||||
|
|
||||||
/* check if attach operation resolves any faulting region-manager clients */
|
/* check if attach operation resolves any faulting region-manager clients */
|
||||||
_faulters.for_each([&] (Rm_faulter &faulter) {
|
_faulters.for_each([&] (Rm_faulter &faulter) {
|
||||||
if (faulter.fault_in_addr_range((addr_t)attach_at, size)) {
|
if (faulter.fault_in_addr_range(at, size)) {
|
||||||
_faulters.remove(faulter);
|
_faulters.remove(faulter);
|
||||||
faulter.continue_after_resolved_fault();
|
faulter.continue_after_resolved_fault();
|
||||||
}
|
}
|
||||||
});
|
});
|
||||||
|
|
||||||
return attach_at;
|
return Range { .start = at, .num_bytes = size };
|
||||||
};
|
};
|
||||||
|
|
||||||
return _ds_ep.apply(ds_cap, lambda);
|
return _ds_ep.apply(ds_cap, lambda);
|
||||||
@ -351,23 +341,10 @@ void Region_map_component::unmap_region(addr_t base, size_t size)
|
|||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
Region_map::Local_addr
|
Region_map::Attach_result
|
||||||
Region_map_component::attach(Dataspace_capability ds_cap, size_t size,
|
Region_map_component::attach(Dataspace_capability ds_cap, Attr const &attr)
|
||||||
off_t offset, bool use_local_addr,
|
|
||||||
Region_map::Local_addr local_addr,
|
|
||||||
bool executable, bool writeable)
|
|
||||||
{
|
{
|
||||||
Attach_attr const attr {
|
return _attach(ds_cap, { .attr = attr, .dma = false });
|
||||||
.size = size,
|
|
||||||
.offset = offset,
|
|
||||||
.use_local_addr = use_local_addr,
|
|
||||||
.local_addr = local_addr,
|
|
||||||
.executable = executable,
|
|
||||||
.writeable = writeable,
|
|
||||||
.dma = false,
|
|
||||||
};
|
|
||||||
|
|
||||||
return _attach(ds_cap, attr);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
@ -375,25 +352,30 @@ Region_map_component::Attach_dma_result
|
|||||||
Region_map_component::attach_dma(Dataspace_capability ds_cap, addr_t at)
|
Region_map_component::attach_dma(Dataspace_capability ds_cap, addr_t at)
|
||||||
{
|
{
|
||||||
Attach_attr const attr {
|
Attach_attr const attr {
|
||||||
.size = 0,
|
.attr = {
|
||||||
.offset = 0,
|
.size = { },
|
||||||
.use_local_addr = true,
|
.offset = { },
|
||||||
.local_addr = at,
|
.use_at = true,
|
||||||
|
.at = at,
|
||||||
.executable = false,
|
.executable = false,
|
||||||
.writeable = true,
|
.writeable = true,
|
||||||
|
},
|
||||||
.dma = true,
|
.dma = true,
|
||||||
};
|
};
|
||||||
|
|
||||||
using Attach_dma_error = Pd_session::Attach_dma_error;
|
using Attach_dma_error = Pd_session::Attach_dma_error;
|
||||||
|
|
||||||
try {
|
return _attach(ds_cap, attr).convert<Attach_dma_result>(
|
||||||
_attach(ds_cap, attr);
|
[&] (Range) { return Pd_session::Attach_dma_ok(); },
|
||||||
return Pd_session::Attach_dma_ok();
|
[&] (Attach_error e) {
|
||||||
|
switch (e) {
|
||||||
|
case Attach_error::OUT_OF_RAM: return Attach_dma_error::OUT_OF_RAM;
|
||||||
|
case Attach_error::OUT_OF_CAPS: return Attach_dma_error::OUT_OF_CAPS;
|
||||||
|
case Attach_error::REGION_CONFLICT: break;
|
||||||
|
case Attach_error::INVALID_DATASPACE: break;
|
||||||
}
|
}
|
||||||
catch (Invalid_dataspace) { return Attach_dma_error::DENIED; }
|
return Attach_dma_error::DENIED;
|
||||||
catch (Region_conflict) { return Attach_dma_error::DENIED; }
|
});
|
||||||
catch (Out_of_ram) { return Attach_dma_error::OUT_OF_RAM; }
|
|
||||||
catch (Out_of_caps) { return Attach_dma_error::OUT_OF_CAPS; }
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
@ -448,23 +430,23 @@ void Region_map_component::_reserve_and_flush_unsynchronized(Rm_region ®ion)
|
|||||||
/*
|
/*
|
||||||
* Flush the region, but keep it reserved until 'detach()' is called.
|
* Flush the region, but keep it reserved until 'detach()' is called.
|
||||||
*/
|
*/
|
||||||
void Region_map_component::reserve_and_flush(Local_addr local_addr)
|
void Region_map_component::reserve_and_flush(addr_t const at)
|
||||||
{
|
{
|
||||||
/* serialize access */
|
/* serialize access */
|
||||||
Mutex::Guard lock_guard(_mutex);
|
Mutex::Guard lock_guard(_mutex);
|
||||||
|
|
||||||
_with_region(local_addr, [&] (Rm_region ®ion) {
|
_with_region(at, [&] (Rm_region ®ion) {
|
||||||
_reserve_and_flush_unsynchronized(region);
|
_reserve_and_flush_unsynchronized(region);
|
||||||
});
|
});
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
void Region_map_component::detach(Local_addr local_addr)
|
void Region_map_component::detach_at(addr_t const at)
|
||||||
{
|
{
|
||||||
/* serialize access */
|
/* serialize access */
|
||||||
Mutex::Guard lock_guard(_mutex);
|
Mutex::Guard lock_guard(_mutex);
|
||||||
|
|
||||||
_with_region(local_addr, [&] (Rm_region ®ion) {
|
_with_region(at, [&] (Rm_region ®ion) {
|
||||||
if (!region.reserved())
|
if (!region.reserved())
|
||||||
_reserve_and_flush_unsynchronized(region);
|
_reserve_and_flush_unsynchronized(region);
|
||||||
/* free the reserved region */
|
/* free the reserved region */
|
||||||
@ -490,11 +472,10 @@ void Region_map_component::remove_client(Rm_client &rm_client)
|
|||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
void Region_map_component::fault(Rm_faulter &faulter, addr_t pf_addr,
|
void Region_map_component::fault(Rm_faulter &faulter, Region_map::Fault fault)
|
||||||
Region_map::State::Fault_type pf_type)
|
|
||||||
{
|
{
|
||||||
/* remember fault state in faulting thread */
|
/* remember fault state in faulting thread */
|
||||||
faulter.fault(*this, Region_map::State(pf_type, pf_addr));
|
faulter.fault(*this, fault);
|
||||||
|
|
||||||
/* enqueue faulter */
|
/* enqueue faulter */
|
||||||
_faulters.enqueue(faulter);
|
_faulters.enqueue(faulter);
|
||||||
@ -520,17 +501,15 @@ void Region_map_component::fault_handler(Signal_context_capability sigh)
|
|||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
Region_map::State Region_map_component::state()
|
Region_map::Fault Region_map_component::fault()
|
||||||
{
|
{
|
||||||
/* serialize access */
|
/* serialize access */
|
||||||
Mutex::Guard lock_guard(_mutex);
|
Mutex::Guard lock_guard(_mutex);
|
||||||
|
|
||||||
/* return ready state if there are not current faulters */
|
/* return fault information regarding the first faulter */
|
||||||
Region_map::State result;
|
Region_map::Fault result { };
|
||||||
|
|
||||||
/* otherwise return fault information regarding the first faulter */
|
|
||||||
_faulters.head([&] (Rm_faulter &faulter) {
|
_faulters.head([&] (Rm_faulter &faulter) {
|
||||||
result = faulter.fault_state(); });
|
result = faulter.fault(); });
|
||||||
return result;
|
return result;
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -609,7 +588,7 @@ Region_map_component::~Region_map_component()
|
|||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
|
|
||||||
detach(out_addr);
|
detach_at(out_addr);
|
||||||
}
|
}
|
||||||
|
|
||||||
/* revoke dataspace representation */
|
/* revoke dataspace representation */
|
||||||
|
@ -65,25 +65,25 @@ class Stack_area_region_map : public Region_map
|
|||||||
/**
|
/**
|
||||||
* Allocate and attach on-the-fly backing store to stack area
|
* Allocate and attach on-the-fly backing store to stack area
|
||||||
*/
|
*/
|
||||||
Local_addr attach(Dataspace_capability, size_t size, off_t,
|
Attach_result attach(Dataspace_capability, Attr const &attr) override
|
||||||
bool, Local_addr local_addr, bool, bool) override
|
|
||||||
{
|
{
|
||||||
/* allocate physical memory */
|
/* allocate physical memory */
|
||||||
size = round_page(size);
|
size_t const size = round_page(attr.size);
|
||||||
|
|
||||||
Range_allocator &phys = platform_specific().ram_alloc();
|
Range_allocator &phys = platform_specific().ram_alloc();
|
||||||
|
|
||||||
return phys.alloc_aligned(size, get_page_size_log2()).convert<Local_addr>(
|
return phys.alloc_aligned(size, get_page_size_log2()).convert<Attach_result>(
|
||||||
|
|
||||||
[&] (void *phys_ptr) {
|
[&] (void *phys_ptr) -> Attach_result {
|
||||||
|
|
||||||
|
try {
|
||||||
addr_t const phys_base = (addr_t)phys_ptr;
|
addr_t const phys_base = (addr_t)phys_ptr;
|
||||||
|
|
||||||
Dataspace_component &ds = *new (&_ds_slab)
|
Dataspace_component &ds = *new (&_ds_slab)
|
||||||
Dataspace_component(size, 0, (addr_t)phys_base, CACHED, true, 0);
|
Dataspace_component(size, 0, (addr_t)phys_base, CACHED, true, 0);
|
||||||
|
|
||||||
addr_t const core_local_addr = stack_area_virtual_base()
|
addr_t const core_local_addr = stack_area_virtual_base()
|
||||||
+ (addr_t)local_addr;
|
+ attr.at;
|
||||||
|
|
||||||
if (!map_local(ds.phys_addr(), core_local_addr,
|
if (!map_local(ds.phys_addr(), core_local_addr,
|
||||||
ds.size() >> get_page_size_log2())) {
|
ds.size() >> get_page_size_log2())) {
|
||||||
@ -91,26 +91,27 @@ class Stack_area_region_map : public Region_map
|
|||||||
" at local ", Hex(core_local_addr));
|
" at local ", Hex(core_local_addr));
|
||||||
|
|
||||||
phys.free(phys_ptr);
|
phys.free(phys_ptr);
|
||||||
return Local_addr { (addr_t)0 };
|
return Attach_error::INVALID_DATASPACE;
|
||||||
}
|
}
|
||||||
|
|
||||||
ds.assign_core_local_addr((void*)core_local_addr);
|
ds.assign_core_local_addr((void*)core_local_addr);
|
||||||
|
|
||||||
return local_addr;
|
return Range { .start = attr.at, .num_bytes = size };
|
||||||
|
}
|
||||||
|
catch (Out_of_ram) { return Attach_error::OUT_OF_RAM; }
|
||||||
|
catch (Out_of_caps) { return Attach_error::OUT_OF_CAPS; }
|
||||||
},
|
},
|
||||||
[&] (Range_allocator::Alloc_error) {
|
[&] (Range_allocator::Alloc_error) {
|
||||||
error("could not allocate backing store for new stack");
|
error("could not allocate backing store for new stack");
|
||||||
return (addr_t)0; });
|
return Attach_error::REGION_CONFLICT; });
|
||||||
}
|
}
|
||||||
|
|
||||||
void detach(Local_addr local_addr) override
|
void detach(addr_t const at) override
|
||||||
{
|
{
|
||||||
using Genode::addr_t;
|
if (at >= stack_area_virtual_size())
|
||||||
|
|
||||||
if ((addr_t)local_addr >= stack_area_virtual_size())
|
|
||||||
return;
|
return;
|
||||||
|
|
||||||
addr_t const detach = stack_area_virtual_base() + (addr_t)local_addr;
|
addr_t const detach = stack_area_virtual_base() + at;
|
||||||
addr_t const stack = stack_virtual_size();
|
addr_t const stack = stack_virtual_size();
|
||||||
addr_t const pages = ((detach & ~(stack - 1)) + stack - detach)
|
addr_t const pages = ((detach & ~(stack - 1)) + stack - detach)
|
||||||
>> get_page_size_log2();
|
>> get_page_size_log2();
|
||||||
@ -120,9 +121,9 @@ class Stack_area_region_map : public Region_map
|
|||||||
|
|
||||||
void fault_handler(Signal_context_capability) override { }
|
void fault_handler(Signal_context_capability) override { }
|
||||||
|
|
||||||
State state() override { return State(); }
|
Fault fault() override { return { }; }
|
||||||
|
|
||||||
Dataspace_capability dataspace() override { return Dataspace_capability(); }
|
Dataspace_capability dataspace() override { return { }; }
|
||||||
};
|
};
|
||||||
|
|
||||||
|
|
||||||
|
@ -65,6 +65,8 @@ void Vm_session_component::attach(Dataspace_capability const cap,
|
|||||||
|
|
||||||
using Alloc_error = Range_allocator::Alloc_error;
|
using Alloc_error = Range_allocator::Alloc_error;
|
||||||
|
|
||||||
|
Region_map_detach &rm_detach = *this;
|
||||||
|
|
||||||
_map.alloc_addr(attribute.size, guest_phys).with_result(
|
_map.alloc_addr(attribute.size, guest_phys).with_result(
|
||||||
|
|
||||||
[&] (void *) {
|
[&] (void *) {
|
||||||
@ -75,14 +77,14 @@ void Vm_session_component::attach(Dataspace_capability const cap,
|
|||||||
.size = attribute.size,
|
.size = attribute.size,
|
||||||
.write = dsc.writeable() && attribute.writeable,
|
.write = dsc.writeable() && attribute.writeable,
|
||||||
.exec = attribute.executable,
|
.exec = attribute.executable,
|
||||||
.off = (off_t)attribute.offset,
|
.off = attribute.offset,
|
||||||
.dma = false,
|
.dma = false,
|
||||||
};
|
};
|
||||||
|
|
||||||
/* store attachment info in meta data */
|
/* store attachment info in meta data */
|
||||||
try {
|
try {
|
||||||
_map.construct_metadata((void *)guest_phys,
|
_map.construct_metadata((void *)guest_phys,
|
||||||
dsc, *this, region_attr);
|
dsc, rm_detach, region_attr);
|
||||||
|
|
||||||
} catch (Allocator_avl_tpl<Rm_region>::Assign_metadata_failed) {
|
} catch (Allocator_avl_tpl<Rm_region>::Assign_metadata_failed) {
|
||||||
error("failed to store attachment info");
|
error("failed to store attachment info");
|
||||||
@ -149,7 +151,7 @@ void Vm_session_component::detach(addr_t guest_phys, size_t size)
|
|||||||
|
|
||||||
if (region) {
|
if (region) {
|
||||||
iteration_size = region->size();
|
iteration_size = region->size();
|
||||||
detach(region->base());
|
detach_at(region->base());
|
||||||
}
|
}
|
||||||
|
|
||||||
if (addr >= guest_phys_end - (iteration_size - 1))
|
if (addr >= guest_phys_end - (iteration_size - 1))
|
||||||
@ -160,10 +162,10 @@ void Vm_session_component::detach(addr_t guest_phys, size_t size)
|
|||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
void Vm_session_component::_with_region(Region_map::Local_addr addr,
|
void Vm_session_component::_with_region(addr_t const addr,
|
||||||
auto const &fn)
|
auto const &fn)
|
||||||
{
|
{
|
||||||
Rm_region *region = _map.metadata(addr);
|
Rm_region *region = _map.metadata((void *)addr);
|
||||||
if (region)
|
if (region)
|
||||||
fn(*region);
|
fn(*region);
|
||||||
else
|
else
|
||||||
@ -171,7 +173,7 @@ void Vm_session_component::_with_region(Region_map::Local_addr addr,
|
|||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
void Vm_session_component::detach(Region_map::Local_addr addr)
|
void Vm_session_component::detach_at(addr_t const addr)
|
||||||
{
|
{
|
||||||
_with_region(addr, [&] (Rm_region ®ion) {
|
_with_region(addr, [&] (Rm_region ®ion) {
|
||||||
|
|
||||||
@ -190,7 +192,7 @@ void Vm_session_component::unmap_region(addr_t base, size_t size)
|
|||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
void Vm_session_component::reserve_and_flush(Region_map::Local_addr addr)
|
void Vm_session_component::reserve_and_flush(addr_t const addr)
|
||||||
{
|
{
|
||||||
_with_region(addr, [&] (Rm_region ®ion) {
|
_with_region(addr, [&] (Rm_region ®ion) {
|
||||||
|
|
||||||
|
@ -33,11 +33,16 @@ struct Genode::Attached_stack_area : Expanding_region_map_client
|
|||||||
Expanding_region_map_client(parent, pd, Pd_session_client(pd).stack_area(),
|
Expanding_region_map_client(parent, pd, Pd_session_client(pd).stack_area(),
|
||||||
Parent::Env::pd())
|
Parent::Env::pd())
|
||||||
{
|
{
|
||||||
Region_map_client address_space(Pd_session_client(pd).address_space());
|
Region_map_client local_rm(Pd_session_client(pd).address_space());
|
||||||
|
|
||||||
address_space.attach_at(Expanding_region_map_client::dataspace(),
|
local_rm.attach(Expanding_region_map_client::dataspace(), Region_map::Attr {
|
||||||
stack_area_virtual_base(),
|
.size = stack_area_virtual_size(),
|
||||||
stack_area_virtual_size());
|
.offset = { },
|
||||||
|
.use_at = true,
|
||||||
|
.at = stack_area_virtual_base(),
|
||||||
|
.executable = false,
|
||||||
|
.writeable = true
|
||||||
|
});
|
||||||
}
|
}
|
||||||
};
|
};
|
||||||
|
|
||||||
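Attaching at a fixed virtual address is now expressed through the 'use_at' and 'at' fields of 'Region_map::Attr' rather than a dedicated 'attach_at' call. A condensed sketch of the pattern used above (illustrative only; 'rm', 'ds', 'base', and 'size' are placeholders for an existing region map, dataspace, target address, and mapping size):

    Genode::Region_map::Attr attr { };
    attr.size      = size;   /* number of bytes to map         */
    attr.use_at    = true;   /* request a fixed target address */
    attr.at        = base;   /* designated local address       */
    attr.writeable = true;

    if (rm.attach(ds, attr).failed())
        Genode::error("attach at ", Genode::Hex(base), " failed");

Calling 'failed()' on the returned 'Attach_result' is enough when the caller only needs to know whether the mapping succeeded, as 'load_segment_rx' does further below.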
@@ -34,22 +34,15 @@ struct Genode::Expanding_region_map_client : Region_map_client
                               Parent::Client::Id pd_id)
 : Region_map_client(rm), _pd_client(parent, pd, pd_id) { }

-Local_addr attach(Dataspace_capability ds, size_t size, off_t offset,
-                  bool use_local_addr, Local_addr local_addr,
-                  bool executable, bool writeable) override
+Attach_result attach(Dataspace_capability ds, Attr const &attr) override
 {
-return retry<Out_of_ram>(
-[&] () {
-return retry<Out_of_caps>(
-[&] {
-return Region_map_client::attach(ds, size, offset,
-                                 use_local_addr,
-                                 local_addr,
-                                 executable,
-                                 writeable); },
-[&] { _pd_client.upgrade_caps(2); });
-},
-[&] () { _pd_client.upgrade_ram(8*1024); });
+for (;;) {
+Attach_result const result = Region_map_client::attach(ds, attr);
+if      (result == Attach_error::OUT_OF_RAM)  _pd_client.upgrade_ram(8*1024);
+else if (result == Attach_error::OUT_OF_CAPS) _pd_client.upgrade_caps(2);
+else
+return result;
+}
 }
 };

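The explicit loop replaces the nested 'retry<Out_of_ram>' / 'retry<Out_of_caps>' helpers, which no longer apply because attach does not throw. A generic form of the same pattern could look as follows; this helper is a hypothetical sketch, not part of the commit, and 'pd' stands for the same kind of quota-upgradable PD client used above:

    template <typename PD_CLIENT, typename ATTACH_FN>
    Genode::Region_map::Attach_result
    attach_with_upgrade(PD_CLIENT &pd, ATTACH_FN const &attach_fn)
    {
        using Error = Genode::Region_map::Attach_error;

        for (;;) {
            Genode::Region_map::Attach_result const result = attach_fn();

            /* top up the session quota and retry on resource shortage */
            if      (result == Error::OUT_OF_RAM)  pd.upgrade_ram(8*1024);
            else if (result == Error::OUT_OF_CAPS) pd.upgrade_caps(2);
            else                                   return result;
        }
    }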
@@ -759,8 +759,6 @@ void Child::_try_construct_env_dependent_members()
 catch (Out_of_caps)  { _error("out of caps during ELF loading"); }
 catch (Process::Missing_dynamic_linker) { _error("dynamic linker unavailable"); }
 catch (Process::Invalid_executable)     { _error("invalid ELF executable"); }
-catch (Region_map::Invalid_dataspace)   { _error("ELF loading failed (Invalid_dataspace)"); }
-catch (Region_map::Region_conflict)     { _error("ELF loading failed (Region_conflict)"); }
 }


@@ -41,12 +41,17 @@ Child::Process::Loaded_executable::Loaded_executable(Type type,
 throw Missing_dynamic_linker();
 }

-addr_t elf_addr = 0;
-try { elf_addr = local_rm.attach(ldso_ds); }
-catch (Region_map::Invalid_dataspace) {
-error("dynamic linker is an invalid dataspace"); throw; }
-catch (Region_map::Region_conflict) {
-error("region conflict while attaching dynamic linker"); throw; }
+addr_t const elf_addr = local_rm.attach(ldso_ds, Region_map::Attr{}).convert<addr_t>(
+[&] (Region_map::Range range) { return range.start; },
+[&] (Region_map::Attach_error const e) -> addr_t {
+if (e == Region_map::Attach_error::INVALID_DATASPACE)
+error("dynamic linker is an invalid dataspace");
+if (e == Region_map::Attach_error::REGION_CONFLICT)
+error("region conflict while attaching dynamic linker");
+return 0; });
+
+if (!elf_addr)
+return;

 Elf_binary elf(elf_addr);


@@ -66,7 +71,6 @@ Child::Process::Loaded_executable::Loaded_executable(Type type,
 size_t const size = seg.mem_size();

 bool const write = seg.flags().w;
-bool const exec  = seg.flags().x;

 if (write) {


@@ -89,14 +93,17 @@ Child::Process::Loaded_executable::Loaded_executable(Type type,
 error("allocation of read-write segment failed"); throw; };

 /* attach dataspace */
-void *base;
-try { base = local_rm.attach(ds_cap); }
-catch (Region_map::Invalid_dataspace) {
-error("attempt to attach invalid segment dataspace"); throw; }
-catch (Region_map::Region_conflict) {
-error("region conflict while locally attaching ELF segment"); throw; }
-
-void * const ptr = base;
+Region_map::Attr attr { };
+attr.writeable = true;
+void * const ptr = local_rm.attach(ds_cap, attr).convert<void *>(
+[&] (Region_map::Range range) { return (void *)range.start; },
+[&] (Region_map::Attach_error const e) {
+if (e == Region_map::Attach_error::INVALID_DATASPACE)
+error("attempt to attach invalid segment dataspace");
+if (e == Region_map::Attach_error::REGION_CONFLICT)
+error("region conflict while locally attaching ELF segment");
+return nullptr; });
+
 addr_t const laddr = elf_addr + seg.file_offset();

 /* copy contents and fill with zeros */

@@ -115,15 +122,21 @@ Child::Process::Loaded_executable::Loaded_executable(Type type,
 }

 /* detach dataspace */
-local_rm.detach(base);
+local_rm.detach(addr_t(ptr));

-off_t const offset = 0;
-try { remote_rm.attach_at(ds_cap, addr, size, offset); }
-catch (Region_map::Region_conflict) {
+remote_rm.attach(ds_cap, Region_map::Attr {
+.size       = size,
+.offset     = { },
+.use_at     = true,
+.at         = addr,
+.executable = false,
+.writeable  = true
+}).with_result(
+[&] (Region_map::Range) { },
+[&] (Region_map::Attach_error) {
 error("region conflict while remotely attaching ELF segment");
-error("addr=", (void *)addr, " size=", (void *)size, " offset=", (void *)offset);
-throw; }
+error("addr=", (void *)addr, " size=", (void *)size); }
+);

 } else {

 /* read-only segment */

@@ -131,27 +144,28 @@ Child::Process::Loaded_executable::Loaded_executable(Type type,
 if (seg.file_size() != seg.mem_size())
 warning("filesz and memsz for read-only segment differ");

-off_t const offset = seg.file_offset();
-try {
-if (exec)
-remote_rm.attach_executable(ldso_ds, addr, size, offset);
-else
-remote_rm.attach_at(ldso_ds, addr, size, offset);
-}
-catch (Region_map::Region_conflict) {
+remote_rm.attach(ldso_ds, Region_map::Attr {
+.size       = size,
+.offset     = seg.file_offset(),
+.use_at     = true,
+.at         = addr,
+.executable = seg.flags().x,
+.writeable  = false
+}).with_result(
+[&] (Region_map::Range) { },
+[&] (Region_map::Attach_error const e) {
+if (e == Region_map::Attach_error::REGION_CONFLICT)
 error("region conflict while remotely attaching read-only ELF segment");
-error("addr=", (void *)addr, " size=", (void *)size, " offset=", (void *)offset);
-throw;
-}
-catch (Region_map::Invalid_dataspace) {
+if (e == Region_map::Attach_error::INVALID_DATASPACE)
 error("attempt to attach invalid read-only segment dataspace");
-throw;
+error("addr=", (void *)addr, " size=", (void *)size);
 }
+);
 }
 }

 /* detach ELF */
-local_rm.detach((void *)elf_addr);
+local_rm.detach(elf_addr);
 }


@@ -44,7 +44,7 @@ void Heap::Dataspace_pool::remove_and_free(Dataspace &ds)
 */

 Ram_dataspace_capability ds_cap = ds.cap;
-void *ds_local_addr = ds.local_addr;
+addr_t const at = addr_t(ds.local_addr);

 remove(&ds);


@@ -56,7 +56,7 @@ void Heap::Dataspace_pool::remove_and_free(Dataspace &ds)
 */
 ds.~Dataspace();

-region_map->detach(ds_local_addr);
+region_map->detach(at);
 ram_alloc->free(ds_cap);
 }


@@ -102,22 +102,36 @@ Heap::_allocate_dataspace(size_t size, bool enforce_separate_metadata)
 struct Attach_guard
 {
 Region_map &rm;
-struct { void *ptr = nullptr; };
+Region_map::Range range { };
 bool keep = false;

 Attach_guard(Region_map &rm) : rm(rm) { }

-~Attach_guard() { if (!keep && ptr) rm.detach(ptr); }
+~Attach_guard() { if (!keep && range.start) rm.detach(range.start); }

 } attach_guard(*_ds_pool.region_map);

-try {
-attach_guard.ptr = _ds_pool.region_map->attach(ds_cap);
-}
-catch (Out_of_ram)                    { return Alloc_error::OUT_OF_RAM; }
-catch (Out_of_caps)                   { return Alloc_error::OUT_OF_CAPS; }
-catch (Region_map::Invalid_dataspace) { return Alloc_error::DENIED; }
-catch (Region_map::Region_conflict)   { return Alloc_error::DENIED; }
+Region_map::Attr attr { };
+attr.writeable = true;
+Region_map::Attach_result const result = _ds_pool.region_map->attach(ds_cap, attr);
+if (result.failed()) {
+using Error = Region_map::Attach_error;
+return result.convert<Alloc_error>(
+[&] (auto) /* never called */ { return Alloc_error::DENIED; },
+[&] (Error e) {
+switch (e) {
+case Error::OUT_OF_RAM:        return Alloc_error::OUT_OF_RAM;
+case Error::OUT_OF_CAPS:       return Alloc_error::OUT_OF_CAPS;
+case Error::REGION_CONFLICT:   break;
+case Error::INVALID_DATASPACE: break;
+}
+return Alloc_error::DENIED;
+});
+}
+
+result.with_result(
+[&] (Region_map::Range range) { attach_guard.range = range; },
+[&] (auto) { /* handled above */ });

 Alloc_result metadata = Alloc_error::DENIED;


@@ -128,7 +142,7 @@ Heap::_allocate_dataspace(size_t size, bool enforce_separate_metadata)
 } else {

 /* add new local address range to our local allocator */
-_alloc->add_range((addr_t)attach_guard.ptr, size).with_result(
+_alloc->add_range(attach_guard.range.start, size).with_result(
 [&] (Range_allocator::Range_ok) {
 metadata = _alloc->alloc_aligned(sizeof(Heap::Dataspace), log2(16U)); },
 [&] (Alloc_error error) {

@@ -138,7 +152,7 @@ Heap::_allocate_dataspace(size_t size, bool enforce_separate_metadata)
 return metadata.convert<Result>(
 [&] (void *md_ptr) -> Result {
 Dataspace &ds = *construct_at<Dataspace>(md_ptr, ds_cap,
-                                         attach_guard.ptr, size);
+                                         (void *)attach_guard.range.start, size);
 _ds_pool.insert(&ds);
 alloc_guard.keep = attach_guard.keep = true;
 return &ds;

@@ -20,25 +20,23 @@ Region_map_client::Region_map_client(Capability<Region_map> cap)
 : Rpc_client<Region_map>(cap) { }


-Region_map::Local_addr
-Region_map_client::attach(Dataspace_capability ds, size_t size, off_t offset,
-                          bool use_local_addr, Local_addr local_addr,
-                          bool executable, bool writeable)
+Region_map::Attach_result
+Region_map_client::attach(Dataspace_capability ds, Attr const &attr)
 {
-return call<Rpc_attach>(ds, size, offset, use_local_addr, local_addr,
-                        executable, writeable);
+return call<Rpc_attach>(ds, attr);
 }


-void Region_map_client::detach(Local_addr local_addr) {
-call<Rpc_detach>(local_addr); }
+void Region_map_client::detach(addr_t at) { call<Rpc_detach>(at); }


-void Region_map_client::fault_handler(Signal_context_capability cap) {
-call<Rpc_fault_handler>(cap); }
+void Region_map_client::fault_handler(Signal_context_capability cap)
+{
+call<Rpc_fault_handler>(cap);
+}


-Region_map::State Region_map_client::state() { return call<Rpc_state>(); }
+Region_map::Fault Region_map_client::fault() { return call<Rpc_fault>(); }


 Dataspace_capability Region_map_client::dataspace() { return call<Rpc_dataspace>(); }
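On the client side, the former 'state()' RPC is renamed to 'fault()' and returns a 'Region_map::Fault' value. A server that manages a region map and reflects page faults to user space might use it roughly as follows; this is a sketch only, where 'managed_rm_cap' and 'sigh' stand for an existing region-map capability and signal handler:

    Genode::Region_map_client rm(managed_rm_cap);

    rm.fault_handler(sigh);   /* 'sigh' is triggered on each unresolved fault */

    /* within the signal handler */
    Genode::Region_map::Fault const fault = rm.fault();
    Genode::log("page fault at ", Genode::Hex(fault.addr),
                fault.type == Genode::Region_map::Fault::Type::WRITE ? " (write)" : "");
    /* ...attach a dataspace covering 'fault.addr' to resolve the fault... */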
|
@ -63,28 +63,42 @@ Allocator::Alloc_result Sliced_heap::try_alloc(size_t size)
|
|||||||
struct Attach_guard
|
struct Attach_guard
|
||||||
{
|
{
|
||||||
Region_map &rm;
|
Region_map &rm;
|
||||||
struct { void *ptr = nullptr; };
|
Region_map::Range range { };
|
||||||
bool keep = false;
|
bool keep = false;
|
||||||
|
|
||||||
Attach_guard(Region_map &rm) : rm(rm) { }
|
Attach_guard(Region_map &rm) : rm(rm) { }
|
||||||
|
|
||||||
~Attach_guard() { if (!keep && ptr) rm.detach(ptr); }
|
~Attach_guard() { if (!keep && range.start) rm.detach(range.start); }
|
||||||
|
|
||||||
} attach_guard(_region_map);
|
} attach_guard(_region_map);
|
||||||
|
|
||||||
try {
|
Region_map::Attr attr { };
|
||||||
attach_guard.ptr = _region_map.attach(ds_cap);
|
attr.writeable = true;
|
||||||
|
Region_map::Attach_result const result = _region_map.attach(ds_cap, attr);
|
||||||
|
if (result.failed()) {
|
||||||
|
using Error = Region_map::Attach_error;
|
||||||
|
return result.convert<Alloc_error>(
|
||||||
|
[&] (auto) /* never called */ { return Alloc_error::DENIED; },
|
||||||
|
[&] (Error e) {
|
||||||
|
switch (e) {
|
||||||
|
case Error::OUT_OF_RAM: return Alloc_error::OUT_OF_RAM;
|
||||||
|
case Error::OUT_OF_CAPS: return Alloc_error::OUT_OF_CAPS;
|
||||||
|
case Error::REGION_CONFLICT: break;
|
||||||
|
case Error::INVALID_DATASPACE: break;
|
||||||
}
|
}
|
||||||
catch (Out_of_ram) { return Alloc_error::OUT_OF_RAM; }
|
return Alloc_error::DENIED;
|
||||||
catch (Out_of_caps) { return Alloc_error::OUT_OF_CAPS; }
|
});
|
||||||
catch (Region_map::Invalid_dataspace) { return Alloc_error::DENIED; }
|
}
|
||||||
catch (Region_map::Region_conflict) { return Alloc_error::DENIED; }
|
|
||||||
|
result.with_result(
|
||||||
|
[&] (Region_map::Range range) { attach_guard.range = range; },
|
||||||
|
[&] (auto) { /* handled above */ });
|
||||||
|
|
||||||
/* serialize access to block list */
|
/* serialize access to block list */
|
||||||
Mutex::Guard guard(_mutex);
|
Mutex::Guard guard(_mutex);
|
||||||
|
|
||||||
Block * const block = construct_at<Block>(attach_guard.ptr, ds_cap, size);
|
Block * const block = construct_at<Block>((void *)attach_guard.range.start,
|
||||||
|
ds_cap, size);
|
||||||
_consumed += size;
|
_consumed += size;
|
||||||
_blocks.insert(block);
|
_blocks.insert(block);
|
||||||
|
|
||||||
@ -126,7 +140,7 @@ void Sliced_heap::free(void *addr, size_t)
|
|||||||
block->~Block();
|
block->~Block();
|
||||||
}
|
}
|
||||||
|
|
||||||
_region_map.detach(local_addr);
|
_region_map.detach(addr_t(local_addr));
|
||||||
_ram_alloc.free(ds_cap);
|
_ram_alloc.free(ds_cap);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -49,19 +49,35 @@ void Stack::size(size_t const size)
|
|||||||
|
|
||||||
/* allocate and attach backing store for the stack enhancement */
|
/* allocate and attach backing store for the stack enhancement */
|
||||||
addr_t const ds_addr = _base - ds_size - stack_area_virtual_base();
|
addr_t const ds_addr = _base - ds_size - stack_area_virtual_base();
|
||||||
try {
|
|
||||||
Ram_allocator * const ram = env_stack_area_ram_allocator;
|
|
||||||
Ram_dataspace_capability const ds_cap = ram->alloc(ds_size);
|
|
||||||
Region_map * const rm = env_stack_area_region_map;
|
|
||||||
void * const attach_addr = rm->attach_at(ds_cap, ds_addr, ds_size);
|
|
||||||
|
|
||||||
if (ds_addr != (addr_t)attach_addr)
|
Ram_allocator &ram = *env_stack_area_ram_allocator;
|
||||||
throw Thread::Out_of_stack_space();
|
Region_map &rm = *env_stack_area_region_map;
|
||||||
}
|
|
||||||
catch (Out_of_ram) { throw Thread::Stack_alloc_failed(); }
|
ram.try_alloc(ds_size).with_result(
|
||||||
|
[&] (Ram_dataspace_capability ds_cap) {
|
||||||
|
|
||||||
|
rm.attach(ds_cap, Region_map::Attr {
|
||||||
|
.size = ds_size,
|
||||||
|
.offset = 0,
|
||||||
|
.use_at = true,
|
||||||
|
.at = ds_addr,
|
||||||
|
.executable = { },
|
||||||
|
.writeable = true,
|
||||||
|
}).with_result(
|
||||||
|
[&] (Region_map::Range r) {
|
||||||
|
if (r.start != ds_addr)
|
||||||
|
throw Thread::Stack_alloc_failed();
|
||||||
|
|
||||||
/* update stack information */
|
/* update stack information */
|
||||||
_base -= ds_size;
|
_base -= ds_size;
|
||||||
|
},
|
||||||
|
[&] (Region_map::Attach_error) {
|
||||||
|
throw Thread::Stack_alloc_failed(); }
|
||||||
|
);
|
||||||
|
},
|
||||||
|
[&] (Ram_allocator::Alloc_error) {
|
||||||
|
throw Thread::Stack_alloc_failed(); }
|
||||||
|
);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
@ -93,27 +109,53 @@ Thread::_alloc_stack(size_t stack_size, char const *name, bool main_thread)
|
|||||||
if (sizeof(Native_utcb) >= (1 << PAGE_SIZE_LOG2))
|
if (sizeof(Native_utcb) >= (1 << PAGE_SIZE_LOG2))
|
||||||
ds_addr -= sizeof(Native_utcb);
|
ds_addr -= sizeof(Native_utcb);
|
||||||
|
|
||||||
|
Ram_allocator &ram = *env_stack_area_ram_allocator;
|
||||||
|
|
||||||
/* allocate and attach backing store for the stack */
|
/* allocate and attach backing store for the stack */
|
||||||
Ram_dataspace_capability ds_cap;
|
return ram.try_alloc(ds_size).convert<Stack *>(
|
||||||
try {
|
|
||||||
ds_cap = env_stack_area_ram_allocator->alloc(ds_size);
|
[&] (Ram_dataspace_capability const ds_cap)
|
||||||
addr_t attach_addr = ds_addr - stack_area_virtual_base();
|
{
|
||||||
if (attach_addr != (addr_t)env_stack_area_region_map->attach_at(ds_cap, attach_addr, ds_size))
|
addr_t const attach_addr = ds_addr - stack_area_virtual_base();
|
||||||
|
|
||||||
|
return env_stack_area_region_map->attach(ds_cap, Region_map::Attr {
|
||||||
|
.size = ds_size,
|
||||||
|
.offset = { },
|
||||||
|
.use_at = true,
|
||||||
|
.at = attach_addr,
|
||||||
|
.executable = { },
|
||||||
|
.writeable = true
|
||||||
|
}).convert<Stack *>(
|
||||||
|
|
||||||
|
[&] (Region_map::Range const range) -> Stack * {
|
||||||
|
if (range.start != attach_addr) {
|
||||||
|
ram.free(ds_cap);
|
||||||
throw Stack_alloc_failed();
|
throw Stack_alloc_failed();
|
||||||
}
|
}
|
||||||
catch (Out_of_ram) { throw Stack_alloc_failed(); }
|
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Now the stack is backed by memory, so it is safe to access its members.
|
* Now the stack is backed by memory, it is safe to access
|
||||||
|
* its members.
|
||||||
*
|
*
|
||||||
* We need to initialize the stack object's memory with zeroes, otherwise
|
* We need to initialize the stack object's memory with
|
||||||
* the ds_cap isn't invalid. That would cause trouble when the assignment
|
* zeroes, otherwise the ds_cap isn't invalid. That would
|
||||||
* operator of Native_capability is used.
|
* cause trouble when the assignment operator of
|
||||||
|
* Native_capability is used.
|
||||||
*/
|
*/
|
||||||
construct_at<Stack>(stack, name, *this, ds_addr, ds_cap);
|
construct_at<Stack>(stack, name, *this, ds_addr, ds_cap);
|
||||||
|
|
||||||
Abi::init_stack(stack->top());
|
Abi::init_stack(stack->top());
|
||||||
return stack;
|
return stack;
|
||||||
|
},
|
||||||
|
[&] (Region_map::Attach_error) -> Stack * {
|
||||||
|
ram.free(ds_cap);
|
||||||
|
throw Stack_alloc_failed();
|
||||||
|
}
|
||||||
|
);
|
||||||
|
},
|
||||||
|
[&] (Ram_allocator::Alloc_error) -> Stack * {
|
||||||
|
throw Stack_alloc_failed(); }
|
||||||
|
);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
@ -125,7 +167,7 @@ void Thread::_free_stack(Stack *stack)
|
|||||||
/* call de-constructor explicitly before memory gets detached */
|
/* call de-constructor explicitly before memory gets detached */
|
||||||
stack->~Stack();
|
stack->~Stack();
|
||||||
|
|
||||||
Genode::env_stack_area_region_map->detach((void *)ds_addr);
|
Genode::env_stack_area_region_map->detach(ds_addr);
|
||||||
Genode::env_stack_area_ram_allocator->free(ds_cap);
|
Genode::env_stack_area_ram_allocator->free(ds_cap);
|
||||||
|
|
||||||
/* stack ready for reuse */
|
/* stack ready for reuse */
|
||||||
@ -226,7 +268,15 @@ void Thread::_init_cpu_session_and_trace_control()
|
|||||||
/* initialize trace control now that the CPU session must be valid */
|
/* initialize trace control now that the CPU session must be valid */
|
||||||
Dataspace_capability ds = _cpu_session->trace_control();
|
Dataspace_capability ds = _cpu_session->trace_control();
|
||||||
if (ds.valid()) {
|
if (ds.valid()) {
|
||||||
_trace_control = local_rm_ptr->attach(ds); }
|
Region_map::Attr attr { };
|
||||||
|
attr.writeable = true;
|
||||||
|
local_rm_ptr->attach(ds, attr).with_result(
|
||||||
|
[&] (Region_map::Range range) {
|
||||||
|
_trace_control = reinterpret_cast<Trace::Control *>(range.start); },
|
||||||
|
[&] (Region_map::Attach_error) {
|
||||||
|
error("failed to initialize trace control for new thread"); }
|
||||||
|
);
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
@ -270,7 +320,7 @@ Thread::~Thread()
|
|||||||
* detached trace control dataspace.
|
* detached trace control dataspace.
|
||||||
*/
|
*/
|
||||||
if (_trace_control && local_rm_ptr)
|
if (_trace_control && local_rm_ptr)
|
||||||
local_rm_ptr->detach(_trace_control);
|
local_rm_ptr->detach(addr_t(_trace_control));
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
|
@ -60,13 +60,13 @@ bool Trace::Logger::_evaluate_control()
|
|||||||
|
|
||||||
/* unload policy */
|
/* unload policy */
|
||||||
if (policy_module) {
|
if (policy_module) {
|
||||||
_env().rm().detach(policy_module);
|
_env().rm().detach(addr_t(policy_module));
|
||||||
policy_module = 0;
|
policy_module = 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
/* unmap trace buffer */
|
/* unmap trace buffer */
|
||||||
if (buffer) {
|
if (buffer) {
|
||||||
_env().rm().detach(buffer);
|
_env().rm().detach(addr_t(buffer));
|
||||||
buffer = 0;
|
buffer = 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -97,17 +97,21 @@ bool Trace::Logger::_evaluate_control()
|
|||||||
return false;
|
return false;
|
||||||
}
|
}
|
||||||
|
|
||||||
try {
|
|
||||||
max_event_size = 0;
|
max_event_size = 0;
|
||||||
policy_module = 0;
|
policy_module = nullptr;
|
||||||
|
|
||||||
enum {
|
_env().rm().attach(policy_ds, {
|
||||||
MAX_SIZE = 0, NO_OFFSET = 0, ANY_LOCAL_ADDR = false,
|
.size = { }, .offset = { },
|
||||||
EXECUTABLE = true
|
.use_at = { }, .at = { },
|
||||||
};
|
.executable = true, .writeable = true,
|
||||||
|
}).with_result(
|
||||||
|
[&] (Region_map::Range range) {
|
||||||
|
policy_module = reinterpret_cast<Policy_module *>(range.start); },
|
||||||
|
[&] (Region_map::Attach_error) { error("failed to attach trace policy"); }
|
||||||
|
);
|
||||||
|
|
||||||
policy_module = _env().rm().attach(policy_ds, MAX_SIZE, NO_OFFSET,
|
if (!policy_module)
|
||||||
ANY_LOCAL_ADDR, nullptr, EXECUTABLE);
|
return false;
|
||||||
|
|
||||||
/* relocate function pointers of policy callback table */
|
/* relocate function pointers of policy callback table */
|
||||||
for (unsigned i = 0; i < sizeof(Trace::Policy_module)/sizeof(void *); i++) {
|
for (unsigned i = 0; i < sizeof(Trace::Policy_module)/sizeof(void *); i++) {
|
||||||
@ -116,10 +120,8 @@ bool Trace::Logger::_evaluate_control()
|
|||||||
|
|
||||||
max_event_size = policy_module->max_event_size();
|
max_event_size = policy_module->max_event_size();
|
||||||
|
|
||||||
} catch (...) { }
|
|
||||||
|
|
||||||
/* obtain buffer */
|
/* obtain buffer */
|
||||||
buffer = 0;
|
buffer = nullptr;
|
||||||
Dataspace_capability buffer_ds = Cpu_thread_client(thread_cap).trace_buffer();
|
Dataspace_capability buffer_ds = Cpu_thread_client(thread_cap).trace_buffer();
|
||||||
|
|
||||||
if (!buffer_ds.valid()) {
|
if (!buffer_ds.valid()) {
|
||||||
@ -129,11 +131,16 @@ bool Trace::Logger::_evaluate_control()
|
|||||||
return false;
|
return false;
|
||||||
}
|
}
|
||||||
|
|
||||||
try {
|
Region_map::Attr attr { };
|
||||||
buffer = _env().rm().attach(buffer_ds);
|
attr.writeable = true;
|
||||||
buffer->init(Dataspace_client(buffer_ds).size());
|
_env().rm().attach(buffer_ds, attr).with_result(
|
||||||
} catch (...) { }
|
[&] (Region_map::Range range) {
|
||||||
|
buffer = reinterpret_cast<Buffer *>(range.start); },
|
||||||
|
[&] (Region_map::Attach_error) { error("failed to attach trace buffer"); });
|
||||||
|
if (!buffer)
|
||||||
|
return false;
|
||||||
|
|
||||||
|
buffer->init(Dataspace_client(buffer_ds).size());
|
||||||
policy_version = control->policy_version();
|
policy_version = control->policy_version();
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -229,11 +236,17 @@ Trace::Logger *Thread::_logger()
|
|||||||
|
|
||||||
Cpu_session &cpu = myself ? *myself->_cpu_session : _env().cpu();
|
Cpu_session &cpu = myself ? *myself->_cpu_session : _env().cpu();
|
||||||
|
|
||||||
if (!myself)
|
if (!myself && !main_trace_control) {
|
||||||
if (!main_trace_control) {
|
|
||||||
Dataspace_capability ds = _env().cpu().trace_control();
|
Dataspace_capability ds = _env().cpu().trace_control();
|
||||||
if (ds.valid())
|
if (ds.valid()) {
|
||||||
main_trace_control = _env().rm().attach(ds);
|
Region_map::Attr attr { };
|
||||||
|
attr.writeable = true;
|
||||||
|
_env().rm().attach(ds, attr).with_result(
|
||||||
|
[&] (Region_map::Range range) {
|
||||||
|
main_trace_control = reinterpret_cast<Trace::Control *>(range.start); },
|
||||||
|
[&] (Region_map::Attach_error) {
|
||||||
|
error("failed to attach trace control"); });
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
thread_cap.with_result(
|
thread_cap.with_result(
|
||||||
|
@ -32,8 +32,11 @@ static Heap *cxx_heap_ptr;
|
|||||||
Heap &cxx_heap()
|
Heap &cxx_heap()
|
||||||
{
|
{
|
||||||
class Cxx_heap_uninitialized : Exception { };
|
class Cxx_heap_uninitialized : Exception { };
|
||||||
if (!cxx_heap_ptr)
|
if (!cxx_heap_ptr) {
|
||||||
|
raw("Cxx_heap_uninitialized");
|
||||||
|
for (;;);
|
||||||
throw Cxx_heap_uninitialized();
|
throw Cxx_heap_uninitialized();
|
||||||
|
}
|
||||||
|
|
||||||
return *cxx_heap_ptr;
|
return *cxx_heap_ptr;
|
||||||
}
|
}
|
||||||
|
@ -72,9 +72,9 @@ class Linker::Dynamic
|
|||||||
|
|
||||||
struct Needed : Fifo<Needed>::Element
|
struct Needed : Fifo<Needed>::Element
|
||||||
{
|
{
|
||||||
off_t offset;
|
addr_t offset;
|
||||||
|
|
||||||
Needed(off_t offset) : offset(offset) { }
|
Needed(addr_t offset) : offset(offset) { }
|
||||||
|
|
||||||
char const *path(char const *strtab)
|
char const *path(char const *strtab)
|
||||||
{
|
{
|
||||||
|
@ -137,8 +137,17 @@ struct Linker::Elf_file : File
|
|||||||
|| (name == "posix.lib.so")
|
|| (name == "posix.lib.so")
|
||||||
|| (strcmp(name.string(), "vfs", 3) == 0);
|
|| (strcmp(name.string(), "vfs", 3) == 0);
|
||||||
|
|
||||||
reloc_base = resident ? Region_map::r()->alloc_region_at_end(size)
|
Region_map::Alloc_region_result const allocated_region =
|
||||||
|
resident ? Region_map::r()->alloc_region_at_end(size)
|
||||||
: Region_map::r()->alloc_region(size);
|
: Region_map::r()->alloc_region(size);
|
||||||
|
|
||||||
|
reloc_base = allocated_region.convert<addr_t>(
|
||||||
|
[&] (addr_t base) { return base; },
|
||||||
|
[&] (Region_map::Alloc_region_error) { return 0UL; });
|
||||||
|
|
||||||
|
if (!reloc_base)
|
||||||
|
error("failed to allocate region within linker area");
|
||||||
|
|
||||||
start = 0;
|
start = 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -292,10 +301,15 @@ struct Linker::Elf_file : File
|
|||||||
*/
|
*/
|
||||||
void load_segment_rx(Elf::Phdr const &p)
|
void load_segment_rx(Elf::Phdr const &p)
|
||||||
{
|
{
|
||||||
Region_map::r()->attach_executable(rom_cap,
|
if (Region_map::r()->attach(rom_cap, Region_map::Attr {
|
||||||
trunc_page(p.p_vaddr) + reloc_base,
|
.size = round_page(p.p_memsz),
|
||||||
round_page(p.p_memsz),
|
.offset = trunc_page(p.p_offset),
|
||||||
trunc_page(p.p_offset));
|
.use_at = true,
|
||||||
|
.at = trunc_page(p.p_vaddr) + reloc_base,
|
||||||
|
.executable = true,
|
||||||
|
.writeable = false
|
||||||
|
}).failed())
|
||||||
|
error("failed to load RX segment");
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
@ -303,19 +317,46 @@ struct Linker::Elf_file : File
|
|||||||
*/
|
*/
|
||||||
void load_segment_rw(Elf::Phdr const &p, int nr)
|
void load_segment_rw(Elf::Phdr const &p, int nr)
|
||||||
{
|
{
|
||||||
void *src = env.rm().attach(rom_cap, 0, p.p_offset);
|
void * const src = env.rm().attach(rom_cap, Region_map::Attr {
|
||||||
addr_t dst = p.p_vaddr + reloc_base;
|
.size = { },
|
||||||
|
.offset = p.p_offset,
|
||||||
|
.use_at = { },
|
||||||
|
.at = { },
|
||||||
|
.executable = { },
|
||||||
|
.writeable = true
|
||||||
|
}).convert<void *>(
|
||||||
|
[&] (Genode::Region_map::Range range) { return (void *)range.start; },
|
||||||
|
[&] (Genode::Region_map::Attach_error) { return nullptr; }
|
||||||
|
);
|
||||||
|
if (!src) {
|
||||||
|
error("dynamic linker failed to locally map RW segment ", nr);
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
addr_t const dst = p.p_vaddr + reloc_base;
|
||||||
|
|
||||||
ram_cap[nr] = env.ram().alloc(p.p_memsz);
|
ram_cap[nr] = env.ram().alloc(p.p_memsz);
|
||||||
Region_map::r()->attach_at(ram_cap[nr], dst);
|
|
||||||
|
Region_map::r()->attach(ram_cap[nr], Region_map::Attr {
|
||||||
|
.size = { },
|
||||||
|
.offset = { },
|
||||||
|
.use_at = true,
|
||||||
|
.at = dst,
|
||||||
|
.executable = { },
|
||||||
|
.writeable = true
|
||||||
|
}).with_result(
|
||||||
|
[&] (Genode::Region_map::Range) {
|
||||||
|
|
||||||
memcpy((void*)dst, src, p.p_filesz);
|
memcpy((void*)dst, src, p.p_filesz);
|
||||||
|
|
||||||
/* clear if file size < memory size */
|
/* clear if file size < memory size */
|
||||||
if (p.p_filesz < p.p_memsz)
|
if (p.p_filesz < p.p_memsz)
|
||||||
memset((void *)(dst + p.p_filesz), 0, p.p_memsz - p.p_filesz);
|
memset((void *)(dst + p.p_filesz), 0, p.p_memsz - p.p_filesz);
|
||||||
|
},
|
||||||
env.rm().detach(src);
|
[&] (Genode::Region_map::Attach_error) {
|
||||||
|
error("dynamic linker failed to copy RW segment"); }
|
||||||
|
);
|
||||||
|
env.rm().detach(addr_t(src));
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
|
@ -39,11 +39,6 @@ namespace Linker {
|
|||||||
*/
|
*/
|
||||||
class Linker::Region_map
|
class Linker::Region_map
|
||||||
{
|
{
|
||||||
public:
|
|
||||||
|
|
||||||
typedef Region_map_client::Local_addr Local_addr;
|
|
||||||
typedef Region_map_client::Region_conflict Region_conflict;
|
|
||||||
|
|
||||||
private:
|
private:
|
||||||
|
|
||||||
Env &_env;
|
Env &_env;
|
||||||
@ -56,15 +51,27 @@ class Linker::Region_map
|
|||||||
|
|
||||||
Region_map(Env &env, Allocator &md_alloc, addr_t base)
|
Region_map(Env &env, Allocator &md_alloc, addr_t base)
|
||||||
:
|
:
|
||||||
_env(env), _range(&md_alloc),
|
_env(env), _range(&md_alloc), _base(base)
|
||||||
_base((addr_t)_env.rm().attach_rwx(_rm.dataspace(), base))
|
|
||||||
{
|
{
|
||||||
|
_env.rm().attach(_rm.dataspace(), Genode::Region_map::Attr {
|
||||||
|
.size = 0,
|
||||||
|
.offset = 0,
|
||||||
|
.use_at = true,
|
||||||
|
.at = _base,
|
||||||
|
.executable = true,
|
||||||
|
.writeable = true
|
||||||
|
}).with_result(
|
||||||
|
[&] (Genode::Region_map::Range) {
|
||||||
_range.add_range(base, Pd_session::LINKER_AREA_SIZE);
|
_range.add_range(base, Pd_session::LINKER_AREA_SIZE);
|
||||||
|
|
||||||
if (Linker::verbose)
|
if (Linker::verbose)
|
||||||
log(" ", Hex(base),
|
log(" ", Hex(base),
|
||||||
" .. ", Hex(base + Pd_session::LINKER_AREA_SIZE - 1),
|
" .. ", Hex(base + Pd_session::LINKER_AREA_SIZE - 1),
|
||||||
": linker area");
|
": linker area");
|
||||||
|
},
|
||||||
|
[&] (Genode::Region_map::Attach_error) {
|
||||||
|
error("failed to locally attach linker area"); }
|
||||||
|
);
|
||||||
}
|
}
|
||||||
|
|
||||||
public:
|
public:
|
||||||
@ -73,63 +80,55 @@ class Linker::Region_map
|
|||||||
|
|
||||||
static Constructible_region_map &r();
|
static Constructible_region_map &r();
|
||||||
|
|
||||||
|
using Alloc_region_error = Ram_allocator::Alloc_error;
|
||||||
|
using Alloc_region_result = Attempt<addr_t, Alloc_region_error>;
|
||||||
|
using Attach_result = Genode::Region_map::Attach_result;
|
||||||
|
using Attr = Genode::Region_map::Attr;
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Allocate region anywhere within the region map
|
* Allocate region anywhere within the region map
|
||||||
*
|
|
||||||
* XXX propagate OUT_OF_RAM, OUT_OF_CAPS
|
|
||||||
*/
|
*/
|
||||||
addr_t alloc_region(size_t size)
|
Alloc_region_result alloc_region(size_t size)
|
||||||
{
|
{
|
||||||
return _range.alloc_aligned(size, get_page_size_log2()).convert<addr_t>(
|
return _range.alloc_aligned(size, get_page_size_log2()).convert<Alloc_region_result>(
|
||||||
[&] (void *ptr) { return (addr_t)ptr; },
|
[&] (void *ptr) { return (addr_t)ptr; },
|
||||||
[&] (Allocator::Alloc_error) -> addr_t { throw Region_conflict(); });
|
[&] (Allocator::Alloc_error e) { return e; });
|
||||||
}
|
}
|
||||||
 
 		/**
 		 * Allocate region at specified 'vaddr'
 		 */
-		void alloc_region_at(size_t size, addr_t vaddr)
+		Alloc_region_result alloc_region_at(size_t size, addr_t vaddr)
 		{
-			if (_range.alloc_addr(size, vaddr).failed())
-				throw Region_conflict();
+			return _range.alloc_addr(size, vaddr).convert<Alloc_region_result>(
+				[&] (void *ptr) { return (addr_t)ptr; },
+				[&] (Allocator::Alloc_error e) { return e; });
 		}
 
-		addr_t alloc_region_at_end(size_t size)
+		Alloc_region_result alloc_region_at_end(size_t size)
 		{
 			_end -= align_addr(size, get_page_size_log2());
-			alloc_region_at(size, _end);
-			return _end;
+			return alloc_region_at(size, _end);
 		}
 
 		void free_region(addr_t vaddr) { _range.free((void *)vaddr); }
 
-		/**
-		 * Overwritten from 'Region_map_client'
-		 */
-		Local_addr attach_at(Dataspace_capability ds, addr_t local_addr,
-		                     size_t size = 0, off_t offset = 0)
+		Attach_result attach(Dataspace_capability ds, Attr attr)
 		{
-			return retry<Genode::Out_of_ram>(
-				[&] () {
-					return _rm.attach_at(ds, local_addr - _base, size, offset);
+			if (!attr.use_at)
+				error("unexpected arguments of Linker::Region_map::attach");
+
+			attr.at -= _base;
+
+			return _rm.attach(ds, attr).convert<Attach_result>(
+				[&] (Genode::Region_map::Range range) {
+					range.start += _base;
+					return range;
 				},
-				[&] () { _env.upgrade(Parent::Env::pd(), "ram_quota=8K"); });
+				[&] (Genode::Region_map::Attach_error e) { return e; }
+			);
 		}
 
-		/**
-		 * Overwritten from 'Region_map_client'
-		 */
-		Local_addr attach_executable(Dataspace_capability ds, addr_t local_addr,
-		                             size_t size = 0, off_t offset = 0)
-		{
-			return retry<Genode::Out_of_ram>(
-				[&] () {
-					return _rm.attach_executable(ds, local_addr - _base, size, offset);
-				},
-				[&] () { _env.upgrade(Parent::Env::pd(), "ram_quota=8K"); });
-		}
-
-		void detach(Local_addr local_addr) { _rm.detach((addr_t)local_addr - _base); }
+		void detach(addr_t local_addr) { _rm.detach(local_addr - _base); }
 
 };
 
 #endif /* _INCLUDE__REGION_MAP_H_ */
|
@ -652,25 +652,32 @@ void Genode::init_ldso_phdr(Env &env)
|
|||||||
{
|
{
|
||||||
struct Not_implemented : Exception { };
|
struct Not_implemented : Exception { };
|
||||||
|
|
||||||
Local_addr attach(Dataspace_capability ds, size_t, off_t,
|
Attach_result attach(Dataspace_capability ds, Attr const &) override
|
||||||
bool, Local_addr, bool, bool) override
|
|
||||||
{
|
{
|
||||||
size_t const size = Dataspace_client(ds).size();
|
size_t const size = Dataspace_client(ds).size();
|
||||||
|
|
||||||
Linker::Region_map &linker_area = *Linker::Region_map::r();
|
Linker::Region_map &linker_area = *Linker::Region_map::r();
|
||||||
|
|
||||||
addr_t const at = linker_area.alloc_region_at_end(size);
|
return linker_area.alloc_region_at_end(size).convert<Attach_result>(
|
||||||
|
[&] (addr_t const at) {
|
||||||
(void)linker_area.attach_at(ds, at, size, 0UL);
|
return linker_area.attach(ds, Region_map::Attr {
|
||||||
|
.size = size,
|
||||||
return at;
|
.offset = { },
|
||||||
|
.use_at = true,
|
||||||
|
.at = at,
|
||||||
|
.executable = { },
|
||||||
|
.writeable = true });
|
||||||
|
},
|
||||||
|
[&] (Linker::Region_map::Alloc_region_error) {
|
||||||
|
return Attach_error::REGION_CONFLICT; }
|
||||||
|
);
|
||||||
}
|
}
|
||||||
|
|
||||||
void detach(Local_addr) override { throw Not_implemented(); }
|
void detach(addr_t) override { throw Not_implemented(); }
|
||||||
|
|
||||||
void fault_handler(Signal_context_capability) override { }
|
void fault_handler(Signal_context_capability) override { }
|
||||||
|
|
||||||
State state() override { throw Not_implemented(); }
|
Fault fault() override { throw Not_implemented(); }
|
||||||
|
|
||||||
Dataspace_capability dataspace() override { throw Not_implemented(); }
|
Dataspace_capability dataspace() override { throw Not_implemented(); }
|
||||||
|
|
||||||
|
@ -36,7 +36,7 @@ void Component::construct(Genode::Env &env)
|
|||||||
pd_2.free(ds);
|
pd_2.free(ds);
|
||||||
|
|
||||||
log("try to attach dataspace to see if it still exists");
|
log("try to attach dataspace to see if it still exists");
|
||||||
env.rm().attach(ds);
|
env.rm().attach(ds, { });
|
||||||
|
|
||||||
log("attach operation succeeded");
|
log("attach operation succeeded");
|
||||||
|
|
||||||
|
@ -40,11 +40,11 @@ enum {
|
|||||||
SHUTDOWN = EXEC_TEST - 1
|
SHUTDOWN = EXEC_TEST - 1
|
||||||
};
|
};
|
||||||
|
|
||||||
-static char const *state_name(Region_map::State &state)
+static char const *fault_name(Region_map::Fault const &fault)
 {
-	return state.type == Region_map::State::READ_FAULT  ? "READ_FAULT"  :
-	       state.type == Region_map::State::WRITE_FAULT ? "WRITE_FAULT" :
-	       state.type == Region_map::State::EXEC_FAULT  ? "EXEC_FAULT"  : "READY";
+	return fault.type == Region_map::Fault::Type::READ  ? "READ_FAULT"  :
+	       fault.type == Region_map::Fault::Type::WRITE ? "WRITE_FAULT" :
+	       fault.type == Region_map::Fault::Type::EXEC  ? "EXEC_FAULT"  : "READY";
 }
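Editor's note: as the test's fault handler below illustrates, the former 'state()' RPC is replaced by 'fault()', which returns a plain value. A condensed sketch of the dispatch, assuming the 'type' and 'addr' fields used throughout this commit and reusing the 'fault_name' helper above:

    /* sketch: query and report the current fault of a region map */
    void report_fault(Genode::Region_map &rm)
    {
        Genode::Region_map::Fault const fault = rm.fault();

        if (fault.type == Genode::Region_map::Fault::Type::NONE)
            return;   /* spurious signal, nothing to resolve */

        Genode::log("fault ", fault_name(fault), " at ", Genode::Hex(fault.addr));
    }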
|
|
||||||
|
|
||||||
@ -295,6 +295,15 @@ struct Main_parent
|
|||||||
long volatile &_child_value() { return *_ds.local_addr<long volatile>(); }
|
long volatile &_child_value() { return *_ds.local_addr<long volatile>(); }
|
||||||
long volatile &_child_stop() { return *(_ds.local_addr<long volatile>() + 1); }
|
long volatile &_child_stop() { return *(_ds.local_addr<long volatile>() + 1); }
|
||||||
|
|
||||||
|
void _attach_at(Dataspace_capability ds, addr_t at)
|
||||||
|
{
|
||||||
|
if (_address_space.attach(ds, {
|
||||||
|
.size = { }, .offset = { },
|
||||||
|
.use_at = true, .at = at,
|
||||||
|
.executable = { }, .writeable = true
|
||||||
|
}).failed()) error("_attach_at unexpectedly failed");
|
||||||
|
}
|
||||||
|
|
||||||
void _test_read_fault(addr_t const child_virt_addr)
|
void _test_read_fault(addr_t const child_virt_addr)
|
||||||
{
|
{
|
||||||
/* allocate dataspace to resolve the fault */
|
/* allocate dataspace to resolve the fault */
|
||||||
@ -302,7 +311,7 @@ struct Main_parent
|
|||||||
|
|
||||||
_child_value() = READ_TEST;
|
_child_value() = READ_TEST;
|
||||||
|
|
||||||
_address_space.attach_at(_ds.cap(), child_virt_addr);
|
_attach_at(_ds.cap(), child_virt_addr);
|
||||||
|
|
||||||
/* poll until our child modifies the dataspace content */
|
/* poll until our child modifies the dataspace content */
|
||||||
while (_child_value() == READ_TEST);
|
while (_child_value() == READ_TEST);
|
||||||
@ -311,7 +320,7 @@ struct Main_parent
|
|||||||
Hex(_child_value()));
|
Hex(_child_value()));
|
||||||
|
|
||||||
log("revoke dataspace from child");
|
log("revoke dataspace from child");
|
||||||
_address_space.detach((void *)child_virt_addr);
|
_address_space.detach(child_virt_addr);
|
||||||
}
|
}
|
||||||
|
|
||||||
void _test_write_fault(addr_t const child_virt_addr, unsigned round)
|
void _test_write_fault(addr_t const child_virt_addr, unsigned round)
|
||||||
@ -322,7 +331,7 @@ struct Main_parent
|
|||||||
|
|
||||||
_child_value() = WRITE_TEST;
|
_child_value() = WRITE_TEST;
|
||||||
|
|
||||||
_address_space.attach_at(_binary.dataspace(), child_virt_addr);
|
_attach_at(_binary.dataspace(), child_virt_addr);
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -337,36 +346,35 @@ struct Main_parent
|
|||||||
: " unknown");
|
: " unknown");
|
||||||
|
|
||||||
/* detach region where fault happened */
|
/* detach region where fault happened */
|
||||||
_address_space.detach((void *)child_virt_addr);
|
_address_space.detach(child_virt_addr);
|
||||||
|
|
||||||
if (round == ROUND_FAULT_ON_ROM_BINARY) {
|
if (round == ROUND_FAULT_ON_ROM_BINARY) {
|
||||||
/* attach a RAM dataspace read-only */
|
/* attach a RAM dataspace read-only */
|
||||||
enum {
|
if (_address_space.attach(_ds.cap(), {
|
||||||
SIZE = 4096, OFFSET = 0, ATTACH_AT = true, NON_EXEC = false,
|
.size = 4096, .offset = { },
|
||||||
READONLY = false
|
.use_at = true, .at = child_virt_addr,
|
||||||
};
|
.executable = { }, .writeable = { }
|
||||||
|
}).failed()) error("attach of ROUND_FAULT_ON_ROM_BINARY failed");
|
||||||
|
|
||||||
_address_space.attach(_ds.cap(), SIZE, OFFSET, ATTACH_AT,
|
|
||||||
child_virt_addr, NON_EXEC, READONLY);
|
|
||||||
} else
|
} else
|
||||||
if (round == ROUND_FAULT_ON_RO_RAM) {
|
if (round == ROUND_FAULT_ON_RO_RAM) {
|
||||||
/* let client continue by attaching RAM dataspace writeable */
|
/* let client continue by attaching RAM dataspace writeable */
|
||||||
_address_space.attach_at(_ds.cap(), child_virt_addr);
|
_attach_at(_ds.cap(), child_virt_addr);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
void _test_exec_fault(Region_map::State &state)
|
void _test_exec_fault(Region_map::Fault const &fault)
|
||||||
{
|
{
|
||||||
if (_child_value() == WRITE_TEST) {
|
if (_child_value() == WRITE_TEST) {
|
||||||
_child_value() = EXEC_TEST;
|
_child_value() = EXEC_TEST;
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
if (state.type != Region_map::State::EXEC_FAULT ||
|
if (fault.type != Region_map::Fault::Type::EXEC ||
|
||||||
state.addr != MANAGED_ADDR)
|
fault.addr != MANAGED_ADDR)
|
||||||
{
|
{
|
||||||
error("exec test failed ", (int)state.type,
|
error("exec test failed ", (int)fault.type,
|
||||||
" addr=", Hex(state.addr));
|
" addr=", Hex(fault.addr));
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -381,17 +389,17 @@ struct Main_parent
|
|||||||
|
|
||||||
log("received region-map fault signal, request fault state");
|
log("received region-map fault signal, request fault state");
|
||||||
|
|
||||||
Region_map::State state = _address_space.state();
|
Region_map::Fault const fault = _address_space.fault();
|
||||||
|
|
||||||
log("rm session state is ", state_name(state), ", pf_addr=", Hex(state.addr));
|
log("rm session state is ", fault_name(fault), ", pf_addr=", Hex(fault.addr));
|
||||||
|
|
||||||
/* ignore spurious fault signal */
|
/* ignore spurious fault signal */
|
||||||
if (state.type == Region_map::State::READY) {
|
if (fault.type == Region_map::Fault::Type::NONE) {
|
||||||
log("ignoring spurious fault signal");
|
log("ignoring spurious fault signal");
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
addr_t child_virt_addr = state.addr & ~(4096 - 1);
|
addr_t child_virt_addr = fault.addr & ~(4096 - 1);
|
||||||
|
|
||||||
if (_fault_cnt < FAULT_CNT_READ)
|
if (_fault_cnt < FAULT_CNT_READ)
|
||||||
_test_read_fault(child_virt_addr);
|
_test_read_fault(child_virt_addr);
|
||||||
@ -404,7 +412,7 @@ struct Main_parent
|
|||||||
_handle_fault_stack();
|
_handle_fault_stack();
|
||||||
|
|
||||||
if (_fault_cnt > FAULT_CNT_WRITE)
|
if (_fault_cnt > FAULT_CNT_WRITE)
|
||||||
_test_exec_fault(state);
|
_test_exec_fault(fault);
|
||||||
|
|
||||||
_fault_cnt++;
|
_fault_cnt++;
|
||||||
}
|
}
|
||||||
@ -413,9 +421,9 @@ struct Main_parent
|
|||||||
{
|
{
|
||||||
/* sanity check that we got exec fault */
|
/* sanity check that we got exec fault */
|
||||||
if (_config.xml().attribute_value("executable_fault_test", true)) {
|
if (_config.xml().attribute_value("executable_fault_test", true)) {
|
||||||
Region_map::State state = _address_space.state();
|
Region_map::Fault const fault = _address_space.fault();
|
||||||
if (state.type != Region_map::State::EXEC_FAULT) {
|
if (fault.type != Region_map::Fault::Type::EXEC) {
|
||||||
error("unexpected state ", state_name(state));
|
error("unexpected state ", fault_name(fault));
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -46,19 +46,22 @@ class Local_fault_handler : public Entrypoint
|
|||||||
|
|
||||||
void _handle_fault()
|
void _handle_fault()
|
||||||
{
|
{
|
||||||
Region_map::State state = _region_map.state();
|
Region_map::Fault fault = _region_map.fault();
|
||||||
|
|
||||||
_fault_cnt = _fault_cnt + 1;
|
_fault_cnt = _fault_cnt + 1;
|
||||||
|
|
||||||
log("region-map state is ",
|
log("region-map fault is ",
|
||||||
state.type == Region_map::State::READ_FAULT ? "READ_FAULT" :
|
fault.type == Region_map::Fault::Type::READ ? "READ_FAULT" :
|
||||||
state.type == Region_map::State::WRITE_FAULT ? "WRITE_FAULT" :
|
fault.type == Region_map::Fault::Type::WRITE ? "WRITE_FAULT" :
|
||||||
state.type == Region_map::State::EXEC_FAULT ? "EXEC_FAULT" : "READY",
|
fault.type == Region_map::Fault::Type::EXEC ? "EXEC_FAULT" : "READY",
|
||||||
", pf_addr=", Hex(state.addr, Hex::PREFIX));
|
", pf_addr=", Hex(fault.addr, Hex::PREFIX));
|
||||||
|
|
||||||
log("allocate dataspace and attach it to sub region map");
|
log("allocate dataspace and attach it to sub region map");
|
||||||
Dataspace_capability ds = _env.ram().alloc(PAGE_SIZE);
|
Dataspace_capability ds = _env.ram().alloc(PAGE_SIZE);
|
||||||
_region_map.attach_at(ds, state.addr & ~(PAGE_SIZE - 1));
|
_region_map.attach(ds, {
|
||||||
|
.size = { }, .offset = { },
|
||||||
|
.use_at = true, .at = fault.addr & ~(PAGE_SIZE - 1),
|
||||||
|
.executable = { }, .writeable = true });
|
||||||
|
|
||||||
log("returning from handle_fault");
|
log("returning from handle_fault");
|
||||||
}
|
}
|
||||||
@ -83,6 +86,25 @@ class Local_fault_handler : public Entrypoint
|
|||||||
};
|
};
|
||||||
|
|
||||||
|
|
||||||
|
static void *ptr_from_attach_result(Region_map::Attach_result const &result)
|
||||||
|
{
|
||||||
|
return result.convert<void *>(
|
||||||
|
[&] (Region_map::Range range) { return (void *)range.start; },
|
||||||
|
[&] (Region_map::Attach_error) {
|
||||||
|
error("read-only attach unexpectedly failed");
|
||||||
|
return nullptr; });
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
static void *attach_rw(Region_map &rm, Dataspace_capability ds)
|
||||||
|
{
|
||||||
|
return ptr_from_attach_result(rm.attach(ds, {
|
||||||
|
.size = { }, .offset = { },
|
||||||
|
.use_at = { }, .at = { },
|
||||||
|
.executable = { }, .writeable = true }));
|
||||||
|
}
|
||||||
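Editor's note: with these two helpers, the call sites below read much like the former one-argument 'attach'. A short usage sketch under the same assumptions:

    /* sketch: attach a freshly allocated dataspace writeable and clear it */
    Genode::Dataspace_capability ds = env.ram().alloc(4096);

    char * const buf = (char *)attach_rw(env.rm(), ds);
    if (buf)
        Genode::memset(buf, 0, 4096);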
|
|
||||||
|
|
||||||
void nested_regions(Genode::Env &env)
|
void nested_regions(Genode::Env &env)
|
||||||
{
|
{
|
||||||
enum {
|
enum {
|
||||||
@ -98,7 +120,7 @@ void nested_regions(Genode::Env &env)
|
|||||||
Region_map_client rm_top(rm.create(MANAGED_REGION_TOP_SIZE));
|
Region_map_client rm_top(rm.create(MANAGED_REGION_TOP_SIZE));
|
||||||
Dataspace_client rm_top_client(rm_top.dataspace());
|
Dataspace_client rm_top_client(rm_top.dataspace());
|
||||||
|
|
||||||
void *ptr_top = env.rm().attach(rm_top.dataspace());
|
void *ptr_top = attach_rw(env.rm(), rm_top.dataspace());
|
||||||
addr_t const addr_top = reinterpret_cast<addr_t>(ptr_top);
|
addr_t const addr_top = reinterpret_cast<addr_t>(ptr_top);
|
||||||
log(" region top ",
|
log(" region top ",
|
||||||
Hex_range<addr_t>(addr_top, rm_top_client.size()));
|
Hex_range<addr_t>(addr_top, rm_top_client.size()));
|
||||||
@ -106,13 +128,13 @@ void nested_regions(Genode::Env &env)
|
|||||||
/* shim region 1 */
|
/* shim region 1 */
|
||||||
Region_map_client rm_shim1(rm.create(MANAGED_REGION_SHIM1_SIZE));
|
Region_map_client rm_shim1(rm.create(MANAGED_REGION_SHIM1_SIZE));
|
||||||
Dataspace_client rm_shim1_client(rm_shim1.dataspace());
|
Dataspace_client rm_shim1_client(rm_shim1.dataspace());
|
||||||
void *ptr_shim1 = rm_top.attach(rm_shim1.dataspace());
|
void *ptr_shim1 = attach_rw(rm_top, rm_shim1.dataspace());
|
||||||
addr_t const addr_shim1 = reinterpret_cast<addr_t>(ptr_shim1);
|
addr_t const addr_shim1 = reinterpret_cast<addr_t>(ptr_shim1);
|
||||||
|
|
||||||
/* shim region 2 */
|
/* shim region 2 */
|
||||||
Region_map_client rm_shim2(rm.create(MANAGED_REGION_SHIM2_SIZE));
|
Region_map_client rm_shim2(rm.create(MANAGED_REGION_SHIM2_SIZE));
|
||||||
Dataspace_client rm_shim2_client(rm_shim2.dataspace());
|
Dataspace_client rm_shim2_client(rm_shim2.dataspace());
|
||||||
void *ptr_shim2 = rm_top.attach(rm_shim2.dataspace());
|
void *ptr_shim2 = attach_rw(rm_top, rm_shim2.dataspace());
|
||||||
addr_t const addr_shim2 = reinterpret_cast<addr_t>(ptr_shim2);
|
addr_t const addr_shim2 = reinterpret_cast<addr_t>(ptr_shim2);
|
||||||
|
|
||||||
log(" region shim ",
|
log(" region shim ",
|
||||||
@ -122,16 +144,12 @@ void nested_regions(Genode::Env &env)
|
|||||||
/* attach some memory to region 2 as readonly and touch/map it */
|
/* attach some memory to region 2 as readonly and touch/map it */
|
||||||
size_t const shim2_ram_size = PAGE_SIZE * 2;
|
size_t const shim2_ram_size = PAGE_SIZE * 2;
|
||||||
Dataspace_capability shim2_ram_ds = env.ram().alloc(shim2_ram_size);
|
Dataspace_capability shim2_ram_ds = env.ram().alloc(shim2_ram_size);
|
||||||
enum {
|
void * const ptr_shim2_ram =
|
||||||
COMPLETE_SIZE = 0, OFFSET_0 = 0, OFFSET_1000 = 0x1000,
|
ptr_from_attach_result(rm_shim2.attach(shim2_ram_ds, {
|
||||||
USE_LOCAL_ADDR = true, LOCAL_ADDR_0 = 0, LOCAL_ADDR_1000 = 0x1000,
|
.size = { }, .offset = { },
|
||||||
NON_EXECUTABLE = false,
|
.use_at = true, .at = 0x1000,
|
||||||
READONLY = false, WRITEABLE = true
|
.executable = { }, .writeable = { } }));
|
||||||
};
|
|
||||||
void * ptr_shim2_ram = rm_shim2.attach(shim2_ram_ds, COMPLETE_SIZE,
|
|
||||||
OFFSET_0, USE_LOCAL_ADDR,
|
|
||||||
LOCAL_ADDR_1000, NON_EXECUTABLE,
|
|
||||||
READONLY);
|
|
||||||
addr_t const addr_shim2_ram = reinterpret_cast<addr_t>(ptr_shim2_ram);
|
addr_t const addr_shim2_ram = reinterpret_cast<addr_t>(ptr_shim2_ram);
|
||||||
addr_t const read_shim2 = addr_top + addr_shim2 + addr_shim2_ram;
|
addr_t const read_shim2 = addr_top + addr_shim2 + addr_shim2_ram;
|
||||||
|
|
||||||
@ -148,7 +166,13 @@ void nested_regions(Genode::Env &env)
|
|||||||
Region_map_client rm_bottom(rm.create(MANAGED_REGION_BOTTOM_SIZE));
|
Region_map_client rm_bottom(rm.create(MANAGED_REGION_BOTTOM_SIZE));
|
||||||
Dataspace_client rm_bottom_client(rm_bottom.dataspace());
|
Dataspace_client rm_bottom_client(rm_bottom.dataspace());
|
||||||
size_t const size_bottom = MANAGED_REGION_BOTTOM_SIZE - MANAGED_REGION_SHIM2_SIZE;
|
size_t const size_bottom = MANAGED_REGION_BOTTOM_SIZE - MANAGED_REGION_SHIM2_SIZE;
|
||||||
void const *ptr_bottom = rm_shim1.attach(rm_bottom.dataspace(), size_bottom);
|
|
||||||
|
void const * const ptr_bottom =
|
||||||
|
ptr_from_attach_result(rm_shim1.attach(rm_bottom.dataspace(), {
|
||||||
|
.size = size_bottom, .offset = { },
|
||||||
|
.use_at = { }, .at = { },
|
||||||
|
.executable = { }, .writeable = { } }));
|
||||||
|
|
||||||
addr_t const addr_bottom = reinterpret_cast<addr_t>(ptr_bottom);
|
addr_t const addr_bottom = reinterpret_cast<addr_t>(ptr_bottom);
|
||||||
|
|
||||||
log(" bottom shim (r) ",
|
log(" bottom shim (r) ",
|
||||||
@ -159,14 +183,17 @@ void nested_regions(Genode::Env &env)
|
|||||||
/* attach some memory to bottom as writeable */
|
/* attach some memory to bottom as writeable */
|
||||||
Dataspace_capability bottom_ram_ds = env.ram().alloc(MANAGED_REGION_BOTTOM_SIZE);
|
Dataspace_capability bottom_ram_ds = env.ram().alloc(MANAGED_REGION_BOTTOM_SIZE);
|
||||||
{
|
{
|
||||||
void * base_rw = env.rm().attach(bottom_ram_ds);
|
void * base_rw = attach_rw(env.rm(), bottom_ram_ds);
|
||||||
memset(base_rw, 0xff, MANAGED_REGION_BOTTOM_SIZE);
|
memset(base_rw, 0xff, MANAGED_REGION_BOTTOM_SIZE);
|
||||||
env.rm().detach(base_rw);
|
env.rm().detach(addr_t(base_rw));
|
||||||
}
|
}
|
||||||
void * ptr_bottom_ram = rm_bottom.attach(bottom_ram_ds, COMPLETE_SIZE,
|
|
||||||
OFFSET_0, USE_LOCAL_ADDR,
|
void const * const ptr_bottom_ram =
|
||||||
LOCAL_ADDR_0, NON_EXECUTABLE,
|
ptr_from_attach_result(rm_bottom.attach(bottom_ram_ds, {
|
||||||
WRITEABLE);
|
.size = { }, .offset = { },
|
||||||
|
.use_at = true, .at = 0,
|
||||||
|
.executable = { }, .writeable = true }));
|
||||||
|
|
||||||
addr_t const addr_bottom_ram = reinterpret_cast<addr_t>(ptr_bottom_ram);
|
addr_t const addr_bottom_ram = reinterpret_cast<addr_t>(ptr_bottom_ram);
|
||||||
addr_t const write_bottom = addr_top + addr_shim1 + addr_bottom + addr_bottom_ram;
|
addr_t const write_bottom = addr_top + addr_shim1 + addr_bottom + addr_bottom_ram;
|
||||||
|
|
||||||
@ -212,7 +239,7 @@ void Component::construct(Genode::Env & env)
|
|||||||
/*
|
/*
|
||||||
* Attach region map as dataspace to the local address space.
|
* Attach region map as dataspace to the local address space.
|
||||||
*/
|
*/
|
||||||
void *addr = env.rm().attach(region_map.dataspace());
|
void *addr = attach_rw(env.rm(), region_map.dataspace());
|
||||||
|
|
||||||
log("attached sub dataspace at local address ", addr);
|
log("attached sub dataspace at local address ", addr);
|
||||||
Dataspace_client client(region_map.dataspace());
|
Dataspace_client client(region_map.dataspace());
|
||||||
|
@ -52,17 +52,27 @@ void Component::construct(Env &env)
|
|||||||
|
|
||||||
for (unsigned r = 0; r < ROUNDS; ++r) {
|
for (unsigned r = 0; r < ROUNDS; ++r) {
|
||||||
for (unsigned i = 0; i < sizeof(page)/sizeof(*page); ++i) {
|
for (unsigned i = 0; i < sizeof(page)/sizeof(*page); ++i) {
|
||||||
off_t const offset = 0;
|
addr_t const offset = 0;
|
||||||
|
|
||||||
unsigned char volatile const *v =
|
uint8_t volatile const *v =
|
||||||
env.rm().attach(page[i].cap(), page[i].size(), offset);
|
env.rm().attach(page[i].cap(), {
|
||||||
|
.size = page[i].size(),
|
||||||
|
.offset = offset,
|
||||||
|
.use_at = { },
|
||||||
|
.at = { },
|
||||||
|
.executable = false,
|
||||||
|
.writeable = true
|
||||||
|
}).convert<uint8_t *>(
|
||||||
|
[&] (Region_map::Range range) { return (uint8_t *)range.start; },
|
||||||
|
[&] (Region_map::Attach_error) { return nullptr; }
|
||||||
|
);
|
||||||
|
|
||||||
if (page[i].color != *v) {
|
if (page[i].color != *v) {
|
||||||
error("value @ ", v, " ", X(*v), " != ", X(page[i].color), " in round ", r);
|
error("value @ ", v, " ", X(*v), " != ", X(page[i].color), " in round ", r);
|
||||||
env.parent().exit(-1);
|
env.parent().exit(-1);
|
||||||
}
|
}
|
||||||
|
|
||||||
env.rm().detach(Region_map::Local_addr(v));
|
env.rm().detach(addr_t(v));
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -39,12 +39,11 @@ static char const *test_pattern_2() {
|
|||||||
|
|
||||||
|
|
||||||
static void fill_ds_with_test_pattern(Env &env, char const *pattern,
|
static void fill_ds_with_test_pattern(Env &env, char const *pattern,
|
||||||
Dataspace_capability ds, size_t offset)
|
Dataspace_capability ds_cap, size_t offset)
|
||||||
{
|
{
|
||||||
log("fill dataspace with information");
|
log("fill dataspace with information");
|
||||||
char *content = env.rm().attach(ds);
|
Attached_dataspace ds { env.rm(), ds_cap };
|
||||||
copy_cstring(content + offset, pattern, ~0);
|
copy_cstring(ds.local_addr<char>() + offset, pattern, ~0);
|
||||||
env.rm().detach(content);
|
|
||||||
}
|
}
|
||||||
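Editor's note: switching to 'Attached_dataspace' also removes the manual detach, because the mapping is released by the destructor. A hedged sketch of the same RAII pattern:

    /* sketch: the attachment lives exactly as long as this scope */
    {
        Genode::Attached_dataspace ds { env.rm(), ds_cap };

        char * const content = ds.local_addr<char>();
        Genode::copy_cstring(content, "test pattern", 13);
    }   /* ~Attached_dataspace detaches the dataspace here */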
|
|
||||||
|
|
||||||
@ -62,7 +61,7 @@ void Component::construct(Env &env)
|
|||||||
log("--- sub-rm test ---");
|
log("--- sub-rm test ---");
|
||||||
|
|
||||||
log("create RM connection");
|
log("create RM connection");
|
||||||
enum { SUB_RM_SIZE = 1024*1024 };
|
size_t const SUB_RM_SIZE = 1024*1024;
|
||||||
Rm_connection rm(env);
|
Rm_connection rm(env);
|
||||||
|
|
||||||
/*
|
/*
|
||||||
@ -80,7 +79,7 @@ void Component::construct(Env &env)
|
|||||||
*/
|
*/
|
||||||
log("create managed dataspace");
|
log("create managed dataspace");
|
||||||
Region_map_client sub_rm(rm.create(SUB_RM_SIZE));
|
Region_map_client sub_rm(rm.create(SUB_RM_SIZE));
|
||||||
enum { DS_SIZE = 4*4096 };
|
size_t const DS_SIZE = 4*4096;
|
||||||
Ram_dataspace_capability ds = env.ram().alloc(DS_SIZE);
|
Ram_dataspace_capability ds = env.ram().alloc(DS_SIZE);
|
||||||
|
|
||||||
/*
|
/*
|
||||||
@ -91,19 +90,32 @@ void Component::construct(Env &env)
|
|||||||
|
|
||||||
if (!config.xml().attribute_value("support_attach_sub_any", true)) {
|
if (!config.xml().attribute_value("support_attach_sub_any", true)) {
|
||||||
log("attach RAM ds to any position at sub rm - this should fail");
|
log("attach RAM ds to any position at sub rm - this should fail");
|
||||||
try {
|
sub_rm.attach(ds, {
|
||||||
sub_rm.attach(ds, 0, 0, false, (addr_t)0);
|
.size = { }, .offset = { },
|
||||||
fail("sub rm attach_any unexpectedly did not fail");
|
.use_at = { }, .at = { },
|
||||||
}
|
.executable = { }, .writeable = true
|
||||||
catch (Region_map::Region_conflict) {
|
}).with_result(
|
||||||
|
[&] (Region_map::Range) {
|
||||||
|
fail("sub rm attach_any unexpectedly did not fail"); },
|
||||||
|
[&] (Region_map::Attach_error e) {
|
||||||
|
if (e == Region_map::Attach_error::REGION_CONFLICT)
|
||||||
log("attach failed as expected"); }
|
log("attach failed as expected"); }
|
||||||
|
);
|
||||||
}
|
}
|
||||||
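Editor's note: the test matches on the concrete error code rather than on mere failure. A hedged helper expressing the same check (not part of the patch):

    /* sketch: did an attach fail specifically because of a region conflict? */
    static bool region_conflict(Genode::Region_map::Attach_result result)
    {
        return result.convert<bool>(
            [&] (Genode::Region_map::Range)          { return false; },
            [&] (Genode::Region_map::Attach_error e) {
                return e == Genode::Region_map::Attach_error::REGION_CONFLICT; });
    }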
|
|
||||||
log("attach RAM ds to a fixed position at sub rm");
|
log("attach RAM ds to a fixed position at sub rm");
|
||||||
|
|
||||||
enum { DS_SUB_OFFSET = 4096 };
|
addr_t const DS_SUB_OFFSET = 4096;
|
||||||
if ((addr_t)sub_rm.attach_at(ds, DS_SUB_OFFSET, 0, 0) != DS_SUB_OFFSET)
|
sub_rm.attach(ds, {
|
||||||
fail("attach_at return-value mismatch");
|
.size = { }, .offset = { },
|
||||||
|
.use_at = true, .at = DS_SUB_OFFSET,
|
||||||
|
.executable = { }, .writeable = { }
|
||||||
|
}).with_result(
|
||||||
|
[&] (Region_map::Range const range) {
|
||||||
|
if (range.start != DS_SUB_OFFSET)
|
||||||
|
fail("attach-at return-value mismatch"); },
|
||||||
|
[&] (Region_map::Attach_error) { }
|
||||||
|
);
|
||||||
|
|
||||||
log("attach sub rm at local address space");
|
log("attach sub rm at local address space");
|
||||||
|
|
||||||
@ -117,8 +129,15 @@ void Component::construct(Env &env)
|
|||||||
*/
|
*/
|
||||||
addr_t const local_attach_addr =
|
addr_t const local_attach_addr =
|
||||||
config.xml().attribute_value("local_attach_addr", (addr_t)0);
|
config.xml().attribute_value("local_attach_addr", (addr_t)0);
|
||||||
char *sub_rm_base = env.rm().attach_at(sub_rm.dataspace(),
|
|
||||||
local_attach_addr);
|
char * const sub_rm_base = env.rm().attach(sub_rm.dataspace(), {
|
||||||
|
.size = { }, .offset = { },
|
||||||
|
.use_at = true, .at = local_attach_addr,
|
||||||
|
.executable = { }, .writeable = true
|
||||||
|
}).convert<char *>(
|
||||||
|
[&] (Region_map::Range const range) { return (char *)range.start; },
|
||||||
|
[&] (Region_map::Attach_error) { return nullptr; }
|
||||||
|
);
|
||||||
|
|
||||||
log("validate pattern in sub rm");
|
log("validate pattern in sub rm");
|
||||||
validate_pattern_at(test_pattern(), sub_rm_base + DS_SUB_OFFSET);
|
validate_pattern_at(test_pattern(), sub_rm_base + DS_SUB_OFFSET);
|
||||||
@ -129,9 +148,17 @@ void Component::construct(Env &env)
|
|||||||
*/
|
*/
|
||||||
|
|
||||||
log("attach RAM ds at another fixed position at sub rm");
|
log("attach RAM ds at another fixed position at sub rm");
|
||||||
enum { DS_SUB_OFFSET_2 = 0x40000 };
|
addr_t const DS_SUB_OFFSET_2 = 0x40000;
|
||||||
if ((addr_t)sub_rm.attach_at(ds, DS_SUB_OFFSET_2, 0, 0) != DS_SUB_OFFSET_2)
|
sub_rm.attach(ds, {
|
||||||
fail("attach_at return-value mismatch");
|
.size = { }, .offset = { },
|
||||||
|
.use_at = true, .at = DS_SUB_OFFSET_2,
|
||||||
|
.executable = { }, .writeable = { }
|
||||||
|
}).with_result(
|
||||||
|
[&] (Region_map::Range const range) {
|
||||||
|
if (range.start != DS_SUB_OFFSET_2)
|
||||||
|
fail("attach-at return-value mismatch"); },
|
||||||
|
[&] (Region_map::Attach_error) { }
|
||||||
|
);
|
||||||
|
|
||||||
log("validate pattern in second mapping in sub rm");
|
log("validate pattern in second mapping in sub rm");
|
||||||
validate_pattern_at(test_pattern(), sub_rm_base + DS_SUB_OFFSET_2);
|
validate_pattern_at(test_pattern(), sub_rm_base + DS_SUB_OFFSET_2);
|
||||||
@ -140,35 +167,50 @@ void Component::construct(Env &env)
|
|||||||
* Try to cross the boundaries of the sub RM session. This should
|
* Try to cross the boundaries of the sub RM session. This should
|
||||||
* produce an error.
|
* produce an error.
|
||||||
*/
|
*/
|
||||||
try {
|
sub_rm.attach(ds, {
|
||||||
sub_rm.attach_at(ds, SUB_RM_SIZE - 4096, 0, 0);
|
.size = { }, .offset = { },
|
||||||
fail("undetected boundary conflict\n");
|
.use_at = true, .at = SUB_RM_SIZE - 4096,
|
||||||
}
|
.executable = { }, .writeable = true
|
||||||
catch (Region_map::Region_conflict) {
|
}).with_result(
|
||||||
|
[&] (Region_map::Range) {
|
||||||
|
fail("undetected boundary conflict\n"); },
|
||||||
|
[&] (Region_map::Attach_error e) {
|
||||||
|
if (e == Region_map::Attach_error::REGION_CONFLICT)
|
||||||
log("attaching beyond sub RM boundary failed as expected"); }
|
log("attaching beyond sub RM boundary failed as expected"); }
|
||||||
|
);
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Check for working region - conflict detection
|
* Check for working region - conflict detection
|
||||||
*/
|
*/
|
||||||
log("attaching RAM ds to a conflicting region");
|
log("attaching RAM ds to a conflicting region");
|
||||||
try {
|
sub_rm.attach(ds, {
|
||||||
sub_rm.attach_at(ds, DS_SUB_OFFSET + 4096, 0, 0);
|
.size = { }, .offset = { },
|
||||||
fail("region conflict went undetected\n");
|
.use_at = true, .at = DS_SUB_OFFSET + 4096,
|
||||||
}
|
.executable = { }, .writeable = true
|
||||||
catch (Region_map::Region_conflict) {
|
}).with_result(
|
||||||
|
[&] (Region_map::Range) {
|
||||||
|
fail("region conflict went undetected"); },
|
||||||
|
[&] (Region_map::Attach_error e) {
|
||||||
|
if (e == Region_map::Attach_error::REGION_CONFLICT)
|
||||||
log("attaching conflicting region failed as expected"); }
|
log("attaching conflicting region failed as expected"); }
|
||||||
|
);
|
||||||
|
|
||||||
if (config.xml().attribute_value("attach_twice_forbidden", false)) {
|
if (config.xml().attribute_value("attach_twice_forbidden", false)) {
|
||||||
/*
|
/*
|
||||||
* Try to double-attach the same sub RM session. This should fail
|
* Try to double-attach the same sub RM session. This should fail
|
||||||
*/
|
*/
|
||||||
log("attach sub rm again at local address space");
|
log("attach sub rm again at local address space");
|
||||||
try {
|
sub_rm.attach(ds, {
|
||||||
env.rm().attach(sub_rm.dataspace());
|
.size = { }, .offset = { },
|
||||||
fail("double attachment of sub RM session went undetected\n");
|
.use_at = { }, .at = { },
|
||||||
}
|
.executable = { }, .writeable = true
|
||||||
catch (Region_map::Region_conflict) {
|
}).with_result(
|
||||||
|
[&] (Region_map::Range) {
|
||||||
|
fail("double attachment of sub RM session went undetected"); },
|
||||||
|
[&] (Region_map::Attach_error e) {
|
||||||
|
if (e == Region_map::Attach_error::REGION_CONFLICT)
|
||||||
log("doubly attaching sub RM session failed as expected"); }
|
log("doubly attaching sub RM session failed as expected"); }
|
||||||
|
);
|
||||||
}
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
@ -178,8 +220,12 @@ void Component::construct(Env &env)
|
|||||||
* page.
|
* page.
|
||||||
*/
|
*/
|
||||||
log("attach RAM ds with offset");
|
log("attach RAM ds with offset");
|
||||||
enum { DS_SUB_OFFSET_3 = 0x80000 };
|
addr_t const DS_SUB_OFFSET_3 = 0x80000;
|
||||||
sub_rm.attach_at(ds, DS_SUB_OFFSET_3, 0, 4096);
|
sub_rm.attach(ds, {
|
||||||
|
.size = { }, .offset = 4096,
|
||||||
|
.use_at = true, .at = DS_SUB_OFFSET_3,
|
||||||
|
.executable = { }, .writeable = true
|
||||||
|
});
|
||||||
validate_pattern_at(test_pattern_2(), sub_rm_base + DS_SUB_OFFSET_3);
|
validate_pattern_at(test_pattern_2(), sub_rm_base + DS_SUB_OFFSET_3);
|
||||||
|
|
||||||
/*
|
/*
|
||||||
@ -187,15 +233,19 @@ void Component::construct(Env &env)
|
|||||||
* starting with the second page.
|
* starting with the second page.
|
||||||
*/
|
*/
|
||||||
log("attach RAM ds with offset and size");
|
log("attach RAM ds with offset and size");
|
||||||
enum { DS_SUB_OFFSET_4 = 0xc0000 };
|
addr_t const DS_SUB_OFFSET_4 = 0xc0000;
|
||||||
sub_rm.attach_at(ds, DS_SUB_OFFSET_4, 2*4096, 4096);
|
sub_rm.attach(ds, {
|
||||||
|
.size = 2*4096, .offset = 4096,
|
||||||
|
.use_at = true, .at = DS_SUB_OFFSET_4,
|
||||||
|
.executable = { }, .writeable = true
|
||||||
|
});
|
||||||
validate_pattern_at(test_pattern_2(), sub_rm_base + DS_SUB_OFFSET_4);
|
validate_pattern_at(test_pattern_2(), sub_rm_base + DS_SUB_OFFSET_4);
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Detach the first attachment (to be validated by the run script by
|
* Detach the first attachment (to be validated by the run script by
|
||||||
* inspecting '/proc/pid/maps' after running the test.
|
* inspecting '/proc/pid/maps' after running the test.
|
||||||
*/
|
*/
|
||||||
sub_rm.detach((void *)DS_SUB_OFFSET);
|
sub_rm.detach(DS_SUB_OFFSET);
|
||||||
|
|
||||||
log("--- end of sub-rm test ---");
|
log("--- end of sub-rm test ---");
|
||||||
|
|
||||||
|
@ -38,10 +38,20 @@ static void test_out_of_bounds_access(Env &env)
|
|||||||
Attached_ram_dataspace buf_ds(env.ram(), env.rm(), BUF_SIZE);
|
Attached_ram_dataspace buf_ds(env.ram(), env.rm(), BUF_SIZE);
|
||||||
|
|
||||||
/* attach buffer at start of managed dataspace, leave 2nd page as guard */
|
/* attach buffer at start of managed dataspace, leave 2nd page as guard */
|
||||||
sub_rm.attach_at(buf_ds.cap(), 0);
|
sub_rm.attach(buf_ds.cap(), {
|
||||||
|
.size = { }, .offset = { },
|
||||||
|
.use_at = true, .at = 0,
|
||||||
|
.executable = { }, .writeable = true });
|
||||||
|
|
||||||
/* locally attach managed dataspace */
|
/* locally attach managed dataspace */
|
||||||
char * const buf_ptr = env.rm().attach(sub_rm.dataspace());
|
char * const buf_ptr = env.rm().attach(sub_rm.dataspace(), {
|
||||||
|
.size = { }, .offset = { },
|
||||||
|
.use_at = { }, .at = { },
|
||||||
|
.executable = { }, .writeable = true }
|
||||||
|
).convert<char *>(
|
||||||
|
[&] (Region_map::Range range) { return (char *)range.start; },
|
||||||
|
[&] (Region_map::Attach_error) { return nullptr; }
|
||||||
|
);
|
||||||
|
|
||||||
auto tokenize_two_tokens_at_end_of_buffer = [&] (char const * const input)
|
auto tokenize_two_tokens_at_end_of_buffer = [&] (char const * const input)
|
||||||
{
|
{
|
||||||
|
@ -346,21 +346,21 @@ extern "C" dde_addr_t dde_dma_get_physaddr(void *virt) {
|
|||||||
|
|
||||||
extern "C" dde_uint8_t dde_inb(dde_addr_t port)
|
extern "C" dde_uint8_t dde_inb(dde_addr_t port)
|
||||||
{
|
{
|
||||||
dde_uint8_t v;
|
dde_uint8_t v { };
|
||||||
pci_drv().with_io_port([&] (Io_port & iop) { v = iop.inb(port); });
|
pci_drv().with_io_port([&] (Io_port & iop) { v = iop.inb(port); });
|
||||||
return v;
|
return v;
|
||||||
}
|
}
|
||||||
|
|
||||||
extern "C" dde_uint16_t dde_inw(dde_addr_t port)
|
extern "C" dde_uint16_t dde_inw(dde_addr_t port)
|
||||||
{
|
{
|
||||||
dde_uint16_t v;
|
dde_uint16_t v { };
|
||||||
pci_drv().with_io_port([&] (Io_port & iop) { v = iop.inw(port); });
|
pci_drv().with_io_port([&] (Io_port & iop) { v = iop.inw(port); });
|
||||||
return v;
|
return v;
|
||||||
}
|
}
|
||||||
|
|
||||||
extern "C" dde_uint32_t dde_inl(dde_addr_t port)
|
extern "C" dde_uint32_t dde_inl(dde_addr_t port)
|
||||||
{
|
{
|
||||||
dde_uint32_t v;
|
dde_uint32_t v { };
|
||||||
pci_drv().with_io_port([&] (Io_port & iop) { v = iop.inl(port); });
|
pci_drv().with_io_port([&] (Io_port & iop) { v = iop.inl(port); });
|
||||||
return v;
|
return v;
|
||||||
}
|
}
|
||||||
@ -384,15 +384,14 @@ struct Slab_backend_alloc : public Genode::Allocator,
|
|||||||
public Genode::Rm_connection,
|
public Genode::Rm_connection,
|
||||||
public Genode::Region_map_client
|
public Genode::Region_map_client
|
||||||
{
|
{
|
||||||
enum {
|
static constexpr Genode::size_t VM_SIZE = 2 * 1024 * 1024,
|
||||||
VM_SIZE = 2 * 1024 * 1024,
|
|
||||||
BLOCK_SIZE = 64 * 1024,
|
BLOCK_SIZE = 64 * 1024,
|
||||||
ELEMENTS = VM_SIZE / BLOCK_SIZE,
|
ELEMENTS = VM_SIZE / BLOCK_SIZE;
|
||||||
};
|
|
||||||
|
|
||||||
Genode::addr_t _base;
|
Genode::Attached_dataspace _managed_ds;
|
||||||
|
Genode::addr_t _base = Genode::addr_t(_managed_ds.local_addr<void>());
|
||||||
Genode::Ram_dataspace_capability _ds_cap[ELEMENTS];
|
Genode::Ram_dataspace_capability _ds_cap[ELEMENTS];
|
||||||
int _index;
|
unsigned _index = 0;
|
||||||
Genode::Allocator_avl _range;
|
Genode::Allocator_avl _range;
|
||||||
Genode::Ram_allocator &_ram;
|
Genode::Ram_allocator &_ram;
|
||||||
|
|
||||||
@ -414,30 +413,41 @@ struct Slab_backend_alloc : public Genode::Allocator,
|
|||||||
|
|
||||||
_ds_cap[_index] = ds;
|
_ds_cap[_index] = ds;
|
||||||
|
|
||||||
Alloc_error error = Alloc_error::DENIED;
|
return Region_map_client::attach(_ds_cap[_index], {
|
||||||
|
.size = BLOCK_SIZE,
|
||||||
|
.offset = { },
|
||||||
|
.use_at = true,
|
||||||
|
.at = _index*BLOCK_SIZE,
|
||||||
|
.executable = false,
|
||||||
|
.writeable = true
|
||||||
|
}).convert<Extend_result>(
|
||||||
|
|
||||||
try {
|
[&] (Region_map::Range range) {
|
||||||
Region_map_client::attach_at(_ds_cap[_index],
|
|
||||||
_index * BLOCK_SIZE,
|
|
||||||
BLOCK_SIZE, 0);
|
|
||||||
/* return base + offset in VM area */
|
|
||||||
addr_t block_base = _base + (_index * BLOCK_SIZE);
|
|
||||||
++_index;
|
|
||||||
|
|
||||||
_range.add_range(block_base, BLOCK_SIZE);
|
_index++;
|
||||||
|
|
||||||
return Extend_ok();
|
return _range.add_range(_base + range.start, range.num_bytes)
|
||||||
}
|
.convert<Extend_result>(
|
||||||
catch (Out_of_ram) { error = Alloc_error::OUT_OF_RAM; }
|
[&] (Range_allocator::Range_ok) { return Extend_ok(); },
|
||||||
catch (Out_of_caps) { error = Alloc_error::OUT_OF_CAPS; }
|
[&] (Alloc_error e) { return e; });
|
||||||
catch (...) { error = Alloc_error::DENIED; }
|
},
|
||||||
|
|
||||||
|
[&] (Region_map::Attach_error e) {
|
||||||
|
|
||||||
Genode::error("Slab_backend_alloc: local attach_at failed");
|
Genode::error("Slab_backend_alloc: local attach_at failed");
|
||||||
|
|
||||||
_ram.free(ds);
|
_ram.free(ds);
|
||||||
_ds_cap[_index] = { };
|
_ds_cap[_index] = { };
|
||||||
|
|
||||||
return error;
|
using Error = Region_map::Attach_error;
|
||||||
|
switch (e) {
|
||||||
|
case Error::OUT_OF_RAM: return Alloc_error::OUT_OF_RAM;
|
||||||
|
case Error::OUT_OF_CAPS: return Alloc_error::OUT_OF_CAPS;
|
||||||
|
case Error::INVALID_DATASPACE: break;
|
||||||
|
case Error::REGION_CONFLICT: break;
|
||||||
|
}
|
||||||
|
return Alloc_error::DENIED;
|
||||||
|
}
|
||||||
|
);
|
||||||
},
|
},
|
||||||
|
|
||||||
[&] (Alloc_error e) -> Extend_result {
|
[&] (Alloc_error e) -> Extend_result {
|
||||||
@ -451,11 +461,8 @@ struct Slab_backend_alloc : public Genode::Allocator,
|
|||||||
:
|
:
|
||||||
Rm_connection(env),
|
Rm_connection(env),
|
||||||
Region_map_client(Rm_connection::create(VM_SIZE)),
|
Region_map_client(Rm_connection::create(VM_SIZE)),
|
||||||
_index(0), _range(&md_alloc), _ram(ram)
|
_managed_ds(rm, dataspace()), _range(&md_alloc), _ram(ram)
|
||||||
{
|
{ }
|
||||||
/* reserver attach us, anywere */
|
|
||||||
_base = rm.attach(dataspace());
|
|
||||||
}
|
|
||||||
|
|
||||||
Genode::addr_t start() const { return _base; }
|
Genode::addr_t start() const { return _base; }
|
||||||
Genode::addr_t end() const { return _base + VM_SIZE - 1; }
|
Genode::addr_t end() const { return _base + VM_SIZE - 1; }
|
||||||
|
@ -67,10 +67,27 @@ namespace Allocator {
|
|||||||
addr_t _base; /* virt. base address */
|
addr_t _base; /* virt. base address */
|
||||||
Cache _cache; /* non-/cached RAM */
|
Cache _cache; /* non-/cached RAM */
|
||||||
Ram_dataspace_capability _ds_cap[ELEMENTS]; /* dataspaces to put in VM */
|
Ram_dataspace_capability _ds_cap[ELEMENTS]; /* dataspaces to put in VM */
|
||||||
int _index = 0; /* current index in ds_cap */
|
unsigned _index = 0; /* current index in ds_cap */
|
||||||
Allocator_avl _range; /* manage allocations */
|
Allocator_avl _range; /* manage allocations */
|
||||||
bool _quota_exceeded = false;
|
bool _quota_exceeded = false;
|
||||||
|
|
||||||
|
addr_t _attach_managed_ds(Region_map &local_rm)
|
||||||
|
{
|
||||||
|
return local_rm.attach(dataspace(), {
|
||||||
|
.size = { },
|
||||||
|
.offset = { },
|
||||||
|
.use_at = { },
|
||||||
|
.at = { },
|
||||||
|
.executable = false,
|
||||||
|
.writeable = true
|
||||||
|
}).convert<addr_t>(
|
||||||
|
[&] (Range range) { return range.start; },
|
||||||
|
[&] (Attach_error) {
|
||||||
|
error("rump backend allocator failed to attach managed dataspace");
|
||||||
|
return 0UL; }
|
||||||
|
);
|
||||||
|
}
|
||||||
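Editor's note: because the helper returns the local base address, the constructor can initialize '_base' directly in its initializer list, as the hunk further below does. Schematically (class and namespace names abbreviated):

    /* sketch: wiring the helper into the constructor */
    Backend_alloc(Genode::Cache cache)
    :
        Rm_connection(Rump::env().env()),
        Region_map_client(Rm_connection::create(VM_SIZE)),
        _base(_attach_managed_ds(Rump::env().env().rm())),
        _cache(cache),
        _range(&Rump::env().heap())
    { }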
|
|
||||||
bool _alloc_block()
|
bool _alloc_block()
|
||||||
{
|
{
|
||||||
if (_quota_exceeded)
|
if (_quota_exceeded)
|
||||||
@ -83,29 +100,39 @@ namespace Allocator {
|
|||||||
|
|
||||||
Policy_guard<POLICY> guard;
|
Policy_guard<POLICY> guard;
|
||||||
|
|
||||||
try {
|
_ds_cap[_index] = Rump::env().env().ram().try_alloc(BLOCK_SIZE, _cache)
|
||||||
_ds_cap[_index] = Rump::env().env().ram().alloc(BLOCK_SIZE, _cache);
|
.template convert<Ram_dataspace_capability>(
|
||||||
/* attach at index * BLOCK_SIZE */
|
[&] (Ram_dataspace_capability cap) { return cap; },
|
||||||
Region_map_client::attach_at(_ds_cap[_index], _index * BLOCK_SIZE, BLOCK_SIZE, 0);
|
[&] (Allocator::Alloc_error) { return Ram_dataspace_capability(); }
|
||||||
} catch (Genode::Out_of_ram) {
|
);
|
||||||
warning("backend allocator exhausted (out of RAM)");
|
|
||||||
_quota_exceeded = true;
|
if (!_ds_cap[_index].valid()) {
|
||||||
return false;
|
warning("backend allocator exhausted");
|
||||||
} catch (Genode::Out_of_caps) {
|
|
||||||
warning("backend allocator exhausted (out of caps)");
|
|
||||||
_quota_exceeded = true;
|
|
||||||
return false;
|
|
||||||
} catch (Genode::Region_map::Region_conflict) {
|
|
||||||
warning("backend VM region exhausted");
|
|
||||||
_quota_exceeded = true;
|
|
||||||
return false;
|
return false;
|
||||||
}
|
}
|
||||||
|
|
||||||
/* return base + offset in VM area */
|
if (Region_map_client::attach(_ds_cap[_index], {
|
||||||
addr_t block_base = _base + (_index * BLOCK_SIZE);
|
.size = BLOCK_SIZE,
|
||||||
++_index;
|
.offset = { },
|
||||||
|
.use_at = true,
|
||||||
|
.at = _index*BLOCK_SIZE,
|
||||||
|
.executable = false,
|
||||||
|
.writeable = true
|
||||||
|
}).failed()) {
|
||||||
|
warning("failed to locally attach backend memory");
|
||||||
|
Rump::env().env().ram().free(_ds_cap[_index]);
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
|
||||||
_range.add_range(block_base, BLOCK_SIZE);
|
addr_t const block_base = _base + _index*BLOCK_SIZE;
|
||||||
|
if (_range.add_range(block_base, BLOCK_SIZE).failed()) {
|
||||||
|
warning("failed to extend backend allocator metadata");
|
||||||
|
Region_map_client::detach(_index*BLOCK_SIZE);
|
||||||
|
Rump::env().env().ram().free(_ds_cap[_index]);
|
||||||
|
_ds_cap[_index] = { };
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
++_index;
|
||||||
return true;
|
return true;
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -115,12 +142,10 @@ namespace Allocator {
|
|||||||
:
|
:
|
||||||
Rm_connection(Rump::env().env()),
|
Rm_connection(Rump::env().env()),
|
||||||
Region_map_client(Rm_connection::create(VM_SIZE)),
|
Region_map_client(Rm_connection::create(VM_SIZE)),
|
||||||
|
_base(_attach_managed_ds(Rump::env().env().rm())),
|
||||||
_cache(cache),
|
_cache(cache),
|
||||||
_range(&Rump::env().heap())
|
_range(&Rump::env().heap())
|
||||||
{
|
{ }
|
||||||
/* reserver attach us, anywere */
|
|
||||||
_base = Rump::env().env().rm().attach(dataspace());
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Allocate
|
* Allocate
|
||||||
|
@ -456,39 +456,59 @@ class Vfs::Rump_file_system : public File_system
|
|||||||
|
|
||||||
Genode::Dataspace_capability dataspace(char const *path) override
|
Genode::Dataspace_capability dataspace(char const *path) override
|
||||||
{
|
{
|
||||||
Genode::Env &env = _env.env();
|
struct stat s { };
|
||||||
|
if (rump_sys_lstat(path, &s) != 0)
|
||||||
|
return { };
|
||||||
|
|
||||||
int fd = rump_sys_open(path, O_RDONLY);
|
using Region_map = Genode::Region_map;
|
||||||
if (fd == -1) return Genode::Dataspace_capability();
|
|
||||||
|
|
||||||
struct stat s;
|
auto read_file_content = [&path] (Region_map::Range const range) -> bool
|
||||||
if (rump_sys_lstat(path, &s) != 0) return Genode::Dataspace_capability();
|
{
|
||||||
size_t const ds_size = s.st_size;
|
int const fd = rump_sys_open(path, O_RDONLY);
|
||||||
|
size_t i = 0; /* bytes read */
|
||||||
char *local_addr = nullptr;
|
if (fd >= 0) {
|
||||||
Ram_dataspace_capability ds_cap;
|
while (i < range.num_bytes) {
|
||||||
try {
|
size_t const CHUNK_SIZE = 16U << 10;
|
||||||
ds_cap = env.ram().alloc(ds_size);
|
ssize_t const n = rump_sys_read(fd, (void *)(range.start + i),
|
||||||
|
min(range.num_bytes - i, CHUNK_SIZE));
|
||||||
local_addr = env.rm().attach(ds_cap);
|
if (n <= 0)
|
||||||
|
break;
|
||||||
enum { CHUNK_SIZE = 16U << 10 };
|
|
||||||
|
|
||||||
for (size_t i = 0; i < ds_size;) {
|
|
||||||
ssize_t n = rump_sys_read(fd, &local_addr[i], min(ds_size-i, CHUNK_SIZE));
|
|
||||||
if (n == -1)
|
|
||||||
throw n;
|
|
||||||
i += n;
|
i += n;
|
||||||
}
|
}
|
||||||
|
|
||||||
env.rm().detach(local_addr);
|
|
||||||
} catch(...) {
|
|
||||||
if (local_addr)
|
|
||||||
env.rm().detach(local_addr);
|
|
||||||
env.ram().free(ds_cap);
|
|
||||||
}
|
|
||||||
rump_sys_close(fd);
|
rump_sys_close(fd);
|
||||||
|
}
|
||||||
|
return (i == range.num_bytes);
|
||||||
|
};
|
||||||
|
|
||||||
|
return _env.env().ram().try_alloc(s.st_size).convert<Dataspace_capability>(
|
||||||
|
[&] (Ram_dataspace_capability const ds_cap) {
|
||||||
|
return _env.env().rm().attach(ds_cap, {
|
||||||
|
.size = { }, .offset = { }, .use_at = { },
|
||||||
|
.at = { }, .executable = { }, .writeable = true
|
||||||
|
}).convert<Dataspace_capability>(
|
||||||
|
[&] (Region_map::Range const range) -> Dataspace_capability {
|
||||||
|
|
||||||
|
bool const complete = read_file_content(range);
|
||||||
|
_env.env().rm().detach(range.start);
|
||||||
|
|
||||||
|
if (complete)
|
||||||
return ds_cap;
|
return ds_cap;
|
||||||
|
|
||||||
|
Genode::error("rump failed to read content into VFS dataspace");
|
||||||
|
_env.env().ram().free(ds_cap);
|
||||||
|
return Dataspace_capability();
|
||||||
|
},
|
||||||
|
[&] (Region_map::Attach_error) {
|
||||||
|
_env.env().ram().free(ds_cap);
|
||||||
|
return Dataspace_capability();
|
||||||
|
}
|
||||||
|
);
|
||||||
|
},
|
||||||
|
[&] (Genode::Ram_allocator::Alloc_error) {
|
||||||
|
Genode::error("rump failed to allocate VFS dataspace of size ", s.st_size);
|
||||||
|
return Dataspace_capability();
|
||||||
|
}
|
||||||
|
);
|
||||||
}
|
}
|
||||||
|
|
||||||
void release(char const *path,
|
void release(char const *path,
|
||||||
|
@ -73,6 +73,7 @@ class Trace_recorder::Monitor
|
|||||||
private:
|
private:
|
||||||
|
|
||||||
Env &_env;
|
Env &_env;
|
||||||
|
Attached_dataspace _ds;
|
||||||
Trace_buffer _buffer;
|
Trace_buffer _buffer;
|
||||||
Registry<Attached_buffer>::Element _element;
|
Registry<Attached_buffer>::Element _element;
|
||||||
Subject_info _info;
|
Subject_info _info;
|
||||||
@ -86,18 +87,15 @@ class Trace_recorder::Monitor
|
|||||||
Genode::Dataspace_capability ds,
|
Genode::Dataspace_capability ds,
|
||||||
Trace::Subject_info const &info,
|
Trace::Subject_info const &info,
|
||||||
Trace::Subject_id id)
|
Trace::Subject_id id)
|
||||||
: _env(env),
|
:
|
||||||
_buffer(*((Trace::Buffer*)_env.rm().attach(ds))),
|
_env(env),
|
||||||
|
_ds(env.rm(), ds),
|
||||||
|
_buffer(*_ds.local_addr<Trace::Buffer>()),
|
||||||
_element(registry, *this),
|
_element(registry, *this),
|
||||||
_info(info),
|
_info(info),
|
||||||
_subject_id(id)
|
_subject_id(id)
|
||||||
{ }
|
{ }
|
||||||
|
|
||||||
~Attached_buffer()
|
|
||||||
{
|
|
||||||
_env.rm().detach(_buffer.address());
|
|
||||||
}
|
|
||||||
|
|
||||||
void process_events(Trace_directory &);
|
void process_events(Trace_directory &);
|
||||||
|
|
||||||
Registry<Writer_base> &writers() { return _writers; }
|
Registry<Writer_base> &writers() { return _writers; }
|
||||||
|
@ -55,14 +55,23 @@ class Vfs_trace::Trace_buffer_file_system : public Single_file_system
|
|||||||
|
|
||||||
 		void setup(Dataspace_capability ds)
 		{
-			_buffer.construct(*((Trace::Buffer *)_env.env().rm().attach(ds)));
+			_env.env().rm().attach(ds, {
+				.size = { },       .offset = { },
+				.use_at = { },     .at = { },
+				.executable = { }, .writeable = true
+			}).with_result(
+				[&] (Region_map::Range range) {
+					_buffer.construct(*(Trace::Buffer *)range.start); },
+				[&] (Region_map::Attach_error) {
+					error("failed to attach trace buffer"); }
+			);
 		}
|
|
||||||
void flush()
|
void flush()
|
||||||
{
|
{
|
||||||
if (!_buffer.constructed()) return;
|
if (!_buffer.constructed()) return;
|
||||||
|
|
||||||
_env.env().rm().detach(_buffer->address());
|
_env.env().rm().detach(addr_t(_buffer->address()));
|
||||||
_buffer.destruct();
|
_buffer.destruct();
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -161,10 +161,20 @@ int Framebuffer::map_io_mem(addr_t base, size_t size, bool write_combined,
|
|||||||
if (!io_ds.valid())
|
if (!io_ds.valid())
|
||||||
return -2;
|
return -2;
|
||||||
|
|
||||||
try {
|
genode_env().rm().attach(io_ds, {
|
||||||
*out_addr = genode_env().rm().attach(io_ds, size, 0, addr != 0, addr);
|
.size = size,
|
||||||
}
|
.offset = { },
|
||||||
catch (Region_map::Region_conflict) { return -3; }
|
.use_at = (addr != 0),
|
||||||
|
.at = addr,
|
||||||
|
.executable = false,
|
||||||
|
.writeable = true
|
||||||
|
}).with_result(
|
||||||
|
[&] (Region_map::Range range) { *out_addr = (void *)range.start; },
|
||||||
|
[&] (Region_map::Attach_error) { }
|
||||||
|
);
|
||||||
|
|
||||||
|
if (*out_addr == nullptr)
|
||||||
|
return -3;
|
||||||
|
|
||||||
log("fb mapped to ", *out_addr);
|
log("fb mapped to ", *out_addr);
|
||||||
|
|
||||||
|
@ -180,14 +180,23 @@ class Acpica::Io_mem
|
|||||||
/* create managed dataspace to let virt region reserved */
|
/* create managed dataspace to let virt region reserved */
|
||||||
Genode::Region_map_client managed_region(rm_conn->create(io_mem._size));
|
Genode::Region_map_client managed_region(rm_conn->create(io_mem._size));
|
||||||
/* remember virt, since it get invalid during invalidate() */
|
/* remember virt, since it get invalid during invalidate() */
|
||||||
Genode::addr_t const re_attach_virt = reinterpret_cast<Genode::addr_t>(io_mem._virt);
|
Genode::addr_t const orig_virt = reinterpret_cast<Genode::addr_t>(io_mem._virt);
|
||||||
|
|
||||||
/* drop I/O mem and virt region get's freed */
|
/* drop I/O mem and virt region get's freed */
|
||||||
io_mem.invalidate();
|
io_mem.invalidate();
|
||||||
|
|
||||||
/* re-attach dummy managed dataspace to virt region */
|
/* re-attach dummy managed dataspace to virt region */
|
||||||
Genode::addr_t const re_attached_virt = Acpica::env().rm().attach_at(managed_region.dataspace(), re_attach_virt);
|
Genode::addr_t const re_attached_virt =
|
||||||
if (re_attach_virt != re_attached_virt)
|
Acpica::env().rm().attach(managed_region.dataspace(), {
|
||||||
|
.size = { }, .offset = { },
|
||||||
|
.use_at = true, .at = orig_virt,
|
||||||
|
.executable = false, .writeable = true
|
||||||
|
}).convert<Genode::addr_t>(
|
||||||
|
[&] (Genode::Region_map::Range range) { return range.start; },
|
||||||
|
[&] (Genode::Region_map::Attach_error) { return 0UL; }
|
||||||
|
);
|
||||||
|
|
||||||
|
if (orig_virt != re_attached_virt)
|
||||||
FAIL(0);
|
FAIL(0);
|
||||||
|
|
||||||
if (!io_mem.unused() || io_mem.stale())
|
if (!io_mem.unused() || io_mem.stale())
|
||||||
@ -235,7 +244,7 @@ class Acpica::Io_mem
|
|||||||
return;
|
return;
|
||||||
|
|
||||||
if (!stale()) {
|
if (!stale()) {
|
||||||
Acpica::env().rm().detach(_virt);
|
Acpica::env().rm().detach(Genode::addr_t(_virt));
|
||||||
Genode::destroy(Acpica::heap(), _io_mem);
|
Genode::destroy(Acpica::heap(), _io_mem);
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -294,8 +303,14 @@ class Acpica::Io_mem
|
|||||||
if (!io_mem)
|
if (!io_mem)
|
||||||
return 0UL;
|
return 0UL;
|
||||||
|
|
||||||
io_mem->_virt = Acpica::env().rm().attach(io_mem->_io_mem->dataspace(),
|
io_mem->_virt = Acpica::env().rm().attach(io_mem->_io_mem->dataspace(), {
|
||||||
io_mem->_size);
|
.size = io_mem->_size, .offset = { },
|
||||||
|
.use_at = { }, .at = { },
|
||||||
|
.executable = { }, .writeable = true
|
||||||
|
}).convert<Genode::uint8_t *>(
|
||||||
|
[&] (Genode::Region_map::Range r) { return (Genode::uint8_t *)r.start; },
|
||||||
|
[&] (Genode::Region_map::Attach_error) { return nullptr; }
|
||||||
|
);
|
||||||
|
|
||||||
return reinterpret_cast<Genode::addr_t>(io_mem->_virt);
|
return reinterpret_cast<Genode::addr_t>(io_mem->_virt);
|
||||||
}
|
}
|
||||||
@ -303,7 +318,7 @@ class Acpica::Io_mem
|
|||||||
Genode::addr_t pre_expand(ACPI_PHYSICAL_ADDRESS p, ACPI_SIZE s)
|
Genode::addr_t pre_expand(ACPI_PHYSICAL_ADDRESS p, ACPI_SIZE s)
|
||||||
{
|
{
|
||||||
if (_io_mem) {
|
if (_io_mem) {
|
||||||
Acpica::env().rm().detach(_virt);
|
Acpica::env().rm().detach(Genode::addr_t(_virt));
|
||||||
Genode::destroy(Acpica::heap(), _io_mem);
|
Genode::destroy(Acpica::heap(), _io_mem);
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -317,7 +332,7 @@ class Acpica::Io_mem
|
|||||||
Genode::addr_t post_expand(ACPI_PHYSICAL_ADDRESS p, ACPI_SIZE s)
|
Genode::addr_t post_expand(ACPI_PHYSICAL_ADDRESS p, ACPI_SIZE s)
|
||||||
{
|
{
|
||||||
if (_io_mem) {
|
if (_io_mem) {
|
||||||
Acpica::env().rm().detach(_virt);
|
Acpica::env().rm().detach(Genode::addr_t(_virt));
|
||||||
Genode::destroy(Acpica::heap(), _io_mem);
|
Genode::destroy(Acpica::heap(), _io_mem);
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -352,14 +367,28 @@ class Acpica::Io_mem
|
|||||||
Genode::addr_t virt = reinterpret_cast<Genode::addr_t>(io2._virt);
|
Genode::addr_t virt = reinterpret_cast<Genode::addr_t>(io2._virt);
|
||||||
|
|
||||||
Acpica::env().rm().detach(virt);
|
Acpica::env().rm().detach(virt);
|
||||||
Acpica::env().rm().attach_at(io_ds, virt, io2._size, off_phys);
|
if (Acpica::env().rm().attach(io_ds, {
|
||||||
|
.size = io2._size, .offset = off_phys,
|
||||||
|
.use_at = true, .at = virt,
|
||||||
|
.executable = { }, .writeable = true
|
||||||
|
}).failed()) Genode::error("re-attach io2 failed");
|
||||||
});
|
});
|
||||||
|
|
||||||
if (io_mem._virt)
|
if (io_mem._virt)
|
||||||
FAIL(0UL);
|
FAIL(0UL);
|
||||||
|
|
||||||
/* attach whole memory */
|
/* attach whole memory */
|
||||||
io_mem._virt = Acpica::env().rm().attach(io_ds);
|
io_mem._virt = Acpica::env().rm().attach(io_ds, {
|
||||||
|
.size = { }, .offset = { },
|
||||||
|
.use_at = { }, .at = { },
|
||||||
|
.executable = { }, .writeable = true
|
||||||
|
}).convert<Genode::uint8_t *>(
|
||||||
|
[&] (Genode::Region_map::Range range) { return (Genode::uint8_t *)range.start; },
|
||||||
|
[&] (Genode::Region_map::Attach_error) {
|
||||||
|
Genode::error("attaching while io_ds failed");
|
||||||
|
return nullptr;
|
||||||
|
}
|
||||||
|
);
|
||||||
return io_mem.to_virt(p);
|
return io_mem.to_virt(p);
|
||||||
});
|
});
|
||||||
|
|
||||||
|
@ -30,28 +30,45 @@ struct Libc::Cloned_malloc_heap_range
|
|||||||
|
|
||||||
Ram_dataspace_capability ds;
|
Ram_dataspace_capability ds;
|
||||||
|
|
||||||
size_t const size;
|
using Range = Region_map::Range;
|
||||||
addr_t const local_addr;
|
|
||||||
|
|
||||||
Cloned_malloc_heap_range(Ram_allocator &ram, Region_map &rm,
|
Range const range;
|
||||||
void *start, size_t size)
|
|
||||||
try :
|
Cloned_malloc_heap_range(Ram_allocator &ram, Region_map &rm, Range const range)
|
||||||
ram(ram), rm(rm), ds(ram.alloc(size)), size(size),
|
:
|
||||||
local_addr(rm.attach_at(ds, (addr_t)start))
|
ram(ram), rm(rm), ds(ram.alloc(range.num_bytes)), range(range)
|
||||||
{ }
|
{
|
||||||
catch (Region_map::Region_conflict) {
|
rm.attach(ds, {
|
||||||
error("could not clone heap region ", Hex_range((addr_t)start, size));
|
.size = { },
|
||||||
throw;
|
.offset = { },
|
||||||
|
.use_at = true,
|
||||||
|
.at = range.start,
|
||||||
|
.executable = { },
|
||||||
|
.writeable = true
|
||||||
|
}).with_result(
|
||||||
|
[&] (Range) { },
|
||||||
|
[&] (Region_map::Attach_error e) {
|
||||||
|
using Error = Region_map::Attach_error;
|
||||||
|
switch (e) {
|
||||||
|
case Error::OUT_OF_RAM: throw Out_of_ram();
|
||||||
|
case Error::OUT_OF_CAPS: throw Out_of_caps();
|
||||||
|
case Error::INVALID_DATASPACE: break;
|
||||||
|
case Error::REGION_CONFLICT: break;
|
||||||
|
}
|
||||||
|
error("failed to clone heap region ",
|
||||||
|
Hex_range(range.start, range.num_bytes));
|
||||||
|
}
|
||||||
|
);
|
||||||
}
|
}
|
||||||
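Editor's note: construction now takes a 'Region_map::Range' directly, and attach errors the constructor cannot handle locally are rethrown as the usual quota exceptions. A usage sketch matching the kernel-side call further below (addresses purely illustrative, the 'Registered<>' wrapper of the actual call site is omitted):

    /* sketch: clone one heap range announced by the parent */
    Genode::Region_map::Range const range { .start = 0x20000000, .num_bytes = 0x10000 };

    Libc::Cloned_malloc_heap_range &heap_range =
        *new (heap) Libc::Cloned_malloc_heap_range(env.ram(), env.rm(), range);

    heap_range.import_content(clone_connection);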
|
|
||||||
void import_content(Clone_connection &clone_connection)
|
void import_content(Clone_connection &clone_connection)
|
||||||
{
|
{
|
||||||
clone_connection.memory_content((void *)local_addr, size);
|
clone_connection.memory_content((void *)range.start, range.num_bytes);
|
||||||
}
|
}
|
||||||
|
|
||||||
virtual ~Cloned_malloc_heap_range()
|
virtual ~Cloned_malloc_heap_range()
|
||||||
{
|
{
|
||||||
rm.detach(local_addr);
|
rm.detach(range.start);
|
||||||
ram.free(ds);
|
ram.free(ds);
|
||||||
}
|
}
|
||||||
};
|
};
|
||||||
|
@ -50,13 +50,15 @@ namespace Libc {
|
|||||||
MAX_CHUNK_SIZE = 1024*1024
|
MAX_CHUNK_SIZE = 1024*1024
|
||||||
};
|
};
|
||||||
|
|
||||||
|
using Range = Region_map::Range;
|
||||||
|
|
||||||
struct Dataspace : List<Dataspace>::Element
|
struct Dataspace : List<Dataspace>::Element
|
||||||
{
|
{
|
||||||
Ram_dataspace_capability cap;
|
Ram_dataspace_capability cap;
|
||||||
void *local_addr;
|
Range range;
|
||||||
|
|
||||||
Dataspace(Ram_dataspace_capability c, void *a)
|
Dataspace(Ram_dataspace_capability cap, Range range)
|
||||||
: cap(c), local_addr(a) {}
|
: cap(cap), range(range) { }
|
||||||
};
|
};
|
||||||
|
|
||||||
class Dataspace_pool : public List<Dataspace>
|
class Dataspace_pool : public List<Dataspace>
|
||||||
|
@ -342,13 +342,13 @@ void Libc::Kernel::_handle_user_interrupt()
|
|||||||
|
|
||||||
void Libc::Kernel::_clone_state_from_parent()
|
void Libc::Kernel::_clone_state_from_parent()
|
||||||
{
|
{
|
||||||
struct Range { void *at; size_t size; };
|
using Range = Region_map::Range;
|
||||||
|
|
||||||
auto range_attr = [&] (Xml_node node)
|
auto range_attr = [&] (Xml_node const &node)
|
||||||
{
|
{
|
||||||
return Range {
|
return Range {
|
||||||
.at = (void *)node.attribute_value("at", 0UL),
|
.start = node.attribute_value("at", 0UL),
|
||||||
.size = node.attribute_value("size", 0UL)
|
.num_bytes = node.attribute_value("size", 0UL)
|
||||||
};
|
};
|
||||||
};
|
};
|
||||||
|
|
||||||
@ -365,7 +365,7 @@ void Libc::Kernel::_clone_state_from_parent()
|
|||||||
new (_heap)
|
new (_heap)
|
||||||
Registered<Cloned_malloc_heap_range>(_cloned_heap_ranges,
|
Registered<Cloned_malloc_heap_range>(_cloned_heap_ranges,
|
||||||
_env.ram(), _env.rm(),
|
_env.ram(), _env.rm(),
|
||||||
range.at, range.size); });
|
range); });
|
||||||
|
|
||||||
_clone_connection.construct(_env);
|
_clone_connection.construct(_env);
|
||||||
|
|
||||||
@ -385,7 +385,7 @@ void Libc::Kernel::_clone_state_from_parent()
|
|||||||
|
|
||||||
auto copy_from_parent = [&] (Range range)
|
auto copy_from_parent = [&] (Range range)
|
||||||
{
|
{
|
||||||
_clone_connection->memory_content(range.at, range.size);
|
_clone_connection->memory_content((void *)range.start, range.num_bytes);
|
||||||
};
|
};
|
||||||
|
|
||||||
/* clone application stack */
|
/* clone application stack */
|
||||||
|
@@ -36,12 +36,12 @@ Libc::Mem_alloc_impl::Dataspace_pool::~Dataspace_pool()
 */
 
 Ram_dataspace_capability ds_cap = ds->cap;
-void const * const local_addr = ds->local_addr;
+Range const range = ds->range;
 
 remove(ds);
 delete ds;
 
-_region_map->detach(local_addr);
+_region_map->detach(range.start);
 _ram->free(ds_cap);
 }
 }
@@ -49,33 +49,58 @@ Libc::Mem_alloc_impl::Dataspace_pool::~Dataspace_pool()
 
 int Libc::Mem_alloc_impl::Dataspace_pool::expand(size_t size, Range_allocator *alloc)
 {
-    Ram_dataspace_capability new_ds_cap;
-    void *local_addr;
 
     /* make new ram dataspace available at our local address space */
-    try {
-        new_ds_cap = _ram->alloc(size);
 
-        enum { MAX_SIZE = 0, NO_OFFSET = 0, ANY_LOCAL_ADDR = false };
-        local_addr = _region_map->attach(new_ds_cap, MAX_SIZE, NO_OFFSET,
-                                         ANY_LOCAL_ADDR, nullptr, _executable);
+    Ram_dataspace_capability new_ds_cap { };
+    int result = 0;
+    _ram->try_alloc(size).with_result(
+        [&] (Ram_dataspace_capability cap) { new_ds_cap = cap; },
+        [&] (Ram_allocator::Alloc_error e) {
+            switch (e) {
+            case Ram_allocator::Alloc_error::OUT_OF_RAM: result = -2; break;
+            case Ram_allocator::Alloc_error::OUT_OF_CAPS: result = -4; break;
+            case Ram_allocator::Alloc_error::DENIED: break;
             }
-    catch (Out_of_ram) { return -2; }
-    catch (Out_of_caps) { return -4; }
-    catch (Region_map::Region_conflict) {
+            result = -5;
+        });
+
+    if (result < 0)
+        return result;
+
+    Region_map::Range const range = _region_map->attach(new_ds_cap, {
+        .size = { },
+        .offset = { },
+        .use_at = { },
+        .at = { },
+        .executable = _executable,
+        .writeable = true
+    }).convert<Region_map::Range>(
+        [&] (Region_map::Range range) { return range; },
+        [&] (Region_map::Attach_error e) {
+            switch (e) {
+            case Region_map::Attach_error::OUT_OF_RAM: result = -2; break;
+            case Region_map::Attach_error::OUT_OF_CAPS: result = -4; break;
+            case Region_map::Attach_error::INVALID_DATASPACE: result = -6; break;
+            case Region_map::Attach_error::REGION_CONFLICT: break;
+            }
+            result = -7;
+            return Region_map::Range { };
+        });
+
+    if (result < 0) {
         _ram->free(new_ds_cap);
-        return -3;
+        return result;
     }
 
     /* add new local address range to our local allocator */
-    alloc->add_range((addr_t)local_addr, size);
+    alloc->add_range(range.start, range.num_bytes);
 
     /* now that we have new backing store, allocate Dataspace structure */
     return alloc->alloc_aligned(sizeof(Dataspace), 2).convert<int>(
 
     [&] (void *ptr) {
         /* add dataspace information to list of dataspaces */
-        Dataspace *ds = construct_at<Dataspace>(ptr, new_ds_cap, local_addr);
+        Dataspace *ds = construct_at<Dataspace>(ptr, new_ds_cap, range);
         insert(ds);
         return 0; },
 
@@ -2424,9 +2424,19 @@ void *Libc::Vfs_plugin::mmap(void *addr_in, ::size_t length, int prot, int flags
     return MAP_FAILED;
 }
 
-try {
-    addr = region_map().attach(ds_cap, length, offset);
-} catch (...) {
+region_map().attach(ds_cap, {
+    .size = length,
+    .offset = addr_t(offset),
+    .use_at = { },
+    .at = { },
+    .executable = { },
+    .writeable = true
+}).with_result(
+    [&] (Region_map::Range range) { addr = (void *)range.start; },
+    [&] (Region_map::Attach_error) { addr = nullptr; }
+);
+
+if (!addr) {
     monitor().monitor([&] {
         reference_handle->close();
         return Fn::COMPLETE;
@@ -2469,7 +2479,7 @@ int Libc::Vfs_plugin::munmap(void *addr, ::size_t)
 if (entry.start == addr) {
     reference_handle = entry.reference_handle;
     destroy(_alloc, &entry);
-    region_map().detach(addr);
+    region_map().detach(addr_t(addr));
 }
 });
 
@@ -333,17 +333,28 @@ struct Drm::Buffer
 
 bool mmap(Genode::Env &env)
 {
-    if (_local_addr) return true;
+    using Region_map = Genode::Region_map;
 
-    _local_addr = Gpu::addr_t(env.rm().attach(_allocation.cap, _allocation.size,
-                                              _allocation.offset));
-    return true;
+    if (!_local_addr)
+        env.rm().attach(_allocation.cap, {
+            .size = _allocation.size,
+            .offset = Genode::addr_t(_allocation.offset),
+            .use_at = { },
+            .at = { },
+            .executable = { },
+            .writeable = true
+        }).with_result(
+            [&] (Region_map::Range range) { _local_addr = range.start; },
+            [&] (Region_map::Attach_error) { Genode::error("Drm::Buffer::mmap failed"); }
+        );
+
+    return (_local_addr != 0);
 }
 
 void unmap()
 {
     if (_local_addr)
-        _env.rm().detach((void *)_local_addr);
+        _env.rm().detach(_local_addr);
 
     _local_addr = 0;
 }
@@ -20,6 +20,8 @@ void lib_1_good();
 void lib_1_exception();
 void lib_2_exception();
 
+struct Lib_1_exception { };
+
 struct Lib_1_local_3
 {
     int x { 0x12345678 };
@@ -115,7 +115,7 @@ static void lib_1_attr_destructor_2() { log(__func__, " ", Hex(--lib_1_pod_2));
 
 static void exception() { throw 666; }
 
-void lib_1_exception() { throw Genode::Region_map::Region_conflict(); }
+void lib_1_exception() { throw Lib_1_exception(); }
 void lib_1_good() { }
 
 
@@ -247,7 +247,7 @@ void Libc::Component::construct(Libc::Env &env)
     lib_1_exception();
     error("undelivered exception in shared lib");
 }
-catch (Region_map::Region_conflict) { log("exception in shared lib: caught"); }
+catch (Lib_1_exception) { log("exception in shared lib: caught"); }
 
 try {
     __ldso_raise_exception();
@@ -76,7 +76,7 @@
 #include <base/env.h>
 #include <base/signal.h>
 #include <base/allocator.h>
-#include <dataspace/client.h>
+#include <base/attached_dataspace.h>
 #include <util/string.h>
 #include <util/construct_at.h>
 
@@ -492,8 +492,7 @@ class Genode::Packet_stream_base
 
 Genode::Region_map &_rm;
 Genode::Dataspace_capability _ds_cap;
-void *_ds_local_base;
-Genode::size_t _ds_size { 0 };
+Genode::Attached_dataspace _ds { _rm, _ds_cap };
 
 Genode::off_t _submit_queue_offset;
 Genode::off_t _ack_queue_offset;
@@ -515,43 +514,24 @@ class Genode::Packet_stream_base
 _rm(rm), _ds_cap(transport_ds),
 
 /* map dataspace locally */
-_ds_local_base(rm.attach(_ds_cap)),
 _submit_queue_offset(0),
 _ack_queue_offset(_submit_queue_offset + submit_queue_size),
 _bulk_buffer_offset(align_addr(_ack_queue_offset + ack_queue_size, 6))
 {
-    Genode::size_t ds_size = Genode::Dataspace_client(_ds_cap).size();
-
-    if ((Genode::size_t)_bulk_buffer_offset >= ds_size)
+    if ((Genode::size_t)_bulk_buffer_offset >= _ds.size())
         throw Transport_dataspace_too_small();
 
-    _ds_size = ds_size;
-    _bulk_buffer_size = ds_size - _bulk_buffer_offset;
-}
-
-/**
- * Destructor
- */
-~Packet_stream_base()
-{
-    /*
-     * Prevent throwing exceptions from the destructor. Otherwise,
-     * the compiler may generate implicit calls to 'std::terminate'.
-     */
-    try {
-        /* unmap transport dataspace locally */
-        _rm.detach(_ds_local_base);
-    } catch (...) { }
+    _bulk_buffer_size = _ds.size() - _bulk_buffer_offset;
 }
 
 void *_submit_queue_local_base() {
-    return (void *)((Genode::addr_t)_ds_local_base + _submit_queue_offset); }
+    return _ds.local_addr<char>() + _submit_queue_offset; }
 
 void *_ack_queue_local_base() {
-    return (void *)((Genode::addr_t)_ds_local_base + _ack_queue_offset); }
+    return _ds.local_addr<char>() + _ack_queue_offset; }
 
 Genode::addr_t _bulk_buffer_local_base() {
-    return (Genode::addr_t)_ds_local_base + _bulk_buffer_offset; }
+    return (Genode::addr_t)_ds.local_addr<char>() + _bulk_buffer_offset; }
 
 /**
  * Hook for unit testing
@@ -578,11 +558,11 @@ class Genode::Packet_stream_base
 if (!packet_valid(packet) || packet.size() < sizeof(CONTENT_TYPE))
     throw Packet_descriptor::Invalid_packet();
 
-return (CONTENT_TYPE *)((Genode::addr_t)_ds_local_base + packet.offset());
+return (CONTENT_TYPE *)(_ds.local_addr<char>() + packet.offset());
 }
 
-Genode::addr_t ds_local_base() const { return (Genode::addr_t)_ds_local_base; }
-Genode::addr_t ds_size() const { return _ds_size; }
+Genode::addr_t ds_local_base() const { return (Genode::addr_t)_ds.local_addr<char>(); }
+Genode::addr_t ds_size() const { return _ds.size(); }
 };
 
 
@@ -853,8 +833,8 @@ class Genode::Packet_stream_source : private Packet_stream_base
 Genode::Dataspace_capability dataspace() {
     return Packet_stream_base::_dataspace(); }
 
-Genode::addr_t ds_local_base() const { return reinterpret_cast<Genode::addr_t>(_ds_local_base); }
-Genode::addr_t ds_size() const { return Packet_stream_base::_ds_size; }
+Genode::addr_t ds_local_base() const { return (Genode::addr_t)_ds.local_addr<void>(); }
+Genode::addr_t ds_size() const { return Packet_stream_base::_ds.size(); }
 };
 
 
@@ -1030,8 +1010,8 @@ class Genode::Packet_stream_sink : private Packet_stream_base
 Genode::Dataspace_capability dataspace() {
     return Packet_stream_base::_dataspace(); }
 
-Genode::addr_t ds_local_base() const { return reinterpret_cast<Genode::addr_t>(_ds_local_base); }
-Genode::addr_t ds_size() const { return Packet_stream_base::_ds_size; }
+Genode::addr_t ds_local_base() const { return (Genode::addr_t)_ds.local_addr<char>(); }
+Genode::addr_t ds_size() const { return Packet_stream_base::_ds.size(); }
 };
 
 #endif /* _INCLUDE__OS__PACKET_STREAM_H_ */
@@ -15,7 +15,6 @@
 #include <monitor.h>
 
 /* Genode includes */
-#include <trace_session/connection.h>
 #include <util/formatted_output.h>
 
 using namespace Genode;
@@ -80,25 +79,6 @@ struct Conditional
 };
 
 
-/******************
- ** Monitor_base **
- ******************/
-
-Monitor_base::Monitor_base(Trace::Connection &trace,
-                           Region_map &rm,
-                           Trace::Subject_id subject_id)
-:
-    _trace(trace), _rm(rm),
-    _buffer_raw(*(Trace::Buffer *)rm.attach(_trace.buffer(subject_id)))
-{ }
-
-
-Monitor_base::~Monitor_base()
-{
-    _rm.detach(&_buffer_raw);
-}
-
-
 /*************
  ** Monitor **
  *************/
@@ -19,7 +19,9 @@
 
 /* Genode includes */
 #include <base/trace/types.h>
+#include <base/attached_dataspace.h>
 #include <trace/trace_buffer.h>
+#include <trace_session/connection.h>
 
 namespace Genode { namespace Trace { class Connection; } }
 
@@ -33,13 +35,15 @@ class Monitor_base
 
 Genode::Trace::Connection &_trace;
 Genode::Region_map &_rm;
-Genode::Trace::Buffer &_buffer_raw;
+Genode::Attached_dataspace _ds;
+Genode::Trace::Buffer &_buffer_raw = *_ds.local_addr<Genode::Trace::Buffer>();
 
 Monitor_base(Genode::Trace::Connection &trace,
              Genode::Region_map &rm,
-             Genode::Trace::Subject_id subject_id);
-~Monitor_base();
+             Genode::Trace::Subject_id subject_id)
+:
+    _trace(trace), _rm(rm), _ds(rm, _trace.buffer(subject_id))
+{ }
 };
 
 
@@ -11,6 +11,9 @@
 * under the terms of the GNU Affero General Public License version 3.
 */
 
+/* Genode includes */
+#include <base/attached_dataspace.h>
+
 /* local includes */
 #include <policy.h>
 
@@ -26,11 +29,9 @@ Policy::Policy(Env &env, Trace::Connection &trace, Policy_dict &dict,
 [&] (Trace::Policy_id id) {
     Dataspace_capability const dst_ds = _trace.policy(id);
     if (dst_ds.valid()) {
-        void * const dst = _env.rm().attach(dst_ds);
-        void const * const src = _env.rm().attach(_ds);
-        memcpy(dst, src, _size);
-        _env.rm().detach(dst);
-        _env.rm().detach(src);
+        Attached_dataspace dst { _env.rm(), dst_ds },
+                           src { _env.rm(), _ds };
+        memcpy(dst.local_addr<void>(), src.local_addr<void>(), _size);
         return;
     }
     warning("failed to obtain policy buffer for '", name, "'");
@@ -185,9 +185,14 @@ class Acpi::Memory
 * address is the offset of loop_region.base() from
 * _io_region.base().
 */
-_acpi_window.attach_at(
-    _range.metadata((void *)loop_region.base())->connection->dataspace(),
-    loop_region.base() - _io_region->base(), loop_region.size());
+_acpi_window.attach(_range.metadata((void *)loop_region.base())->connection->dataspace(), {
+    .size = loop_region.size(),
+    .offset = { },
+    .use_at = true,
+    .at = loop_region.base() - _io_region->base(),
+    .executable = { },
+    .writeable = { }
+});
 
 return _acpi_ptr(req_base);
 }
@@ -322,7 +322,7 @@ class Platform::Resources : Noncopyable, public Hw_ready_state
 
 /* GTT starts at half of the mmio memory */
 size_t const gttm_half_size = mmio.size() / 2;
-off_t const gtt_offset = gttm_half_size;
+addr_t const gtt_offset = gttm_half_size;
 
 if (gttm_half_size < gtt_reserved()) {
     Genode::error("GTTM size too small");
@@ -331,15 +331,36 @@ class Platform::Resources : Noncopyable, public Hw_ready_state
 
 /* attach actual iomem + reserved */
 _rm_gttmm.detach(0ul);
-_rm_gttmm.attach_at(mmio.cap(), 0ul, gtt_offset);
+if (_rm_gttmm.attach(mmio.cap(), {
+    .size = gtt_offset,
+    .offset = { },
+    .use_at = true,
+    .at = 0,
+    .executable = { },
+    .writeable = true
+}).failed()) error("failed to re-attach mmio to gttmm");
 
 /* attach beginning of GTT */
 _rm_gttmm.detach(gtt_offset);
-_rm_gttmm.attach_at(mmio.cap(), gtt_offset,
-                    gtt_reserved(), gtt_offset);
+if (_rm_gttmm.attach(mmio.cap(), {
+    .size = size_t(gtt_reserved()),
+    .offset = gtt_offset,
+    .use_at = true,
+    .at = gtt_offset,
+    .executable = { },
+    .writeable = true
+}).failed()) error("failed to re-attach mmio at gtt offset to gttmm");
 
 _rm_gmadr.detach(0ul);
-_rm_gmadr.attach_at(gmadr.cap(), 0ul, aperture_reserved());
+if (_rm_gmadr.attach(gmadr.cap(), {
+    .size = size_t(aperture_reserved()),
+    .offset = { },
+    .use_at = true,
+    .at = 0,
+    .executable = { },
+    .writeable = true
+}).failed()) error("failed to re-attach gmadr");
 
 }, []() {
     error("reinit failed");
 });
@@ -407,12 +428,32 @@ class Platform::Resources : Noncopyable, public Hw_ready_state
 auto const dummmy_gtt_ds = _env.ram().alloc(Igd::PAGE_SIZE);
 auto remainder = gttm_half_size - gtt_reserved();
 
-for (off_t offset = gtt_offset + gtt_reserved();
+for (addr_t offset = gtt_offset + gtt_reserved();
      remainder > 0;
      offset += Igd::PAGE_SIZE, remainder -= Igd::PAGE_SIZE) {
 
-    rm.retry_with_upgrade({Igd::PAGE_SIZE}, Cap_quota{8}, [&]() {
-        _rm_gttmm.attach_at(dummmy_gtt_ds, offset, Igd::PAGE_SIZE); });
+    for (;;) {
+        Region_map::Attach_result const result =
+            _rm_gttmm.attach(dummmy_gtt_ds, {
+                .size = Igd::PAGE_SIZE,
+                .offset = { },
+                .use_at = true,
+                .at = offset,
+                .executable = false,
+                .writeable = true
+            });
+        if (result.ok())
+            break;
+
+        using Error = Region_map::Attach_error;
+
+        if (result == Error::OUT_OF_RAM) rm.upgrade_ram(Igd::PAGE_SIZE);
+        else if (result == Error::OUT_OF_CAPS) rm.upgrade_caps(8);
+        else {
+            error("failed to fill up GTT as dummy RAM");
+            break;
+        }
+    }
 }
 }
 
@@ -77,28 +77,44 @@ class Igd::Ppgtt_allocator : public Genode::Translation_table_allocator
 catch (Gpu::Session::Out_of_caps) { throw; }
 catch (...) { return Alloc_error::DENIED; }
 
-Alloc_error alloc_error = Alloc_error::DENIED;
+return _rm.attach(ds, {
+    .size = { },
+    .offset = { },
+    .use_at = { },
+    .at = { },
+    .executable = { },
+    .writeable = true
+}).convert<Alloc_result>(
 
-try {
-    void * const va = _rm.attach(ds);
+    [&] (Genode::Region_map::Range const range) -> Alloc_result {
+        void * const va = (void*)range.start;
         void * const pa = (void*)_backend.dma_addr(ds);
 
-        if (_map.add(ds, pa, va, alloc_size) == true) {
-            _range.add_range((Genode::addr_t)va, alloc_size);
-            result = _range.alloc_aligned(size, 12);
-            return result;
+        if (_map.add(ds, pa, va, range.num_bytes) == true) {
+            if (_range.add_range(range.start, range.num_bytes).ok())
+                return _range.alloc_aligned(size, 12);
+
+            Genode::error("Ppgtt_allocator failed to extend meta data");
         }
 
         /* _map.add failed, roll back _rm.attach */
-        _rm.detach(va);
-}
-catch (Genode::Out_of_ram) { alloc_error = Alloc_error::OUT_OF_RAM; }
-catch (Genode::Out_of_caps) { alloc_error = Alloc_error::OUT_OF_CAPS; }
-catch (...) { alloc_error = Alloc_error::DENIED; }
+        _rm.detach(range.start);
 
-/* roll back allocation */
         _backend.free(ds);
-return alloc_error;
+        return Alloc_error::DENIED;
+    },
+
+    [&] (Genode::Region_map::Attach_error e) {
+
+        _backend.free(ds);
+
+        using Error = Genode::Region_map::Attach_error;
+
+        if (e == Error::OUT_OF_RAM) return Alloc_error::OUT_OF_RAM;
+        if (e == Error::OUT_OF_CAPS) return Alloc_error::OUT_OF_CAPS;
+        return Alloc_error::DENIED;
+    }
+);
 }
 
 void free(void *addr, size_t size) override
|
|||||||
|
|
||||||
using namespace Driver;
|
using namespace Driver;
|
||||||
|
|
||||||
Device_pd::Region_map_client::Local_addr
|
|
||||||
Device_pd::Region_map_client::attach(Dataspace_capability ds,
|
Device_pd::Region_map_client::Attach_result
|
||||||
size_t size,
|
Device_pd::Region_map_client::attach(Dataspace_capability ds, Attr const &attr)
|
||||||
off_t offset,
|
|
||||||
bool use_local_addr,
|
|
||||||
Local_addr local_addr,
|
|
||||||
bool executable,
|
|
||||||
bool writeable)
|
|
||||||
{
|
{
|
||||||
return retry<Out_of_ram>(
|
for (;;) {
|
||||||
[&] () {
|
Attach_result const result = Genode::Region_map_client::attach(ds, attr);
|
||||||
return retry<Out_of_caps>(
|
if (result == Attach_error::OUT_OF_RAM) upgrade_ram();
|
||||||
[&] () {
|
else if (result == Attach_error::OUT_OF_CAPS) upgrade_caps();
|
||||||
return Genode::Region_map_client::attach(ds, size, offset,
|
else
|
||||||
use_local_addr,
|
return result;
|
||||||
local_addr,
|
|
||||||
executable,
|
|
||||||
writeable); },
|
|
||||||
[&] () {
|
|
||||||
upgrade_caps();
|
|
||||||
}
|
}
|
||||||
);
|
|
||||||
},
|
|
||||||
[&] () { upgrade_ram(); }
|
|
||||||
);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
@ -116,21 +102,26 @@ void Device_pd::remove_range(Io_mmu::Range const & range)
|
|||||||
void Device_pd::enable_pci_device(Io_mem_dataspace_capability const io_mem_cap,
|
void Device_pd::enable_pci_device(Io_mem_dataspace_capability const io_mem_cap,
|
||||||
Pci::Bdf const & bdf)
|
Pci::Bdf const & bdf)
|
||||||
{
|
{
|
||||||
addr_t addr = _address_space.attach(io_mem_cap, 0x1000);
|
_address_space.attach(io_mem_cap, {
|
||||||
|
.size = 0x1000, .offset = { },
|
||||||
/* sanity check */
|
.use_at = { }, .at = { },
|
||||||
if (!addr)
|
.executable = { }, .writeable = true
|
||||||
throw Region_map::Region_conflict();
|
}).with_result(
|
||||||
|
[&] (Region_map::Range range) {
|
||||||
|
|
||||||
/* trigger eager mapping of memory */
|
/* trigger eager mapping of memory */
|
||||||
_pd.map(Pd_session::Virt_range { addr, 0x1000 });
|
_pd.map(Pd_session::Virt_range { range.start, range.num_bytes });
|
||||||
|
|
||||||
/* try to assign pci device to this protection domain */
|
/* try to assign pci device to this protection domain */
|
||||||
if (!_pd.assign_pci(addr, Pci::Bdf::rid(bdf)))
|
if (!_pd.assign_pci(range.start, Pci::Bdf::rid(bdf)))
|
||||||
log("Assignment of PCI device ", bdf, " to device PD failed, no IOMMU?!");
|
log("Assignment of PCI device ", bdf, " to device PD failed, no IOMMU?!");
|
||||||
|
|
||||||
/* we don't need the mapping anymore */
|
/* we don't need the mapping anymore */
|
||||||
_address_space.detach(addr);
|
_address_space.detach(range.start);
|
||||||
|
},
|
||||||
|
[&] (Region_map::Attach_error) {
|
||||||
|
error("failed to attach PCI device to device PD"); }
|
||||||
|
);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
|
@ -66,13 +66,7 @@ class Driver::Device_pd : public Io_mmu::Domain
|
|||||||
_ram_guard(ram_guard), _cap_guard(cap_guard)
|
_ram_guard(ram_guard), _cap_guard(cap_guard)
|
||||||
{ }
|
{ }
|
||||||
|
|
||||||
Local_addr attach(Dataspace_capability ds,
|
Attach_result attach(Dataspace_capability ds, Attr const &attr) override;
|
||||||
size_t size = 0,
|
|
||||||
off_t offset = 0,
|
|
||||||
bool use_local_addr = false,
|
|
||||||
Local_addr local_addr = (void *)0,
|
|
||||||
bool executable = false,
|
|
||||||
bool writeable = true) override;
|
|
||||||
|
|
||||||
void upgrade_ram();
|
void upgrade_ram();
|
||||||
void upgrade_caps();
|
void upgrade_caps();
|
||||||
|
@ -402,12 +402,9 @@ bool Genode::Sandbox::Library::ready_to_create_child(Start_model::Name const
|
|||||||
warning("local capabilities exhausted during child creation"); }
|
warning("local capabilities exhausted during child creation"); }
|
||||||
catch (Child::Missing_name_attribute) {
|
catch (Child::Missing_name_attribute) {
|
||||||
warning("skipped startup of nameless child"); }
|
warning("skipped startup of nameless child"); }
|
||||||
catch (Region_map::Region_conflict) {
|
catch (Attached_dataspace::Region_conflict) {
|
||||||
warning("failed to attach dataspace to local address space "
|
warning("failed to attach dataspace to local address space "
|
||||||
"during child construction"); }
|
"during child construction"); }
|
||||||
catch (Region_map::Invalid_dataspace) {
|
|
||||||
warning("attempt to attach invalid dataspace to local address space "
|
|
||||||
"during child construction"); }
|
|
||||||
catch (Service_denied) {
|
catch (Service_denied) {
|
||||||
warning("failed to create session during child construction"); }
|
warning("failed to create session during child construction"); }
|
||||||
|
|
||||||
|
@@ -898,38 +898,42 @@ class Vfs::Ram_file_system : public Vfs::File_system
 {
     using namespace Vfs_ram;
 
-    Ram_dataspace_capability ds_cap;
-
     Node * const node = lookup(path);
     if (!node)
-        return ds_cap;
+        return { };
 
     File * const file = dynamic_cast<File *>(node);
     if (!file)
+        return { };
+
+    size_t const len = file->length();
+
+    return _env.env().ram().try_alloc(len).convert<Dataspace_capability>(
+        [&] (Ram_dataspace_capability ds_cap) {
+            return _env.env().rm().attach(ds_cap, {
+                .size = { }, .offset = { }, .use_at = { },
+                .at = { }, .executable = { }, .writeable = true
+            }).convert<Dataspace_capability>(
+                [&] (Region_map::Range const range) {
+                    file->read(Byte_range_ptr((char *)range.start, len), Seek{0});
+                    _env.env().rm().detach(range.start);
                     return ds_cap;
-
-    size_t len = file->length();
-
-    char *local_addr = nullptr;
-    try {
-        ds_cap = _env.env().ram().alloc(len);
-
-        local_addr = _env.env().rm().attach(ds_cap);
-        file->read(Byte_range_ptr(local_addr, file->length()), Seek{0});
-        _env.env().rm().detach(local_addr);
-
-    } catch(...) {
-        _env.env().rm().detach(local_addr);
+                },
+                [&] (Region_map::Attach_error) {
                     _env.env().ram().free(ds_cap);
                     return Dataspace_capability();
                 }
-    return ds_cap;
+            );
+        },
+        [&] (Ram_allocator::Alloc_error) { return Dataspace_capability(); }
+    );
 }
 
-void release(char const *, Dataspace_capability ds_cap) override {
+void release(char const *, Dataspace_capability ds_cap) override
+{
     _env.env().ram().free(
-        static_cap_cast<Genode::Ram_dataspace>(ds_cap)); }
+        static_cap_cast<Genode::Ram_dataspace>(ds_cap));
+}
 
 Watch_result watch(char const * const path, Vfs_watch_handle **handle,
                    Allocator &alloc) override
@@ -563,20 +563,31 @@ class Vfs::Tar_file_system : public File_system
     return Dataspace_capability();
 }
 
-try {
-    Ram_dataspace_capability ds_cap =
-        _env.ram().alloc((size_t)record->size());
-
-    void *local_addr = _env.rm().attach(ds_cap);
-    memcpy(local_addr, record->data(), (size_t)record->size());
-    _env.rm().detach(local_addr);
-
+size_t const len = size_t(record->size());
+
+using Region_map = Genode::Region_map;
+
+return _env.ram().try_alloc(len).convert<Dataspace_capability>(
+    [&] (Ram_dataspace_capability ds_cap) {
+        return _env.rm().attach(ds_cap, {
+            .size = { }, .offset = { }, .use_at = { },
+            .at = { }, .executable = { }, .writeable = true
+        }).convert<Dataspace_capability>(
+            [&] (Region_map::Range const range) {
+                memcpy((void *)range.start, record->data(), len);
+                _env.rm().detach(range.start);
                 return ds_cap;
-}
-catch (...) { Genode::warning(__func__, " could not create new dataspace"); }
+            },
+            [&] (Region_map::Attach_error) {
+                _env.ram().free(ds_cap);
                 return Dataspace_capability();
             }
+        );
+    },
+    [&] (Genode::Ram_allocator::Alloc_error) {
+        return Dataspace_capability(); }
+);
+}
 
 void release(char const *, Dataspace_capability ds_cap) override
 {
@@ -77,10 +77,10 @@ struct Monitor::Gdb::State : Noncopyable
 linker_area_region.writeable ?
     "ram" : "rom");
 xml.attribute("start",
-              Value(Hex(region.range.addr +
-                        linker_area_region.range.addr)));
+              Value(Hex(region.range.start +
+                        linker_area_region.range.start)));
 xml.attribute("length",
-              Value(Hex(linker_area_region.range.size)));
+              Value(Hex(linker_area_region.range.num_bytes)));
 });
 });
 
@@ -97,10 +97,10 @@ struct Monitor::Gdb::State : Noncopyable
 stack_area_region.writeable ?
     "ram" : "rom");
 xml.attribute("start",
-              Value(Hex(region.range.addr +
-                        stack_area_region.range.addr)));
+              Value(Hex(region.range.start +
+                        stack_area_region.range.start)));
 xml.attribute("length",
-              Value(Hex(stack_area_region.range.size)));
+              Value(Hex(stack_area_region.range.num_bytes)));
 });
 });
 
@@ -109,8 +109,8 @@ struct Monitor::Gdb::State : Noncopyable
 
 xml.node("memory", [&] {
     xml.attribute("type", region.writeable ? "ram" : "rom");
-    xml.attribute("start", Value(Hex(region.range.addr)));
-    xml.attribute("length", Value(Hex(region.range.size)));
+    xml.attribute("start", Value(Hex(region.range.start)));
+    xml.attribute("length", Value(Hex(region.range.num_bytes)));
 });
 });
 });
@@ -52,14 +52,29 @@ class Monitor::Memory_accessor : Noncopyable
 
 struct { uint8_t * const _local_ptr; };
 
+uint8_t *_attach()
+{
+    return _local_rm.attach(_pd._address_space.dataspace(), {
+        .size = WINDOW_SIZE,
+        .offset = _offset,
+        .use_at = { },
+        .at = { },
+        .executable = false,
+        .writeable = true
+    }).convert<uint8_t *>(
+        [&] (Region_map::Range range) { return (uint8_t *)range.start; },
+        [&] (Region_map::Attach_error) { return nullptr; }
+    );
+}
+
 Curr_view(Region_map &local_rm, Inferior_pd &pd, addr_t offset)
 :
-    _local_rm(local_rm), _pd(pd), _offset(offset),
-    _local_ptr(_local_rm.attach(pd._address_space.dataspace(),
-                                WINDOW_SIZE, offset))
+    _local_rm(local_rm), _pd(pd), _offset(offset), _local_ptr(_attach())
 { }
 
-~Curr_view() { _local_rm.detach(_local_ptr); }
+~Curr_view() { if (_local_ptr) _local_rm.detach(addr_t(_local_ptr)); }
 
+bool valid() const { return (_local_ptr != nullptr); };
+
 bool _in_curr_range(Virt_addr at) const
 {
@@ -271,8 +286,9 @@ class Monitor::Memory_accessor : Noncopyable
 
 if (!_curr_view.constructed()) {
     addr_t const offset = at.value & ~(WINDOW_SIZE - 1);
-    try { _curr_view.construct(_env.rm(), pd, offset); }
-    catch (Region_map::Region_conflict) {
+    _curr_view.construct(_env.rm(), pd, offset);
+    if (!_curr_view->valid()) {
+        _curr_view.destruct();
         warning("attempt to access memory outside the virtual address space: ",
                 Hex(at.value));
         return 0;
@@ -66,7 +66,7 @@ struct Monitor::Monitored_region_map : Monitored_rpc_object<Region_map>
 }
 
 Dataspace_capability create_writable_copy(Dataspace_capability orig_ds,
-                                          off_t offset, size_t size)
+                                          addr_t offset, size_t size)
 {
     Attached_dataspace ds { _local_rm, orig_ds };
 
@@ -86,6 +86,12 @@ struct Monitor::Monitored_region_map : Monitored_rpc_object<Region_map>
 
 Constructible<Writeable_text_segments> _writeable_text_segments { };
 
+static bool _intersects(Range const &a, Range const &b)
+{
+    addr_t const a_end = a.start + a.num_bytes - 1;
+    addr_t const b_end = b.start + b.num_bytes - 1;
+    return (b.start <= a_end) && (b_end >= a.start);
+}
+
 void writeable_text_segments(Allocator &alloc,
                              Ram_allocator &ram,
@@ -97,27 +103,16 @@ struct Monitor::Monitored_region_map : Monitored_rpc_object<Region_map>
 
 struct Region : Registry<Region>::Element
 {
-    struct Range
-    {
-        addr_t addr;
-        size_t size;
-
-        bool intersects(Range const &other) const
-        {
-            addr_t end = addr + size - 1;
-            addr_t other_end = other.addr + other.size - 1;
-            return ((other.addr <= end) && (other_end >= addr));
-        }
-    };
-
     Dataspace_capability cap;
     Range range;
     bool writeable;
 
     Region(Registry<Region> &registry, Dataspace_capability cap,
-           addr_t addr, size_t size, bool writeable)
-    : Registry<Region>::Element(registry, *this),
-      cap(cap), range(addr, size), writeable(writeable) { }
+           Range range, bool writeable)
+    :
+        Registry<Region>::Element(registry, *this),
+        cap(cap), range(range), writeable(writeable)
+    { }
 };
 
 Registry<Region> _regions { };
@@ -125,8 +120,7 @@ struct Monitor::Monitored_region_map : Monitored_rpc_object<Region_map>
 void for_each_region(auto const &fn) const
 {
     _regions.for_each([&] (Region const &region) {
-        fn(region);
-    });
+        fn(region); });
 }
 
 Allocator &_alloc;
@@ -147,28 +141,17 @@ struct Monitor::Monitored_region_map : Monitored_rpc_object<Region_map>
 ** Region_map interface **
 **************************/
 
-Local_addr attach(Dataspace_capability ds, size_t size = 0,
-                  off_t offset = 0, bool use_local_addr = false,
-                  Local_addr local_addr = (void *)0,
-                  bool executable = false,
-                  bool writeable = true) override
+Attach_result attach(Dataspace_capability ds, Attr const &orig_attr) override
 {
-    if (executable && !writeable && _writeable_text_segments.constructed()) {
-        ds = _writeable_text_segments->create_writable_copy(ds, offset, size);
-        offset = 0;
-        writeable = true;
+    Attr attr = orig_attr;
+    if (attr.executable && !attr.writeable && _writeable_text_segments.constructed()) {
+        ds = _writeable_text_segments->create_writable_copy(ds, attr.offset, attr.size);
+        attr.offset = 0;
+        attr.writeable = true;
     }
 
-    Local_addr attached_addr = _real.call<Rpc_attach>(ds, size, offset,
-                                                      use_local_addr,
-                                                      local_addr,
-                                                      executable,
-                                                      writeable);
-    size_t region_size = size ? size :
-                         (Dataspace_client(ds).size() - offset);
-    enum { PAGE_SIZE_LOG2 = 12 };
-    region_size = align_addr(region_size, PAGE_SIZE_LOG2);
-
+    return _real.call<Rpc_attach>(ds, attr).convert<Attach_result>(
+        [&] (Range const range) -> Attach_result {
+
     /*
      * It can happen that previous attachments got implicitly
      * removed by destruction of the dataspace without knowledge
@@ -176,28 +159,34 @@ struct Monitor::Monitored_region_map : Monitored_rpc_object<Region_map>
     * overlap with outdated region registry entries which must
     * be removed before inserting the new region.
     */
 
-    Region::Range range { attached_addr, region_size };
-
     _regions.for_each([&] (Region &region) {
-        if (region.range.intersects(range))
-            destroy(_alloc, &region);
-    });
+        if (_intersects(region.range, range))
+            destroy(_alloc, &region); });
 
-    new (_alloc) Region(_regions, ds, (addr_t)attached_addr,
-                        region_size, writeable);
-
-    return attached_addr;
+    try {
+        new (_alloc) Region(_regions, ds, range, attr.writeable);
+    }
+    catch (Out_of_ram) {
+        _real.call<Rpc_detach>(range.start);
+        return Attach_error::OUT_OF_RAM;
+    }
+    catch (Out_of_caps) {
+        _real.call<Rpc_detach>(range.start);
+        return Attach_error::OUT_OF_CAPS;
+    }
+    return range;
+    },
+    [&] (Attach_error e) { return e; }
+    );
 }
 
-void detach(Local_addr local_addr) override
+void detach(addr_t const at) override
 {
-    _real.call<Rpc_detach>(local_addr);
+    _real.call<Rpc_detach>(at);
 
     _regions.for_each([&] (Region &region) {
-        if (region.range.intersects(Region::Range { local_addr, 1 }))
-            destroy(_alloc, &region);
-    });
+        if (_intersects(region.range, Range { at, 1 }))
+            destroy(_alloc, &region); });
 }
 
 void fault_handler(Signal_context_capability) override
@@ -205,10 +194,7 @@ struct Monitor::Monitored_region_map : Monitored_rpc_object<Region_map>
 warning("Monitored_region_map: ignoring custom fault_handler for ", _name);
 }
 
-State state() override
-{
-    return _real.call<Rpc_state>();
-}
+Fault fault() override { return _real.call<Rpc_fault>(); }
 
 Dataspace_capability dataspace() override
 {
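The pattern recurring throughout these hunks is the replacement of the former exception-throwing 'attach'/'attach_at' calls by the 'Attr'-struct variant whose result is inspected via 'convert' or 'with_result'. The following minimal sketch illustrates that usage only; it is not part of the commit, and the function name 'attach_rw' as well as the assumption of having a 'Genode::Env &env' and a dataspace capability 'ds' at hand are placeholders chosen for illustration.

/* sketch only: maps 'ds' read/writeable and returns its local address, 0 on failure */
#include <base/env.h>
#include <region_map/region_map.h>

static Genode::addr_t attach_rw(Genode::Env &env, Genode::Dataspace_capability ds)
{
    using namespace Genode;

    return env.rm().attach(ds, {
        .size       = { },   /* whole dataspace */
        .offset     = { },
        .use_at     = { },   /* let the region map pick the address */
        .at         = { },
        .executable = { },
        .writeable  = true
    }).convert<addr_t>(
        [&] (Region_map::Range range)  { return range.start; },
        [&] (Region_map::Attach_error) { return 0UL; });
}

A later 'env.rm().detach(addr)' then takes the attached start address as plain 'addr_t', matching the reworked 'detach' signature used in the hunks above.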