diff --git a/repos/base-foc/src/core/include/platform_thread.h b/repos/base-foc/src/core/include/platform_thread.h index 5829ac714d..5783179e74 100644 --- a/repos/base-foc/src/core/include/platform_thread.h +++ b/repos/base-foc/src/core/include/platform_thread.h @@ -152,10 +152,10 @@ class Core::Platform_thread : Interface Affinity::Location affinity() const; /** - * Make thread to vCPU + * Turn thread into vCPU */ Foc::l4_cap_idx_t setup_vcpu(unsigned, Cap_mapping const &, - Cap_mapping &, Region_map::Local_addr &); + Cap_mapping &, addr_t &); /************************ diff --git a/repos/base-foc/src/core/include/vm_session_component.h b/repos/base-foc/src/core/include/vm_session_component.h index 6df15ff0c1..6ec5602d24 100644 --- a/repos/base-foc/src/core/include/vm_session_component.h +++ b/repos/base-foc/src/core/include/vm_session_component.h @@ -49,7 +49,7 @@ struct Core::Vcpu : Rpc_object Vcpu_id_allocator &_vcpu_ids; Cap_mapping _recall { true }; Foc::l4_cap_idx_t _task_index_client { }; - Region_map::Local_addr _foc_vcpu_state { }; + addr_t _foc_vcpu_state { }; public: @@ -64,8 +64,8 @@ struct Core::Vcpu : Rpc_object ** Native_vcpu RPC interface ** *******************************/ - Foc::l4_cap_idx_t task_index() const { return _task_index_client; } - Region_map::Local_addr foc_vcpu_state() const { return _foc_vcpu_state; } + Foc::l4_cap_idx_t task_index() const { return _task_index_client; } + addr_t foc_vcpu_state() const { return _foc_vcpu_state; } }; @@ -93,7 +93,7 @@ class Core::Vm_session_component /* helpers for vm_session_common.cc */ void _attach_vm_memory(Dataspace_component &, addr_t, Attach_attr); void _detach_vm_memory(addr_t, size_t); - void _with_region(Region_map::Local_addr, auto const &); + void _with_region(addr_t, auto const &); protected: @@ -116,9 +116,9 @@ class Core::Vm_session_component *********************************/ /* used on destruction of attached dataspaces */ - void detach(Region_map::Local_addr) override; /* vm_session_common.cc */ - void unmap_region(addr_t, size_t) override; /* vm_session_common.cc */ - void reserve_and_flush(Region_map::Local_addr) override; /* vm_session_common.cc */ + void detach_at (addr_t) override; + void unmap_region (addr_t, size_t) override; + void reserve_and_flush (addr_t) override; /************************** diff --git a/repos/base-foc/src/core/platform_thread.cc b/repos/base-foc/src/core/platform_thread.cc index 60bb51b333..4530633c13 100644 --- a/repos/base-foc/src/core/platform_thread.cc +++ b/repos/base-foc/src/core/platform_thread.cc @@ -352,7 +352,7 @@ Platform_thread::~Platform_thread() Foc::l4_cap_idx_t Platform_thread::setup_vcpu(unsigned const vcpu_id, Cap_mapping const &task_vcpu, Cap_mapping &vcpu_irq, - Region_map::Local_addr &vcpu_state) + addr_t &vcpu_state) { if (!_platform_pd) return Foc::L4_INVALID_CAP; @@ -361,8 +361,7 @@ Foc::l4_cap_idx_t Platform_thread::setup_vcpu(unsigned const vcpu_id, return Foc::L4_INVALID_CAP; /* vCPU state attached by kernel syscall to client PD directly */ - vcpu_state = Region_map::Local_addr(Platform::VCPU_VIRT_EXT_START + - L4_PAGESIZE * vcpu_id); + vcpu_state = Platform::VCPU_VIRT_EXT_START + L4_PAGESIZE * vcpu_id; l4_fpage_t const vm_page = l4_fpage(vcpu_state, L4_PAGESHIFT, L4_FPAGE_RW); diff --git a/repos/base-foc/src/core/vm_session_component.cc b/repos/base-foc/src/core/vm_session_component.cc index ab9a21e4b9..d758dfc5fe 100644 --- a/repos/base-foc/src/core/vm_session_component.cc +++ b/repos/base-foc/src/core/vm_session_component.cc @@ -132,7 +132,7 @@ 
Vm_session_component::~Vm_session_component()
 		if (!_map.any_block_addr(&out_addr))
 			break;
 
-		detach(out_addr);
+		detach_at(out_addr);
 	}
 }
diff --git a/repos/base-foc/src/include/foc_native_vcpu/foc_native_vcpu.h b/repos/base-foc/src/include/foc_native_vcpu/foc_native_vcpu.h
index 4a3b9bd303..113af8faf1 100644
--- a/repos/base-foc/src/include/foc_native_vcpu/foc_native_vcpu.h
+++ b/repos/base-foc/src/include/foc_native_vcpu/foc_native_vcpu.h
@@ -22,7 +22,7 @@
 struct Genode::Vm_session::Native_vcpu : Interface
 {
-	GENODE_RPC(Rpc_foc_vcpu_state, Region_map::Local_addr, foc_vcpu_state);
+	GENODE_RPC(Rpc_foc_vcpu_state, addr_t, foc_vcpu_state);
 	GENODE_RPC(Rpc_task_index, Foc::l4_cap_idx_t, task_index);
 
 	GENODE_RPC_INTERFACE(Rpc_task_index, Rpc_foc_vcpu_state);
diff --git a/repos/base-foc/src/lib/base/x86/vm.cc b/repos/base-foc/src/lib/base/x86/vm.cc
index 33c966005d..c2cd71be62 100644
--- a/repos/base-foc/src/lib/base/x86/vm.cc
+++ b/repos/base-foc/src/lib/base/x86/vm.cc
@@ -90,7 +90,7 @@ struct Foc_native_vcpu_rpc : Rpc_client, Noncopyable
 	Foc::l4_cap_idx_t task_index() { return call<Rpc_task_index>(); }
 
 	Foc::l4_vcpu_state_t * foc_vcpu_state() {
-		return static_cast<Foc::l4_vcpu_state_t *>(call<Rpc_foc_vcpu_state>()); }
+		return reinterpret_cast<Foc::l4_vcpu_state_t *>(call<Rpc_foc_vcpu_state>()); }
 };
diff --git a/repos/base-hw/src/core/core_region_map.cc b/repos/base-hw/src/core/core_region_map.cc
index 4f48f89252..8965688c8a 100644
--- a/repos/base-hw/src/core/core_region_map.cc
+++ b/repos/base-hw/src/core/core_region_map.cc
@@ -23,67 +23,63 @@ using namespace Core;
 
-Region_map::Local_addr
-Core_region_map::attach(Dataspace_capability ds_cap, size_t size,
-                        off_t offset, bool use_local_addr,
-                        Region_map::Local_addr, bool, bool writeable)
+Region_map::Attach_result
+Core_region_map::attach(Dataspace_capability ds_cap, Attr const &attr)
 {
-	return _ep.apply(ds_cap, [&] (Dataspace_component *ds_ptr) -> Local_addr {
+	return _ep.apply(ds_cap, [&] (Dataspace_component *ds_ptr) -> Attach_result {
 
 		if (!ds_ptr)
-			throw Invalid_dataspace();
+			return Attach_error::INVALID_DATASPACE;
 
 		Dataspace_component &ds = *ds_ptr;
 
-		if (size == 0)
-			size = ds.size();
+		size_t const size = (attr.size == 0) ? ds.size() : attr.size;
+		size_t const page_rounded_size = (size + get_page_size() - 1) & get_page_mask();
 
-		size_t page_rounded_size = (size + get_page_size() - 1) & get_page_mask();
-
-		if (use_local_addr) {
-			error("Parameter 'use_local_addr' not supported within core");
-			return nullptr;
-		}
-
-		if (offset) {
-			error("Parameter 'offset' not supported within core");
-			return nullptr;
-		}
+		/* attach attributes 'use_at' and 'offset' not supported within core */
+		if (attr.use_at || attr.offset)
+			return Attach_error::REGION_CONFLICT;
 
 		unsigned const align = get_page_size_log2();
 
 		/* allocate range in core's virtual address space */
-		Allocator::Alloc_result virt =
+		Allocator::Alloc_result const virt =
 			platform().region_alloc().alloc_aligned(page_rounded_size, align);
 
 		if (virt.failed()) {
			error("could not allocate virtual address range in core of size ",
			      page_rounded_size);
-			return nullptr;
+			return Attach_error::REGION_CONFLICT;
 		}
 
 		using namespace Hw;
 
 		/* map the dataspace's physical pages to corresponding virtual addresses */
-		unsigned num_pages = (unsigned)(page_rounded_size >> get_page_size_log2());
-		Page_flags const flags { (writeable && ds.writeable()) ? RW : RO,
-		                         NO_EXEC, KERN, GLOBAL,
-		                         ds.io_mem() ? DEVICE : RAM,
-		                         ds.cacheability() };
+		unsigned const num_pages = unsigned(page_rounded_size >> get_page_size_log2());
 
-		return virt.convert<void *>(
+		Page_flags const flags {
+			.writeable  = (attr.writeable && ds.writeable()) ? RW : RO,
+			.executable = NO_EXEC,
+			.privileged = KERN,
+			.global     = GLOBAL,
+			.type       = ds.io_mem() ? DEVICE : RAM,
+			.cacheable  = ds.cacheability()
+		};
 
-			[&] (void *virt_addr) -> void * {
+		return virt.convert<Attach_result>(
+
+			[&] (void *virt_addr) -> Attach_result {
 				if (map_local(ds.phys_addr(), (addr_t)virt_addr, num_pages, flags))
-					return virt_addr;
+					return Range { .start = addr_t(virt_addr),
+					               .num_bytes = page_rounded_size };
 
 				platform().region_alloc().free(virt_addr, page_rounded_size);
-				return nullptr; },
+				return Attach_error::REGION_CONFLICT; },
 
 			[&] (Allocator::Alloc_error) {
-				return nullptr; });
+				return Attach_error::REGION_CONFLICT; });
 	});
 }
 
-void Core_region_map::detach(Local_addr) { }
+void Core_region_map::detach(addr_t) { }
diff --git a/repos/base-hw/src/core/platform_thread.cc b/repos/base-hw/src/core/platform_thread.cc
index 57891e82f9..898f0f68d0 100644
--- a/repos/base-hw/src/core/platform_thread.cc
+++ b/repos/base-hw/src/core/platform_thread.cc
@@ -108,7 +108,14 @@ Platform_thread::Platform_thread(size_t const quota,
 		error("failed to allocate UTCB");
 		throw Out_of_ram();
 	}
-	_utcb_core_addr = (Native_utcb *)core_env().rm_session()->attach(_utcb);
+
+	Region_map::Attr attr { };
+	attr.writeable = true;
+	core_env().rm_session()->attach(_utcb, attr).with_result(
+		[&] (Region_map::Range range) {
+			_utcb_core_addr = (Native_utcb *)range.start; },
+		[&] (Region_map::Attach_error) {
+			error("failed to attach UTCB of new thread within core"); });
 }
diff --git a/repos/base-hw/src/core/spec/arm/virtualization/vm_session_component.cc b/repos/base-hw/src/core/spec/arm/virtualization/vm_session_component.cc
index 2876420bc9..9eb54ab608 100644
--- a/repos/base-hw/src/core/spec/arm/virtualization/vm_session_component.cc
+++ b/repos/base-hw/src/core/spec/arm/virtualization/vm_session_component.cc
@@ -144,7 +144,7 @@ Vm_session_component::~Vm_session_component()
 		if (!_map.any_block_addr(&out_addr))
 			break;
 
-		detach(out_addr);
+		detach_at(out_addr);
 	}
 
 	/* free region in allocator */
diff --git a/repos/base-hw/src/core/spec/arm_v7/trustzone/vm_session_component.cc b/repos/base-hw/src/core/spec/arm_v7/trustzone/vm_session_component.cc
index b0aa8ee57a..a189cc5d8c 100644
--- a/repos/base-hw/src/core/spec/arm_v7/trustzone/vm_session_component.cc
+++ b/repos/base-hw/src/core/spec/arm_v7/trustzone/vm_session_component.cc
@@ -92,7 +92,7 @@ Vm_session_component::~Vm_session_component()
 		if (!_map.any_block_addr(&out_addr))
 			break;
 
-		detach(out_addr);
+		detach_at(out_addr);
 	}
 
 	/* free region in allocator */
diff --git a/repos/base-hw/src/core/spec/x86_64/virtualization/vm_session_component.cc b/repos/base-hw/src/core/spec/x86_64/virtualization/vm_session_component.cc
index c454a190aa..1265959696 100644
--- a/repos/base-hw/src/core/spec/x86_64/virtualization/vm_session_component.cc
+++ b/repos/base-hw/src/core/spec/x86_64/virtualization/vm_session_component.cc
@@ -174,7 +174,7 @@ Vm_session_component::~Vm_session_component()
 		if (!_map.any_block_addr(&out_addr))
 			break;
 
-		detach(out_addr);
+		detach_at(out_addr);
 	}
 
 	/* free region in allocator */
diff --git a/repos/base-hw/src/core/vm_session_component.cc b/repos/base-hw/src/core/vm_session_component.cc
index b214823092..a1f4b781f9 100644
--- a/repos/base-hw/src/core/vm_session_component.cc
+++ b/repos/base-hw/src/core/vm_session_component.cc
@@ -42,7 +42,7 @@ void Vm_session_component::Vcpu::exception_handler(Signal_context_capability han unsigned const cpu = location.xpos(); - if (!kobj.create(cpu, ds_addr, Capability_space::capid(handler), id)) + if (!kobj.create(cpu, (void *)ds_addr, Capability_space::capid(handler), id)) warning("Cannot instantiate vm kernel object, invalid signal context?"); } @@ -65,7 +65,18 @@ Capability Vm_session_component::create_vcpu(Thread_cap try { vcpu.ds_cap = _constrained_md_ram_alloc.alloc(_ds_size(), Cache::UNCACHED); - vcpu.ds_addr = _alloc_vcpu_data(_region_map.attach(vcpu.ds_cap)); + + Region_map::Attr attr { }; + attr.writeable = true; + vcpu.ds_addr = _region_map.attach(vcpu.ds_cap, attr).convert( + [&] (Region_map::Range range) { return _alloc_vcpu_data(range.start); }, + [&] (Region_map::Attach_error) -> addr_t { + error("failed to attach VCPU data within core"); + if (vcpu.ds_cap.valid()) + _constrained_md_ram_alloc.free(vcpu.ds_cap); + _vcpus[_vcpu_id_alloc].destruct(); + return 0; + }); } catch (...) { if (vcpu.ds_cap.valid()) _constrained_md_ram_alloc.free(vcpu.ds_cap); diff --git a/repos/base-hw/src/core/vm_session_component.h b/repos/base-hw/src/core/vm_session_component.h index 8b12f56917..67635df996 100644 --- a/repos/base-hw/src/core/vm_session_component.h +++ b/repos/base-hw/src/core/vm_session_component.h @@ -55,9 +55,9 @@ class Core::Vm_session_component Kernel::Vm::Identity &id; Rpc_entrypoint &ep; Ram_dataspace_capability ds_cap { }; - Region_map::Local_addr ds_addr { nullptr }; - Kernel_object kobj {}; - Affinity::Location location {}; + addr_t ds_addr { }; + Kernel_object kobj { }; + Affinity::Location location { }; Vcpu(Kernel::Vm::Identity &id, Rpc_entrypoint &ep) : id(id), ep(ep) { @@ -94,14 +94,13 @@ class Core::Vm_session_component static size_t _ds_size(); static size_t _alloc_vcpu_data(Genode::addr_t ds_addr); - void * _alloc_table(); - void _attach(addr_t phys_addr, addr_t vm_addr, size_t size); + void *_alloc_table(); + void _attach(addr_t phys_addr, addr_t vm_addr, size_t size); /* helpers for vm_session_common.cc */ - void _attach_vm_memory(Dataspace_component &, addr_t, - Attach_attr); - void _detach_vm_memory(addr_t, size_t); - void _with_region(Region_map::Local_addr, auto const &); + void _attach_vm_memory(Dataspace_component &, addr_t, Attach_attr); + void _detach_vm_memory(addr_t, size_t); + void _with_region(addr_t, auto const &); protected: @@ -119,13 +118,15 @@ class Core::Vm_session_component Trace::Source_registry &); ~Vm_session_component(); + /********************************* ** Region_map_detach interface ** *********************************/ - void detach(Region_map::Local_addr) override; /* vm_session_common.cc */ - void unmap_region(addr_t, size_t) override; /* vm_session_common.cc */ - void reserve_and_flush(Region_map::Local_addr) override; /* vm_session_common.cc */ + void detach_at (addr_t) override; + void unmap_region (addr_t, size_t) override; + void reserve_and_flush (addr_t) override; + /************************** ** Vm session interface ** diff --git a/repos/base-hw/src/lib/base/thread_start.cc b/repos/base-hw/src/lib/base/thread_start.cc index bcd6bed15a..96150760ee 100644 --- a/repos/base-hw/src/lib/base/thread_start.cc +++ b/repos/base-hw/src/lib/base/thread_start.cc @@ -67,14 +67,18 @@ void Thread::_init_platform_thread(size_t weight, Type type) size_t const utcb_size = sizeof(Native_utcb); addr_t const stack_area = stack_area_virtual_base(); addr_t const utcb_new = (addr_t)&_stack->utcb() - stack_area; - Region_map * const rm 
= env_stack_area_region_map; /* remap initial main-thread UTCB according to stack-area spec */ - try { rm->attach_at(Hw::_main_thread_utcb_ds, utcb_new, utcb_size); } - catch(...) { - error("failed to re-map UTCB"); - while (1) ; - } + if (env_stack_area_region_map->attach(Hw::_main_thread_utcb_ds, { + .size = utcb_size, + .offset = { }, + .use_at = true, + .at = utcb_new, + .executable = { }, + .writeable = true + }).failed()) + error("failed to attach UTCB to local address space"); + /* adjust initial object state in case of a main thread */ native_thread().cap = Hw::_main_thread_cap; _thread_cap = main_thread_cap(); @@ -108,17 +112,25 @@ Thread::Start_result Thread::start() /* attach UTCB at top of stack */ size_t const size = sizeof(_stack->utcb()); - addr_t dst = Stack_allocator::addr_to_base(_stack) + - stack_virtual_size() - size - stack_area_virtual_base(); - try { - env_stack_area_region_map->attach_at(cpu_thread.utcb(), dst, size); - } catch (...) { - error("failed to attach userland stack"); - sleep_forever(); - } - /* start execution with initial IP and aligned SP */ - cpu_thread.start((addr_t)_thread_start, _stack->top()); - return Start_result::OK; + return env_stack_area_region_map->attach(cpu_thread.utcb(), { + .size = size, + .offset = { }, + .use_at = true, + .at = Stack_allocator::addr_to_base(_stack) + + stack_virtual_size() - size - stack_area_virtual_base(), + .executable = { }, + .writeable = true + }).convert( + [&] (Region_map::Range) { + /* start execution with initial IP and aligned SP */ + cpu_thread.start((addr_t)_thread_start, _stack->top()); + return Start_result::OK; + }, + [&] (Region_map::Attach_error) { + error("failed to attach userland stack"); + return Start_result::DENIED; + } + ); }, [&] (Cpu_session::Create_thread_error) { return Start_result::DENIED; } ); diff --git a/repos/base-linux/src/core/include/region_map_component.h b/repos/base-linux/src/core/include/region_map_component.h index 5d219d1e6b..18cc5be6ed 100644 --- a/repos/base-linux/src/core/include/region_map_component.h +++ b/repos/base-linux/src/core/include/region_map_component.h @@ -52,15 +52,14 @@ class Core::Region_map_component : public Rpc_object, void add_client(Rm_client &) { } void remove_client(Rm_client &) { } - Local_addr attach(Dataspace_capability, size_t, off_t, bool, - Local_addr, bool, bool) override { - return (addr_t)0; } + Attach_result attach(Dataspace_capability, Attr const &) override { + return Attach_error::REGION_CONFLICT; } - void detach(Local_addr) override { } + void detach(addr_t) override { } void fault_handler(Signal_context_capability) override { } - State state() override { return State(); } + Fault fault() override { return { }; } Dataspace_capability dataspace() override { return Dataspace_capability(); } diff --git a/repos/base-linux/src/core/stack_area.cc b/repos/base-linux/src/core/stack_area.cc index fc43ca216a..e59fd93a0f 100644 --- a/repos/base-linux/src/core/stack_area.cc +++ b/repos/base-linux/src/core/stack_area.cc @@ -42,38 +42,35 @@ class Stack_area_region_map : public Genode::Region_map /** * Attach backing store to stack area */ - Local_addr attach(Genode::Dataspace_capability, Genode::size_t size, - Genode::off_t, bool, Local_addr local_addr, bool, - bool) override + Attach_result attach(Genode::Dataspace_capability, Attr const &attr) override { using namespace Genode; /* convert stack-area-relative to absolute virtual address */ - addr_t addr = (addr_t)local_addr + stack_area_virtual_base(); + addr_t const addr = attr.at + 
stack_area_virtual_base(); /* use anonymous mmap for allocating stack backing store */ int flags = MAP_FIXED | MAP_ANONYMOUS | MAP_PRIVATE; int prot = PROT_READ | PROT_WRITE; - void *res = lx_mmap((void*)addr, size, prot, flags, -1, 0); + void *res = lx_mmap((void*)addr, attr.size, prot, flags, -1, 0); if ((addr_t)res != addr) - throw Region_conflict(); + return Attach_error::REGION_CONFLICT; - return local_addr; + return Range { .start = attr.at, .num_bytes = attr.size }; } - void detach(Local_addr local_addr) override + void detach(Genode::addr_t at) override { - Genode::warning("stack area detach from ", (void*)local_addr, + Genode::warning("stack area detach from ", (void*)at, " - not implemented"); } void fault_handler(Genode::Signal_context_capability) override { } - State state() override { return State(); } + Fault fault() override { return { }; } - Genode::Dataspace_capability dataspace() override { - return Genode::Dataspace_capability(); } + Genode::Dataspace_capability dataspace() override { return { }; } }; diff --git a/repos/base-linux/src/include/base/internal/local_rm_session.h b/repos/base-linux/src/include/base/internal/local_rm_session.h index 87c1dda141..7d6c7679cd 100644 --- a/repos/base-linux/src/include/base/internal/local_rm_session.h +++ b/repos/base-linux/src/include/base/internal/local_rm_session.h @@ -55,7 +55,7 @@ struct Genode::Local_rm_session : Rm_session, Local_session /* detach sub region map from local address space */ Region_map_mmap &rm = static_cast(*rm_ptr); rm.with_attached_sub_rm_base_ptr([&] (void *base_ptr) { - _local_rm.detach(base_ptr); }); + _local_rm.detach(addr_t(base_ptr)); }); Genode::destroy(_md_alloc, &rm); } diff --git a/repos/base-linux/src/include/base/internal/region_map_mmap.h b/repos/base-linux/src/include/base/internal/region_map_mmap.h index 5545b31dd0..345485ab28 100644 --- a/repos/base-linux/src/include/base/internal/region_map_mmap.h +++ b/repos/base-linux/src/include/base/internal/region_map_mmap.h @@ -126,54 +126,13 @@ class Genode::Region_map_mmap : public Region_map, public Dataspace ** Region map interface ** **************************/ - struct Attach_attr - { - size_t size; - off_t offset; - bool use_local_addr; - void *local_addr; - bool executable; - bool writeable; - }; + Attach_result attach(Dataspace_capability, Attr const &) override; - enum class Attach_error - { - INVALID_DATASPACE, REGION_CONFLICT, OUT_OF_RAM, OUT_OF_CAPS - }; - - using Attach_result = Attempt; - - Attach_result attach(Dataspace_capability, Attach_attr const &); - - Local_addr attach(Dataspace_capability ds, size_t size, off_t offset, - bool use_local_addr, Local_addr local_addr, - bool executable, bool writeable) override - { - Attach_attr const attr { .size = size, - .offset = offset, - .use_local_addr = use_local_addr, - .local_addr = local_addr, - .executable = executable, - .writeable = writeable }; - - return attach(ds, attr).convert( - [&] (Local_addr local_addr) { return local_addr; }, - [&] (Attach_error e) -> Local_addr { - switch (e) { - case Attach_error::INVALID_DATASPACE: throw Invalid_dataspace(); - case Attach_error::REGION_CONFLICT: throw Region_conflict(); - case Attach_error::OUT_OF_RAM: throw Out_of_ram(); - case Attach_error::OUT_OF_CAPS: throw Out_of_caps(); - } - throw Region_conflict(); - }); - } - - void detach(Local_addr) override; + void detach(addr_t) override; void fault_handler(Signal_context_capability) override { } - State state() override { return State(); } + Fault fault() override { return { }; } 
/************************* diff --git a/repos/base-linux/src/include/base/internal/region_registry.h b/repos/base-linux/src/include/base/internal/region_registry.h index 80c4a3d221..495c8259c8 100644 --- a/repos/base-linux/src/include/base/internal/region_registry.h +++ b/repos/base-linux/src/include/base/internal/region_registry.h @@ -28,7 +28,7 @@ class Genode::Region private: addr_t _start { 0 }; - off_t _offset { 0 }; + addr_t _offset { 0 }; Dataspace_capability _ds { }; size_t _size { 0 }; @@ -41,7 +41,7 @@ class Genode::Region Region() { } - Region(addr_t start, off_t offset, Dataspace_capability ds, size_t size) + Region(addr_t start, addr_t offset, Dataspace_capability ds, size_t size) : _start(start), _offset(offset), _ds(ds), _size(size) { } bool used() const { return _size > 0; } diff --git a/repos/base-linux/src/include/base/internal/stack_area.h b/repos/base-linux/src/include/base/internal/stack_area.h index e5e90cfa9b..8c405b9102 100644 --- a/repos/base-linux/src/include/base/internal/stack_area.h +++ b/repos/base-linux/src/include/base/internal/stack_area.h @@ -49,10 +49,8 @@ static inline void flush_stack_area() Genode::size_t const size = stack_area_virtual_size(); int ret; - if ((ret = lx_munmap(base, size)) < 0) { + if ((ret = lx_munmap(base, size)) < 0) error(__func__, ": failed ret=", ret); - throw Region_map::Region_conflict(); - } } @@ -71,10 +69,8 @@ static inline Genode::addr_t reserve_stack_area() if (addr_in != addr_out) { lx_munmap((void *)addr_out, size); error(__func__, ": failed addr_in=", addr_in, " addr_out=", addr_out); - throw Region_map::Region_conflict(); } - - return (addr_t) addr_out; + return (addr_t)addr_out; } #endif /* _INCLUDE__BASE__INTERNAL__STACK_AREA_H_ */ diff --git a/repos/base-linux/src/lib/base/attach_stack_area.cc b/repos/base-linux/src/lib/base/attach_stack_area.cc index 360a6b185d..c832e39d0d 100644 --- a/repos/base-linux/src/lib/base/attach_stack_area.cc +++ b/repos/base-linux/src/lib/base/attach_stack_area.cc @@ -23,9 +23,14 @@ using namespace Genode; void Platform::_attach_stack_area() { - pd._address_space.attach_at(pd._stack_area.dataspace(), - stack_area_virtual_base(), - stack_area_virtual_size()); + pd._address_space.attach(pd._stack_area.dataspace(), Region_map::Attr { + .size = stack_area_virtual_size(), + .offset = { }, + .use_at = true, + .at = stack_area_virtual_base(), + .executable = { }, + .writeable = true + }); env_stack_area_region_map = &pd._stack_area; env_stack_area_ram_allocator = &pd; diff --git a/repos/base-linux/src/lib/base/region_map_client.cc b/repos/base-linux/src/lib/base/region_map_client.cc index 2874c970d4..209e2b04a4 100644 --- a/repos/base-linux/src/lib/base/region_map_client.cc +++ b/repos/base-linux/src/lib/base/region_map_client.cc @@ -40,19 +40,14 @@ Region_map_client::Region_map_client(Capability session) : Rpc_client(session) { } -Region_map::Local_addr -Region_map_client::attach(Dataspace_capability ds, size_t size, - off_t offset, bool use_local_addr, - Region_map::Local_addr local_addr, - bool executable, bool writeable) +Region_map::Attach_result +Region_map_client::attach(Dataspace_capability ds, Attr const &attr) { - return _local(rpc_cap())->attach(ds, size, offset, use_local_addr, - local_addr, executable, writeable); + return _local(rpc_cap())->attach(ds, attr); } -void Region_map_client::detach(Local_addr local_addr) { - return _local(rpc_cap())->detach(local_addr); } +void Region_map_client::detach(addr_t at) { return _local(rpc_cap())->detach(at); } void 
Region_map_client::fault_handler(Signal_context_capability /*handler*/) @@ -66,7 +61,7 @@ void Region_map_client::fault_handler(Signal_context_capability /*handler*/) } -Region_map::State Region_map_client::state() { return _local(rpc_cap())->state(); } +Region_map::Fault Region_map_client::fault() { return _local(rpc_cap())->fault(); } Dataspace_capability Region_map_client::dataspace() diff --git a/repos/base-linux/src/lib/base/region_map_mmap.cc b/repos/base-linux/src/lib/base/region_map_mmap.cc index 291c21c47a..80411fc1ef 100644 --- a/repos/base-linux/src/lib/base/region_map_mmap.cc +++ b/repos/base-linux/src/lib/base/region_map_mmap.cc @@ -74,12 +74,12 @@ static Mutex &mutex() Region_map_mmap::Reserve_local_result -Region_map_mmap::_reserve_local(bool use_local_addr, addr_t local_addr, size_t size) +Region_map_mmap::_reserve_local(bool use_at, addr_t at, size_t size) { /* special handling for stack area */ - if (use_local_addr - && local_addr == stack_area_virtual_base() - && size == stack_area_virtual_size()) { + if (use_at + && at == stack_area_virtual_base() + && size == stack_area_virtual_size()) { /* * On the first request to reserve the stack area, we flush the @@ -96,19 +96,19 @@ Region_map_mmap::_reserve_local(bool use_local_addr, addr_t local_addr, size_t s } } inst; - return local_addr; + return at; } int const flags = MAP_ANONYMOUS | MAP_PRIVATE; int const prot = PROT_NONE; - void * const addr_in = use_local_addr ? (void *)local_addr : 0; + void * const addr_in = use_at ? (void *)at : 0; void * const addr_out = lx_mmap(addr_in, size, prot, flags, -1, 0); /* reserve at local address failed - unmap incorrect mapping */ - if (use_local_addr && addr_in != addr_out) + if (use_at && addr_in != addr_out) lx_munmap((void *)addr_out, size); - if ((use_local_addr && addr_in != addr_out) + if ((use_at && addr_in != addr_out) || (((long)addr_out < 0) && ((long)addr_out > -4095))) { error("_reserve_local: lx_mmap failed " "(addr_in=", addr_in, ",addr_out=", addr_out, "/", (long)addr_out, ")"); @@ -123,8 +123,8 @@ Region_map_mmap::Map_local_result Region_map_mmap::_map_local(Dataspace_capability ds, size_t size, addr_t offset, - bool use_local_addr, - addr_t local_addr, + bool use_at, + addr_t at, bool executable, bool overmap, bool writeable) @@ -136,7 +136,7 @@ Region_map_mmap::_map_local(Dataspace_capability ds, int const prot = PROT_READ | (writeable ? PROT_WRITE : 0) | (executable ? PROT_EXEC : 0); - void * const addr_in = use_local_addr ? (void*)local_addr : 0; + void * const addr_in = use_at ? 
(void*)at : 0; void * const addr_out = lx_mmap(addr_in, size, prot, flags, fd, offset); /* @@ -148,10 +148,10 @@ Region_map_mmap::_map_local(Dataspace_capability ds, lx_close(fd); /* attach at local address failed - unmap incorrect mapping */ - if (use_local_addr && addr_in != addr_out) + if (use_at && addr_in != addr_out) lx_munmap((void *)addr_out, size); - if ((use_local_addr && addr_in != addr_out) + if ((use_at && addr_in != addr_out) || (((long)addr_out < 0) && ((long)addr_out > -4095))) { error("_map_local: lx_mmap failed" "(addr_in=", addr_in, ", addr_out=", addr_out, "/", (long)addr_out, ") " @@ -192,28 +192,23 @@ struct Inhibit_tracing_guard Region_map_mmap::Attach_result -Region_map_mmap::attach(Dataspace_capability ds, Attach_attr const &attr) +Region_map_mmap::attach(Dataspace_capability ds, Attr const &attr) { Mutex::Guard mutex_guard(mutex()); Inhibit_tracing_guard it_guard { }; /* only support attach_at for sub RM sessions */ - if (_sub_rm && !attr.use_local_addr) { + if (_sub_rm && !attr.use_at) { error("Region_map_mmap::attach: attaching w/o local addr not supported"); return Attach_error::REGION_CONFLICT; } - if (attr.offset < 0) { - error("Region_map_mmap::attach: negative offset not supported"); - return Attach_error::REGION_CONFLICT; - } - if (!ds.valid()) return Attach_error::INVALID_DATASPACE; - size_t const remaining_ds_size = _dataspace_size(ds) > (addr_t)attr.offset - ? _dataspace_size(ds) - (addr_t)attr.offset : 0; + size_t const remaining_ds_size = _dataspace_size(ds) > attr.offset + ? _dataspace_size(ds) - attr.offset : 0; /* determine size of virtual address region */ size_t const region_size = attr.size ? min(remaining_ds_size, attr.size) @@ -248,12 +243,12 @@ Region_map_mmap::attach(Dataspace_capability ds, Attach_attr const &attr) * Check for the dataspace to not exceed the boundaries of the * sub RM session */ - if (region_size + (addr_t)attr.local_addr > _size) { + if (region_size + attr.at > _size) { error("Region_map_mmap::attach: dataspace does not fit in sub RM session"); return Attach_error::REGION_CONFLICT; } - if (!_add_to_rmap(Region((addr_t)attr.local_addr, attr.offset, ds, region_size))) + if (!_add_to_rmap(Region(attr.at, attr.offset, ds, region_size))) return Attach_error::REGION_CONFLICT; /* @@ -266,10 +261,10 @@ Region_map_mmap::attach(Dataspace_capability ds, Attach_attr const &attr) */ if (_is_attached()) _map_local(ds, region_size, attr.offset, - true, _base + (addr_t)attr.local_addr, + true, _base + attr.at, attr.executable, true, attr.writeable); - return Local_addr(attr.local_addr); + return Range { .start = attr.at, .num_bytes = region_size }; } else { @@ -296,8 +291,7 @@ Region_map_mmap::attach(Dataspace_capability ds, Attach_attr const &attr) * Reserve local address range that can hold the entire sub RM * session. */ - return _reserve_local(attr.use_local_addr, (addr_t)attr.local_addr, - region_size) + return _reserve_local(attr.use_at, attr.at, region_size) .convert( [&] (addr_t base) -> Attach_result @@ -328,7 +322,7 @@ Region_map_mmap::attach(Dataspace_capability ds, Attach_attr const &attr) attr.executable, true, attr.writeable); } - return Local_addr(rm->_base); + return Range { .start = rm->_base, .num_bytes = region_size }; }, [&] (Reserve_local_error e) { switch (e) { case Reserve_local_error::REGION_CONFLICT: break; } @@ -344,14 +338,13 @@ Region_map_mmap::attach(Dataspace_capability ds, Attach_attr const &attr) * Boring, a plain dataspace is attached to a root RM session. * Note, we do not overmap. 
*/ - return _map_local(ds, region_size, attr.offset, attr.use_local_addr, - (addr_t)attr.local_addr, attr.executable, false, - attr.writeable) + return _map_local(ds, region_size, attr.offset, attr.use_at, + attr.at, attr.executable, false, attr.writeable) .convert( [&] (void *addr) -> Attach_result { if (_add_to_rmap(Region((addr_t)addr, attr.offset, ds, region_size))) - return Local_addr(addr); + return Range { .start = (addr_t)addr, .num_bytes = region_size }; return Attach_error::REGION_CONFLICT; }, @@ -366,7 +359,7 @@ Region_map_mmap::attach(Dataspace_capability ds, Attach_attr const &attr) } -void Region_map_mmap::detach(Region_map::Local_addr local_addr) +void Region_map_mmap::detach(addr_t at) { Mutex::Guard mutex_guard(mutex()); @@ -381,14 +374,14 @@ void Region_map_mmap::detach(Region_map::Local_addr local_addr) * 2.2 we are attached to a root RM */ - Region region = _rmap.lookup(local_addr); + Region region = _rmap.lookup(at); if (!region.used()) return; /* * Remove meta data from region map */ - _rmap.remove_region(local_addr); + _rmap.remove_region(at); if (_sub_rm) { @@ -404,8 +397,8 @@ void Region_map_mmap::detach(Region_map::Local_addr local_addr) * needed. */ if (_is_attached()) { - lx_munmap((void *)((addr_t)local_addr + _base), region.size()); - _reserve_local(true, (addr_t)local_addr + _base, region.size()); + lx_munmap((void *)(at + _base), region.size()); + _reserve_local(true, at + _base, region.size()); } } else { @@ -417,7 +410,7 @@ void Region_map_mmap::detach(Region_map::Local_addr local_addr) * sub RM session. In both cases, we simply mark the local address * range as free. */ - lx_munmap(local_addr, region.size()); + lx_munmap((void *)at, region.size()); } /* diff --git a/repos/base-linux/src/test/lx_rmap/main.cc b/repos/base-linux/src/test/lx_rmap/main.cc index 7786b5fefc..3307666426 100644 --- a/repos/base-linux/src/test/lx_rmap/main.cc +++ b/repos/base-linux/src/test/lx_rmap/main.cc @@ -64,49 +64,64 @@ Main::Main(Env &env) : heap(env.ram(), env.rm()) log("blob region region ", Hex_range(beg, size), " size=", size); /* RAM dataspace attachment overlapping binary */ - try { - Ram_dataspace_capability ds(env.ram().alloc(size)); - - log("before RAM dataspace attach"); - env.rm().attach_at(ds, beg); - error("after RAM dataspace attach -- ERROR"); - env.parent().exit(-1); - } catch (Region_map::Region_conflict) { - log("OK caught Region_conflict exception"); - } + log("before RAM dataspace attach"); + env.rm().attach(env.ram().alloc(size), { + .size = { }, .offset = { }, + .use_at = true, .at = beg, + .executable = { }, .writeable = true + }).with_result( + [&] (Region_map::Range) { + error("after RAM dataspace attach -- ERROR"); + env.parent().exit(-1); }, + [&] (Region_map::Attach_error e) { + if (e == Region_map::Attach_error::REGION_CONFLICT) + log("OK caught Region_conflict exception"); } + ); /* empty managed dataspace overlapping binary */ - try { - Rm_connection rm_connection(env); - Region_map_client rm(rm_connection.create(size)); - Dataspace_capability ds(rm.dataspace()); + { + Rm_connection rm_connection(env); + Region_map_client rm(rm_connection.create(size)); log("before sub-RM dataspace attach"); - env.rm().attach_at(ds, beg); - error("after sub-RM dataspace attach -- ERROR"); - env.parent().exit(-1); - } catch (Region_map::Region_conflict) { - log("OK caught Region_conflict exception"); + env.rm().attach(rm.dataspace(), { + .size = { }, .offset = { }, + .use_at = true, .at = beg, + .executable = { }, .writeable = true + }).with_result( + [&] 
(Region_map::Range) { + error("after sub-RM dataspace attach -- ERROR"); + env.parent().exit(-1); }, + [&] (Region_map::Attach_error e) { + if (e == Region_map::Attach_error::REGION_CONFLICT) + log("OK caught Region_conflict exception"); } + ); } /* sparsely populated managed dataspace in free VM area */ - try { + { Rm_connection rm_connection(env); Region_map_client rm(rm_connection.create(0x100000)); - rm.attach_at(env.ram().alloc(0x1000), 0x1000); - - Dataspace_capability ds(rm.dataspace()); + rm.attach(env.ram().alloc(0x1000), { + .size = { }, .offset = { }, + .use_at = true, .at = 0x1000, + .executable = { }, .writeable = true + }); log("before populated sub-RM dataspace attach"); - char *addr = (char *)env.rm().attach(ds) + 0x1000; + char * const addr = env.rm().attach(rm.dataspace(), { + .size = { }, .offset = { }, + .use_at = { }, .at = { }, + .executable = { }, .writeable = true + }).convert( + [&] (Region_map::Range r) { return (char *)r.start + 0x1000; }, + [&] (Region_map::Attach_error) { return nullptr; } + ); log("after populated sub-RM dataspace attach / before touch"); char const val = *addr; *addr = 0x55; log("after touch (", val, "/", *addr, ")"); - } catch (Region_map::Region_conflict) { - error("Caught Region_conflict exception -- ERROR"); - env.parent().exit(-1); } env.parent().exit(0); } diff --git a/repos/base-nova/src/core/core_region_map.cc b/repos/base-nova/src/core/core_region_map.cc index 83e9b72c8e..30a3551434 100644 --- a/repos/base-nova/src/core/core_region_map.cc +++ b/repos/base-nova/src/core/core_region_map.cc @@ -49,58 +49,50 @@ static inline void * alloc_region(Dataspace_component &ds, const size_t size) return virt_addr; } -Region_map::Local_addr -Core_region_map::attach(Dataspace_capability ds_cap, size_t, - off_t offset, bool use_local_addr, - Region_map::Local_addr, - bool executable, bool writeable) +Region_map::Attach_result +Core_region_map::attach(Dataspace_capability ds_cap, Attr const &attr) { - auto lambda = [&] (Dataspace_component *ds_ptr) -> Local_addr { + return _ep.apply(ds_cap, [&] (Dataspace_component * const ds_ptr) -> Attach_result { + if (!ds_ptr) - throw Invalid_dataspace(); + return Attach_error::INVALID_DATASPACE; Dataspace_component &ds = *ds_ptr; - if (use_local_addr) { - error("Parameter 'use_local_addr' not supported within core"); - return nullptr; - } - - if (offset) { - error("Parameter 'offset' not supported within core"); - return nullptr; - } + /* attach attributes 'use_at' and 'offset' not supported within core */ + if (attr.use_at || attr.offset) + return Attach_error::REGION_CONFLICT; const size_t page_rounded_size = align_addr(ds.size(), get_page_size_log2()); /* allocate the virtual region contiguous for the dataspace */ void * virt_ptr = alloc_region(ds, page_rounded_size); if (!virt_ptr) - throw Out_of_ram(); + return Attach_error::OUT_OF_RAM; /* map it */ Nova::Utcb &utcb = *reinterpret_cast(Thread::myself()->utcb()); - const Nova::Rights rights(true, writeable && ds.writeable(), executable); + const Nova::Rights rights(true, attr.writeable && ds.writeable(), attr.executable); if (map_local(platform_specific().core_pd_sel(), utcb, ds.phys_addr(), reinterpret_cast(virt_ptr), page_rounded_size >> get_page_size_log2(), rights, true)) { platform().region_alloc().free(virt_ptr, page_rounded_size); - throw Out_of_ram(); + + return Attach_error::OUT_OF_RAM; } - return virt_ptr; - }; - return _ep.apply(ds_cap, lambda); + return Range { .start = addr_t(virt_ptr), .num_bytes = page_rounded_size }; + }); } -void 
Core_region_map::detach(Local_addr core_local_addr) +void Core_region_map::detach(addr_t core_local_addr) { - size_t size = platform_specific().region_alloc_size_at(core_local_addr); + size_t size = platform_specific().region_alloc_size_at((void *)core_local_addr); unmap_local(*reinterpret_cast(Thread::myself()->utcb()), core_local_addr, size >> get_page_size_log2()); - platform().region_alloc().free(core_local_addr); + platform().region_alloc().free((void *)core_local_addr); } diff --git a/repos/base-nova/src/core/include/vm_session_component.h b/repos/base-nova/src/core/include/vm_session_component.h index 08ef9b68ec..8ebc38b993 100644 --- a/repos/base-nova/src/core/include/vm_session_component.h +++ b/repos/base-nova/src/core/include/vm_session_component.h @@ -32,8 +32,8 @@ class Core::Vm_session_component : private Ram_quota_guard, private Cap_quota_guard, - public Rpc_object, - public Region_map_detach + public Rpc_object, + private Region_map_detach { private: @@ -141,7 +141,7 @@ class Core::Vm_session_component /* helpers for vm_session_common.cc */ void _attach_vm_memory(Dataspace_component &, addr_t, Attach_attr); void _detach_vm_memory(addr_t, size_t); - void _with_region(Region_map::Local_addr, auto const &); + void _with_region(addr_t, auto const &); protected: @@ -158,14 +158,16 @@ class Core::Vm_session_component Trace::Source_registry &); ~Vm_session_component(); + /********************************* ** Region_map_detach interface ** *********************************/ /* used on destruction of attached dataspaces */ - void detach(Region_map::Local_addr) override; /* vm_session_common.cc */ - void unmap_region(addr_t, size_t) override; /* vm_session_common.cc */ - void reserve_and_flush(Region_map::Local_addr) override; /* vm_session_common.cc */ + void detach_at(addr_t) override; + void unmap_region(addr_t, size_t) override; + void reserve_and_flush(addr_t) override; + /************************** ** Vm session interface ** @@ -174,8 +176,8 @@ class Core::Vm_session_component Capability create_vcpu(Thread_capability); void attach_pic(addr_t) override { /* unused on NOVA */ } - void attach(Dataspace_capability, addr_t, Attach_attr) override; /* vm_session_common.cc */ - void detach(addr_t, size_t) override; /* vm_session_common.cc */ + void attach(Dataspace_capability, addr_t, Attach_attr) override; + void detach(addr_t, size_t) override; }; diff --git a/repos/base-nova/src/core/vm_session_component.cc b/repos/base-nova/src/core/vm_session_component.cc index 1093db09e1..dd937ae156 100644 --- a/repos/base-nova/src/core/vm_session_component.cc +++ b/repos/base-nova/src/core/vm_session_component.cc @@ -408,7 +408,7 @@ Vm_session_component::~Vm_session_component() if (!_map.any_block_addr(&out_addr)) break; - detach(out_addr); + detach_at(out_addr); } if (_pd_sel && _pd_sel != invalid_sel()) diff --git a/repos/base-nova/src/lib/base/region_map_client.cc b/repos/base-nova/src/lib/base/region_map_client.cc index 36ffeb0a37..404c805f7f 100644 --- a/repos/base-nova/src/lib/base/region_map_client.cc +++ b/repos/base-nova/src/lib/base/region_map_client.cc @@ -21,25 +21,22 @@ Region_map_client::Region_map_client(Capability session) : Rpc_client(session) { } -Region_map::Local_addr -Region_map_client::attach(Dataspace_capability ds, size_t size, off_t offset, - bool use_local_addr, Local_addr local_addr, - bool executable, bool writeable) +Region_map::Attach_result +Region_map_client::attach(Dataspace_capability ds, Attr const &attr) { - return call(ds, size, offset, use_local_addr, 
local_addr, - executable, writeable); + return call(ds, attr); } -void Region_map_client::detach(Local_addr local_addr) { - call(local_addr); } +void Region_map_client::detach(addr_t at) { + call(at); } void Region_map_client::fault_handler(Signal_context_capability cap) { call(cap); } -Region_map::State Region_map_client::state() { return call(); } +Region_map::Fault Region_map_client::fault() { return call(); } Dataspace_capability Region_map_client::dataspace() diff --git a/repos/base-nova/src/test/nova/main.cc b/repos/base-nova/src/test/nova/main.cc index b38929f13a..5f62b9532b 100644 --- a/repos/base-nova/src/test/nova/main.cc +++ b/repos/base-nova/src/test/nova/main.cc @@ -22,6 +22,7 @@ #include #include +#include #include @@ -298,9 +299,9 @@ void test_pat(Genode::Env &env) enum { DS_ORDER = 12, PAGE_4K = 12 }; - Ram_dataspace_capability ds = env.ram().alloc (1 << (DS_ORDER + PAGE_4K), - WRITE_COMBINED); - addr_t map_addr = env.rm().attach(ds); + Attached_dataspace ds { env.rm(), env.ram().alloc (1 << (DS_ORDER + PAGE_4K), + WRITE_COMBINED) }; + addr_t const map_addr = addr_t(ds.local_addr()); enum { STACK_SIZE = 4096 }; @@ -309,7 +310,10 @@ void test_pat(Genode::Env &env) Genode::Rm_connection rm(env); Genode::Region_map_client rm_free_area(rm.create(1 << (DS_ORDER + PAGE_4K))); - addr_t remap_addr = env.rm().attach(rm_free_area.dataspace()); + + Attached_dataspace remap { env.rm(), rm_free_area.dataspace() }; + + addr_t const remap_addr = addr_t(remap.local_addr()); /* trigger mapping of whole area */ for (addr_t i = map_addr; i < map_addr + (1 << (DS_ORDER + PAGE_4K)); i += (1 << PAGE_4K)) @@ -435,7 +439,7 @@ class Pager : private Genode::Thread { private: Native_capability _call_to_map { }; - Ram_dataspace_capability _ds; + Attached_ram_dataspace _ds; static addr_t _ds_mem; void entry() override { } @@ -468,9 +472,10 @@ class Pager : private Genode::Thread { Pager(Genode::Env &env, Location location) : Thread(env, "pager", 0x1000, location, Weight(), env.cpu()), - _ds(env.ram().alloc (4096)) + _ds(env.ram(), env.rm(), 4096) { - _ds_mem = env.rm().attach(_ds); + _ds_mem = addr_t(_ds.local_addr()); + touch_read(reinterpret_cast(_ds_mem)); /* request creation of a 'local' EC */ @@ -503,7 +508,8 @@ class Cause_mapping : public Genode::Thread { Native_capability _call_to_map { }; Rm_connection _rm; Region_map_client _sub_rm; - addr_t _mem_nd; + Attached_dataspace _mem_ds; + addr_t _mem_nd = addr_t(_mem_ds.local_addr()); addr_t _mem_st; Nova::Rights const _mapping_rwx = {true, true, true}; @@ -518,7 +524,7 @@ class Cause_mapping : public Genode::Thread { _call_to_map(call_to_map), _rm(env), _sub_rm(_rm.create(0x2000)), - _mem_nd(env.rm().attach(_sub_rm.dataspace())), + _mem_ds(env.rm(), _sub_rm.dataspace()), _mem_st(mem_st) { } @@ -606,7 +612,13 @@ class Greedy : public Genode::Thread { for (unsigned i = 0; i < SUB_RM_SIZE / 4096; i++) { - addr_t map_to = _env.rm().attach(ds); + addr_t const map_to = _env.rm().attach(ds, { }).convert( + [&] (Region_map::Range r) { return r.start; }, + [&] (Region_map::Attach_error) { + error("Greedy: failed to attach RAM dataspace"); + return 0UL; + } + ); /* check that we really got the mapping */ touch_read(reinterpret_cast(map_to)); diff --git a/repos/base-okl4/src/core/core_region_map.cc b/repos/base-okl4/src/core/core_region_map.cc index 0a915f63bc..81b17c2f20 100644 --- a/repos/base-okl4/src/core/core_region_map.cc +++ b/repos/base-okl4/src/core/core_region_map.cc @@ -19,53 +19,42 @@ using namespace Core; -Region_map::Local_addr 
-Core_region_map::attach(Dataspace_capability ds_cap, size_t size, - off_t offset, bool use_local_addr, - Region_map::Local_addr, bool, bool) +Region_map::Attach_result +Core_region_map::attach(Dataspace_capability ds_cap, Attr const &attr) { - return _ep.apply(ds_cap, [&] (Dataspace_component *ds) -> void * { - + return _ep.apply(ds_cap, [&] (Dataspace_component *ds) -> Attach_result { if (!ds) - throw Invalid_dataspace(); - - if (size == 0) - size = ds->size(); + return Attach_error::INVALID_DATASPACE; + size_t const size = (attr.size == 0) ? ds->size() : attr.size; size_t const page_rounded_size = (size + get_page_size() - 1) & get_page_mask(); - if (use_local_addr) { - error("parameter 'use_local_addr' not supported within core"); - return nullptr; - } - - if (offset) { - error("parameter 'offset' not supported within core"); - return nullptr; - } + /* attach attributes 'use_at' and 'offset' not supported within core */ + if (attr.use_at || attr.offset) + return Attach_error::REGION_CONFLICT; /* allocate range in core's virtual address space */ Range_allocator &virt_alloc = platform().region_alloc(); - return virt_alloc.try_alloc(page_rounded_size).convert( + return virt_alloc.try_alloc(page_rounded_size).convert( - [&] (void *virt_addr) -> void * { + [&] (void *virt_addr) -> Attach_result { /* map the dataspace's physical pages to virtual memory */ unsigned num_pages = page_rounded_size >> get_page_size_log2(); if (!map_local(ds->phys_addr(), (addr_t)virt_addr, num_pages)) - return nullptr; + return Attach_error::INVALID_DATASPACE; - return virt_addr; + return Range { .start = addr_t(virt_addr), .num_bytes = page_rounded_size }; }, - [&] (Range_allocator::Alloc_error) -> void * { + [&] (Range_allocator::Alloc_error) { error("could not allocate virtual address range in core of size ", page_rounded_size); - return nullptr; + return Attach_error::REGION_CONFLICT; }); }); } -void Core_region_map::detach(Local_addr) { } +void Core_region_map::detach(addr_t) { } diff --git a/repos/base-sel4/src/core/core_region_map.cc b/repos/base-sel4/src/core/core_region_map.cc index 50cc2bd0f7..c0c70aab2c 100644 --- a/repos/base-sel4/src/core/core_region_map.cc +++ b/repos/base-sel4/src/core/core_region_map.cc @@ -19,59 +19,48 @@ using namespace Core; -Region_map::Local_addr -Core_region_map::attach(Dataspace_capability ds_cap, size_t size, off_t offset, - bool use_local_addr, Region_map::Local_addr, bool, bool) +Region_map::Attach_result +Core_region_map::attach(Dataspace_capability ds_cap, Attr const &attr) { - return _ep.apply(ds_cap, [&] (Dataspace_component *ds) -> Local_addr { - + return _ep.apply(ds_cap, [&] (Dataspace_component *ds) -> Attach_result { if (!ds) - throw Invalid_dataspace(); + return Attach_error::INVALID_DATASPACE; - if (size == 0) - size = ds->size(); + size_t const size = (attr.size == 0) ? 
ds->size() : attr.size; + size_t const page_rounded_size = (size + get_page_size() - 1) & get_page_mask(); - size_t page_rounded_size = (size + get_page_size() - 1) & get_page_mask(); - - if (use_local_addr) { - error(__func__, ": 'use_local_addr' not supported within core"); - return nullptr; - } - - if (offset) { - error(__func__, ": 'offset' not supported within core"); - return nullptr; - } + /* attach attributes 'use_at' and 'offset' not supported within core */ + if (attr.use_at || attr.offset) + return Attach_error::REGION_CONFLICT; /* allocate range in core's virtual address space */ - return platform().region_alloc().try_alloc(page_rounded_size).convert( + return platform().region_alloc().try_alloc(page_rounded_size).convert( [&] (void *virt_ptr) { /* map the dataspace's physical pages to core-local virtual addresses */ size_t num_pages = page_rounded_size >> get_page_size_log2(); map_local(ds->phys_addr(), (addr_t)virt_ptr, num_pages); - return virt_ptr; + return Range { .start = addr_t(virt_ptr), .num_bytes = page_rounded_size }; }, - [&] (Range_allocator::Alloc_error) -> Local_addr { + [&] (Range_allocator::Alloc_error) -> Attach_result { error("could not allocate virtual address range in core of size ", page_rounded_size); - return nullptr; + return Attach_error::REGION_CONFLICT; } ); }); } -void Core_region_map::detach(Local_addr core_local_addr) +void Core_region_map::detach(addr_t const at) { - size_t size = platform_specific().region_alloc_size_at(core_local_addr); + size_t const size = platform_specific().region_alloc_size_at((void *)at); - if (!unmap_local(core_local_addr, size >> get_page_size_log2())) { - error("could not unmap core virtual address ", - Hex(core_local_addr), " in ", __PRETTY_FUNCTION__); + if (!unmap_local(at, size >> get_page_size_log2())) { + error("could not unmap core virtual address ", Hex(at), " in ", __PRETTY_FUNCTION__); return; } - platform().region_alloc().free(core_local_addr); + platform().region_alloc().free((void *)at); } diff --git a/repos/base-sel4/src/core/include/vm_session_component.h b/repos/base-sel4/src/core/include/vm_session_component.h index 6de6cc75d6..3e7349093d 100644 --- a/repos/base-sel4/src/core/include/vm_session_component.h +++ b/repos/base-sel4/src/core/include/vm_session_component.h @@ -29,8 +29,8 @@ class Core::Vm_session_component : private Ram_quota_guard, private Cap_quota_guard, - public Rpc_object, - public Region_map_detach + public Rpc_object, + private Region_map_detach { private: @@ -85,7 +85,7 @@ class Core::Vm_session_component /* helpers for vm_session_common.cc */ void _attach_vm_memory(Dataspace_component &, addr_t, Attach_attr); void _detach_vm_memory(addr_t, size_t); - void _with_region(Region_map::Local_addr, auto const &); + void _with_region(addr_t, auto const &); protected: @@ -102,14 +102,16 @@ class Core::Vm_session_component Trace::Source_registry &); ~Vm_session_component(); + /********************************* ** Region_map_detach interface ** *********************************/ /* used on destruction of attached dataspaces */ - void detach(Region_map::Local_addr) override; /* vm_session_common.cc */ - void unmap_region(addr_t, size_t) override; /* vm_session_common.cc */ - void reserve_and_flush(Region_map::Local_addr) override; /* vm_session_common.cc */ + void detach_at (addr_t) override; + void unmap_region (addr_t, size_t) override; + void reserve_and_flush (addr_t) override; + /************************** ** Vm session interface ** @@ -118,8 +120,8 @@ class Core::Vm_session_component 
Capability create_vcpu(Thread_capability); void attach_pic(addr_t) override { /* unused on seL4 */ } - void attach(Dataspace_capability, addr_t, Attach_attr) override; /* vm_session_common.cc */ - void detach(addr_t, size_t) override; /* vm_session_common.cc */ + void attach(Dataspace_capability, addr_t, Attach_attr) override; + void detach(addr_t, size_t) override; }; #endif /* _CORE__VM_SESSION_COMPONENT_H_ */ diff --git a/repos/base-sel4/src/core/include/vm_space.h b/repos/base-sel4/src/core/include/vm_space.h index 4192022df1..80c09da314 100644 --- a/repos/base-sel4/src/core/include/vm_space.h +++ b/repos/base-sel4/src/core/include/vm_space.h @@ -408,7 +408,7 @@ class Core::Vm_space bool ok = true; for (size_t i = 0; i < num_pages; i++) { - off_t const offset = i << get_page_size_log2(); + addr_t const offset = i << get_page_size_log2(); if (_map_frame(from_phys + offset, to_virt + offset, attr, false /* host page table */, fn_unmap)) @@ -442,7 +442,7 @@ class Core::Vm_space Mutex::Guard guard(_mutex); for (size_t i = 0; i < num_pages; i++) { - off_t const offset = i << get_page_size_log2(); + addr_t const offset = i << get_page_size_log2(); _map_frame(from_phys + offset, guest_phys + offset, attr, true /* guest page table */, fn_unmap); @@ -457,7 +457,7 @@ class Core::Vm_space Mutex::Guard guard(_mutex); for (size_t i = 0; unmap_success && i < num_pages; i++) { - off_t const offset = i << get_page_size_log2(); + addr_t const offset = i << get_page_size_log2(); _page_table_registry.flush_page(virt + offset, [&] (Cap_sel const &idx, addr_t) { diff --git a/repos/base-sel4/src/core/spec/x86/vm_session_component.cc b/repos/base-sel4/src/core/spec/x86/vm_session_component.cc index 5967a16911..b0cda8f2e3 100644 --- a/repos/base-sel4/src/core/spec/x86/vm_session_component.cc +++ b/repos/base-sel4/src/core/spec/x86/vm_session_component.cc @@ -185,7 +185,7 @@ Vm_session_component::~Vm_session_component() if (!_map.any_block_addr(&out_addr)) break; - detach(out_addr); + detach_at(out_addr); } if (_vm_page_table.value()) @@ -300,7 +300,7 @@ void Vm_session_component::_attach_vm_memory(Dataspace_component &dsc, if (!_map.any_block_addr(&out_addr)) break; - detach(out_addr); + detach_at(out_addr); } _vm_space.map_guest(page.addr, page.hotspot, diff --git a/repos/base-sel4/src/core/stack_area.cc b/repos/base-sel4/src/core/stack_area.cc index cc6b627628..c687a89d16 100644 --- a/repos/base-sel4/src/core/stack_area.cc +++ b/repos/base-sel4/src/core/stack_area.cc @@ -56,45 +56,43 @@ class Stack_area_region_map : public Region_map /** * Allocate and attach on-the-fly backing store to the stack area */ - Local_addr attach(Dataspace_capability, size_t size, off_t, - bool, Local_addr local_addr, bool, bool) override + Attach_result attach(Dataspace_capability, Attr const &attr) override { using namespace Core; - size = round_page(size); + size_t const size = round_page(attr.size); + size_t const num_pages = size >> get_page_size_log2(); /* allocate physical memory */ Range_allocator &phys_alloc = Core::platform_specific().ram_alloc(); - size_t const num_pages = size >> get_page_size_log2(); addr_t const phys = Untyped_memory::alloc_pages(phys_alloc, num_pages); Untyped_memory::convert_to_page_frames(phys, num_pages); Dataspace_component &ds = *new (&_ds_slab) Dataspace_component(size, 0, phys, CACHED, true, 0); - addr_t const core_local_addr = - stack_area_virtual_base() + (addr_t)local_addr; + addr_t const core_local_addr = stack_area_virtual_base() + attr.at; if (!map_local(ds.phys_addr(), 
 			               core_local_addr,
 			               ds.size() >> get_page_size_log2())) {
 				error(__func__, ": could not map phys ", Hex(ds.phys_addr()), " "
 				      "at local ", Hex(core_local_addr));
-				return (addr_t)0;
+				return Attach_error::INVALID_DATASPACE;
 			}
 
 			ds.assign_core_local_addr((void*)core_local_addr);
 
-			return local_addr;
+			return Range { .start = attr.at, .num_bytes = size };
 		}
 
-		void detach(Local_addr local_addr) override
+		void detach(addr_t at) override
 		{
 			using namespace Core;
 
-			if ((addr_t)local_addr >= stack_area_virtual_size())
+			if (at >= stack_area_virtual_size())
 				return;
 
-			addr_t const detach = stack_area_virtual_base() + (addr_t)local_addr;
+			addr_t const detach = stack_area_virtual_base() + at;
 			addr_t const stack  = stack_virtual_size();
 			addr_t const pages  = ((detach & ~(stack - 1)) + stack - detach)
 			                      >> get_page_size_log2();
@@ -107,9 +105,9 @@ class Stack_area_region_map : public Region_map
 
 		void fault_handler(Signal_context_capability) override { }
 
-		State state() override { return State(); }
+		Fault fault() override { return { }; }
 
-		Dataspace_capability dataspace() override { return Dataspace_capability(); }
+		Dataspace_capability dataspace() override { return { }; }
 };
diff --git a/repos/base/include/base/attached_dataspace.h b/repos/base/include/base/attached_dataspace.h
index ade39b1a64..8adaba4d48 100644
--- a/repos/base/include/base/attached_dataspace.h
+++ b/repos/base/include/base/attached_dataspace.h
@@ -24,7 +24,8 @@ class Genode::Attached_dataspace : Noncopyable
 {
 	public:
 
-		typedef Region_map::Invalid_dataspace Invalid_dataspace;
+		struct Invalid_dataspace : Exception { };
+		struct Region_conflict   : Exception { };
 
 	private:
 
@@ -32,16 +33,25 @@ class Genode::Attached_dataspace : Noncopyable
 
 		Region_map &_rm;
 
-		size_t const _size = { Dataspace_client(_ds).size() };
-
-		void * _local_addr = nullptr;
-
 		Dataspace_capability _check(Dataspace_capability ds)
 		{
 			if (ds.valid())
 				return ds;
 
-			throw Region_map::Invalid_dataspace();
+			throw Invalid_dataspace();
+		}
+
+		Region_map::Attach_result _attached = _rm.attach(_ds, {
+			.size = { },       .offset    = { },
+			.use_at = { },     .at        = { },
+			.executable = { }, .writeable = true });
+
+		template <typename T>
+		T *_ptr() const
+		{
+			return _attached.convert<T *>(
+				[&] (Region_map::Range range)  { return (T *)range.start; },
+				[&] (Region_map::Attach_error) { return nullptr; });
 		}
 
 		/*
@@ -55,21 +65,30 @@ class Genode::Attached_dataspace : Noncopyable
 
 		/**
 		 * Constructor
 		 *
-		 * \throw Region_map::Region_conflict
-		 * \throw Region_map::Invalid_dataspace
+		 * \throw Region_conflict
+		 * \throw Invalid_dataspace
 		 * \throw Out_of_caps
 		 * \throw Out_of_ram
 		 */
 		Attached_dataspace(Region_map &rm, Dataspace_capability ds)
-		: _ds(_check(ds)), _rm(rm), _local_addr(_rm.attach(_ds)) { }
+		:
+			_ds(_check(ds)), _rm(rm)
+		{
+			_attached.with_error([&] (Region_map::Attach_error e) {
+				if (e == Region_map::Attach_error::OUT_OF_RAM)  throw Out_of_ram();
+				if (e == Region_map::Attach_error::OUT_OF_CAPS) throw Out_of_caps();
+				throw Region_conflict();
+			});
+		}
 
 		/**
 		 * Destructor
 		 */
 		~Attached_dataspace()
 		{
-			if (_local_addr)
-				_rm.detach(_local_addr);
+			_attached.with_result(
+				[&] (Region_map::Range range)  { _rm.detach(range.start); },
+				[&] (Region_map::Attach_error) { });
 		}
 
 		/**
 		 *
@@ -84,15 +103,20 @@ class Genode::Attached_dataspace : Noncopyable
 		 * A newly attached dataspace is untyped memory anyway.
 		 */
 		template <typename T>
-		T *local_addr() { return static_cast<T *>(_local_addr); }
+		T *local_addr() { return _ptr<T>(); }
 
 		template <typename T>
-		T const *local_addr() const { return static_cast<T const *>(_local_addr); }
+		T const *local_addr() const { return _ptr<T const>(); }
 
 		/**
 		 * Return size
 		 */
-		size_t size() const { return _size; }
+		size_t size() const
+		{
+			return _attached.convert<size_t>(
+				[&] (Region_map::Range range)  { return range.num_bytes; },
+				[&] (Region_map::Attach_error) { return 0UL; });
+		}
 
 		/**
 		 * Forget dataspace, thereby skipping the detachment on destruction
@@ -103,7 +127,7 @@ class Genode::Attached_dataspace : Noncopyable
 		 * removed the memory mappings of the dataspace. So we have to omit the
 		 * detach operation in '~Attached_dataspace'.
 		 */
-		void invalidate() { _local_addr = nullptr; }
+		void invalidate() { _attached = Region_map::Attach_error::INVALID_DATASPACE; }
 };
 
 #endif /* _INCLUDE__BASE__ATTACHED_DATASPACE_H_ */
diff --git a/repos/base/include/base/attached_io_mem_dataspace.h b/repos/base/include/base/attached_io_mem_dataspace.h
index 315761a2e0..4052444f11 100644
--- a/repos/base/include/base/attached_io_mem_dataspace.h
+++ b/repos/base/include/base/attached_io_mem_dataspace.h
@@ -15,7 +15,7 @@
 #define _INCLUDE__BASE__ATTACHED_IO_MEM_DATASPACE_H_
 
 #include
-#include
+#include
 
 namespace Genode { class Attached_io_mem_dataspace; }
 
@@ -34,11 +34,23 @@ class Genode::Attached_io_mem_dataspace
 
 		Region_map                 &_env_rm;
 		Io_mem_connection           _mmio;
 		Io_mem_dataspace_capability _ds;
-		Region_map::Local_addr      _local_addr;
+		addr_t const                _at;
 
-		static void *_with_sub_page_offset(void *local, addr_t io_base)
+		static addr_t _with_sub_page_offset(addr_t local, addr_t io_base)
 		{
-			return (void *)((addr_t)local | (io_base & (addr_t)0xfff));
+			return local | (io_base & 0xfffUL);
+		}
+
+		addr_t _attach()
+		{
+			return _env_rm.attach(_ds, {
+				.size = { },       .offset    = { },
+				.use_at = { },     .at        = { },
+				.executable = { }, .writeable = true
+			}).convert<addr_t>(
+				[&] (Region_map::Range range)  { return range.start; },
+				[&] (Region_map::Attach_error) { return 0UL; }
+			);
 		}
 
 	public:
 
@@ -55,8 +67,8 @@ class Genode::Attached_io_mem_dataspace
 		 * \throw Insufficient_cap_quota
 		 * \throw Out_of_ram
 		 * \throw Out_of_caps
-		 * \throw Region_map::Region_conflict
-		 * \throw Region_map::Invalid_dataspace
+		 * \throw Attached_dataspace::Region_conflict
+		 * \throw Attached_dataspace::Invalid_dataspace
 		 */
 		Attached_io_mem_dataspace(Env &env, Genode::addr_t base, Genode::size_t size,
 		                          bool write_combined = false)
@@ -64,13 +76,16 @@ class Genode::Attached_io_mem_dataspace
 			_env_rm(env.rm()),
 			_mmio(env, base, size, write_combined),
 			_ds(_mmio.dataspace()),
-			_local_addr(_with_sub_page_offset(env.rm().attach(_ds), base))
-		{ }
+			_at(_with_sub_page_offset(_attach(), base))
+		{
+			if (!_ds.valid()) throw Attached_dataspace::Invalid_dataspace();
+			if (!_at)         throw Attached_dataspace::Region_conflict();
+		}
 
 		/**
 		 * Destructor
 		 */
-		~Attached_io_mem_dataspace() { _env_rm.detach(_local_addr); }
+		~Attached_io_mem_dataspace() { if (_at) _env_rm.detach(_at); }
 
 		/**
 		 * Return capability of the used RAM dataspace
@@ -84,7 +99,7 @@ class Genode::Attached_io_mem_dataspace
 		 * A newly allocated I/O MEM dataspace is untyped memory anyway.
 		 */
 		template <typename T>
-		T *local_addr() { return static_cast<T *>(_local_addr); }
+		T *local_addr() { return reinterpret_cast<T *>(_at); }
 };
 
 #endif /* _INCLUDE__BASE__ATTACHED_IO_MEM_DATASPACE_H_ */
diff --git a/repos/base/include/base/attached_ram_dataspace.h b/repos/base/include/base/attached_ram_dataspace.h
index 10f1026cc8..f6b316ef94 100644
--- a/repos/base/include/base/attached_ram_dataspace.h
+++ b/repos/base/include/base/attached_ram_dataspace.h
@@ -17,6 +17,7 @@
 #include
 #include
 #include
+#include
 
 namespace Genode { class Attached_ram_dataspace; }
 
@@ -34,11 +35,11 @@ class Genode::Attached_ram_dataspace
 {
 	private:
 
-		size_t         _size = 0;
-		Ram_allocator *_ram  = nullptr;
-		Region_map    *_rm   = nullptr;
+		size_t                    _size = 0;
+		Ram_allocator            *_ram  = nullptr;
+		Region_map               *_rm   = nullptr;
 		Ram_dataspace_capability  _ds { };
-		void                     *_local_addr = nullptr;
+		addr_t                    _at = 0;
 		Cache const               _cache = CACHED;
 
 		template <typename T>
@@ -46,8 +47,8 @@ class Genode::Attached_ram_dataspace
 
 		void _detach_and_free_dataspace()
 		{
-			if (_local_addr)
-				_rm->detach(_local_addr);
+			if (_at)
+				_rm->detach(_at);
 
 			if (_ds.valid())
 				_ram->free(_ds);
@@ -57,13 +58,19 @@
 		{
 			if (!_size) return;
 
-			try {
-				_ds = _ram->alloc(_size, _cache);
-				_local_addr = _rm->attach(_ds);
-			}
-			/* revert allocation if attaching the dataspace failed */
-			catch (Region_map::Region_conflict)   { _ram->free(_ds); throw; }
-			catch (Region_map::Invalid_dataspace) { _ram->free(_ds); throw; }
+			_ds = _ram->alloc(_size, _cache);
+
+			Region_map::Attr attr { };
+			attr.writeable = true;
+			_rm->attach(_ds, attr).with_result(
+				[&] (Region_map::Range range) { _at = range.start; },
+				[&] (Region_map::Attach_error e) {
+					/* revert allocation if attaching the dataspace failed */
+					_ram->free(_ds);
+					if (e == Region_map::Attach_error::OUT_OF_RAM)  throw Out_of_ram();
+					if (e == Region_map::Attach_error::OUT_OF_CAPS) throw Out_of_caps();
+					throw Attached_dataspace::Region_conflict();
+				});
 
 			/*
 			 * Eagerly map dataspace if used for DMA
@@ -77,7 +84,7 @@
 			 */
 			if (_cache != CACHED) {
 				enum { PAGE_SIZE = 4096 };
-				unsigned char volatile *base = (unsigned char volatile *)_local_addr;
+				unsigned char volatile *base = (unsigned char volatile *)_at;
 				for (size_t i = 0; i < _size; i += PAGE_SIZE)
 					touch_read_write(base + i);
 			}
@@ -96,8 +103,8 @@
 		 *
 		 * \throw Out_of_ram
 		 * \throw Out_of_caps
-		 * \throw Region_map::Region_conflict
-		 * \throw Region_map::Invalid_dataspace
+		 * \throw Attached_dataspace::Region_conflict
+		 * \throw Attached_dataspace::Invalid_dataspace
 		 */
 		Attached_ram_dataspace(Ram_allocator &ram, Region_map &rm,
 		                       size_t size, Cache cache = CACHED)
@@ -125,7 +132,7 @@
 		 * untyped memory anyway.
*/ template - T *local_addr() const { return static_cast(_local_addr); } + T *local_addr() const { return reinterpret_cast(_at); } /** * Return size @@ -134,10 +141,10 @@ class Genode::Attached_ram_dataspace void swap(Attached_ram_dataspace &other) { - _swap(_size, other._size); - _swap(_ram, other._ram); - _swap(_ds, other._ds); - _swap(_local_addr, other._local_addr); + _swap(_size, other._size); + _swap(_ram, other._ram); + _swap(_ds, other._ds); + _swap(_at, other._at); } /** diff --git a/repos/base/include/region_map/client.h b/repos/base/include/region_map/client.h index 46fda09831..c484f517b2 100644 --- a/repos/base/include/region_map/client.h +++ b/repos/base/include/region_map/client.h @@ -41,16 +41,11 @@ class Genode::Region_map_client : public Rpc_client explicit Region_map_client(Capability); - Local_addr attach(Dataspace_capability ds, size_t size = 0, - off_t offset = 0, bool use_local_addr = false, - Local_addr local_addr = (void *)0, - bool executable = false, - bool writeable = true) override; - - void detach(Local_addr) override; - void fault_handler(Signal_context_capability) override; - State state() override; - Dataspace_capability dataspace() override; + Attach_result attach(Dataspace_capability, Attr const &) override; + void detach(addr_t) override; + void fault_handler(Signal_context_capability) override; + Fault fault() override; + Dataspace_capability dataspace() override; }; #endif /* _INCLUDE__REGION_MAP__CLIENT_H_ */ diff --git a/repos/base/include/region_map/region_map.h b/repos/base/include/region_map/region_map.h index e068e3d7db..2d8b3c95f7 100644 --- a/repos/base/include/region_map/region_map.h +++ b/repos/base/include/region_map/region_map.h @@ -27,7 +27,7 @@ namespace Genode { struct Region_map; } struct Genode::Region_map : Interface { /** - * State of region map + * Fault state of region map * * If a thread accesses a location outside the regions attached to its * address space, a fault occurs and gets signalled to the registered fault @@ -35,115 +35,46 @@ struct Genode::Region_map : Interface * fault address and fault type to resolve the fault. This information is * represented by this structure. */ - struct State + struct Fault { - enum Fault_type { READY, READ_FAULT, WRITE_FAULT, EXEC_FAULT }; + enum class Type { NONE, READ, WRITE, EXEC }; - /** - * Type of occurred fault - */ - Fault_type type = READY; - - /** - * Fault address - */ - addr_t addr = 0; - - /** - * Default constructor - */ - State() { } - - /** - * Constructor - */ - State(Fault_type fault_type, addr_t fault_addr) - : type(fault_type), addr(fault_addr) { } + Type type; /* type of occurred fault */ + addr_t addr; /* fault address unless fault is 'NONE' */ }; + struct Range { addr_t start; size_t num_bytes; }; /** - * Helper for tranferring the bit representation of a pointer as RPC - * argument. 
+ * Attributes for 'attach' */ - class Local_addr + struct Attr { - private: - - void *_ptr = nullptr; - - public: - - Local_addr(auto ptr) : _ptr((void *)ptr) { } - - Local_addr() { } - - template - operator T () { return (T)_ptr; } + size_t size; /* size of the mapping, or 0 for the whole dataspace */ + addr_t offset; /* page-aligned offset in dataspace */ + bool use_at; + addr_t at; /* designated start of region if 'use_at' is true */ + bool executable; + bool writeable; }; + enum class Attach_error { OUT_OF_RAM, OUT_OF_CAPS, REGION_CONFLICT, INVALID_DATASPACE }; - /********************* - ** Exception types ** - *********************/ - - class Invalid_dataspace : public Exception { }; - class Region_conflict : public Exception { }; - + using Attach_result = Attempt; /** * Map dataspace into region map * - * \param ds capability of dataspace to map - * \param size size of the locally mapped region - * default (0) is the whole dataspace - * \param offset start at offset in dataspace (page-aligned) - * \param use_local_addr if set to true, attach the dataspace at - * the specified 'local_addr' - * \param local_addr local destination address - * \param executable if the mapping should be executable - * \param writeable if the mapping should be writeable - * - * \throw Invalid_dataspace - * \throw Region_conflict - * \throw Out_of_ram RAM quota of meta-data backing store is exhausted - * \throw Out_of_caps cap quota of meta-data backing store is exhausted - * - * \return address of mapped dataspace within region map - * + * \param ds capability of dataspace to map + * \param attr mapping attributes + * \return address range of mapping within region map */ - virtual Local_addr attach(Dataspace_capability ds, - size_t size = 0, off_t offset = 0, - bool use_local_addr = false, - Local_addr local_addr = (void *)0, - bool executable = false, - bool writeable = true) = 0; - - /** - * Shortcut for attaching a dataspace at a predefined local address - */ - Local_addr attach_at(Dataspace_capability ds, addr_t local_addr, - size_t size = 0, off_t offset = 0) { - return attach(ds, size, offset, true, local_addr); } - - /** - * Shortcut for attaching a dataspace executable at local address - */ - Local_addr attach_executable(Dataspace_capability ds, addr_t local_addr, - size_t size = 0, off_t offset = 0) { - return attach(ds, size, offset, true, local_addr, true, false ); } - - /** - * Shortcut for attaching a dataspace will full rights at local address - */ - Local_addr attach_rwx(Dataspace_capability ds, addr_t local_addr, - size_t size = 0, off_t offset = 0) { - return attach(ds, size, offset, true, local_addr, true, true ); } + virtual Attach_result attach(Dataspace_capability ds, Attr const &attr) = 0; /** * Remove region from local address space */ - virtual void detach(Local_addr local_addr) = 0; + virtual void detach(addr_t) = 0; /** * Register signal handler for region-manager faults @@ -156,9 +87,9 @@ struct Genode::Region_map : Interface virtual void fault_handler(Signal_context_capability handler) = 0; /** - * Request current state of region map + * Request current fault state of region map */ - virtual State state() = 0; + virtual Fault fault() = 0; /** * Return dataspace representation of region map @@ -170,17 +101,13 @@ struct Genode::Region_map : Interface ** RPC declaration ** *********************/ - GENODE_RPC_THROW(Rpc_attach, Local_addr, attach, - GENODE_TYPE_LIST(Invalid_dataspace, Region_conflict, - Out_of_ram, Out_of_caps), - Dataspace_capability, size_t, off_t, bool, 
Local_addr, - bool, bool); - GENODE_RPC(Rpc_detach, void, detach, Local_addr); + GENODE_RPC(Rpc_attach, Attach_result, attach, Dataspace_capability, Attr const &); + GENODE_RPC(Rpc_detach, void, detach, addr_t); GENODE_RPC(Rpc_fault_handler, void, fault_handler, Signal_context_capability); - GENODE_RPC(Rpc_state, State, state); + GENODE_RPC(Rpc_fault, Fault, fault); GENODE_RPC(Rpc_dataspace, Dataspace_capability, dataspace); - GENODE_RPC_INTERFACE(Rpc_attach, Rpc_detach, Rpc_fault_handler, Rpc_state, + GENODE_RPC_INTERFACE(Rpc_attach, Rpc_detach, Rpc_fault_handler, Rpc_fault, Rpc_dataspace); }; diff --git a/repos/base/lib/symbols/ld b/repos/base/lib/symbols/ld index e2814e98a4..5c705f5af1 100644 --- a/repos/base/lib/symbols/ld +++ b/repos/base/lib/symbols/ld @@ -136,9 +136,9 @@ _ZN6Genode17Native_capability4_incEv T _ZN6Genode17Native_capabilityC1Ev T _ZN6Genode17Native_capabilityC2Ev T _ZN6Genode17Region_map_client13fault_handlerENS_10CapabilityINS_14Signal_contextEEE T -_ZN6Genode17Region_map_client5stateEv T -_ZN6Genode17Region_map_client6attachENS_10CapabilityINS_9DataspaceEEEmlbNS_10Region_map10Local_addrEbb T -_ZN6Genode17Region_map_client6detachENS_10Region_map10Local_addrE T +_ZN6Genode17Region_map_client5faultEv T +_ZN6Genode17Region_map_client6attachENS_10CapabilityINS_9DataspaceEEERKNS_10Region_map4AttrE T +_ZN6Genode17Region_map_client6detachEm T _ZN6Genode17Region_map_client9dataspaceEv T _ZN6Genode17Region_map_clientC1ENS_10CapabilityINS_10Region_mapEEE T _ZN6Genode17Region_map_clientC2ENS_10CapabilityINS_10Region_mapEEE T diff --git a/repos/base/src/core/core_region_map.cc b/repos/base/src/core/core_region_map.cc index b505d6f3a8..3a186e3946 100644 --- a/repos/base/src/core/core_region_map.cc +++ b/repos/base/src/core/core_region_map.cc @@ -21,18 +21,16 @@ using namespace Core; -Region_map::Local_addr -Core_region_map::attach(Dataspace_capability ds_cap, size_t, off_t, bool, - Region_map::Local_addr, bool, bool) +Region_map::Attach_result +Core_region_map::attach(Dataspace_capability ds_cap, Attr const &) { - auto lambda = [] (Dataspace_component *ds) { + return _ep.apply(ds_cap, [] (Dataspace_component *ds) -> Attach_result { if (!ds) - throw Invalid_dataspace(); + return Attach_error::INVALID_DATASPACE; - return (void *)ds->phys_addr(); - }; - return _ep.apply(ds_cap, lambda); + return Range { .start = ds->phys_addr(), .num_bytes = ds->size() }; + }); } -void Core_region_map::detach(Local_addr) { } +void Core_region_map::detach(addr_t) { } diff --git a/repos/base/src/core/dataspace_component.cc b/repos/base/src/core/dataspace_component.cc index b852dcf9a1..57d99fb631 100644 --- a/repos/base/src/core/dataspace_component.cc +++ b/repos/base/src/core/dataspace_component.cc @@ -31,6 +31,7 @@ void Dataspace_component::detached_from(Rm_region ®ion) _regions.remove(®ion); } + void Dataspace_component::detach_from_rm_sessions() { _mutex.acquire(); @@ -44,13 +45,14 @@ void Dataspace_component::detach_from_rm_sessions() * removes the current region from the '_regions' list. 
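To illustrate the interface declared above, here is a minimal sketch of attaching a dataspace and consuming the 'Attach_result', mirroring the patterns the patch itself uses in heap.cc and trace.cc. The names 'rm' and 'ds' are placeholders.

    /* hypothetical client code - attach 'ds' writeable at any free address */
    Genode::Region_map::Attr attr { };
    attr.writeable = true;

    rm.attach(ds, attr).with_result(
        [&] (Genode::Region_map::Range const range) {
            void * const ptr = (void *)range.start;
            /* ... use ptr, eventually call rm.detach(range.start) ... */
        },
        [&] (Genode::Region_map::Attach_error const e) {
            using Error = Genode::Region_map::Attach_error;
            if (e == Error::OUT_OF_RAM || e == Error::OUT_OF_CAPS) {
                /* upgrade session quota and retry */
            } else {
                /* REGION_CONFLICT or INVALID_DATASPACE */
            }
        });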
*/ _mutex.release(); - r->rm().reserve_and_flush((void *)r->base()); + r->rm().reserve_and_flush(r->base()); _mutex.acquire(); } _mutex.release(); } + Dataspace_component::~Dataspace_component() { detach_from_rm_sessions(); diff --git a/repos/base/src/core/include/core_region_map.h b/repos/base/src/core/include/core_region_map.h index fee4e1dedc..28623cd0e0 100644 --- a/repos/base/src/core/include/core_region_map.h +++ b/repos/base/src/core/include/core_region_map.h @@ -34,18 +34,11 @@ class Core::Core_region_map : public Region_map Core_region_map(Rpc_entrypoint &ep) : _ep(ep) { } - Local_addr attach(Dataspace_capability, size_t size = 0, - off_t offset=0, bool use_local_addr = false, - Local_addr local_addr = 0, - bool executable = false, - bool writeable = true) override; - - void detach(Local_addr) override; - - void fault_handler (Signal_context_capability) override { } - State state () override { return State(); } - - Dataspace_capability dataspace() override { return Dataspace_capability(); } + Attach_result attach(Dataspace_capability, Attr const &) override; + void detach(addr_t) override; + void fault_handler (Signal_context_capability) override { } + Fault fault() override { return { }; } + Dataspace_capability dataspace() override { return { }; } }; #endif /* _CORE__INCLUDE__CORE_REGION_MAP_H_ */ diff --git a/repos/base/src/core/include/region_map_component.h b/repos/base/src/core/include/region_map_component.h index 220ba2db5e..a62877cea0 100644 --- a/repos/base/src/core/include/region_map_component.h +++ b/repos/base/src/core/include/region_map_component.h @@ -40,7 +40,7 @@ #include namespace Core { - class Region_map_detach; + struct Region_map_detach; class Rm_region; struct Fault; class Cpu_thread_component; @@ -52,13 +52,19 @@ namespace Core { } -class Core::Region_map_detach : Interface +struct Core::Region_map_detach : Interface { - public: + virtual void detach_at(addr_t) = 0; - virtual void detach(Region_map::Local_addr) = 0; - virtual void unmap_region(addr_t base, size_t size) = 0; - virtual void reserve_and_flush(Region_map::Local_addr) = 0; + /** + * Unmap memory area from all address spaces referencing it + * + * \param base base address of region to unmap + * \param size size of region to unmap in bytes + */ + virtual void unmap_region(addr_t base, size_t size) = 0; + + virtual void reserve_and_flush(addr_t) = 0; }; @@ -81,7 +87,7 @@ class Core::Rm_region : public List::Element size_t size; bool write; bool exec; - off_t off; + addr_t off; bool dma; void print(Output &out) const @@ -110,7 +116,7 @@ class Core::Rm_region : public List::Element size_t size() const { return _attr.size; } bool write() const { return _attr.write; } bool executable() const { return _attr.exec; } - off_t offset() const { return _attr.off; } + addr_t offset() const { return _attr.off; } bool dma() const { return _attr.dma; } Region_map_detach &rm() const { return _rm; } @@ -213,7 +219,7 @@ class Core::Rm_faulter : Fifo::Element, Interface Pager_object &_pager_object; Mutex _mutex { }; Weak_ptr _faulting_region_map { }; - Region_map::State _fault_state { }; + Region_map::Fault _fault { }; friend class Fifo; @@ -231,8 +237,7 @@ class Core::Rm_faulter : Fifo::Element, Interface /** * Assign fault state */ - void fault(Region_map_component &faulting_region_map, - Region_map::State fault_state); + void fault(Region_map_component &faulting_region_map, Region_map::Fault); /** * Disassociate faulter from the faulted region map @@ -246,12 +251,12 @@ class Core::Rm_faulter : Fifo::Element, 
Interface * Return true if page fault occurred in specified address range */ bool fault_in_addr_range(addr_t addr, size_t size) { - return (_fault_state.addr >= addr) && (_fault_state.addr <= addr + size - 1); } + return (_fault.addr >= addr) && (_fault.addr <= addr + size - 1); } /** * Return fault state as exported via the region-map interface */ - Region_map::State fault_state() { return _fault_state; } + Region_map::Fault fault() { return _fault; } /** * Wake up faulter by answering the pending page fault @@ -412,7 +417,7 @@ class Core::Region_map_component : private Weak_object, * Called recursively when resolving a page fault in nested region maps. */ With_mapping_result _with_region_at_fault(Recursion_limit const recursion_limit, - Fault const &fault, + Core::Fault const &fault, auto const &resolved_fn, auto const &reflect_fn) { @@ -441,7 +446,7 @@ class Core::Region_map_component : private Weak_object, Rm_region const ®ion = *region_ptr; /* fault information relative to 'region' */ - Fault const relative_fault = fault.within_region(region); + Core::Fault const relative_fault = fault.within_region(region); Result result = Result::NO_REGION; @@ -476,7 +481,7 @@ class Core::Region_map_component : private Weak_object, } /* traverse into managed dataspace */ - Fault const sub_region_map_relative_fault = + Core::Fault const sub_region_map_relative_fault = relative_fault.within_sub_region_map(region.offset(), dataspace.size()); @@ -497,30 +502,25 @@ class Core::Region_map_component : private Weak_object, struct Attach_attr { - size_t size; - off_t offset; - bool use_local_addr; - addr_t local_addr; - bool executable; - bool writeable; - bool dma; + Attr attr; + bool dma; }; - Local_addr _attach(Dataspace_capability, Attach_attr); + Attach_result _attach(Dataspace_capability, Attach_attr); - void _with_region(Local_addr local_addr, auto const &fn) + void _with_region(addr_t at, auto const &fn) { /* read meta data for address */ - Rm_region *region_ptr = _map.metadata(local_addr); + Rm_region * const region_ptr = _map.metadata((void *)at); if (!region_ptr) { if (_diag.enabled) - warning("_with_region: no attachment at ", (void *)local_addr); + warning("_with_region: no attachment at ", (void *)at); return; } - if ((region_ptr->base() != static_cast(local_addr)) && _diag.enabled) - warning("_with_region: ", static_cast(local_addr), " is not " + if ((region_ptr->base() != static_cast(at)) && _diag.enabled) + warning("_with_region: ", reinterpret_cast(at), " is not " "the beginning of the region ", Hex(region_ptr->base())); fn(*region_ptr); @@ -530,16 +530,6 @@ class Core::Region_map_component : private Weak_object, public: - /* - * Unmaps a memory area from all address spaces referencing it. - * - * \param base base address of region to unmap - * \param size size of region to unmap - */ - void unmap_region(addr_t base, size_t size) override; - - void reserve_and_flush(Local_addr) override; - /** * Constructor * @@ -572,11 +562,9 @@ class Core::Region_map_component : private Weak_object, * for resolution. 
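Since 'state()' became 'fault()', a component that manages a nested region map would query the fault information roughly as sketched below; 'rm' and 'sigh' are assumed to be set up elsewhere and are not part of the patch.

    /* hypothetical fault handling for a managed region map */
    rm.fault_handler(sigh);   /* signal raised on unresolved page faults */

    /* ... in the signal handler ... */
    Genode::Region_map::Fault const fault = rm.fault();

    if (fault.type != Genode::Region_map::Fault::Type::NONE) {
        /* resolve the fault, e.g., by attaching backing store at fault.addr */
    }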
* * \param faulter faulting region-manager client - * \param pf_addr page-fault address - * \param pf_type type of page fault (read/write/execute) + * \param fault fault information */ - void fault(Rm_faulter &faulter, addr_t pf_addr, - Region_map::State::Fault_type pf_type); + void fault(Rm_faulter &faulter, Fault); /** * Dissolve faulter from region map @@ -596,16 +584,16 @@ class Core::Region_map_component : private Weak_object, * /param reflect_fn functor called to reflect a missing mapping * to user space if a fault handler is registered */ - With_mapping_result with_mapping_for_fault(Fault const &fault, - auto const &apply_fn, - auto const &reflect_fn) + With_mapping_result with_mapping_for_fault(Core::Fault const &fault, + auto const &apply_fn, + auto const &reflect_fn) { return _with_region_at_fault(Recursion_limit { 5 }, fault, - [&] (Rm_region const ®ion, Fault const ®ion_relative_fault) + [&] (Rm_region const ®ion, Core::Fault const ®ion_relative_fault) { With_mapping_result result = With_mapping_result::NO_REGION; region.with_dataspace([&] (Dataspace_component &dataspace) { - Fault const ram_relative_fault = + Core::Fault const ram_relative_fault = region_relative_fault.within_ram(region.offset(), dataspace.attr()); Log2_range src_range { ram_relative_fault.hotspot }; @@ -661,15 +649,23 @@ class Core::Region_map_component : private Weak_object, Attach_dma_result attach_dma(Dataspace_capability, addr_t); + /********************************* + ** Region_map_detach interface ** + *********************************/ + + void unmap_region (addr_t, size_t) override; + void detach_at (addr_t) override; + void reserve_and_flush (addr_t) override; + + /************************** ** Region map interface ** **************************/ - Local_addr attach (Dataspace_capability, size_t, off_t, - bool, Local_addr, bool, bool) override; - void detach (Local_addr) override; - void fault_handler (Signal_context_capability handler) override; - State state () override; + Attach_result attach (Dataspace_capability, Attr const &) override; + void detach (addr_t at) override { detach_at(at); } + void fault_handler (Signal_context_capability) override; + Fault fault () override; Dataspace_capability dataspace () override { return _ds_cap; } }; diff --git a/repos/base/src/core/include/trace/subject_registry.h b/repos/base/src/core/include/trace/subject_registry.h index 08f2a6d06e..00cdb5935a 100644 --- a/repos/base/src/core/include/trace/subject_registry.h +++ b/repos/base/src/core/include/trace/subject_registry.h @@ -110,13 +110,10 @@ class Core::Trace::Subject _size = size; /* copy content */ - void *src = local_rm.attach(from_ds), - *dst = local_rm.attach(_ds); + Attached_dataspace from { local_rm, from_ds }, + to { local_rm, _ds }; - Genode::memcpy(dst, src, _size); - - local_rm.detach(src); - local_rm.detach(dst); + Genode::memcpy(to.local_addr(), from.local_addr(), _size); } /** diff --git a/repos/base/src/core/region_map_component.cc b/repos/base/src/core/region_map_component.cc index f0f53b9324..830b87527f 100644 --- a/repos/base/src/core/region_map_component.cc +++ b/repos/base/src/core/region_map_component.cc @@ -62,12 +62,12 @@ Pager_object::Pager_result Rm_client::pager(Ipc_pager &pager) [&] (Region_map_component &rm, Fault const &fault) /* reflect to user space */ { - using Type = Region_map::State::Fault_type; - Type const type = (fault.access == Access::READ) ? Type::READ_FAULT - : (fault.access == Access::WRITE) ? 
Type::WRITE_FAULT - : Type::EXEC_FAULT; + using Type = Region_map::Fault::Type; + Type const type = (fault.access == Access::READ) ? Type::READ + : (fault.access == Access::WRITE) ? Type::WRITE + : Type::EXEC; /* deliver fault info to responsible region map */ - rm.fault(*this, fault.hotspot.value, type); + rm.fault(*this, { .type = type, .addr = fault.hotspot.value }); } ); @@ -118,12 +118,12 @@ Pager_object::Pager_result Rm_client::pager(Ipc_pager &pager) *************/ void Rm_faulter::fault(Region_map_component &faulting_region_map, - Region_map::State fault_state) + Region_map::Fault fault) { Mutex::Guard lock_guard(_mutex); _faulting_region_map = faulting_region_map.weak_ptr(); - _fault_state = fault_state; + _fault = fault; _pager_object.unresolved_page_fault_occurred(); } @@ -154,7 +154,7 @@ void Rm_faulter::continue_after_resolved_fault() _pager_object.wake_up(); _faulting_region_map = Weak_ptr(); - _fault_state = Region_map::State(); + _fault = { }; } @@ -162,55 +162,54 @@ void Rm_faulter::continue_after_resolved_fault() ** Region-map component ** **************************/ -Region_map::Local_addr -Region_map_component::_attach(Dataspace_capability ds_cap, Attach_attr const attr) +Region_map::Attach_result +Region_map_component::_attach(Dataspace_capability ds_cap, Attach_attr const core_attr) { + Attr const attr = core_attr.attr; + /* serialize access */ Mutex::Guard lock_guard(_mutex); - /* offset must be positive and page-aligned */ - if (attr.offset < 0 || align_addr(attr.offset, get_page_size_log2()) != attr.offset) - throw Region_conflict(); + /* offset must be page-aligned */ + if (align_addr(attr.offset, get_page_size_log2()) != attr.offset) + return Attach_error::REGION_CONFLICT; - auto lambda = [&] (Dataspace_component *dsc) { + auto lambda = [&] (Dataspace_component *dsc) -> Attach_result { using Alloc_error = Range_allocator::Alloc_error; /* check dataspace validity */ if (!dsc) - throw Invalid_dataspace(); + return Attach_error::INVALID_DATASPACE; unsigned const min_align_log2 = get_page_size_log2(); - size_t const off = attr.offset; - if (off >= dsc->size()) - throw Region_conflict(); + size_t const ds_size = dsc->size(); - size_t size = attr.size; + if (attr.offset >= ds_size) + return Attach_error::REGION_CONFLICT; - if (!size) - size = dsc->size() - attr.offset; + size_t size = attr.size ? 
attr.size : ds_size - attr.offset; /* work with page granularity */ size = align_addr(size, min_align_log2); /* deny creation of regions larger then the actual dataspace */ - if (dsc->size() < size + attr.offset) - throw Region_conflict(); + if (ds_size < size + attr.offset) + return Attach_error::REGION_CONFLICT; /* allocate region for attachment */ - void *attach_at = nullptr; - if (attr.use_local_addr) { - _map.alloc_addr(size, attr.local_addr).with_result( - [&] (void *ptr) { attach_at = ptr; }, - [&] (Range_allocator::Alloc_error error) { - switch (error) { - case Alloc_error::OUT_OF_RAM: throw Out_of_ram(); - case Alloc_error::OUT_OF_CAPS: throw Out_of_caps(); - case Alloc_error::DENIED: break; - } - throw Region_conflict(); - }); + bool at_defined = false; + addr_t at { }; + if (attr.use_at) { + Alloc_error error = Alloc_error::DENIED; + _map.alloc_addr(size, attr.at).with_result( + [&] (void *ptr) { at = addr_t(ptr); at_defined = true; }, + [&] (Alloc_error e) { error = e; }); + + if (error == Alloc_error::OUT_OF_RAM) return Attach_error::OUT_OF_RAM; + if (error == Alloc_error::OUT_OF_CAPS) return Attach_error::OUT_OF_CAPS; + } else { /* @@ -222,8 +221,7 @@ Region_map_component::_attach(Dataspace_capability ds_cap, Attach_attr const att if (align_log2 >= sizeof(void *)*8) align_log2 = min_align_log2; - bool done = false; - for (; !done && (align_log2 >= min_align_log2); align_log2--) { + for (; !at_defined && (align_log2 >= min_align_log2); align_log2--) { /* * Don't use an alignment higher than the alignment of the backing @@ -233,60 +231,52 @@ Region_map_component::_attach(Dataspace_capability ds_cap, Attach_attr const att if (((dsc->map_src_addr() + attr.offset) & ((1UL << align_log2) - 1)) != 0) continue; - /* try allocating the align region */ - _map.alloc_aligned(size, (unsigned)align_log2).with_result( + /* try allocating the aligned region */ + Alloc_error error = Alloc_error::DENIED; + _map.alloc_aligned(size, unsigned(align_log2)).with_result( + [&] (void *ptr) { at = addr_t(ptr); at_defined = true; }, + [&] (Alloc_error e) { error = e; }); - [&] (void *ptr) { - attach_at = ptr; - done = true; }, - - [&] (Range_allocator::Alloc_error error) { - switch (error) { - case Alloc_error::OUT_OF_RAM: throw Out_of_ram(); - case Alloc_error::OUT_OF_CAPS: throw Out_of_caps(); - case Alloc_error::DENIED: break; /* no fit */ - } - /* try smaller alignment in next iteration... 
*/ - }); + if (error == Alloc_error::OUT_OF_RAM) return Attach_error::OUT_OF_RAM; + if (error == Alloc_error::OUT_OF_CAPS) return Attach_error::OUT_OF_CAPS; } - - if (!done) - throw Region_conflict(); } + if (!at_defined) + return Attach_error::REGION_CONFLICT; Rm_region::Attr const region_attr { - .base = (addr_t)attach_at, + .base = at, .size = size, .write = attr.writeable, .exec = attr.executable, .off = attr.offset, - .dma = attr.dma, + .dma = core_attr.dma, }; /* store attachment info in meta data */ try { - _map.construct_metadata(attach_at, *dsc, *this, region_attr); + _map.construct_metadata((void *)at, *dsc, *this, region_attr); } catch (Allocator_avl_tpl::Assign_metadata_failed) { error("failed to store attachment info"); - throw Invalid_dataspace(); + return Attach_error::INVALID_DATASPACE; } /* inform dataspace about attachment */ - Rm_region * const region_ptr = _map.metadata(attach_at); + Rm_region * const region_ptr = _map.metadata((void *)at); if (region_ptr) dsc->attached_to(*region_ptr); /* check if attach operation resolves any faulting region-manager clients */ _faulters.for_each([&] (Rm_faulter &faulter) { - if (faulter.fault_in_addr_range((addr_t)attach_at, size)) { + if (faulter.fault_in_addr_range(at, size)) { _faulters.remove(faulter); faulter.continue_after_resolved_fault(); } }); - return attach_at; + return Range { .start = at, .num_bytes = size }; }; return _ds_ep.apply(ds_cap, lambda); @@ -351,23 +341,10 @@ void Region_map_component::unmap_region(addr_t base, size_t size) } -Region_map::Local_addr -Region_map_component::attach(Dataspace_capability ds_cap, size_t size, - off_t offset, bool use_local_addr, - Region_map::Local_addr local_addr, - bool executable, bool writeable) +Region_map::Attach_result +Region_map_component::attach(Dataspace_capability ds_cap, Attr const &attr) { - Attach_attr const attr { - .size = size, - .offset = offset, - .use_local_addr = use_local_addr, - .local_addr = local_addr, - .executable = executable, - .writeable = writeable, - .dma = false, - }; - - return _attach(ds_cap, attr); + return _attach(ds_cap, { .attr = attr, .dma = false }); } @@ -375,25 +352,30 @@ Region_map_component::Attach_dma_result Region_map_component::attach_dma(Dataspace_capability ds_cap, addr_t at) { Attach_attr const attr { - .size = 0, - .offset = 0, - .use_local_addr = true, - .local_addr = at, - .executable = false, - .writeable = true, + .attr = { + .size = { }, + .offset = { }, + .use_at = true, + .at = at, + .executable = false, + .writeable = true, + }, .dma = true, }; using Attach_dma_error = Pd_session::Attach_dma_error; - try { - _attach(ds_cap, attr); - return Pd_session::Attach_dma_ok(); - } - catch (Invalid_dataspace) { return Attach_dma_error::DENIED; } - catch (Region_conflict) { return Attach_dma_error::DENIED; } - catch (Out_of_ram) { return Attach_dma_error::OUT_OF_RAM; } - catch (Out_of_caps) { return Attach_dma_error::OUT_OF_CAPS; } + return _attach(ds_cap, attr).convert( + [&] (Range) { return Pd_session::Attach_dma_ok(); }, + [&] (Attach_error e) { + switch (e) { + case Attach_error::OUT_OF_RAM: return Attach_dma_error::OUT_OF_RAM; + case Attach_error::OUT_OF_CAPS: return Attach_dma_error::OUT_OF_CAPS; + case Attach_error::REGION_CONFLICT: break; + case Attach_error::INVALID_DATASPACE: break; + } + return Attach_dma_error::DENIED; + }); } @@ -448,23 +430,23 @@ void Region_map_component::_reserve_and_flush_unsynchronized(Rm_region ®ion) /* * Flush the region, but keep it reserved until 'detach()' is called. 
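Where callers previously used 'attach_at' and caught exceptions, a fixed-address attachment now sets 'use_at'/'at' and receives 'REGION_CONFLICT' as a value, analogous to the attached_stack_area.h and thread.cc hunks below. In this sketch, 'rm', 'ds_cap', 'fixed_addr', and 'bytes' are placeholders.

    /* hypothetical client code - attach at a caller-chosen address */
    rm.attach(ds_cap, Genode::Region_map::Attr {
        .size       = bytes,
        .offset     = { },
        .use_at     = true,
        .at         = fixed_addr,
        .executable = false,
        .writeable  = true
    }).with_result(
        [&] (Genode::Region_map::Range const r) { /* r.start equals fixed_addr */ },
        [&] (Genode::Region_map::Attach_error)  { /* e.g. REGION_CONFLICT      */ });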
*/ -void Region_map_component::reserve_and_flush(Local_addr local_addr) +void Region_map_component::reserve_and_flush(addr_t const at) { /* serialize access */ Mutex::Guard lock_guard(_mutex); - _with_region(local_addr, [&] (Rm_region ®ion) { + _with_region(at, [&] (Rm_region ®ion) { _reserve_and_flush_unsynchronized(region); }); } -void Region_map_component::detach(Local_addr local_addr) +void Region_map_component::detach_at(addr_t const at) { /* serialize access */ Mutex::Guard lock_guard(_mutex); - _with_region(local_addr, [&] (Rm_region ®ion) { + _with_region(at, [&] (Rm_region ®ion) { if (!region.reserved()) _reserve_and_flush_unsynchronized(region); /* free the reserved region */ @@ -490,11 +472,10 @@ void Region_map_component::remove_client(Rm_client &rm_client) } -void Region_map_component::fault(Rm_faulter &faulter, addr_t pf_addr, - Region_map::State::Fault_type pf_type) +void Region_map_component::fault(Rm_faulter &faulter, Region_map::Fault fault) { /* remember fault state in faulting thread */ - faulter.fault(*this, Region_map::State(pf_type, pf_addr)); + faulter.fault(*this, fault); /* enqueue faulter */ _faulters.enqueue(faulter); @@ -520,17 +501,15 @@ void Region_map_component::fault_handler(Signal_context_capability sigh) } -Region_map::State Region_map_component::state() +Region_map::Fault Region_map_component::fault() { /* serialize access */ Mutex::Guard lock_guard(_mutex); - /* return ready state if there are not current faulters */ - Region_map::State result; - - /* otherwise return fault information regarding the first faulter */ + /* return fault information regarding the first faulter */ + Region_map::Fault result { }; _faulters.head([&] (Rm_faulter &faulter) { - result = faulter.fault_state(); }); + result = faulter.fault(); }); return result; } @@ -609,7 +588,7 @@ Region_map_component::~Region_map_component() break; } - detach(out_addr); + detach_at(out_addr); } /* revoke dataspace representation */ diff --git a/repos/base/src/core/stack_area.cc b/repos/base/src/core/stack_area.cc index f8791b334d..a8116e91d1 100644 --- a/repos/base/src/core/stack_area.cc +++ b/repos/base/src/core/stack_area.cc @@ -65,52 +65,53 @@ class Stack_area_region_map : public Region_map /** * Allocate and attach on-the-fly backing store to stack area */ - Local_addr attach(Dataspace_capability, size_t size, off_t, - bool, Local_addr local_addr, bool, bool) override + Attach_result attach(Dataspace_capability, Attr const &attr) override { /* allocate physical memory */ - size = round_page(size); + size_t const size = round_page(attr.size); Range_allocator &phys = platform_specific().ram_alloc(); - return phys.alloc_aligned(size, get_page_size_log2()).convert( + return phys.alloc_aligned(size, get_page_size_log2()).convert( - [&] (void *phys_ptr) { + [&] (void *phys_ptr) -> Attach_result { - addr_t const phys_base = (addr_t)phys_ptr; + try { + addr_t const phys_base = (addr_t)phys_ptr; - Dataspace_component &ds = *new (&_ds_slab) - Dataspace_component(size, 0, (addr_t)phys_base, CACHED, true, 0); + Dataspace_component &ds = *new (&_ds_slab) + Dataspace_component(size, 0, (addr_t)phys_base, CACHED, true, 0); - addr_t const core_local_addr = stack_area_virtual_base() - + (addr_t)local_addr; + addr_t const core_local_addr = stack_area_virtual_base() + + attr.at; - if (!map_local(ds.phys_addr(), core_local_addr, - ds.size() >> get_page_size_log2())) { - error("could not map phys ", Hex(ds.phys_addr()), - " at local ", Hex(core_local_addr)); + if (!map_local(ds.phys_addr(), core_local_addr, + 
ds.size() >> get_page_size_log2())) { + error("could not map phys ", Hex(ds.phys_addr()), + " at local ", Hex(core_local_addr)); - phys.free(phys_ptr); - return Local_addr { (addr_t)0 }; + phys.free(phys_ptr); + return Attach_error::INVALID_DATASPACE; + } + + ds.assign_core_local_addr((void*)core_local_addr); + + return Range { .start = attr.at, .num_bytes = size }; } - - ds.assign_core_local_addr((void*)core_local_addr); - - return local_addr; + catch (Out_of_ram) { return Attach_error::OUT_OF_RAM; } + catch (Out_of_caps) { return Attach_error::OUT_OF_CAPS; } }, [&] (Range_allocator::Alloc_error) { error("could not allocate backing store for new stack"); - return (addr_t)0; }); + return Attach_error::REGION_CONFLICT; }); } - void detach(Local_addr local_addr) override + void detach(addr_t const at) override { - using Genode::addr_t; - - if ((addr_t)local_addr >= stack_area_virtual_size()) + if (at >= stack_area_virtual_size()) return; - addr_t const detach = stack_area_virtual_base() + (addr_t)local_addr; + addr_t const detach = stack_area_virtual_base() + at; addr_t const stack = stack_virtual_size(); addr_t const pages = ((detach & ~(stack - 1)) + stack - detach) >> get_page_size_log2(); @@ -120,9 +121,9 @@ class Stack_area_region_map : public Region_map void fault_handler(Signal_context_capability) override { } - State state() override { return State(); } + Fault fault() override { return { }; } - Dataspace_capability dataspace() override { return Dataspace_capability(); } + Dataspace_capability dataspace() override { return { }; } }; diff --git a/repos/base/src/core/vm_session_common.cc b/repos/base/src/core/vm_session_common.cc index e5c8290d81..23c71f5fa0 100644 --- a/repos/base/src/core/vm_session_common.cc +++ b/repos/base/src/core/vm_session_common.cc @@ -65,6 +65,8 @@ void Vm_session_component::attach(Dataspace_capability const cap, using Alloc_error = Range_allocator::Alloc_error; + Region_map_detach &rm_detach = *this; + _map.alloc_addr(attribute.size, guest_phys).with_result( [&] (void *) { @@ -75,14 +77,14 @@ void Vm_session_component::attach(Dataspace_capability const cap, .size = attribute.size, .write = dsc.writeable() && attribute.writeable, .exec = attribute.executable, - .off = (off_t)attribute.offset, + .off = attribute.offset, .dma = false, }; /* store attachment info in meta data */ try { _map.construct_metadata((void *)guest_phys, - dsc, *this, region_attr); + dsc, rm_detach, region_attr); } catch (Allocator_avl_tpl::Assign_metadata_failed) { error("failed to store attachment info"); @@ -149,7 +151,7 @@ void Vm_session_component::detach(addr_t guest_phys, size_t size) if (region) { iteration_size = region->size(); - detach(region->base()); + detach_at(region->base()); } if (addr >= guest_phys_end - (iteration_size - 1)) @@ -160,10 +162,10 @@ void Vm_session_component::detach(addr_t guest_phys, size_t size) } -void Vm_session_component::_with_region(Region_map::Local_addr addr, +void Vm_session_component::_with_region(addr_t const addr, auto const &fn) { - Rm_region *region = _map.metadata(addr); + Rm_region *region = _map.metadata((void *)addr); if (region) fn(*region); else @@ -171,7 +173,7 @@ void Vm_session_component::_with_region(Region_map::Local_addr addr, } -void Vm_session_component::detach(Region_map::Local_addr addr) +void Vm_session_component::detach_at(addr_t const addr) { _with_region(addr, [&] (Rm_region ®ion) { @@ -190,7 +192,7 @@ void Vm_session_component::unmap_region(addr_t base, size_t size) } -void 
Vm_session_component::reserve_and_flush(Region_map::Local_addr addr) +void Vm_session_component::reserve_and_flush(addr_t const addr) { _with_region(addr, [&] (Rm_region ®ion) { diff --git a/repos/base/src/include/base/internal/attached_stack_area.h b/repos/base/src/include/base/internal/attached_stack_area.h index f48949a5ba..5347485559 100644 --- a/repos/base/src/include/base/internal/attached_stack_area.h +++ b/repos/base/src/include/base/internal/attached_stack_area.h @@ -33,11 +33,16 @@ struct Genode::Attached_stack_area : Expanding_region_map_client Expanding_region_map_client(parent, pd, Pd_session_client(pd).stack_area(), Parent::Env::pd()) { - Region_map_client address_space(Pd_session_client(pd).address_space()); + Region_map_client local_rm(Pd_session_client(pd).address_space()); - address_space.attach_at(Expanding_region_map_client::dataspace(), - stack_area_virtual_base(), - stack_area_virtual_size()); + local_rm.attach(Expanding_region_map_client::dataspace(), Region_map::Attr { + .size = stack_area_virtual_size(), + .offset = { }, + .use_at = true, + .at = stack_area_virtual_base(), + .executable = false, + .writeable = true + }); } }; diff --git a/repos/base/src/include/base/internal/expanding_region_map_client.h b/repos/base/src/include/base/internal/expanding_region_map_client.h index bca104edd3..b06b37f85d 100644 --- a/repos/base/src/include/base/internal/expanding_region_map_client.h +++ b/repos/base/src/include/base/internal/expanding_region_map_client.h @@ -34,22 +34,15 @@ struct Genode::Expanding_region_map_client : Region_map_client Parent::Client::Id pd_id) : Region_map_client(rm), _pd_client(parent, pd, pd_id) { } - Local_addr attach(Dataspace_capability ds, size_t size, off_t offset, - bool use_local_addr, Local_addr local_addr, - bool executable, bool writeable) override + Attach_result attach(Dataspace_capability ds, Attr const &attr) override { - return retry( - [&] () { - return retry( - [&] { - return Region_map_client::attach(ds, size, offset, - use_local_addr, - local_addr, - executable, - writeable); }, - [&] { _pd_client.upgrade_caps(2); }); - }, - [&] () { _pd_client.upgrade_ram(8*1024); }); + for (;;) { + Attach_result const result = Region_map_client::attach(ds, attr); + if (result == Attach_error::OUT_OF_RAM) _pd_client.upgrade_ram(8*1024); + else if (result == Attach_error::OUT_OF_CAPS) _pd_client.upgrade_caps(2); + else + return result; + } } }; diff --git a/repos/base/src/lib/base/child.cc b/repos/base/src/lib/base/child.cc index 4bc3775ed0..06dcc1b089 100644 --- a/repos/base/src/lib/base/child.cc +++ b/repos/base/src/lib/base/child.cc @@ -759,8 +759,6 @@ void Child::_try_construct_env_dependent_members() catch (Out_of_caps) { _error("out of caps during ELF loading"); } catch (Process::Missing_dynamic_linker) { _error("dynamic linker unavailable"); } catch (Process::Invalid_executable) { _error("invalid ELF executable"); } - catch (Region_map::Invalid_dataspace) { _error("ELF loading failed (Invalid_dataspace)"); } - catch (Region_map::Region_conflict) { _error("ELF loading failed (Region_conflict)"); } } diff --git a/repos/base/src/lib/base/child_process.cc b/repos/base/src/lib/base/child_process.cc index 59235dc07f..3cc428b0d0 100644 --- a/repos/base/src/lib/base/child_process.cc +++ b/repos/base/src/lib/base/child_process.cc @@ -41,12 +41,17 @@ Child::Process::Loaded_executable::Loaded_executable(Type type, throw Missing_dynamic_linker(); } - addr_t elf_addr = 0; - try { elf_addr = local_rm.attach(ldso_ds); } - catch 
(Region_map::Invalid_dataspace) { - error("dynamic linker is an invalid dataspace"); throw; } - catch (Region_map::Region_conflict) { - error("region conflict while attaching dynamic linker"); throw; } + addr_t const elf_addr = local_rm.attach(ldso_ds, Region_map::Attr{}).convert( + [&] (Region_map::Range range) { return range.start; }, + [&] (Region_map::Attach_error const e) -> addr_t { + if (e == Region_map::Attach_error::INVALID_DATASPACE) + error("dynamic linker is an invalid dataspace"); + if (e == Region_map::Attach_error::REGION_CONFLICT) + error("region conflict while attaching dynamic linker"); + return 0; }); + + if (!elf_addr) + return; Elf_binary elf(elf_addr); @@ -66,7 +71,6 @@ Child::Process::Loaded_executable::Loaded_executable(Type type, size_t const size = seg.mem_size(); bool const write = seg.flags().w; - bool const exec = seg.flags().x; if (write) { @@ -89,14 +93,17 @@ Child::Process::Loaded_executable::Loaded_executable(Type type, error("allocation of read-write segment failed"); throw; }; /* attach dataspace */ - void *base; - try { base = local_rm.attach(ds_cap); } - catch (Region_map::Invalid_dataspace) { - error("attempt to attach invalid segment dataspace"); throw; } - catch (Region_map::Region_conflict) { - error("region conflict while locally attaching ELF segment"); throw; } + Region_map::Attr attr { }; + attr.writeable = true; + void * const ptr = local_rm.attach(ds_cap, attr).convert( + [&] (Region_map::Range range) { return (void *)range.start; }, + [&] (Region_map::Attach_error const e) { + if (e == Region_map::Attach_error::INVALID_DATASPACE) + error("attempt to attach invalid segment dataspace"); + if (e == Region_map::Attach_error::REGION_CONFLICT) + error("region conflict while locally attaching ELF segment"); + return nullptr; }); - void * const ptr = base; addr_t const laddr = elf_addr + seg.file_offset(); /* copy contents and fill with zeros */ @@ -115,15 +122,21 @@ Child::Process::Loaded_executable::Loaded_executable(Type type, } /* detach dataspace */ - local_rm.detach(base); - - off_t const offset = 0; - try { remote_rm.attach_at(ds_cap, addr, size, offset); } - catch (Region_map::Region_conflict) { - error("region conflict while remotely attaching ELF segment"); - error("addr=", (void *)addr, " size=", (void *)size, " offset=", (void *)offset); - throw; } + local_rm.detach(addr_t(ptr)); + remote_rm.attach(ds_cap, Region_map::Attr { + .size = size, + .offset = { }, + .use_at = true, + .at = addr, + .executable = false, + .writeable = true + }).with_result( + [&] (Region_map::Range) { }, + [&] (Region_map::Attach_error) { + error("region conflict while remotely attaching ELF segment"); + error("addr=", (void *)addr, " size=", (void *)size); } + ); } else { /* read-only segment */ @@ -131,27 +144,28 @@ Child::Process::Loaded_executable::Loaded_executable(Type type, if (seg.file_size() != seg.mem_size()) warning("filesz and memsz for read-only segment differ"); - off_t const offset = seg.file_offset(); - try { - if (exec) - remote_rm.attach_executable(ldso_ds, addr, size, offset); - else - remote_rm.attach_at(ldso_ds, addr, size, offset); - } - catch (Region_map::Region_conflict) { - error("region conflict while remotely attaching read-only ELF segment"); - error("addr=", (void *)addr, " size=", (void *)size, " offset=", (void *)offset); - throw; - } - catch (Region_map::Invalid_dataspace) { - error("attempt to attach invalid read-only segment dataspace"); - throw; - } + remote_rm.attach(ldso_ds, Region_map::Attr { + .size = size, + .offset = 
seg.file_offset(), + .use_at = true, + .at = addr, + .executable = seg.flags().x, + .writeable = false + }).with_result( + [&] (Region_map::Range) { }, + [&] (Region_map::Attach_error const e) { + if (e == Region_map::Attach_error::REGION_CONFLICT) + error("region conflict while remotely attaching read-only ELF segment"); + if (e == Region_map::Attach_error::INVALID_DATASPACE) + error("attempt to attach invalid read-only segment dataspace"); + error("addr=", (void *)addr, " size=", (void *)size); + } + ); } } /* detach ELF */ - local_rm.detach((void *)elf_addr); + local_rm.detach(elf_addr); } diff --git a/repos/base/src/lib/base/heap.cc b/repos/base/src/lib/base/heap.cc index 13a7e93305..f2e904ea7d 100644 --- a/repos/base/src/lib/base/heap.cc +++ b/repos/base/src/lib/base/heap.cc @@ -44,7 +44,7 @@ void Heap::Dataspace_pool::remove_and_free(Dataspace &ds) */ Ram_dataspace_capability ds_cap = ds.cap; - void *ds_local_addr = ds.local_addr; + addr_t const at = addr_t(ds.local_addr); remove(&ds); @@ -56,7 +56,7 @@ void Heap::Dataspace_pool::remove_and_free(Dataspace &ds) */ ds.~Dataspace(); - region_map->detach(ds_local_addr); + region_map->detach(at); ram_alloc->free(ds_cap); } @@ -102,22 +102,36 @@ Heap::_allocate_dataspace(size_t size, bool enforce_separate_metadata) struct Attach_guard { Region_map &rm; - struct { void *ptr = nullptr; }; + Region_map::Range range { }; bool keep = false; Attach_guard(Region_map &rm) : rm(rm) { } - ~Attach_guard() { if (!keep && ptr) rm.detach(ptr); } + ~Attach_guard() { if (!keep && range.start) rm.detach(range.start); } } attach_guard(*_ds_pool.region_map); - try { - attach_guard.ptr = _ds_pool.region_map->attach(ds_cap); + Region_map::Attr attr { }; + attr.writeable = true; + Region_map::Attach_result const result = _ds_pool.region_map->attach(ds_cap, attr); + if (result.failed()) { + using Error = Region_map::Attach_error; + return result.convert( + [&] (auto) /* never called */ { return Alloc_error::DENIED; }, + [&] (Error e) { + switch (e) { + case Error::OUT_OF_RAM: return Alloc_error::OUT_OF_RAM; + case Error::OUT_OF_CAPS: return Alloc_error::OUT_OF_CAPS; + case Error::REGION_CONFLICT: break; + case Error::INVALID_DATASPACE: break; + } + return Alloc_error::DENIED; + }); } - catch (Out_of_ram) { return Alloc_error::OUT_OF_RAM; } - catch (Out_of_caps) { return Alloc_error::OUT_OF_CAPS; } - catch (Region_map::Invalid_dataspace) { return Alloc_error::DENIED; } - catch (Region_map::Region_conflict) { return Alloc_error::DENIED; } + + result.with_result( + [&] (Region_map::Range range) { attach_guard.range = range; }, + [&] (auto) { /* handled above */ }); Alloc_result metadata = Alloc_error::DENIED; @@ -128,7 +142,7 @@ Heap::_allocate_dataspace(size_t size, bool enforce_separate_metadata) } else { /* add new local address range to our local allocator */ - _alloc->add_range((addr_t)attach_guard.ptr, size).with_result( + _alloc->add_range(attach_guard.range.start, size).with_result( [&] (Range_allocator::Range_ok) { metadata = _alloc->alloc_aligned(sizeof(Heap::Dataspace), log2(16U)); }, [&] (Alloc_error error) { @@ -138,7 +152,7 @@ Heap::_allocate_dataspace(size_t size, bool enforce_separate_metadata) return metadata.convert( [&] (void *md_ptr) -> Result { Dataspace &ds = *construct_at(md_ptr, ds_cap, - attach_guard.ptr, size); + (void *)attach_guard.range.start, size); _ds_pool.insert(&ds); alloc_guard.keep = attach_guard.keep = true; return &ds; diff --git a/repos/base/src/lib/base/region_map_client.cc b/repos/base/src/lib/base/region_map_client.cc index 
25a675600f..6fcc74c550 100644 --- a/repos/base/src/lib/base/region_map_client.cc +++ b/repos/base/src/lib/base/region_map_client.cc @@ -20,25 +20,23 @@ Region_map_client::Region_map_client(Capability cap) : Rpc_client(cap) { } -Region_map::Local_addr -Region_map_client::attach(Dataspace_capability ds, size_t size, off_t offset, - bool use_local_addr, Local_addr local_addr, - bool executable, bool writeable) +Region_map::Attach_result +Region_map_client::attach(Dataspace_capability ds, Attr const &attr) { - return call(ds, size, offset, use_local_addr, local_addr, - executable, writeable); + return call(ds, attr); } -void Region_map_client::detach(Local_addr local_addr) { - call(local_addr); } +void Region_map_client::detach(addr_t at) { call(at); } -void Region_map_client::fault_handler(Signal_context_capability cap) { - call(cap); } +void Region_map_client::fault_handler(Signal_context_capability cap) +{ + call(cap); +} -Region_map::State Region_map_client::state() { return call(); } +Region_map::Fault Region_map_client::fault() { return call(); } Dataspace_capability Region_map_client::dataspace() { return call(); } diff --git a/repos/base/src/lib/base/sliced_heap.cc b/repos/base/src/lib/base/sliced_heap.cc index c4e00cf247..9a90984952 100644 --- a/repos/base/src/lib/base/sliced_heap.cc +++ b/repos/base/src/lib/base/sliced_heap.cc @@ -63,28 +63,42 @@ Allocator::Alloc_result Sliced_heap::try_alloc(size_t size) struct Attach_guard { Region_map &rm; - struct { void *ptr = nullptr; }; + Region_map::Range range { }; bool keep = false; Attach_guard(Region_map &rm) : rm(rm) { } - ~Attach_guard() { if (!keep && ptr) rm.detach(ptr); } + ~Attach_guard() { if (!keep && range.start) rm.detach(range.start); } } attach_guard(_region_map); - try { - attach_guard.ptr = _region_map.attach(ds_cap); + Region_map::Attr attr { }; + attr.writeable = true; + Region_map::Attach_result const result = _region_map.attach(ds_cap, attr); + if (result.failed()) { + using Error = Region_map::Attach_error; + return result.convert( + [&] (auto) /* never called */ { return Alloc_error::DENIED; }, + [&] (Error e) { + switch (e) { + case Error::OUT_OF_RAM: return Alloc_error::OUT_OF_RAM; + case Error::OUT_OF_CAPS: return Alloc_error::OUT_OF_CAPS; + case Error::REGION_CONFLICT: break; + case Error::INVALID_DATASPACE: break; + } + return Alloc_error::DENIED; + }); } - catch (Out_of_ram) { return Alloc_error::OUT_OF_RAM; } - catch (Out_of_caps) { return Alloc_error::OUT_OF_CAPS; } - catch (Region_map::Invalid_dataspace) { return Alloc_error::DENIED; } - catch (Region_map::Region_conflict) { return Alloc_error::DENIED; } + + result.with_result( + [&] (Region_map::Range range) { attach_guard.range = range; }, + [&] (auto) { /* handled above */ }); /* serialize access to block list */ Mutex::Guard guard(_mutex); - Block * const block = construct_at(attach_guard.ptr, ds_cap, size); - + Block * const block = construct_at((void *)attach_guard.range.start, + ds_cap, size); _consumed += size; _blocks.insert(block); @@ -126,7 +140,7 @@ void Sliced_heap::free(void *addr, size_t) block->~Block(); } - _region_map.detach(local_addr); + _region_map.detach(addr_t(local_addr)); _ram_alloc.free(ds_cap); } diff --git a/repos/base/src/lib/base/thread.cc b/repos/base/src/lib/base/thread.cc index f1f41402ed..b8ebd7d83b 100644 --- a/repos/base/src/lib/base/thread.cc +++ b/repos/base/src/lib/base/thread.cc @@ -49,19 +49,35 @@ void Stack::size(size_t const size) /* allocate and attach backing store for the stack enhancement */ addr_t const ds_addr = 
_base - ds_size - stack_area_virtual_base(); - try { - Ram_allocator * const ram = env_stack_area_ram_allocator; - Ram_dataspace_capability const ds_cap = ram->alloc(ds_size); - Region_map * const rm = env_stack_area_region_map; - void * const attach_addr = rm->attach_at(ds_cap, ds_addr, ds_size); - if (ds_addr != (addr_t)attach_addr) - throw Thread::Out_of_stack_space(); - } - catch (Out_of_ram) { throw Thread::Stack_alloc_failed(); } + Ram_allocator &ram = *env_stack_area_ram_allocator; + Region_map &rm = *env_stack_area_region_map; - /* update stack information */ - _base -= ds_size; + ram.try_alloc(ds_size).with_result( + [&] (Ram_dataspace_capability ds_cap) { + + rm.attach(ds_cap, Region_map::Attr { + .size = ds_size, + .offset = 0, + .use_at = true, + .at = ds_addr, + .executable = { }, + .writeable = true, + }).with_result( + [&] (Region_map::Range r) { + if (r.start != ds_addr) + throw Thread::Stack_alloc_failed(); + + /* update stack information */ + _base -= ds_size; + }, + [&] (Region_map::Attach_error) { + throw Thread::Stack_alloc_failed(); } + ); + }, + [&] (Ram_allocator::Alloc_error) { + throw Thread::Stack_alloc_failed(); } + ); } @@ -93,27 +109,53 @@ Thread::_alloc_stack(size_t stack_size, char const *name, bool main_thread) if (sizeof(Native_utcb) >= (1 << PAGE_SIZE_LOG2)) ds_addr -= sizeof(Native_utcb); + Ram_allocator &ram = *env_stack_area_ram_allocator; + /* allocate and attach backing store for the stack */ - Ram_dataspace_capability ds_cap; - try { - ds_cap = env_stack_area_ram_allocator->alloc(ds_size); - addr_t attach_addr = ds_addr - stack_area_virtual_base(); - if (attach_addr != (addr_t)env_stack_area_region_map->attach_at(ds_cap, attach_addr, ds_size)) - throw Stack_alloc_failed(); - } - catch (Out_of_ram) { throw Stack_alloc_failed(); } + return ram.try_alloc(ds_size).convert( - /* - * Now the stack is backed by memory, so it is safe to access its members. - * - * We need to initialize the stack object's memory with zeroes, otherwise - * the ds_cap isn't invalid. That would cause trouble when the assignment - * operator of Native_capability is used. - */ - construct_at(stack, name, *this, ds_addr, ds_cap); + [&] (Ram_dataspace_capability const ds_cap) + { + addr_t const attach_addr = ds_addr - stack_area_virtual_base(); - Abi::init_stack(stack->top()); - return stack; + return env_stack_area_region_map->attach(ds_cap, Region_map::Attr { + .size = ds_size, + .offset = { }, + .use_at = true, + .at = attach_addr, + .executable = { }, + .writeable = true + }).convert( + + [&] (Region_map::Range const range) -> Stack * { + if (range.start != attach_addr) { + ram.free(ds_cap); + throw Stack_alloc_failed(); + } + + /* + * Now the stack is backed by memory, it is safe to access + * its members. + * + * We need to initialize the stack object's memory with + * zeroes, otherwise the ds_cap isn't invalid. That would + * cause trouble when the assignment operator of + * Native_capability is used. 
+ */ + construct_at(stack, name, *this, ds_addr, ds_cap); + + Abi::init_stack(stack->top()); + return stack; + }, + [&] (Region_map::Attach_error) -> Stack * { + ram.free(ds_cap); + throw Stack_alloc_failed(); + } + ); + }, + [&] (Ram_allocator::Alloc_error) -> Stack * { + throw Stack_alloc_failed(); } + ); } @@ -125,7 +167,7 @@ void Thread::_free_stack(Stack *stack) /* call de-constructor explicitly before memory gets detached */ stack->~Stack(); - Genode::env_stack_area_region_map->detach((void *)ds_addr); + Genode::env_stack_area_region_map->detach(ds_addr); Genode::env_stack_area_ram_allocator->free(ds_cap); /* stack ready for reuse */ @@ -226,7 +268,15 @@ void Thread::_init_cpu_session_and_trace_control() /* initialize trace control now that the CPU session must be valid */ Dataspace_capability ds = _cpu_session->trace_control(); if (ds.valid()) { - _trace_control = local_rm_ptr->attach(ds); } + Region_map::Attr attr { }; + attr.writeable = true; + local_rm_ptr->attach(ds, attr).with_result( + [&] (Region_map::Range range) { + _trace_control = reinterpret_cast(range.start); }, + [&] (Region_map::Attach_error) { + error("failed to initialize trace control for new thread"); } + ); + } } @@ -270,7 +320,7 @@ Thread::~Thread() * detached trace control dataspace. */ if (_trace_control && local_rm_ptr) - local_rm_ptr->detach(_trace_control); + local_rm_ptr->detach(addr_t(_trace_control)); } diff --git a/repos/base/src/lib/base/trace.cc b/repos/base/src/lib/base/trace.cc index 17505db90f..c0183529bb 100644 --- a/repos/base/src/lib/base/trace.cc +++ b/repos/base/src/lib/base/trace.cc @@ -60,13 +60,13 @@ bool Trace::Logger::_evaluate_control() /* unload policy */ if (policy_module) { - _env().rm().detach(policy_module); + _env().rm().detach(addr_t(policy_module)); policy_module = 0; } /* unmap trace buffer */ if (buffer) { - _env().rm().detach(buffer); + _env().rm().detach(addr_t(buffer)); buffer = 0; } @@ -97,29 +97,31 @@ bool Trace::Logger::_evaluate_control() return false; } - try { - max_event_size = 0; - policy_module = 0; + max_event_size = 0; + policy_module = nullptr; - enum { - MAX_SIZE = 0, NO_OFFSET = 0, ANY_LOCAL_ADDR = false, - EXECUTABLE = true - }; + _env().rm().attach(policy_ds, { + .size = { }, .offset = { }, + .use_at = { }, .at = { }, + .executable = true, .writeable = true, + }).with_result( + [&] (Region_map::Range range) { + policy_module = reinterpret_cast(range.start); }, + [&] (Region_map::Attach_error) { error("failed to attach trace policy"); } + ); - policy_module = _env().rm().attach(policy_ds, MAX_SIZE, NO_OFFSET, - ANY_LOCAL_ADDR, nullptr, EXECUTABLE); + if (!policy_module) + return false; - /* relocate function pointers of policy callback table */ - for (unsigned i = 0; i < sizeof(Trace::Policy_module)/sizeof(void *); i++) { - ((addr_t *)policy_module)[i] += (addr_t)(policy_module); - } + /* relocate function pointers of policy callback table */ + for (unsigned i = 0; i < sizeof(Trace::Policy_module)/sizeof(void *); i++) { + ((addr_t *)policy_module)[i] += (addr_t)(policy_module); + } - max_event_size = policy_module->max_event_size(); - - } catch (...) { } + max_event_size = policy_module->max_event_size(); /* obtain buffer */ - buffer = 0; + buffer = nullptr; Dataspace_capability buffer_ds = Cpu_thread_client(thread_cap).trace_buffer(); if (!buffer_ds.valid()) { @@ -129,11 +131,16 @@ bool Trace::Logger::_evaluate_control() return false; } - try { - buffer = _env().rm().attach(buffer_ds); - buffer->init(Dataspace_client(buffer_ds).size()); - } catch (...) 
{ } + Region_map::Attr attr { }; + attr.writeable = true; + _env().rm().attach(buffer_ds, attr).with_result( + [&] (Region_map::Range range) { + buffer = reinterpret_cast(range.start); }, + [&] (Region_map::Attach_error) { error("failed to attach trace buffer"); }); + if (!buffer) + return false; + buffer->init(Dataspace_client(buffer_ds).size()); policy_version = control->policy_version(); } @@ -229,12 +236,18 @@ Trace::Logger *Thread::_logger() Cpu_session &cpu = myself ? *myself->_cpu_session : _env().cpu(); - if (!myself) - if (!main_trace_control) { - Dataspace_capability ds = _env().cpu().trace_control(); - if (ds.valid()) - main_trace_control = _env().rm().attach(ds); + if (!myself && !main_trace_control) { + Dataspace_capability ds = _env().cpu().trace_control(); + if (ds.valid()) { + Region_map::Attr attr { }; + attr.writeable = true; + _env().rm().attach(ds, attr).with_result( + [&] (Region_map::Range range) { + main_trace_control = reinterpret_cast(range.start); }, + [&] (Region_map::Attach_error) { + error("failed to attach trace control"); }); } + } thread_cap.with_result( [&] (Thread_capability cap) { diff --git a/repos/base/src/lib/cxx/malloc_free.cc b/repos/base/src/lib/cxx/malloc_free.cc index 44578744d1..67f237a572 100644 --- a/repos/base/src/lib/cxx/malloc_free.cc +++ b/repos/base/src/lib/cxx/malloc_free.cc @@ -32,8 +32,11 @@ static Heap *cxx_heap_ptr; Heap &cxx_heap() { class Cxx_heap_uninitialized : Exception { }; - if (!cxx_heap_ptr) + if (!cxx_heap_ptr) { + raw("Cxx_heap_uninitialized"); + for (;;); throw Cxx_heap_uninitialized(); + } return *cxx_heap_ptr; } diff --git a/repos/base/src/lib/ldso/include/dynamic.h b/repos/base/src/lib/ldso/include/dynamic.h index 94ee9e8be0..7b298dcb9f 100644 --- a/repos/base/src/lib/ldso/include/dynamic.h +++ b/repos/base/src/lib/ldso/include/dynamic.h @@ -72,9 +72,9 @@ class Linker::Dynamic struct Needed : Fifo::Element { - off_t offset; + addr_t offset; - Needed(off_t offset) : offset(offset) { } + Needed(addr_t offset) : offset(offset) { } char const *path(char const *strtab) { diff --git a/repos/base/src/lib/ldso/include/file.h b/repos/base/src/lib/ldso/include/file.h index cee8fed73e..b5a807a62f 100644 --- a/repos/base/src/lib/ldso/include/file.h +++ b/repos/base/src/lib/ldso/include/file.h @@ -137,8 +137,17 @@ struct Linker::Elf_file : File || (name == "posix.lib.so") || (strcmp(name.string(), "vfs", 3) == 0); - reloc_base = resident ? Region_map::r()->alloc_region_at_end(size) - : Region_map::r()->alloc_region(size); + Region_map::Alloc_region_result const allocated_region = + resident ? 
Region_map::r()->alloc_region_at_end(size) + : Region_map::r()->alloc_region(size); + + reloc_base = allocated_region.convert( + [&] (addr_t base) { return base; }, + [&] (Region_map::Alloc_region_error) { return 0UL; }); + + if (!reloc_base) + error("failed to allocate region within linker area"); + start = 0; } @@ -292,10 +301,15 @@ struct Linker::Elf_file : File */ void load_segment_rx(Elf::Phdr const &p) { - Region_map::r()->attach_executable(rom_cap, - trunc_page(p.p_vaddr) + reloc_base, - round_page(p.p_memsz), - trunc_page(p.p_offset)); + if (Region_map::r()->attach(rom_cap, Region_map::Attr { + .size = round_page(p.p_memsz), + .offset = trunc_page(p.p_offset), + .use_at = true, + .at = trunc_page(p.p_vaddr) + reloc_base, + .executable = true, + .writeable = false + }).failed()) + error("failed to load RX segment"); } /** @@ -303,19 +317,46 @@ struct Linker::Elf_file : File */ void load_segment_rw(Elf::Phdr const &p, int nr) { - void *src = env.rm().attach(rom_cap, 0, p.p_offset); - addr_t dst = p.p_vaddr + reloc_base; + void * const src = env.rm().attach(rom_cap, Region_map::Attr { + .size = { }, + .offset = p.p_offset, + .use_at = { }, + .at = { }, + .executable = { }, + .writeable = true + }).convert( + [&] (Genode::Region_map::Range range) { return (void *)range.start; }, + [&] (Genode::Region_map::Attach_error) { return nullptr; } + ); + if (!src) { + error("dynamic linker failed to locally map RW segment ", nr); + return; + } + + addr_t const dst = p.p_vaddr + reloc_base; ram_cap[nr] = env.ram().alloc(p.p_memsz); - Region_map::r()->attach_at(ram_cap[nr], dst); - memcpy((void*)dst, src, p.p_filesz); + Region_map::r()->attach(ram_cap[nr], Region_map::Attr { + .size = { }, + .offset = { }, + .use_at = true, + .at = dst, + .executable = { }, + .writeable = true + }).with_result( + [&] (Genode::Region_map::Range) { - /* clear if file size < memory size */ - if (p.p_filesz < p.p_memsz) - memset((void *)(dst + p.p_filesz), 0, p.p_memsz - p.p_filesz); + memcpy((void*)dst, src, p.p_filesz); - env.rm().detach(src); + /* clear if file size < memory size */ + if (p.p_filesz < p.p_memsz) + memset((void *)(dst + p.p_filesz), 0, p.p_memsz - p.p_filesz); + }, + [&] (Genode::Region_map::Attach_error) { + error("dynamic linker failed to copy RW segment"); } + ); + env.rm().detach(addr_t(src)); } /** diff --git a/repos/base/src/lib/ldso/include/region_map.h b/repos/base/src/lib/ldso/include/region_map.h index 0e2b32a2be..da7f7c85d5 100644 --- a/repos/base/src/lib/ldso/include/region_map.h +++ b/repos/base/src/lib/ldso/include/region_map.h @@ -39,11 +39,6 @@ namespace Linker { */ class Linker::Region_map { - public: - - typedef Region_map_client::Local_addr Local_addr; - typedef Region_map_client::Region_conflict Region_conflict; - private: Env &_env; @@ -56,15 +51,27 @@ class Linker::Region_map Region_map(Env &env, Allocator &md_alloc, addr_t base) : - _env(env), _range(&md_alloc), - _base((addr_t)_env.rm().attach_rwx(_rm.dataspace(), base)) + _env(env), _range(&md_alloc), _base(base) { - _range.add_range(base, Pd_session::LINKER_AREA_SIZE); + _env.rm().attach(_rm.dataspace(), Genode::Region_map::Attr { + .size = 0, + .offset = 0, + .use_at = true, + .at = _base, + .executable = true, + .writeable = true + }).with_result( + [&] (Genode::Region_map::Range) { + _range.add_range(base, Pd_session::LINKER_AREA_SIZE); - if (Linker::verbose) - log(" ", Hex(base), - " .. ", Hex(base + Pd_session::LINKER_AREA_SIZE - 1), - ": linker area"); + if (Linker::verbose) + log(" ", Hex(base), + " .. 
", Hex(base + Pd_session::LINKER_AREA_SIZE - 1), + ": linker area"); + }, + [&] (Genode::Region_map::Attach_error) { + error("failed to locally attach linker area"); } + ); } public: @@ -73,63 +80,55 @@ class Linker::Region_map static Constructible_region_map &r(); + using Alloc_region_error = Ram_allocator::Alloc_error; + using Alloc_region_result = Attempt; + using Attach_result = Genode::Region_map::Attach_result; + using Attr = Genode::Region_map::Attr; + /** * Allocate region anywhere within the region map - * - * XXX propagate OUT_OF_RAM, OUT_OF_CAPS */ - addr_t alloc_region(size_t size) + Alloc_region_result alloc_region(size_t size) { - return _range.alloc_aligned(size, get_page_size_log2()).convert( - [&] (void *ptr) { return (addr_t)ptr; }, - [&] (Allocator::Alloc_error) -> addr_t { throw Region_conflict(); }); + return _range.alloc_aligned(size, get_page_size_log2()).convert( + [&] (void *ptr) { return (addr_t)ptr; }, + [&] (Allocator::Alloc_error e) { return e; }); } /** * Allocate region at specified 'vaddr' */ - void alloc_region_at(size_t size, addr_t vaddr) + Alloc_region_result alloc_region_at(size_t size, addr_t vaddr) { - if (_range.alloc_addr(size, vaddr).failed()) - throw Region_conflict(); + return _range.alloc_addr(size, vaddr).convert( + [&] (void *ptr) { return (addr_t)ptr; }, + [&] (Allocator::Alloc_error e) { return e; }); } - addr_t alloc_region_at_end(size_t size) + Alloc_region_result alloc_region_at_end(size_t size) { _end -= align_addr(size, get_page_size_log2()); - alloc_region_at(size, _end); - return _end; + return alloc_region_at(size, _end); } void free_region(addr_t vaddr) { _range.free((void *)vaddr); } - /** - * Overwritten from 'Region_map_client' - */ - Local_addr attach_at(Dataspace_capability ds, addr_t local_addr, - size_t size = 0, off_t offset = 0) + Attach_result attach(Dataspace_capability ds, Attr attr) { - return retry( - [&] () { - return _rm.attach_at(ds, local_addr - _base, size, offset); + if (!attr.use_at) + error("unexpected arguments of Linker::Region_map::attach"); + + attr.at -= _base; + return _rm.attach(ds, attr).convert( + [&] (Genode::Region_map::Range range) { + range.start += _base; + return range; }, - [&] () { _env.upgrade(Parent::Env::pd(), "ram_quota=8K"); }); + [&] (Genode::Region_map::Attach_error e) { return e; } + ); } - /** - * Overwritten from 'Region_map_client' - */ - Local_addr attach_executable(Dataspace_capability ds, addr_t local_addr, - size_t size = 0, off_t offset = 0) - { - return retry( - [&] () { - return _rm.attach_executable(ds, local_addr - _base, size, offset); - }, - [&] () { _env.upgrade(Parent::Env::pd(), "ram_quota=8K"); }); - } - - void detach(Local_addr local_addr) { _rm.detach((addr_t)local_addr - _base); } + void detach(addr_t local_addr) { _rm.detach(local_addr - _base); } }; #endif /* _INCLUDE__REGION_MAP_H_ */ diff --git a/repos/base/src/lib/ldso/main.cc b/repos/base/src/lib/ldso/main.cc index bb73543d1a..c9b21c3d52 100644 --- a/repos/base/src/lib/ldso/main.cc +++ b/repos/base/src/lib/ldso/main.cc @@ -652,25 +652,32 @@ void Genode::init_ldso_phdr(Env &env) { struct Not_implemented : Exception { }; - Local_addr attach(Dataspace_capability ds, size_t, off_t, - bool, Local_addr, bool, bool) override + Attach_result attach(Dataspace_capability ds, Attr const &) override { size_t const size = Dataspace_client(ds).size(); Linker::Region_map &linker_area = *Linker::Region_map::r(); - addr_t const at = linker_area.alloc_region_at_end(size); - - (void)linker_area.attach_at(ds, at, size, 0UL); - - 
return at; + return linker_area.alloc_region_at_end(size).convert( + [&] (addr_t const at) { + return linker_area.attach(ds, Region_map::Attr { + .size = size, + .offset = { }, + .use_at = true, + .at = at, + .executable = { }, + .writeable = true }); + }, + [&] (Linker::Region_map::Alloc_region_error) { + return Attach_error::REGION_CONFLICT; } + ); } - void detach(Local_addr) override { throw Not_implemented(); } + void detach(addr_t) override { throw Not_implemented(); } void fault_handler(Signal_context_capability) override { } - State state() override { throw Not_implemented(); } + Fault fault() override { throw Not_implemented(); } Dataspace_capability dataspace() override { throw Not_implemented(); } diff --git a/repos/base/src/test/ds_ownership/main.cc b/repos/base/src/test/ds_ownership/main.cc index 301461e763..557f7e306c 100644 --- a/repos/base/src/test/ds_ownership/main.cc +++ b/repos/base/src/test/ds_ownership/main.cc @@ -36,7 +36,7 @@ void Component::construct(Genode::Env &env) pd_2.free(ds); log("try to attach dataspace to see if it still exists"); - env.rm().attach(ds); + env.rm().attach(ds, { }); log("attach operation succeeded"); diff --git a/repos/base/src/test/rm_fault/main.cc b/repos/base/src/test/rm_fault/main.cc index 1a86ab7f94..465d7ba989 100644 --- a/repos/base/src/test/rm_fault/main.cc +++ b/repos/base/src/test/rm_fault/main.cc @@ -40,11 +40,11 @@ enum { SHUTDOWN = EXEC_TEST - 1 }; -static char const *state_name(Region_map::State &state) +static char const *fault_name(Region_map::Fault const &fault) { - return state.type == Region_map::State::READ_FAULT ? "READ_FAULT" : - state.type == Region_map::State::WRITE_FAULT ? "WRITE_FAULT" : - state.type == Region_map::State::EXEC_FAULT ? "EXEC_FAULT" : "READY"; + return fault.type == Region_map::Fault::Type::READ ? "READ_FAULT" : + fault.type == Region_map::Fault::Type::WRITE ? "WRITE_FAULT" : + fault.type == Region_map::Fault::Type::EXEC ? 
"EXEC_FAULT" : "READY"; } @@ -295,6 +295,15 @@ struct Main_parent long volatile &_child_value() { return *_ds.local_addr(); } long volatile &_child_stop() { return *(_ds.local_addr() + 1); } + void _attach_at(Dataspace_capability ds, addr_t at) + { + if (_address_space.attach(ds, { + .size = { }, .offset = { }, + .use_at = true, .at = at, + .executable = { }, .writeable = true + }).failed()) error("_attach_at unexpectedly failed"); + } + void _test_read_fault(addr_t const child_virt_addr) { /* allocate dataspace to resolve the fault */ @@ -302,7 +311,7 @@ struct Main_parent _child_value() = READ_TEST; - _address_space.attach_at(_ds.cap(), child_virt_addr); + _attach_at(_ds.cap(), child_virt_addr); /* poll until our child modifies the dataspace content */ while (_child_value() == READ_TEST); @@ -311,7 +320,7 @@ struct Main_parent Hex(_child_value())); log("revoke dataspace from child"); - _address_space.detach((void *)child_virt_addr); + _address_space.detach(child_virt_addr); } void _test_write_fault(addr_t const child_virt_addr, unsigned round) @@ -322,7 +331,7 @@ struct Main_parent _child_value() = WRITE_TEST; - _address_space.attach_at(_binary.dataspace(), child_virt_addr); + _attach_at(_binary.dataspace(), child_virt_addr); return; } @@ -337,36 +346,35 @@ struct Main_parent : " unknown"); /* detach region where fault happened */ - _address_space.detach((void *)child_virt_addr); + _address_space.detach(child_virt_addr); if (round == ROUND_FAULT_ON_ROM_BINARY) { /* attach a RAM dataspace read-only */ - enum { - SIZE = 4096, OFFSET = 0, ATTACH_AT = true, NON_EXEC = false, - READONLY = false - }; + if (_address_space.attach(_ds.cap(), { + .size = 4096, .offset = { }, + .use_at = true, .at = child_virt_addr, + .executable = { }, .writeable = { } + }).failed()) error("attach of ROUND_FAULT_ON_ROM_BINARY failed"); - _address_space.attach(_ds.cap(), SIZE, OFFSET, ATTACH_AT, - child_virt_addr, NON_EXEC, READONLY); } else if (round == ROUND_FAULT_ON_RO_RAM) { /* let client continue by attaching RAM dataspace writeable */ - _address_space.attach_at(_ds.cap(), child_virt_addr); + _attach_at(_ds.cap(), child_virt_addr); } } - void _test_exec_fault(Region_map::State &state) + void _test_exec_fault(Region_map::Fault const &fault) { if (_child_value() == WRITE_TEST) { _child_value() = EXEC_TEST; return; } - if (state.type != Region_map::State::EXEC_FAULT || - state.addr != MANAGED_ADDR) + if (fault.type != Region_map::Fault::Type::EXEC || + fault.addr != MANAGED_ADDR) { - error("exec test failed ", (int)state.type, - " addr=", Hex(state.addr)); + error("exec test failed ", (int)fault.type, + " addr=", Hex(fault.addr)); return; } @@ -381,17 +389,17 @@ struct Main_parent log("received region-map fault signal, request fault state"); - Region_map::State state = _address_space.state(); + Region_map::Fault const fault = _address_space.fault(); - log("rm session state is ", state_name(state), ", pf_addr=", Hex(state.addr)); + log("rm session state is ", fault_name(fault), ", pf_addr=", Hex(fault.addr)); /* ignore spurious fault signal */ - if (state.type == Region_map::State::READY) { + if (fault.type == Region_map::Fault::Type::NONE) { log("ignoring spurious fault signal"); return; } - addr_t child_virt_addr = state.addr & ~(4096 - 1); + addr_t child_virt_addr = fault.addr & ~(4096 - 1); if (_fault_cnt < FAULT_CNT_READ) _test_read_fault(child_virt_addr); @@ -404,7 +412,7 @@ struct Main_parent _handle_fault_stack(); if (_fault_cnt > FAULT_CNT_WRITE) - _test_exec_fault(state); + _test_exec_fault(fault); 
_fault_cnt++; } @@ -413,9 +421,9 @@ struct Main_parent { /* sanity check that we got exec fault */ if (_config.xml().attribute_value("executable_fault_test", true)) { - Region_map::State state = _address_space.state(); - if (state.type != Region_map::State::EXEC_FAULT) { - error("unexpected state ", state_name(state)); + Region_map::Fault const fault = _address_space.fault(); + if (fault.type != Region_map::Fault::Type::EXEC) { + error("unexpected state ", fault_name(fault)); return; } diff --git a/repos/base/src/test/rm_nested/main.cc b/repos/base/src/test/rm_nested/main.cc index 5f019b63a8..662fa3449c 100644 --- a/repos/base/src/test/rm_nested/main.cc +++ b/repos/base/src/test/rm_nested/main.cc @@ -46,19 +46,22 @@ class Local_fault_handler : public Entrypoint void _handle_fault() { - Region_map::State state = _region_map.state(); + Region_map::Fault fault = _region_map.fault(); _fault_cnt = _fault_cnt + 1; - log("region-map state is ", - state.type == Region_map::State::READ_FAULT ? "READ_FAULT" : - state.type == Region_map::State::WRITE_FAULT ? "WRITE_FAULT" : - state.type == Region_map::State::EXEC_FAULT ? "EXEC_FAULT" : "READY", - ", pf_addr=", Hex(state.addr, Hex::PREFIX)); + log("region-map fault is ", + fault.type == Region_map::Fault::Type::READ ? "READ_FAULT" : + fault.type == Region_map::Fault::Type::WRITE ? "WRITE_FAULT" : + fault.type == Region_map::Fault::Type::EXEC ? "EXEC_FAULT" : "READY", + ", pf_addr=", Hex(fault.addr, Hex::PREFIX)); log("allocate dataspace and attach it to sub region map"); Dataspace_capability ds = _env.ram().alloc(PAGE_SIZE); - _region_map.attach_at(ds, state.addr & ~(PAGE_SIZE - 1)); + _region_map.attach(ds, { + .size = { }, .offset = { }, + .use_at = true, .at = fault.addr & ~(PAGE_SIZE - 1), + .executable = { }, .writeable = true }); log("returning from handle_fault"); } @@ -83,6 +86,25 @@ class Local_fault_handler : public Entrypoint }; +static void *ptr_from_attach_result(Region_map::Attach_result const &result) +{ + return result.convert( + [&] (Region_map::Range range) { return (void *)range.start; }, + [&] (Region_map::Attach_error) { + error("read-only attach unexpectedly failed"); + return nullptr; }); +} + + +static void *attach_rw(Region_map &rm, Dataspace_capability ds) +{ + return ptr_from_attach_result(rm.attach(ds, { + .size = { }, .offset = { }, + .use_at = { }, .at = { }, + .executable = { }, .writeable = true })); +} + + void nested_regions(Genode::Env &env) { enum { @@ -98,7 +120,7 @@ void nested_regions(Genode::Env &env) Region_map_client rm_top(rm.create(MANAGED_REGION_TOP_SIZE)); Dataspace_client rm_top_client(rm_top.dataspace()); - void *ptr_top = env.rm().attach(rm_top.dataspace()); + void *ptr_top = attach_rw(env.rm(), rm_top.dataspace()); addr_t const addr_top = reinterpret_cast(ptr_top); log(" region top ", Hex_range(addr_top, rm_top_client.size())); @@ -106,13 +128,13 @@ void nested_regions(Genode::Env &env) /* shim region 1 */ Region_map_client rm_shim1(rm.create(MANAGED_REGION_SHIM1_SIZE)); Dataspace_client rm_shim1_client(rm_shim1.dataspace()); - void *ptr_shim1 = rm_top.attach(rm_shim1.dataspace()); + void *ptr_shim1 = attach_rw(rm_top, rm_shim1.dataspace()); addr_t const addr_shim1 = reinterpret_cast(ptr_shim1); /* shim region 2 */ Region_map_client rm_shim2(rm.create(MANAGED_REGION_SHIM2_SIZE)); Dataspace_client rm_shim2_client(rm_shim2.dataspace()); - void *ptr_shim2 = rm_top.attach(rm_shim2.dataspace()); + void *ptr_shim2 = attach_rw(rm_top, rm_shim2.dataspace()); addr_t const addr_shim2 = 
reinterpret_cast(ptr_shim2); log(" region shim ", @@ -122,16 +144,12 @@ void nested_regions(Genode::Env &env) /* attach some memory to region 2 as readonly and touch/map it */ size_t const shim2_ram_size = PAGE_SIZE * 2; Dataspace_capability shim2_ram_ds = env.ram().alloc(shim2_ram_size); - enum { - COMPLETE_SIZE = 0, OFFSET_0 = 0, OFFSET_1000 = 0x1000, - USE_LOCAL_ADDR = true, LOCAL_ADDR_0 = 0, LOCAL_ADDR_1000 = 0x1000, - NON_EXECUTABLE = false, - READONLY = false, WRITEABLE = true - }; - void * ptr_shim2_ram = rm_shim2.attach(shim2_ram_ds, COMPLETE_SIZE, - OFFSET_0, USE_LOCAL_ADDR, - LOCAL_ADDR_1000, NON_EXECUTABLE, - READONLY); + void * const ptr_shim2_ram = + ptr_from_attach_result(rm_shim2.attach(shim2_ram_ds, { + .size = { }, .offset = { }, + .use_at = true, .at = 0x1000, + .executable = { }, .writeable = { } })); + addr_t const addr_shim2_ram = reinterpret_cast(ptr_shim2_ram); addr_t const read_shim2 = addr_top + addr_shim2 + addr_shim2_ram; @@ -148,7 +166,13 @@ void nested_regions(Genode::Env &env) Region_map_client rm_bottom(rm.create(MANAGED_REGION_BOTTOM_SIZE)); Dataspace_client rm_bottom_client(rm_bottom.dataspace()); size_t const size_bottom = MANAGED_REGION_BOTTOM_SIZE - MANAGED_REGION_SHIM2_SIZE; - void const *ptr_bottom = rm_shim1.attach(rm_bottom.dataspace(), size_bottom); + + void const * const ptr_bottom = + ptr_from_attach_result(rm_shim1.attach(rm_bottom.dataspace(), { + .size = size_bottom, .offset = { }, + .use_at = { }, .at = { }, + .executable = { }, .writeable = { } })); + addr_t const addr_bottom = reinterpret_cast(ptr_bottom); log(" bottom shim (r) ", @@ -159,14 +183,17 @@ void nested_regions(Genode::Env &env) /* attach some memory to bottom as writeable */ Dataspace_capability bottom_ram_ds = env.ram().alloc(MANAGED_REGION_BOTTOM_SIZE); { - void * base_rw = env.rm().attach(bottom_ram_ds); + void * base_rw = attach_rw(env.rm(), bottom_ram_ds); memset(base_rw, 0xff, MANAGED_REGION_BOTTOM_SIZE); - env.rm().detach(base_rw); + env.rm().detach(addr_t(base_rw)); } - void * ptr_bottom_ram = rm_bottom.attach(bottom_ram_ds, COMPLETE_SIZE, - OFFSET_0, USE_LOCAL_ADDR, - LOCAL_ADDR_0, NON_EXECUTABLE, - WRITEABLE); + + void const * const ptr_bottom_ram = + ptr_from_attach_result(rm_bottom.attach(bottom_ram_ds, { + .size = { }, .offset = { }, + .use_at = true, .at = 0, + .executable = { }, .writeable = true })); + addr_t const addr_bottom_ram = reinterpret_cast(ptr_bottom_ram); addr_t const write_bottom = addr_top + addr_shim1 + addr_bottom + addr_bottom_ram; @@ -212,7 +239,7 @@ void Component::construct(Genode::Env & env) /* * Attach region map as dataspace to the local address space. 
*/ - void *addr = env.rm().attach(region_map.dataspace()); + void *addr = attach_rw(env.rm(), region_map.dataspace()); log("attached sub dataspace at local address ", addr); Dataspace_client client(region_map.dataspace()); diff --git a/repos/base/src/test/rm_stress/main.cc b/repos/base/src/test/rm_stress/main.cc index 7b66fac0c2..c04a9c00e9 100644 --- a/repos/base/src/test/rm_stress/main.cc +++ b/repos/base/src/test/rm_stress/main.cc @@ -52,17 +52,27 @@ void Component::construct(Env &env) for (unsigned r = 0; r < ROUNDS; ++r) { for (unsigned i = 0; i < sizeof(page)/sizeof(*page); ++i) { - off_t const offset = 0; + addr_t const offset = 0; - unsigned char volatile const *v = - env.rm().attach(page[i].cap(), page[i].size(), offset); + uint8_t volatile const *v = + env.rm().attach(page[i].cap(), { + .size = page[i].size(), + .offset = offset, + .use_at = { }, + .at = { }, + .executable = false, + .writeable = true + }).convert( + [&] (Region_map::Range range) { return (uint8_t *)range.start; }, + [&] (Region_map::Attach_error) { return nullptr; } + ); if (page[i].color != *v) { error("value @ ", v, " ", X(*v), " != ", X(page[i].color), " in round ", r); env.parent().exit(-1); } - env.rm().detach(Region_map::Local_addr(v)); + env.rm().detach(addr_t(v)); } } diff --git a/repos/base/src/test/sub_rm/main.cc b/repos/base/src/test/sub_rm/main.cc index 1dad1106d1..7cceaacdca 100644 --- a/repos/base/src/test/sub_rm/main.cc +++ b/repos/base/src/test/sub_rm/main.cc @@ -39,12 +39,11 @@ static char const *test_pattern_2() { static void fill_ds_with_test_pattern(Env &env, char const *pattern, - Dataspace_capability ds, size_t offset) + Dataspace_capability ds_cap, size_t offset) { log("fill dataspace with information"); - char *content = env.rm().attach(ds); - copy_cstring(content + offset, pattern, ~0); - env.rm().detach(content); + Attached_dataspace ds { env.rm(), ds_cap }; + copy_cstring(ds.local_addr() + offset, pattern, ~0); } @@ -62,7 +61,7 @@ void Component::construct(Env &env) log("--- sub-rm test ---"); log("create RM connection"); - enum { SUB_RM_SIZE = 1024*1024 }; + size_t const SUB_RM_SIZE = 1024*1024; Rm_connection rm(env); /* @@ -80,7 +79,7 @@ void Component::construct(Env &env) */ log("create managed dataspace"); Region_map_client sub_rm(rm.create(SUB_RM_SIZE)); - enum { DS_SIZE = 4*4096 }; + size_t const DS_SIZE = 4*4096; Ram_dataspace_capability ds = env.ram().alloc(DS_SIZE); /* @@ -91,19 +90,32 @@ void Component::construct(Env &env) if (!config.xml().attribute_value("support_attach_sub_any", true)) { log("attach RAM ds to any position at sub rm - this should fail"); - try { - sub_rm.attach(ds, 0, 0, false, (addr_t)0); - fail("sub rm attach_any unexpectedly did not fail"); - } - catch (Region_map::Region_conflict) { - log("attach failed as expected"); } + sub_rm.attach(ds, { + .size = { }, .offset = { }, + .use_at = { }, .at = { }, + .executable = { }, .writeable = true + }).with_result( + [&] (Region_map::Range) { + fail("sub rm attach_any unexpectedly did not fail"); }, + [&] (Region_map::Attach_error e) { + if (e == Region_map::Attach_error::REGION_CONFLICT) + log("attach failed as expected"); } + ); } log("attach RAM ds to a fixed position at sub rm"); - enum { DS_SUB_OFFSET = 4096 }; - if ((addr_t)sub_rm.attach_at(ds, DS_SUB_OFFSET, 0, 0) != DS_SUB_OFFSET) - fail("attach_at return-value mismatch"); + addr_t const DS_SUB_OFFSET = 4096; + sub_rm.attach(ds, { + .size = { }, .offset = { }, + .use_at = true, .at = DS_SUB_OFFSET, + .executable = { }, .writeable = { } + }).with_result( + 
[&] (Region_map::Range const range) { + if (range.start != DS_SUB_OFFSET) + fail("attach-at return-value mismatch"); }, + [&] (Region_map::Attach_error) { } + ); log("attach sub rm at local address space"); @@ -117,8 +129,15 @@ void Component::construct(Env &env) */ addr_t const local_attach_addr = config.xml().attribute_value("local_attach_addr", (addr_t)0); - char *sub_rm_base = env.rm().attach_at(sub_rm.dataspace(), - local_attach_addr); + + char * const sub_rm_base = env.rm().attach(sub_rm.dataspace(), { + .size = { }, .offset = { }, + .use_at = true, .at = local_attach_addr, + .executable = { }, .writeable = true + }).convert( + [&] (Region_map::Range const range) { return (char *)range.start; }, + [&] (Region_map::Attach_error) { return nullptr; } + ); log("validate pattern in sub rm"); validate_pattern_at(test_pattern(), sub_rm_base + DS_SUB_OFFSET); @@ -129,9 +148,17 @@ void Component::construct(Env &env) */ log("attach RAM ds at another fixed position at sub rm"); - enum { DS_SUB_OFFSET_2 = 0x40000 }; - if ((addr_t)sub_rm.attach_at(ds, DS_SUB_OFFSET_2, 0, 0) != DS_SUB_OFFSET_2) - fail("attach_at return-value mismatch"); + addr_t const DS_SUB_OFFSET_2 = 0x40000; + sub_rm.attach(ds, { + .size = { }, .offset = { }, + .use_at = true, .at = DS_SUB_OFFSET_2, + .executable = { }, .writeable = { } + }).with_result( + [&] (Region_map::Range const range) { + if (range.start != DS_SUB_OFFSET_2) + fail("attach-at return-value mismatch"); }, + [&] (Region_map::Attach_error) { } + ); log("validate pattern in second mapping in sub rm"); validate_pattern_at(test_pattern(), sub_rm_base + DS_SUB_OFFSET_2); @@ -140,35 +167,50 @@ void Component::construct(Env &env) * Try to cross the boundaries of the sub RM session. This should * produce an error. */ - try { - sub_rm.attach_at(ds, SUB_RM_SIZE - 4096, 0, 0); - fail("undetected boundary conflict\n"); - } - catch (Region_map::Region_conflict) { - log("attaching beyond sub RM boundary failed as expected"); } + sub_rm.attach(ds, { + .size = { }, .offset = { }, + .use_at = true, .at = SUB_RM_SIZE - 4096, + .executable = { }, .writeable = true + }).with_result( + [&] (Region_map::Range) { + fail("undetected boundary conflict\n"); }, + [&] (Region_map::Attach_error e) { + if (e == Region_map::Attach_error::REGION_CONFLICT) + log("attaching beyond sub RM boundary failed as expected"); } + ); /* * Check for working region - conflict detection */ log("attaching RAM ds to a conflicting region"); - try { - sub_rm.attach_at(ds, DS_SUB_OFFSET + 4096, 0, 0); - fail("region conflict went undetected\n"); - } - catch (Region_map::Region_conflict) { - log("attaching conflicting region failed as expected"); } + sub_rm.attach(ds, { + .size = { }, .offset = { }, + .use_at = true, .at = DS_SUB_OFFSET + 4096, + .executable = { }, .writeable = true + }).with_result( + [&] (Region_map::Range) { + fail("region conflict went undetected"); }, + [&] (Region_map::Attach_error e) { + if (e == Region_map::Attach_error::REGION_CONFLICT) + log("attaching conflicting region failed as expected"); } + ); if (config.xml().attribute_value("attach_twice_forbidden", false)) { /* * Try to double-attach the same sub RM session. 
This should fail */ log("attach sub rm again at local address space"); - try { - env.rm().attach(sub_rm.dataspace()); - fail("double attachment of sub RM session went undetected\n"); - } - catch (Region_map::Region_conflict) { - log("doubly attaching sub RM session failed as expected"); } + env.rm().attach(sub_rm.dataspace(), { + .size = { }, .offset = { }, + .use_at = { }, .at = { }, + .executable = { }, .writeable = true + }).with_result( + [&] (Region_map::Range) { + fail("double attachment of sub RM session went undetected"); }, + [&] (Region_map::Attach_error e) { + if (e == Region_map::Attach_error::REGION_CONFLICT) + log("doubly attaching sub RM session failed as expected"); } + ); } /* @@ -178,8 +220,12 @@ * page. */ log("attach RAM ds with offset"); - enum { DS_SUB_OFFSET_3 = 0x80000 }; - sub_rm.attach_at(ds, DS_SUB_OFFSET_3, 0, 4096); + addr_t const DS_SUB_OFFSET_3 = 0x80000; + sub_rm.attach(ds, { + .size = { }, .offset = 4096, + .use_at = true, .at = DS_SUB_OFFSET_3, + .executable = { }, .writeable = true + }); validate_pattern_at(test_pattern_2(), sub_rm_base + DS_SUB_OFFSET_3); /* @@ -187,15 +233,19 @@ * starting with the second page. */ log("attach RAM ds with offset and size"); - enum { DS_SUB_OFFSET_4 = 0xc0000 }; - sub_rm.attach_at(ds, DS_SUB_OFFSET_4, 2*4096, 4096); + addr_t const DS_SUB_OFFSET_4 = 0xc0000; + sub_rm.attach(ds, { + .size = 2*4096, .offset = 4096, + .use_at = true, .at = DS_SUB_OFFSET_4, + .executable = { }, .writeable = true + }); validate_pattern_at(test_pattern_2(), sub_rm_base + DS_SUB_OFFSET_4); /* * Detach the first attachment (to be validated by the run script by * inspecting '/proc/pid/maps' after running the test. */ - sub_rm.detach((void *)DS_SUB_OFFSET); + sub_rm.detach(DS_SUB_OFFSET); log("--- end of sub-rm test ---"); diff --git a/repos/base/src/test/token/main.cc b/repos/base/src/test/token/main.cc index 1595e6f9f4..dcfdcaae19 100644 --- a/repos/base/src/test/token/main.cc +++ b/repos/base/src/test/token/main.cc @@ -38,10 +38,20 @@ static void test_out_of_bounds_access(Env &env) Attached_ram_dataspace buf_ds(env.ram(), env.rm(), BUF_SIZE); /* attach buffer at start of managed dataspace, leave 2nd page as guard */ - sub_rm.attach_at(buf_ds.cap(), 0); + sub_rm.attach(buf_ds.cap(), { + .size = { }, .offset = { }, + .use_at = true, .at = 0, + .executable = { }, .writeable = true }); /* locally attach managed dataspace */ - char * const buf_ptr = env.rm().attach(sub_rm.dataspace()); + char * const buf_ptr = env.rm().attach(sub_rm.dataspace(), { + .size = { }, .offset = { }, + .use_at = { }, .at = { }, + .executable = { }, .writeable = true } + ).convert( + [&] (Region_map::Range range) { return (char *)range.start; }, + [&] (Region_map::Attach_error) { return nullptr; } + ); auto tokenize_two_tokens_at_end_of_buffer = [&] (char const * const input) { diff --git a/repos/dde_ipxe/src/lib/dde_ipxe/dde_support.cc b/repos/dde_ipxe/src/lib/dde_ipxe/dde_support.cc index 1e5a5d70f9..1ba8d8a250 100644 --- a/repos/dde_ipxe/src/lib/dde_ipxe/dde_support.cc +++ b/repos/dde_ipxe/src/lib/dde_ipxe/dde_support.cc @@ -346,21 +346,21 @@ extern "C" dde_addr_t dde_dma_get_physaddr(void *virt) { extern "C" dde_uint8_t dde_inb(dde_addr_t port) { - dde_uint8_t v; + dde_uint8_t v { }; pci_drv().with_io_port([&] (Io_port & iop) { v = iop.inb(port); }); return v; } extern "C" dde_uint16_t dde_inw(dde_addr_t port) { - dde_uint16_t v; + dde_uint16_t v { }; pci_drv().with_io_port([&] (Io_port & iop) { v = iop.inw(port); });
return v; } extern "C" dde_uint32_t dde_inl(dde_addr_t port) { - dde_uint32_t v; + dde_uint32_t v { }; pci_drv().with_io_port([&] (Io_port & iop) { v = iop.inl(port); }); return v; } @@ -384,17 +384,16 @@ struct Slab_backend_alloc : public Genode::Allocator, public Genode::Rm_connection, public Genode::Region_map_client { - enum { - VM_SIZE = 2 * 1024 * 1024, - BLOCK_SIZE = 64 * 1024, - ELEMENTS = VM_SIZE / BLOCK_SIZE, - }; + static constexpr Genode::size_t VM_SIZE = 2 * 1024 * 1024, + BLOCK_SIZE = 64 * 1024, + ELEMENTS = VM_SIZE / BLOCK_SIZE; - Genode::addr_t _base; - Genode::Ram_dataspace_capability _ds_cap[ELEMENTS]; - int _index; - Genode::Allocator_avl _range; - Genode::Ram_allocator &_ram; + Genode::Attached_dataspace _managed_ds; + Genode::addr_t _base = Genode::addr_t(_managed_ds.local_addr()); + Genode::Ram_dataspace_capability _ds_cap[ELEMENTS]; + unsigned _index = 0; + Genode::Allocator_avl _range; + Genode::Ram_allocator &_ram; struct Extend_ok { }; using Extend_result = Genode::Attempt; @@ -414,30 +413,41 @@ struct Slab_backend_alloc : public Genode::Allocator, _ds_cap[_index] = ds; - Alloc_error error = Alloc_error::DENIED; + return Region_map_client::attach(_ds_cap[_index], { + .size = BLOCK_SIZE, + .offset = { }, + .use_at = true, + .at = _index*BLOCK_SIZE, + .executable = false, + .writeable = true + }).convert( - try { - Region_map_client::attach_at(_ds_cap[_index], - _index * BLOCK_SIZE, - BLOCK_SIZE, 0); - /* return base + offset in VM area */ - addr_t block_base = _base + (_index * BLOCK_SIZE); - ++_index; + [&] (Region_map::Range range) { - _range.add_range(block_base, BLOCK_SIZE); + _index++; - return Extend_ok(); - } - catch (Out_of_ram) { error = Alloc_error::OUT_OF_RAM; } - catch (Out_of_caps) { error = Alloc_error::OUT_OF_CAPS; } - catch (...) { error = Alloc_error::DENIED; } + return _range.add_range(_base + range.start, range.num_bytes) + .convert( + [&] (Range_allocator::Range_ok) { return Extend_ok(); }, + [&] (Alloc_error e) { return e; }); + }, - Genode::error("Slab_backend_alloc: local attach_at failed"); + [&] (Region_map::Attach_error e) { - _ram.free(ds); - _ds_cap[_index] = { }; + Genode::error("Slab_backend_alloc: local attach_at failed"); + _ram.free(ds); + _ds_cap[_index] = { }; - return error; + using Error = Region_map::Attach_error; + switch (e) { + case Error::OUT_OF_RAM: return Alloc_error::OUT_OF_RAM; + case Error::OUT_OF_CAPS: return Alloc_error::OUT_OF_CAPS; + case Error::INVALID_DATASPACE: break; + case Error::REGION_CONFLICT: break; + } + return Alloc_error::DENIED; + } + ); }, [&] (Alloc_error e) -> Extend_result { @@ -451,11 +461,8 @@ struct Slab_backend_alloc : public Genode::Allocator, : Rm_connection(env), Region_map_client(Rm_connection::create(VM_SIZE)), - _index(0), _range(&md_alloc), _ram(ram) - { - /* reserver attach us, anywere */ - _base = rm.attach(dataspace()); - } + _managed_ds(rm, dataspace()), _range(&md_alloc), _ram(ram) + { } Genode::addr_t start() const { return _base; } Genode::addr_t end() const { return _base + VM_SIZE - 1; } diff --git a/repos/dde_rump/src/include/util/allocator_fap.h b/repos/dde_rump/src/include/util/allocator_fap.h index 499de98a28..a3151f5b07 100644 --- a/repos/dde_rump/src/include/util/allocator_fap.h +++ b/repos/dde_rump/src/include/util/allocator_fap.h @@ -67,10 +67,27 @@ namespace Allocator { addr_t _base; /* virt. 
base address */ Cache _cache; /* non-/cached RAM */ Ram_dataspace_capability _ds_cap[ELEMENTS]; /* dataspaces to put in VM */ - int _index = 0; /* current index in ds_cap */ + unsigned _index = 0; /* current index in ds_cap */ Allocator_avl _range; /* manage allocations */ bool _quota_exceeded = false; + addr_t _attach_managed_ds(Region_map &local_rm) + { + return local_rm.attach(dataspace(), { + .size = { }, + .offset = { }, + .use_at = { }, + .at = { }, + .executable = false, + .writeable = true + }).convert( + [&] (Range range) { return range.start; }, + [&] (Attach_error) { + error("rump backend allocator failed to attach managed dataspace"); + return 0UL; } + ); + } + bool _alloc_block() { if (_quota_exceeded) @@ -83,29 +100,39 @@ namespace Allocator { Policy_guard guard; - try { - _ds_cap[_index] = Rump::env().env().ram().alloc(BLOCK_SIZE, _cache); - /* attach at index * BLOCK_SIZE */ - Region_map_client::attach_at(_ds_cap[_index], _index * BLOCK_SIZE, BLOCK_SIZE, 0); - } catch (Genode::Out_of_ram) { - warning("backend allocator exhausted (out of RAM)"); - _quota_exceeded = true; - return false; - } catch (Genode::Out_of_caps) { - warning("backend allocator exhausted (out of caps)"); - _quota_exceeded = true; - return false; - } catch (Genode::Region_map::Region_conflict) { - warning("backend VM region exhausted"); - _quota_exceeded = true; + _ds_cap[_index] = Rump::env().env().ram().try_alloc(BLOCK_SIZE, _cache) + .template convert( + [&] (Ram_dataspace_capability cap) { return cap; }, + [&] (Allocator::Alloc_error) { return Ram_dataspace_capability(); } + ); + + if (!_ds_cap[_index].valid()) { + warning("backend allocator exhausted"); return false; } - /* return base + offset in VM area */ - addr_t block_base = _base + (_index * BLOCK_SIZE); - ++_index; + if (Region_map_client::attach(_ds_cap[_index], { + .size = BLOCK_SIZE, + .offset = { }, + .use_at = true, + .at = _index*BLOCK_SIZE, + .executable = false, + .writeable = true + }).failed()) { + warning("failed to locally attach backend memory"); + Rump::env().env().ram().free(_ds_cap[_index]); + return false; + } - _range.add_range(block_base, BLOCK_SIZE); + addr_t const block_base = _base + _index*BLOCK_SIZE; + if (_range.add_range(block_base, BLOCK_SIZE).failed()) { + warning("failed to extend backend allocator metadata"); + Region_map_client::detach(_index*BLOCK_SIZE); + Rump::env().env().ram().free(_ds_cap[_index]); + _ds_cap[_index] = { }; + return false; + } + ++_index; return true; } @@ -115,12 +142,10 @@ namespace Allocator { : Rm_connection(Rump::env().env()), Region_map_client(Rm_connection::create(VM_SIZE)), + _base(_attach_managed_ds(Rump::env().env().rm())), _cache(cache), _range(&Rump::env().heap()) - { - /* reserver attach us, anywere */ - _base = Rump::env().env().rm().attach(dataspace()); - } + { } /** * Allocate diff --git a/repos/dde_rump/src/lib/vfs/rump/vfs_rump.cc b/repos/dde_rump/src/lib/vfs/rump/vfs_rump.cc index ed9151df46..e1636afe5a 100644 --- a/repos/dde_rump/src/lib/vfs/rump/vfs_rump.cc +++ b/repos/dde_rump/src/lib/vfs/rump/vfs_rump.cc @@ -456,39 +456,59 @@ class Vfs::Rump_file_system : public File_system Genode::Dataspace_capability dataspace(char const *path) override { - Genode::Env &env = _env.env(); + struct stat s { }; + if (rump_sys_lstat(path, &s) != 0) + return { }; - int fd = rump_sys_open(path, O_RDONLY); - if (fd == -1) return Genode::Dataspace_capability(); + using Region_map = Genode::Region_map; - struct stat s; - if (rump_sys_lstat(path, &s) != 0) return Genode::Dataspace_capability(); 
- size_t const ds_size = s.st_size; - - char *local_addr = nullptr; - Ram_dataspace_capability ds_cap; - try { - ds_cap = env.ram().alloc(ds_size); - - local_addr = env.rm().attach(ds_cap); - - enum { CHUNK_SIZE = 16U << 10 }; - - for (size_t i = 0; i < ds_size;) { - ssize_t n = rump_sys_read(fd, &local_addr[i], min(ds_size-i, CHUNK_SIZE)); - if (n == -1) - throw n; - i += n; + auto read_file_content = [&path] (Region_map::Range const range) -> bool + { + int const fd = rump_sys_open(path, O_RDONLY); + size_t i = 0; /* bytes read */ + if (fd >= 0) { + while (i < range.num_bytes) { + size_t const CHUNK_SIZE = 16U << 10; + ssize_t const n = rump_sys_read(fd, (void *)(range.start + i), + min(range.num_bytes - i, CHUNK_SIZE)); + if (n <= 0) + break; + i += n; + } + rump_sys_close(fd); } + return (i == range.num_bytes); + }; - env.rm().detach(local_addr); - } catch(...) { - if (local_addr) - env.rm().detach(local_addr); - env.ram().free(ds_cap); - } - rump_sys_close(fd); - return ds_cap; + return _env.env().ram().try_alloc(s.st_size).convert( + [&] (Ram_dataspace_capability const ds_cap) { + return _env.env().rm().attach(ds_cap, { + .size = { }, .offset = { }, .use_at = { }, + .at = { }, .executable = { }, .writeable = true + }).convert( + [&] (Region_map::Range const range) -> Dataspace_capability { + + bool const complete = read_file_content(range); + _env.env().rm().detach(range.start); + + if (complete) + return ds_cap; + + Genode::error("rump failed to read content into VFS dataspace"); + _env.env().ram().free(ds_cap); + return Dataspace_capability(); + }, + [&] (Region_map::Attach_error) { + _env.env().ram().free(ds_cap); + return Dataspace_capability(); + } + ); + }, + [&] (Genode::Ram_allocator::Alloc_error) { + Genode::error("rump failed to allocate VFS dataspace of size ", s.st_size); + return Dataspace_capability(); + } + ); } void release(char const *path, diff --git a/repos/gems/src/app/trace_recorder/monitor.h b/repos/gems/src/app/trace_recorder/monitor.h index a5ed3a627a..d27b181f53 100644 --- a/repos/gems/src/app/trace_recorder/monitor.h +++ b/repos/gems/src/app/trace_recorder/monitor.h @@ -73,6 +73,7 @@ class Trace_recorder::Monitor private: Env &_env; + Attached_dataspace _ds; Trace_buffer _buffer; Registry::Element _element; Subject_info _info; @@ -86,18 +87,15 @@ class Trace_recorder::Monitor Genode::Dataspace_capability ds, Trace::Subject_info const &info, Trace::Subject_id id) - : _env(env), - _buffer(*((Trace::Buffer*)_env.rm().attach(ds))), - _element(registry, *this), - _info(info), - _subject_id(id) + : + _env(env), + _ds(env.rm(), ds), + _buffer(*_ds.local_addr()), + _element(registry, *this), + _info(info), + _subject_id(id) { } - ~Attached_buffer() - { - _env.rm().detach(_buffer.address()); - } - void process_events(Trace_directory &); Registry &writers() { return _writers; } diff --git a/repos/gems/src/lib/vfs/trace/vfs.cc b/repos/gems/src/lib/vfs/trace/vfs.cc index 8b1c804754..c444f74342 100644 --- a/repos/gems/src/lib/vfs/trace/vfs.cc +++ b/repos/gems/src/lib/vfs/trace/vfs.cc @@ -55,14 +55,23 @@ class Vfs_trace::Trace_buffer_file_system : public Single_file_system void setup(Dataspace_capability ds) { - _buffer.construct(*((Trace::Buffer *)_env.env().rm().attach(ds))); + _env.env().rm().attach(ds, { + .size = { }, .offset = { }, + .use_at = { }, .at = { }, + .executable = { }, .writeable = true + }).with_result( + [&] (Region_map::Range range) { + _buffer.construct(*(Trace::Buffer *)range.start); }, + [&] (Region_map::Attach_error) { + error("failed to attach 
trace buffer"); } + ); } void flush() { if (!_buffer.constructed()) return; - _env.env().rm().detach(_buffer->address()); + _env.env().rm().detach(addr_t(_buffer->address())); _buffer.destruct(); } diff --git a/repos/libports/src/driver/framebuffer/vesa/framebuffer.cc b/repos/libports/src/driver/framebuffer/vesa/framebuffer.cc index fb225c0339..0989ee65c3 100644 --- a/repos/libports/src/driver/framebuffer/vesa/framebuffer.cc +++ b/repos/libports/src/driver/framebuffer/vesa/framebuffer.cc @@ -161,10 +161,20 @@ int Framebuffer::map_io_mem(addr_t base, size_t size, bool write_combined, if (!io_ds.valid()) return -2; - try { - *out_addr = genode_env().rm().attach(io_ds, size, 0, addr != 0, addr); - } - catch (Region_map::Region_conflict) { return -3; } + genode_env().rm().attach(io_ds, { + .size = size, + .offset = { }, + .use_at = (addr != 0), + .at = addr, + .executable = false, + .writeable = true + }).with_result( + [&] (Region_map::Range range) { *out_addr = (void *)range.start; }, + [&] (Region_map::Attach_error) { } + ); + + if (*out_addr == nullptr) + return -3; log("fb mapped to ", *out_addr); diff --git a/repos/libports/src/lib/acpica/iomem.cc b/repos/libports/src/lib/acpica/iomem.cc index 89f7424571..732e594567 100644 --- a/repos/libports/src/lib/acpica/iomem.cc +++ b/repos/libports/src/lib/acpica/iomem.cc @@ -180,14 +180,23 @@ class Acpica::Io_mem /* create managed dataspace to let virt region reserved */ Genode::Region_map_client managed_region(rm_conn->create(io_mem._size)); /* remember virt, since it get invalid during invalidate() */ - Genode::addr_t const re_attach_virt = reinterpret_cast(io_mem._virt); + Genode::addr_t const orig_virt = reinterpret_cast(io_mem._virt); /* drop I/O mem and virt region get's freed */ io_mem.invalidate(); /* re-attach dummy managed dataspace to virt region */ - Genode::addr_t const re_attached_virt = Acpica::env().rm().attach_at(managed_region.dataspace(), re_attach_virt); - if (re_attach_virt != re_attached_virt) + Genode::addr_t const re_attached_virt = + Acpica::env().rm().attach(managed_region.dataspace(), { + .size = { }, .offset = { }, + .use_at = true, .at = orig_virt, + .executable = false, .writeable = true + }).convert( + [&] (Genode::Region_map::Range range) { return range.start; }, + [&] (Genode::Region_map::Attach_error) { return 0UL; } + ); + + if (orig_virt != re_attached_virt) FAIL(0); if (!io_mem.unused() || io_mem.stale()) @@ -235,7 +244,7 @@ class Acpica::Io_mem return; if (!stale()) { - Acpica::env().rm().detach(_virt); + Acpica::env().rm().detach(Genode::addr_t(_virt)); Genode::destroy(Acpica::heap(), _io_mem); } @@ -294,8 +303,14 @@ class Acpica::Io_mem if (!io_mem) return 0UL; - io_mem->_virt = Acpica::env().rm().attach(io_mem->_io_mem->dataspace(), - io_mem->_size); + io_mem->_virt = Acpica::env().rm().attach(io_mem->_io_mem->dataspace(), { + .size = io_mem->_size, .offset = { }, + .use_at = { }, .at = { }, + .executable = { }, .writeable = true + }).convert( + [&] (Genode::Region_map::Range r) { return (Genode::uint8_t *)r.start; }, + [&] (Genode::Region_map::Attach_error) { return nullptr; } + ); return reinterpret_cast(io_mem->_virt); } @@ -303,7 +318,7 @@ class Acpica::Io_mem Genode::addr_t pre_expand(ACPI_PHYSICAL_ADDRESS p, ACPI_SIZE s) { if (_io_mem) { - Acpica::env().rm().detach(_virt); + Acpica::env().rm().detach(Genode::addr_t(_virt)); Genode::destroy(Acpica::heap(), _io_mem); } @@ -317,7 +332,7 @@ class Acpica::Io_mem Genode::addr_t post_expand(ACPI_PHYSICAL_ADDRESS p, ACPI_SIZE s) { if (_io_mem) { - 
Acpica::env().rm().detach(_virt); + Acpica::env().rm().detach(Genode::addr_t(_virt)); Genode::destroy(Acpica::heap(), _io_mem); } @@ -352,14 +367,28 @@ class Acpica::Io_mem Genode::addr_t virt = reinterpret_cast(io2._virt); Acpica::env().rm().detach(virt); - Acpica::env().rm().attach_at(io_ds, virt, io2._size, off_phys); + if (Acpica::env().rm().attach(io_ds, { + .size = io2._size, .offset = off_phys, + .use_at = true, .at = virt, + .executable = { }, .writeable = true + }).failed()) Genode::error("re-attach io2 failed"); }); if (io_mem._virt) FAIL(0UL); /* attach whole memory */ - io_mem._virt = Acpica::env().rm().attach(io_ds); + io_mem._virt = Acpica::env().rm().attach(io_ds, { + .size = { }, .offset = { }, + .use_at = { }, .at = { }, + .executable = { }, .writeable = true + }).convert( + [&] (Genode::Region_map::Range range) { return (Genode::uint8_t *)range.start; }, + [&] (Genode::Region_map::Attach_error) { + Genode::error("attaching while io_ds failed"); + return nullptr; + } + ); return io_mem.to_virt(p); }); diff --git a/repos/libports/src/lib/libc/internal/cloned_malloc_heap_range.h b/repos/libports/src/lib/libc/internal/cloned_malloc_heap_range.h index 60d19c10fd..379218e67e 100644 --- a/repos/libports/src/lib/libc/internal/cloned_malloc_heap_range.h +++ b/repos/libports/src/lib/libc/internal/cloned_malloc_heap_range.h @@ -30,28 +30,45 @@ struct Libc::Cloned_malloc_heap_range Ram_dataspace_capability ds; - size_t const size; - addr_t const local_addr; + using Range = Region_map::Range; - Cloned_malloc_heap_range(Ram_allocator &ram, Region_map &rm, - void *start, size_t size) - try : - ram(ram), rm(rm), ds(ram.alloc(size)), size(size), - local_addr(rm.attach_at(ds, (addr_t)start)) - { } - catch (Region_map::Region_conflict) { - error("could not clone heap region ", Hex_range((addr_t)start, size)); - throw; + Range const range; + + Cloned_malloc_heap_range(Ram_allocator &ram, Region_map &rm, Range const range) + : + ram(ram), rm(rm), ds(ram.alloc(range.num_bytes)), range(range) + { + rm.attach(ds, { + .size = { }, + .offset = { }, + .use_at = true, + .at = range.start, + .executable = { }, + .writeable = true + }).with_result( + [&] (Range) { }, + [&] (Region_map::Attach_error e) { + using Error = Region_map::Attach_error; + switch (e) { + case Error::OUT_OF_RAM: throw Out_of_ram(); + case Error::OUT_OF_CAPS: throw Out_of_caps(); + case Error::INVALID_DATASPACE: break; + case Error::REGION_CONFLICT: break; + } + error("failed to clone heap region ", + Hex_range(range.start, range.num_bytes)); + } + ); } void import_content(Clone_connection &clone_connection) { - clone_connection.memory_content((void *)local_addr, size); + clone_connection.memory_content((void *)range.start, range.num_bytes); } virtual ~Cloned_malloc_heap_range() { - rm.detach(local_addr); + rm.detach(range.start); ram.free(ds); } }; diff --git a/repos/libports/src/lib/libc/internal/mem_alloc.h b/repos/libports/src/lib/libc/internal/mem_alloc.h index bbc8908050..959b8e64c4 100644 --- a/repos/libports/src/lib/libc/internal/mem_alloc.h +++ b/repos/libports/src/lib/libc/internal/mem_alloc.h @@ -50,13 +50,15 @@ namespace Libc { MAX_CHUNK_SIZE = 1024*1024 }; + using Range = Region_map::Range; + struct Dataspace : List::Element { Ram_dataspace_capability cap; - void *local_addr; + Range range; - Dataspace(Ram_dataspace_capability c, void *a) - : cap(c), local_addr(a) {} + Dataspace(Ram_dataspace_capability cap, Range range) + : cap(cap), range(range) { } }; class Dataspace_pool : public List diff --git 
a/repos/libports/src/lib/libc/kernel.cc b/repos/libports/src/lib/libc/kernel.cc index f2af848e2b..a820679034 100644 --- a/repos/libports/src/lib/libc/kernel.cc +++ b/repos/libports/src/lib/libc/kernel.cc @@ -342,13 +342,13 @@ void Libc::Kernel::_handle_user_interrupt() void Libc::Kernel::_clone_state_from_parent() { - struct Range { void *at; size_t size; }; + using Range = Region_map::Range; - auto range_attr = [&] (Xml_node node) + auto range_attr = [&] (Xml_node const &node) { return Range { - .at = (void *)node.attribute_value("at", 0UL), - .size = node.attribute_value("size", 0UL) + .start = node.attribute_value("at", 0UL), + .num_bytes = node.attribute_value("size", 0UL) }; }; @@ -365,7 +365,7 @@ void Libc::Kernel::_clone_state_from_parent() new (_heap) Registered(_cloned_heap_ranges, _env.ram(), _env.rm(), - range.at, range.size); }); + range); }); _clone_connection.construct(_env); @@ -385,7 +385,7 @@ void Libc::Kernel::_clone_state_from_parent() auto copy_from_parent = [&] (Range range) { - _clone_connection->memory_content(range.at, range.size); + _clone_connection->memory_content((void *)range.start, range.num_bytes); }; /* clone application stack */ diff --git a/repos/libports/src/lib/libc/libc_mem_alloc.cc b/repos/libports/src/lib/libc/libc_mem_alloc.cc index 6e2746d70c..7d3463b36c 100644 --- a/repos/libports/src/lib/libc/libc_mem_alloc.cc +++ b/repos/libports/src/lib/libc/libc_mem_alloc.cc @@ -36,12 +36,12 @@ Libc::Mem_alloc_impl::Dataspace_pool::~Dataspace_pool() */ Ram_dataspace_capability ds_cap = ds->cap; - void const * const local_addr = ds->local_addr; + Range const range = ds->range; remove(ds); delete ds; - _region_map->detach(local_addr); + _region_map->detach(range.start); _ram->free(ds_cap); } } @@ -49,33 +49,58 @@ Libc::Mem_alloc_impl::Dataspace_pool::~Dataspace_pool() int Libc::Mem_alloc_impl::Dataspace_pool::expand(size_t size, Range_allocator *alloc) { - Ram_dataspace_capability new_ds_cap; - void *local_addr; - /* make new ram dataspace available at our local address space */ - try { - new_ds_cap = _ram->alloc(size); - enum { MAX_SIZE = 0, NO_OFFSET = 0, ANY_LOCAL_ADDR = false }; - local_addr = _region_map->attach(new_ds_cap, MAX_SIZE, NO_OFFSET, - ANY_LOCAL_ADDR, nullptr, _executable); - } - catch (Out_of_ram) { return -2; } - catch (Out_of_caps) { return -4; } - catch (Region_map::Region_conflict) { + Ram_dataspace_capability new_ds_cap { }; + int result = 0; + _ram->try_alloc(size).with_result( + [&] (Ram_dataspace_capability cap) { new_ds_cap = cap; }, + [&] (Ram_allocator::Alloc_error e) { + switch (e) { + case Ram_allocator::Alloc_error::OUT_OF_RAM: result = -2; break; + case Ram_allocator::Alloc_error::OUT_OF_CAPS: result = -4; break; + case Ram_allocator::Alloc_error::DENIED: break; + } + result = -5; + }); + + if (result < 0) + return result; + + Region_map::Range const range = _region_map->attach(new_ds_cap, { + .size = { }, + .offset = { }, + .use_at = { }, + .at = { }, + .executable = _executable, + .writeable = true + }).convert( + [&] (Region_map::Range range) { return range; }, + [&] (Region_map::Attach_error e) { + switch (e) { + case Region_map::Attach_error::OUT_OF_RAM: result = -2; break; + case Region_map::Attach_error::OUT_OF_CAPS: result = -4; break; + case Region_map::Attach_error::INVALID_DATASPACE: result = -6; break; + case Region_map::Attach_error::REGION_CONFLICT: break; + } + result = -7; + return Region_map::Range { }; + }); + + if (result < 0) { _ram->free(new_ds_cap); - return -3; + return result; } /* add new local address 
range to our local allocator */ - alloc->add_range((addr_t)local_addr, size); + alloc->add_range(range.start, range.num_bytes); /* now that we have new backing store, allocate Dataspace structure */ return alloc->alloc_aligned(sizeof(Dataspace), 2).convert( [&] (void *ptr) { /* add dataspace information to list of dataspaces */ - Dataspace *ds = construct_at(ptr, new_ds_cap, local_addr); + Dataspace *ds = construct_at(ptr, new_ds_cap, range); insert(ds); return 0; }, diff --git a/repos/libports/src/lib/libc/vfs_plugin.cc b/repos/libports/src/lib/libc/vfs_plugin.cc index a2bdf23c7f..b061826348 100644 --- a/repos/libports/src/lib/libc/vfs_plugin.cc +++ b/repos/libports/src/lib/libc/vfs_plugin.cc @@ -2424,9 +2424,19 @@ void *Libc::Vfs_plugin::mmap(void *addr_in, ::size_t length, int prot, int flags return MAP_FAILED; } - try { - addr = region_map().attach(ds_cap, length, offset); - } catch (...) { + region_map().attach(ds_cap, { + .size = length, + .offset = addr_t(offset), + .use_at = { }, + .at = { }, + .executable = { }, + .writeable = true + }).with_result( + [&] (Region_map::Range range) { addr = (void *)range.start; }, + [&] (Region_map::Attach_error) { addr = nullptr; } + ); + + if (!addr) { monitor().monitor([&] { reference_handle->close(); return Fn::COMPLETE; @@ -2469,7 +2479,7 @@ int Libc::Vfs_plugin::munmap(void *addr, ::size_t) if (entry.start == addr) { reference_handle = entry.reference_handle; destroy(_alloc, &entry); - region_map().detach(addr); + region_map().detach(addr_t(addr)); } }); diff --git a/repos/libports/src/lib/libdrm/ioctl_iris.cc b/repos/libports/src/lib/libdrm/ioctl_iris.cc index 637adccf03..f5723227b7 100644 --- a/repos/libports/src/lib/libdrm/ioctl_iris.cc +++ b/repos/libports/src/lib/libdrm/ioctl_iris.cc @@ -333,17 +333,28 @@ struct Drm::Buffer bool mmap(Genode::Env &env) { - if (_local_addr) return true; + using Region_map = Genode::Region_map; - _local_addr = Gpu::addr_t(env.rm().attach(_allocation.cap, _allocation.size, - _allocation.offset)); - return true; + if (!_local_addr) + env.rm().attach(_allocation.cap, { + .size = _allocation.size, + .offset = Genode::addr_t(_allocation.offset), + .use_at = { }, + .at = { }, + .executable = { }, + .writeable = true + }).with_result( + [&] (Region_map::Range range) { _local_addr = range.start; }, + [&] (Region_map::Attach_error) { Genode::error("Drm::Buffer::mmap failed"); } + ); + + return (_local_addr != 0); } void unmap() { if (_local_addr) - _env.rm().detach((void *)_local_addr); + _env.rm().detach(_local_addr); _local_addr = 0; } diff --git a/repos/libports/src/test/ldso/include/test-ldso.h b/repos/libports/src/test/ldso/include/test-ldso.h index b6091b79dc..d82d5c926f 100644 --- a/repos/libports/src/test/ldso/include/test-ldso.h +++ b/repos/libports/src/test/ldso/include/test-ldso.h @@ -20,6 +20,8 @@ void lib_1_good(); void lib_1_exception(); void lib_2_exception(); +struct Lib_1_exception { }; + struct Lib_1_local_3 { int x { 0x12345678 }; diff --git a/repos/libports/src/test/ldso/lib_1.cc b/repos/libports/src/test/ldso/lib_1.cc index d6bb68de1d..758803c610 100644 --- a/repos/libports/src/test/ldso/lib_1.cc +++ b/repos/libports/src/test/ldso/lib_1.cc @@ -115,7 +115,7 @@ static void lib_1_attr_destructor_2() { log(__func__, " ", Hex(--lib_1_pod_2)); static void exception() { throw 666; } -void lib_1_exception() { throw Genode::Region_map::Region_conflict(); } +void lib_1_exception() { throw Lib_1_exception(); } void lib_1_good() { } diff --git a/repos/libports/src/test/ldso/main.cc 
b/repos/libports/src/test/ldso/main.cc index 325bf62b73..e69a517765 100644 --- a/repos/libports/src/test/ldso/main.cc +++ b/repos/libports/src/test/ldso/main.cc @@ -247,7 +247,7 @@ void Libc::Component::construct(Libc::Env &env) lib_1_exception(); error("undelivered exception in shared lib"); } - catch (Region_map::Region_conflict) { log("exception in shared lib: caught"); } + catch (Lib_1_exception) { log("exception in shared lib: caught"); } try { __ldso_raise_exception(); diff --git a/repos/os/include/os/packet_stream.h b/repos/os/include/os/packet_stream.h index 7a7a498b26..4b8114958d 100644 --- a/repos/os/include/os/packet_stream.h +++ b/repos/os/include/os/packet_stream.h @@ -76,7 +76,7 @@ #include #include #include -#include +#include #include #include @@ -492,8 +492,7 @@ class Genode::Packet_stream_base Genode::Region_map &_rm; Genode::Dataspace_capability _ds_cap; - void *_ds_local_base; - Genode::size_t _ds_size { 0 }; + Genode::Attached_dataspace _ds { _rm, _ds_cap }; Genode::off_t _submit_queue_offset; Genode::off_t _ack_queue_offset; @@ -515,43 +514,24 @@ class Genode::Packet_stream_base _rm(rm), _ds_cap(transport_ds), /* map dataspace locally */ - _ds_local_base(rm.attach(_ds_cap)), _submit_queue_offset(0), _ack_queue_offset(_submit_queue_offset + submit_queue_size), _bulk_buffer_offset(align_addr(_ack_queue_offset + ack_queue_size, 6)) { - Genode::size_t ds_size = Genode::Dataspace_client(_ds_cap).size(); - - if ((Genode::size_t)_bulk_buffer_offset >= ds_size) + if ((Genode::size_t)_bulk_buffer_offset >= _ds.size()) throw Transport_dataspace_too_small(); - _ds_size = ds_size; - _bulk_buffer_size = ds_size - _bulk_buffer_offset; - } - - /** - * Destructor - */ - ~Packet_stream_base() - { - /* - * Prevent throwing exceptions from the destructor. Otherwise, - * the compiler may generate implicit calls to 'std::terminate'. - */ - try { - /* unmap transport dataspace locally */ - _rm.detach(_ds_local_base); - } catch (...) 
{ } + _bulk_buffer_size = _ds.size() - _bulk_buffer_offset; } void *_submit_queue_local_base() { - return (void *)((Genode::addr_t)_ds_local_base + _submit_queue_offset); } + return _ds.local_addr() + _submit_queue_offset; } void *_ack_queue_local_base() { - return (void *)((Genode::addr_t)_ds_local_base + _ack_queue_offset); } + return _ds.local_addr() + _ack_queue_offset; } Genode::addr_t _bulk_buffer_local_base() { - return (Genode::addr_t)_ds_local_base + _bulk_buffer_offset; } + return (Genode::addr_t)_ds.local_addr() + _bulk_buffer_offset; } /** * Hook for unit testing @@ -578,11 +558,11 @@ class Genode::Packet_stream_base if (!packet_valid(packet) || packet.size() < sizeof(CONTENT_TYPE)) throw Packet_descriptor::Invalid_packet(); - return (CONTENT_TYPE *)((Genode::addr_t)_ds_local_base + packet.offset()); + return (CONTENT_TYPE *)(_ds.local_addr() + packet.offset()); } - Genode::addr_t ds_local_base() const { return (Genode::addr_t)_ds_local_base; } - Genode::addr_t ds_size() const { return _ds_size; } + Genode::addr_t ds_local_base() const { return (Genode::addr_t)_ds.local_addr(); } + Genode::addr_t ds_size() const { return _ds.size(); } }; @@ -853,8 +833,8 @@ class Genode::Packet_stream_source : private Packet_stream_base Genode::Dataspace_capability dataspace() { return Packet_stream_base::_dataspace(); } - Genode::addr_t ds_local_base() const { return reinterpret_cast(_ds_local_base); } - Genode::addr_t ds_size() const { return Packet_stream_base::_ds_size; } + Genode::addr_t ds_local_base() const { return (Genode::addr_t)_ds.local_addr(); } + Genode::addr_t ds_size() const { return Packet_stream_base::_ds.size(); } }; @@ -1030,8 +1010,8 @@ class Genode::Packet_stream_sink : private Packet_stream_base Genode::Dataspace_capability dataspace() { return Packet_stream_base::_dataspace(); } - Genode::addr_t ds_local_base() const { return reinterpret_cast(_ds_local_base); } - Genode::addr_t ds_size() const { return Packet_stream_base::_ds_size; } + Genode::addr_t ds_local_base() const { return (Genode::addr_t)_ds.local_addr(); } + Genode::addr_t ds_size() const { return Packet_stream_base::_ds.size(); } }; #endif /* _INCLUDE__OS__PACKET_STREAM_H_ */ diff --git a/repos/os/src/app/trace_logger/monitor.cc b/repos/os/src/app/trace_logger/monitor.cc index 050576b7de..87bfeea753 100644 --- a/repos/os/src/app/trace_logger/monitor.cc +++ b/repos/os/src/app/trace_logger/monitor.cc @@ -15,7 +15,6 @@ #include /* Genode includes */ -#include #include using namespace Genode; @@ -80,25 +79,6 @@ struct Conditional }; -/****************** - ** Monitor_base ** - ******************/ - -Monitor_base::Monitor_base(Trace::Connection &trace, - Region_map &rm, - Trace::Subject_id subject_id) -: - _trace(trace), _rm(rm), - _buffer_raw(*(Trace::Buffer *)rm.attach(_trace.buffer(subject_id))) -{ } - - -Monitor_base::~Monitor_base() -{ - _rm.detach(&_buffer_raw); -} - - /************* ** Monitor ** *************/ diff --git a/repos/os/src/app/trace_logger/monitor.h b/repos/os/src/app/trace_logger/monitor.h index 44110fca15..264fd48946 100644 --- a/repos/os/src/app/trace_logger/monitor.h +++ b/repos/os/src/app/trace_logger/monitor.h @@ -19,7 +19,9 @@ /* Genode includes */ #include +#include #include +#include namespace Genode { namespace Trace { class Connection; } } @@ -33,13 +35,15 @@ class Monitor_base Genode::Trace::Connection &_trace; Genode::Region_map &_rm; - Genode::Trace::Buffer &_buffer_raw; + Genode::Attached_dataspace _ds; + Genode::Trace::Buffer &_buffer_raw = *_ds.local_addr(); 
Monitor_base(Genode::Trace::Connection &trace, Genode::Region_map &rm, - Genode::Trace::Subject_id subject_id); - - ~Monitor_base(); + Genode::Trace::Subject_id subject_id) + : + _trace(trace), _rm(rm), _ds(rm, _trace.buffer(subject_id)) + { } }; diff --git a/repos/os/src/app/trace_logger/policy.cc b/repos/os/src/app/trace_logger/policy.cc index 6bd6eca35c..72686fd785 100644 --- a/repos/os/src/app/trace_logger/policy.cc +++ b/repos/os/src/app/trace_logger/policy.cc @@ -11,6 +11,9 @@ * under the terms of the GNU Affero General Public License version 3. */ +/* Genode includes */ +#include + /* local includes */ #include @@ -26,11 +29,9 @@ Policy::Policy(Env &env, Trace::Connection &trace, Policy_dict &dict, [&] (Trace::Policy_id id) { Dataspace_capability const dst_ds = _trace.policy(id); if (dst_ds.valid()) { - void * const dst = _env.rm().attach(dst_ds); - void const * const src = _env.rm().attach(_ds); - memcpy(dst, src, _size); - _env.rm().detach(dst); - _env.rm().detach(src); + Attached_dataspace dst { _env.rm(), dst_ds }, + src { _env.rm(), _ds }; + memcpy(dst.local_addr(), src.local_addr(), _size); return; } warning("failed to obtain policy buffer for '", name, "'"); diff --git a/repos/os/src/driver/acpi/memory.h b/repos/os/src/driver/acpi/memory.h index 63b66e8a7e..8ef5d7c844 100644 --- a/repos/os/src/driver/acpi/memory.h +++ b/repos/os/src/driver/acpi/memory.h @@ -185,9 +185,14 @@ class Acpi::Memory * address is the offset of loop_region.base() from * _io_region.base(). */ - _acpi_window.attach_at( - _range.metadata((void *)loop_region.base())->connection->dataspace(), - loop_region.base() - _io_region->base(), loop_region.size()); + _acpi_window.attach(_range.metadata((void *)loop_region.base())->connection->dataspace(), { + .size = loop_region.size(), + .offset = { }, + .use_at = true, + .at = loop_region.base() - _io_region->base(), + .executable = { }, + .writeable = { } + }); return _acpi_ptr(req_base); } diff --git a/repos/os/src/driver/gpu/intel/platform_session.h b/repos/os/src/driver/gpu/intel/platform_session.h index c40af5dc3c..282fe3f1d3 100644 --- a/repos/os/src/driver/gpu/intel/platform_session.h +++ b/repos/os/src/driver/gpu/intel/platform_session.h @@ -322,7 +322,7 @@ class Platform::Resources : Noncopyable, public Hw_ready_state /* GTT starts at half of the mmio memory */ size_t const gttm_half_size = mmio.size() / 2; - off_t const gtt_offset = gttm_half_size; + addr_t const gtt_offset = gttm_half_size; if (gttm_half_size < gtt_reserved()) { Genode::error("GTTM size too small"); @@ -331,15 +331,36 @@ class Platform::Resources : Noncopyable, public Hw_ready_state /* attach actual iomem + reserved */ _rm_gttmm.detach(0ul); - _rm_gttmm.attach_at(mmio.cap(), 0ul, gtt_offset); + if (_rm_gttmm.attach(mmio.cap(), { + .size = gtt_offset, + .offset = { }, + .use_at = true, + .at = 0, + .executable = { }, + .writeable = true + }).failed()) error("failed to re-attach mmio to gttmm"); /* attach beginning of GTT */ _rm_gttmm.detach(gtt_offset); - _rm_gttmm.attach_at(mmio.cap(), gtt_offset, - gtt_reserved(), gtt_offset); + if (_rm_gttmm.attach(mmio.cap(), { + .size = size_t(gtt_reserved()), + .offset = gtt_offset, + .use_at = true, + .at = gtt_offset, + .executable = { }, + .writeable = true + }).failed()) error("failed to re-attach mmio at gtt offset to gttmm"); _rm_gmadr.detach(0ul); - _rm_gmadr.attach_at(gmadr.cap(), 0ul, aperture_reserved()); + if (_rm_gmadr.attach(gmadr.cap(), { + .size = size_t(aperture_reserved()), + .offset = { }, + .use_at = true, + .at = 0, + 
.executable = { }, + .writeable = true + }).failed()) error("failed to re-attach gmadr"); + }, []() { error("reinit failed"); }); @@ -407,12 +428,32 @@ class Platform::Resources : Noncopyable, public Hw_ready_state auto const dummmy_gtt_ds = _env.ram().alloc(Igd::PAGE_SIZE); auto remainder = gttm_half_size - gtt_reserved(); - for (off_t offset = gtt_offset + gtt_reserved(); + for (addr_t offset = gtt_offset + gtt_reserved(); remainder > 0; offset += Igd::PAGE_SIZE, remainder -= Igd::PAGE_SIZE) { - rm.retry_with_upgrade({Igd::PAGE_SIZE}, Cap_quota{8}, [&]() { - _rm_gttmm.attach_at(dummmy_gtt_ds, offset, Igd::PAGE_SIZE); }); + for (;;) { + Region_map::Attach_result const result = + _rm_gttmm.attach(dummmy_gtt_ds, { + .size = Igd::PAGE_SIZE, + .offset = { }, + .use_at = true, + .at = offset, + .executable = false, + .writeable = true + }); + if (result.ok()) + break; + + using Error = Region_map::Attach_error; + + if (result == Error::OUT_OF_RAM) rm.upgrade_ram(Igd::PAGE_SIZE); + else if (result == Error::OUT_OF_CAPS) rm.upgrade_caps(8); + else { + error("failed to fill up GTT as dummy RAM"); + break; + } + } } } diff --git a/repos/os/src/driver/gpu/intel/ppgtt_allocator.h b/repos/os/src/driver/gpu/intel/ppgtt_allocator.h index e661b613ea..0460ee7dc1 100644 --- a/repos/os/src/driver/gpu/intel/ppgtt_allocator.h +++ b/repos/os/src/driver/gpu/intel/ppgtt_allocator.h @@ -77,28 +77,44 @@ class Igd::Ppgtt_allocator : public Genode::Translation_table_allocator catch (Gpu::Session::Out_of_caps) { throw; } catch (...) { return Alloc_error::DENIED; } - Alloc_error alloc_error = Alloc_error::DENIED; + return _rm.attach(ds, { + .size = { }, + .offset = { }, + .use_at = { }, + .at = { }, + .executable = { }, + .writeable = true + }).convert( - try { - void * const va = _rm.attach(ds); - void * const pa = (void*)_backend.dma_addr(ds); + [&] (Genode::Region_map::Range const range) -> Alloc_result { - if (_map.add(ds, pa, va, alloc_size) == true) { - _range.add_range((Genode::addr_t)va, alloc_size); - result = _range.alloc_aligned(size, 12); - return result; + void * const va = (void*)range.start; + void * const pa = (void*)_backend.dma_addr(ds); + + if (_map.add(ds, pa, va, range.num_bytes) == true) { + if (_range.add_range(range.start, range.num_bytes).ok()) + return _range.alloc_aligned(size, 12); + + Genode::error("Ppgtt_allocator failed to extend meta data"); + } + + /* _map.add failed, roll back _rm.attach */ + _rm.detach(range.start); + _backend.free(ds); + return Alloc_error::DENIED; + }, + + [&] (Genode::Region_map::Attach_error e) { + + _backend.free(ds); + + using Error = Genode::Region_map::Attach_error; + + if (e == Error::OUT_OF_RAM) return Alloc_error::OUT_OF_RAM; + if (e == Error::OUT_OF_CAPS) return Alloc_error::OUT_OF_CAPS; + return Alloc_error::DENIED; } - - /* _map.add failed, roll back _rm.attach */ - _rm.detach(va); - } - catch (Genode::Out_of_ram) { alloc_error = Alloc_error::OUT_OF_RAM; } - catch (Genode::Out_of_caps) { alloc_error = Alloc_error::OUT_OF_CAPS; } - catch (...) 
{ alloc_error = Alloc_error::DENIED; } - - /* roll back allocation */ - _backend.free(ds); - return alloc_error; + ); } void free(void *addr, size_t size) override diff --git a/repos/os/src/driver/platform/device_pd.cc b/repos/os/src/driver/platform/device_pd.cc index 695ad6f6d0..471aaad89a 100644 --- a/repos/os/src/driver/platform/device_pd.cc +++ b/repos/os/src/driver/platform/device_pd.cc @@ -25,31 +25,17 @@ using namespace Driver; -Device_pd::Region_map_client::Local_addr -Device_pd::Region_map_client::attach(Dataspace_capability ds, - size_t size, - off_t offset, - bool use_local_addr, - Local_addr local_addr, - bool executable, - bool writeable) + +Device_pd::Region_map_client::Attach_result +Device_pd::Region_map_client::attach(Dataspace_capability ds, Attr const &attr) { - return retry( - [&] () { - return retry( - [&] () { - return Genode::Region_map_client::attach(ds, size, offset, - use_local_addr, - local_addr, - executable, - writeable); }, - [&] () { - upgrade_caps(); - } - ); - }, - [&] () { upgrade_ram(); } - ); + for (;;) { + Attach_result const result = Genode::Region_map_client::attach(ds, attr); + if (result == Attach_error::OUT_OF_RAM) upgrade_ram(); + else if (result == Attach_error::OUT_OF_CAPS) upgrade_caps(); + else + return result; + } } @@ -116,21 +102,26 @@ void Device_pd::remove_range(Io_mmu::Range const & range) void Device_pd::enable_pci_device(Io_mem_dataspace_capability const io_mem_cap, Pci::Bdf const & bdf) { - addr_t addr = _address_space.attach(io_mem_cap, 0x1000); + _address_space.attach(io_mem_cap, { + .size = 0x1000, .offset = { }, + .use_at = { }, .at = { }, + .executable = { }, .writeable = true + }).with_result( + [&] (Region_map::Range range) { - /* sanity check */ - if (!addr) - throw Region_map::Region_conflict(); + /* trigger eager mapping of memory */ + _pd.map(Pd_session::Virt_range { range.start, range.num_bytes }); - /* trigger eager mapping of memory */ - _pd.map(Pd_session::Virt_range { addr, 0x1000 }); + /* try to assign pci device to this protection domain */ + if (!_pd.assign_pci(range.start, Pci::Bdf::rid(bdf))) + log("Assignment of PCI device ", bdf, " to device PD failed, no IOMMU?!"); - /* try to assign pci device to this protection domain */ - if (!_pd.assign_pci(addr, Pci::Bdf::rid(bdf))) - log("Assignment of PCI device ", bdf, " to device PD failed, no IOMMU?!"); - - /* we don't need the mapping anymore */ - _address_space.detach(addr); + /* we don't need the mapping anymore */ + _address_space.detach(range.start); + }, + [&] (Region_map::Attach_error) { + error("failed to attach PCI device to device PD"); } + ); } diff --git a/repos/os/src/driver/platform/device_pd.h b/repos/os/src/driver/platform/device_pd.h index 3ca4e89fcd..ffbeb3d8e1 100644 --- a/repos/os/src/driver/platform/device_pd.h +++ b/repos/os/src/driver/platform/device_pd.h @@ -66,13 +66,7 @@ class Driver::Device_pd : public Io_mmu::Domain _ram_guard(ram_guard), _cap_guard(cap_guard) { } - Local_addr attach(Dataspace_capability ds, - size_t size = 0, - off_t offset = 0, - bool use_local_addr = false, - Local_addr local_addr = (void *)0, - bool executable = false, - bool writeable = true) override; + Attach_result attach(Dataspace_capability ds, Attr const &attr) override; void upgrade_ram(); void upgrade_caps(); diff --git a/repos/os/src/lib/sandbox/library.cc b/repos/os/src/lib/sandbox/library.cc index 846c9f39d9..9bad5ee66b 100644 --- a/repos/os/src/lib/sandbox/library.cc +++ b/repos/os/src/lib/sandbox/library.cc @@ -402,12 +402,9 @@ bool 
Genode::Sandbox::Library::ready_to_create_child(Start_model::Name const warning("local capabilities exhausted during child creation"); } catch (Child::Missing_name_attribute) { warning("skipped startup of nameless child"); } - catch (Region_map::Region_conflict) { + catch (Attached_dataspace::Region_conflict) { warning("failed to attach dataspace to local address space " "during child construction"); } - catch (Region_map::Invalid_dataspace) { - warning("attempt to attach invalid dataspace to local address space " - "during child construction"); } catch (Service_denied) { warning("failed to create session during child construction"); } diff --git a/repos/os/src/lib/vfs/ram_file_system.h b/repos/os/src/lib/vfs/ram_file_system.h index a06984f81a..c0a724685b 100644 --- a/repos/os/src/lib/vfs/ram_file_system.h +++ b/repos/os/src/lib/vfs/ram_file_system.h @@ -898,38 +898,42 @@ class Vfs::Ram_file_system : public Vfs::File_system { using namespace Vfs_ram; - Ram_dataspace_capability ds_cap; - Node * const node = lookup(path); if (!node) - return ds_cap; + return { }; File * const file = dynamic_cast(node); if (!file) - return ds_cap; + return { }; - size_t len = file->length(); + size_t const len = file->length(); - char *local_addr = nullptr; - try { - ds_cap = _env.env().ram().alloc(len); - - local_addr = _env.env().rm().attach(ds_cap); - file->read(Byte_range_ptr(local_addr, file->length()), Seek{0}); - _env.env().rm().detach(local_addr); - - } catch(...) { - _env.env().rm().detach(local_addr); - _env.env().ram().free(ds_cap); - return Dataspace_capability(); - } - return ds_cap; + return _env.env().ram().try_alloc(len).convert( + [&] (Ram_dataspace_capability ds_cap) { + return _env.env().rm().attach(ds_cap, { + .size = { }, .offset = { }, .use_at = { }, + .at = { }, .executable = { }, .writeable = true + }).convert( + [&] (Region_map::Range const range) { + file->read(Byte_range_ptr((char *)range.start, len), Seek{0}); + _env.env().rm().detach(range.start); + return ds_cap; + }, + [&] (Region_map::Attach_error) { + _env.env().ram().free(ds_cap); + return Dataspace_capability(); + } + ); + }, + [&] (Ram_allocator::Alloc_error) { return Dataspace_capability(); } + ); } - void release(char const *, Dataspace_capability ds_cap) override { + void release(char const *, Dataspace_capability ds_cap) override + { _env.env().ram().free( - static_cap_cast(ds_cap)); } - + static_cap_cast(ds_cap)); + } Watch_result watch(char const * const path, Vfs_watch_handle **handle, Allocator &alloc) override diff --git a/repos/os/src/lib/vfs/tar_file_system.h b/repos/os/src/lib/vfs/tar_file_system.h index 2424b36968..1f12c8a28e 100644 --- a/repos/os/src/lib/vfs/tar_file_system.h +++ b/repos/os/src/lib/vfs/tar_file_system.h @@ -563,19 +563,30 @@ class Vfs::Tar_file_system : public File_system return Dataspace_capability(); } - try { - Ram_dataspace_capability ds_cap = - _env.ram().alloc((size_t)record->size()); + size_t const len = size_t(record->size()); - void *local_addr = _env.rm().attach(ds_cap); - memcpy(local_addr, record->data(), (size_t)record->size()); - _env.rm().detach(local_addr); + using Region_map = Genode::Region_map; - return ds_cap; - } - catch (...) 
{ Genode::warning(__func__, " could not create new dataspace"); } - - return Dataspace_capability(); + return _env.ram().try_alloc(len).convert( + [&] (Ram_dataspace_capability ds_cap) { + return _env.rm().attach(ds_cap, { + .size = { }, .offset = { }, .use_at = { }, + .at = { }, .executable = { }, .writeable = true + }).convert( + [&] (Region_map::Range const range) { + memcpy((void *)range.start, record->data(), len); + _env.rm().detach(range.start); + return ds_cap; + }, + [&] (Region_map::Attach_error) { + _env.ram().free(ds_cap); + return Dataspace_capability(); + } + ); + }, + [&] (Genode::Ram_allocator::Alloc_error) { + return Dataspace_capability(); } + ); } void release(char const *, Dataspace_capability ds_cap) override diff --git a/repos/os/src/monitor/gdb_stub.h b/repos/os/src/monitor/gdb_stub.h index bc137c3c03..a4f71000ab 100644 --- a/repos/os/src/monitor/gdb_stub.h +++ b/repos/os/src/monitor/gdb_stub.h @@ -77,10 +77,10 @@ struct Monitor::Gdb::State : Noncopyable linker_area_region.writeable ? "ram" : "rom"); xml.attribute("start", - Value(Hex(region.range.addr + - linker_area_region.range.addr))); + Value(Hex(region.range.start + + linker_area_region.range.start))); xml.attribute("length", - Value(Hex(linker_area_region.range.size))); + Value(Hex(linker_area_region.range.num_bytes))); }); }); @@ -97,10 +97,10 @@ struct Monitor::Gdb::State : Noncopyable stack_area_region.writeable ? "ram" : "rom"); xml.attribute("start", - Value(Hex(region.range.addr + - stack_area_region.range.addr))); + Value(Hex(region.range.start + + stack_area_region.range.start))); xml.attribute("length", - Value(Hex(stack_area_region.range.size))); + Value(Hex(stack_area_region.range.num_bytes))); }); }); @@ -108,9 +108,9 @@ struct Monitor::Gdb::State : Noncopyable } xml.node("memory", [&] { - xml.attribute("type", region.writeable ? "ram" : "rom"); - xml.attribute("start", Value(Hex(region.range.addr))); - xml.attribute("length", Value(Hex(region.range.size))); + xml.attribute("type", region.writeable ? 
"ram" : "rom"); + xml.attribute("start", Value(Hex(region.range.start))); + xml.attribute("length", Value(Hex(region.range.num_bytes))); }); }); }); diff --git a/repos/os/src/monitor/memory_accessor.h b/repos/os/src/monitor/memory_accessor.h index a122cdb55c..f245e2f125 100644 --- a/repos/os/src/monitor/memory_accessor.h +++ b/repos/os/src/monitor/memory_accessor.h @@ -52,14 +52,29 @@ class Monitor::Memory_accessor : Noncopyable struct { uint8_t * const _local_ptr; }; + uint8_t *_attach() + { + return _local_rm.attach(_pd._address_space.dataspace(), { + .size = WINDOW_SIZE, + .offset = _offset, + .use_at = { }, + .at = { }, + .executable = false, + .writeable = true + }).convert( + [&] (Region_map::Range range) { return (uint8_t *)range.start; }, + [&] (Region_map::Attach_error) { return nullptr; } + ); + } + Curr_view(Region_map &local_rm, Inferior_pd &pd, addr_t offset) : - _local_rm(local_rm), _pd(pd), _offset(offset), - _local_ptr(_local_rm.attach(pd._address_space.dataspace(), - WINDOW_SIZE, offset)) + _local_rm(local_rm), _pd(pd), _offset(offset), _local_ptr(_attach()) { } - ~Curr_view() { _local_rm.detach(_local_ptr); } + ~Curr_view() { if (_local_ptr) _local_rm.detach(addr_t(_local_ptr)); } + + bool valid() const { return (_local_ptr != nullptr); }; bool _in_curr_range(Virt_addr at) const { @@ -271,8 +286,9 @@ class Monitor::Memory_accessor : Noncopyable if (!_curr_view.constructed()) { addr_t const offset = at.value & ~(WINDOW_SIZE - 1); - try { _curr_view.construct(_env.rm(), pd, offset); } - catch (Region_map::Region_conflict) { + _curr_view.construct(_env.rm(), pd, offset); + if (!_curr_view->valid()) { + _curr_view.destruct(); warning("attempt to access memory outside the virtual address space: ", Hex(at.value)); return 0; diff --git a/repos/os/src/monitor/monitored_region_map.h b/repos/os/src/monitor/monitored_region_map.h index bca46d396b..c45286e89e 100644 --- a/repos/os/src/monitor/monitored_region_map.h +++ b/repos/os/src/monitor/monitored_region_map.h @@ -66,7 +66,7 @@ struct Monitor::Monitored_region_map : Monitored_rpc_object } Dataspace_capability create_writable_copy(Dataspace_capability orig_ds, - off_t offset, size_t size) + addr_t offset, size_t size) { Attached_dataspace ds { _local_rm, orig_ds }; @@ -86,6 +86,12 @@ struct Monitor::Monitored_region_map : Monitored_rpc_object Constructible _writeable_text_segments { }; + static bool _intersects(Range const &a, Range const &b) + { + addr_t const a_end = a.start + a.num_bytes - 1; + addr_t const b_end = b.start + b.num_bytes - 1; + return (b.start <= a_end) && (b_end >= a.start); + } void writeable_text_segments(Allocator &alloc, Ram_allocator &ram, @@ -97,27 +103,16 @@ struct Monitor::Monitored_region_map : Monitored_rpc_object struct Region : Registry::Element { - struct Range - { - addr_t addr; - size_t size; - - bool intersects(Range const &other) const - { - addr_t end = addr + size - 1; - addr_t other_end = other.addr + other.size - 1; - return ((other.addr <= end) && (other_end >= addr)); - } - }; - Dataspace_capability cap; Range range; bool writeable; Region(Registry ®istry, Dataspace_capability cap, - addr_t addr, size_t size, bool writeable) - : Registry::Element(registry, *this), - cap(cap), range(addr, size), writeable(writeable) { } + Range range, bool writeable) + : + Registry::Element(registry, *this), + cap(cap), range(range), writeable(writeable) + { } }; Registry _regions { }; @@ -125,8 +120,7 @@ struct Monitor::Monitored_region_map : Monitored_rpc_object void for_each_region(auto const &fn) const 
{ _regions.for_each([&] (Region const ®ion) { - fn(region); - }); + fn(region); }); } Allocator &_alloc; @@ -147,57 +141,52 @@ struct Monitor::Monitored_region_map : Monitored_rpc_object ** Region_map interface ** **************************/ - Local_addr attach(Dataspace_capability ds, size_t size = 0, - off_t offset = 0, bool use_local_addr = false, - Local_addr local_addr = (void *)0, - bool executable = false, - bool writeable = true) override + Attach_result attach(Dataspace_capability ds, Attr const &orig_attr) override { - if (executable && !writeable && _writeable_text_segments.constructed()) { - ds = _writeable_text_segments->create_writable_copy(ds, offset, size); - offset = 0; - writeable = true; + Attr attr = orig_attr; + if (attr.executable && !attr.writeable && _writeable_text_segments.constructed()) { + ds = _writeable_text_segments->create_writable_copy(ds, attr.offset, attr.size); + attr.offset = 0; + attr.writeable = true; } - Local_addr attached_addr = _real.call(ds, size, offset, - use_local_addr, - local_addr, - executable, - writeable); - size_t region_size = size ? size : - (Dataspace_client(ds).size() - offset); - enum { PAGE_SIZE_LOG2 = 12 }; - region_size = align_addr(region_size, PAGE_SIZE_LOG2); + return _real.call(ds, attr).convert( + [&] (Range const range) -> Attach_result { + /* + * It can happen that previous attachments got implicitly + * removed by destruction of the dataspace without knowledge + * of the monitor. The newly obtained region could then + * overlap with outdated region registry entries which must + * be removed before inserting the new region. + */ + _regions.for_each([&] (Region ®ion) { + if (_intersects(region.range, range)) + destroy(_alloc, ®ion); }); - /* - * It can happen that previous attachments got implicitly - * removed by destruction of the dataspace without knowledge - * of the monitor. The newly obtained region could then - * overlap with outdated region registry entries which must - * be removed before inserting the new region. 
- */ - - Region::Range range { attached_addr, region_size }; - - _regions.for_each([&] (Region ®ion) { - if (region.range.intersects(range)) - destroy(_alloc, ®ion); - }); - - new (_alloc) Region(_regions, ds, (addr_t)attached_addr, - region_size, writeable); - - return attached_addr; + try { + new (_alloc) Region(_regions, ds, range, attr.writeable); + } + catch (Out_of_ram) { + _real.call(range.start); + return Attach_error::OUT_OF_RAM; + } + catch (Out_of_caps) { + _real.call(range.start); + return Attach_error::OUT_OF_CAPS; + } + return range; + }, + [&] (Attach_error e) { return e; } + ); } - void detach(Local_addr local_addr) override + void detach(addr_t const at) override { - _real.call(local_addr); + _real.call(at); _regions.for_each([&] (Region ®ion) { - if (region.range.intersects(Region::Range { local_addr, 1 })) - destroy(_alloc, ®ion); - }); + if (_intersects(region.range, Range { at, 1 })) + destroy(_alloc, ®ion); }); } void fault_handler(Signal_context_capability) override @@ -205,10 +194,7 @@ struct Monitor::Monitored_region_map : Monitored_rpc_object warning("Monitored_region_map: ignoring custom fault_handler for ", _name); } - State state() override - { - return _real.call(); - } + Fault fault() override { return _real.call(); } Dataspace_capability dataspace() override { diff --git a/repos/os/src/server/cached_fs_rom/main.cc b/repos/os/src/server/cached_fs_rom/main.cc index 5afb7cf95e..2bcbbb174f 100644 --- a/repos/os/src/server/cached_fs_rom/main.cc +++ b/repos/os/src/server/cached_fs_rom/main.cc @@ -74,7 +74,7 @@ struct Cached_fs_rom::Cached_rom final * Read-only region map exposed as ROM module to the client */ Region_map_client rm { rm_connection.create(ram_ds.size()) }; - Region_map::Local_addr rm_attachment { }; + addr_t rm_attachment { }; Dataspace_capability rm_ds { }; Path const path; @@ -117,10 +117,20 @@ struct Cached_fs_rom::Cached_rom final void complete() { /* attach dataspace read-only into region map */ - enum { OFFSET = 0, LOCAL_ADDR = false, EXEC = true, WRITE = false }; - rm_attachment = rm.attach( - ram_ds.cap(), ram_ds.size(), OFFSET, - LOCAL_ADDR, (addr_t)~0, EXEC, WRITE); + rm_attachment = rm.attach(ram_ds.cap(), { + .size = ram_ds.size(), + .offset = { }, + .use_at = { }, + .at = { }, + .executable = true, + .writeable = false + }).convert( + [&] (Region_map::Range range) { return range.start; }, + [&] (Region_map::Attach_error) { + error("Cached_rom failed to locally attach managed dataspace"); + return 0UL; + } + ); rm_ds = rm.dataspace(); } diff --git a/repos/os/src/server/nic_router/nic_session_root.cc b/repos/os/src/server/nic_router/nic_session_root.cc index 5914b1b5d5..b2c44f4bdc 100644 --- a/repos/os/src/server/nic_router/nic_session_root.cc +++ b/repos/os/src/server/nic_router/nic_session_root.cc @@ -332,14 +332,6 @@ Nic_session_component *Net::Nic_session_root::_create_session(char const *args) return result; }); } - catch (Region_map::Invalid_dataspace) { - _invalid_downlink("Failed to attach RAM"); - throw Service_denied(); - } - catch (Region_map::Region_conflict) { - _invalid_downlink("Failed to attach RAM"); - throw Service_denied(); - } catch (Out_of_ram) { _invalid_downlink("NIC session RAM quota"); throw Insufficient_ram_quota(); @@ -362,8 +354,8 @@ void Net::Nic_session_root::_destroy_session(Nic_session_component *session) /* copy session env to stack and detach/free all session data */ Session_env session_env_stack { session_env }; - session_env_stack.detach(session); - session_env_stack.detach(&session_env); + 
session_env_stack.detach(addr_t(session)); + session_env_stack.detach(addr_t(&session_env)); session_env_stack.free(ram_ds); _mac_alloc.free(mac); diff --git a/repos/os/src/server/nic_router/session_creation.h b/repos/os/src/server/nic_router/session_creation.h index bd1250dcca..af4813eab0 100644 --- a/repos/os/src/server/nic_router/session_creation.h +++ b/repos/os/src/server/nic_router/session_creation.h @@ -56,7 +56,18 @@ class Net::Session_creation /* alloc/attach ram ds and move session env to the base of it */ _ram_ds.construct(_tmp_session_env->alloc(sizeof(Session_env) + sizeof(SESSION_COMPONENT), CACHED)); - _ram_ptr = _tmp_session_env->attach(*_ram_ds); + _tmp_session_env->attach(*_ram_ds, { + .size = { }, .offset = { }, + .use_at = { }, .at = { }, + .executable = { }, .writeable = true + }).with_result( + [&] (Region_map::Range range) { _ram_ptr = (void *)range.start; }, + [&] (Region_map::Attach_error e) { + if (e == Region_map::Attach_error::OUT_OF_RAM) throw Out_of_ram(); + if (e == Region_map::Attach_error::OUT_OF_CAPS) throw Out_of_caps(); + error("failed to attach Session_creation::_ram_ds"); } + ); + _session_env_ptr = construct_at(_ram_ptr, *_tmp_session_env); /* create session right behind the session env inside the ram ds */ @@ -76,14 +87,14 @@ class Net::Session_creation if (_ram_ds.constructed()) tmp_session_env.free(*_ram_ds); if (_ram_ptr) - tmp_session_env.detach(_ram_ptr); + tmp_session_env.detach(addr_t(_ram_ptr)); } else if (_tmp_session_env.constructed()) { if (_ram_ds.constructed()) _tmp_session_env->free(*_ram_ds); if (_ram_ptr) - _tmp_session_env->detach(_ram_ptr); + _tmp_session_env->detach(addr_t(_ram_ptr)); } } }; diff --git a/repos/os/src/server/nic_router/session_env.h b/repos/os/src/server/nic_router/session_env.h index ca4ccb0ef6..e83c800521 100644 --- a/repos/os/src/server/nic_router/session_env.h +++ b/repos/os/src/server/nic_router/session_env.h @@ -148,24 +148,21 @@ class Genode::Session_env : public Ram_allocator, ** Region_map ** ****************/ - Local_addr attach(Dataspace_capability ds, - size_t size = 0, - off_t offset = 0, - bool use_local_addr = false, - Local_addr local_addr = (void *)0, - bool executable = false, - bool writeable = true) override + Attach_result attach(Dataspace_capability ds, Attr const &attr) override { enum { MAX_SHARED_CAP = 2 }; enum { MAX_SHARED_RAM = 4 * 4096 }; - void *ptr; - _consume(0, MAX_SHARED_RAM, 0, MAX_SHARED_CAP, [&] () { - ptr = _env.rm().attach(ds, size, offset, use_local_addr, - local_addr, executable, writeable); - }); - return ptr; - }; + Attach_result result = Attach_error::REGION_CONFLICT; + try { + _consume(0, MAX_SHARED_RAM, 0, MAX_SHARED_CAP, [&] { + result = _env.rm().attach(ds, attr); + }); + } + catch (Out_of_ram) { result = Attach_error::OUT_OF_RAM; } + catch (Out_of_caps) { result = Attach_error::OUT_OF_CAPS; } + return result; + } bool report_empty() const { return false; } @@ -183,13 +180,13 @@ class Genode::Session_env : public Ram_allocator, }); } - void detach(Local_addr local_addr) override + void detach(addr_t at) override { - _replenish(0, 0, [&] () { _env.rm().detach(local_addr); }); + _replenish(0, 0, [&] { _env.rm().detach(at); }); } void fault_handler(Signal_context_capability handler) override { _env.rm().fault_handler(handler); } - State state() override { return _env.rm().state(); } + Fault fault() override { return _env.rm().fault(); } Dataspace_capability dataspace() override { return _env.rm().dataspace(); } diff --git 
a/repos/os/src/server/nic_router/uplink_session_root.cc b/repos/os/src/server/nic_router/uplink_session_root.cc index df17db98b4..746c3b5fc2 100644 --- a/repos/os/src/server/nic_router/uplink_session_root.cc +++ b/repos/os/src/server/nic_router/uplink_session_root.cc @@ -163,14 +163,6 @@ Net::Uplink_session_root::_create_session(char const *args) _timer, mac, label, _interfaces, *_config_ptr, ram_ds); }); } - catch (Region_map::Invalid_dataspace) { - _invalid_downlink("Failed to attach RAM"); - throw Service_denied(); - } - catch (Region_map::Region_conflict) { - _invalid_downlink("Failed to attach RAM"); - throw Service_denied(); - } catch (Out_of_ram) { _invalid_downlink("Uplink session RAM quota"); throw Insufficient_ram_quota(); @@ -192,8 +184,8 @@ Net::Uplink_session_root::_destroy_session(Uplink_session_component *session) /* copy session env to stack and detach/free all session data */ Session_env session_env_stack { session_env }; - session_env_stack.detach(session); - session_env_stack.detach(&session_env); + session_env_stack.detach(addr_t(session)); + session_env_stack.detach(addr_t(&session_env)); session_env_stack.free(ram_ds); /* check for leaked quota */ diff --git a/repos/os/src/server/nic_uplink/main.cc b/repos/os/src/server/nic_uplink/main.cc index eae4380d1d..21db1faab5 100644 --- a/repos/os/src/server/nic_uplink/main.cc +++ b/repos/os/src/server/nic_uplink/main.cc @@ -624,14 +624,6 @@ Net::Uplink_session_root::_create_session(char const *args) return &session; }); } - catch (Region_map::Invalid_dataspace) { - log_if(_main.verbose(), "[uplink] failed to attach RAM"); - throw Service_denied(); - } - catch (Region_map::Region_conflict) { - log_if(_main.verbose(), "[uplink] failed to attach RAM"); - throw Service_denied(); - } catch (Out_of_ram) { log_if(_main.verbose(), "[uplink] insufficient session RAM quota"); throw Insufficient_ram_quota(); @@ -653,8 +645,8 @@ void Net::Uplink_session_root::_destroy_session(Uplink_session_component *sessio /* copy session env to stack and detach/free all session data */ Session_env session_env_stack { session_env }; - session_env_stack.detach(session_ptr); - session_env_stack.detach(&session_env); + session_env_stack.detach(addr_t(session_ptr)); + session_env_stack.detach(addr_t(&session_env)); session_env_stack.free(ram_ds); /* check for leaked quota */ @@ -703,14 +695,6 @@ Nic_session_component *Net::Nic_session_root::_create_session(char const *args) } ); } - catch (Region_map::Invalid_dataspace) { - log_if(_main.verbose(), "[nic] failed to attach RAM"); - throw Service_denied(); - } - catch (Region_map::Region_conflict) { - log_if(_main.verbose(), "[nic] failed to attach RAM"); - throw Service_denied(); - } catch (Out_of_ram) { log_if(_main.verbose(), "[nic] insufficient session RAM quota"); throw Insufficient_ram_quota(); @@ -732,8 +716,8 @@ void Net::Nic_session_root::_destroy_session(Nic_session_component *session_ptr) /* copy session env to stack and detach/free all session data */ Session_env session_env_stack { session_env }; - session_env_stack.detach(session_ptr); - session_env_stack.detach(&session_env); + session_env_stack.detach(addr_t(session_ptr)); + session_env_stack.detach(addr_t(&session_env)); session_env_stack.free(ram_ds); /* check for leaked quota */ diff --git a/repos/os/src/server/vmm/virtio_gpu.h b/repos/os/src/server/vmm/virtio_gpu.h index 3633568362..4ccdd9a443 100644 --- a/repos/os/src/server/vmm/virtio_gpu.h +++ b/repos/os/src/server/vmm/virtio_gpu.h @@ -353,17 +353,26 @@ class Vmm::Virtio_gpu_device : 
public Virtio_device if (attach_off + sz > _size()) return; - retry( - [&] () { - retry( - [&] { - region_map.attach(device._ram_ds.cap(), - sz, off, true, attach_off); - attach_off += sz; - }, - [&] { rm.upgrade_caps(2); }); - }, - [&] () { rm.upgrade_ram(8*1024); }); + for (;;) { + Region_map::Attach_result const result = + region_map.attach(device._ram_ds.cap(), { + .size = sz, + .offset = off, + .use_at = true, + .at = attach_off, + .executable = false, + .writeable = true + }); + if (result.ok()) + break; + using Error = Region_map::Attach_error; + if (result == Error::OUT_OF_RAM) rm.upgrade_ram(8*1024); + else if (result == Error::OUT_OF_CAPS) rm.upgrade_caps(2); + else { + error("failed to locally attach Virtio_gpu_device resource"); + break; + } + } } }; diff --git a/repos/os/src/test/monitor/main.cc b/repos/os/src/test/monitor/main.cc index c249d9ee96..5078dcc436 100644 --- a/repos/os/src/test/monitor/main.cc +++ b/repos/os/src/test/monitor/main.cc @@ -98,7 +98,11 @@ struct Test::Main Rm_connection rm_connection { _env }; Region_map_client rm { rm_connection.create(8*1024) }; Attached_ram_dataspace ram_ds { _env.ram(), _env.rm(), 4*1024 }; - rm.attach_at(ram_ds.cap(), 0); + assert(rm.attach(ram_ds.cap(), { + .size = { }, .offset = { }, + .use_at = true, .at = 0, + .executable = false, .writeable = true + }).ok(), "failed to map ram_ds at address 0"); Attached_dataspace managed_ds { _env.rm(), rm.dataspace() }; /* try to read 100 bytes at page boundary, expect to stop after 50 bytes */ diff --git a/repos/os/src/test/trace/main.cc b/repos/os/src/test/trace/main.cc index 1053b21f49..749ea9aa5c 100644 --- a/repos/os/src/test/trace/main.cc +++ b/repos/os/src/test/trace/main.cc @@ -86,7 +86,15 @@ class Trace_buffer_monitor Trace::Subject_id id, Dataspace_capability ds_cap) : - _rm(rm), _id(id), _buffer_raw(rm.attach(ds_cap)), + _rm(rm), _id(id), + _buffer_raw(rm.attach(ds_cap, { + .size = { }, .offset = { }, + .use_at = { }, .at = { }, + .executable = { }, .writeable = true + }).convert( + [&] (Region_map::Range r) { return (Trace::Buffer *)r.start; }, + [&] (Region_map::Attach_error) { return nullptr; } + )), _buffer(*_buffer_raw) { log("monitor " @@ -96,7 +104,7 @@ class Trace_buffer_monitor ~Trace_buffer_monitor() { - if (_buffer_raw) { _rm.detach(_buffer_raw); } + if (_buffer_raw) { _rm.detach(addr_t(_buffer_raw)); } } Trace::Subject_id id() { return _id; }; diff --git a/repos/os/src/test/vmm_x86/component.cc b/repos/os/src/test/vmm_x86/component.cc index 4731aa64be..2ba14d244a 100644 --- a/repos/os/src/test/vmm_x86/component.cc +++ b/repos/os/src/test/vmm_x86/component.cc @@ -253,11 +253,10 @@ class Vmm::Vm } /* prepare guest memory with some instructions for testing */ - uint8_t * guest = env.rm().attach(_memory); - - memcpy(guest, &_binary_guest_bin_start, 4096); - - env.rm().detach(guest); + { + Attached_dataspace guest { env.rm(), _memory }; + memcpy(guest.local_addr(), &_binary_guest_bin_start, 4096); + } /* VMM ready for all the vCPUs */ _vmm_ready.up(); diff --git a/repos/ports/src/virtualbox5/frontend/fb.h b/repos/ports/src/virtualbox5/frontend/fb.h index 3e272f6e72..641e3f7169 100644 --- a/repos/ports/src/virtualbox5/frontend/fb.h +++ b/repos/ports/src/virtualbox5/frontend/fb.h @@ -43,10 +43,22 @@ class Genodefb : * The mode currently used by the VM. Can be smaller than the * framebuffer mode. 
*/ - Fb_Genode::Mode _virtual_fb_mode; + Fb_Genode::Mode _virtual_fb_mode; - void *_fb_base; - RTCRITSECT _fb_lock; + void *_attach() + { + return _env.rm().attach(_fb.dataspace(), { + .size = { }, .offset = { }, .use_at = { }, + .at = { }, .executable = { }, .writeable = true + }).convert( + [&] (Genode::Region_map::Range range) { return (void *)range.start; }, + [&] (Genode::Region_map::Attach_error) { return nullptr; } + ); + } + + void *_fb_base = _attach(); + + RTCRITSECT _fb_lock; ComPtr _display; ComPtr _display_bitmap; @@ -95,7 +107,6 @@ class Genodefb : _gui(gui), _fb(*gui.framebuffer()), _virtual_fb_mode(_initial_setup()), - _fb_base(env.rm().attach(_fb.dataspace())), _display(display) { int rc = RTCritSectInit(&_fb_lock); @@ -112,15 +123,11 @@ class Genodefb : _fb_mode = mode; if (_fb_base) - _env.rm().detach(_fb_base); + _env.rm().detach(Genode::addr_t(_fb_base)); _adjust_buffer(); - try { - _fb_base = _env.rm().attach(_fb.dataspace()); - } catch (...) { - _fb_base = nullptr; - } + _fb_base = _attach(); Unlock(); } diff --git a/repos/ports/src/virtualbox5/mm.h b/repos/ports/src/virtualbox5/mm.h index cbe51f9bc1..2439a18fda 100644 --- a/repos/ports/src/virtualbox5/mm.h +++ b/repos/ports/src/virtualbox5/mm.h @@ -33,39 +33,48 @@ class Sub_rm_connection : private Genode::Rm_connection, Genode::addr_t const _offset; Genode::size_t const _size; + Genode::addr_t _attach(Genode::Region_map &local_rm) + { + return local_rm.attach(dataspace(), { + .size = { }, + .offset = { }, + .use_at = { }, + .at = { }, + .executable = true, + .writeable = true + }).convert( + [&] (Range range) { return range.start; }, + [&] (Attach_error) { + Genode::error("failed to attach Sub_rm_connection to local address space"); + return 0UL; } + ); + } + public: Sub_rm_connection(Genode::Env &env, Genode::size_t size) : Rm_connection(env), Genode::Region_map_client(Rm_connection::create(size)), - _offset(env.rm().attach(dataspace(), 0, 0, false, nullptr, true, true)), + _offset(_attach(env.rm())), _size(size) { } - Local_addr attach(Genode::Dataspace_capability ds, - Genode::size_t size = 0, Genode::off_t offset = 0, - bool use_local_addr = false, - Local_addr local_addr = (void *)0, - bool executable = false, - bool writeable = true) override + Attach_result attach(Genode::Dataspace_capability ds, Attr const &attr) override { - Local_addr addr = Genode::retry( - [&] () { - return Genode::retry( - [&] () { - return Region_map_client::attach(ds, size, offset, - use_local_addr, - local_addr, - executable, - writeable); }, - [&] () { upgrade_caps(2); }); - }, - [&] () { upgrade_ram(8192); }); + Attach_result result = Attach_error::REGION_CONFLICT; + for (;;) { + result = Region_map_client::attach(ds, attr); + if (result == Attach_error::OUT_OF_RAM) upgrade_ram(8*1024); + else if (result == Attach_error::OUT_OF_CAPS) upgrade_caps(2); + else + break; + } - Genode::addr_t new_addr = addr; - new_addr += _offset; - return Local_addr(new_addr); + return result.convert( + [&] (Range const r) { return Range { .start = r.start + _offset, + .num_bytes = r.num_bytes }; }, + [&] (Attach_error e) { return e; }); } bool contains(void * ptr) const diff --git a/repos/ports/src/virtualbox5/rt.cc b/repos/ports/src/virtualbox5/rt.cc index ba0b9be312..c679ed4cdc 100644 --- a/repos/ports/src/virtualbox5/rt.cc +++ b/repos/ports/src/virtualbox5/rt.cc @@ -236,14 +236,14 @@ static void *alloc_mem(size_t cb, const char *pszTag, bool executable = false) Ram_dataspace_capability ds = genode_env().ram().alloc(cb); Assert(ds.valid()); - 
Genode::size_t const whole_size = 0; - Genode::off_t const offset = 0; - bool const any_addr = false; - void * any_local_addr = nullptr; - - void * local_addr = rt_memory.attach(ds, whole_size, offset, - any_addr, any_local_addr, - executable); + void * const local_addr = rt_memory.attach(ds, { + .size = { }, .offset = { }, + .use_at = { }, .at = { }, + .executable = executable, .writeable = true + }).convert( + [&] (Region_map::Range range) { return (void *)range.start; }, + [&] (Region_map::Attach_error) { return nullptr; } + ); Assert(local_addr); diff --git a/repos/ports/src/virtualbox5/spec/nova/sup.cc b/repos/ports/src/virtualbox5/spec/nova/sup.cc index c2fed7af12..d02d453b4c 100644 --- a/repos/ports/src/virtualbox5/spec/nova/sup.cc +++ b/repos/ports/src/virtualbox5/spec/nova/sup.cc @@ -110,8 +110,18 @@ static Sub_rm_connection &vm_memory(Genode::uint64_t vm_size = 0) while (allocated < memory_size) { Ram_dataspace_capability ds = genode_env().ram().alloc(alloc_size); - addr_t to = vm_memory.attach_rwx(ds, memory.addr + allocated - vmm_local, - alloc_size); + addr_t const to = vm_memory.attach(ds, { + .size = alloc_size, + .offset = { }, + .use_at = true, + .at = memory.addr + allocated - vmm_local, + .executable = true, + .writeable = true + }).convert( + [&] (Region_map::Range range) { return range.start; }, + [&] (Region_map::Attach_error) { return 0UL; } + ); + Assert(to == vm_memory.local_addr(memory.addr + allocated - vmm_local)); allocated += alloc_size; diff --git a/repos/ports/src/virtualbox6/include/fb.h b/repos/ports/src/virtualbox6/include/fb.h index 482456a4b4..4dca3309ce 100644 --- a/repos/ports/src/virtualbox6/include/fb.h +++ b/repos/ports/src/virtualbox6/include/fb.h @@ -42,10 +42,21 @@ class Genodefb : * The mode currently used by the VM. Can be smaller than the * framebuffer mode. */ - Fb_Genode::Mode _virtual_fb_mode; + Fb_Genode::Mode _virtual_fb_mode; - void *_fb_base; - RTCRITSECT _fb_lock; + void *_attach() + { + return _env.rm().attach(_fb.dataspace(), { + .size = { }, .offset = { }, .use_at = { }, + .at = { }, .executable = { }, .writeable = true + }).convert( + [&] (Genode::Region_map::Range range) { return (void *)range.start; }, + [&] (Genode::Region_map::Attach_error) { return nullptr; } + ); + } + + void *_fb_base = _attach(); + RTCRITSECT _fb_lock; ComPtr _display; ComPtr _display_bitmap; @@ -96,7 +107,6 @@ class Genodefb : _gui(gui), _fb(*gui.framebuffer()), _virtual_fb_mode(_initial_setup()), - _fb_base(env.rm().attach(_fb.dataspace())), _display(display) { int rc = RTCritSectInit(&_fb_lock); @@ -115,15 +125,11 @@ class Genodefb : _fb_mode = mode; if (_fb_base) - _env.rm().detach(_fb_base); + _env.rm().detach(Genode::addr_t(_fb_base)); _adjust_buffer(); - try { - _fb_base = _env.rm().attach(_fb.dataspace()); - } catch (...) 
{ - _fb_base = nullptr; - } + _fb_base = _attach(); Unlock(); } diff --git a/repos/ports/src/virtualbox6/sup_gmm.cc b/repos/ports/src/virtualbox6/sup_gmm.cc index f912dcbe25..d90a1a9e1c 100644 --- a/repos/ports/src/virtualbox6/sup_gmm.cc +++ b/repos/ports/src/virtualbox6/sup_gmm.cc @@ -34,8 +34,30 @@ void Sup::Gmm::_add_one_slice() Ram_dataspace_capability ds = _env.ram().alloc(slice_size); - _map.connection.retry_with_upgrade(Ram_quota{8192}, Cap_quota{2}, [&] () { - _map.rm.attach_rwx(ds, attach_base, slice_size); }); + for (;;) { + + Region_map::Attach_result const result = _map.rm.attach(ds, { + .size = slice_size, + .offset = { }, + .use_at = true, + .at = attach_base, + .executable = true, + .writeable = true + }); + + if (result.ok()) + break; + + using Error = Region_map::Attach_error; + + if (result == Error::OUT_OF_RAM) _map.connection.upgrade_ram(8*1024); + else if (result == Error::OUT_OF_CAPS) _map.connection.upgrade_caps(2); + else { + error("Gmm::_add_one_slice failed to attach slice to map"); + _env.ram().free(ds); + return; + } + } _slices[_slice_index(Offset{attach_base})] = ds; diff --git a/repos/ports/src/virtualbox6/sup_gmm.h b/repos/ports/src/virtualbox6/sup_gmm.h index d03b7c1c77..0c7703366f 100644 --- a/repos/ports/src/virtualbox6/sup_gmm.h +++ b/repos/ports/src/virtualbox6/sup_gmm.h @@ -112,10 +112,25 @@ class Sup::Gmm Region_map_client rm { connection.create(size.value) }; - Vmm_addr const base { (addr_t)env.rm().attach(rm.dataspace()) }; + addr_t _attach() + { + return env.rm().attach(rm.dataspace(), { + .size = { }, .offset = { }, .use_at = { }, + .at = { }, .executable = { }, .writeable = true + }).convert( + [&] (Region_map::Range range) { return range.start; }, + [&] (Region_map::Attach_error) { return 0UL; } + ); + } + + Vmm_addr const base { _attach() }; Vmm_addr const end { base.value + size.value - 1 }; - Map(Env &env, Bytes size) : env(env), size(size) { } + Map(Env &env, Bytes size) : env(env), size(size) + { + if (!base.value) + error("Gmm::Map failed to locally attach managed dataspace"); + } } _map { _env, _map_size };
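The hunks above all follow the same conversion: the former attach(ds, size, offset, use_local_addr, local_addr, executable, writeable) call, which returned a Local_addr and signalled failure by exception, becomes attach(ds, Attr{...}) returning an Attach_result that carries either a Range { start, num_bytes } or an Attach_error, and detach now takes the attachment's start address. A minimal caller-side sketch of that idiom follows; the helper names are illustrative only (not part of the commit) and the header paths assume the usual Genode include layout.

/* sketch of the attach/detach idiom applied throughout this patch */
#include <base/env.h>
#include <base/log.h>
#include <region_map/region_map.h>

using namespace Genode;

/* attach 'ds' read/write at an address chosen by the region map */
static void *attach_rw(Region_map &rm, Dataspace_capability ds)
{
	void *ptr = nullptr;
	rm.attach(ds, {
		.size       = { },   /* default: whole dataspace */
		.offset     = { },
		.use_at     = { },   /* let the region map pick the address */
		.at         = { },
		.executable = { },
		.writeable  = true
	}).with_result(
		[&] (Region_map::Range const range) { ptr = (void *)range.start; },
		[&] (Region_map::Attach_error const e) {
			if (e == Region_map::Attach_error::INVALID_DATASPACE)
				error("attempt to attach an invalid dataspace");
		});
	return ptr;
}

static void detach_ptr(Region_map &rm, void *ptr)
{
	/* 'detach' now takes the start address of the attachment */
	if (ptr)
		rm.detach(addr_t(ptr));
}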
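Where the old code wrapped attach in retry() or retry_with_upgrade() to react to quota exhaustion (device_pd.cc, virtio_gpu.h, mm.h, sup_gmm.cc), the patch replaces the exception-driven retry with a plain loop that inspects the Attach_error and upgrades the RM session's RAM or capability quota before trying again. A sketch of that loop, assuming an Rm_connection-backed region map as in the hunks above; function name and quota amounts are illustrative only.

/* attach at a fixed address, upgrading the session quota on demand */
#include <rm_session/connection.h>
#include <region_map/client.h>
#include <base/log.h>

using namespace Genode;

static bool attach_at(Rm_connection &rm_connection, Region_map_client &rm,
                      Dataspace_capability ds, size_t size, addr_t at)
{
	for (;;) {
		Region_map::Attach_result const result = rm.attach(ds, {
			.size       = size,
			.offset     = { },
			.use_at     = true,
			.at         = at,
			.executable = false,
			.writeable  = true
		});

		if (result.ok())
			return true;

		using Error = Region_map::Attach_error;

		if      (result == Error::OUT_OF_RAM)  rm_connection.upgrade_ram(8*1024);
		else if (result == Error::OUT_OF_CAPS) rm_connection.upgrade_caps(2);
		else {
			error("attach_at failed");
			return false;
		}
	}
}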
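Several hunks (packet_stream.h, the trace_logger monitor and policy code, the vmm_x86 test) drop manual attach/detach pairs entirely in favour of Attached_dataspace, which attaches in its constructor, detaches in its destructor, and reports failure via Attached_dataspace::Region_conflict as caught in sandbox/library.cc. A brief sketch of that replacement, with illustrative names; a production version would guard against the constructor throwing.

/* copy the content of 'src_ds' into 'dst_ds' without manual detach calls */
#include <base/attached_dataspace.h>
#include <util/string.h>

using namespace Genode;

static void copy_dataspace(Region_map &rm, Dataspace_capability dst_ds,
                           Dataspace_capability src_ds, size_t size)
{
	Attached_dataspace dst { rm, dst_ds },   /* both are detached automatically */
	                   src { rm, src_ds };   /* when leaving this scope          */

	memcpy(dst.local_addr<char>(), src.local_addr<char>(), size);
}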