Mirror of https://github.com/genodelabs/genode.git (synced 2025-04-07 19:34:56 +00:00)
base: introduce Allocator::try_alloc
This patch changes the 'Allocator' interface to the use of 'Attempt' return values instead of using exceptions for propagating errors. To largely uphold compatibility with components using the original exception-based interface - in particular use cases where an 'Allocator' is passed to the 'new' operator - the traditional 'alloc' is still supported. But it exists merely as a wrapper around the new 'try_alloc'.

Issue #4324
This commit is contained in:
parent 9591e6caee
commit dc39a8db62
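
As a reader's aid (not part of the commit), the following minimal sketch shows the calling convention that the patch introduces throughout core: 'try_alloc' returns an 'Attempt'-style result that is consumed with 'with_result' or 'convert', as seen in the hunks below. The helper function 'demo_alloc' and the allocation size are hypothetical.

#include <base/allocator.h>
#include <base/log.h>

/* hypothetical helper, assuming an allocator that implements the new interface */
static void *demo_alloc(Genode::Allocator &alloc)
{
	using Genode::Allocator;

	/* handle success and failure in place, no exception is thrown */
	alloc.try_alloc(4096).with_result(
		[&] (void *ptr)                { alloc.free(ptr, 4096); },
		[&] (Allocator::Alloc_error e) { Genode::error("allocation failed, error=", e); });

	/* alternatively, convert the result into a plain value */
	return alloc.try_alloc(4096).convert<void *>(
		[&] (void *ptr)              { return ptr; },
		[&] (Allocator::Alloc_error) { return nullptr; });
}

Components that still use the exception-based 'alloc' keep working, because 'alloc' is now merely a wrapper around 'try_alloc' that preserves the original exception-based behaviour, as described in the commit message above.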
@ -38,61 +38,67 @@ static inline bool can_use_super_page(addr_t, size_t)
|
||||
}
|
||||
|
||||
|
||||
addr_t Io_mem_session_component::_map_local(addr_t base, size_t size)
|
||||
addr_t Io_mem_session_component::_map_local(addr_t phys_base, size_t size)
|
||||
{
|
||||
using namespace Fiasco;
|
||||
auto map_io_region = [] (addr_t phys_base, addr_t local_base, size_t size)
|
||||
{
|
||||
using namespace Fiasco;
|
||||
|
||||
l4_threadid_t const sigma0 = sigma0_threadid;
|
||||
|
||||
unsigned offset = 0;
|
||||
while (size) {
|
||||
|
||||
/*
|
||||
* Call sigma0 for I/O region
|
||||
*/
|
||||
|
||||
/* special case for page0, which is RAM in sigma0/x86 */
|
||||
l4_umword_t const request = (phys_base + offset == 0)
|
||||
? SIGMA0_REQ_FPAGE_RAM
|
||||
: SIGMA0_REQ_FPAGE_IOMEM;
|
||||
|
||||
size_t const size_log2 = can_use_super_page(phys_base + offset, size)
|
||||
? get_super_page_size_log2()
|
||||
: get_page_size_log2();
|
||||
|
||||
l4_umword_t dw0 = 0, dw1 = 0;
|
||||
l4_msgdope_t result { };
|
||||
l4_msgtag_t tag { };
|
||||
|
||||
int const err =
|
||||
l4_ipc_call_tag(sigma0,
|
||||
L4_IPC_SHORT_MSG,
|
||||
request,
|
||||
l4_fpage(phys_base + offset, size_log2, 0, 0).fpage,
|
||||
l4_msgtag(L4_MSGTAG_SIGMA0, 0, 0, 0),
|
||||
L4_IPC_MAPMSG(local_base + offset, size_log2),
|
||||
&dw0, &dw1,
|
||||
L4_IPC_NEVER, &result, &tag);
|
||||
|
||||
if (err || !l4_ipc_fpage_received(result)) {
|
||||
error("map_local failed err=", err, " "
|
||||
"(", l4_ipc_fpage_received(result), ")");
|
||||
return;
|
||||
}
|
||||
|
||||
offset += 1 << size_log2;
|
||||
size -= 1 << size_log2;
|
||||
}
|
||||
};
|
||||
|
||||
/* align large I/O dataspaces on a super-page boundary within core */
|
||||
size_t alignment = (size >= get_super_page_size()) ? get_super_page_size_log2()
|
||||
: get_page_size_log2();
|
||||
size_t align = (size >= get_super_page_size()) ? get_super_page_size_log2()
|
||||
: get_page_size_log2();
|
||||
|
||||
/* find appropriate region for mapping */
|
||||
void *local_base = 0;
|
||||
if (platform().region_alloc().alloc_aligned(size, &local_base, alignment).error())
|
||||
return 0;
|
||||
return platform().region_alloc().alloc_aligned(size, align).convert<addr_t>(
|
||||
|
||||
/* call sigma0 for I/O region */
|
||||
int err;
|
||||
l4_umword_t request;
|
||||
l4_umword_t dw0, dw1;
|
||||
l4_msgdope_t result;
|
||||
l4_msgtag_t tag;
|
||||
[&] (void *ptr) {
|
||||
addr_t const core_local_base = (addr_t)ptr;
|
||||
map_io_region(phys_base, core_local_base, size);
|
||||
return core_local_base; },
|
||||
|
||||
l4_threadid_t sigma0 = sigma0_threadid;
|
||||
|
||||
unsigned offset = 0;
|
||||
while (size) {
|
||||
/* FIXME what about caching demands? */
|
||||
/* FIXME what about read / write? */
|
||||
|
||||
/* special case for page0, which is RAM in sigma0/x86 */
|
||||
if (base + offset == 0)
|
||||
request = SIGMA0_REQ_FPAGE_RAM;
|
||||
else
|
||||
request = SIGMA0_REQ_FPAGE_IOMEM;
|
||||
|
||||
size_t page_size_log2 = get_page_size_log2();
|
||||
if (can_use_super_page(base + offset, size))
|
||||
page_size_log2 = get_super_page_size_log2();
|
||||
|
||||
err = l4_ipc_call_tag(sigma0,
|
||||
L4_IPC_SHORT_MSG,
|
||||
request,
|
||||
l4_fpage(base + offset, page_size_log2, 0, 0).fpage,
|
||||
l4_msgtag(L4_MSGTAG_SIGMA0, 0, 0, 0),
|
||||
L4_IPC_MAPMSG((addr_t)local_base + offset, page_size_log2),
|
||||
&dw0, &dw1,
|
||||
L4_IPC_NEVER, &result, &tag);
|
||||
|
||||
if (err || !l4_ipc_fpage_received(result)) {
|
||||
error("map_local failed err=", err, " "
|
||||
"(", l4_ipc_fpage_received(result), ")");
|
||||
return 0;
|
||||
}
|
||||
|
||||
offset += 1 << page_size_log2;
|
||||
size -= 1 << page_size_log2;
|
||||
}
|
||||
|
||||
return (addr_t)local_base;
|
||||
[&] (Range_allocator::Alloc_error) -> addr_t {
|
||||
error("core-local mapping of memory-mapped I/O range failed");
|
||||
return 0; });
|
||||
}
|
||||
|
@@ -130,7 +130,7 @@ Irq_session_component::Irq_session_component(Range_allocator &irq_alloc,
 	if (msi)
 		throw Service_denied();
 
-	if (irq_alloc.alloc_addr(1, _irq_number).error()) {
+	if (irq_alloc.alloc_addr(1, _irq_number).failed()) {
 		error("unavailable IRQ ", _irq_number, " requested");
 		throw Service_denied();
 	}
@ -445,51 +445,42 @@ Platform::Platform()
|
||||
fiasco_register_thread_name(core_thread.native_thread_id(),
|
||||
core_thread.name().string());
|
||||
|
||||
/* core log as ROM module */
|
||||
auto export_page_as_rom_module = [&] (auto rom_name, auto content_fn)
|
||||
{
|
||||
void * phys_ptr = nullptr;
|
||||
unsigned const pages = 1;
|
||||
size_t const log_size = pages << get_page_size_log2();
|
||||
size_t const size = 1 << get_page_size_log2();
|
||||
ram_alloc().alloc_aligned(size, get_page_size_log2()).with_result(
|
||||
|
||||
ram_alloc().alloc_aligned(log_size, &phys_ptr, get_page_size_log2());
|
||||
addr_t const phys_addr = reinterpret_cast<addr_t>(phys_ptr);
|
||||
[&] (void *phys_ptr) {
|
||||
|
||||
void * const core_local_ptr = phys_ptr;
|
||||
addr_t const core_local_addr = phys_addr;
|
||||
/* core-local memory is one-to-one mapped physical RAM */
|
||||
addr_t const phys_addr = reinterpret_cast<addr_t>(phys_ptr);
|
||||
void * const core_local_ptr = phys_ptr;
|
||||
|
||||
/* let one page free after the log buffer */
|
||||
region_alloc().remove_range(core_local_addr, log_size + get_page_size());
|
||||
region_alloc().remove_range((addr_t)core_local_ptr, size);
|
||||
memset(core_local_ptr, 0, size);
|
||||
content_fn(core_local_ptr, size);
|
||||
|
||||
memset(core_local_ptr, 0, log_size);
|
||||
_rom_fs.insert(new (core_mem_alloc())
|
||||
Rom_module(phys_addr, size, rom_name));
|
||||
},
|
||||
[&] (Range_allocator::Alloc_error) {
|
||||
warning("failed to export ", rom_name, " as ROM module"); }
|
||||
);
|
||||
};
|
||||
|
||||
_rom_fs.insert(new (core_mem_alloc()) Rom_module(phys_addr, log_size,
|
||||
"core_log"));
|
||||
|
||||
init_core_log(Core_log_range { core_local_addr, log_size } );
|
||||
}
|
||||
/* core log as ROM module */
|
||||
export_page_as_rom_module("core_log",
|
||||
[&] (void *core_local_ptr, size_t size) {
|
||||
init_core_log(Core_log_range { (addr_t)core_local_ptr, size } ); });
|
||||
|
||||
/* export platform specific infos */
|
||||
{
|
||||
void * phys_ptr = nullptr;
|
||||
size_t const size = 1 << get_page_size_log2();
|
||||
|
||||
if (ram_alloc().alloc_aligned(size, &phys_ptr,
|
||||
get_page_size_log2()).ok()) {
|
||||
addr_t const phys_addr = reinterpret_cast<addr_t>(phys_ptr);
|
||||
addr_t const core_local_addr = phys_addr;
|
||||
|
||||
region_alloc().remove_range(core_local_addr, size);
|
||||
|
||||
Genode::Xml_generator xml(reinterpret_cast<char *>(core_local_addr),
|
||||
size, "platform_info", [&] ()
|
||||
{
|
||||
xml.node("kernel", [&] () { xml.attribute("name", "fiasco"); });
|
||||
});
|
||||
|
||||
_rom_fs.insert(new (core_mem_alloc()) Rom_module(phys_addr, size,
|
||||
"platform_info"));
|
||||
}
|
||||
}
|
||||
export_page_as_rom_module("platform_info",
|
||||
[&] (void *core_local_ptr, size_t size) {
|
||||
Xml_generator xml(reinterpret_cast<char *>(core_local_ptr),
|
||||
size, "platform_info",
|
||||
[&] () {
|
||||
xml.node("kernel", [&] () {
|
||||
xml.attribute("name", "fiasco"); }); }); });
|
||||
}
|
||||
|
||||
|
||||
|
@ -34,14 +34,18 @@ addr_t Io_mem_session_component::_map_local(addr_t base, size_t size)
|
||||
: get_page_size_log2();
|
||||
|
||||
/* find appropriate region for mapping */
|
||||
void *local_base = 0;
|
||||
if (platform().region_alloc().alloc_aligned(size, &local_base, alignment).error())
|
||||
return 0;
|
||||
return platform().region_alloc().alloc_aligned(size, alignment).convert<addr_t>(
|
||||
|
||||
if (!map_local_io(base, (addr_t)local_base, size >> get_page_size_log2())) {
|
||||
error("map_local_io failed");
|
||||
return 0;
|
||||
}
|
||||
[&] (void *local_base) {
|
||||
if (!map_local_io(base, (addr_t)local_base, size >> get_page_size_log2())) {
|
||||
error("map_local_io failed");
|
||||
platform().region_alloc().free(local_base, base);
|
||||
return 0UL;
|
||||
}
|
||||
return (addr_t)local_base;
|
||||
},
|
||||
|
||||
return (addr_t)local_base;
|
||||
[&] (Range_allocator::Alloc_error) {
|
||||
error("allocation of virtual memory for local I/O mapping failed");
|
||||
return 0UL; });
|
||||
}
|
||||
|
@@ -196,7 +196,7 @@ Irq_session_component::Irq_session_component(Range_allocator &irq_alloc,
 		}
 		msi_alloc.set(_irq_number, 1);
 	} else {
-		if (irq_alloc.alloc_addr(1, _irq_number).error()) {
+		if (irq_alloc.alloc_addr(1, _irq_number).failed()) {
 			error("unavailable IRQ ", _irq_number, " requested");
 			throw Service_denied();
 		}
@ -467,75 +467,68 @@ Platform::Platform()
|
||||
core_thread.pager(_sigma0);
|
||||
_core_pd->bind_thread(core_thread);
|
||||
|
||||
/* export x86 platform specific infos */
|
||||
auto export_page_as_rom_module = [&] (auto rom_name, auto content_fn)
|
||||
{
|
||||
void * core_local_ptr = nullptr;
|
||||
void * phys_ptr = nullptr;
|
||||
unsigned const pages = 1;
|
||||
size_t const align = get_page_size_log2();
|
||||
size_t const size = pages << get_page_size_log2();
|
||||
size_t const pages = 1;
|
||||
size_t const align = get_page_size_log2();
|
||||
size_t const bytes = pages << get_page_size_log2();
|
||||
ram_alloc().alloc_aligned(bytes, align).with_result(
|
||||
|
||||
if (ram_alloc().alloc_aligned(size, &phys_ptr, align).error())
|
||||
return;
|
||||
[&] (void *phys_ptr) {
|
||||
|
||||
if (region_alloc().alloc_aligned(size, &core_local_ptr, align).error())
|
||||
return;
|
||||
addr_t const phys_addr = reinterpret_cast<addr_t>(phys_ptr);
|
||||
|
||||
addr_t const phys_addr = reinterpret_cast<addr_t>(phys_ptr);
|
||||
addr_t const core_local_addr = reinterpret_cast<addr_t>(core_local_ptr);
|
||||
region_alloc().alloc_aligned(bytes, align).with_result(
|
||||
[&] (void *core_local_ptr) {
|
||||
|
||||
if (!map_local(phys_addr, core_local_addr, pages))
|
||||
return;
|
||||
if (!map_local(phys_addr, (addr_t)core_local_ptr, pages)) {
|
||||
warning("map_local failed while exporting ",
|
||||
rom_name, " as ROM module");
|
||||
ram_alloc().free(phys_ptr, bytes);
|
||||
region_alloc().free(core_local_ptr, bytes);
|
||||
return;
|
||||
}
|
||||
|
||||
memset(core_local_ptr, 0, size);
|
||||
memset(core_local_ptr, 0, bytes);
|
||||
content_fn((char *)core_local_ptr, bytes);
|
||||
|
||||
Xml_generator xml(reinterpret_cast<char *>(core_local_addr),
|
||||
pages << get_page_size_log2(),
|
||||
"platform_info", [&] ()
|
||||
{
|
||||
xml.node("kernel", [&] () {
|
||||
xml.attribute("name", "foc");
|
||||
xml.attribute("acpi", true);
|
||||
xml.attribute("msi" , true);
|
||||
_rom_fs.insert(new (core_mem_alloc())
|
||||
Rom_module(phys_addr, bytes, rom_name));
|
||||
},
|
||||
[&] (Range_allocator::Alloc_error) {
|
||||
warning("failed allocate virtual memory to export ",
|
||||
rom_name, " as ROM module");
|
||||
ram_alloc().free(phys_ptr, bytes);
|
||||
}
|
||||
);
|
||||
},
|
||||
[&] (Range_allocator::Alloc_error) {
|
||||
warning("failed to export ", rom_name, " as ROM module"); }
|
||||
);
|
||||
};
|
||||
|
||||
export_page_as_rom_module("platform_info",
|
||||
[&] (char *core_local_ptr, size_t size) {
|
||||
Xml_generator xml(core_local_ptr, size, "platform_info", [&] ()
|
||||
{
|
||||
xml.node("kernel", [&] () {
|
||||
xml.attribute("name", "foc");
|
||||
xml.attribute("acpi", true);
|
||||
xml.attribute("msi" , true);
|
||||
});
|
||||
xml.node("hardware", [&] () {
|
||||
_setup_platform_info(xml, sigma0_map_kip()); });
|
||||
|
||||
xml.node("affinity-space", [&] () {
|
||||
xml.attribute("width", affinity_space().width());
|
||||
xml.attribute("height", affinity_space().height()); });
|
||||
});
|
||||
xml.node("hardware", [&] () {
|
||||
_setup_platform_info(xml, sigma0_map_kip()); });
|
||||
}
|
||||
);
|
||||
|
||||
xml.node("affinity-space", [&] () {
|
||||
xml.attribute("width", affinity_space().width());
|
||||
xml.attribute("height", affinity_space().height()); });
|
||||
});
|
||||
|
||||
_rom_fs.insert(new (core_mem_alloc()) Rom_module(phys_addr, size,
|
||||
"platform_info"));
|
||||
}
|
||||
|
||||
/* core log as ROM module */
|
||||
{
|
||||
void * core_local_ptr = nullptr;
|
||||
void * phys_ptr = nullptr;
|
||||
unsigned const pages = 1;
|
||||
size_t const align = get_page_size_log2();
|
||||
size_t const size = pages << get_page_size_log2();
|
||||
|
||||
if (ram_alloc().alloc_aligned(size, &phys_ptr, align).error())
|
||||
return;
|
||||
if (region_alloc().alloc_aligned(size, &core_local_ptr, align).error())
|
||||
return;
|
||||
|
||||
addr_t const phys_addr = reinterpret_cast<addr_t>(phys_ptr);
|
||||
addr_t const core_local_addr = reinterpret_cast<addr_t>(core_local_ptr);
|
||||
|
||||
if (!map_local(phys_addr, core_local_addr, pages))
|
||||
return;
|
||||
|
||||
memset(core_local_ptr, 0, size);
|
||||
|
||||
_rom_fs.insert(new (core_mem_alloc()) Rom_module(phys_addr, size,
|
||||
"core_log"));
|
||||
|
||||
init_core_log(Core_log_range { core_local_addr, size } );
|
||||
}
|
||||
export_page_as_rom_module("core_log",
|
||||
[&] (char *core_local_ptr, size_t size) {
|
||||
init_core_log(Core_log_range { (addr_t)core_local_ptr, size } ); });
|
||||
|
||||
Affinity::Space const cpus = affinity_space();
|
||||
|
||||
|
@@ -198,11 +198,9 @@ unsigned long Cap_id_allocator::alloc()
 {
 	Mutex::Guard lock_guard(_mutex);
 
-	void *id = nullptr;
-	if (_id_alloc.alloc(CAP_ID_OFFSET, &id))
-		return (unsigned long) id;
-
-	throw Out_of_ids();
+	return _id_alloc.try_alloc(CAP_ID_OFFSET).convert<unsigned long>(
+		[&] (void *id) { return (unsigned long)id; },
+		[&] (Range_allocator::Alloc_error) -> unsigned long { throw Out_of_ids(); });
 }
@@ -30,19 +30,16 @@ extern unsigned _bss_end;
 void * Platform::Ram_allocator::alloc_aligned(size_t size, unsigned align)
 {
 	using namespace Genode;
-	using namespace Hw;
 
-	void * ret;
-	assert(Base::alloc_aligned(round_page(size), &ret,
-	                           max(align, get_page_size_log2())).ok());
-	return ret;
-}
-
-
-bool Platform::Ram_allocator::alloc(size_t size, void **out_addr)
-{
-	*out_addr = alloc_aligned(size, 0);
-	return true;
+	return Base::alloc_aligned(Hw::round_page(size),
+	                           max(align, get_page_size_log2())).convert<void *>(
+
+		[&] (void *ptr) { return ptr; },
+		[&] (Ram_allocator::Alloc_error e) -> void *
+		{
+			error("bootstrap RAM allocation failed, error=", e);
+			assert(false);
+		});
 }
@ -53,8 +53,13 @@ class Bootstrap::Platform
|
||||
};
|
||||
|
||||
|
||||
class Ram_allocator : public Genode::Allocator_avl_base
|
||||
class Ram_allocator : private Genode::Allocator_avl_base
|
||||
{
|
||||
/*
|
||||
* 'Ram_allocator' is derived from 'Allocator_avl_base' to access
|
||||
* the protected 'slab_block_size'.
|
||||
*/
|
||||
|
||||
private:
|
||||
|
||||
using Base = Genode::Allocator_avl_base;
|
||||
@ -73,8 +78,7 @@ class Bootstrap::Platform
|
||||
{ }
|
||||
|
||||
void * alloc_aligned(size_t size, unsigned align);
|
||||
bool alloc(size_t size, void **out_addr) override;
|
||||
void * alloc(size_t size) { return Allocator::alloc(size); }
|
||||
void * alloc(size_t size) { return alloc_aligned(size, 0); }
|
||||
|
||||
void add(Memory_region const &);
|
||||
void remove(Memory_region const &);
|
||||
|
@ -29,12 +29,15 @@ Core_region_map::attach(Dataspace_capability ds_cap, size_t size,
|
||||
off_t offset, bool use_local_addr,
|
||||
Region_map::Local_addr, bool, bool writeable)
|
||||
{
|
||||
auto lambda = [&] (Dataspace_component *ds) -> Local_addr {
|
||||
if (!ds)
|
||||
return _ep.apply(ds_cap, [&] (Dataspace_component *ds_ptr) -> Local_addr {
|
||||
|
||||
if (!ds_ptr)
|
||||
throw Invalid_dataspace();
|
||||
|
||||
Dataspace_component &ds = *ds_ptr;
|
||||
|
||||
if (size == 0)
|
||||
size = ds->size();
|
||||
size = ds.size();
|
||||
|
||||
size_t page_rounded_size = (size + get_page_size() - 1) & get_page_mask();
|
||||
|
||||
@ -48,10 +51,13 @@ Core_region_map::attach(Dataspace_capability ds_cap, size_t size,
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
unsigned const align = get_page_size_log2();
|
||||
|
||||
/* allocate range in core's virtual address space */
|
||||
void *virt_addr;
|
||||
if (!platform().region_alloc().alloc_aligned(page_rounded_size, &virt_addr,
|
||||
get_page_size_log2()).ok()) {
|
||||
Allocator::Alloc_result virt =
|
||||
platform().region_alloc().alloc_aligned(page_rounded_size, align);
|
||||
|
||||
if (virt.failed()) {
|
||||
error("could not allocate virtual address range in core of size ",
|
||||
page_rounded_size);
|
||||
return nullptr;
|
||||
@ -61,16 +67,23 @@ Core_region_map::attach(Dataspace_capability ds_cap, size_t size,
|
||||
|
||||
/* map the dataspace's physical pages to corresponding virtual addresses */
|
||||
unsigned num_pages = page_rounded_size >> get_page_size_log2();
|
||||
Page_flags const flags { (writeable && ds->writable()) ? RW : RO,
|
||||
Page_flags const flags { (writeable && ds.writable()) ? RW : RO,
|
||||
NO_EXEC, KERN, GLOBAL,
|
||||
ds->io_mem() ? DEVICE : RAM,
|
||||
ds->cacheability() };
|
||||
if (!map_local(ds->phys_addr(), (addr_t)virt_addr, num_pages, flags))
|
||||
return nullptr;
|
||||
ds.io_mem() ? DEVICE : RAM,
|
||||
ds.cacheability() };
|
||||
|
||||
return virt_addr;
|
||||
};
|
||||
return _ep.apply(ds_cap, lambda);
|
||||
return virt.convert<Local_addr>(
|
||||
|
||||
[&] (void *virt_addr) -> void * {
|
||||
if (map_local(ds.phys_addr(), (addr_t)virt_addr, num_pages, flags))
|
||||
return virt_addr;
|
||||
|
||||
platform().region_alloc().free(virt_addr, page_rounded_size);
|
||||
return nullptr; },
|
||||
|
||||
[&] (Allocator::Alloc_error) {
|
||||
return nullptr; });
|
||||
});
|
||||
}
|
||||
|
||||
|
||||
|
@@ -57,8 +57,8 @@ class Genode::Cpu_thread_allocator : public Allocator
 			 ** Allocator interface **
 			 *************************/
 
-			bool alloc(size_t size, void **out_addr) override {
-				return _alloc.alloc(size, out_addr); }
+			Alloc_result try_alloc(size_t size) override {
+				return _alloc.alloc(size); }
 
 			void free(void *addr, size_t size) override {
 				_alloc.free(addr, size); }
@@ -78,7 +78,7 @@ Irq_session_component::Irq_session_component(Range_allocator &irq_alloc,
 	}
 
 	/* allocate interrupt */
-	if (_irq_alloc.alloc_addr(1, _irq_number).error()) {
+	if (_irq_alloc.alloc_addr(1, _irq_number).failed()) {
 		error("unavailable interrupt ", _irq_number, " requested");
 		throw Service_denied();
 	}
@@ -35,10 +35,41 @@ using namespace Kernel;
 
 void Thread::_ipc_alloc_recv_caps(unsigned cap_count)
 {
-	Genode::Allocator &slab = pd().platform_pd().capability_slab();
+	using Allocator = Genode::Allocator;
+
+	Allocator &slab = pd().platform_pd().capability_slab();
 	for (unsigned i = 0; i < cap_count; i++) {
-		if (_obj_id_ref_ptr[i] == nullptr)
-			_obj_id_ref_ptr[i] = slab.alloc(sizeof(Object_identity_reference));
+		if (_obj_id_ref_ptr[i] != nullptr)
+			continue;
+
+		slab.try_alloc(sizeof(Object_identity_reference)).with_result(
+
+			[&] (void *ptr) {
+				_obj_id_ref_ptr[i] = ptr; },
+
+			[&] (Allocator::Alloc_error e) {
+
+				switch (e) {
+				case Allocator::Alloc_error::DENIED:
+
+					/*
+					 * Slab is exhausted, reflect condition to the client.
+					 */
+					throw Genode::Out_of_ram();
+
+				case Allocator::Alloc_error::OUT_OF_CAPS:
+				case Allocator::Alloc_error::OUT_OF_RAM:
+
+					/*
+					 * These conditions cannot happen because the slab
+					 * does not try to grow automatically. It is
+					 * explicitely expanded by the client as response to
+					 * the 'Out_of_ram' condition above.
+					 */
+					Genode::raw("unexpected recv_caps allocation failure");
+				}
+			}
+		);
 	}
 	_ipc_rcv_caps = cap_count;
 }
@ -112,28 +112,46 @@ void Platform::_init_platform_info()
|
||||
{
|
||||
unsigned const pages = 1;
|
||||
size_t const rom_size = pages << get_page_size_log2();
|
||||
void *phys_ptr = nullptr;
|
||||
void *virt_ptr = nullptr;
|
||||
const char *rom_name = "platform_info";
|
||||
|
||||
if (!ram_alloc().alloc(get_page_size(), &phys_ptr)) {
|
||||
error("could not setup platform_info ROM - ram allocation error");
|
||||
return;
|
||||
}
|
||||
struct Guard
|
||||
{
|
||||
Range_allocator &phys_alloc;
|
||||
Range_allocator &virt_alloc;
|
||||
|
||||
if (!region_alloc().alloc(rom_size, &virt_ptr)) {
|
||||
error("could not setup platform_info ROM - region allocation error");
|
||||
ram_alloc().free(phys_ptr);
|
||||
return;
|
||||
}
|
||||
struct {
|
||||
void *phys_ptr = nullptr;
|
||||
void *virt_ptr = nullptr;
|
||||
};
|
||||
|
||||
addr_t const phys_addr = reinterpret_cast<addr_t>(phys_ptr);
|
||||
addr_t const virt_addr = reinterpret_cast<addr_t>(virt_ptr);
|
||||
Guard(Range_allocator &phys_alloc, Range_allocator &virt_alloc)
|
||||
: phys_alloc(phys_alloc), virt_alloc(virt_alloc) { }
|
||||
|
||||
~Guard()
|
||||
{
|
||||
if (phys_ptr) phys_alloc.free(phys_ptr);
|
||||
if (virt_ptr) virt_alloc.free(phys_ptr);
|
||||
}
|
||||
} guard { ram_alloc(), region_alloc() };
|
||||
|
||||
ram_alloc().try_alloc(get_page_size()).with_result(
|
||||
[&] (void *ptr) { guard.phys_ptr = ptr; },
|
||||
[&] (Allocator::Alloc_error) {
|
||||
error("could not setup platform_info ROM - RAM allocation error"); });
|
||||
|
||||
region_alloc().try_alloc(rom_size).with_result(
|
||||
[&] (void *ptr) { guard.virt_ptr = ptr; },
|
||||
[&] (Allocator::Alloc_error) {
|
||||
error("could not setup platform_info ROM - region allocation error"); });
|
||||
|
||||
if (!guard.phys_ptr || !guard.virt_ptr)
|
||||
return;
|
||||
|
||||
addr_t const phys_addr = reinterpret_cast<addr_t>(guard.phys_ptr);
|
||||
addr_t const virt_addr = reinterpret_cast<addr_t>(guard.virt_ptr);
|
||||
|
||||
if (!map_local(phys_addr, virt_addr, pages, Hw::PAGE_FLAGS_KERN_DATA)) {
|
||||
error("could not setup platform_info ROM - map error");
|
||||
region_alloc().free(virt_ptr);
|
||||
ram_alloc().free(phys_ptr);
|
||||
return;
|
||||
}
|
||||
|
||||
@ -156,10 +174,11 @@ void Platform::_init_platform_info()
|
||||
return;
|
||||
}
|
||||
|
||||
region_alloc().free(virt_ptr);
|
||||
|
||||
_rom_fs.insert(
|
||||
new (core_mem_alloc()) Rom_module(phys_addr, rom_size, rom_name));
|
||||
|
||||
/* keep phys allocation but let guard revert virt allocation */
|
||||
guard.phys_ptr = nullptr;
|
||||
}
|
||||
|
||||
|
||||
@ -203,25 +222,32 @@ Platform::Platform()
|
||||
|
||||
/* core log as ROM module */
|
||||
{
|
||||
void * core_local_ptr = nullptr;
|
||||
void * phys_ptr = nullptr;
|
||||
unsigned const pages = 1;
|
||||
size_t const log_size = pages << get_page_size_log2();
|
||||
unsigned const align = get_page_size_log2();
|
||||
|
||||
ram_alloc().alloc_aligned(log_size, &phys_ptr, get_page_size_log2());
|
||||
addr_t const phys_addr = reinterpret_cast<addr_t>(phys_ptr);
|
||||
ram_alloc().alloc_aligned(log_size, align).with_result(
|
||||
|
||||
/* let one page free after the log buffer */
|
||||
region_alloc().alloc_aligned(log_size, &core_local_ptr, get_page_size_log2());
|
||||
addr_t const core_local_addr = reinterpret_cast<addr_t>(core_local_ptr);
|
||||
[&] (void *phys) {
|
||||
addr_t const phys_addr = reinterpret_cast<addr_t>(phys);
|
||||
|
||||
map_local(phys_addr, core_local_addr, pages);
|
||||
memset(core_local_ptr, 0, log_size);
|
||||
region_alloc().alloc_aligned(log_size, align). with_result(
|
||||
|
||||
_rom_fs.insert(new (core_mem_alloc()) Rom_module(phys_addr, log_size,
|
||||
"core_log"));
|
||||
[&] (void *ptr) {
|
||||
|
||||
init_core_log(Core_log_range { core_local_addr, log_size } );
|
||||
map_local(phys_addr, (addr_t)ptr, pages);
|
||||
memset(ptr, 0, log_size);
|
||||
|
||||
_rom_fs.insert(new (core_mem_alloc())
|
||||
Rom_module(phys_addr, log_size, "core_log"));
|
||||
|
||||
init_core_log(Core_log_range { (addr_t)ptr, log_size } );
|
||||
},
|
||||
[&] (Range_allocator::Alloc_error) { /* ignored */ }
|
||||
);
|
||||
},
|
||||
[&] (Range_allocator::Alloc_error) { }
|
||||
);
|
||||
}
|
||||
|
||||
class Idle_thread_trace_source : public Trace::Source::Info_accessor,
|
||||
|
@@ -37,11 +37,16 @@ Core_mem_allocator &Hw::Address_space::_cma()
 
 void *Hw::Address_space::_table_alloc()
 {
-	void * ret = nullptr;
-	if (!_cma().alloc_aligned(sizeof(Page_table), (void**)&ret,
-	                          Page_table::ALIGNM_LOG2).ok())
-		throw Insufficient_ram_quota();
-	return ret;
+	unsigned const align = Page_table::ALIGNM_LOG2;
+
+	return _cma().alloc_aligned(sizeof(Page_table), align).convert<void *>(
+
+		[&] (void *ptr) {
+			return ptr; },
+
+		[&] (Range_allocator::Alloc_result) -> void * {
+			/* XXX distinguish error conditions */
+			throw Insufficient_ram_quota(); });
 }
@@ -134,10 +139,15 @@ Cap_space::Cap_space() : _slab(nullptr, &_initial_sb) { }
 
 void Cap_space::upgrade_slab(Allocator &alloc)
 {
-	void * block = nullptr;
-	if (!alloc.alloc(SLAB_SIZE, &block))
-		throw Out_of_ram();
-	_slab.insert_sb(block);
+	alloc.try_alloc(SLAB_SIZE).with_result(
+
+		[&] (void *ptr) {
+			_slab.insert_sb(ptr); },
+
+		[&] (Allocator::Alloc_error) {
+			/* XXX distinguish error conditions */
+			throw Out_of_ram();
+		});
 }
@@ -71,13 +71,18 @@ Platform_thread::Platform_thread(Label const &label, Native_utcb &utcb)
 	_kobj(_kobj.CALLED_FROM_CORE, _label.string())
 {
 	/* create UTCB for a core thread */
-	void *utcb_phys;
-	if (!platform().ram_alloc().alloc(sizeof(Native_utcb), &utcb_phys)) {
-		error("failed to allocate UTCB");
-		throw Out_of_ram();
-	}
-	map_local((addr_t)utcb_phys, (addr_t)_utcb_core_addr,
-	          sizeof(Native_utcb) / get_page_size());
+	platform().ram_alloc().try_alloc(sizeof(Native_utcb)).with_result(
+
+		[&] (void *utcb_phys) {
+			map_local((addr_t)utcb_phys, (addr_t)_utcb_core_addr,
+			          sizeof(Native_utcb) / get_page_size());
+		},
+		[&] (Range_allocator::Alloc_error) {
+			error("failed to allocate UTCB");
+			/* XXX distinguish error conditions */
+			throw Out_of_ram();
+		}
+	);
 }
@ -33,30 +33,40 @@ void Ram_dataspace_factory::_clear_ds (Dataspace_component &ds)
|
||||
{
|
||||
size_t page_rounded_size = (ds.size() + get_page_size() - 1) & get_page_mask();
|
||||
|
||||
struct Guard
|
||||
{
|
||||
Range_allocator &virt_alloc;
|
||||
struct { void *virt_ptr = nullptr; };
|
||||
|
||||
Guard(Range_allocator &virt_alloc) : virt_alloc(virt_alloc) { }
|
||||
|
||||
~Guard() { if (virt_ptr) virt_alloc.free(virt_ptr); }
|
||||
|
||||
} guard(platform().region_alloc());
|
||||
|
||||
/* allocate range in core's virtual address space */
|
||||
void *virt_addr;
|
||||
if (!platform().region_alloc().alloc(page_rounded_size, &virt_addr)) {
|
||||
error("could not allocate virtual address range in core of size ",
|
||||
page_rounded_size);
|
||||
platform().region_alloc().try_alloc(page_rounded_size).with_result(
|
||||
[&] (void *ptr) { guard.virt_ptr = ptr; },
|
||||
[&] (Range_allocator::Alloc_error e) {
|
||||
error("could not allocate virtual address range in core of size ",
|
||||
page_rounded_size, ", error=", e); });
|
||||
|
||||
if (!guard.virt_ptr)
|
||||
return;
|
||||
}
|
||||
|
||||
/* map the dataspace's physical pages to corresponding virtual addresses */
|
||||
size_t num_pages = page_rounded_size >> get_page_size_log2();
|
||||
if (!map_local(ds.phys_addr(), (addr_t)virt_addr, num_pages)) {
|
||||
if (!map_local(ds.phys_addr(), (addr_t)guard.virt_ptr, num_pages)) {
|
||||
error("core-local memory mapping failed");
|
||||
return;
|
||||
}
|
||||
|
||||
/* dependent on the architecture, cache maintainance might be necessary */
|
||||
Cpu::clear_memory_region((addr_t)virt_addr, page_rounded_size,
|
||||
Cpu::clear_memory_region((addr_t)guard.virt_ptr, page_rounded_size,
|
||||
ds.cacheability() != CACHED);
|
||||
|
||||
/* unmap dataspace from core */
|
||||
if (!unmap_local((addr_t)virt_addr, num_pages))
|
||||
error("could not unmap core-local address range at ", virt_addr);
|
||||
|
||||
/* free core's virtual address space */
|
||||
platform().region_alloc().free(virt_addr, page_rounded_size);
|
||||
if (!unmap_local((addr_t)guard.virt_ptr, num_pages))
|
||||
error("could not unmap core-local address range at ", guard.virt_ptr);
|
||||
}
|
||||
|
||||
|
@ -86,22 +86,28 @@ class Genode::Rpc_cap_factory
|
||||
{
|
||||
Mutex::Guard guard(_mutex);
|
||||
|
||||
/* allocate kernel object */
|
||||
Kobject * obj;
|
||||
if (!_slab.alloc(sizeof(Kobject), (void**)&obj))
|
||||
throw Allocator::Out_of_memory();
|
||||
construct_at<Kobject>(obj, ep);
|
||||
return _slab.try_alloc(sizeof(Kobject)).convert<Native_capability>(
|
||||
|
||||
if (!obj->cap.valid()) {
|
||||
raw("Invalid entrypoint ", (addr_t)Capability_space::capid(ep),
|
||||
" for allocating a capability!");
|
||||
destroy(&_slab, obj);
|
||||
return Native_capability();
|
||||
}
|
||||
[&] (void *ptr) {
|
||||
|
||||
/* store it in the list and return result */
|
||||
_list.insert(obj);
|
||||
return obj->cap;
|
||||
/* create kernel object */
|
||||
Kobject &obj = *construct_at<Kobject>(ptr, ep);
|
||||
|
||||
if (!obj.cap.valid()) {
|
||||
raw("Invalid entrypoint ", (addr_t)Capability_space::capid(ep),
|
||||
" for allocating a capability!");
|
||||
destroy(&_slab, &obj);
|
||||
return Native_capability();
|
||||
}
|
||||
|
||||
/* store it in the list and return result */
|
||||
_list.insert(&obj);
|
||||
return obj.cap;
|
||||
},
|
||||
[&] (Allocator::Alloc_error) -> Native_capability {
|
||||
/* XXX distinguish error conditions */
|
||||
throw Allocator::Out_of_memory();
|
||||
});
|
||||
}
|
||||
|
||||
void free(Native_capability cap)
|
||||
|
@ -43,16 +43,20 @@ void Genode::platform_add_local_services(Rpc_entrypoint &ep,
|
||||
Hw::Mm::hypervisor_exception_vector().size / get_page_size(),
|
||||
Hw::PAGE_FLAGS_KERN_TEXT);
|
||||
|
||||
void * stack = nullptr;
|
||||
assert(platform().ram_alloc().alloc_aligned(Hw::Mm::hypervisor_stack().size,
|
||||
(void**)&stack,
|
||||
get_page_size_log2()).ok());
|
||||
map_local((addr_t)stack,
|
||||
Hw::Mm::hypervisor_stack().base,
|
||||
Hw::Mm::hypervisor_stack().size / get_page_size(),
|
||||
Hw::PAGE_FLAGS_KERN_DATA);
|
||||
platform().ram_alloc().alloc_aligned(Hw::Mm::hypervisor_stack().size,
|
||||
get_page_size_log2()).with_result(
|
||||
[&] (void *stack) {
|
||||
map_local((addr_t)stack,
|
||||
Hw::Mm::hypervisor_stack().base,
|
||||
Hw::Mm::hypervisor_stack().size / get_page_size(),
|
||||
Hw::PAGE_FLAGS_KERN_DATA);
|
||||
|
||||
static Vm_root vm_root(ep, sh, core_env().ram_allocator(),
|
||||
core_env().local_rm(), trace_sources);
|
||||
static Core_service<Vm_session_component> vm_service(services, vm_root);
|
||||
static Vm_root vm_root(ep, sh, core_env().ram_allocator(),
|
||||
core_env().local_rm(), trace_sources);
|
||||
static Core_service<Vm_session_component> vm_service(services, vm_root);
|
||||
},
|
||||
[&] (Range_allocator::Alloc_error) {
|
||||
warning("failed to allocate hypervisor stack for VM service");
|
||||
}
|
||||
);
|
||||
}
|
||||
|
@ -73,14 +73,17 @@ void Vm_session_component::_detach_vm_memory(addr_t vm_addr, size_t size)
|
||||
|
||||
void * Vm_session_component::_alloc_table()
|
||||
{
|
||||
void * table;
|
||||
/* get some aligned space for the translation table */
|
||||
if (!cma().alloc_aligned(sizeof(Board::Vm_page_table), (void**)&table,
|
||||
Board::Vm_page_table::ALIGNM_LOG2).ok()) {
|
||||
error("failed to allocate kernel object");
|
||||
throw Insufficient_ram_quota();
|
||||
}
|
||||
return table;
|
||||
return cma().alloc_aligned(sizeof(Board::Vm_page_table),
|
||||
Board::Vm_page_table::ALIGNM_LOG2).convert<void *>(
|
||||
[&] (void *table_ptr) {
|
||||
return table_ptr; },
|
||||
|
||||
[&] (Range_allocator::Alloc_error) -> void * {
|
||||
/* XXX handle individual error conditions */
|
||||
error("failed to allocate kernel object");
|
||||
throw Insufficient_ram_quota(); }
|
||||
);
|
||||
}
|
||||
|
||||
|
||||
|
@ -64,22 +64,21 @@ class Genode::Platform : public Platform_generic
|
||||
|
||||
struct Dummy_allocator : Range_allocator
|
||||
{
|
||||
void free(void *, size_t) override { ASSERT_NEVER_CALLED; }
|
||||
bool need_size_for_free() const override { ASSERT_NEVER_CALLED; }
|
||||
size_t consumed() const override { ASSERT_NEVER_CALLED; }
|
||||
size_t overhead(size_t) const override { ASSERT_NEVER_CALLED; }
|
||||
int add_range (addr_t, size_t ) override { ASSERT_NEVER_CALLED; }
|
||||
int remove_range(addr_t, size_t ) override { ASSERT_NEVER_CALLED; }
|
||||
void free(void *) override { ASSERT_NEVER_CALLED; }
|
||||
size_t avail() const override { ASSERT_NEVER_CALLED; }
|
||||
bool valid_addr(addr_t ) const override { ASSERT_NEVER_CALLED; }
|
||||
bool alloc(size_t, void **) override { ASSERT_NEVER_CALLED; }
|
||||
void free(void *, size_t) override { ASSERT_NEVER_CALLED; }
|
||||
bool need_size_for_free() const override { ASSERT_NEVER_CALLED; }
|
||||
size_t consumed() const override { ASSERT_NEVER_CALLED; }
|
||||
size_t overhead(size_t) const override { ASSERT_NEVER_CALLED; }
|
||||
Range_result add_range (addr_t, size_t ) override { ASSERT_NEVER_CALLED; }
|
||||
Range_result remove_range(addr_t, size_t ) override { ASSERT_NEVER_CALLED; }
|
||||
void free(void *) override { ASSERT_NEVER_CALLED; }
|
||||
size_t avail() const override { ASSERT_NEVER_CALLED; }
|
||||
bool valid_addr(addr_t ) const override { ASSERT_NEVER_CALLED; }
|
||||
Alloc_result try_alloc(size_t) override { ASSERT_NEVER_CALLED; }
|
||||
Alloc_result alloc_addr(size_t, addr_t) override { ASSERT_NEVER_CALLED; }
|
||||
|
||||
Alloc_return alloc_aligned(size_t, void **, unsigned, Range) override
|
||||
Alloc_result alloc_aligned(size_t, unsigned, Range) override
|
||||
{ ASSERT_NEVER_CALLED; }
|
||||
|
||||
Alloc_return alloc_addr(size_t, addr_t) override
|
||||
{ ASSERT_NEVER_CALLED; }
|
||||
|
||||
} _dummy_alloc { };
|
||||
|
||||
@ -88,25 +87,31 @@ class Genode::Platform : public Platform_generic
|
||||
*/
|
||||
struct Pseudo_ram_allocator : Range_allocator
|
||||
{
|
||||
bool alloc(size_t, void **out_addr) override
|
||||
Alloc_result try_alloc(size_t) override
|
||||
{
|
||||
*out_addr = 0;
|
||||
return true;
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
Alloc_return alloc_aligned(size_t, void **out, unsigned, Range) override
|
||||
Alloc_result alloc_aligned(size_t, unsigned, Range) override
|
||||
{
|
||||
*out = 0;
|
||||
return Alloc_return::OK;
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
Alloc_return alloc_addr(size_t, addr_t) override
|
||||
Alloc_result alloc_addr(size_t, addr_t) override
|
||||
{
|
||||
return Alloc_return::OK;
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
Range_result add_range(addr_t, size_t) override
|
||||
{
|
||||
return Range_ok();
|
||||
}
|
||||
|
||||
Range_result remove_range(addr_t, size_t) override
|
||||
{
|
||||
return Range_ok();
|
||||
}
|
||||
|
||||
int add_range(addr_t, size_t) override { return 0; }
|
||||
int remove_range(addr_t, size_t) override { return 0; }
|
||||
void free(void *) override { }
|
||||
void free(void *, size_t) override { }
|
||||
size_t avail() const override { return ram_quota_from_env(); }
|
||||
|
@@ -410,11 +410,7 @@ namespace {
 	{
 		typedef Genode::size_t size_t;
 
-		bool alloc(size_t size, void **out_addr) override
-		{
-			*out_addr = malloc(size);
-			return true;
-		}
+		Alloc_result try_alloc(size_t size) override { return malloc(size); }
 
 		void free(void *addr, size_t) override { ::free(addr); }
@@ -51,9 +51,9 @@ Main::Main(Env &env) : heap(env.ram(), env.rm())
 
 	/* induce initial heap expansion to remove RM noise */
 	if (1) {
-		void *addr;
-		heap.alloc(0x100000, &addr);
-		heap.free(addr, 0);
+		heap.try_alloc(0x100000).with_result(
+			[&] (void *ptr) { heap.free(ptr, 0); },
+			[&] (Allocator::Alloc_error) { });
 	}
 
 	addr_t beg((addr_t)&blob_beg);
@ -39,8 +39,12 @@ static inline void * alloc_region(Dataspace_component &ds, const size_t size)
|
||||
void *virt_addr = 0;
|
||||
size_t align_log2 = log2(ds.size());
|
||||
for (; align_log2 >= get_page_size_log2(); align_log2--) {
|
||||
if (platform().region_alloc().alloc_aligned(size,
|
||||
&virt_addr, align_log2).ok())
|
||||
|
||||
platform().region_alloc().alloc_aligned(size, align_log2).with_result(
|
||||
[&] (void *ptr) { virt_addr = ptr; },
|
||||
[&] (Allocator::Alloc_error) { });
|
||||
|
||||
if (virt_addr)
|
||||
break;
|
||||
}
|
||||
|
||||
|
@ -69,33 +69,36 @@ static bool msi(Genode::addr_t irq_sel, Genode::addr_t phys_mem,
|
||||
Genode::addr_t &msi_addr, Genode::addr_t &msi_data,
|
||||
Genode::Signal_context_capability sig_cap)
|
||||
{
|
||||
void * virt = 0;
|
||||
if (platform().region_alloc().alloc_aligned(4096, &virt, 12).error())
|
||||
return false;
|
||||
return platform().region_alloc().alloc_aligned(4096, 12).convert<bool>(
|
||||
|
||||
Genode::addr_t virt_addr = reinterpret_cast<Genode::addr_t>(virt);
|
||||
if (!virt_addr)
|
||||
return false;
|
||||
[&] (void *virt_ptr) {
|
||||
|
||||
using Nova::Rights;
|
||||
using Nova::Utcb;
|
||||
addr_t const virt_addr = reinterpret_cast<addr_t>(virt_ptr);
|
||||
|
||||
Nova::Mem_crd phys_crd(phys_mem >> 12, 0, Rights(true, false, false));
|
||||
Nova::Mem_crd virt_crd(virt_addr >> 12, 0, Rights(true, false, false));
|
||||
Utcb &utcb = *reinterpret_cast<Utcb *>(Thread::myself()->utcb());
|
||||
using Nova::Rights;
|
||||
using Nova::Utcb;
|
||||
|
||||
if (map_local_phys_to_virt(utcb, phys_crd, virt_crd, platform_specific().core_pd_sel())) {
|
||||
platform().region_alloc().free(virt, 4096);
|
||||
return false;
|
||||
}
|
||||
Nova::Mem_crd phys_crd(phys_mem >> 12, 0, Rights(true, false, false));
|
||||
Nova::Mem_crd virt_crd(virt_addr >> 12, 0, Rights(true, false, false));
|
||||
|
||||
/* try to assign MSI to device */
|
||||
bool res = associate(irq_sel, msi_addr, msi_data, sig_cap, virt_addr);
|
||||
Utcb &utcb = *reinterpret_cast<Utcb *>(Thread::myself()->utcb());
|
||||
|
||||
unmap_local(Nova::Mem_crd(virt_addr >> 12, 0, Rights(true, true, true)));
|
||||
platform().region_alloc().free(virt, 4096);
|
||||
if (map_local_phys_to_virt(utcb, phys_crd, virt_crd, platform_specific().core_pd_sel())) {
|
||||
platform().region_alloc().free(virt_ptr, 4096);
|
||||
return false;
|
||||
}
|
||||
|
||||
return res;
|
||||
/* try to assign MSI to device */
|
||||
bool res = associate(irq_sel, msi_addr, msi_data, sig_cap, virt_addr);
|
||||
|
||||
unmap_local(Nova::Mem_crd(virt_addr >> 12, 0, Rights(true, true, true)));
|
||||
platform().region_alloc().free(virt_ptr, 4096);
|
||||
|
||||
return res;
|
||||
},
|
||||
[&] (Range_allocator::Alloc_error) {
|
||||
return false;
|
||||
});
|
||||
}
|
||||
|
||||
|
||||
@@ -217,7 +220,7 @@ Irq_session_component::Irq_session_component(Range_allocator &irq_alloc,
 		throw Service_denied();
 	}
 
-	if (irq_alloc.alloc_addr(1, irq_number).error()) {
+	if (irq_alloc.alloc_addr(1, irq_number).failed()) {
 		error("unavailable IRQ ", irq_number, " requested");
 		throw Service_denied();
 	}
@ -80,18 +80,21 @@ addr_t Platform::_map_pages(addr_t const phys_addr, addr_t const pages,
|
||||
addr_t const size = pages << get_page_size_log2();
|
||||
|
||||
/* try to reserve contiguous virtual area */
|
||||
void *core_local_ptr = nullptr;
|
||||
if (region_alloc().alloc_aligned(size + (guard_page ? get_page_size() : 0),
|
||||
&core_local_ptr, get_page_size_log2()).error())
|
||||
return 0;
|
||||
return region_alloc().alloc_aligned(size + (guard_page ? get_page_size() : 0),
|
||||
get_page_size_log2()).convert<addr_t>(
|
||||
[&] (void *core_local_ptr) {
|
||||
|
||||
addr_t const core_local_addr = reinterpret_cast<addr_t>(core_local_ptr);
|
||||
addr_t const core_local_addr = reinterpret_cast<addr_t>(core_local_ptr);
|
||||
|
||||
int res = map_local(_core_pd_sel, *__main_thread_utcb, phys_addr,
|
||||
core_local_addr, pages,
|
||||
Nova::Rights(true, true, false), true);
|
||||
int res = map_local(_core_pd_sel, *__main_thread_utcb, phys_addr,
|
||||
core_local_addr, pages,
|
||||
Nova::Rights(true, true, false), true);
|
||||
|
||||
return res ? 0 : core_local_addr;
|
||||
return res ? 0 : core_local_addr;
|
||||
},
|
||||
|
||||
[&] (Allocator::Alloc_error) {
|
||||
return 0UL; });
|
||||
}
|
||||
|
||||
|
||||
@ -661,126 +664,113 @@ Platform::Platform()
|
||||
|
||||
_init_rom_modules();
|
||||
|
||||
auto export_pages_as_rom_module = [&] (auto rom_name, size_t pages, auto content_fn)
|
||||
{
|
||||
/* export x86 platform specific infos */
|
||||
size_t const bytes = pages << get_page_size_log2();
|
||||
ram_alloc().alloc_aligned(bytes, get_page_size_log2()).with_result(
|
||||
|
||||
unsigned const pages = 1;
|
||||
void * phys_ptr = nullptr;
|
||||
if (ram_alloc().alloc_aligned(get_page_size(), &phys_ptr,
|
||||
get_page_size_log2()).ok()) {
|
||||
[&] (void *phys_ptr) {
|
||||
|
||||
addr_t const phys_addr = reinterpret_cast<addr_t>(phys_ptr);
|
||||
addr_t const core_local_addr = _map_pages(phys_addr, pages);
|
||||
addr_t const phys_addr = reinterpret_cast<addr_t>(phys_ptr);
|
||||
char * const core_local_ptr = (char *)_map_pages(phys_addr, pages);
|
||||
|
||||
if (!core_local_addr) {
|
||||
ram_alloc().free(phys_ptr);
|
||||
} else {
|
||||
if (!core_local_ptr) {
|
||||
warning("failed to export ", rom_name, " as ROM module");
|
||||
ram_alloc().free(phys_ptr, bytes);
|
||||
return;
|
||||
}
|
||||
|
||||
Genode::Xml_generator xml(reinterpret_cast<char *>(core_local_addr),
|
||||
pages << get_page_size_log2(),
|
||||
"platform_info", [&] ()
|
||||
{
|
||||
xml.node("kernel", [&] () {
|
||||
xml.attribute("name", "nova");
|
||||
xml.attribute("acpi", true);
|
||||
xml.attribute("msi" , true);
|
||||
memset(core_local_ptr, 0, bytes);
|
||||
content_fn(core_local_ptr, bytes);
|
||||
|
||||
_rom_fs.insert(new (core_mem_alloc())
|
||||
Rom_module(phys_addr, bytes, rom_name));
|
||||
|
||||
/* leave the ROM backing store mapped within core */
|
||||
},
|
||||
|
||||
[&] (Range_allocator::Alloc_error) {
|
||||
warning("failed to allocate physical memory for exporting ",
|
||||
rom_name, " as ROM module"); });
|
||||
};
|
||||
|
||||
export_pages_as_rom_module("platform_info", 1,
|
||||
[&] (char * const ptr, size_t const size) {
|
||||
Xml_generator xml(ptr, size, "platform_info", [&] ()
|
||||
{
|
||||
xml.node("kernel", [&] () {
|
||||
xml.attribute("name", "nova");
|
||||
xml.attribute("acpi", true);
|
||||
xml.attribute("msi" , true);
|
||||
});
|
||||
if (efi_sys_tab_phy) {
|
||||
xml.node("efi-system-table", [&] () {
|
||||
xml.attribute("address", String<32>(Hex(efi_sys_tab_phy)));
|
||||
});
|
||||
if (efi_sys_tab_phy) {
|
||||
xml.node("efi-system-table", [&] () {
|
||||
xml.attribute("address", String<32>(Hex(efi_sys_tab_phy)));
|
||||
});
|
||||
}
|
||||
xml.node("acpi", [&] () {
|
||||
}
|
||||
xml.node("acpi", [&] () {
|
||||
|
||||
xml.attribute("revision", 2); /* XXX */
|
||||
xml.attribute("revision", 2); /* XXX */
|
||||
|
||||
if (rsdt)
|
||||
xml.attribute("rsdt", String<32>(Hex(rsdt)));
|
||||
if (rsdt)
|
||||
xml.attribute("rsdt", String<32>(Hex(rsdt)));
|
||||
|
||||
if (xsdt)
|
||||
xml.attribute("xsdt", String<32>(Hex(xsdt)));
|
||||
if (xsdt)
|
||||
xml.attribute("xsdt", String<32>(Hex(xsdt)));
|
||||
});
|
||||
xml.node("affinity-space", [&] () {
|
||||
xml.attribute("width", _cpus.width());
|
||||
xml.attribute("height", _cpus.height());
|
||||
});
|
||||
xml.node("boot", [&] () {
|
||||
if (!boot_fb)
|
||||
return;
|
||||
|
||||
if (!efi_boot && (Resolution::Type::get(boot_fb->size) != Resolution::Type::VGA_TEXT))
|
||||
return;
|
||||
|
||||
xml.node("framebuffer", [&] () {
|
||||
xml.attribute("phys", String<32>(Hex(boot_fb->addr)));
|
||||
xml.attribute("width", Resolution::Width::get(boot_fb->size));
|
||||
xml.attribute("height", Resolution::Height::get(boot_fb->size));
|
||||
xml.attribute("bpp", Resolution::Bpp::get(boot_fb->size));
|
||||
xml.attribute("type", Resolution::Type::get(boot_fb->size));
|
||||
xml.attribute("pitch", boot_fb->aux);
|
||||
});
|
||||
xml.node("affinity-space", [&] () {
|
||||
xml.attribute("width", _cpus.width());
|
||||
xml.attribute("height", _cpus.height());
|
||||
});
|
||||
xml.node("hardware", [&] () {
|
||||
xml.node("features", [&] () {
|
||||
xml.attribute("svm", hip.has_feature_svm());
|
||||
xml.attribute("vmx", hip.has_feature_vmx());
|
||||
});
|
||||
xml.node("boot", [&] () {
|
||||
if (!boot_fb)
|
||||
return;
|
||||
|
||||
if (!efi_boot && (Resolution::Type::get(boot_fb->size) != Resolution::Type::VGA_TEXT))
|
||||
return;
|
||||
|
||||
xml.node("framebuffer", [&] () {
|
||||
xml.attribute("phys", String<32>(Hex(boot_fb->addr)));
|
||||
xml.attribute("width", Resolution::Width::get(boot_fb->size));
|
||||
xml.attribute("height", Resolution::Height::get(boot_fb->size));
|
||||
xml.attribute("bpp", Resolution::Bpp::get(boot_fb->size));
|
||||
xml.attribute("type", Resolution::Type::get(boot_fb->size));
|
||||
xml.attribute("pitch", boot_fb->aux);
|
||||
});
|
||||
xml.node("tsc", [&] () {
|
||||
xml.attribute("invariant", cpuid_invariant_tsc());
|
||||
xml.attribute("freq_khz" , hip.tsc_freq);
|
||||
});
|
||||
xml.node("hardware", [&] () {
|
||||
xml.node("features", [&] () {
|
||||
xml.attribute("svm", hip.has_feature_svm());
|
||||
xml.attribute("vmx", hip.has_feature_vmx());
|
||||
});
|
||||
xml.node("tsc", [&] () {
|
||||
xml.attribute("invariant", cpuid_invariant_tsc());
|
||||
xml.attribute("freq_khz" , hip.tsc_freq);
|
||||
});
|
||||
xml.node("cpus", [&] () {
|
||||
hip.for_each_enabled_cpu([&](Hip::Cpu_desc const &cpu, unsigned i) {
|
||||
xml.node("cpu", [&] () {
|
||||
xml.attribute("id", i);
|
||||
xml.attribute("package", cpu.package);
|
||||
xml.attribute("core", cpu.core);
|
||||
xml.attribute("thread", cpu.thread);
|
||||
xml.attribute("family", String<5>(Hex(cpu.family)));
|
||||
xml.attribute("model", String<5>(Hex(cpu.model)));
|
||||
xml.attribute("stepping", String<5>(Hex(cpu.stepping)));
|
||||
xml.attribute("platform", String<5>(Hex(cpu.platform)));
|
||||
xml.attribute("patch", String<12>(Hex(cpu.patch)));
|
||||
});
|
||||
xml.node("cpus", [&] () {
|
||||
hip.for_each_enabled_cpu([&](Hip::Cpu_desc const &cpu, unsigned i) {
|
||||
xml.node("cpu", [&] () {
|
||||
xml.attribute("id", i);
|
||||
xml.attribute("package", cpu.package);
|
||||
xml.attribute("core", cpu.core);
|
||||
xml.attribute("thread", cpu.thread);
|
||||
xml.attribute("family", String<5>(Hex(cpu.family)));
|
||||
xml.attribute("model", String<5>(Hex(cpu.model)));
|
||||
xml.attribute("stepping", String<5>(Hex(cpu.stepping)));
|
||||
xml.attribute("platform", String<5>(Hex(cpu.platform)));
|
||||
xml.attribute("patch", String<12>(Hex(cpu.patch)));
|
||||
});
|
||||
});
|
||||
});
|
||||
});
|
||||
|
||||
unmap_local(*__main_thread_utcb, core_local_addr, pages);
|
||||
region_alloc().free(reinterpret_cast<void *>(core_local_addr),
|
||||
pages * get_page_size());
|
||||
|
||||
_rom_fs.insert(new (core_mem_alloc())
|
||||
Rom_module(phys_addr, pages * get_page_size(),
|
||||
"platform_info"));
|
||||
}
|
||||
});
|
||||
}
|
||||
}
|
||||
);
|
||||
|
||||
/* core log as ROM module */
|
||||
{
|
||||
void * phys_ptr = nullptr;
|
||||
unsigned const pages = 4;
|
||||
size_t const log_size = pages << get_page_size_log2();
|
||||
|
||||
if (ram_alloc().alloc_aligned(log_size, &phys_ptr,
|
||||
get_page_size_log2()).ok()) {
|
||||
|
||||
addr_t const phys_addr = reinterpret_cast<addr_t>(phys_ptr);
|
||||
|
||||
addr_t const virt = _map_pages(phys_addr, pages, true);
|
||||
if (virt) {
|
||||
memset(reinterpret_cast<void *>(virt), 0, log_size);
|
||||
|
||||
_rom_fs.insert(new (core_mem_alloc()) Rom_module(phys_addr, log_size,
|
||||
"core_log"));
|
||||
|
||||
init_core_log( Core_log_range { virt, log_size } );
|
||||
} else
|
||||
ram_alloc().free(phys_ptr);
|
||||
}
|
||||
}
|
||||
export_pages_as_rom_module("core_log", 4,
|
||||
[&] (char * const ptr, size_t const size) {
|
||||
init_core_log( Core_log_range { (addr_t)ptr, size } );
|
||||
});
|
||||
|
||||
/* export hypervisor log memory */
|
||||
if (hyp_log && hyp_log_size)
|
||||
@ -831,8 +821,12 @@ Platform::Platform()
|
||||
for (unsigned i = 0; i < 32; i++)
|
||||
{
|
||||
void * phys_ptr = nullptr;
|
||||
if (ram_alloc().alloc_aligned(get_page_size(), &phys_ptr,
|
||||
get_page_size_log2()).error())
|
||||
|
||||
ram_alloc().alloc_aligned(get_page_size(), get_page_size_log2()).with_result(
|
||||
[&] (void *ptr) { phys_ptr = ptr; },
|
||||
[&] (Range_allocator::Alloc_error) { /* covered by nullptr test below */ });
|
||||
|
||||
if (phys_ptr == nullptr)
|
||||
break;
|
||||
|
||||
addr_t phys_addr = reinterpret_cast<addr_t>(phys_ptr);
|
||||
|
@ -40,12 +40,17 @@ static inline void * alloc_region(Dataspace_component &ds, const size_t size)
|
||||
void *virt_addr = 0;
|
||||
size_t align_log2 = log2(ds.size());
|
||||
for (; align_log2 >= get_page_size_log2(); align_log2--) {
|
||||
if (platform().region_alloc().alloc_aligned(size,
|
||||
&virt_addr, align_log2).ok())
|
||||
break;
|
||||
|
||||
platform().region_alloc().alloc_aligned(size, align_log2).with_result(
|
||||
[&] (void *ptr) { virt_addr = ptr; },
|
||||
[&] (Range_allocator::Alloc_error) { /* try next iteration */ }
|
||||
);
|
||||
if (virt_addr)
|
||||
return virt_addr;
|
||||
}
|
||||
|
||||
return virt_addr;
|
||||
error("alloc_region of size ", size, " unexpectedly failed");
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
|
||||
|
@ -24,10 +24,8 @@ Core_region_map::attach(Dataspace_capability ds_cap, size_t size,
|
||||
off_t offset, bool use_local_addr,
|
||||
Region_map::Local_addr, bool, bool)
|
||||
{
|
||||
using namespace Okl4;
|
||||
return _ep.apply(ds_cap, [&] (Dataspace_component *ds) -> void * {
|
||||
|
||||
auto lambda = [&] (Dataspace_component *ds) -> void *
|
||||
{
|
||||
if (!ds)
|
||||
throw Invalid_dataspace();
|
||||
|
||||
@ -48,21 +46,25 @@ Core_region_map::attach(Dataspace_capability ds_cap, size_t size,
|
||||
}
|
||||
|
||||
/* allocate range in core's virtual address space */
|
||||
void *virt_addr;
|
||||
if (!platform().region_alloc().alloc(page_rounded_size, &virt_addr)) {
|
||||
error("could not allocate virtual address range in core of size ",
|
||||
page_rounded_size);
|
||||
return nullptr;
|
||||
}
|
||||
Range_allocator &virt_alloc = platform().region_alloc();
|
||||
return virt_alloc.try_alloc(page_rounded_size).convert<void *>(
|
||||
|
||||
/* map the dataspace's physical pages to corresponding virtual addresses */
|
||||
unsigned num_pages = page_rounded_size >> get_page_size_log2();
|
||||
if (!map_local(ds->phys_addr(), (addr_t)virt_addr, num_pages))
|
||||
return nullptr;
|
||||
return virt_addr;
|
||||
};
|
||||
[&] (void *virt_addr) -> void * {
|
||||
|
||||
return _ep.apply(ds_cap, lambda);
|
||||
/* map the dataspace's physical pages to virtual memory */
|
||||
unsigned num_pages = page_rounded_size >> get_page_size_log2();
|
||||
if (!map_local(ds->phys_addr(), (addr_t)virt_addr, num_pages))
|
||||
return nullptr;
|
||||
|
||||
return virt_addr;
|
||||
},
|
||||
|
||||
[&] (Range_allocator::Alloc_error) -> void * {
|
||||
error("could not allocate virtual address range in core of size ",
|
||||
page_rounded_size);
|
||||
return nullptr;
|
||||
});
|
||||
});
|
||||
}
|
||||
|
||||
|
||||
|
@@ -138,7 +138,7 @@ Irq_session_component::Irq_session_component(Range_allocator &irq_alloc,
 	if (msi)
 		throw Service_denied();
 
-	if (irq_alloc.alloc_addr(1, _irq_number).error()) {
+	if (irq_alloc.alloc_addr(1, _irq_number).failed()) {
 		error("unavailable IRQ ", Hex(_irq_number), " requested");
 		throw Service_denied();
 	}
@ -189,52 +189,66 @@ Platform::Platform()
|
||||
|
||||
/* core log as ROM module */
|
||||
{
|
||||
void * core_local_ptr = nullptr;
|
||||
void * phys_ptr = nullptr;
|
||||
unsigned const pages = 1;
|
||||
size_t const log_size = pages << get_page_size_log2();
|
||||
unsigned const align = get_page_size_log2();
|
||||
|
||||
ram_alloc().alloc_aligned(log_size, &phys_ptr, get_page_size_log2());
|
||||
addr_t const phys_addr = reinterpret_cast<addr_t>(phys_ptr);
|
||||
ram_alloc().alloc_aligned(log_size, align).with_result(
|
||||
|
||||
/* let one page free after the log buffer */
|
||||
region_alloc().alloc_aligned(log_size, &core_local_ptr, get_page_size_log2());
|
||||
addr_t const core_local_addr = reinterpret_cast<addr_t>(core_local_ptr);
|
||||
[&] (void *phys) {
|
||||
addr_t const phys_addr = reinterpret_cast<addr_t>(phys);
|
||||
|
||||
map_local(phys_addr, core_local_addr, pages);
|
||||
memset(core_local_ptr, 0, log_size);
|
||||
region_alloc().alloc_aligned(log_size, align). with_result(
|
||||
|
||||
_rom_fs.insert(new (core_mem_alloc()) Rom_module(phys_addr, log_size,
|
||||
"core_log"));
|
||||
[&] (void *ptr) {
|
||||
|
||||
init_core_log(Core_log_range { core_local_addr, log_size } );
|
||||
map_local(phys_addr, (addr_t)ptr, pages);
|
||||
memset(ptr, 0, log_size);
|
||||
|
||||
_rom_fs.insert(new (core_mem_alloc())
|
||||
Rom_module(phys_addr, log_size, "core_log"));
|
||||
|
||||
init_core_log(Core_log_range { (addr_t)ptr, log_size } );
|
||||
},
|
||||
[&] (Range_allocator::Alloc_error) { }
|
||||
);
|
||||
},
|
||||
[&] (Range_allocator::Alloc_error) { }
|
||||
);
|
||||
}
|
||||
|
||||
/* export platform specific infos */
|
||||
{
|
||||
void * core_local_ptr = nullptr;
|
||||
void * phys_ptr = nullptr;
|
||||
unsigned const pages = 1;
|
||||
size_t const size = pages << get_page_size_log2();
|
||||
|
||||
if (ram_alloc().alloc_aligned(size, &phys_ptr, get_page_size_log2()).ok()) {
|
||||
addr_t const phys_addr = reinterpret_cast<addr_t>(phys_ptr);
|
||||
ram_alloc().alloc_aligned(size, get_page_size_log2()).with_result(
|
||||
|
||||
/* let one page free after the log buffer */
|
||||
region_alloc().alloc_aligned(size, &core_local_ptr, get_page_size_log2());
|
||||
addr_t const core_local_addr = reinterpret_cast<addr_t>(core_local_ptr);
|
||||
[&] (void *phys_ptr) {
|
||||
addr_t const phys_addr = reinterpret_cast<addr_t>(phys_ptr);
|
||||
|
||||
if (map_local(phys_addr, core_local_addr, pages)) {
|
||||
/* let one page free after the log buffer */
|
||||
region_alloc().alloc_aligned(size, get_page_size_log2()).with_result(
|
||||
|
||||
Genode::Xml_generator xml(reinterpret_cast<char *>(core_local_addr),
|
||||
size, "platform_info", [&] () {
|
||||
xml.node("kernel", [&] () { xml.attribute("name", "okl4"); });
|
||||
});
|
||||
[&] (void *core_local_ptr) {
|
||||
addr_t const core_local_addr = reinterpret_cast<addr_t>(core_local_ptr);
|
||||
|
||||
_rom_fs.insert(new (core_mem_alloc()) Rom_module(phys_addr, size,
|
||||
"platform_info"));
|
||||
}
|
||||
}
|
||||
if (map_local(phys_addr, core_local_addr, pages)) {
|
||||
|
||||
Xml_generator xml(reinterpret_cast<char *>(core_local_addr),
|
||||
size, "platform_info", [&] () {
|
||||
xml.node("kernel", [&] () { xml.attribute("name", "okl4"); });
|
||||
});
|
||||
|
||||
_rom_fs.insert(new (core_mem_alloc()) Rom_module(phys_addr, size,
|
||||
"platform_info"));
|
||||
}
|
||||
},
|
||||
[&] (Range_allocator::Alloc_error) { }
|
||||
);
|
||||
},
|
||||
[&] (Range_allocator::Alloc_error) { }
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -38,31 +38,41 @@ void Ram_dataspace_factory::_clear_ds (Dataspace_component &ds)
|
||||
{
|
||||
size_t page_rounded_size = (ds.size() + get_page_size() - 1) & get_page_mask();
|
||||
|
||||
struct Guard
|
||||
{
|
||||
Range_allocator &virt_alloc;
|
||||
struct { void *virt_ptr = nullptr; };
|
||||
|
||||
Guard(Range_allocator &virt_alloc) : virt_alloc(virt_alloc) { }
|
||||
|
||||
~Guard() { if (virt_ptr) virt_alloc.free(virt_ptr); }
|
||||
|
||||
} guard(platform().region_alloc());
|
||||
|
||||
/* allocate range in core's virtual address space */
|
||||
void *virt_addr;
|
||||
if (!platform().region_alloc().alloc(page_rounded_size, &virt_addr)) {
|
||||
error("could not allocate virtual address range in core of size ",
|
||||
page_rounded_size);
|
||||
platform().region_alloc().try_alloc(page_rounded_size).with_result(
|
||||
[&] (void *ptr) { guard.virt_ptr = ptr; },
|
||||
[&] (Range_allocator::Alloc_error e) {
|
||||
error("could not allocate virtual address range in core of size ",
|
||||
page_rounded_size, ", error=", e); });
|
||||
|
||||
if (!guard.virt_ptr)
|
||||
return;
|
||||
}
|
||||
|
||||
/* map the dataspace's physical pages to corresponding virtual addresses */
|
||||
size_t num_pages = page_rounded_size >> get_page_size_log2();
|
||||
if (!map_local(ds.phys_addr(), (addr_t)virt_addr, num_pages)) {
|
||||
error("core-local memory mapping failed, error=", Okl4::L4_ErrorCode());
|
||||
if (!map_local(ds.phys_addr(), (addr_t)guard.virt_ptr, num_pages)) {
|
||||
error("core-local memory mapping failed");
|
||||
return;
|
||||
}
|
||||
|
||||
/* clear dataspace */
|
||||
size_t num_longwords = page_rounded_size/sizeof(long);
|
||||
for (long *dst = (long *)virt_addr; num_longwords--;)
|
||||
for (long *dst = (long *)guard.virt_ptr; num_longwords--;)
|
||||
*dst++ = 0;
|
||||
|
||||
/* unmap dataspace from core */
|
||||
if (!unmap_local((addr_t)virt_addr, num_pages))
|
||||
error("could not unmap core-local address range at ", virt_addr, ", "
|
||||
if (!unmap_local((addr_t)guard.virt_ptr, num_pages))
|
||||
error("could not unmap core-local address range at ", guard.virt_ptr, ", "
|
||||
"error=", Okl4::L4_ErrorCode());
|
||||
|
||||
/* free core's virtual address space */
|
||||
platform().region_alloc().free(virt_addr, page_rounded_size);
|
||||
}
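The 'Guard' introduced in this hunk ties the core-local virtual range to the scope of '_clear_ds', so every early 'return' (failed allocation, failed mapping) releases the range without an explicit 'free' on each path. A minimal sketch of the idiom, assuming a generic 'Range_allocator' and with 'size' standing for the page-rounded size computed above:

    struct Virt_range_guard
    {
        Range_allocator &alloc;
        void *ptr = nullptr;                           /* set only after a successful try_alloc */
        ~Virt_range_guard() { if (ptr) alloc.free(ptr); }
    } guard { platform().region_alloc() };

    platform().region_alloc().try_alloc(size).with_result(
        [&] (void *p) { guard.ptr = p; },
        [&] (Range_allocator::Alloc_error) { });

    if (!guard.ptr)
        return;                                        /* nothing allocated, nothing to release */

    /* ... use guard.ptr; the range is freed when the guard leaves scope ... */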
|
||||
|
@ -60,28 +60,30 @@ addr_t Io_mem_session_component::_map_local(addr_t base, size_t size)
|
||||
{
|
||||
using namespace Pistachio;
|
||||
|
||||
addr_t local_base;
|
||||
auto alloc_virt_range = [&] ()
|
||||
{
|
||||
/* special case for the null page */
|
||||
if (is_conventional_memory(base))
|
||||
return base;
|
||||
|
||||
/* align large I/O dataspaces on a super-page boundary within core */
|
||||
size_t alignment = (size >= get_super_page_size()) ? get_super_page_size_log2()
|
||||
: get_page_size_log2();
|
||||
/* align large I/O dataspaces on a super-page boundary within core */
|
||||
size_t const align = (size >= get_super_page_size())
|
||||
? get_super_page_size_log2()
|
||||
: get_page_size_log2();
|
||||
|
||||
/* special case for the null page */
|
||||
if (is_conventional_memory(base))
|
||||
local_base = base;
|
||||
return platform().region_alloc().alloc_aligned(size, align).convert<addr_t>(
|
||||
[&] (void *ptr) { return (addr_t)ptr; },
|
||||
[&] (Range_allocator::Alloc_error) -> addr_t {
|
||||
error(__func__, ": alloc_aligned failed!");
|
||||
return 0; });
|
||||
};
|
||||
|
||||
else {
|
||||
addr_t const local_base = (addr_t)alloc_virt_range();
|
||||
|
||||
/* find appropriate region for mapping */
|
||||
void *result = 0;
|
||||
if (platform().region_alloc().alloc_aligned(size, &result, alignment).error())
|
||||
error(__func__, ": alloc_aligned failed!");
|
||||
if (!local_base)
|
||||
return 0;
|
||||
|
||||
local_base = (addr_t)result;
|
||||
}
|
||||
|
||||
unsigned offset = 0;
|
||||
while (size) {
|
||||
for (unsigned offset = 0; size; ) {
|
||||
|
||||
size_t page_size = get_page_size();
|
||||
if (can_use_super_page(base + offset, size))
|
||||
|
@ -133,7 +133,7 @@ Irq_session_component::Irq_session_component(Range_allocator &irq_alloc,
|
||||
if (msi)
|
||||
throw Service_denied();
|
||||
|
||||
if (irq_alloc.alloc_addr(1, _irq_number).error()) {
|
||||
if (irq_alloc.alloc_addr(1, _irq_number).failed()) {
|
||||
error("unavailable IRQ ", Hex(_irq_number), " requested");
|
||||
throw Service_denied();
|
||||
}
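Where a call site only needs a success/failure test, the 'Attempt' return value offers 'ok()' and 'failed()', so former 'Alloc_return::error()' checks translate one-to-one, as in the IRQ hunk above. A small sketch with a hypothetical 'irq_number':

    if (irq_alloc.alloc_addr(1, irq_number).failed()) {
        error("unavailable IRQ ", irq_number, " requested");
        throw Service_denied();
    }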
|
||||
|
@ -605,51 +605,42 @@ Platform::Platform()
|
||||
|
||||
core_pd().bind_thread(core_thread);
|
||||
|
||||
/* core log as ROM module */
|
||||
auto export_page_as_rom_module = [&] (auto rom_name, auto content_fn)
|
||||
{
|
||||
void * phys_ptr = nullptr;
|
||||
unsigned const pages = 1;
|
||||
size_t const log_size = pages << get_page_size_log2();
|
||||
size_t const size = 1 << get_page_size_log2();
|
||||
ram_alloc().alloc_aligned(size, get_page_size_log2()).with_result(
|
||||
|
||||
ram_alloc().alloc_aligned(log_size, &phys_ptr, get_page_size_log2());
|
||||
addr_t const phys_addr = reinterpret_cast<addr_t>(phys_ptr);
|
||||
[&] (void *phys_ptr) {
|
||||
|
||||
void * const core_local_ptr = phys_ptr;
|
||||
addr_t const core_local_addr = phys_addr;
|
||||
/* core-local memory is one-to-one mapped physical RAM */
|
||||
addr_t const phys_addr = reinterpret_cast<addr_t>(phys_ptr);
|
||||
void * const core_local_ptr = phys_ptr;
|
||||
|
||||
/* let one page free after the log buffer */
|
||||
region_alloc().remove_range(core_local_addr, log_size + get_page_size());
|
||||
region_alloc().remove_range((addr_t)core_local_ptr, size);
|
||||
memset(core_local_ptr, 0, size);
|
||||
content_fn(core_local_ptr, size);
|
||||
|
||||
memset(core_local_ptr, 0, log_size);
|
||||
_rom_fs.insert(new (core_mem_alloc())
|
||||
Rom_module(phys_addr, size, rom_name));
|
||||
},
|
||||
[&] (Range_allocator::Alloc_error) {
|
||||
warning("failed to export ", rom_name, " as ROM module"); }
|
||||
);
|
||||
};
|
||||
|
||||
_rom_fs.insert(new (core_mem_alloc()) Rom_module(phys_addr, log_size,
|
||||
"core_log"));
|
||||
|
||||
init_core_log(Core_log_range { core_local_addr, log_size } );
|
||||
}
|
||||
/* core log as ROM module */
|
||||
export_page_as_rom_module("core_log",
|
||||
[&] (void *core_local_ptr, size_t size) {
|
||||
init_core_log(Core_log_range { (addr_t)core_local_ptr, size } ); });
|
||||
|
||||
/* export platform specific infos */
|
||||
{
|
||||
void * phys_ptr = nullptr;
|
||||
size_t const size = 1 << get_page_size_log2();
|
||||
|
||||
if (ram_alloc().alloc_aligned(size, &phys_ptr,
|
||||
get_page_size_log2()).ok()) {
|
||||
addr_t const phys_addr = reinterpret_cast<addr_t>(phys_ptr);
|
||||
addr_t const core_local_addr = phys_addr;
|
||||
|
||||
region_alloc().remove_range(core_local_addr, size);
|
||||
|
||||
Genode::Xml_generator xml(reinterpret_cast<char *>(core_local_addr),
|
||||
size, "platform_info", [&] () {
|
||||
xml.node("kernel", [&] () { xml.attribute("name", "pistachio"); });
|
||||
});
|
||||
|
||||
_rom_fs.insert(new (core_mem_alloc()) Rom_module(phys_addr, size,
|
||||
"platform_info"));
|
||||
}
|
||||
}
|
||||
|
||||
export_page_as_rom_module("platform_info",
|
||||
[&] (void *core_local_ptr, size_t size) {
|
||||
Xml_generator xml(reinterpret_cast<char *>(core_local_ptr),
|
||||
size, "platform_info",
|
||||
[&] () {
|
||||
xml.node("kernel", [&] () {
|
||||
xml.attribute("name", "pistachio"); }); }); });
|
||||
}
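The 'content_fn' handed to 'export_page_as_rom_module' renders the ROM content directly into the freshly allocated page. For the 'platform_info' ROM this is done with 'Xml_generator', roughly as sketched below, where 'buffer' and 'buffer_size' are placeholders for the core-local pointer and page size passed to the lambda:

    Xml_generator xml(buffer, buffer_size, "platform_info", [&] () {
        xml.node("kernel", [&] () { xml.attribute("name", "pistachio"); }); });

    /* resulting ROM content: <platform_info> <kernel name="pistachio"/> </platform_info> */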
|
||||
|
||||
|
||||
|
@ -26,7 +26,8 @@ Region_map::Local_addr
|
||||
Core_region_map::attach(Dataspace_capability ds_cap, size_t size, off_t offset,
|
||||
bool use_local_addr, Region_map::Local_addr, bool, bool)
|
||||
{
|
||||
auto lambda = [&] (Dataspace_component *ds) -> Local_addr {
|
||||
return _ep.apply(ds_cap, [&] (Dataspace_component *ds) -> Local_addr {
|
||||
|
||||
if (!ds)
|
||||
throw Invalid_dataspace();
|
||||
|
||||
@ -46,21 +47,22 @@ Core_region_map::attach(Dataspace_capability ds_cap, size_t size, off_t offset,
|
||||
}
|
||||
|
||||
/* allocate range in core's virtual address space */
|
||||
void *virt_addr;
|
||||
if (!platform().region_alloc().alloc(page_rounded_size, &virt_addr)) {
|
||||
error("could not allocate virtual address range in core of size ",
|
||||
page_rounded_size);
|
||||
return nullptr;
|
||||
}
|
||||
return platform().region_alloc().try_alloc(page_rounded_size).convert<Local_addr>(
|
||||
[&] (void *virt_ptr) {
|
||||
|
||||
/* map the dataspace's physical pages to core-local virtual addresses */
|
||||
size_t num_pages = page_rounded_size >> get_page_size_log2();
|
||||
map_local(ds->phys_addr(), (addr_t)virt_addr, num_pages);
|
||||
/* map the dataspace's physical pages to core-local virtual addresses */
|
||||
size_t num_pages = page_rounded_size >> get_page_size_log2();
|
||||
map_local(ds->phys_addr(), (addr_t)virt_ptr, num_pages);
|
||||
|
||||
return virt_addr;
|
||||
};
|
||||
|
||||
return _ep.apply(ds_cap, lambda);
|
||||
return virt_ptr;
|
||||
},
|
||||
[&] (Range_allocator::Alloc_error) -> Local_addr {
|
||||
error("could not allocate virtual address range in core of size ",
|
||||
page_rounded_size);
|
||||
return nullptr;
|
||||
}
|
||||
);
|
||||
});
|
||||
}
|
||||
|
||||
|
||||
|
@ -51,21 +51,17 @@ class Genode::Static_allocator : public Allocator

class Alloc_failed { };

bool alloc(size_t size, void **out_addr) override
Alloc_result try_alloc(size_t size) override
{
*out_addr = nullptr;

if (size > sizeof(Elem_space)) {
error("unexpected allocation size of ", size);
return false;
return Alloc_error::DENIED;
}

try {
*out_addr = &_elements[_used.alloc()]; }
return &_elements[_used.alloc()]; }
catch (typename Bit_allocator<MAX>::Out_of_indices) {
return false; }

return true;
return Alloc_error::DENIED; }
}

size_t overhead(size_t) const override { return 0; }
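For implementers, the switch from 'bool alloc(size_t, void **)' to 'try_alloc' means errors become values ('Alloc_error::DENIED' above) instead of 'false'. A hedged usage sketch for this 'Static_allocator', with the element count and variable names chosen hypothetically:

    Static_allocator<16> slots;

    slots.try_alloc(64).with_result(                 /* must not exceed the element size */
        [&] (void *ptr)                { /* use the slot at ptr */ },
        [&] (Allocator::Alloc_error e) { error("slot allocation failed: ", e); });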
|
||||
|
@ -33,17 +33,17 @@ struct Genode::Untyped_memory
|
||||
|
||||
static inline addr_t alloc_pages(Range_allocator &phys_alloc, size_t num_pages)
|
||||
{
|
||||
void *out_ptr = nullptr;
|
||||
Range_allocator::Alloc_return alloc_ret =
|
||||
phys_alloc.alloc_aligned(num_pages*get_page_size(), &out_ptr,
|
||||
get_page_size_log2());
|
||||
size_t const size = num_pages*get_page_size();
|
||||
unsigned const align = get_page_size_log2();
|
||||
|
||||
if (alloc_ret.error()) {
|
||||
error(__PRETTY_FUNCTION__, ": allocation of untyped memory failed");
|
||||
throw Phys_alloc_failed();
|
||||
}
|
||||
return phys_alloc.alloc_aligned(size, align).convert<addr_t>(
|
||||
|
||||
return (addr_t)out_ptr;
|
||||
[&] (void *ptr) {
|
||||
return (addr_t)ptr; },
|
||||
|
||||
[&] (Range_allocator::Alloc_error) -> addr_t {
|
||||
error(__PRETTY_FUNCTION__, ": allocation of untyped memory failed");
|
||||
throw Phys_alloc_failed(); });
|
||||
}
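When one branch of a 'convert' throws instead of returning, as in 'alloc_pages' above, the error lambda carries an explicit return type so that both branches still agree on the conversion target. The idiom in isolation, as a sketch rather than new code:

    return phys_alloc.alloc_aligned(size, align).convert<addr_t>(
        [&] (void *ptr) { return (addr_t)ptr; },
        [&] (Range_allocator::Alloc_error) -> addr_t {
            error("allocation of untyped memory failed");
            throw Phys_alloc_failed(); });           /* never returns normally */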
|
||||
|
||||
|
||||
|
@ -109,7 +109,7 @@ Irq_session_component::Irq_session_component(Range_allocator &irq_alloc,
|
||||
if (msi)
|
||||
throw Service_denied();
|
||||
|
||||
if (irq_alloc.alloc_addr(1, _irq_number).error()) {
|
||||
if (irq_alloc.alloc_addr(1, _irq_number).failed()) {
|
||||
error("unavailable IRQ ", _irq_number, " requested");
|
||||
throw Service_denied();
|
||||
}
|
||||
|
@ -285,27 +285,29 @@ void Platform::_init_rom_modules()
|
||||
static Tslab<Rom_module, sizeof(slab_block)>
|
||||
rom_module_slab(core_mem_alloc(), &slab_block);
|
||||
|
||||
/*
|
||||
* Allocate unused range of phys CNode address space where to make the
|
||||
* boot modules available.
|
||||
*/
|
||||
void *out_ptr = nullptr;
|
||||
size_t const modules_size = (addr_t)&_boot_modules_binaries_end
|
||||
- (addr_t)&_boot_modules_binaries_begin + 1;
|
||||
auto alloc_modules_range = [&] () -> addr_t
|
||||
{
|
||||
/*
|
||||
* Allocate unused range of phys CNode address space where to make the
|
||||
* boot modules available.
|
||||
*/
|
||||
size_t const size = (addr_t)&_boot_modules_binaries_end
|
||||
- (addr_t)&_boot_modules_binaries_begin + 1;
|
||||
size_t const align = get_page_size_log2();
|
||||
|
||||
Range_allocator::Alloc_return const alloc_ret =
|
||||
_unused_phys_alloc.alloc_aligned(modules_size, &out_ptr, get_page_size_log2());
|
||||
|
||||
if (alloc_ret.error()) {
|
||||
error("could not reserve phys CNode space for boot modules");
|
||||
struct Init_rom_modules_failed { };
|
||||
throw Init_rom_modules_failed();
|
||||
}
|
||||
return _unused_phys_alloc.alloc_aligned(size, align).convert<addr_t>(
|
||||
[&] (void *ptr) { return (addr_t)ptr; },
|
||||
[&] (Range_allocator::Alloc_error) -> addr_t {
|
||||
error("could not reserve phys CNode space for boot modules");
|
||||
struct Init_rom_modules_failed { };
|
||||
throw Init_rom_modules_failed();
|
||||
});
|
||||
};
|
||||
|
||||
/*
|
||||
* Calculate frame frame selector used to back the boot modules
|
||||
*/
|
||||
addr_t const unused_range_start = (addr_t)out_ptr;
|
||||
addr_t const unused_range_start = alloc_modules_range();
|
||||
addr_t const unused_first_frame_sel = unused_range_start >> get_page_size_log2();
|
||||
addr_t const modules_start = (addr_t)&_boot_modules_binaries_begin;
|
||||
addr_t const modules_core_offset = modules_start
|
||||
@ -349,36 +351,10 @@ void Platform::_init_rom_modules()
|
||||
(const char*)header->name);
|
||||
|
||||
_rom_fs.insert(rom_module);
|
||||
}
|
||||
};
|
||||
|
||||
/* export x86 platform specific infos via 'platform_info' ROM */
|
||||
|
||||
unsigned const pages = 1;
|
||||
addr_t const rom_size = pages << get_page_size_log2();
|
||||
void *virt_ptr = nullptr;
|
||||
const char *rom_name = "platform_info";
|
||||
|
||||
addr_t const phys_addr = Untyped_memory::alloc_page(ram_alloc());
|
||||
Untyped_memory::convert_to_page_frames(phys_addr, pages);
|
||||
|
||||
if (region_alloc().alloc_aligned(rom_size, &virt_ptr, get_page_size_log2()).error()) {
|
||||
error("could not setup platform_info ROM - region allocation error");
|
||||
Untyped_memory::free_page(ram_alloc(), phys_addr);
|
||||
return;
|
||||
}
|
||||
addr_t const virt_addr = reinterpret_cast<addr_t>(virt_ptr);
|
||||
|
||||
if (!map_local(phys_addr, virt_addr, pages, this)) {
|
||||
error("could not setup platform_info ROM - map error");
|
||||
region_alloc().free(virt_ptr);
|
||||
Untyped_memory::free_page(ram_alloc(), phys_addr);
|
||||
return;
|
||||
}
|
||||
|
||||
Genode::Xml_generator xml(reinterpret_cast<char *>(virt_addr),
|
||||
rom_size, rom_name, [&] ()
|
||||
auto gen_platform_info = [&] (Xml_generator &xml)
|
||||
{
|
||||
|
||||
if (!bi.extraLen)
|
||||
return;
|
||||
|
||||
@ -489,16 +465,69 @@ void Platform::_init_rom_modules()
|
||||
}
|
||||
});
|
||||
}
|
||||
});
|
||||
};
|
||||
|
||||
if (!unmap_local(virt_addr, pages, this)) {
|
||||
error("could not setup platform_info ROM - unmap error");
|
||||
return;
|
||||
}
|
||||
region_alloc().free(virt_ptr);
|
||||
/* export x86 platform specific infos via 'platform_info' ROM */
|
||||
auto export_page_as_rom_module = [&] (auto rom_name, auto content_fn)
|
||||
{
|
||||
constexpr unsigned const pages = 1;
|
||||
|
||||
_rom_fs.insert(
|
||||
new (core_mem_alloc()) Rom_module(phys_addr, rom_size, rom_name));
|
||||
struct Phys_alloc_guard
|
||||
{
|
||||
Range_allocator &_alloc;
|
||||
|
||||
addr_t const addr = Untyped_memory::alloc_page(_alloc);
|
||||
|
||||
bool keep = false;
|
||||
|
||||
Phys_alloc_guard(Range_allocator &alloc) :_alloc(alloc)
|
||||
{
|
||||
Untyped_memory::convert_to_page_frames(addr, pages);
|
||||
}
|
||||
|
||||
~Phys_alloc_guard()
|
||||
{
|
||||
if (keep)
|
||||
return;
|
||||
|
||||
Untyped_memory::free_page(_alloc, addr);
|
||||
}
|
||||
} phys { ram_alloc() };
|
||||
|
||||
addr_t const size = pages << get_page_size_log2();
|
||||
size_t const align = get_page_size_log2();
|
||||
|
||||
region_alloc().alloc_aligned(size, align).with_result(
|
||||
|
||||
[&] (void *core_local_ptr) {
|
||||
|
||||
if (!map_local(phys.addr, (addr_t)core_local_ptr, pages, this)) {
|
||||
error("could not setup platform_info ROM - map error");
|
||||
region_alloc().free(core_local_ptr);
|
||||
return;
|
||||
}
|
||||
|
||||
memset(core_local_ptr, 0, size);
|
||||
content_fn((char *)core_local_ptr, size);
|
||||
|
||||
_rom_fs.insert(
|
||||
new (core_mem_alloc()) Rom_module(phys.addr, size, rom_name));
|
||||
|
||||
phys.keep = true;
|
||||
},
|
||||
|
||||
[&] (Range_allocator::Alloc_error) {
|
||||
error("could not setup platform_info ROM - region allocation error");
|
||||
}
|
||||
);
|
||||
};
|
||||
|
||||
export_page_as_rom_module("platform_info", [&] (char *ptr, size_t size) {
|
||||
Xml_generator xml(ptr, size, "platform_info", [&] () {
|
||||
gen_platform_info(xml); }); });
|
||||
|
||||
export_page_as_rom_module("core_log", [&] (char *ptr, size_t size) {
|
||||
init_core_log(Core_log_range { (addr_t)ptr, size } ); });
|
||||
}
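The 'Phys_alloc_guard' above acts as a commit/rollback helper: the page is freed again in the destructor unless 'keep' is set once all subsequent steps (mapping, ROM insertion) have succeeded. The shape of the idiom in a self-contained, hypothetical sketch:

    struct Rollback_guard
    {
        bool keep = false;
        ~Rollback_guard() { if (!keep) { /* release the page again */ } }
    };

    void setup(bool step_one_ok, bool step_two_ok)
    {
        Rollback_guard guard { };
        if (!step_one_ok) return;    /* destructor rolls back */
        if (!step_two_ok) return;    /* destructor rolls back */
        guard.keep = true;           /* success: keep the resource */
    }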
|
||||
|
||||
|
||||
@ -560,17 +589,21 @@ Platform::Platform()
|
||||
|
||||
/* add some minor virtual region for dynamic usage by core */
|
||||
addr_t const virt_size = 32 * 1024 * 1024;
|
||||
void * virt_ptr = nullptr;
|
||||
if (_unused_virt_alloc.alloc_aligned(virt_size, &virt_ptr, get_page_size_log2()).ok()) {
|
||||
_unused_virt_alloc.alloc_aligned(virt_size, get_page_size_log2()).with_result(
|
||||
|
||||
addr_t const virt_addr = (addr_t)virt_ptr;
|
||||
[&] (void *virt_ptr) {
|
||||
addr_t const virt_addr = (addr_t)virt_ptr;
|
||||
|
||||
/* add to available virtual region of core */
|
||||
_core_mem_alloc.virt_alloc().add_range(virt_addr, virt_size);
|
||||
/* add to available virtual region of core */
|
||||
_core_mem_alloc.virt_alloc().add_range(virt_addr, virt_size);
|
||||
|
||||
/* back region by page tables */
|
||||
_core_vm_space.unsynchronized_alloc_page_tables(virt_addr, virt_size);
|
||||
}
|
||||
/* back region by page tables */
|
||||
_core_vm_space.unsynchronized_alloc_page_tables(virt_addr, virt_size);
|
||||
},
|
||||
|
||||
[&] (Range_allocator::Alloc_error) {
|
||||
warning("failed to reserve core virtual memory for dynamic use"); }
|
||||
);
|
||||
|
||||
/* add idle thread trace subjects */
|
||||
for (unsigned cpu_id = 0; cpu_id < affinity_space().width(); cpu_id ++) {
|
||||
@ -623,28 +656,6 @@ Platform::Platform()
|
||||
/* I/O port allocator (only meaningful for x86) */
|
||||
_io_port_alloc.add_range(0, 0x10000);
|
||||
|
||||
/* core log as ROM module */
|
||||
{
|
||||
void * core_local_ptr = nullptr;
|
||||
unsigned const pages = 1;
|
||||
size_t const log_size = pages << get_page_size_log2();
|
||||
|
||||
addr_t const phys_addr = Untyped_memory::alloc_page(ram_alloc());
|
||||
Untyped_memory::convert_to_page_frames(phys_addr, pages);
|
||||
|
||||
/* let one page free after the log buffer */
|
||||
region_alloc().alloc_aligned(log_size + get_page_size(), &core_local_ptr, get_page_size_log2());
|
||||
addr_t const core_local_addr = reinterpret_cast<addr_t>(core_local_ptr);
|
||||
|
||||
map_local(phys_addr, core_local_addr, pages, this);
|
||||
memset(core_local_ptr, 0, log_size);
|
||||
|
||||
_rom_fs.insert(new (core_mem_alloc()) Rom_module(phys_addr, log_size,
|
||||
"core_log"));
|
||||
|
||||
init_core_log(Core_log_range { core_local_addr, log_size } );
|
||||
}
|
||||
|
||||
_init_rom_modules();
|
||||
|
||||
platform_in_construction = nullptr;
|
||||
|
@ -105,30 +105,34 @@ static void prepopulate_ipc_buffer(addr_t ipc_buffer_phys, Cap_sel ep_sel,
|
||||
size_t const page_rounded_size = get_page_size();
|
||||
|
||||
/* allocate range in core's virtual address space */
|
||||
void *virt_addr;
|
||||
if (!platform().region_alloc().alloc(page_rounded_size, &virt_addr)) {
|
||||
error("could not allocate virtual address range in core of size ",
|
||||
page_rounded_size);
|
||||
return;
|
||||
}
|
||||
platform().region_alloc().try_alloc(page_rounded_size).with_result(
|
||||
|
||||
/* map the IPC buffer to core-local virtual addresses */
|
||||
map_local(ipc_buffer_phys, (addr_t)virt_addr, 1);
|
||||
[&] (void *virt_ptr) {
|
||||
|
||||
/* populate IPC buffer with thread information */
|
||||
Native_utcb &utcb = *(Native_utcb *)virt_addr;
|
||||
utcb.ep_sel (ep_sel .value());
|
||||
utcb.lock_sel(lock_sel.value());
|
||||
/* map the IPC buffer to core-local virtual addresses */
|
||||
map_local(ipc_buffer_phys, (addr_t)virt_ptr, 1);
|
||||
|
||||
/* unmap IPC buffer from core */
|
||||
if (!unmap_local((addr_t)virt_addr, 1)) {
|
||||
error("could not unmap core virtual address ",
|
||||
virt_addr, " in ", __PRETTY_FUNCTION__);
|
||||
return;
|
||||
}
|
||||
/* populate IPC buffer with thread information */
|
||||
Native_utcb &utcb = *(Native_utcb *)virt_ptr;
|
||||
utcb.ep_sel (ep_sel .value());
|
||||
utcb.lock_sel(lock_sel.value());
|
||||
|
||||
/* free core's virtual address space */
|
||||
platform().region_alloc().free(virt_addr, page_rounded_size);
|
||||
/* unmap IPC buffer from core */
|
||||
if (!unmap_local((addr_t)virt_ptr, 1)) {
|
||||
error("could not unmap core virtual address ",
|
||||
virt_ptr, " in ", __PRETTY_FUNCTION__);
|
||||
return;
|
||||
}
|
||||
|
||||
/* free core's virtual address space */
|
||||
platform().region_alloc().free(virt_ptr, page_rounded_size);
|
||||
},
|
||||
|
||||
[&] (Range_allocator::Alloc_error) {
|
||||
error("could not allocate virtual address range in core of size ",
|
||||
page_rounded_size);
|
||||
}
|
||||
);
|
||||
}
|
||||
|
||||
|
||||
|
@ -42,14 +42,15 @@ void Ram_dataspace_factory::_clear_ds (Dataspace_component &ds)
|
||||
size_t const page_rounded_size = (ds.size() + get_page_size() - 1) & get_page_mask();
|
||||
|
||||
/* allocate one page in core's virtual address space */
|
||||
void *virt_addr_ptr = nullptr;
|
||||
if (!platform().region_alloc().alloc(get_page_size(), &virt_addr_ptr))
|
||||
ASSERT_NEVER_CALLED;
|
||||
auto alloc_one_virt_page = [&] () -> void *
|
||||
{
|
||||
return platform().region_alloc().try_alloc(get_page_size()).convert<void *>(
|
||||
[&] (void *ptr) { return ptr; },
|
||||
[&] (Range_allocator::Alloc_error) -> void * {
|
||||
ASSERT_NEVER_CALLED; });
|
||||
};
|
||||
|
||||
if (!virt_addr_ptr)
|
||||
ASSERT_NEVER_CALLED;
|
||||
|
||||
addr_t const virt_addr = reinterpret_cast<addr_t>(virt_addr_ptr);
|
||||
addr_t const virt_addr = (addr_t)alloc_one_virt_page();
|
||||
|
||||
/* map each page of dataspace one at a time and clear it */
|
||||
for (addr_t offset = 0; offset < page_rounded_size; offset += get_page_size())
|
||||
@ -72,5 +73,5 @@ void Ram_dataspace_factory::_clear_ds (Dataspace_component &ds)
|
||||
}
|
||||
|
||||
/* free core's virtual address space */
|
||||
platform().region_alloc().free(virt_addr_ptr, get_page_size());
|
||||
platform().region_alloc().free((void *)virt_addr, get_page_size());
|
||||
}
|
||||
|
@ -18,6 +18,7 @@
#include <base/stdint.h>
#include <base/exception.h>
#include <base/quota_guard.h>
#include <base/ram_allocator.h>

namespace Genode {

@ -61,7 +62,14 @@ struct Genode::Allocator : Deallocator
/**
* Exception type
*/
typedef Out_of_ram Out_of_memory;
using Out_of_memory = Out_of_ram;
using Denied = Ram_allocator::Denied;

/**
* Return type of 'try_alloc'
*/
using Alloc_error = Ram_allocator::Alloc_error;
using Alloc_result = Attempt<void *, Alloc_error>;

/**
* Destructor
@ -74,32 +82,8 @@ struct Genode::Allocator : Deallocator
* \param size block size to allocate
* \param out_addr resulting pointer to the new block,
* undefined in the error case
*
* \throw Out_of_ram
* \throw Out_of_caps
*
* \return true on success
*/
virtual bool alloc(size_t size, void **out_addr) = 0;

/**
* Allocate typed block
*
* This template allocates a typed block returned as a pointer to
* a non-void type. By providing this method, we prevent the
* compiler from warning us about "dereferencing type-punned
* pointer will break strict-aliasing rules".
*
* \throw Out_of_ram
* \throw Out_of_caps
*/
template <typename T> bool alloc(size_t size, T **out_addr)
{
void *addr = 0;
bool ret = alloc(size, &addr);
*out_addr = (T *)addr;
return ret;
}
virtual Alloc_result try_alloc(size_t size) = 0;

/**
* Return total amount of backing store consumed by the allocator
@ -111,6 +95,19 @@ struct Genode::Allocator : Deallocator
*/
virtual size_t overhead(size_t size) const = 0;

/**
* Raise exception according to the 'error' value
*/
static void throw_alloc_error(Alloc_error error) __attribute__((noreturn))
{
switch (error) {
case Alloc_error::OUT_OF_RAM: throw Out_of_ram();
case Alloc_error::OUT_OF_CAPS: throw Out_of_caps();
case Alloc_error::DENIED: break;
}
throw Denied();
}

/**
* Allocate block and signal error as an exception
*
@ -118,16 +115,16 @@ struct Genode::Allocator : Deallocator
*
* \throw Out_of_ram
* \throw Out_of_caps
* \throw Denied
*
* \return pointer to the new block
*/
void *alloc(size_t size)
{
void *result = 0;
if (!alloc(size, &result))
throw Out_of_memory();

return result;
return try_alloc(size).convert<void *>(
[&] (void *ptr) { return ptr; },
[&] (Alloc_error error) -> void * {
throw_alloc_error(error); });
}
};
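Taken together, the hunks above are the core of the change: 'try_alloc' reports failure as an 'Alloc_error' value, while the remaining 'alloc(size_t)' overload is merely a compatibility wrapper that converts such a value back into the traditional exception. A sketch of the two styles side by side for a caller holding some 'Allocator &alloc' (variable names hypothetical):

    /* exception style, unchanged for existing components */
    void *ptr = alloc.alloc(4096);     /* may throw Out_of_ram, Out_of_caps, Denied */

    /* value style, new */
    alloc.try_alloc(4096).with_result(
        [&] (void *ptr) { /* use ptr */ },
        [&] (Allocator::Alloc_error error) {
            switch (error) {
            case Allocator::Alloc_error::OUT_OF_RAM:  /* ... */ break;
            case Allocator::Alloc_error::OUT_OF_CAPS: /* ... */ break;
            case Allocator::Alloc_error::DENIED:      /* ... */ break;
            } });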

@ -139,32 +136,21 @@ struct Genode::Range_allocator : Allocator
*/
virtual ~Range_allocator() { }

/**
* Return type of range-management operations
*/
struct Range_ok { };
using Range_result = Attempt<Range_ok, Alloc_error>;

/**
* Add free address range to allocator
*/
virtual int add_range(addr_t base, size_t size) = 0;
virtual Range_result add_range(addr_t base, size_t size) = 0;

/**
* Remove address range from allocator
*/
virtual int remove_range(addr_t base, size_t size) = 0;

/**
* Return value of allocation functons
*
* 'OK' on success, or
* 'OUT_OF_METADATA' if meta-data allocation failed, or
* 'RANGE_CONFLICT' if no fitting address range is found
*/
struct Alloc_return
{
enum Value { OK = 0, OUT_OF_METADATA = -1, RANGE_CONFLICT = -2 };
Value const value;
Alloc_return(Value value) : value(value) { }

bool ok() const { return value == OK; }
bool error() const { return !ok(); }
};
virtual Range_result remove_range(addr_t base, size_t size) = 0;

struct Range { addr_t start, end; };

@ -172,21 +158,18 @@ struct Genode::Range_allocator : Allocator
* Allocate block
*
* \param size size of new block
* \param out_addr start address of new block,
* undefined in the error case
* \param align alignment of new block specified
* as the power of two
* \param range address-range constraint for the allocation
*/
virtual Alloc_return alloc_aligned(size_t size, void **out_addr,
unsigned align, Range range) = 0;
virtual Alloc_result alloc_aligned(size_t size, unsigned align, Range range) = 0;

/**
* Allocate block without constraining the address range
*/
Alloc_return alloc_aligned(size_t size, void **out_addr, unsigned align)
Alloc_result alloc_aligned(size_t size, unsigned align)
{
return alloc_aligned(size, out_addr, align, Range { .start = 0, .end = ~0UL });
return alloc_aligned(size, align, Range { .start = 0, .end = ~0UL });
}

/**
@ -194,12 +177,8 @@ struct Genode::Range_allocator : Allocator
*
* \param size size of new block
* \param addr desired address of block
*
* \return 'ALLOC_OK' on success, or
* 'OUT_OF_METADATA' if meta-data allocation failed, or
* 'RANGE_CONFLICT' if specified range is occupied
*/
virtual Alloc_return alloc_addr(size_t size, addr_t addr) = 0;
virtual Alloc_result alloc_addr(size_t size, addr_t addr) = 0;

/**
* Free a previously allocated block
@ -326,4 +305,32 @@ void Genode::destroy(DEALLOC && dealloc, T *obj)
operator delete (obj, dealloc);
}


namespace Genode {

void static inline print(Output &out, Allocator::Alloc_error error)
{
using Error = Allocator::Alloc_error;

auto name = [] (Error error)
{
switch (error) {
case Error::OUT_OF_RAM: return "OUT_OF_RAM";
case Error::OUT_OF_CAPS: return "OUT_OF_CAPS";
case Error::DENIED: return "DENIED";
}
return "<unknown>";
};

Genode::print(out, name(error));
}

void static inline print(Output &out, Allocator::Alloc_result result)
{
result.with_result(
[&] (void *ptr) { Genode::print(out, ptr); },
[&] (auto error) { Genode::print(out, error); });
}
}

#endif /* _INCLUDE__BASE__ALLOCATOR_H_ */
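The 'print' overloads at the end of the header make both the error code and a complete 'Alloc_result' printable with the regular log functions, which keeps diagnostic messages in the converted call sites short. A small sketch, with 'alloc' and 'size' as placeholders:

    Allocator::Alloc_result const result = alloc.try_alloc(size);

    result.with_result(
        [&] (void *ptr)                { log("allocated at ", ptr); },
        [&] (Allocator::Alloc_error e) { error("allocation failed: ", e); });   /* e.g. "OUT_OF_RAM" */

    log("result: ", result);   /* prints either the pointer or the error name */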
|
||||
|
@ -163,25 +163,29 @@ class Genode::Allocator_avl_base : public Range_allocator
|
||||
|
||||
private:
|
||||
|
||||
Avl_tree<Block> _addr_tree { }; /* blocks sorted by base address */
|
||||
Allocator *_md_alloc { nullptr }; /* meta-data allocator */
|
||||
Avl_tree<Block> _addr_tree { }; /* blocks sorted by base address */
|
||||
Allocator &_md_alloc; /* meta-data allocator */
|
||||
size_t _md_entry_size { 0 }; /* size of block meta-data entry */
|
||||
|
||||
struct Two_blocks { Block *b1_ptr, *b2_ptr; };
|
||||
|
||||
using Alloc_md_result = Attempt<Block *, Alloc_error>;
|
||||
using Alloc_md_two_result = Attempt<Two_blocks, Alloc_error>;
|
||||
|
||||
/**
|
||||
* Alloc meta-data block
|
||||
*/
|
||||
Block *_alloc_block_metadata();
|
||||
Alloc_md_result _alloc_block_metadata();
|
||||
|
||||
/**
|
||||
* Alloc two meta-data blocks in a transactional way
|
||||
*/
|
||||
bool _alloc_two_blocks_metadata(Block **dst1, Block **dst2);
|
||||
Alloc_md_two_result _alloc_two_blocks_metadata();
|
||||
|
||||
/**
|
||||
* Create new block
|
||||
*/
|
||||
int _add_block(Block *block_metadata,
|
||||
addr_t base, size_t size, bool used);
|
||||
void _add_block(Block &block_metadata, addr_t base, size_t size, bool used);
|
||||
|
||||
Block *_find_any_used_block(Block *sub_tree);
|
||||
Block *_find_any_unused_block(Block *sub_tree);
|
||||
@ -189,7 +193,7 @@ class Genode::Allocator_avl_base : public Range_allocator
|
||||
/**
|
||||
* Destroy block
|
||||
*/
|
||||
void _destroy_block(Block *b);
|
||||
void _destroy_block(Block &b);
|
||||
|
||||
/**
|
||||
* Cut specified area from block
|
||||
@ -197,8 +201,13 @@ class Genode::Allocator_avl_base : public Range_allocator
|
||||
* The original block gets replaced by (up to) two smaller blocks
|
||||
* with remaining space.
|
||||
*/
|
||||
void _cut_from_block(Block *b, addr_t cut_addr, size_t cut_size,
|
||||
Block *dst1, Block *dst2);
|
||||
void _cut_from_block(Block &b, addr_t cut_addr, size_t cut_size, Two_blocks);
|
||||
|
||||
template <typename ANY_BLOCK_FN>
|
||||
void _revert_block_ranges(ANY_BLOCK_FN const &);
|
||||
|
||||
template <typename SEARCH_FN>
|
||||
Alloc_result _allocate(size_t, unsigned, Range, SEARCH_FN const &);
|
||||
|
||||
protected:
|
||||
|
||||
@ -234,7 +243,7 @@ class Genode::Allocator_avl_base : public Range_allocator
|
||||
* we can attach custom information to block meta data.
|
||||
*/
|
||||
Allocator_avl_base(Allocator *md_alloc, size_t md_entry_size) :
|
||||
_md_alloc(md_alloc), _md_entry_size(md_entry_size) { }
|
||||
_md_alloc(*md_alloc), _md_entry_size(md_entry_size) { }
|
||||
|
||||
~Allocator_avl_base() { _revert_allocations_and_ranges(); }
|
||||
|
||||
@ -258,10 +267,10 @@ class Genode::Allocator_avl_base : public Range_allocator
|
||||
** Range allocator interface **
|
||||
*******************************/
|
||||
|
||||
int add_range(addr_t base, size_t size) override;
|
||||
int remove_range(addr_t base, size_t size) override;
|
||||
Alloc_return alloc_aligned(size_t, void **, unsigned, Range) override;
|
||||
Alloc_return alloc_addr(size_t size, addr_t addr) override;
|
||||
Range_result add_range(addr_t base, size_t size) override;
|
||||
Range_result remove_range(addr_t base, size_t size) override;
|
||||
Alloc_result alloc_aligned(size_t, unsigned, Range) override;
|
||||
Alloc_result alloc_addr(size_t size, addr_t addr) override;
|
||||
void free(void *addr) override;
|
||||
size_t avail() const override;
|
||||
bool valid_addr(addr_t addr) const override;
|
||||
@ -273,10 +282,9 @@ class Genode::Allocator_avl_base : public Range_allocator
|
||||
** Allocator interface **
|
||||
*************************/
|
||||
|
||||
bool alloc(size_t size, void **out_addr) override
|
||||
Alloc_result try_alloc(size_t size) override
|
||||
{
|
||||
return (Allocator_avl_base::alloc_aligned(
|
||||
size, out_addr, log2(sizeof(addr_t))).ok());
|
||||
return Allocator_avl_base::alloc_aligned(size, log2(sizeof(addr_t)));
|
||||
}
|
||||
|
||||
void free(void *addr, size_t) override { free(addr); }
|
||||
@ -385,7 +393,7 @@ class Genode::Allocator_avl_tpl : public Allocator_avl_base
|
||||
return b && b->used() ? b : 0;
|
||||
}
|
||||
|
||||
int add_range(addr_t base, size_t size) override
|
||||
Range_result add_range(addr_t base, size_t size) override
|
||||
{
|
||||
/*
|
||||
* We disable the slab block allocation while
|
||||
@ -395,9 +403,9 @@ class Genode::Allocator_avl_tpl : public Allocator_avl_base
|
||||
*/
|
||||
Allocator *md_bs = _metadata.backing_store();
|
||||
_metadata.backing_store(0);
|
||||
int ret = Allocator_avl_base::add_range(base, size);
|
||||
Range_result result = Allocator_avl_base::add_range(base, size);
|
||||
_metadata.backing_store(md_bs);
|
||||
return ret;
|
||||
return result;
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -96,32 +96,26 @@ class Genode::Heap : public Allocator
|
||||
size_t _quota_used { 0 };
|
||||
size_t _chunk_size { 0 };
|
||||
|
||||
using Alloc_ds_result = Attempt<Dataspace *, Alloc_error>;
|
||||
|
||||
/**
|
||||
* Allocate a new dataspace of the specified size
|
||||
*
|
||||
* \param size number of bytes to allocate
|
||||
* \param enforce_separate_metadata if true, the new dataspace
|
||||
* will not contain any meta data
|
||||
* \throw Region_map::Invalid_dataspace,
|
||||
* Region_map::Region_conflict
|
||||
* \return 0 on success or negative error code
|
||||
*/
|
||||
Heap::Dataspace *_allocate_dataspace(size_t size, bool enforce_separate_metadata);
|
||||
Alloc_ds_result _allocate_dataspace(size_t size, bool enforce_separate_metadata);
|
||||
|
||||
/**
|
||||
* Try to allocate block at our local allocator
|
||||
*
|
||||
* \return true on success
|
||||
*
|
||||
* This method is a utility used by '_unsynchronized_alloc' to
|
||||
* avoid code duplication.
|
||||
*/
|
||||
bool _try_local_alloc(size_t size, void **out_addr);
|
||||
Alloc_result _try_local_alloc(size_t size);
|
||||
|
||||
/**
|
||||
* Unsynchronized implementation of 'alloc'
|
||||
* Unsynchronized implementation of 'try_alloc'
|
||||
*/
|
||||
bool _unsynchronized_alloc(size_t size, void **out_addr);
|
||||
Alloc_result _unsynchronized_alloc(size_t size);
|
||||
|
||||
public:
|
||||
|
||||
@ -167,11 +161,11 @@ class Genode::Heap : public Allocator
|
||||
** Allocator interface **
|
||||
*************************/
|
||||
|
||||
bool alloc(size_t, void **) override;
|
||||
void free(void *, size_t) override;
|
||||
size_t consumed() const override { return _quota_used; }
|
||||
size_t overhead(size_t size) const override { return _alloc->overhead(size); }
|
||||
bool need_size_for_free() const override { return false; }
|
||||
Alloc_result try_alloc(size_t) override;
|
||||
void free(void *, size_t) override;
|
||||
size_t consumed() const override { return _quota_used; }
|
||||
size_t overhead(size_t size) const override { return _alloc->overhead(size); }
|
||||
bool need_size_for_free() const override { return false; }
|
||||
};
|
||||
|
||||
|
||||
@ -222,11 +216,11 @@ class Genode::Sliced_heap : public Allocator
|
||||
** Allocator interface **
|
||||
*************************/
|
||||
|
||||
bool alloc(size_t, void **) override;
|
||||
void free(void *, size_t) override;
|
||||
size_t consumed() const override { return _consumed; }
|
||||
size_t overhead(size_t size) const override;
|
||||
bool need_size_for_free() const override { return false; }
|
||||
Alloc_result try_alloc(size_t) override;
|
||||
void free(void *, size_t) override;
|
||||
size_t consumed() const override { return _consumed; }
|
||||
size_t overhead(size_t size) const override;
|
||||
bool need_size_for_free() const override { return false; }
|
||||
};
|
||||
|
||||
#endif /* _INCLUDE__BASE__HEAP_H_ */
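With 'Heap' and 'Sliced_heap' now exposing the same 'try_alloc'-based interface, a custom allocator follows the identical recipe: implement 'try_alloc' and return either a pointer or an 'Alloc_error'. A hypothetical minimal bump allocator, sketched only to show the required overrides:

    using namespace Genode;

    struct Bump_allocator : Allocator
    {
        char   _buf[4096];
        size_t _used = 0;

        Alloc_result try_alloc(size_t size) override
        {
            if (_used + size > sizeof(_buf))
                return Alloc_error::DENIED;    /* error reported as value */

            void * const ptr = &_buf[_used];
            _used += size;
            return ptr;
        }

        void   free(void *, size_t)       override { }
        size_t consumed()           const override { return _used; }
        size_t overhead(size_t)     const override { return 0; }
        bool   need_size_for_free() const override { return false; }
    };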
|
||||
|
@ -44,10 +44,12 @@ class Genode::Slab : public Allocator
|
||||
|
||||
Allocator *_backing_store;
|
||||
|
||||
using New_slab_block_result = Attempt<Block *, Alloc_error>;
|
||||
|
||||
/**
|
||||
* Allocate and initialize new slab block
|
||||
*/
|
||||
Block *_new_slab_block();
|
||||
New_slab_block_result _new_slab_block();
|
||||
|
||||
|
||||
/*****************************
|
||||
@ -58,11 +60,17 @@ class Genode::Slab : public Allocator
|
||||
|
||||
/**
|
||||
* Insert block into slab block ring
|
||||
*
|
||||
* \noapi
|
||||
*/
|
||||
void _insert_sb(Block *);
|
||||
|
||||
struct Expand_ok { };
|
||||
using Expand_result = Attempt<Expand_ok, Alloc_error>;
|
||||
|
||||
/**
|
||||
* Expand slab by one block
|
||||
*/
|
||||
Expand_result _expand();
|
||||
|
||||
/**
|
||||
* Release slab block
|
||||
*/
|
||||
@ -88,6 +96,10 @@ class Genode::Slab : public Allocator
|
||||
* block that is used for the first couple of allocations,
|
||||
* especially for the allocation of the second slab
|
||||
* block.
|
||||
*
|
||||
* \throw Out_of_ram
|
||||
* \throw Out_of_caps
|
||||
* \throw Allocator::Denied failed to obtain initial slab block
|
||||
*/
|
||||
Slab(size_t slab_size, size_t block_size, void *initial_sb,
|
||||
Allocator *backing_store = 0);
|
||||
@ -154,7 +166,7 @@ class Genode::Slab : public Allocator
|
||||
* The 'size' parameter is ignored as only slab entries with
|
||||
* preconfigured slab-entry size are allocated.
|
||||
*/
|
||||
bool alloc(size_t size, void **addr) override;
|
||||
Alloc_result try_alloc(size_t size) override;
|
||||
void free(void *addr, size_t) override { _free(addr); }
|
||||
size_t consumed() const override;
|
||||
size_t overhead(size_t) const override { return _block_size/_entries_per_block; }
|
||||
|
@ -56,8 +56,8 @@ class Genode::Synced_allocator : public Allocator
|
||||
** Allocator interface **
|
||||
*************************/
|
||||
|
||||
bool alloc(size_t size, void **out_addr) override {
|
||||
return _synced_object()->alloc(size, out_addr); }
|
||||
Alloc_result try_alloc(size_t size) override {
|
||||
return _synced_object()->try_alloc(size); }
|
||||
|
||||
void free(void *addr, size_t size) override {
|
||||
_synced_object()->free(addr, size); }
|
||||
|
@ -64,7 +64,7 @@ _ZN6Genode10Ipc_serverC2Ev T
|
||||
_ZN6Genode10Ipc_serverD1Ev T
|
||||
_ZN6Genode10Ipc_serverD2Ev T
|
||||
_ZN6Genode11Sliced_heap4freeEPvm T
|
||||
_ZN6Genode11Sliced_heap5allocEmPPv T
|
||||
_ZN6Genode11Sliced_heap9try_allocEm T
|
||||
_ZN6Genode11Sliced_heapC1ERNS_13Ram_allocatorERNS_10Region_mapE T
|
||||
_ZN6Genode11Sliced_heapC2ERNS_13Ram_allocatorERNS_10Region_mapE T
|
||||
_ZN6Genode11Sliced_heapD0Ev T
|
||||
@ -165,23 +165,15 @@ _ZN6Genode17Timeout_schedulerC2ERNS_11Time_sourceENS_12MicrosecondsE T
|
||||
_ZN6Genode17Timeout_schedulerD0Ev T
|
||||
_ZN6Genode17Timeout_schedulerD1Ev T
|
||||
_ZN6Genode17Timeout_schedulerD2Ev T
|
||||
_ZN6Genode18Allocator_avl_base10_add_blockEPNS0_5BlockEmmb T
|
||||
_ZN6Genode18Allocator_avl_base10_add_blockERNS0_5BlockEmmb T
|
||||
_ZN6Genode18Allocator_avl_base10alloc_addrEmm T
|
||||
_ZN6Genode18Allocator_avl_base12remove_rangeEmm T
|
||||
_ZN6Genode18Allocator_avl_base13alloc_alignedEmPPvjNS_15Range_allocator5RangeE T
|
||||
_ZN6Genode18Allocator_avl_base14_destroy_blockEPNS0_5BlockE T
|
||||
_ZN6Genode18Allocator_avl_base13alloc_alignedEmjNS_15Range_allocator5RangeE T
|
||||
_ZN6Genode18Allocator_avl_base14any_block_addrEPm T
|
||||
_ZN6Genode18Allocator_avl_base15_cut_from_blockEPNS0_5BlockEmmS2_S2_ T
|
||||
_ZN6Genode18Allocator_avl_base20_find_any_used_blockEPNS0_5BlockE T
|
||||
_ZN6Genode18Allocator_avl_base21_alloc_block_metadataEv T
|
||||
_ZN6Genode18Allocator_avl_base21_revert_unused_rangesEv T
|
||||
_ZN6Genode18Allocator_avl_base22_find_any_unused_blockEPNS0_5BlockE T
|
||||
_ZN6Genode18Allocator_avl_base26_alloc_two_blocks_metadataEPPNS0_5BlockES3_ T
|
||||
_ZN6Genode18Allocator_avl_base30_revert_allocations_and_rangesEv T
|
||||
_ZN6Genode18Allocator_avl_base4freeEPv T
|
||||
_ZN6Genode18Allocator_avl_base5Block13find_best_fitEmjmm T
|
||||
_ZN6Genode18Allocator_avl_base5Block15find_by_addressEmmb T
|
||||
_ZN6Genode18Allocator_avl_base5Block16avail_in_subtreeEv T
|
||||
_ZN6Genode18Allocator_avl_base5Block9recomputeEv T
|
||||
_ZN6Genode18Allocator_avl_base9add_rangeEmm T
|
||||
_ZN6Genode18Signal_transmitter6submitEj T
|
||||
@ -201,8 +193,10 @@ _ZN6Genode3Raw7_outputEv T
|
||||
_ZN6Genode3Raw8_acquireEv T
|
||||
_ZN6Genode3Raw8_releaseEv T
|
||||
_ZN6Genode4Heap11quota_limitEm T
|
||||
_ZN6Genode4Heap14Dataspace_poolD1Ev T
|
||||
_ZN6Genode4Heap14Dataspace_poolD2Ev T
|
||||
_ZN6Genode4Heap4freeEPvm T
|
||||
_ZN6Genode4Heap5allocEmPPv T
|
||||
_ZN6Genode4Heap9try_allocEm T
|
||||
_ZN6Genode4HeapC1EPNS_13Ram_allocatorEPNS_10Region_mapEmPvm T
|
||||
_ZN6Genode4HeapC2EPNS_13Ram_allocatorEPNS_10Region_mapEmPvm T
|
||||
_ZN6Genode4HeapD0Ev T
|
||||
@ -213,13 +207,9 @@ _ZN6Genode4Lock6unlockEv T
|
||||
_ZN6Genode4LockC1ENS0_5StateE T
|
||||
_ZN6Genode4Slab13any_used_elemEv T
|
||||
_ZN6Genode4Slab17free_empty_blocksEv T
|
||||
_ZN6Genode4Slab5Block11_slab_entryEi T
|
||||
_ZN6Genode4Slab5Block14any_used_entryEv T
|
||||
_ZN6Genode4Slab5Block5allocEv T
|
||||
_ZN6Genode4Slab5Block9inc_availERNS0_5EntryE T
|
||||
_ZN6Genode4Slab5_freeEPv T
|
||||
_ZN6Genode4Slab5allocEmPPv T
|
||||
_ZN6Genode4Slab9insert_sbEPv T
|
||||
_ZN6Genode4Slab9try_allocEm T
|
||||
_ZN6Genode4SlabC1EmmPvPNS_9AllocatorE T
|
||||
_ZN6Genode4SlabC2EmmPvPNS_9AllocatorE T
|
||||
_ZN6Genode4SlabD0Ev T
|
||||
|
@ -33,42 +33,46 @@ void * Mapped_avl_allocator::map_addr(void * addr)
|
||||
}
|
||||
|
||||
|
||||
Range_allocator::Alloc_return
|
||||
Mapped_mem_allocator::alloc_aligned(size_t size, void **out_addr,
|
||||
unsigned align, Range range)
|
||||
Range_allocator::Alloc_result
|
||||
Mapped_mem_allocator::alloc_aligned(size_t size, unsigned align, Range range)
|
||||
{
|
||||
size_t page_rounded_size = align_addr(size, get_page_size_log2());
|
||||
void *phys_addr = 0;
|
||||
align = max((size_t)align, get_page_size_log2());
|
||||
|
||||
/* allocate physical pages */
|
||||
Alloc_return ret1 = _phys_alloc->alloc_aligned(page_rounded_size,
|
||||
&phys_addr, align, range);
|
||||
if (!ret1.ok()) {
|
||||
error("Could not allocate physical memory region of size ",
|
||||
page_rounded_size);
|
||||
return ret1;
|
||||
}
|
||||
return _phys_alloc->alloc_aligned(page_rounded_size, align, range)
|
||||
.convert<Alloc_result>(
|
||||
|
||||
/* allocate range in core's virtual address space */
|
||||
Alloc_return ret2 = _virt_alloc->alloc_aligned(page_rounded_size,
|
||||
out_addr, align);
|
||||
if (!ret2.ok()) {
|
||||
error("Could not allocate virtual address range in core of size ",
|
||||
page_rounded_size);
|
||||
[&] (void *phys_addr) -> Alloc_result {
|
||||
|
||||
/* revert physical allocation */
|
||||
_phys_alloc->free(phys_addr);
|
||||
return ret2;
|
||||
}
|
||||
/* allocate range in core's virtual address space */
|
||||
return _virt_alloc->alloc_aligned(page_rounded_size, align)
|
||||
.convert<Alloc_result>(
|
||||
|
||||
_phys_alloc->metadata(phys_addr, { *out_addr });
|
||||
_virt_alloc->metadata(*out_addr, { phys_addr });
|
||||
[&] (void *virt_addr) {
|
||||
|
||||
/* make physical page accessible at the designated virtual address */
|
||||
_map_local((addr_t)*out_addr, (addr_t)phys_addr, page_rounded_size);
|
||||
_phys_alloc->metadata(phys_addr, { virt_addr });
|
||||
_virt_alloc->metadata(virt_addr, { phys_addr });
|
||||
|
||||
return Alloc_return::OK;
|
||||
/* make physical page accessible at the designated virtual address */
|
||||
_map_local((addr_t)virt_addr, (addr_t)phys_addr, page_rounded_size);
|
||||
|
||||
return virt_addr;
|
||||
},
|
||||
[&] (Alloc_error e) {
|
||||
error("Could not allocate virtual address range in core of size ",
|
||||
page_rounded_size, " (error ", (int)e, ")");
|
||||
|
||||
/* revert physical allocation */
|
||||
_phys_alloc->free(phys_addr);
|
||||
return e;
|
||||
});
|
||||
},
|
||||
[&] (Alloc_error e) {
|
||||
error("Could not allocate physical memory region of size ",
|
||||
page_rounded_size, " (error ", (int)e, ")");
|
||||
return e;
|
||||
});
|
||||
}
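The nested 'convert' above performs a two-phase allocation: the physical range is handed on to the virtual-range step and is explicitly freed again if that second step fails, so no partial allocation leaks. In isolation the shape is as follows, with hypothetical 'phys', 'virt', and 'map':

    return phys.alloc_aligned(size, align).convert<Alloc_result>(
        [&] (void *p) {
            return virt.alloc_aligned(size, align).convert<Alloc_result>(
                [&] (void *v)       { map(p, v); return v; },
                [&] (Alloc_error e) { phys.free(p); return e; });   /* roll back phase one */
        },
        [&] (Alloc_error e) { return e; });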
|
||||
|
||||
|
||||
|
@ -47,7 +47,7 @@ class Genode::Constrained_core_ram : public Allocator
|
||||
" in core !");
|
||||
}
|
||||
|
||||
bool alloc(size_t const size, void **ptr) override
|
||||
Alloc_result try_alloc(size_t const size) override
|
||||
{
|
||||
size_t const page_aligned_size = align_addr(size, 12);
|
||||
|
||||
@ -56,15 +56,16 @@ class Genode::Constrained_core_ram : public Allocator
|
||||
/* on some kernels we require a cap, on some not XXX */
|
||||
Cap_quota_guard::Reservation caps(_cap_guard, Cap_quota{1});
|
||||
|
||||
if (!_core_mem.alloc(page_aligned_size, ptr))
|
||||
return false;
|
||||
return _core_mem.try_alloc(page_aligned_size).convert<Alloc_result>(
|
||||
|
||||
ram.acknowledge();
|
||||
caps.acknowledge();
|
||||
[&] (void *ptr) {
|
||||
ram.acknowledge();
|
||||
caps.acknowledge();
|
||||
core_mem_allocated += page_aligned_size;
|
||||
return ptr; },
|
||||
|
||||
core_mem_allocated += page_aligned_size;
|
||||
|
||||
return true;
|
||||
[&] (Alloc_error error) {
|
||||
return error; });
|
||||
}
|
||||
|
||||
void free(void *ptr, size_t const size) override
|
||||
|
@ -163,11 +163,10 @@ class Genode::Mapped_mem_allocator : public Genode::Core_mem_translator
|
||||
** Range allocator interface **
|
||||
*******************************/
|
||||
|
||||
int add_range(addr_t, size_t) override { return -1; }
|
||||
int remove_range(addr_t, size_t) override { return -1; }
|
||||
Alloc_return alloc_aligned(size_t, void **, unsigned, Range) override;
|
||||
Alloc_return alloc_addr(size_t, addr_t) override {
|
||||
return Alloc_return::RANGE_CONFLICT; }
|
||||
Range_result add_range(addr_t, size_t) override { return Alloc_error::DENIED; }
|
||||
Range_result remove_range(addr_t, size_t) override { return Alloc_error::DENIED; }
|
||||
Alloc_result alloc_aligned(size_t, unsigned, Range) override;
|
||||
Alloc_result alloc_addr(size_t, addr_t) override { return Alloc_error::DENIED; }
|
||||
void free(void *) override;
|
||||
size_t avail() const override { return _phys_alloc->avail(); }
|
||||
bool valid_addr(addr_t addr) const override {
|
||||
@ -180,8 +179,8 @@ class Genode::Mapped_mem_allocator : public Genode::Core_mem_translator
|
||||
** Allocator interface **
|
||||
*************************/
|
||||
|
||||
bool alloc(size_t size, void **out_addr) override {
|
||||
return alloc_aligned(size, out_addr, log2(sizeof(addr_t))).ok(); }
|
||||
Alloc_result try_alloc(size_t size) override {
|
||||
return alloc_aligned(size, log2(sizeof(addr_t))); }
|
||||
void free(void *addr, size_t) override;
|
||||
size_t consumed() const override { return _phys_alloc->consumed(); }
|
||||
size_t overhead(size_t size) const override {
|
||||
@ -276,16 +275,14 @@ class Genode::Core_mem_allocator : public Genode::Core_mem_translator
|
||||
** Range allocator interface **
|
||||
*******************************/
|
||||
|
||||
int add_range(addr_t, size_t) override { return -1; }
|
||||
int remove_range(addr_t, size_t) override { return -1; }
|
||||
Alloc_return alloc_addr(size_t, addr_t) override {
|
||||
return Alloc_return::RANGE_CONFLICT; }
|
||||
Range_result add_range(addr_t, size_t) override { return Alloc_error::DENIED; }
|
||||
Range_result remove_range(addr_t, size_t) override { return Alloc_error::DENIED; }
|
||||
Alloc_result alloc_addr(size_t, addr_t) override { return Alloc_error::DENIED; }
|
||||
|
||||
Alloc_return alloc_aligned(size_t size, void **out_addr,
|
||||
unsigned align, Range range) override
|
||||
Alloc_result alloc_aligned(size_t size, unsigned align, Range range) override
|
||||
{
|
||||
Mutex::Guard lock_guard(_mutex);
|
||||
return _mem_alloc.alloc_aligned(size, out_addr, align, range);
|
||||
return _mem_alloc.alloc_aligned(size, align, range);
|
||||
}
|
||||
|
||||
void free(void *addr) override
|
||||
@ -305,8 +302,10 @@ class Genode::Core_mem_allocator : public Genode::Core_mem_translator
|
||||
** Allocator interface **
|
||||
*************************/
|
||||
|
||||
bool alloc(size_t size, void **out_addr) override {
|
||||
return alloc_aligned(size, out_addr, log2(sizeof(addr_t))).ok(); }
|
||||
Alloc_result try_alloc(size_t size) override
|
||||
{
|
||||
return alloc_aligned(size, log2(sizeof(addr_t)));
|
||||
}
|
||||
|
||||
void free(void *addr, size_t size) override
|
||||
{
|
||||
|
@ -40,7 +40,7 @@ class Genode::Synced_range_allocator : public Range_allocator
|
||||
friend class Mapped_mem_allocator;
|
||||
|
||||
Mutex _default_mutex { };
|
||||
Mutex &_mutex;
|
||||
Mutex &_mutex { _default_mutex };
|
||||
ALLOC _alloc;
|
||||
Synced_interface<ALLOC, Mutex> _synced_object;
|
||||
|
||||
@ -54,8 +54,7 @@ class Genode::Synced_range_allocator : public Range_allocator
|
||||
|
||||
template <typename... ARGS>
|
||||
Synced_range_allocator(ARGS &&... args)
|
||||
: _mutex(_default_mutex), _alloc(args...),
|
||||
_synced_object(_mutex, &_alloc) { }
|
||||
: _alloc(args...), _synced_object(_mutex, &_alloc) { }
|
||||
|
||||
Guard operator () () { return _synced_object(); }
|
||||
Guard operator () () const { return _synced_object(); }
|
||||
@ -67,8 +66,8 @@ class Genode::Synced_range_allocator : public Range_allocator
|
||||
** Allocator interface **
|
||||
*************************/
|
||||
|
||||
bool alloc(size_t size, void **out_addr) override {
|
||||
return _synced_object()->alloc(size, out_addr); }
|
||||
Alloc_result try_alloc(size_t size) override {
|
||||
return _synced_object()->try_alloc(size); }
|
||||
|
||||
void free(void *addr, size_t size) override {
|
||||
_synced_object()->free(addr, size); }
|
||||
@ -87,17 +86,16 @@ class Genode::Synced_range_allocator : public Range_allocator
|
||||
** Range-allocator interface **
|
||||
*******************************/
|
||||
|
||||
int add_range(addr_t base, size_t size) override {
|
||||
Range_result add_range(addr_t base, size_t size) override {
|
||||
return _synced_object()->add_range(base, size); }
|
||||
|
||||
int remove_range(addr_t base, size_t size) override {
|
||||
Range_result remove_range(addr_t base, size_t size) override {
|
||||
return _synced_object()->remove_range(base, size); }
|
||||
|
||||
Alloc_return alloc_aligned(size_t size, void **out_addr,
|
||||
unsigned align, Range range) override {
|
||||
return _synced_object()->alloc_aligned(size, out_addr, align, range); }
|
||||
Alloc_result alloc_aligned(size_t size, unsigned align, Range range) override {
|
||||
return _synced_object()->alloc_aligned(size, align, range); }
|
||||
|
||||
Alloc_return alloc_addr(size_t size, addr_t addr) override {
|
||||
Alloc_result alloc_addr(size_t size, addr_t addr) override {
|
||||
return _synced_object()->alloc_addr(size, addr); }
|
||||
|
||||
void free(void *addr) override {
|
||||
|
@ -41,24 +41,16 @@ Io_mem_session_component::_prepare_io_mem(const char *args,
|
||||
_cacheable = WRITE_COMBINED;
|
||||
|
||||
/* check for RAM collision */
|
||||
int ret;
|
||||
if ((ret = ram_alloc.remove_range(base, size))) {
|
||||
if (ram_alloc.remove_range(base, size).failed()) {
|
||||
error("I/O memory ", Hex_range<addr_t>(base, size), " "
|
||||
"used by RAM allocator (", ret, ")");
|
||||
"used by RAM allocator");
|
||||
return Dataspace_attr();
|
||||
}
|
||||
|
||||
/* allocate region */
|
||||
switch (_io_mem_alloc.alloc_addr(req_size, req_base).value) {
|
||||
case Range_allocator::Alloc_return::RANGE_CONFLICT:
|
||||
if (_io_mem_alloc.alloc_addr(req_size, req_base).failed()) {
|
||||
error("I/O memory ", Hex_range<addr_t>(req_base, req_size), " not available");
|
||||
return Dataspace_attr();
|
||||
|
||||
case Range_allocator::Alloc_return::OUT_OF_METADATA:
|
||||
error("I/O memory allocator ran out of meta data");
|
||||
return Dataspace_attr();
|
||||
|
||||
case Range_allocator::Alloc_return::OK: break;
|
||||
}
|
||||
|
||||
/* request local mapping */
|
||||
|
@ -38,8 +38,7 @@ Ram_dataspace_factory::try_alloc(size_t ds_size, Cache cache)
|
||||
* If this does not work, we subsequently weaken the alignment constraint
|
||||
* until the allocation succeeds.
|
||||
*/
|
||||
void *ds_addr = nullptr;
|
||||
bool alloc_succeeded = false;
|
||||
Range_allocator::Alloc_result allocated_range = Allocator::Alloc_error::DENIED;

/*
* If no physical constraint exists, try to allocate physical memory at
@ -53,63 +52,57 @@ Ram_dataspace_factory::try_alloc(size_t ds_size, Cache cache)
Phys_range const range { .start = high_start, .end = _phys_range.end };

for (size_t align_log2 = log2(ds_size); align_log2 >= 12; align_log2--) {
if (_phys_alloc.alloc_aligned(ds_size, &ds_addr, align_log2, range).ok()) {
alloc_succeeded = true;
allocated_range = _phys_alloc.alloc_aligned(ds_size, align_log2, range);
if (allocated_range.ok())
break;
}
}
}

/* apply constraints, or retry if larger memory allocation failed */
if (!alloc_succeeded) {
if (!allocated_range.ok()) {
for (size_t align_log2 = log2(ds_size); align_log2 >= 12; align_log2--) {
if (_phys_alloc.alloc_aligned(ds_size, &ds_addr, align_log2,
_phys_range).ok()) {
alloc_succeeded = true;
allocated_range = _phys_alloc.alloc_aligned(ds_size, align_log2, _phys_range);
if (allocated_range.ok())
break;
}
}
}

/*
* Helper to release the allocated physical memory whenever we leave the
* scope via an exception.
*/
class Phys_alloc_guard
{
private:

/*
* Noncopyable
*/
Phys_alloc_guard(Phys_alloc_guard const &);
Phys_alloc_guard &operator = (Phys_alloc_guard const &);

public:

Range_allocator &phys_alloc;
void * const ds_addr;
bool ack = false;

Phys_alloc_guard(Range_allocator &phys_alloc, void *ds_addr)
: phys_alloc(phys_alloc), ds_addr(ds_addr) { }

~Phys_alloc_guard() { if (!ack) phys_alloc.free(ds_addr); }

} phys_alloc_guard(_phys_alloc, ds_addr);

/*
* Normally, init's quota equals the size of physical memory and this quota
* is distributed among the processes. As we check the quota before
* allocating, the allocation should always succeed in theory. However,
* fragmentation could cause a failing allocation.
*/
if (!alloc_succeeded) {
if (allocated_range.failed()) {
error("out of physical memory while allocating ", ds_size, " bytes ",
"in range [", Hex(_phys_range.start), "-", Hex(_phys_range.end), "]");
return Alloc_error::OUT_OF_RAM;

return allocated_range.convert<Ram_allocator::Alloc_result>(
[&] (void *) { return Alloc_error::DENIED; },
[&] (Alloc_error error) { return error; });
}

/*
* Helper to release the allocated physical memory whenever we leave the
* scope via an exception.
*/
struct Phys_alloc_guard
{
Range_allocator &phys_alloc;
struct { void * ds_addr = nullptr; };
bool keep = false;

Phys_alloc_guard(Range_allocator &phys_alloc)
: phys_alloc(phys_alloc) { }

~Phys_alloc_guard() { if (!keep && ds_addr) phys_alloc.free(ds_addr); }

} phys_alloc_guard(_phys_alloc);

allocated_range.with_result(
[&] (void *ptr) { phys_alloc_guard.ds_addr = ptr; },
[&] (Alloc_error) { /* already checked above */ });

/*
* For non-cached RAM dataspaces, we mark the dataspace as write
* combined and expect the pager to evaluate this dataspace property
@ -118,7 +111,8 @@ Ram_dataspace_factory::try_alloc(size_t ds_size, Cache cache)
Dataspace_component *ds_ptr = nullptr;
try {
ds_ptr = new (_ds_slab)
Dataspace_component(ds_size, (addr_t)ds_addr, cache, true, this);
Dataspace_component(ds_size, (addr_t)phys_alloc_guard.ds_addr,
cache, true, this);
}
catch (Out_of_ram) { return Alloc_error::OUT_OF_RAM; }
catch (Out_of_caps) { return Alloc_error::OUT_OF_CAPS; }
@ -145,7 +139,7 @@ Ram_dataspace_factory::try_alloc(size_t ds_size, Cache cache)

Dataspace_capability ds_cap = _ep.manage(&ds);

phys_alloc_guard.ack = true;
phys_alloc_guard.keep = true;

return static_cap_cast<Ram_dataspace>(ds_cap);
}
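
The hunks above all consume the new 'Attempt'-based return value in the same way. As a minimal usage sketch (illustrative only, not taken from the patch, assuming an 'Allocator &alloc' reference is at hand), a caller of 'try_alloc' now handles both outcomes explicitly instead of catching exceptions:

    void consume_example(Genode::Allocator &alloc)
    {
        alloc.try_alloc(4096).with_result(
            [&] (void *ptr) {
                /* use the allocation, release it when done */
                alloc.free(ptr, 4096); },
            [&] (Genode::Allocator::Alloc_error e) {
                switch (e) {
                case Genode::Allocator::Alloc_error::OUT_OF_RAM:  break;
                case Genode::Allocator::Alloc_error::OUT_OF_CAPS: break;
                case Genode::Allocator::Alloc_error::DENIED:      break;
                }
            });
    }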
|
||||
|
@ -365,8 +365,14 @@ Region_map_component::attach(Dataspace_capability ds_cap, size_t size,
|
||||
throw Region_conflict();
|
||||
|
||||
auto lambda = [&] (Dataspace_component *dsc) {
|
||||
|
||||
using Alloc_error = Range_allocator::Alloc_error;
|
||||
|
||||
/* check dataspace validity */
|
||||
if (!dsc) throw Invalid_dataspace();
|
||||
if (!dsc)
|
||||
throw Invalid_dataspace();
|
||||
|
||||
unsigned const min_align_log2 = get_page_size_log2();
|
||||
|
||||
size_t const off = offset;
|
||||
if (off >= dsc->size())
|
||||
@ -376,27 +382,25 @@ Region_map_component::attach(Dataspace_capability ds_cap, size_t size,
|
||||
size = dsc->size() - offset;
|
||||
|
||||
/* work with page granularity */
|
||||
size = align_addr(size, get_page_size_log2());
|
||||
size = align_addr(size, min_align_log2);
|
||||
|
||||
/* deny creation of regions larger then the actual dataspace */
|
||||
if (dsc->size() < size + offset)
|
||||
throw Region_conflict();
|
||||
|
||||
/* allocate region for attachment */
|
||||
void *attach_at = 0;
|
||||
void *attach_at = nullptr;
|
||||
if (use_local_addr) {
|
||||
switch (_map.alloc_addr(size, local_addr).value) {
|
||||
|
||||
case Range_allocator::Alloc_return::OUT_OF_METADATA:
|
||||
throw Out_of_ram();
|
||||
|
||||
case Range_allocator::Alloc_return::RANGE_CONFLICT:
|
||||
throw Region_conflict();
|
||||
|
||||
case Range_allocator::Alloc_return::OK:
|
||||
attach_at = local_addr;
|
||||
break;
|
||||
}
|
||||
_map.alloc_addr(size, local_addr).with_result(
|
||||
[&] (void *ptr) { attach_at = ptr; },
|
||||
[&] (Range_allocator::Alloc_error error) {
|
||||
switch (error) {
|
||||
case Alloc_error::OUT_OF_RAM: throw Out_of_ram();
|
||||
case Alloc_error::OUT_OF_CAPS: throw Out_of_caps();
|
||||
case Alloc_error::DENIED: break;
|
||||
}
|
||||
throw Region_conflict();
|
||||
});
|
||||
} else {
|
||||
|
||||
/*
|
||||
@ -406,9 +410,10 @@ Region_map_component::attach(Dataspace_capability ds_cap, size_t size,
|
||||
*/
|
||||
size_t align_log2 = log2(size);
|
||||
if (align_log2 >= sizeof(void *)*8)
|
||||
align_log2 = get_page_size_log2();
|
||||
align_log2 = min_align_log2;
|
||||
|
||||
for (; align_log2 >= get_page_size_log2(); align_log2--) {
|
||||
bool done = false;
|
||||
for (; !done && (align_log2 >= min_align_log2); align_log2--) {
|
||||
|
||||
/*
|
||||
* Don't use an alignment higher than the alignment of the backing
|
||||
@ -419,21 +424,23 @@ Region_map_component::attach(Dataspace_capability ds_cap, size_t size,
|
||||
continue;
|
||||
|
||||
/* try allocating the align region */
|
||||
Range_allocator::Alloc_return alloc_return =
|
||||
_map.alloc_aligned(size, &attach_at, align_log2);
|
||||
_map.alloc_aligned(size, align_log2).with_result(
|
||||
|
||||
typedef Range_allocator::Alloc_return Alloc_return;
|
||||
|
||||
switch (alloc_return.value) {
|
||||
case Alloc_return::OK: break; /* switch */
|
||||
case Alloc_return::OUT_OF_METADATA: throw Out_of_ram();
|
||||
case Alloc_return::RANGE_CONFLICT: continue; /* for loop */
|
||||
}
|
||||
break; /* for loop */
|
||||
[&] (void *ptr) {
|
||||
attach_at = ptr;
|
||||
done = true; },
|
||||
|
||||
[&] (Range_allocator::Alloc_error error) {
|
||||
switch (error) {
|
||||
case Alloc_error::OUT_OF_RAM: throw Out_of_ram();
|
||||
case Alloc_error::OUT_OF_CAPS: throw Out_of_caps();
|
||||
case Alloc_error::DENIED: break; /* no fit */
|
||||
}
|
||||
/* try smaller alignment in next iteration... */
|
||||
});
|
||||
}
|
||||
|
||||
if (align_log2 < get_page_size_log2())
|
||||
if (!done)
|
||||
throw Region_conflict();
|
||||
}
|
||||
|
||||
|
@ -35,18 +35,23 @@ Io_port_session_component::Io_port_session_component(Range_allocator &io_port_al
|
||||
unsigned size = Arg_string::find_arg(args, "io_port_size").ulong_value(0);
|
||||
|
||||
/* allocate region (also checks out-of-bounds regions) */
switch (io_port_alloc.alloc_addr(size, base).value) {
io_port_alloc.alloc_addr(size, base).with_error(
[&] (Allocator::Alloc_error e) {

case Range_allocator::Alloc_return::RANGE_CONFLICT:
error("I/O port ", Hex_range<uint16_t>(base, size), " not available");
throw Service_denied();
switch (e) {
case Range_allocator::Alloc_error::DENIED:
error("I/O port ", Hex_range<uint16_t>(base, size), " not available");
throw Service_denied();

case Range_allocator::Alloc_return::OUT_OF_METADATA:
error("I/O port allocator ran out of meta data");
throw Service_denied();
case Range_allocator::Alloc_error::OUT_OF_RAM:
error("I/O port allocator ran out of RAM");
throw Service_denied();

case Range_allocator::Alloc_return::OK: break;
}
case Range_allocator::Alloc_error::OUT_OF_CAPS:
error("I/O port allocator ran out of caps");
throw Service_denied();
}
});
|
||||
|
||||
/* store information */
|
||||
_base = base;
|
||||
|
@ -71,29 +71,37 @@ class Stack_area_region_map : public Region_map
|
||||
{
|
||||
/* allocate physical memory */
|
||||
size = round_page(size);
|
||||
void *phys_base = nullptr;
|
||||
Range_allocator &ra = platform_specific().ram_alloc();
|
||||
if (ra.alloc_aligned(size, &phys_base,
|
||||
get_page_size_log2()).error()) {
|
||||
error("could not allocate backing store for new stack");
|
||||
return (addr_t)0;
|
||||
}
|
||||
|
||||
Dataspace_component &ds = *new (&_ds_slab)
|
||||
Dataspace_component(size, 0, (addr_t)phys_base, CACHED, true, 0);
|
||||
Range_allocator &phys = platform_specific().ram_alloc();
|
||||
|
||||
addr_t const core_local_addr = stack_area_virtual_base() + (addr_t)local_addr;
|
||||
return phys.alloc_aligned(size, get_page_size_log2()).convert<Local_addr>(
|
||||
|
||||
if (!map_local(ds.phys_addr(), core_local_addr,
|
||||
ds.size() >> get_page_size_log2())) {
|
||||
error("could not map phys ", Hex(ds.phys_addr()),
|
||||
" at local ", Hex(core_local_addr));
|
||||
return (addr_t)0;
|
||||
}
|
||||
[&] (void *phys_ptr) {
|
||||
|
||||
ds.assign_core_local_addr((void*)core_local_addr);
|
||||
addr_t const phys_base = (addr_t)phys_ptr;
|
||||
|
||||
return local_addr;
|
||||
Dataspace_component &ds = *new (&_ds_slab)
|
||||
Dataspace_component(size, 0, (addr_t)phys_base, CACHED, true, 0);
|
||||
|
||||
addr_t const core_local_addr = stack_area_virtual_base()
|
||||
+ (addr_t)local_addr;
|
||||
|
||||
if (!map_local(ds.phys_addr(), core_local_addr,
|
||||
ds.size() >> get_page_size_log2())) {
|
||||
error("could not map phys ", Hex(ds.phys_addr()),
|
||||
" at local ", Hex(core_local_addr));
|
||||
|
||||
phys.free(phys_ptr);
|
||||
return Local_addr { (addr_t)0 };
|
||||
}
|
||||
|
||||
ds.assign_core_local_addr((void*)core_local_addr);
|
||||
|
||||
return local_addr;
|
||||
},
|
||||
[&] (Range_allocator::Alloc_error) {
|
||||
error("could not allocate backing store for new stack");
|
||||
return (addr_t)0; });
|
||||
}
|
||||
|
||||
void detach(Local_addr local_addr) override
|
||||
|
@ -63,48 +63,57 @@ void Vm_session_component::attach(Dataspace_capability const cap,
|
||||
attribute.offset > dsc.size() - attribute.size)
|
||||
throw Invalid_dataspace();
|
||||
|
||||
switch (_map.alloc_addr(attribute.size, guest_phys).value) {
|
||||
case Range_allocator::Alloc_return::OUT_OF_METADATA:
|
||||
throw Out_of_ram();
|
||||
case Range_allocator::Alloc_return::RANGE_CONFLICT:
|
||||
{
|
||||
Rm_region *region_ptr = _map.metadata((void *)guest_phys);
|
||||
if (!region_ptr)
|
||||
throw Region_conflict();
|
||||
using Alloc_error = Range_allocator::Alloc_error;
|
||||
|
||||
Rm_region ®ion = *region_ptr;
|
||||
_map.alloc_addr(attribute.size, guest_phys).with_result(
|
||||
|
||||
if (!(cap == region.dataspace().cap()))
|
||||
throw Region_conflict();
|
||||
if (guest_phys < region.base() ||
|
||||
guest_phys > region.base() + region.size() - 1)
|
||||
throw Region_conflict();
|
||||
[&] (void *) {
|
||||
|
||||
/* re-attach all */
|
||||
break;
|
||||
}
|
||||
case Range_allocator::Alloc_return::OK:
|
||||
{
|
||||
/* store attachment info in meta data */
|
||||
try {
|
||||
_map.construct_metadata((void *)guest_phys,
|
||||
guest_phys, attribute.size,
|
||||
dsc.writable() && attribute.writeable,
|
||||
dsc, attribute.offset, *this,
|
||||
attribute.executable);
|
||||
} catch (Allocator_avl_tpl<Rm_region>::Assign_metadata_failed) {
|
||||
error("failed to store attachment info");
|
||||
throw Invalid_dataspace();
|
||||
/* store attachment info in meta data */
|
||||
try {
|
||||
_map.construct_metadata((void *)guest_phys,
|
||||
guest_phys, attribute.size,
|
||||
dsc.writable() && attribute.writeable,
|
||||
dsc, attribute.offset, *this,
|
||||
attribute.executable);
|
||||
} catch (Allocator_avl_tpl<Rm_region>::Assign_metadata_failed) {
|
||||
error("failed to store attachment info");
|
||||
throw Invalid_dataspace();
|
||||
}
|
||||
|
||||
Rm_region ®ion = *_map.metadata((void *)guest_phys);
|
||||
|
||||
/* inform dataspace about attachment */
|
||||
dsc.attached_to(region);
|
||||
},
|
||||
|
||||
[&] (Alloc_error error) {
|
||||
|
||||
switch (error) {
|
||||
|
||||
case Alloc_error::OUT_OF_RAM: throw Out_of_ram();
|
||||
case Alloc_error::OUT_OF_CAPS: throw Out_of_caps();
|
||||
case Alloc_error::DENIED:
|
||||
{
|
||||
/*
|
||||
* Handle attach after partial detach
|
||||
*/
|
||||
Rm_region *region_ptr = _map.metadata((void *)guest_phys);
|
||||
if (!region_ptr)
|
||||
throw Region_conflict();
|
||||
|
||||
Rm_region ®ion = *region_ptr;
|
||||
|
||||
if (!(cap == region.dataspace().cap()))
|
||||
throw Region_conflict();
|
||||
|
||||
if (guest_phys < region.base() ||
|
||||
guest_phys > region.base() + region.size() - 1)
|
||||
throw Region_conflict();
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
Rm_region ®ion = *_map.metadata((void *)guest_phys);
|
||||
|
||||
/* inform dataspace about attachment */
|
||||
dsc.attached_to(region);
|
||||
|
||||
break;
|
||||
}
|
||||
};
|
||||
);
|
||||
|
||||
/* kernel specific code to attach memory to guest */
|
||||
_attach_vm_memory(dsc, guest_phys, attribute);
|
||||
|
@ -95,110 +95,106 @@ void Allocator_avl_base::Block::recompute()
|
||||
** Allocator_avl implementation **
|
||||
**********************************/
|
||||
|
||||
Allocator_avl_base::Block *Allocator_avl_base::_alloc_block_metadata()
Allocator_avl_base::Alloc_md_result Allocator_avl_base::_alloc_block_metadata()
{
void *b = nullptr;
if (_md_alloc->alloc(sizeof(Block), &b))
return construct_at<Block>(b, 0, 0, 0);

return nullptr;
return _md_alloc.try_alloc(sizeof(Block)).convert<Alloc_md_result>(
[&] (void *ptr) {
return construct_at<Block>(ptr, 0, 0, 0); },
[&] (Alloc_error error) {
return error; });
}
|
||||
|
||||
|
||||
bool Allocator_avl_base::_alloc_two_blocks_metadata(Block **dst1, Block **dst2)
|
||||
Allocator_avl_base::Alloc_md_two_result
|
||||
Allocator_avl_base::_alloc_two_blocks_metadata()
|
||||
{
|
||||
Block * const b1 = _alloc_block_metadata();
|
||||
Block * b2 = nullptr;
|
||||
return _alloc_block_metadata().convert<Alloc_md_two_result>(
|
||||
|
||||
try {
|
||||
b2 = _alloc_block_metadata();
|
||||
} catch (...) {
|
||||
if (b1) _md_alloc->free(b1, sizeof(Block));
|
||||
throw;
|
||||
}
|
||||
[&] (Block *b1_ptr) {
|
||||
return _alloc_block_metadata().convert<Alloc_md_two_result>(
|
||||
|
||||
if (b1 && b2) {
|
||||
*dst1 = b1;
|
||||
*dst2 = b2;
|
||||
return true;
|
||||
}
|
||||
[&] (Block *b2_ptr) {
|
||||
return Two_blocks { b1_ptr, b2_ptr }; },
|
||||
|
||||
*dst1 = *dst2 = nullptr;
|
||||
|
||||
if (b2) _md_alloc->free(b2, sizeof(Block));
|
||||
if (b1) _md_alloc->free(b1, sizeof(Block));
|
||||
|
||||
return false;
|
||||
[&] (Alloc_error error) {
|
||||
_md_alloc.free(b1_ptr, sizeof(Block));
|
||||
return error; });
|
||||
},
|
||||
[&] (Alloc_error error) {
|
||||
return error; });
|
||||
}
|
||||
|
||||
|
||||
int Allocator_avl_base::_add_block(Block *block_metadata,
|
||||
void Allocator_avl_base::_add_block(Block &block_metadata,
|
||||
addr_t base, size_t size, bool used)
|
||||
{
|
||||
if (!block_metadata)
|
||||
return -1;
|
||||
|
||||
/* call constructor for new block */
|
||||
construct_at<Block>(block_metadata, base, size, used);
|
||||
construct_at<Block>(&block_metadata, base, size, used);
|
||||
|
||||
/* insert block into avl tree */
|
||||
_addr_tree.insert(block_metadata);
|
||||
|
||||
return 0;
|
||||
_addr_tree.insert(&block_metadata);
|
||||
}
|
||||
|
||||
|
||||
void Allocator_avl_base::_destroy_block(Block *b)
|
||||
void Allocator_avl_base::_destroy_block(Block &b)
|
||||
{
|
||||
if (!b) return;
|
||||
|
||||
/* remove block from both avl trees */
|
||||
_addr_tree.remove(b);
|
||||
_md_alloc->free(b, _md_entry_size);
|
||||
_addr_tree.remove(&b);
|
||||
_md_alloc.free(&b, _md_entry_size);
|
||||
}
|
||||
|
||||
|
||||
void Allocator_avl_base::_cut_from_block(Block *b, addr_t addr, size_t size,
|
||||
Block *dst1, Block *dst2)
|
||||
void Allocator_avl_base::_cut_from_block(Block &b, addr_t addr, size_t size, Two_blocks blocks)
|
||||
{
|
||||
size_t const padding = addr > b->addr() ? addr - b->addr() : 0;
|
||||
size_t const b_size = b->size() > padding ? b->size() - padding : 0;
|
||||
size_t const padding = addr > b.addr() ? addr - b.addr() : 0;
|
||||
size_t const b_size = b.size() > padding ? b.size() - padding : 0;
|
||||
size_t remaining = b_size > size ? b_size - size : 0;
|
||||
|
||||
/* case that a block contains the whole addressable range */
|
||||
if (!b->addr() && !b->size())
|
||||
remaining = b->size() - size - padding;
|
||||
if (!b.addr() && !b.size())
|
||||
remaining = b.size() - size - padding;
|
||||
|
||||
addr_t orig_addr = b->addr();
|
||||
addr_t orig_addr = b.addr();
|
||||
|
||||
_destroy_block(b);
|
||||
|
||||
/* create free block containing the alignment padding */
|
||||
if (padding > 0)
|
||||
_add_block(dst1, orig_addr, padding, Block::FREE);
|
||||
_add_block(*blocks.b1_ptr, orig_addr, padding, Block::FREE);
|
||||
else
|
||||
_md_alloc->free(dst1, sizeof(Block));
|
||||
_md_alloc.free(blocks.b1_ptr, sizeof(Block));
|
||||
|
||||
/* create free block for remaining space of original block */
|
||||
if (remaining > 0)
|
||||
_add_block(dst2, addr + size, remaining, Block::FREE);
|
||||
_add_block(*blocks.b2_ptr, addr + size, remaining, Block::FREE);
|
||||
else
|
||||
_md_alloc->free(dst2, sizeof(Block));
|
||||
_md_alloc.free(blocks.b2_ptr, sizeof(Block));
|
||||
}
|
||||
|
||||
|
||||
template <typename FN>
|
||||
void Allocator_avl_base::_revert_block_ranges(FN const &any_block_fn)
|
||||
{
|
||||
for (bool loop = true; loop; ) {
|
||||
|
||||
Block *block_ptr = any_block_fn();
|
||||
if (!block_ptr)
|
||||
break;
|
||||
|
||||
remove_range(block_ptr->addr(), block_ptr->size()).with_error(
|
||||
[&] (Alloc_error error) {
|
||||
if (error == Alloc_error::DENIED) /* conflict */
|
||||
_destroy_block(*block_ptr);
|
||||
else
|
||||
loop = false; /* give up on OUT_OF_RAM or OUT_OF_CAPS */
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
void Allocator_avl_base::_revert_unused_ranges()
|
||||
{
|
||||
do {
|
||||
Block * const block = _find_any_unused_block(_addr_tree.first());
|
||||
if (!block)
|
||||
break;
|
||||
|
||||
int const error = remove_range(block->addr(), block->size());
|
||||
if (error && block == _find_any_unused_block(_addr_tree.first()))
|
||||
/* if the invocation fails, release the block to break endless loop */
|
||||
_destroy_block(block);
|
||||
} while (true);
|
||||
_revert_block_ranges([&] () {
|
||||
return _find_any_unused_block(_addr_tree.first()); });
|
||||
}
|
||||
|
||||
|
||||
@ -220,159 +216,161 @@ void Allocator_avl_base::_revert_allocations_and_ranges()
|
||||
" at allocator destruction time");
|
||||
|
||||
/* destroy all remaining blocks */
|
||||
while (Block *block = _addr_tree.first()) {
|
||||
if (remove_range(block->addr(), block->size())) {
|
||||
/* if the invocation fails, release the block to break endless loop */
|
||||
_destroy_block(block);
|
||||
}
|
||||
}
|
||||
_revert_block_ranges([&] () { return _addr_tree.first(); });
|
||||
}
|
||||
|
||||
|
||||
int Allocator_avl_base::add_range(addr_t new_addr, size_t new_size)
|
||||
Allocator_avl_base::Range_result Allocator_avl_base::add_range(addr_t new_addr, size_t new_size)
|
||||
{
|
||||
Block *b;
|
||||
|
||||
/* sanity check for insane users ;-) */
|
||||
if (!new_size) return -2;
|
||||
if (!new_size)
|
||||
return Alloc_error::DENIED;
|
||||
|
||||
/* check for conflicts with existing blocks */
|
||||
if (_find_by_address(new_addr, new_size, true))
|
||||
return -3;
|
||||
return Alloc_error::DENIED;
|
||||
|
||||
Block *new_block = _alloc_block_metadata();
|
||||
if (!new_block) return -4;
|
||||
return _alloc_block_metadata().convert<Range_result>(
|
||||
|
||||
/* merge with predecessor */
|
||||
if (new_addr != 0 && (b = _find_by_address(new_addr - 1)) && !b->used()) {
|
||||
[&] (Block *new_block_ptr) {
|
||||
|
||||
new_size += b->size();
|
||||
new_addr = b->addr();
|
||||
/* merge with predecessor */
|
||||
Block *b = nullptr;
|
||||
if (new_addr != 0 && (b = _find_by_address(new_addr - 1)) && !b->used()) {
|
||||
|
||||
_destroy_block(b);
|
||||
}
|
||||
new_size += b->size();
|
||||
new_addr = b->addr();
|
||||
_destroy_block(*b);
|
||||
}
|
||||
|
||||
/* merge with successor */
|
||||
if ((b = _find_by_address(new_addr + new_size)) && !b->used()) {
|
||||
/* merge with successor */
|
||||
if ((b = _find_by_address(new_addr + new_size)) && !b->used()) {
|
||||
|
||||
new_size += b->size();
|
||||
new_size += b->size();
|
||||
_destroy_block(*b);
|
||||
}
|
||||
|
||||
_destroy_block(b);
|
||||
}
|
||||
/* create new block that spans over all merged blocks */
|
||||
_add_block(*new_block_ptr, new_addr, new_size, Block::FREE);
|
||||
|
||||
/* create new block that spans over all merged blocks */
|
||||
return _add_block(new_block, new_addr, new_size, Block::FREE);
|
||||
return Range_ok();
|
||||
},
|
||||
[&] (Alloc_error error) {
|
||||
return error; });
|
||||
}
|
||||
|
||||
|
||||
int Allocator_avl_base::remove_range(addr_t base, size_t size)
|
||||
Allocator_avl_base::Range_result Allocator_avl_base::remove_range(addr_t base, size_t size)
|
||||
{
|
||||
/* sanity check for insane users ;-) */
|
||||
if (!size) return -1;
|
||||
Range_result result = Alloc_error::DENIED;
|
||||
|
||||
Block *dst1, *dst2;
|
||||
if (!_alloc_two_blocks_metadata(&dst1, &dst2))
|
||||
return -2;
|
||||
if (!size)
|
||||
return result;
|
||||
|
||||
/* FIXME removing ranges from allocators with used blocks is not safe! */
|
||||
while (1) {
|
||||
for (bool done = false; !done; ) {
|
||||
|
||||
/* find block overlapping the specified range */
|
||||
Block *b = _addr_tree.first();
|
||||
b = b ? b->find_by_address(base, size, 1) : 0;
|
||||
_alloc_two_blocks_metadata().with_result(
|
||||
[&] (Two_blocks blocks) {
|
||||
|
||||
/*
|
||||
* If there are no overlappings with any existing blocks (b == 0), we
|
||||
* are done. If however, the overlapping block is in use, we have a
|
||||
* problem. In both cases, return.
|
||||
*/
|
||||
if (!b || !b->avail()) {
|
||||
_md_alloc->free(dst1, sizeof(Block));
|
||||
_md_alloc->free(dst2, sizeof(Block));
|
||||
return !b ? 0 : -3;
|
||||
}
|
||||
/* find block overlapping the specified range */
|
||||
Block *b = _addr_tree.first();
|
||||
b = b ? b->find_by_address(base, size, 1) : 0;
|
||||
|
||||
/* cut intersecting address range */
|
||||
addr_t intersect_beg = max(base, b->addr());
|
||||
size_t intersect_end = min(base + size - 1, b->addr() + b->size() - 1);
|
||||
/*
|
||||
* If there are no overlappings with any existing blocks (b == 0), we
|
||||
* are done. If however, the overlapping block is in use, we have a
|
||||
* problem. Stop iterating in both cases.
|
||||
*/
|
||||
if (!b || !b->avail()) {
|
||||
_md_alloc.free(blocks.b1_ptr, sizeof(Block));
|
||||
_md_alloc.free(blocks.b2_ptr, sizeof(Block));
|
||||
|
||||
_cut_from_block(b, intersect_beg, intersect_end - intersect_beg + 1, dst1, dst2);
|
||||
if (!_alloc_two_blocks_metadata(&dst1, &dst2))
|
||||
return -4;
|
||||
};
|
||||
if (b == 0)
|
||||
result = Range_ok();
|
||||
|
||||
done = true;
|
||||
return;
|
||||
}
|
||||
|
||||
/* cut intersecting address range */
|
||||
addr_t intersect_beg = max(base, b->addr());
|
||||
size_t intersect_end = min(base + size - 1, b->addr() + b->size() - 1);
|
||||
|
||||
_cut_from_block(*b, intersect_beg, intersect_end - intersect_beg + 1, blocks);
|
||||
},
|
||||
[&] (Alloc_error error) {
|
||||
result = error;
|
||||
done = true;
|
||||
});
|
||||
}
|
||||
return result;
|
||||
}
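
For orientation, a minimal sketch (illustrative only, not taken from the patch): since 'add_range' and 'remove_range' now return a 'Range_result', range-registration failures can be inspected without exceptions, for example when setting up an 'Allocator_avl':

    void register_range(Genode::Allocator_avl &alloc)
    {
        /* make [0x1000, 0x11000) available for allocations */
        alloc.add_range(0x1000, 0x10000).with_error(
            [&] (Genode::Allocator::Alloc_error e) {
                Genode::error("add_range failed, error=", (int)e); });
    }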
|
||||
|
||||
|
||||
Range_allocator::Alloc_return
|
||||
Allocator_avl_base::alloc_aligned(size_t size, void **out_addr, unsigned align,
|
||||
Range range)
|
||||
template <typename SEARCH_FN>
|
||||
Allocator::Alloc_result
|
||||
Allocator_avl_base::_allocate(size_t const size, unsigned align, Range range,
|
||||
SEARCH_FN const &search_fn)
|
||||
{
|
||||
Block *dst1, *dst2;
|
||||
if (!_alloc_two_blocks_metadata(&dst1, &dst2))
|
||||
return Alloc_return(Alloc_return::OUT_OF_METADATA);
|
||||
return _alloc_two_blocks_metadata().convert<Alloc_result>(
|
||||
|
||||
/* find best fitting block */
|
||||
Block *b = _addr_tree.first();
|
||||
b = b ? b->find_best_fit(size, align, range) : 0;
|
||||
[&] (Two_blocks two_blocks) -> Alloc_result {
|
||||
|
||||
if (!b) {
|
||||
_md_alloc->free(dst1, sizeof(Block));
|
||||
_md_alloc->free(dst2, sizeof(Block));
|
||||
return Alloc_return(Alloc_return::RANGE_CONFLICT);
|
||||
}
|
||||
/* find block according to the policy implemented by 'search_fn' */
|
||||
Block *b_ptr = _addr_tree.first();
|
||||
b_ptr = b_ptr ? search_fn(*b_ptr) : 0;
|
||||
|
||||
/* calculate address of new (aligned) block */
|
||||
addr_t new_addr = align_addr(max(b->addr(), range.start), align);
|
||||
if (!b_ptr) {
|
||||
/* range conflict */
|
||||
_md_alloc.free(two_blocks.b1_ptr, sizeof(Block));
|
||||
_md_alloc.free(two_blocks.b2_ptr, sizeof(Block));
|
||||
return Alloc_error::DENIED;
|
||||
}
|
||||
Block &b = *b_ptr;
|
||||
|
||||
/* remove new block from containing block */
|
||||
_cut_from_block(b, new_addr, size, dst1, dst2);
|
||||
/* calculate address of new (aligned) block */
|
||||
addr_t const new_addr = align_addr(max(b.addr(), range.start), align);
|
||||
|
||||
/* create allocated block */
|
||||
Block *new_block = _alloc_block_metadata();
|
||||
if (!new_block) {
|
||||
_md_alloc->free(new_block, sizeof(Block));
|
||||
return Alloc_return(Alloc_return::OUT_OF_METADATA);
|
||||
}
|
||||
_add_block(new_block, new_addr, size, Block::USED);
|
||||
/* remove new block from containing block, consume two_blocks */
|
||||
_cut_from_block(b, new_addr, size, two_blocks);
|
||||
|
||||
*out_addr = reinterpret_cast<void *>(new_addr);
|
||||
return Alloc_return(Alloc_return::OK);
|
||||
/* create allocated block */
|
||||
return _alloc_block_metadata().convert<Alloc_result>(
|
||||
|
||||
[&] (Block *new_block_ptr) {
|
||||
_add_block(*new_block_ptr, new_addr, size, Block::USED);
|
||||
return reinterpret_cast<void *>(new_addr); },
|
||||
|
||||
[&] (Alloc_error error) {
|
||||
return error; });
|
||||
},
|
||||
[&] (Alloc_error error) {
|
||||
return error; });
|
||||
}
|
||||
|
||||
|
||||
Range_allocator::Alloc_return Allocator_avl_base::alloc_addr(size_t size, addr_t addr)
|
||||
Allocator::Alloc_result
|
||||
Allocator_avl_base::alloc_aligned(size_t size, unsigned align, Range range)
|
||||
{
|
||||
/* sanity check */
|
||||
return _allocate(size, align, range, [&] (Block &first) {
|
||||
return first.find_best_fit(size, align, range); });
|
||||
}
|
||||
|
||||
|
||||
Range_allocator::Alloc_result Allocator_avl_base::alloc_addr(size_t size, addr_t addr)
|
||||
{
|
||||
/* check for integer overflow */
|
||||
if (addr + size - 1 < addr)
|
||||
return Alloc_error::DENIED;
|
||||
|
||||
/* check for range conflict */
|
||||
if (!_sum_in_range(addr, size))
|
||||
return Alloc_return(Alloc_return::RANGE_CONFLICT);
|
||||
return Alloc_error::DENIED;
|
||||
|
||||
Block *dst1, *dst2;
|
||||
if (!_alloc_two_blocks_metadata(&dst1, &dst2))
|
||||
return Alloc_return(Alloc_return::OUT_OF_METADATA);
|
||||
Range const range { .start = addr, .end = addr + size - 1 };
|
||||
unsigned const align_any = 0;
|
||||
|
||||
/* find block at specified address */
|
||||
Block *b = _addr_tree.first();
|
||||
b = b ? b->find_by_address(addr, size) : 0;
|
||||
|
||||
/* skip if there's no block or block is used */
|
||||
if (!b || b->used()) {
|
||||
_md_alloc->free(dst1, sizeof(Block));
|
||||
_md_alloc->free(dst2, sizeof(Block));
|
||||
return Alloc_return(Alloc_return::RANGE_CONFLICT);
|
||||
}
|
||||
|
||||
/* remove new block from containing block */
|
||||
_cut_from_block(b, addr, size, dst1, dst2);
|
||||
|
||||
/* create allocated block */
|
||||
Block *new_block = _alloc_block_metadata();
|
||||
if (!new_block) {
|
||||
_md_alloc->free(new_block, sizeof(Block));
|
||||
return Alloc_return(Alloc_return::OUT_OF_METADATA);
|
||||
}
|
||||
_add_block(new_block, addr, size, Block::USED);
|
||||
|
||||
return Alloc_return(Alloc_return::OK);
|
||||
return _allocate(size, align_any, range, [&] (Block &first) {
|
||||
return first.find_by_address(addr, size); });
|
||||
}
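
A typical caller-side adaptation, mirrored by the driver changes later in this patch, converts the result of the new two-argument 'alloc_aligned' directly into an address. A rough sketch (function name is illustrative):

    Genode::addr_t alloc_page_aligned(Genode::Range_allocator &range, Genode::size_t size)
    {
        return range.alloc_aligned(size, 12).convert<Genode::addr_t>(
            [&] (void *ptr)                      { return (Genode::addr_t)ptr; },
            [&] (Genode::Allocator::Alloc_error) { return 0UL; });
    }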
|
||||
|
||||
|
||||
@ -383,14 +381,14 @@ void Allocator_avl_base::free(void *addr)
|
||||
|
||||
if (!b || !(b->used())) return;
|
||||
|
||||
addr_t new_addr = b->addr();
|
||||
size_t new_size = b->size();
|
||||
addr_t const new_addr = b->addr();
|
||||
size_t const new_size = b->size();
|
||||
|
||||
if (new_addr != (addr_t)addr)
|
||||
error(__PRETTY_FUNCTION__, ": given address (", addr, ") "
|
||||
"is not the block start address (", (void *)new_addr, ")");
|
||||
|
||||
_destroy_block(b);
|
||||
_destroy_block(*b);
|
||||
|
||||
add_range(new_addr, new_size);
|
||||
}
|
||||
|
@ -77,117 +77,129 @@ int Heap::quota_limit(size_t new_quota_limit)
|
||||
}
|
||||
|
||||
|
||||
Heap::Dataspace *Heap::_allocate_dataspace(size_t size, bool enforce_separate_metadata)
|
||||
Heap::Alloc_ds_result
|
||||
Heap::_allocate_dataspace(size_t size, bool enforce_separate_metadata)
|
||||
{
|
||||
Ram_dataspace_capability new_ds_cap;
|
||||
void *ds_addr = 0;
|
||||
void *ds_meta_data_addr = 0;
|
||||
Heap::Dataspace *ds = 0;
|
||||
using Result = Alloc_ds_result;
|
||||
|
||||
/* make new ram dataspace available at our local address space */
|
||||
try {
|
||||
new_ds_cap = _ds_pool.ram_alloc->alloc(size);
|
||||
try { ds_addr = _ds_pool.region_map->attach(new_ds_cap); }
|
||||
catch (Out_of_ram) {
|
||||
_ds_pool.ram_alloc->free(new_ds_cap);
|
||||
return nullptr;
|
||||
}
|
||||
catch (Out_of_caps) {
|
||||
_ds_pool.ram_alloc->free(new_ds_cap);
|
||||
throw;
|
||||
}
|
||||
catch (Region_map::Invalid_dataspace) {
|
||||
warning("heap: attempt to attach invalid dataspace");
|
||||
_ds_pool.ram_alloc->free(new_ds_cap);
|
||||
return nullptr;
|
||||
}
|
||||
catch (Region_map::Region_conflict) {
|
||||
warning("heap: region conflict while allocating dataspace");
|
||||
_ds_pool.ram_alloc->free(new_ds_cap);
|
||||
return nullptr;
|
||||
}
|
||||
}
|
||||
catch (Out_of_ram) { return nullptr; }
|
||||
return _ds_pool.ram_alloc->try_alloc(size).convert<Result>(
|
||||
|
||||
if (enforce_separate_metadata) {
|
||||
[&] (Ram_dataspace_capability ds_cap) -> Result {
|
||||
|
||||
/* allocate the Dataspace structure */
|
||||
if (!_unsynchronized_alloc(sizeof(Heap::Dataspace), &ds_meta_data_addr)) {
|
||||
warning("could not allocate dataspace meta data");
|
||||
return 0;
|
||||
}
|
||||
struct Alloc_guard
|
||||
{
|
||||
Ram_allocator &ram;
|
||||
Ram_dataspace_capability ds;
|
||||
bool keep = false;
|
||||
|
||||
} else {
|
||||
Alloc_guard(Ram_allocator &ram, Ram_dataspace_capability ds)
|
||||
: ram(ram), ds(ds) { }
|
||||
|
||||
/* add new local address range to our local allocator */
|
||||
_alloc->add_range((addr_t)ds_addr, size);
|
||||
~Alloc_guard() { if (!keep) ram.free(ds); }
|
||||
|
||||
/* allocate the Dataspace structure */
|
||||
if (_alloc->alloc_aligned(sizeof(Heap::Dataspace), &ds_meta_data_addr, log2(16)).error()) {
|
||||
warning("could not allocate dataspace meta data - this should never happen");
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
} alloc_guard(*_ds_pool.ram_alloc, ds_cap);
|
||||
|
||||
ds = construct_at<Dataspace>(ds_meta_data_addr, new_ds_cap, ds_addr, size);
|
||||
struct Attach_guard
|
||||
{
|
||||
Region_map &rm;
|
||||
struct { void *ptr = nullptr; };
|
||||
bool keep = false;
|
||||
|
||||
_ds_pool.insert(ds);
|
||||
Attach_guard(Region_map &rm) : rm(rm) { }
|
||||
|
||||
return ds;
|
||||
~Attach_guard() { if (!keep && ptr) rm.detach(ptr); }
|
||||
|
||||
} attach_guard(*_ds_pool.region_map);
|
||||
|
||||
try {
|
||||
attach_guard.ptr = _ds_pool.region_map->attach(ds_cap);
|
||||
}
|
||||
catch (Out_of_ram) { return Alloc_error::OUT_OF_RAM; }
|
||||
catch (Out_of_caps) { return Alloc_error::OUT_OF_CAPS; }
|
||||
catch (Region_map::Invalid_dataspace) { return Alloc_error::DENIED; }
|
||||
catch (Region_map::Region_conflict) { return Alloc_error::DENIED; }
|
||||
|
||||
Alloc_result metadata = Alloc_error::DENIED;
|
||||
|
||||
/* allocate the 'Dataspace' structure */
|
||||
if (enforce_separate_metadata) {
|
||||
metadata = _unsynchronized_alloc(sizeof(Heap::Dataspace));
|
||||
|
||||
} else {
|
||||
|
||||
/* add new local address range to our local allocator */
|
||||
_alloc->add_range((addr_t)attach_guard.ptr, size).with_result(
|
||||
[&] (Range_allocator::Range_ok) {
|
||||
metadata = _alloc->alloc_aligned(sizeof(Heap::Dataspace), log2(16)); },
|
||||
[&] (Alloc_error error) {
|
||||
metadata = error; });
|
||||
}
|
||||
|
||||
return metadata.convert<Result>(
|
||||
[&] (void *md_ptr) -> Result {
|
||||
Dataspace &ds = *construct_at<Dataspace>(md_ptr, ds_cap,
|
||||
attach_guard.ptr, size);
|
||||
_ds_pool.insert(&ds);
|
||||
alloc_guard.keep = attach_guard.keep = true;
|
||||
return &ds;
|
||||
},
|
||||
[&] (Alloc_error error) {
|
||||
return error; });
|
||||
},
|
||||
[&] (Alloc_error error) {
|
||||
return error; });
|
||||
}
|
||||
|
||||
|
||||
bool Heap::_try_local_alloc(size_t size, void **out_addr)
|
||||
Allocator::Alloc_result Heap::_try_local_alloc(size_t size)
|
||||
{
|
||||
if (_alloc->alloc_aligned(size, out_addr, log2(16)).error())
|
||||
return false;
|
||||
return _alloc->alloc_aligned(size, log2(16)).convert<Alloc_result>(
|
||||
|
||||
_quota_used += size;
|
||||
return true;
|
||||
[&] (void *ptr) {
|
||||
_quota_used += size;
|
||||
return ptr; },
|
||||
|
||||
[&] (Alloc_error error) {
|
||||
return error; });
|
||||
}
|
||||
|
||||
|
||||
bool Heap::_unsynchronized_alloc(size_t size, void **out_addr)
|
||||
Allocator::Alloc_result Heap::_unsynchronized_alloc(size_t size)
|
||||
{
|
||||
size_t dataspace_size;
|
||||
|
||||
if (size >= BIG_ALLOCATION_THRESHOLD) {
|
||||
|
||||
/*
|
||||
* big allocation
|
||||
*
|
||||
* in this case, we allocate one dataspace without any meta data in it
|
||||
* In this case, we allocate one dataspace without any meta data in it
|
||||
* and return its local address without going through the allocator.
|
||||
*/
|
||||
|
||||
/* align to 4K page */
|
||||
dataspace_size = align_addr(size, 12);
|
||||
size_t const dataspace_size = align_addr(size, 12);
|
||||
|
||||
Heap::Dataspace *ds = _allocate_dataspace(dataspace_size, true);
|
||||
return _allocate_dataspace(dataspace_size, true).convert<Alloc_result>(
|
||||
|
||||
if (!ds) {
|
||||
warning("could not allocate dataspace");
|
||||
return false;
|
||||
}
|
||||
[&] (Dataspace *ds_ptr) {
|
||||
_quota_used += ds_ptr->size;
|
||||
return ds_ptr->local_addr; },
|
||||
|
||||
_quota_used += ds->size;
|
||||
|
||||
*out_addr = ds->local_addr;
|
||||
|
||||
return true;
|
||||
[&] (Alloc_error error) {
|
||||
return error; });
|
||||
}
|
||||
|
||||
/* try allocation at our local allocator */
|
||||
if (_try_local_alloc(size, out_addr))
|
||||
return true;
|
||||
{
|
||||
Alloc_result result = _try_local_alloc(size);
|
||||
if (result.ok())
|
||||
return result;
|
||||
}
|
||||
|
||||
/*
|
||||
* Calculate block size of needed backing store. The block must hold the
|
||||
* requested 'size' and we add some space for meta data
|
||||
* ('Dataspace' structures, AVL-node slab blocks).
|
||||
* Finally, we align the size to a 4K page.
|
||||
*/
|
||||
dataspace_size = size + Allocator_avl::slab_block_size() + sizeof(Heap::Dataspace);
|
||||
size_t dataspace_size = size
|
||||
+ Allocator_avl::slab_block_size()
|
||||
+ sizeof(Heap::Dataspace);
|
||||
/* align to 4K page */
|
||||
dataspace_size = align_addr(dataspace_size, 12);
|
||||
|
||||
/*
|
||||
* '_chunk_size' is a multiple of 4K, so 'dataspace_size' becomes
|
||||
@ -195,29 +207,34 @@ bool Heap::_unsynchronized_alloc(size_t size, void **out_addr)
|
||||
*/
|
||||
size_t const request_size = _chunk_size * sizeof(umword_t);
|
||||
|
||||
if ((dataspace_size < request_size) &&
|
||||
_allocate_dataspace(request_size, false)) {
|
||||
Alloc_ds_result result = Alloc_error::DENIED;
|
||||
|
||||
/*
|
||||
* Exponentially increase chunk size with each allocated chunk until
|
||||
* we hit 'MAX_CHUNK_SIZE'.
|
||||
*/
|
||||
_chunk_size = min(2*_chunk_size, (size_t)MAX_CHUNK_SIZE);
|
||||
if (dataspace_size < request_size) {
|
||||
|
||||
result = _allocate_dataspace(request_size, false);
|
||||
if (result.ok()) {
|
||||
|
||||
/*
|
||||
* Exponentially increase chunk size with each allocated chunk until
|
||||
* we hit 'MAX_CHUNK_SIZE'.
|
||||
*/
|
||||
_chunk_size = min(2*_chunk_size, (size_t)MAX_CHUNK_SIZE);
|
||||
}
|
||||
} else {
|
||||
|
||||
/* align to 4K page */
|
||||
dataspace_size = align_addr(dataspace_size, 12);
|
||||
if (!_allocate_dataspace(dataspace_size, false))
|
||||
return false;
|
||||
result = _allocate_dataspace(dataspace_size, false);
|
||||
}
|
||||
|
||||
if (result.failed())
|
||||
return result.convert<Alloc_result>(
|
||||
[&] (Dataspace *) { return Alloc_error::DENIED; },
|
||||
[&] (Alloc_error error) { return error; });
|
||||
|
||||
/* allocate originally requested block */
|
||||
return _try_local_alloc(size, out_addr);
|
||||
return _try_local_alloc(size);
|
||||
}
|
||||
|
||||
|
||||
bool Heap::alloc(size_t size, void **out_addr)
Allocator::Alloc_result Heap::try_alloc(size_t size)
{
if (size == 0)
error("attempt to allocate zero-size block from heap");
@ -227,9 +244,9 @@ bool Heap::alloc(size_t size, void **out_addr)

/* check requested allocation against quota limit */
if (size + _quota_used > _quota_limit)
return false;
return Alloc_error::DENIED;

return _unsynchronized_alloc(size, out_addr);
return _unsynchronized_alloc(size);
}
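
As a usage sketch for components (illustrative only; it assumes a component environment 'Genode::Env &env' and the usual 'Heap' constructor taking a RAM allocator and a region map), allocating from a 'Heap' without exceptions looks roughly like this:

    Genode::Heap heap { env.ram(), env.rm() };

    heap.try_alloc(256).with_result(
        [&] (void *ptr) {
            /* ... use the 256-byte block, then release it ... */
            heap.free(ptr, 256); },
        [&] (Genode::Allocator::Alloc_error) {
            Genode::warning("heap allocation failed"); });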
|
||||
|
||||
|
||||
|
@ -223,10 +223,13 @@ Slab::Slab(size_t slab_size, size_t block_size, void *initial_sb,
|
||||
{
|
||||
/* if no initial slab block was specified, try to get one */
|
||||
if (!_curr_sb && _backing_store)
|
||||
_curr_sb = _new_slab_block();
|
||||
_new_slab_block().with_result(
|
||||
[&] (Block *sb) { _curr_sb = sb; },
|
||||
[&] (Alloc_error error) {
|
||||
Allocator::throw_alloc_error(error); });
|
||||
|
||||
if (!_curr_sb)
|
||||
throw Out_of_memory();
|
||||
throw Allocator::Denied();
|
||||
|
||||
/* init first slab block */
|
||||
construct_at<Block>(_curr_sb, *this);
|
||||
@ -253,13 +256,19 @@ Slab::~Slab()
|
||||
}
|
||||
|
||||
|
||||
Slab::Block *Slab::_new_slab_block()
|
||||
Slab::New_slab_block_result Slab::_new_slab_block()
|
||||
{
|
||||
void *sb = nullptr;
|
||||
if (!_backing_store || !_backing_store->alloc(_block_size, &sb))
|
||||
return nullptr;
|
||||
using Result = New_slab_block_result;
|
||||
|
||||
return construct_at<Block>(sb, *this);
|
||||
if (!_backing_store)
|
||||
return Alloc_error::DENIED;
|
||||
|
||||
Slab &this_slab = *this;
|
||||
return _backing_store->try_alloc(_block_size).convert<Result>(
|
||||
[&] (void *sb) {
|
||||
return construct_at<Block>(sb, this_slab); },
|
||||
[&] (Alloc_error error) {
|
||||
return error; });
|
||||
}
|
||||
|
||||
|
||||
@ -313,19 +322,51 @@ void Slab::_insert_sb(Block *sb)
|
||||
}
|
||||
|
||||
|
||||
Slab::Expand_result Slab::_expand()
|
||||
{
|
||||
if (!_backing_store || _nested)
|
||||
return Expand_ok();
|
||||
|
||||
/* allocate new block for slab */
|
||||
_nested = true;
|
||||
|
||||
/* reset '_nested' when leaving the scope */
|
||||
struct Nested_guard {
|
||||
bool &_nested;
|
||||
Nested_guard(bool &nested) : _nested(nested) { }
|
||||
~Nested_guard() { _nested = false; }
|
||||
} guard(_nested);
|
||||
|
||||
return _new_slab_block().convert<Expand_result>(
|
||||
|
||||
[&] (Block *sb_ptr) {
|
||||
|
||||
/*
|
||||
* The new block has the maximum number of available slots.
|
||||
* Hence, we can insert it at the beginning of the sorted block
|
||||
* list.
|
||||
*/
|
||||
_insert_sb(sb_ptr);
|
||||
return Expand_ok(); },
|
||||
|
||||
[&] (Alloc_error error) {
|
||||
return error; });
|
||||
}
|
||||
|
||||
|
||||
void Slab::insert_sb(void *ptr)
|
||||
{
|
||||
_insert_sb(construct_at<Block>(ptr, *this));
|
||||
}
|
||||
|
||||
|
||||
bool Slab::alloc(size_t size, void **out_addr)
Allocator::Alloc_result Slab::try_alloc(size_t size)
{
/* too large for us ? */
if (size > _slab_size) {
error("requested size ", size, " is larger then slab size ",
_slab_size);
return false;
return Alloc_error::DENIED;
}
|
||||
|
||||
/*
|
||||
@ -336,29 +377,12 @@ bool Slab::alloc(size_t size, void **out_addr)
|
||||
* new slab block early enough - that is if there are only three free slab
|
||||
* entries left.
|
||||
*/
|
||||
if (_backing_store && (_total_avail <= 3) && !_nested) {
|
||||
|
||||
/* allocate new block for slab */
|
||||
_nested = true;
|
||||
|
||||
try {
|
||||
Block * const sb = _new_slab_block();
|
||||
|
||||
_nested = false;
|
||||
|
||||
if (!sb) return false;
|
||||
|
||||
/*
|
||||
* The new block has the maximum number of available slots and
|
||||
* so we can insert it at the beginning of the sorted block
|
||||
* list.
|
||||
*/
|
||||
_insert_sb(sb);
|
||||
}
|
||||
catch (...) {
|
||||
_nested = false;
|
||||
throw;
|
||||
}
|
||||
if (_total_avail <= 3) {
|
||||
Expand_result expand_result = _expand();
|
||||
if (expand_result.failed())
|
||||
return expand_result.convert<Alloc_result>(
|
||||
[&] (Expand_ok) { return Alloc_error::DENIED; },
|
||||
[&] (Alloc_error error) { return error; });
|
||||
}
|
||||
|
||||
/* skip completely occupied slab blocks, detect cycles */
|
||||
@ -367,13 +391,13 @@ bool Slab::alloc(size_t size, void **out_addr)
|
||||
if (_curr_sb->next == orig_curr_sb)
|
||||
break;
|
||||
|
||||
*out_addr = _curr_sb->alloc();
|
||||
|
||||
if (*out_addr == nullptr)
|
||||
return false;
|
||||
void *ptr = _curr_sb->alloc();
|
||||
if (!ptr)
|
||||
return Alloc_error::DENIED;
|
||||
|
||||
_total_avail--;
|
||||
return true;
|
||||
|
||||
return ptr;
|
||||
}
|
||||
|
||||
|
||||
|
@ -38,56 +38,64 @@ Sliced_heap::~Sliced_heap()
|
||||
}
|
||||
|
||||
|
||||
bool Sliced_heap::alloc(size_t size, void **out_addr)
|
||||
Allocator::Alloc_result Sliced_heap::try_alloc(size_t size)
|
||||
{
|
||||
/* allocation includes space for block meta data and is page-aligned */
|
||||
size = align_addr(size + sizeof(Block), 12);
|
||||
|
||||
Ram_dataspace_capability ds_cap;
|
||||
Block *block = nullptr;
|
||||
return _ram_alloc.try_alloc(size).convert<Alloc_result>(
|
||||
|
||||
_ram_alloc.try_alloc(size).with_result(
|
||||
[&] (Ram_dataspace_capability cap) { ds_cap = cap; },
|
||||
[&] (Ram_allocator::Alloc_error error) {
|
||||
switch (error) {
|
||||
case Ram_allocator::Alloc_error::OUT_OF_CAPS: throw Out_of_caps();
|
||||
case Ram_allocator::Alloc_error::OUT_OF_RAM: break;
|
||||
case Ram_allocator::Alloc_error::DENIED: break;
|
||||
[&] (Ram_dataspace_capability ds_cap) -> Alloc_result {
|
||||
|
||||
struct Alloc_guard
|
||||
{
|
||||
Ram_allocator &ram;
|
||||
Ram_dataspace_capability ds;
|
||||
bool keep = false;
|
||||
|
||||
Alloc_guard(Ram_allocator &ram, Ram_dataspace_capability ds)
|
||||
: ram(ram), ds(ds) { }
|
||||
|
||||
~Alloc_guard() { if (!keep) ram.free(ds); }
|
||||
|
||||
} alloc_guard(_ram_alloc, ds_cap);
|
||||
|
||||
struct Attach_guard
|
||||
{
|
||||
Region_map &rm;
|
||||
struct { void *ptr = nullptr; };
|
||||
bool keep = false;
|
||||
|
||||
Attach_guard(Region_map &rm) : rm(rm) { }
|
||||
|
||||
~Attach_guard() { if (!keep && ptr) rm.detach(ptr); }
|
||||
|
||||
} attach_guard(_region_map);
|
||||
|
||||
try {
|
||||
attach_guard.ptr = _region_map.attach(ds_cap);
|
||||
}
|
||||
});
|
||||
catch (Out_of_ram) { return Alloc_error::OUT_OF_RAM; }
|
||||
catch (Out_of_caps) { return Alloc_error::OUT_OF_CAPS; }
|
||||
catch (Region_map::Invalid_dataspace) { return Alloc_error::DENIED; }
|
||||
catch (Region_map::Region_conflict) { return Alloc_error::DENIED; }
|
||||
|
||||
if (!ds_cap.valid())
|
||||
return false;
|
||||
/* serialize access to block list */
|
||||
Mutex::Guard guard(_mutex);
|
||||
|
||||
try {
|
||||
block = _region_map.attach(ds_cap);
|
||||
}
|
||||
catch (Region_map::Region_conflict) {
|
||||
error("sliced_heap: region conflict while attaching dataspace");
|
||||
_ram_alloc.free(ds_cap);
|
||||
return false;
|
||||
}
|
||||
catch (Region_map::Invalid_dataspace) {
|
||||
error("sliced_heap: attempt to attach invalid dataspace");
|
||||
_ram_alloc.free(ds_cap);
|
||||
return false;
|
||||
}
|
||||
catch (Out_of_ram) {
|
||||
return false;
|
||||
}
|
||||
Block * const block = construct_at<Block>(attach_guard.ptr, ds_cap, size);
|
||||
|
||||
/* serialize access to block list */
|
||||
Mutex::Guard guard(_mutex);
|
||||
_consumed += size;
|
||||
_blocks.insert(block);
|
||||
|
||||
construct_at<Block>(block, ds_cap, size);
|
||||
alloc_guard.keep = attach_guard.keep = true;
|
||||
|
||||
_consumed += size;
|
||||
_blocks.insert(block);
|
||||
|
||||
/* skip meta data prepended to the payload portion of the block */
|
||||
*out_addr = block + 1;
|
||||
|
||||
return true;
|
||||
/* skip meta data prepended to the payload portion of the block */
|
||||
void *ptr = block + 1;
|
||||
return ptr;
|
||||
},
|
||||
[&] (Alloc_error error) {
|
||||
return error; });
|
||||
}
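
The 'Alloc_guard'/'Attach_guard' idiom used above generalizes to a small keep-flag guard that releases a resource on every early exit unless the happy path marks it as kept. A minimal sketch of the pattern (illustrative only):

    struct Alloc_guard
    {
        Genode::Allocator &alloc;
        void              *ptr;
        Genode::size_t     size;
        bool               keep = false;

        Alloc_guard(Genode::Allocator &alloc, void *ptr, Genode::size_t size)
        : alloc(alloc), ptr(ptr), size(size) { }

        /* release the allocation unless the happy path set 'keep' */
        ~Alloc_guard() { if (!keep) alloc.free(ptr, size); }
    };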
|
||||
|
||||
|
||||
|
@ -171,16 +171,18 @@ extern "C" void *__emutls_get_address(void *obj)
|
||||
|
||||
/* the heap allocates 16-byte aligned */
|
||||
if ((16 % emutls_object->align) != 0)
|
||||
Genode::warning(__func__, ": cannot ensure alignment of ",
|
||||
emutls_object->align, " bytes");
|
||||
warning(__func__, ": cannot ensure alignment of ",
|
||||
emutls_object->align, " bytes");
|
||||
|
||||
void *address = nullptr;
|
||||
|
||||
if (!cxx_heap().alloc(emutls_object->size, &address)) {
|
||||
Genode::error(__func__,
|
||||
": could not allocate thread-local variable instance");
|
||||
cxx_heap().try_alloc(emutls_object->size).with_result(
|
||||
[&] (void *ptr) { address = ptr; },
|
||||
[&] (Allocator::Alloc_error e) {
|
||||
error(__func__,
|
||||
": could not allocate thread-local variable, error ", (int)e); });
|
||||
if (!address)
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
if (emutls_object->templ)
|
||||
memcpy(address, emutls_object->templ, emutls_object->size);
|
||||
|
@ -76,9 +76,15 @@ extern "C" void *malloc(size_t size)
|
||||
* the size information when freeing the block.
|
||||
*/
|
||||
unsigned long real_size = size + sizeof(Block_header);
|
||||
void *addr = 0;
|
||||
if (!cxx_heap().alloc(real_size, &addr))
|
||||
return 0;
|
||||
|
||||
void *addr = nullptr;
|
||||
cxx_heap().try_alloc(real_size).with_result(
|
||||
[&] (void *ptr) { addr = ptr; },
|
||||
[&] (Allocator::Alloc_error error) {
|
||||
Genode::error(__func__,
|
||||
": cxx_heap allocation failed with error ", (int)error); });
|
||||
if (!addr)
|
||||
return nullptr;
|
||||
|
||||
*(Block_header *)addr = real_size;
|
||||
return (Block_header *)addr + 1;
|
||||
|
@ -75,15 +75,14 @@ class Linker::Region_map
|
||||
|
||||
/**
|
||||
* Allocate region anywhere within the region map
|
||||
*
|
||||
* XXX propagate OUT_OF_RAM, OUT_OF_CAPS
|
||||
*/
|
||||
addr_t alloc_region(size_t size)
|
||||
{
|
||||
addr_t result = 0;
|
||||
if (_range.alloc_aligned(size, (void **)&result,
|
||||
get_page_size_log2()).error())
|
||||
throw Region_conflict();
|
||||
|
||||
return result;
|
||||
return _range.alloc_aligned(size, get_page_size_log2()).convert<addr_t>(
|
||||
[&] (void *ptr) { return (addr_t)ptr; },
|
||||
[&] (Allocator::Alloc_error) -> addr_t { throw Region_conflict(); });
|
||||
}
|
||||
|
||||
/**
|
||||
@ -91,7 +90,7 @@ class Linker::Region_map
|
||||
*/
|
||||
void alloc_region_at(size_t size, addr_t vaddr)
|
||||
{
|
||||
if (_range.alloc_addr(size, vaddr).error())
|
||||
if (_range.alloc_addr(size, vaddr).failed())
|
||||
throw Region_conflict();
|
||||
}
|
||||
|
||||
|
@ -56,13 +56,13 @@ struct Allocator : Genode::Allocator
|
||||
bool need_size_for_free() const override {
|
||||
return a.need_size_for_free(); }
|
||||
|
||||
bool alloc(Genode::size_t size, void **p) override
|
||||
Alloc_result try_alloc(Genode::size_t size) override
|
||||
{
|
||||
*p = a.alloc(size);
|
||||
Alloc_result const result = a.try_alloc(size);
|
||||
|
||||
log("Allocator::alloc()");
|
||||
|
||||
return *p != 0;
|
||||
return result;
|
||||
}
|
||||
|
||||
void free(void *p, Genode::size_t size) override
|
||||
|
@ -357,7 +357,7 @@ class Audio_in::In
|
||||
float const scale = 32768.0f * 2;
|
||||
|
||||
float * const content = p->content();
|
||||
for (int i = 0; i < 2*Audio_in::PERIOD; i += 2) {
|
||||
for (unsigned long i = 0; i < 2*Audio_in::PERIOD; i += 2) {
|
||||
float sample = data[i] + data[i+1];
|
||||
content[i/2] = sample / scale;
|
||||
}
|
||||
|
@ -59,24 +59,51 @@ class Bsd::Slab_backend_alloc : public Genode::Allocator,
|
||||
Genode::Allocator_avl _range; /* manage allocations */
|
||||
Genode::Ram_allocator &_ram; /* allocator to allocate ds from */
|
||||
|
||||
bool _alloc_block()
|
||||
struct Extend_ok { };
|
||||
using Extend_result = Genode::Attempt<Extend_ok, Alloc_error>;
|
||||
|
||||
Extend_result _extend_one_block()
|
||||
{
|
||||
using namespace Genode;
|
||||
|
||||
if (_index == ELEMENTS) {
|
||||
Genode::error("Slab-backend exhausted!");
|
||||
return false;
|
||||
error("Slab-backend exhausted!");
|
||||
return Alloc_error::DENIED;
|
||||
}
|
||||
|
||||
try {
|
||||
_ds_cap[_index] = _ram.alloc(BLOCK_SIZE);
|
||||
Region_map_client::attach_at(_ds_cap[_index], _index * BLOCK_SIZE, BLOCK_SIZE, 0);
|
||||
} catch (...) { return false; }
|
||||
return _ram.try_alloc(BLOCK_SIZE).convert<Extend_result>(
|
||||
|
||||
/* return base + offset in VM area */
|
||||
addr_t block_base = _base + (_index * BLOCK_SIZE);
|
||||
++_index;
|
||||
[&] (Ram_dataspace_capability ds) -> Extend_result {
|
||||
|
||||
_range.add_range(block_base, BLOCK_SIZE);
|
||||
return true;
|
||||
_ds_cap[_index] = ds;
|
||||
|
||||
Alloc_error alloc_error = Alloc_error::DENIED;
|
||||
|
||||
try {
|
||||
Region_map_client::attach_at(_ds_cap[_index],
|
||||
_index * BLOCK_SIZE,
|
||||
BLOCK_SIZE, 0);
|
||||
|
||||
/* return base + offset in VM area */
|
||||
addr_t block_base = _base + (_index * BLOCK_SIZE);
|
||||
++_index;
|
||||
|
||||
_range.add_range(block_base, BLOCK_SIZE);
|
||||
|
||||
return Extend_ok();
|
||||
}
|
||||
catch (Out_of_ram) { alloc_error = Alloc_error::OUT_OF_RAM; }
|
||||
catch (Out_of_caps) { alloc_error = Alloc_error::OUT_OF_CAPS; }
|
||||
catch (...) { alloc_error = Alloc_error::DENIED; }
|
||||
|
||||
error("Slab_backend_alloc: local attach_at failed");
|
||||
|
||||
_ram.free(ds);
|
||||
_ds_cap[_index] = { };
|
||||
|
||||
return alloc_error;
|
||||
},
|
||||
[&] (Alloc_error e) -> Extend_result { return e; });
|
||||
}
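
The 'Extend_result' type above shows how driver-local code can define its own 'Attempt' flavors for operations that either succeed or fail with an 'Alloc_error'. A condensed sketch (all names are illustrative):

    struct Grow_ok { };
    using Grow_result = Genode::Attempt<Grow_ok, Genode::Allocator::Alloc_error>;

    Grow_result grow_backing_store(bool backing_store_exhausted)
    {
        if (backing_store_exhausted)
            return Genode::Allocator::Alloc_error::DENIED;

        return Grow_ok();
    }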
|
||||
|
||||
public:
|
||||
@ -100,20 +127,19 @@ class Bsd::Slab_backend_alloc : public Genode::Allocator,
|
||||
** Allocator interface **
|
||||
*************************/
|
||||
|
||||
bool alloc(Genode::size_t size, void **out_addr)
|
||||
Alloc_result try_alloc(Genode::size_t size) override
|
||||
{
|
||||
bool done = _range.alloc(size, out_addr);
|
||||
Alloc_result result = _range.try_alloc(size);
|
||||
if (result.ok())
|
||||
return result;
|
||||
|
||||
if (done)
|
||||
return done;
|
||||
return _extend_one_block().convert<Alloc_result>(
|
||||
[&] (Extend_ok) {
|
||||
return _range.try_alloc(size); },
|
||||
|
||||
done = _alloc_block();
|
||||
if (!done) {
|
||||
Genode::error("Backend allocator exhausted\n");
|
||||
return false;
|
||||
}
|
||||
|
||||
return _range.alloc(size, out_addr);
|
||||
[&] (Alloc_error e) {
|
||||
Genode::error("Backend allocator exhausted\n");
|
||||
return e; });
|
||||
}
|
||||
|
||||
void free(void *addr, Genode::size_t size) { _range.free(addr, size); }
|
||||
@ -147,8 +173,9 @@ class Bsd::Slab_alloc : public Genode::Slab
|
||||
|
||||
Genode::addr_t alloc()
|
||||
{
|
||||
Genode::addr_t result;
|
||||
return (Slab::alloc(_object_size, (void **)&result) ? result : 0);
|
||||
return Slab::try_alloc(_object_size).convert<Genode::addr_t>(
|
||||
[&] (void *ptr) { return (Genode::addr_t)ptr; },
|
||||
[&] (Alloc_error) -> Genode::addr_t { return 0; });
|
||||
}
|
||||
|
||||
void free(void *ptr) { Slab::free(ptr, _object_size); }
|
||||
|
@ -94,10 +94,9 @@ class Pci_driver : public Bsd::Bus_driver
|
||||
_dma_initialized = true;
|
||||
}
|
||||
|
||||
void *ptr = nullptr;
|
||||
bool err = Allocator_avl::alloc_aligned(size, &ptr, align).error();
|
||||
|
||||
return err ? 0 : (addr_t)ptr;
|
||||
return Allocator_avl::alloc_aligned(size, align).convert<Genode::addr_t>(
|
||||
[&] (void *ptr) { return (addr_t)ptr; },
|
||||
[&] (Alloc_error) { return 0UL; });
|
||||
}
|
||||
|
||||
void free(Genode::addr_t virt, Genode::size_t size) {
|
||||
|
@ -389,15 +389,16 @@ static Genode::Allocator_avl& allocator()
|
||||
extern "C" void *dde_dma_alloc(dde_size_t size, dde_size_t align,
|
||||
dde_size_t offset)
|
||||
{
|
||||
void *ptr;
|
||||
if (allocator().alloc_aligned(size, &ptr, Genode::log2(align)).error()) {
|
||||
Genode::error("memory allocation failed in alloc_memblock ("
|
||||
"size=", size, " "
|
||||
"align=", Genode::Hex(align), " "
|
||||
"offset=", Genode::Hex(offset), ")");
|
||||
return 0;
|
||||
}
|
||||
return ptr;
|
||||
return allocator().alloc_aligned(size, Genode::log2(align)).convert<void *>(
|
||||
|
||||
[&] (void *ptr) { return ptr; },
|
||||
|
||||
[&] (Genode::Range_allocator::Alloc_error) -> void * {
|
||||
Genode::error("memory allocation failed in alloc_memblock ("
|
||||
"size=", size, " "
|
||||
"align=", Genode::Hex(align), " "
|
||||
"offset=", Genode::Hex(offset), ")");
|
||||
return nullptr; });
|
||||
}
|
||||
|
||||
|
||||
@ -477,26 +478,53 @@ struct Slab_backend_alloc : public Genode::Allocator,
|
||||
Genode::Allocator_avl _range;
|
||||
Genode::Ram_allocator &_ram;
|
||||
|
||||
bool _alloc_block()
|
||||
struct Extend_ok { };
|
||||
using Extend_result = Genode::Attempt<Extend_ok, Alloc_error>;
|
||||
|
||||
Extend_result _extend_one_block()
|
||||
{
|
||||
using namespace Genode;
|
||||
|
||||
if (_index == ELEMENTS) {
|
||||
error("slab backend exhausted!");
|
||||
return false;
|
||||
return Alloc_error::DENIED;
|
||||
}
|
||||
|
||||
try {
|
||||
_ds_cap[_index] = _ram.alloc(BLOCK_SIZE);
|
||||
Region_map_client::attach_at(_ds_cap[_index], _index * BLOCK_SIZE, BLOCK_SIZE, 0);
|
||||
} catch (...) { return false; }
|
||||
return _ram.try_alloc(BLOCK_SIZE).convert<Extend_result>(
|
||||
|
||||
/* return base + offset in VM area */
|
||||
Genode::addr_t block_base = _base + (_index * BLOCK_SIZE);
|
||||
++_index;
|
||||
[&] (Ram_dataspace_capability ds) -> Extend_result {
|
||||
|
||||
_range.add_range(block_base, BLOCK_SIZE);
|
||||
return true;
|
||||
_ds_cap[_index] = ds;
|
||||
|
||||
Alloc_error error = Alloc_error::DENIED;
|
||||
|
||||
try {
|
||||
Region_map_client::attach_at(_ds_cap[_index],
|
||||
_index * BLOCK_SIZE,
|
||||
BLOCK_SIZE, 0);
|
||||
/* return base + offset in VM area */
|
||||
addr_t block_base = _base + (_index * BLOCK_SIZE);
|
||||
++_index;
|
||||
|
||||
_range.add_range(block_base, BLOCK_SIZE);
|
||||
|
||||
return Extend_ok();
|
||||
}
|
||||
catch (Out_of_ram) { error = Alloc_error::OUT_OF_RAM; }
|
||||
catch (Out_of_caps) { error = Alloc_error::OUT_OF_CAPS; }
|
||||
catch (...) { error = Alloc_error::DENIED; }
|
||||
|
||||
Genode::error("Slab_backend_alloc: local attach_at failed");
|
||||
|
||||
_ram.free(ds);
|
||||
_ds_cap[_index] = { };
|
||||
|
||||
return error;
|
||||
},
|
||||
|
||||
[&] (Alloc_error e) -> Extend_result {
|
||||
error("Slab_backend_alloc: backend allocator exhausted");
|
||||
return e; });
|
||||
}
|
||||
|
||||
Slab_backend_alloc(Genode::Env &env, Genode::Region_map &rm,
|
||||
@ -518,20 +546,15 @@ struct Slab_backend_alloc : public Genode::Allocator,
|
||||
** Allocator interface **
|
||||
*************************/
|
||||
|
||||
bool alloc(Genode::size_t size, void **out_addr)
|
||||
Alloc_result try_alloc(Genode::size_t size) override
|
||||
{
|
||||
bool done = _range.alloc(size, out_addr);
|
||||
Alloc_result result = _range.try_alloc(size);
|
||||
if (result.ok())
|
||||
return result;
|
||||
|
||||
if (done)
|
||||
return done;
|
||||
|
||||
done = _alloc_block();
|
||||
if (!done) {
|
||||
Genode::error("backend allocator exhausted");
|
||||
return false;
|
||||
}
|
||||
|
||||
return _range.alloc(size, out_addr);
|
||||
return _extend_one_block().convert<Alloc_result>(
|
||||
[&] (Extend_ok) { return _range.try_alloc(size); },
|
||||
[&] (Alloc_error error) { return error; });
|
||||
}
|
||||
|
||||
void free(void *addr, Genode::size_t size) { _range.free(addr, size); }
|
||||
@ -562,8 +585,9 @@ class Slab_alloc : public Genode::Slab
|
||||
|
||||
Genode::addr_t alloc()
|
||||
{
|
||||
Genode::addr_t result;
|
||||
return (Slab::alloc(_object_size, (void **)&result) ? result : 0);
|
||||
return Slab::try_alloc(_object_size).convert<Genode::addr_t>(
|
||||
[&] (void *ptr) { return (Genode::addr_t)ptr; },
|
||||
[&] (Alloc_error) -> Genode::addr_t { return 0; });
|
||||
}
|
||||
|
||||
void free(void *ptr) { Slab::free(ptr, _object_size); }
|
||||
|
@ -565,7 +565,7 @@ void *dmam_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handl
|
||||
addr = Lx::Malloc::dma().alloc_large(size);
|
||||
dma_addr = (dma_addr_t) Lx::Malloc::dma().phys_addr(addr);
|
||||
} else
|
||||
addr = Lx::Malloc::dma().alloc(size, 12, &dma_addr);
|
||||
addr = Lx::Malloc::dma().malloc(size, 12, &dma_addr);
|
||||
|
||||
*dma_handle = dma_addr;
|
||||
return addr;
|
||||
@ -702,7 +702,7 @@ int register_netdev(struct net_device * d)
|
||||
|
||||
void *kmem_cache_alloc_node(struct kmem_cache *cache, gfp_t, int)
|
||||
{
|
||||
return (void*)cache->alloc();
|
||||
return (void*)cache->alloc_element();
|
||||
}
|
||||
|
||||
|
||||
|
@ -491,7 +491,7 @@ void dma_pool_free(struct dma_pool *d, void *vaddr, dma_addr_t a)
|
||||
|
||||
void *dma_alloc_coherent(struct device *, size_t size, dma_addr_t *dma, gfp_t)
|
||||
{
|
||||
void *addr = Lx::Malloc::dma().alloc(size, PAGE_SHIFT, dma);
|
||||
void *addr = Lx::Malloc::dma().malloc(size, PAGE_SHIFT, dma);
|
||||
|
||||
if (!addr)
|
||||
return 0;
|
||||
|
@ -509,7 +509,7 @@ int netif_carrier_ok(const struct net_device *dev)
|
||||
|
||||
void *kmem_cache_alloc_node(struct kmem_cache *cache, gfp_t gfp_flags, int arg)
|
||||
{
|
||||
return (void*)cache->alloc();
|
||||
return (void*)cache->alloc_element();
|
||||
}
|
||||
|
||||
|
||||
|
@ -448,7 +448,7 @@ int netif_carrier_ok(const struct net_device *dev)
|
||||
|
||||
void *kmem_cache_alloc_node(struct kmem_cache *cache, gfp_t gfp_flags, int arg)
|
||||
{
|
||||
return (void*)cache->alloc();
|
||||
return (void*)cache->alloc_element();
|
||||
}
|
||||
|
||||
|
||||
|
@ -199,9 +199,12 @@ void kmem_cache_destroy(struct kmem_cache *cache)

void * kmem_cache_alloc(struct kmem_cache *cache, gfp_t flags)
{
void *addr = (void *)cache->alloc();
if (addr && cache->ctor) { cache->ctor(addr); }
return addr;
void * const ptr = cache->alloc_element();

if (ptr && cache->ctor)
cache->ctor(ptr);

return ptr;
}

@ -55,10 +55,11 @@ class Lx::Slab_alloc : public Genode::Slab
_object_size(object_size)
{ }

Genode::addr_t alloc()
void *alloc_element()
{
Genode::addr_t result;
return (Slab::alloc(_object_size, (void **)&result) ? result : 0);
return Slab::try_alloc(_object_size).convert<void *>(
[&] (void *ptr) { return ptr; },
[&] (Alloc_error) { return (void *)nullptr; });
}

void free(void *ptr)

@ -39,7 +39,7 @@ class Lx::Slab_backend_alloc : public Genode::Allocator
/**
* Allocate
*/
virtual bool alloc(Genode::size_t size, void **out_addr) = 0;
virtual Alloc_result try_alloc(Genode::size_t size) = 0;
virtual void free(void *addr) = 0;

/**

@ -41,7 +41,7 @@ class Lx::Malloc : public Genode::Allocator
/**
* Alloc in slabs
*/
virtual void *alloc(Genode::size_t size, int align = 0, Genode::addr_t *phys = 0) = 0;
virtual void *malloc(Genode::size_t size, int align = 0, Genode::addr_t *phys = 0) = 0;

virtual void free(void const *a) = 0;

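The two interface hunks above only swap the pure-virtual signature; implementers now return an 'Alloc_result' directly. A minimal sketch of what an implementation of the new signature can look like follows. The bump-allocator members ('_base', '_size', '_used') are hypothetical and exist only to illustrate the two return paths; the set of overridden members assumes the 'Genode::Allocator' interface as it appears in this commit.

#include <base/allocator.h>

using namespace Genode;

/* hypothetical bump allocator over a fixed buffer, using the new interface */
struct Bump_allocator : Allocator
{
	char  *_base;       /* start of the backing buffer */
	size_t _size;       /* capacity of the buffer      */
	size_t _used = 0;

	Bump_allocator(char *base, size_t size) : _base(base), _size(size) { }

	Alloc_result try_alloc(size_t size) override
	{
		if (_used + size > _size)
			return Alloc_error::DENIED;   /* exhaustion reported as a value */

		void * const ptr = _base + _used;
		_used += size;
		return ptr;                       /* success: pointer converts to Alloc_result */
	}

	void free(void *, size_t) override { /* bump allocators do not free */ }

	size_t consumed()           const override { return _used; }
	size_t overhead(size_t)     const override { return 0; }
	bool   need_size_for_free() const override { return false; }
};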
@ -67,11 +67,7 @@ class Lx::Malloc : public Genode::Allocator

size_t overhead(size_t size) const override { return 0; }

bool alloc(size_t size, void **out_addr) override
{
*out_addr = alloc(size);
return *out_addr ? true : false;
}
Alloc_result try_alloc(size_t size) override { return malloc(size); }

void free(void *addr, size_t size) override { free(addr); }

@ -96,20 +96,18 @@ class Lx_kit::Slab_backend_alloc : public Lx::Slab_backend_alloc,
** Lx::Slab_backend_alloc interface **
**************************************/

bool alloc(size_t size, void **out_addr) override
Alloc_result try_alloc(size_t size) override
{
bool done = _range.alloc(size, out_addr);
Alloc_result result = _range.try_alloc(size);
if (result.ok())
return result;

if (done)
return done;

done = _alloc_block();
if (!done) {
if (!_alloc_block()) {
Genode::error("backend allocator exhausted");
return false;
return Alloc_error::DENIED;
}

return _range.alloc(size, out_addr);
return _range.alloc(size);
}

void free(void *addr) {
@ -232,7 +230,7 @@ class Lx_kit::Malloc : public Lx::Malloc
** Lx::Malloc interface **
**************************/

void *alloc(Genode::size_t size, int align = 0, Genode::addr_t *phys = 0)
void *malloc(Genode::size_t size, int align = 0, Genode::addr_t *phys = 0)
{
using namespace Genode;

@ -257,7 +255,7 @@ class Lx_kit::Malloc : public Lx::Malloc
return 0;
}

addr_t addr = _allocator[msb - SLAB_START_LOG2]->alloc();
addr_t addr = (addr_t)_allocator[msb - SLAB_START_LOG2]->alloc_element();
if (!addr) {
Genode::error("failed to get slab for ", 1 << msb);
return 0;
@ -298,13 +296,14 @@ class Lx_kit::Malloc : public Lx::Malloc

void *alloc_large(size_t size)
{
void *addr;
if (!_back_allocator.alloc(size, &addr)) {
Genode::error("large back end allocation failed (", size, " bytes)");
return nullptr;
}
return _back_allocator.try_alloc(size).convert<void *>(

return addr;
[&] (void *ptr) {
return ptr; },

[&] (Alloc_error) {
Genode::error("large back end allocation failed (", size, " bytes)");
return (void *)nullptr; });
}

void free_large(void *ptr)
@ -95,42 +95,48 @@ void * Lx_kit::Mem_allocator::alloc(size_t size, size_t align)
if (!size)
return nullptr;

void * out_addr = nullptr;
return _mem.alloc_aligned(size, log2(align)).convert<void *>(

if (_mem.alloc_aligned(size, &out_addr, log2(align)).error()) {
[&] (void *ptr) {
memset(ptr, 0, size);
return ptr; },

/*
* Restrict the minimum buffer size to avoid the creation of
* separate dataspaces for tiny allocations.
*/
size_t const min_buffer_size = 256*1024;
[&] (Range_allocator::Alloc_error) {

/*
* Allocate one excess byte that is not officially registered at
* the '_mem' ranges. This way, two virtual consecutive ranges
* (that must be assumed to belong to non-contiguous physical
* ranges) can never be merged when freeing an allocation. Such
* a merge would violate the assumption that both the virtual
* and physical addresses of a multi-page allocation are always
* contiguous.
*/
Attached_dataspace & ds = alloc_dataspace(max(size + 1,
min_buffer_size));
/*
* Restrict the minimum buffer size to avoid the creation of
* separate dataspaces for tiny allocations.
*/
size_t const min_buffer_size = 256*1024;

_mem.add_range((addr_t)ds.local_addr<void>(), ds.size() - 1);
/*
* Allocate one excess byte that is not officially registered at
* the '_mem' ranges. This way, two virtual consecutive ranges
* (that must be assumed to belong to non-contiguous physical
* ranges) can never be merged when freeing an allocation. Such
* a merge would violate the assumption that both the virtual
* and physical addresses of a multi-page allocation are always
* contiguous.
*/
Attached_dataspace & ds = alloc_dataspace(max(size + 1,
min_buffer_size));

/* re-try allocation */
_mem.alloc_aligned(size, &out_addr, log2(align));
}
_mem.add_range((addr_t)ds.local_addr<void>(), ds.size() - 1);

if (!out_addr) {
error("memory allocation failed for ", size, " align ", align);
backtrace();
}
else
memset(out_addr, 0, size);
/* re-try allocation */
return _mem.alloc_aligned(size, log2(align)).convert<void *>(

return out_addr;
[&] (void *ptr) {
memset(ptr, 0, size);
return ptr; },

[&] (Range_allocator::Alloc_error) -> void * {
error("memory allocation failed for ", size, " align ", align);
backtrace();
return nullptr; }
);
}
);
}

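The conversion above nests one 'convert' inside the error branch of another to express "grow the backing store, then retry once". A condensed sketch of that retry idiom, independent of the 'Lx_kit' specifics, follows; the 'grow' callback is hypothetical.

#include <base/allocator.h>

using namespace Genode;

/* hypothetical: allocate from 'alloc', calling 'grow(size)' once on failure */
template <typename GROW_FN>
static void *alloc_with_retry(Allocator &alloc, size_t size, GROW_FN const &grow)
{
	return alloc.try_alloc(size).convert<void *>(

		[&] (void *ptr) { return ptr; },

		[&] (Allocator::Alloc_error) -> void * {

			grow(size);   /* e.g., add a new dataspace range to the allocator */

			/* second and last attempt */
			return alloc.try_alloc(size).convert<void *>(
				[&] (void *ptr) { return ptr; },
				[&] (Allocator::Alloc_error) -> void * { return nullptr; });
		});
}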
@ -123,15 +123,25 @@ void *alloc_large_system_hash(const char *tablename,
unsigned long nlog2 = ilog2(elements);
nlog2 <<= (1 << nlog2) < elements ? 1 : 0;

void *table;
lx_env->heap().alloc(elements * bucketsize, &table);

if (_hash_mask)
*_hash_mask = (1 << nlog2) - 1;
if (_hash_shift)
*_hash_shift = nlog2;
return lx_env->heap().try_alloc(elements * bucketsize).convert<void *>(

return table;
[&] (void *table_ptr) {

if (_hash_mask)
*_hash_mask = (1 << nlog2) - 1;

if (_hash_shift)
*_hash_shift = nlog2;

return table_ptr;
},

[&] (Genode::Allocator::Alloc_error) -> void * {
Genode::error("alloc_large_system_hash allocation failed");
return nullptr;
}
);
}

@ -148,12 +158,12 @@ void *kmalloc_array(size_t n, size_t size, gfp_t flags)

void *kmem_cache_alloc_node(struct kmem_cache *cache, gfp_t flags, int node)
{
return (void*)cache->alloc();
return (void*)cache->alloc_element();
}

void *kmem_cache_zalloc(struct kmem_cache *cache, gfp_t flags)
{
void *addr = (void*)cache->alloc();
void *addr = (void*)cache->alloc_element();
if (addr) { memset(addr, 0, cache->size()); }

return addr;

@ -87,8 +87,12 @@ int request_firmware_nowait(struct module *module, bool uevent,
}

/* use allocator because fw is too big for slab */
if (!Lx_kit::env().heap().alloc(fwl->size, (void**)&fw->data)) {
Genode::error("Could not allocate memory for firmware image");
Lx_kit::env().heap().try_alloc(fwl->size).with_result(
[&] (void *ptr) { fw->data = (u8 *)ptr; },
[&] (Genode::Allocator::Alloc_error) {
Genode::error("Could not allocate memory for firmware image"); });

if (!fw->data) {
kfree(fw);
return -1;
}

@ -427,14 +427,17 @@ void *devm_kzalloc(struct device *dev, size_t size, gfp_t gfp)
void *vmalloc(unsigned long size)
{
size_t real_size = size + sizeof(size_t);
size_t *addr;

if (!Lx_kit::env().heap().alloc(real_size, (void**)&addr)) {
return nullptr;
}
return Lx_kit::env().heap().try_alloc(real_size).convert<void *>(

*addr = real_size;
return addr + 1;
[&] (void *ptr) -> void * {
size_t * const base = (size_t *)ptr;
*base = real_size;
return base + 1;
},

[&] (Genode::Allocator::Alloc_error) -> void * {
return nullptr; });
}

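The 'vmalloc' wrapper above prepends the allocation size to the block so that the matching free path can recover it. A small self-contained sketch of this size-prefix bookkeeping on top of the new interface follows; 'prefixed_alloc', 'prefixed_free', and the heap reference are hypothetical names, the prefix technique itself mirrors the hunk.

#include <base/allocator.h>

using namespace Genode;

/* allocate 'size' bytes, storing the real size in a hidden header word */
static void *prefixed_alloc(Allocator &heap, size_t size)
{
	size_t const real_size = size + sizeof(size_t);

	return heap.try_alloc(real_size).convert<void *>(

		[&] (void *ptr) -> void * {
			size_t * const base = (size_t *)ptr;
			*base = real_size;       /* remember the size for the free path */
			return base + 1; },      /* hand out the area after the header  */

		[&] (Allocator::Alloc_error) -> void * { return nullptr; });
}

/* free a block obtained from prefixed_alloc */
static void prefixed_free(Allocator &heap, void *ptr)
{
	if (!ptr)
		return;

	size_t * const base = (size_t *)ptr - 1;   /* step back to the header */
	heap.free(base, *base);                    /* size recovered from the header */
}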
@ -744,7 +747,7 @@ void *dma_alloc_coherent(struct device *dev, size_t size,
bool const large_alloc = size >= DMA_LARGE_ALLOC_SIZE;
dma_addr_t dma_addr = 0;
void *addr = large_alloc ? Lx::Malloc::dma().alloc_large(size)
: Lx::Malloc::dma().alloc(size, 12, &dma_addr);
: Lx::Malloc::dma().malloc(size, 12, &dma_addr);

if (addr) {
*dma_handle = large_alloc ? Lx::Malloc::dma().phys_addr(addr)

@ -924,7 +927,7 @@ struct page *alloc_pages(gfp_t gfp_mask, unsigned int order)

size_t size = PAGE_SIZE << order;

page->addr = Lx::Malloc::dma().alloc(size, 12);
page->addr = Lx::Malloc::dma().malloc(size, 12);

if (!page->addr) {
Genode::error("alloc_pages: ", size, " failed");
@ -128,37 +128,40 @@ namespace Allocator {
/**
* Allocate
*/
bool alloc(size_t size, void **out_addr)
Alloc_result try_alloc(size_t size) override
{
bool done = _range.alloc(size, out_addr);
Alloc_result result = _range.try_alloc(size);
if (result.ok())
return result;

if (done)
return done;
if (!_alloc_block())
return Alloc_error::DENIED;

done = _alloc_block();
if (!done)
return false;

return _range.alloc(size, out_addr);
return _range.try_alloc(size);
}

void *alloc_aligned(size_t size, unsigned align = 0)
{
void *addr;

if (!_range.alloc_aligned(size, &addr, align).error())
return addr;
Alloc_result result = _range.alloc_aligned(size, align);
if (result.ok())
return result.convert<void *>(
[&] (void *ptr) { return ptr; },
[&] (Alloc_error) -> void * { return nullptr; });

if (!_alloc_block())
return 0;

if (_range.alloc_aligned(size, &addr, align).error()) {
error("backend allocator: Unable to allocate memory "
"(size: ", size, " align: ", align, ")");
return 0;
}
return _range.alloc_aligned(size, align).convert<void *>(

return addr;
[&] (void *ptr) {
return ptr; },

[&] (Alloc_error e) -> void * {
error("backend allocator: Unable to allocate memory "
"(size: ", size, " align: ", align, ")");
return nullptr;
}
);
}

void free(void *addr, size_t size) override { _range.free(addr, size); }

@ -40,13 +40,17 @@ extern "C" void *malloc(size_t size)
* the subsequent address. This way, we can retrieve
* the size information when freeing the block.
*/
unsigned long real_size = size + sizeof(unsigned long);
void *addr = 0;
if (!alloc().alloc(real_size, &addr))
return 0;
unsigned long const real_size = size + sizeof(unsigned long);

*(unsigned long *)addr = real_size;
return (unsigned long *)addr + 1;
return alloc().try_alloc(real_size).convert<void *>(

[&] (void *ptr) {

*(unsigned long *)ptr = real_size;
return (unsigned long *)ptr + 1; },

[&] (Allocator::Alloc_error) {
return nullptr; });
}

@ -73,18 +73,19 @@ class Genode::Cached_font : public Text_painter::Font

size_t consumed_bytes() const { return _consumed_bytes; }

bool alloc(size_t size, void **out_addr) override
Alloc_result try_alloc(size_t size) override
{
size = _padded(size);

bool const result = _alloc.alloc(size, out_addr);
return _alloc.try_alloc(size).convert<Alloc_result>(

if (result) {
memset(*out_addr, 0, size);
_consumed_bytes += size + overhead(size);
}
[&] (void *ptr) {
memset(ptr, 0, size);
_consumed_bytes += size + overhead(size);
return ptr; },

return result;
[&] (Alloc_error error) {
return error; });
}

size_t consumed() const override { return _alloc.consumed(); }
@ -98,24 +98,28 @@ struct Text_area::Dynamic_array
size_t const new_capacity =
2 * max(_capacity, max(8U, at.value));

Element *new_array = nullptr;
try {
(void)_alloc.alloc(sizeof(Element)*new_capacity, &new_array);
_alloc.try_alloc(sizeof(Element)*new_capacity).with_result(

for (unsigned i = 0; i < new_capacity; i++)
construct_at<Element>(&new_array[i]);
}
catch (... /* Out_of_ram, Out_of_caps */ ) { throw; }
[&] (void *ptr) {

if (_array) {
for (unsigned i = 0; i < _upper_bound; i++)
new_array[i].construct(*_array[i]);
Element *new_array = (Element *)ptr;

_alloc.free(_array, sizeof(Element)*_capacity);
}
for (unsigned i = 0; i < new_capacity; i++)
construct_at<Element>(&new_array[i]);

_array = new_array;
_capacity = new_capacity;
if (_array) {
for (unsigned i = 0; i < _upper_bound; i++)
new_array[i].construct(*_array[i]);

_alloc.free(_array, sizeof(Element)*_capacity);
}

_array = new_array;
_capacity = new_capacity;
},
[&] (Allocator::Alloc_error e) {
Allocator::throw_alloc_error(e); }
);
}

/* make room for new element */

@ -87,8 +87,17 @@ void Http::resolve_uri()
throw Http::Uri_error();
}

_heap.alloc(sizeof(struct addrinfo), (void**)&_info);
Genode::memcpy(_info, info, sizeof(struct addrinfo));
_heap.try_alloc(sizeof(struct addrinfo)).with_result(

[&] (void *ptr) {
_info = (struct addrinfo *)ptr;
Genode::memcpy(_info, info, sizeof(struct addrinfo));
},

[&] (Allocator::Alloc_error) {
throw Http::Uri_error();
}
);
}

@ -180,7 +189,9 @@ void Http::do_read(void * buf, size_t size)

Http::Http(Genode::Heap &heap, ::String const &uri)
: _heap(heap), _port((char *)"80")
{
_heap.alloc(HTTP_BUF, (void**)&_http_buf);
_heap.try_alloc(HTTP_BUF).with_result(
[&] (void *ptr) { _http_buf = (char *)ptr; },
[&] (Allocator::Alloc_error) { });

/* parse URI */
parse_uri(uri);

@ -221,11 +232,31 @@ void Http::parse_uri(::String const &u)
size_t i;
for (i = 0; i < length && uri[i] != '/'; i++) ;

_heap.alloc(i + 1, (void**)&_host);
copy_cstring(_host, uri, i + 1);
/*
* \param len number of cstring bytes w/o null-termination
*/
auto copied_cstring = [&] (char const *src, size_t len) -> char *
{
size_t const bytes = len + 1;

_heap.alloc(length - i + 1, (void**)&_path);
copy_cstring(_path, uri + i, length - i + 1);
return _heap.try_alloc(bytes).convert<char *>(

[&] (void *ptr) {
char *dst = (char *)ptr;
copy_cstring(dst, src, bytes);
return dst; },

[&] (Allocator::Alloc_error) -> char * {
return nullptr; });
};

_host = copied_cstring(uri, i);
_path = copied_cstring(uri + i, length - i);

if (!_host || !_path) {
error("allocation failure during Http::parse_uri");
return;
}

/* look for port */
size_t len = Genode::strlen(_host);
@ -27,11 +27,7 @@ struct Libc::Allocator : Genode::Allocator
{
typedef Genode::size_t size_t;

bool alloc(size_t size, void **out_addr) override
{
*out_addr = malloc(size);
return true;
}
Alloc_result try_alloc(size_t size) override { return malloc(size); }

void free(void *addr, size_t size) override { ::free(addr); }

@ -1 +1 @@
58ab991bee9d68f213ae71f04c796de0490f3b0e
68b8eb5bfa950adf094fe9e6c579e6d542dd6c63

@ -261,9 +261,9 @@ extern "C" int getpid()

extern "C" void *malloc(size_t size)
{
void *res = nullptr;
gcov_env->heap.alloc(size, &res);
return res;
return gcov_env->heap.try_alloc(size).convert<void *>(
[&] (void *ptr) { return ptr; },
[&] (Genode::Allocator::Alloc_error) -> void * { return nullptr; });
}

@ -50,7 +50,7 @@ Libc::Mem_alloc_impl::Dataspace_pool::~Dataspace_pool()
int Libc::Mem_alloc_impl::Dataspace_pool::expand(size_t size, Range_allocator *alloc)
{
Ram_dataspace_capability new_ds_cap;
void *local_addr, *ds_addr = 0;
void *local_addr;

/* make new ram dataspace available at our local address space */
try {

@ -71,16 +71,17 @@ int Libc::Mem_alloc_impl::Dataspace_pool::expand(size_t size, Range_allocator *a
alloc->add_range((addr_t)local_addr, size);

/* now that we have new backing store, allocate Dataspace structure */
if (alloc->alloc_aligned(sizeof(Dataspace), &ds_addr, 2).error()) {
warning("libc: could not allocate meta data - this should never happen");
return -1;
}
return alloc->alloc_aligned(sizeof(Dataspace), 2).convert<int>(

/* add dataspace information to list of dataspaces */
Dataspace *ds = construct_at<Dataspace>(ds_addr, new_ds_cap, local_addr);
insert(ds);
[&] (void *ptr) {
/* add dataspace information to list of dataspaces */
Dataspace *ds = construct_at<Dataspace>(ptr, new_ds_cap, local_addr);
insert(ds);
return 0; },

return 0;
[&] (Allocator::Alloc_error) {
warning("libc: could not allocate meta data - this should never happen");
return -1; });
}

@ -89,9 +90,14 @@ void *Libc::Mem_alloc_impl::alloc(size_t size, size_t align_log2)
/* serialize access of heap functions */
Mutex::Guard guard(_mutex);

void *out_addr = nullptr;

/* try allocation at our local allocator */
void *out_addr = 0;
if (_alloc.alloc_aligned(size, &out_addr, align_log2).ok())
_alloc.alloc_aligned(size, align_log2).with_result(
[&] (void *ptr) { out_addr = ptr; },
[&] (Allocator::Alloc_error) { });

if (out_addr)
return out_addr;

/*

@ -119,7 +125,11 @@ void *Libc::Mem_alloc_impl::alloc(size_t size, size_t align_log2)
}

/* allocate originally requested block */
return _alloc.alloc_aligned(size, &out_addr, align_log2).ok() ? out_addr : 0;
_alloc.alloc_aligned(size, align_log2).with_result(
[&] (void *ptr) { out_addr = ptr; },
[&] (Allocator::Alloc_error) { });

return out_addr;
}

@ -63,8 +63,9 @@ class Libc::Slab_alloc : public Slab

void *alloc()
{
void *result;
return (Slab::alloc(_object_size, &result) ? result : 0);
return Slab::try_alloc(_object_size).convert<void *>(
[&] (void *ptr) { return ptr; },
[&] (Alloc_error) { return nullptr; });
}

void free(void *ptr) { Slab::free(ptr, _object_size); }

@ -167,7 +168,9 @@ class Libc::Malloc

/* use backing store if requested memory is larger than largest slab */
if (msb > SLAB_STOP)
_backing_store.alloc(real_size, &alloc_addr);
_backing_store.try_alloc(real_size).with_result(
[&] (void *ptr) { alloc_addr = ptr; },
[&] (Allocator::Alloc_error) { });
else
alloc_addr = _slabs[msb - SLAB_START]->alloc();

@ -89,19 +89,20 @@ extern "C" {

void *genode_malloc(unsigned long size)
{
void *ptr = nullptr;
return Lwip::_heap->alloc(size, &ptr) ? ptr : 0;
return Lwip::_heap->try_alloc(size).convert<void *>(
[&] (void *ptr) { return ptr; },
[&] (Genode::Allocator::Alloc_error) -> void * { return nullptr; });
}

void *genode_calloc(unsigned long number, unsigned long size)
{
void *ptr = nullptr;
size *= number;
if (Lwip::_heap->alloc(size, &ptr)) {

void * const ptr = genode_malloc(size);
if (ptr)
Genode::memset(ptr, 0x00, size);
return ptr;
}
return nullptr;

return ptr;
}

u32_t sys_now(void) {
@ -23,7 +23,7 @@ new file mode 100644
index 0000000..03b1740
--- /dev/null
+++ b/sanitizer_common/sanitizer_genode.cc
@@ -0,0 +1,331 @@
@@ -0,0 +1,335 @@
+/*
+ * \brief Genode-specific functions from sanitizer_common.h
+ * and sanitizer_libc.h

@ -205,14 +205,18 @@ index 0000000..03b1740
+{
+ size = RoundUpTo(size, GetPageSizeCached());
+
+ void *res = nullptr;
+ heap().alloc(size, &res);
+ return heap().try_alloc(size).convert<void *>(
+
+ if (res == nullptr)
+ ReportMmapFailureAndDie(size, mem_type, "allocate", 0, raw_report);
+ [&] (void *ptr) {
+ IncreaseTotalMmap(size);
+ return ptr;
+ },
+
+ IncreaseTotalMmap(size);
+ return res;
+ [&] (Genode::Allocator::Alloc_error) -> void * {
+ ReportMmapFailureAndDie(size, mem_type, "allocate", 0, raw_report);
+ return nullptr;
+ }
+ );
+}
+
+

@ -77,29 +77,50 @@ class Genode::Packet_allocator : public Genode::Range_allocator
** Range-allocator interface **
*******************************/

int add_range(addr_t const base, size_t const size) override
Range_result add_range(addr_t const base, size_t const size) override
{
if (_base || _array) return -1;
if (_base || _array)
return Alloc_error::DENIED;

size_t const bits_cnt = _bits_cnt(size);

_base = base;
_bits = (addr_t *)_md_alloc->alloc(bits_cnt / 8);
memset(_bits, 0, bits_cnt / 8);

_array = new (_md_alloc) Bit_array_base(bits_cnt, _bits);
Alloc_error error = Alloc_error::DENIED;

/* reserve bits which are unavailable */
size_t const max_cnt = size / _block_size;
if (bits_cnt > max_cnt)
_array->set(max_cnt, bits_cnt - max_cnt);
size_t const bits_bytes = bits_cnt / 8;

return 0;
try {
_bits = (addr_t *)_md_alloc->alloc(bits_bytes);
memset(_bits, 0, bits_cnt / 8);

_array = new (_md_alloc) Bit_array_base(bits_cnt, _bits);

/* reserve bits which are unavailable */
size_t const max_cnt = size / _block_size;
if (bits_cnt > max_cnt)
_array->set(max_cnt, bits_cnt - max_cnt);

return Range_ok();

}
catch (Out_of_ram) { error = Alloc_error::OUT_OF_RAM; }
catch (Out_of_caps) { error = Alloc_error::OUT_OF_CAPS; }
catch (...) { error = Alloc_error::DENIED; }

if (_bits)
_md_alloc->free(_bits, bits_bytes);

if (_array)
destroy(_md_alloc, _array);

return error;
}

int remove_range(addr_t base, size_t size) override
Range_result remove_range(addr_t base, size_t size) override
{
if (_base != base) return -1;
if (_base != base)
return Alloc_error::DENIED;

_base = _next = 0;

@ -113,17 +134,15 @@ class Genode::Packet_allocator : public Genode::Range_allocator
_bits = nullptr;
}

return 0;
return Range_ok();
}

Alloc_return alloc_aligned(size_t size, void **out_addr, unsigned,
Range) override
Alloc_result alloc_aligned(size_t size, unsigned, Range) override
{
return alloc(size, out_addr) ? Alloc_return::OK
: Alloc_return::RANGE_CONFLICT;
return try_alloc(size);
}

bool alloc(size_t size, void **out_addr) override
Alloc_result try_alloc(size_t size) override
{
addr_t const cnt = (size % _block_size) ? size / _block_size + 1
: size / _block_size;

@ -138,9 +157,8 @@ class Genode::Packet_allocator : public Genode::Range_allocator

_array->set(i, cnt);
_next = i + cnt;
*out_addr = reinterpret_cast<void *>(i * _block_size
+ _base);
return true;
return reinterpret_cast<void *>(i * _block_size
+ _base);
}
} catch (typename Bit_array_base::Invalid_index_access) { }

@ -149,7 +167,7 @@ class Genode::Packet_allocator : public Genode::Range_allocator

} while (max != 0);

return false;
return Alloc_error::DENIED;
}

void free(void *addr, size_t size) override

@ -171,8 +189,8 @@ class Genode::Packet_allocator : public Genode::Range_allocator
size_t overhead(size_t) const override { return 0;}
size_t avail() const override { return 0; }
bool valid_addr(addr_t) const override { return 0; }
Alloc_return alloc_addr(size_t, addr_t) override {
return Alloc_return(Alloc_return::OUT_OF_METADATA); }
Alloc_result alloc_addr(size_t, addr_t) override {
return Alloc_error::DENIED; }
};

#endif /* _INCLUDE__OS__PACKET_ALLOCATOR__ */

@ -769,11 +769,16 @@ class Genode::Packet_stream_source : private Packet_stream_base
*/
Packet_descriptor alloc_packet(Genode::size_t size, unsigned align = PACKET_ALIGNMENT)
{
void *base = 0;
if (size && _packet_alloc.alloc_aligned(size, &base, align).error())
throw Packet_alloc_failed();
if (size == 0)
return Packet_descriptor(0, 0);

return Packet_descriptor((Genode::off_t)base, size);
return _packet_alloc.alloc_aligned(size, align).convert<Packet_descriptor>(

[&] (void *base) {
return Packet_descriptor((Genode::off_t)base, size); },

[&] (Allocator::Alloc_error) -> Packet_descriptor {
throw Packet_alloc_failed(); });
}

/**

@ -55,13 +55,36 @@ class Igd::Ppgtt_allocator : public Genode::Translation_table_allocator
** Allocator interface **
*************************/

bool alloc(size_t size, void **out_addr) override
Alloc_result try_alloc(size_t size) override
{
Genode::Ram_dataspace_capability ds =
_backend.alloc(size, _caps_guard, _ram_guard);
Genode::Ram_dataspace_capability ds { };

*out_addr = _rm.attach(ds);
return _map.add(ds, *out_addr);
try {
ds = _backend.alloc(size, _caps_guard, _ram_guard);
}
catch (Genode::Out_of_ram) { return Alloc_error::OUT_OF_RAM; }
catch (Genode::Out_of_caps) { return Alloc_error::OUT_OF_CAPS; }
catch (...) { return Alloc_error::DENIED; }

Alloc_error alloc_error = Alloc_error::DENIED;

try {
void * const ptr = _rm.attach(ds);

if (_map.add(ds, ptr))
return ptr;

/* _map.add failed, roll back _rm.attach */
_rm.detach(ptr);
}
catch (Genode::Out_of_ram) { alloc_error = Alloc_error::OUT_OF_RAM; }
catch (Genode::Out_of_caps) { alloc_error = Alloc_error::OUT_OF_CAPS; }
catch (...) { alloc_error = Alloc_error::DENIED; }

/* roll back allocation */
_backend.free(ds);

return alloc_error;
}

void free(void *addr, size_t) override
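The hunk above maps the exception-based backend ('Out_of_ram', 'Out_of_caps') onto the three 'Alloc_error' codes and rolls partially completed work back before returning. Because the codes survive as values, a caller can still distinguish them, for instance for diagnostics. A hedged sketch follows; the helper name 'alloc_verbose' is hypothetical, only the enumerators are taken from the diff.

#include <base/allocator.h>
#include <base/log.h>

using namespace Genode;

/* hypothetical helper: distinguish the three error codes when logging */
static void *alloc_verbose(Allocator &alloc, size_t size)
{
	return alloc.try_alloc(size).convert<void *>(

		[&] (void *ptr) { return ptr; },

		[&] (Allocator::Alloc_error e) -> void * {
			switch (e) {
			case Allocator::Alloc_error::OUT_OF_RAM:
				error("out of RAM while allocating ", size, " bytes");
				break;
			case Allocator::Alloc_error::OUT_OF_CAPS:
				error("out of capabilities while allocating ", size, " bytes");
				break;
			case Allocator::Alloc_error::DENIED:
				error("allocation of ", size, " bytes denied");
				break;
			}
			return nullptr; });
}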
@ -311,14 +311,15 @@ class Volume_descriptor : public Iso::Iso_base
/* copy the root record */
Directory_record *copy_root_record(Genode::Allocator &alloc)
{
Directory_record *buf;
return alloc.try_alloc(ROOT_SIZE).convert<Directory_record *>(

if (!(alloc.alloc(ROOT_SIZE, &buf)))
throw Insufficient_ram_quota();
[&] (void *ptr) -> Directory_record * {
memcpy(ptr, root_record(), ROOT_SIZE);
return (Directory_record *)ptr; },

memcpy(buf, root_record(), ROOT_SIZE);

return buf;
[&] (Allocator::Alloc_error e) -> Directory_record * {
Allocator::throw_alloc_error(e); }
);
}
};

@ -39,8 +39,13 @@ struct Test
{
log("\nTEST ", id, ": ", brief, "\n");
for (unsigned i = 0; i < 2; i++) {
if (!heap.alloc(fb_ds.size(), (void **)&buf[i])) {
env.parent().exit(-1); }
heap.try_alloc(fb_ds.size()).with_result(
[&] (void *ptr) { buf[i] = (char *)ptr; },
[&] (Allocator::Alloc_error e) {
env.parent().exit(-1);
Allocator::throw_alloc_error(e);
}
);
}
/* fill one memory buffer with white pixels */
memset(buf[1], ~0, fb_ds.size());

@ -67,12 +67,20 @@ struct Allocator_tracer : Allocator

Allocator_tracer(Allocator &wrapped) : wrapped(wrapped) { }

bool alloc(size_t size, void **out_addr) override
Alloc_result try_alloc(size_t size) override
{
sum += size;
bool result = wrapped.alloc(size, out_addr);
new (wrapped) Alloc(allocs, Alloc::Id { (addr_t)*out_addr }, size);
return result;
return wrapped.try_alloc(size).convert<Alloc_result>(

[&] (void *ptr) {
sum += size;
new (wrapped) Alloc(allocs, Alloc::Id { (addr_t)ptr }, size);
return ptr;
},

[&] (Allocator::Alloc_error error) {
return error;
}
);
}

void free(void *addr, size_t size) override
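With the change above, the tracer records an 'Alloc' entry and updates 'sum' only on the success path, so its bookkeeping can no longer drift on failed allocations. A hedged usage sketch of such a tracing decorator in a test follows; it assumes it is compiled alongside the 'Allocator_tracer' definition above, and the 'Heap' construction and leak check are illustrative rather than taken from the commit.

#include <base/heap.h>
#include <base/log.h>

using namespace Genode;

/* assumed to run inside a component that has access to 'env' */
static void trace_allocations(Env &env)
{
	Heap             heap(env.ram(), env.rm());
	Allocator_tracer tracer(heap);          /* decorate the heap */

	/* allocate through the tracer instead of the heap directly */
	void *ptr = tracer.try_alloc(4096).convert<void *>(
		[&] (void *p) { return p; },
		[&] (Allocator::Alloc_error) -> void * { return nullptr; });

	if (ptr)
		tracer.free(ptr, 4096);

	/* 'sum' accumulates the sizes of successful allocations only */
	log("traced ", tracer.sum, " bytes of allocations");
}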