mirror of https://github.com/genodelabs/genode.git
synced 2025-06-16 06:08:16 +00:00
base: introduce Allocator::try_alloc
This patch changes the 'Allocator' interface to use 'Attempt' return
values instead of exceptions for propagating errors. To largely uphold
compatibility with components using the original exception-based
interface - in particular use cases where an 'Allocator' is passed to
the 'new' operator - the traditional 'alloc' is still supported, but it
exists merely as a wrapper around the new 'try_alloc'.

Issue #4324
committed by Christian Helmuth
parent 9591e6caee
commit dc39a8db62
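In a nutshell: 'try_alloc' returns an 'Alloc_result', an 'Attempt' value that
carries either the allocated pointer or an 'Alloc_error', and callers unpack it
with 'with_result' or fold it into a plain value with 'convert<T>'. The
following minimal sketch shows the shape of the reworked interface as it is
used throughout the hunks below. It is not the verbatim Genode header, and the
error-to-exception mapping of the compatibility wrapper is an assumption for
illustration.

	struct Allocator
	{
		enum class Alloc_error { OUT_OF_RAM, OUT_OF_CAPS, DENIED };

		/* 'Attempt' holds either a 'void *' result or an 'Alloc_error' code */
		using Alloc_result = Attempt<void *, Alloc_error>;

		/* new exception-less allocation interface */
		virtual Alloc_result try_alloc(size_t size) = 0;

		/*
		 * Traditional 'alloc', retained so that existing components -
		 * in particular uses of the 'new (alloc)' operator - keep working.
		 * It merely wraps 'try_alloc' and reflects errors as exceptions
		 * (illustrative mapping, not the verbatim implementation).
		 */
		void *alloc(size_t size)
		{
			return try_alloc(size).convert<void *>(
				[&] (void *ptr)     { return ptr; },
				[&] (Alloc_error e) -> void * {
					switch (e) {
					case Alloc_error::OUT_OF_RAM:  throw Out_of_ram();
					case Alloc_error::OUT_OF_CAPS: throw Out_of_caps();
					case Alloc_error::DENIED:      break;
					}
					throw Out_of_memory();
				});
		}
	};

A typical call site handles both outcomes in place, for example

	alloc.try_alloc(bytes).with_result(
		[&] (void *ptr)              { /* use ptr */ },
		[&] (Allocator::Alloc_error) { /* report or reflect the error */ });

as the converted code in the hunks below does throughout.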
@@ -38,61 +38,67 @@ static inline bool can_use_super_page(addr_t, size_t)
 }
 
 
-addr_t Io_mem_session_component::_map_local(addr_t base, size_t size)
+addr_t Io_mem_session_component::_map_local(addr_t phys_base, size_t size)
 {
-	using namespace Fiasco;
+	auto map_io_region = [] (addr_t phys_base, addr_t local_base, size_t size)
+	{
+		using namespace Fiasco;
+
+		l4_threadid_t const sigma0 = sigma0_threadid;
+
+		unsigned offset = 0;
+		while (size) {
+
+			/*
+			 * Call sigma0 for I/O region
+			 */
+
+			/* special case for page0, which is RAM in sigma0/x86 */
+			l4_umword_t const request = (phys_base + offset == 0)
+			                          ? SIGMA0_REQ_FPAGE_RAM
+			                          : SIGMA0_REQ_FPAGE_IOMEM;
+
+			size_t const size_log2 = can_use_super_page(phys_base + offset, size)
+			                       ? get_super_page_size_log2()
+			                       : get_page_size_log2();
+
+			l4_umword_t  dw0 = 0, dw1 = 0;
+			l4_msgdope_t result { };
+			l4_msgtag_t  tag    { };
+
+			int const err =
+				l4_ipc_call_tag(sigma0,
+				                L4_IPC_SHORT_MSG,
+				                request,
+				                l4_fpage(phys_base + offset, size_log2, 0, 0).fpage,
+				                l4_msgtag(L4_MSGTAG_SIGMA0, 0, 0, 0),
+				                L4_IPC_MAPMSG(local_base + offset, size_log2),
+				                &dw0, &dw1,
+				                L4_IPC_NEVER, &result, &tag);
+
+			if (err || !l4_ipc_fpage_received(result)) {
+				error("map_local failed err=", err, " "
+				      "(", l4_ipc_fpage_received(result), ")");
+				return;
+			}
+
+			offset += 1 << size_log2;
+			size   -= 1 << size_log2;
+		}
+	};
 
 	/* align large I/O dataspaces on a super-page boundary within core */
-	size_t alignment = (size >= get_super_page_size()) ? get_super_page_size_log2()
-	                                                   : get_page_size_log2();
+	size_t align = (size >= get_super_page_size()) ? get_super_page_size_log2()
+	                                               : get_page_size_log2();
 
 	/* find appropriate region for mapping */
-	void *local_base = 0;
-	if (platform().region_alloc().alloc_aligned(size, &local_base, alignment).error())
-		return 0;
-
-	/* call sigma0 for I/O region */
-	int err;
-	l4_umword_t request;
-	l4_umword_t dw0, dw1;
-	l4_msgdope_t result;
-	l4_msgtag_t tag;
-
-	l4_threadid_t sigma0 = sigma0_threadid;
-
-	unsigned offset = 0;
-	while (size) {
-
-		/* FIXME what about caching demands? */
-		/* FIXME what about read / write? */
-
-		/* special case for page0, which is RAM in sigma0/x86 */
-		if (base + offset == 0)
-			request = SIGMA0_REQ_FPAGE_RAM;
-		else
-			request = SIGMA0_REQ_FPAGE_IOMEM;
-
-		size_t page_size_log2 = get_page_size_log2();
-		if (can_use_super_page(base + offset, size))
-			page_size_log2 = get_super_page_size_log2();
-
-		err = l4_ipc_call_tag(sigma0,
-		                      L4_IPC_SHORT_MSG,
-		                      request,
-		                      l4_fpage(base + offset, page_size_log2, 0, 0).fpage,
-		                      l4_msgtag(L4_MSGTAG_SIGMA0, 0, 0, 0),
-		                      L4_IPC_MAPMSG((addr_t)local_base + offset, page_size_log2),
-		                      &dw0, &dw1,
-		                      L4_IPC_NEVER, &result, &tag);
-
-		if (err || !l4_ipc_fpage_received(result)) {
-			error("map_local failed err=", err, " "
-			      "(", l4_ipc_fpage_received(result), ")");
-			return 0;
-		}
-
-		offset += 1 << page_size_log2;
-		size   -= 1 << page_size_log2;
-	}
-
-	return (addr_t)local_base;
+	return platform().region_alloc().alloc_aligned(size, align).convert<addr_t>(
+
+		[&] (void *ptr) {
+			addr_t const core_local_base = (addr_t)ptr;
+			map_io_region(phys_base, core_local_base, size);
+			return core_local_base; },
+
+		[&] (Range_allocator::Alloc_error) -> addr_t {
+			error("core-local mapping of memory-mapped I/O range failed");
+			return 0; });
 }
@@ -130,7 +130,7 @@ Irq_session_component::Irq_session_component(Range_allocator &irq_alloc,
 	if (msi)
 		throw Service_denied();
 
-	if (irq_alloc.alloc_addr(1, _irq_number).error()) {
+	if (irq_alloc.alloc_addr(1, _irq_number).failed()) {
 		error("unavailable IRQ ", _irq_number, " requested");
 		throw Service_denied();
 	}
@@ -445,51 +445,42 @@ Platform::Platform()
 	fiasco_register_thread_name(core_thread.native_thread_id(),
 	                            core_thread.name().string());
 
-	/* core log as ROM module */
+	auto export_page_as_rom_module = [&] (auto rom_name, auto content_fn)
 	{
-		void * phys_ptr = nullptr;
-		unsigned const pages  = 1;
-		size_t const log_size = pages << get_page_size_log2();
-
-		ram_alloc().alloc_aligned(log_size, &phys_ptr, get_page_size_log2());
-		addr_t const phys_addr = reinterpret_cast<addr_t>(phys_ptr);
-
-		void * const core_local_ptr  = phys_ptr;
-		addr_t const core_local_addr = phys_addr;
-
-		/* let one page free after the log buffer */
-		region_alloc().remove_range(core_local_addr, log_size + get_page_size());
-
-		memset(core_local_ptr, 0, log_size);
-
-		_rom_fs.insert(new (core_mem_alloc()) Rom_module(phys_addr, log_size,
-		                                                 "core_log"));
-
-		init_core_log(Core_log_range { core_local_addr, log_size } );
-	}
+		size_t const size = 1 << get_page_size_log2();
+		ram_alloc().alloc_aligned(size, get_page_size_log2()).with_result(
+
+			[&] (void *phys_ptr) {
+
+				/* core-local memory is one-to-one mapped physical RAM */
+				addr_t const phys_addr      = reinterpret_cast<addr_t>(phys_ptr);
+				void * const core_local_ptr = phys_ptr;
+
+				region_alloc().remove_range((addr_t)core_local_ptr, size);
+				memset(core_local_ptr, 0, size);
+				content_fn(core_local_ptr, size);
+
+				_rom_fs.insert(new (core_mem_alloc())
+				               Rom_module(phys_addr, size, rom_name));
+			},
+			[&] (Range_allocator::Alloc_error) {
+				warning("failed to export ", rom_name, " as ROM module"); }
+		);
+	};
+
+	/* core log as ROM module */
+	export_page_as_rom_module("core_log",
+		[&] (void *core_local_ptr, size_t size) {
+			init_core_log(Core_log_range { (addr_t)core_local_ptr, size } ); });
 
 	/* export platform specific infos */
-	{
-		void * phys_ptr = nullptr;
-		size_t const size = 1 << get_page_size_log2();
-
-		if (ram_alloc().alloc_aligned(size, &phys_ptr,
-		                              get_page_size_log2()).ok()) {
-
-			addr_t const phys_addr       = reinterpret_cast<addr_t>(phys_ptr);
-			addr_t const core_local_addr = phys_addr;
-
-			region_alloc().remove_range(core_local_addr, size);
-
-			Genode::Xml_generator xml(reinterpret_cast<char *>(core_local_addr),
-			                          size, "platform_info", [&] ()
-			{
-				xml.node("kernel", [&] () { xml.attribute("name", "fiasco"); });
-			});
-
-			_rom_fs.insert(new (core_mem_alloc()) Rom_module(phys_addr, size,
-			                                                 "platform_info"));
-		}
-	}
+	export_page_as_rom_module("platform_info",
+		[&] (void *core_local_ptr, size_t size) {
+			Xml_generator xml(reinterpret_cast<char *>(core_local_ptr),
+			                  size, "platform_info",
+				[&] () {
+					xml.node("kernel", [&] () {
+						xml.attribute("name", "fiasco"); }); }); });
 }
@@ -34,14 +34,18 @@ addr_t Io_mem_session_component::_map_local(addr_t base, size_t size)
 	                                               : get_page_size_log2();
 
 	/* find appropriate region for mapping */
-	void *local_base = 0;
-	if (platform().region_alloc().alloc_aligned(size, &local_base, alignment).error())
-		return 0;
-
-	if (!map_local_io(base, (addr_t)local_base, size >> get_page_size_log2())) {
-		error("map_local_io failed");
-		return 0;
-	}
-
-	return (addr_t)local_base;
+	return platform().region_alloc().alloc_aligned(size, alignment).convert<addr_t>(
+
+		[&] (void *local_base) {
+			if (!map_local_io(base, (addr_t)local_base, size >> get_page_size_log2())) {
+				error("map_local_io failed");
+				platform().region_alloc().free(local_base, base);
+				return 0UL;
+			}
+			return (addr_t)local_base;
+		},
+
+		[&] (Range_allocator::Alloc_error) {
+			error("allocation of virtual memory for local I/O mapping failed");
+			return 0UL; });
 }
@@ -196,7 +196,7 @@ Irq_session_component::Irq_session_component(Range_allocator &irq_alloc,
 		}
 		msi_alloc.set(_irq_number, 1);
 	} else {
-		if (irq_alloc.alloc_addr(1, _irq_number).error()) {
+		if (irq_alloc.alloc_addr(1, _irq_number).failed()) {
 			error("unavailable IRQ ", _irq_number, " requested");
 			throw Service_denied();
 		}
@@ -467,75 +467,68 @@ Platform::Platform()
 	core_thread.pager(_sigma0);
 	_core_pd->bind_thread(core_thread);
 
-	/* export x86 platform specific infos */
-	{
-		void * core_local_ptr = nullptr;
-		void * phys_ptr       = nullptr;
-		unsigned const pages  = 1;
-		size_t const align    = get_page_size_log2();
-		size_t const size     = pages << get_page_size_log2();
-
-		if (ram_alloc().alloc_aligned(size, &phys_ptr, align).error())
-			return;
-
-		if (region_alloc().alloc_aligned(size, &core_local_ptr, align).error())
-			return;
-
-		addr_t const phys_addr       = reinterpret_cast<addr_t>(phys_ptr);
-		addr_t const core_local_addr = reinterpret_cast<addr_t>(core_local_ptr);
-
-		if (!map_local(phys_addr, core_local_addr, pages))
-			return;
-
-		memset(core_local_ptr, 0, size);
-
-		Xml_generator xml(reinterpret_cast<char *>(core_local_addr),
-		                  pages << get_page_size_log2(),
-		                  "platform_info", [&] ()
-		{
-			xml.node("kernel", [&] () {
-				xml.attribute("name", "foc");
-				xml.attribute("acpi", true);
-				xml.attribute("msi" , true);
-			});
-			xml.node("hardware", [&] () {
-				_setup_platform_info(xml, sigma0_map_kip()); });
-
-			xml.node("affinity-space", [&] () {
-				xml.attribute("width", affinity_space().width());
-				xml.attribute("height", affinity_space().height()); });
-		});
-
-		_rom_fs.insert(new (core_mem_alloc()) Rom_module(phys_addr, size,
-		                                                 "platform_info"));
-	}
-
-	/* core log as ROM module */
-	{
-		void * core_local_ptr = nullptr;
-		void * phys_ptr       = nullptr;
-		unsigned const pages  = 1;
-		size_t const align    = get_page_size_log2();
-		size_t const size     = pages << get_page_size_log2();
-
-		if (ram_alloc().alloc_aligned(size, &phys_ptr, align).error())
-			return;
-		if (region_alloc().alloc_aligned(size, &core_local_ptr, align).error())
-			return;
-
-		addr_t const phys_addr       = reinterpret_cast<addr_t>(phys_ptr);
-		addr_t const core_local_addr = reinterpret_cast<addr_t>(core_local_ptr);
-
-		if (!map_local(phys_addr, core_local_addr, pages))
-			return;
-
-		memset(core_local_ptr, 0, size);
-
-		_rom_fs.insert(new (core_mem_alloc()) Rom_module(phys_addr, size,
-		                                                 "core_log"));
-
-		init_core_log(Core_log_range { core_local_addr, size } );
-	}
+	auto export_page_as_rom_module = [&] (auto rom_name, auto content_fn)
+	{
+		size_t const pages = 1;
+		size_t const align = get_page_size_log2();
+		size_t const bytes = pages << get_page_size_log2();
+		ram_alloc().alloc_aligned(bytes, align).with_result(
+
+			[&] (void *phys_ptr) {
+
+				addr_t const phys_addr = reinterpret_cast<addr_t>(phys_ptr);
+
+				region_alloc().alloc_aligned(bytes, align).with_result(
+					[&] (void *core_local_ptr) {
+
+						if (!map_local(phys_addr, (addr_t)core_local_ptr, pages)) {
+							warning("map_local failed while exporting ",
+							        rom_name, " as ROM module");
+							ram_alloc().free(phys_ptr, bytes);
+							region_alloc().free(core_local_ptr, bytes);
+							return;
+						}
+
+						memset(core_local_ptr, 0, bytes);
+						content_fn((char *)core_local_ptr, bytes);
+
+						_rom_fs.insert(new (core_mem_alloc())
+						               Rom_module(phys_addr, bytes, rom_name));
+					},
+					[&] (Range_allocator::Alloc_error) {
+						warning("failed allocate virtual memory to export ",
+						        rom_name, " as ROM module");
+						ram_alloc().free(phys_ptr, bytes);
+					}
+				);
+			},
+			[&] (Range_allocator::Alloc_error) {
+				warning("failed to export ", rom_name, " as ROM module"); }
+		);
+	};
+
+	export_page_as_rom_module("platform_info",
+		[&] (char *core_local_ptr, size_t size) {
+			Xml_generator xml(core_local_ptr, size, "platform_info", [&] ()
+			{
+				xml.node("kernel", [&] () {
+					xml.attribute("name", "foc");
+					xml.attribute("acpi", true);
+					xml.attribute("msi" , true);
+				});
+				xml.node("hardware", [&] () {
+					_setup_platform_info(xml, sigma0_map_kip()); });
+
+				xml.node("affinity-space", [&] () {
+					xml.attribute("width", affinity_space().width());
+					xml.attribute("height", affinity_space().height()); });
+			});
+		}
+	);
+
+	export_page_as_rom_module("core_log",
+		[&] (char *core_local_ptr, size_t size) {
+			init_core_log(Core_log_range { (addr_t)core_local_ptr, size } ); });
 
 	Affinity::Space const cpus = affinity_space();
@@ -198,11 +198,9 @@ unsigned long Cap_id_allocator::alloc()
 {
 	Mutex::Guard lock_guard(_mutex);
 
-	void *id = nullptr;
-	if (_id_alloc.alloc(CAP_ID_OFFSET, &id))
-		return (unsigned long) id;
-
-	throw Out_of_ids();
+	return _id_alloc.try_alloc(CAP_ID_OFFSET).convert<unsigned long>(
+		[&] (void *id) { return (unsigned long)id; },
+		[&] (Range_allocator::Alloc_error) -> unsigned long { throw Out_of_ids(); });
 }
@@ -30,19 +30,16 @@ extern unsigned _bss_end;
 void * Platform::Ram_allocator::alloc_aligned(size_t size, unsigned align)
 {
 	using namespace Genode;
-	using namespace Hw;
 
-	void * ret;
-	assert(Base::alloc_aligned(round_page(size), &ret,
-	                           max(align, get_page_size_log2())).ok());
-	return ret;
-}
-
-
-bool Platform::Ram_allocator::alloc(size_t size, void **out_addr)
-{
-	*out_addr = alloc_aligned(size, 0);
-	return true;
+	return Base::alloc_aligned(Hw::round_page(size),
+	                           max(align, get_page_size_log2())).convert<void *>(
+
+		[&] (void *ptr) { return ptr; },
+		[&] (Ram_allocator::Alloc_error e) -> void *
+		{
+			error("bootstrap RAM allocation failed, error=", e);
+			assert(false);
+		});
 }
@@ -53,8 +53,13 @@ class Bootstrap::Platform
 		};
 
 
-		class Ram_allocator : public Genode::Allocator_avl_base
+		class Ram_allocator : private Genode::Allocator_avl_base
 		{
+			/*
+			 * 'Ram_allocator' is derived from 'Allocator_avl_base' to access
+			 * the protected 'slab_block_size'.
+			 */
+
 			private:
 
 				using Base = Genode::Allocator_avl_base;
@@ -73,8 +78,7 @@ class Bootstrap::Platform
 				{ }
 
 				void * alloc_aligned(size_t size, unsigned align);
-				bool   alloc(size_t size, void **out_addr) override;
-				void * alloc(size_t size) { return Allocator::alloc(size); }
+				void * alloc(size_t size) { return alloc_aligned(size, 0); }
 
 				void add(Memory_region const &);
 				void remove(Memory_region const &);
@@ -29,12 +29,15 @@ Core_region_map::attach(Dataspace_capability ds_cap, size_t size,
                         off_t offset, bool use_local_addr,
                         Region_map::Local_addr, bool, bool writeable)
 {
-	auto lambda = [&] (Dataspace_component *ds) -> Local_addr {
-		if (!ds)
+	return _ep.apply(ds_cap, [&] (Dataspace_component *ds_ptr) -> Local_addr {
+
+		if (!ds_ptr)
 			throw Invalid_dataspace();
 
+		Dataspace_component &ds = *ds_ptr;
+
 		if (size == 0)
-			size = ds->size();
+			size = ds.size();
 
 		size_t page_rounded_size = (size + get_page_size() - 1) & get_page_mask();
@@ -48,10 +51,13 @@ Core_region_map::attach(Dataspace_capability ds_cap, size_t size,
 			return nullptr;
 		}
 
+		unsigned const align = get_page_size_log2();
+
 		/* allocate range in core's virtual address space */
-		void *virt_addr;
-		if (!platform().region_alloc().alloc_aligned(page_rounded_size, &virt_addr,
-		                                             get_page_size_log2()).ok()) {
+		Allocator::Alloc_result virt =
+			platform().region_alloc().alloc_aligned(page_rounded_size, align);
+
+		if (virt.failed()) {
 			error("could not allocate virtual address range in core of size ",
 			      page_rounded_size);
 			return nullptr;
@@ -61,16 +67,23 @@ Core_region_map::attach(Dataspace_capability ds_cap, size_t size,
 
 		/* map the dataspace's physical pages to corresponding virtual addresses */
 		unsigned num_pages = page_rounded_size >> get_page_size_log2();
-		Page_flags const flags { (writeable && ds->writable()) ? RW : RO,
+		Page_flags const flags { (writeable && ds.writable()) ? RW : RO,
 		                         NO_EXEC, KERN, GLOBAL,
-		                         ds->io_mem() ? DEVICE : RAM,
-		                         ds->cacheability() };
-		if (!map_local(ds->phys_addr(), (addr_t)virt_addr, num_pages, flags))
-			return nullptr;
-
-		return virt_addr;
-	};
-	return _ep.apply(ds_cap, lambda);
+		                         ds.io_mem() ? DEVICE : RAM,
+		                         ds.cacheability() };
+
+		return virt.convert<Local_addr>(
+
+			[&] (void *virt_addr) -> void * {
+				if (map_local(ds.phys_addr(), (addr_t)virt_addr, num_pages, flags))
+					return virt_addr;
+
+				platform().region_alloc().free(virt_addr, page_rounded_size);
+				return nullptr; },
+
+			[&] (Allocator::Alloc_error) {
+				return nullptr; });
+	});
 }
@@ -57,8 +57,8 @@ class Genode::Cpu_thread_allocator : public Allocator
 		 ** Allocator interface **
 		 *************************/
 
-		bool alloc(size_t size, void **out_addr) override {
-			return _alloc.alloc(size, out_addr); }
+		Alloc_result try_alloc(size_t size) override {
+			return _alloc.alloc(size); }
 
 		void free(void *addr, size_t size) override {
 			_alloc.free(addr, size); }
@@ -78,7 +78,7 @@ Irq_session_component::Irq_session_component(Range_allocator &irq_alloc,
 	}
 
 	/* allocate interrupt */
-	if (_irq_alloc.alloc_addr(1, _irq_number).error()) {
+	if (_irq_alloc.alloc_addr(1, _irq_number).failed()) {
 		error("unavailable interrupt ", _irq_number, " requested");
 		throw Service_denied();
 	}
@@ -35,10 +35,41 @@ using namespace Kernel;
 
 void Thread::_ipc_alloc_recv_caps(unsigned cap_count)
 {
-	Genode::Allocator &slab = pd().platform_pd().capability_slab();
+	using Allocator = Genode::Allocator;
+
+	Allocator &slab = pd().platform_pd().capability_slab();
 	for (unsigned i = 0; i < cap_count; i++) {
-		if (_obj_id_ref_ptr[i] == nullptr)
-			_obj_id_ref_ptr[i] = slab.alloc(sizeof(Object_identity_reference));
+		if (_obj_id_ref_ptr[i] != nullptr)
+			continue;
+
+		slab.try_alloc(sizeof(Object_identity_reference)).with_result(
+
+			[&] (void *ptr) {
+				_obj_id_ref_ptr[i] = ptr; },
+
+			[&] (Allocator::Alloc_error e) {
+
+				switch (e) {
+
+				case Allocator::Alloc_error::DENIED:
+
+					/*
+					 * Slab is exhausted, reflect condition to the client.
+					 */
+					throw Genode::Out_of_ram();
+
+				case Allocator::Alloc_error::OUT_OF_CAPS:
+				case Allocator::Alloc_error::OUT_OF_RAM:
+
+					/*
+					 * These conditions cannot happen because the slab
+					 * does not try to grow automatically. It is
+					 * explicitely expanded by the client as response to
+					 * the 'Out_of_ram' condition above.
+					 */
+					Genode::raw("unexpected recv_caps allocation failure");
+				}
+			}
+		);
 	}
 	_ipc_rcv_caps = cap_count;
 }
@@ -112,28 +112,46 @@ void Platform::_init_platform_info()
 {
 	unsigned const pages    = 1;
 	size_t   const rom_size = pages << get_page_size_log2();
-	void          *phys_ptr = nullptr;
-	void          *virt_ptr = nullptr;
 	const char    *rom_name = "platform_info";
 
-	if (!ram_alloc().alloc(get_page_size(), &phys_ptr)) {
-		error("could not setup platform_info ROM - ram allocation error");
-		return;
-	}
-
-	if (!region_alloc().alloc(rom_size, &virt_ptr)) {
-		error("could not setup platform_info ROM - region allocation error");
-		ram_alloc().free(phys_ptr);
-		return;
-	}
-
-	addr_t const phys_addr = reinterpret_cast<addr_t>(phys_ptr);
-	addr_t const virt_addr = reinterpret_cast<addr_t>(virt_ptr);
+	struct Guard
+	{
+		Range_allocator &phys_alloc;
+		Range_allocator &virt_alloc;
+
+		struct {
+			void *phys_ptr = nullptr;
+			void *virt_ptr = nullptr;
+		};
+
+		Guard(Range_allocator &phys_alloc, Range_allocator &virt_alloc)
+		: phys_alloc(phys_alloc), virt_alloc(virt_alloc) { }
+
+		~Guard()
+		{
+			if (phys_ptr) phys_alloc.free(phys_ptr);
+			if (virt_ptr) virt_alloc.free(phys_ptr);
+		}
+	} guard { ram_alloc(), region_alloc() };
+
+	ram_alloc().try_alloc(get_page_size()).with_result(
+		[&] (void *ptr) { guard.phys_ptr = ptr; },
+		[&] (Allocator::Alloc_error) {
+			error("could not setup platform_info ROM - RAM allocation error"); });
+
+	region_alloc().try_alloc(rom_size).with_result(
+		[&] (void *ptr) { guard.virt_ptr = ptr; },
+		[&] (Allocator::Alloc_error) {
+			error("could not setup platform_info ROM - region allocation error"); });
+
+	if (!guard.phys_ptr || !guard.virt_ptr)
+		return;
+
+	addr_t const phys_addr = reinterpret_cast<addr_t>(guard.phys_ptr);
+	addr_t const virt_addr = reinterpret_cast<addr_t>(guard.virt_ptr);
 
 	if (!map_local(phys_addr, virt_addr, pages, Hw::PAGE_FLAGS_KERN_DATA)) {
 		error("could not setup platform_info ROM - map error");
-		region_alloc().free(virt_ptr);
-		ram_alloc().free(phys_ptr);
 		return;
 	}
|
|||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
region_alloc().free(virt_ptr);
|
|
||||||
|
|
||||||
_rom_fs.insert(
|
_rom_fs.insert(
|
||||||
new (core_mem_alloc()) Rom_module(phys_addr, rom_size, rom_name));
|
new (core_mem_alloc()) Rom_module(phys_addr, rom_size, rom_name));
|
||||||
|
|
||||||
|
/* keep phys allocation but let guard revert virt allocation */
|
||||||
|
guard.phys_ptr = nullptr;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
@@ -203,25 +222,32 @@ Platform::Platform()
 
 	/* core log as ROM module */
 	{
-		void * core_local_ptr = nullptr;
-		void * phys_ptr       = nullptr;
 		unsigned const pages    = 1;
 		size_t   const log_size = pages << get_page_size_log2();
+		unsigned const align    = get_page_size_log2();
 
-		ram_alloc().alloc_aligned(log_size, &phys_ptr, get_page_size_log2());
-		addr_t const phys_addr = reinterpret_cast<addr_t>(phys_ptr);
-
-		/* let one page free after the log buffer */
-		region_alloc().alloc_aligned(log_size, &core_local_ptr, get_page_size_log2());
-		addr_t const core_local_addr = reinterpret_cast<addr_t>(core_local_ptr);
-
-		map_local(phys_addr, core_local_addr, pages);
-		memset(core_local_ptr, 0, log_size);
-
-		_rom_fs.insert(new (core_mem_alloc()) Rom_module(phys_addr, log_size,
-		                                                 "core_log"));
-
-		init_core_log(Core_log_range { core_local_addr, log_size } );
+		ram_alloc().alloc_aligned(log_size, align).with_result(
+
+			[&] (void *phys) {
+				addr_t const phys_addr = reinterpret_cast<addr_t>(phys);
+
+				region_alloc().alloc_aligned(log_size, align).with_result(
+
+					[&] (void *ptr) {
+
+						map_local(phys_addr, (addr_t)ptr, pages);
+						memset(ptr, 0, log_size);
+
+						_rom_fs.insert(new (core_mem_alloc())
+						               Rom_module(phys_addr, log_size, "core_log"));
+
+						init_core_log(Core_log_range { (addr_t)ptr, log_size } );
+					},
+					[&] (Range_allocator::Alloc_error) { /* ignored */ }
+				);
+			},
+			[&] (Range_allocator::Alloc_error) { }
+		);
 	}
 
 	class Idle_thread_trace_source : public Trace::Source::Info_accessor,
@@ -37,11 +37,16 @@ Core_mem_allocator &Hw::Address_space::_cma()
 
 void *Hw::Address_space::_table_alloc()
 {
-	void * ret = nullptr;
-	if (!_cma().alloc_aligned(sizeof(Page_table), (void**)&ret,
-	                          Page_table::ALIGNM_LOG2).ok())
-		throw Insufficient_ram_quota();
-	return ret;
+	unsigned const align = Page_table::ALIGNM_LOG2;
+
+	return _cma().alloc_aligned(sizeof(Page_table), align).convert<void *>(
+
+		[&] (void *ptr) {
+			return ptr; },
+
+		[&] (Range_allocator::Alloc_result) -> void * {
+			/* XXX distinguish error conditions */
+			throw Insufficient_ram_quota(); });
 }
@@ -134,10 +139,15 @@ Cap_space::Cap_space() : _slab(nullptr, &_initial_sb) { }
 
 void Cap_space::upgrade_slab(Allocator &alloc)
 {
-	void * block = nullptr;
-	if (!alloc.alloc(SLAB_SIZE, &block))
-		throw Out_of_ram();
-	_slab.insert_sb(block);
+	alloc.try_alloc(SLAB_SIZE).with_result(
+
+		[&] (void *ptr) {
+			_slab.insert_sb(ptr); },
+
+		[&] (Allocator::Alloc_error) {
+			/* XXX distinguish error conditions */
+			throw Out_of_ram();
+		});
 }
@@ -71,13 +71,18 @@ Platform_thread::Platform_thread(Label const &label, Native_utcb &utcb)
 	_kobj(_kobj.CALLED_FROM_CORE, _label.string())
 {
 	/* create UTCB for a core thread */
-	void *utcb_phys;
-	if (!platform().ram_alloc().alloc(sizeof(Native_utcb), &utcb_phys)) {
-		error("failed to allocate UTCB");
-		throw Out_of_ram();
-	}
-	map_local((addr_t)utcb_phys, (addr_t)_utcb_core_addr,
-	          sizeof(Native_utcb) / get_page_size());
+	platform().ram_alloc().try_alloc(sizeof(Native_utcb)).with_result(
+
+		[&] (void *utcb_phys) {
+			map_local((addr_t)utcb_phys, (addr_t)_utcb_core_addr,
+			          sizeof(Native_utcb) / get_page_size());
+		},
+		[&] (Range_allocator::Alloc_error) {
+			error("failed to allocate UTCB");
+			/* XXX distinguish error conditions */
+			throw Out_of_ram();
+		}
+	);
 }
@@ -33,30 +33,40 @@ void Ram_dataspace_factory::_clear_ds (Dataspace_component &ds)
 {
 	size_t page_rounded_size = (ds.size() + get_page_size() - 1) & get_page_mask();
 
+	struct Guard
+	{
+		Range_allocator &virt_alloc;
+		struct { void *virt_ptr = nullptr; };
+
+		Guard(Range_allocator &virt_alloc) : virt_alloc(virt_alloc) { }
+
+		~Guard() { if (virt_ptr) virt_alloc.free(virt_ptr); }
+
+	} guard(platform().region_alloc());
+
 	/* allocate range in core's virtual address space */
-	void *virt_addr;
-	if (!platform().region_alloc().alloc(page_rounded_size, &virt_addr)) {
-		error("could not allocate virtual address range in core of size ",
-		      page_rounded_size);
+	platform().region_alloc().try_alloc(page_rounded_size).with_result(
+		[&] (void *ptr) { guard.virt_ptr = ptr; },
+		[&] (Range_allocator::Alloc_error e) {
+			error("could not allocate virtual address range in core of size ",
+			      page_rounded_size, ", error=", e); });
+
+	if (!guard.virt_ptr)
 		return;
-	}
 
 	/* map the dataspace's physical pages to corresponding virtual addresses */
 	size_t num_pages = page_rounded_size >> get_page_size_log2();
-	if (!map_local(ds.phys_addr(), (addr_t)virt_addr, num_pages)) {
+	if (!map_local(ds.phys_addr(), (addr_t)guard.virt_ptr, num_pages)) {
 		error("core-local memory mapping failed");
 		return;
 	}
 
 	/* dependent on the architecture, cache maintainance might be necessary */
-	Cpu::clear_memory_region((addr_t)virt_addr, page_rounded_size,
+	Cpu::clear_memory_region((addr_t)guard.virt_ptr, page_rounded_size,
 	                         ds.cacheability() != CACHED);
 
 	/* unmap dataspace from core */
-	if (!unmap_local((addr_t)virt_addr, num_pages))
-		error("could not unmap core-local address range at ", virt_addr);
-
-	/* free core's virtual address space */
-	platform().region_alloc().free(virt_addr, page_rounded_size);
+	if (!unmap_local((addr_t)guard.virt_ptr, num_pages))
+		error("could not unmap core-local address range at ", guard.virt_ptr);
 }
@@ -86,22 +86,28 @@ class Genode::Rpc_cap_factory
 		{
 			Mutex::Guard guard(_mutex);
 
-			/* allocate kernel object */
-			Kobject * obj;
-			if (!_slab.alloc(sizeof(Kobject), (void**)&obj))
-				throw Allocator::Out_of_memory();
-			construct_at<Kobject>(obj, ep);
-
-			if (!obj->cap.valid()) {
-				raw("Invalid entrypoint ", (addr_t)Capability_space::capid(ep),
-				    " for allocating a capability!");
-				destroy(&_slab, obj);
-				return Native_capability();
-			}
-
-			/* store it in the list and return result */
-			_list.insert(obj);
-			return obj->cap;
+			return _slab.try_alloc(sizeof(Kobject)).convert<Native_capability>(
+
+				[&] (void *ptr) {
+
+					/* create kernel object */
+					Kobject &obj = *construct_at<Kobject>(ptr, ep);
+
+					if (!obj.cap.valid()) {
+						raw("Invalid entrypoint ", (addr_t)Capability_space::capid(ep),
+						    " for allocating a capability!");
+						destroy(&_slab, &obj);
+						return Native_capability();
+					}
+
+					/* store it in the list and return result */
+					_list.insert(&obj);
+					return obj.cap;
+				},
+				[&] (Allocator::Alloc_error) -> Native_capability {
+					/* XXX distinguish error conditions */
+					throw Allocator::Out_of_memory();
+				});
 		}
 
 		void free(Native_capability cap)
@@ -43,16 +43,20 @@ void Genode::platform_add_local_services(Rpc_entrypoint &ep,
 	          Hw::Mm::hypervisor_exception_vector().size / get_page_size(),
 	          Hw::PAGE_FLAGS_KERN_TEXT);
 
-	void * stack = nullptr;
-	assert(platform().ram_alloc().alloc_aligned(Hw::Mm::hypervisor_stack().size,
-	                                            (void**)&stack,
-	                                            get_page_size_log2()).ok());
-	map_local((addr_t)stack,
-	          Hw::Mm::hypervisor_stack().base,
-	          Hw::Mm::hypervisor_stack().size / get_page_size(),
-	          Hw::PAGE_FLAGS_KERN_DATA);
-
-	static Vm_root vm_root(ep, sh, core_env().ram_allocator(),
-	                       core_env().local_rm(), trace_sources);
-	static Core_service<Vm_session_component> vm_service(services, vm_root);
+	platform().ram_alloc().alloc_aligned(Hw::Mm::hypervisor_stack().size,
+	                                     get_page_size_log2()).with_result(
+		[&] (void *stack) {
+			map_local((addr_t)stack,
+			          Hw::Mm::hypervisor_stack().base,
+			          Hw::Mm::hypervisor_stack().size / get_page_size(),
+			          Hw::PAGE_FLAGS_KERN_DATA);
+
+			static Vm_root vm_root(ep, sh, core_env().ram_allocator(),
+			                       core_env().local_rm(), trace_sources);
+			static Core_service<Vm_session_component> vm_service(services, vm_root);
+		},
+		[&] (Range_allocator::Alloc_error) {
+			warning("failed to allocate hypervisor stack for VM service");
+		}
+	);
 }
@@ -73,14 +73,17 @@ void Vm_session_component::_detach_vm_memory(addr_t vm_addr, size_t size)
 
 void * Vm_session_component::_alloc_table()
 {
-	void * table;
 	/* get some aligned space for the translation table */
-	if (!cma().alloc_aligned(sizeof(Board::Vm_page_table), (void**)&table,
-	                         Board::Vm_page_table::ALIGNM_LOG2).ok()) {
-		error("failed to allocate kernel object");
-		throw Insufficient_ram_quota();
-	}
-	return table;
+	return cma().alloc_aligned(sizeof(Board::Vm_page_table),
+	                           Board::Vm_page_table::ALIGNM_LOG2).convert<void *>(
+		[&] (void *table_ptr) {
+			return table_ptr; },
+
+		[&] (Range_allocator::Alloc_error) -> void * {
+			/* XXX handle individual error conditions */
+			error("failed to allocate kernel object");
+			throw Insufficient_ram_quota(); }
+	);
 }
@@ -64,22 +64,21 @@ class Genode::Platform : public Platform_generic
 
 		struct Dummy_allocator : Range_allocator
 		{
 			void   free(void *, size_t)       override { ASSERT_NEVER_CALLED; }
 			bool   need_size_for_free() const override { ASSERT_NEVER_CALLED; }
 			size_t consumed()           const override { ASSERT_NEVER_CALLED; }
 			size_t overhead(size_t)     const override { ASSERT_NEVER_CALLED; }
-			int          add_range   (addr_t, size_t ) override { ASSERT_NEVER_CALLED; }
-			int          remove_range(addr_t, size_t ) override { ASSERT_NEVER_CALLED; }
+			Range_result add_range   (addr_t, size_t ) override { ASSERT_NEVER_CALLED; }
+			Range_result remove_range(addr_t, size_t ) override { ASSERT_NEVER_CALLED; }
 			void   free(void *)             override { ASSERT_NEVER_CALLED; }
 			size_t avail()            const override { ASSERT_NEVER_CALLED; }
 			bool   valid_addr(addr_t) const override { ASSERT_NEVER_CALLED; }
-			bool   alloc(size_t, void **)   override { ASSERT_NEVER_CALLED; }
+			Alloc_result try_alloc(size_t)          override { ASSERT_NEVER_CALLED; }
+			Alloc_result alloc_addr(size_t, addr_t) override { ASSERT_NEVER_CALLED; }
 
-			Alloc_return alloc_aligned(size_t, void **, unsigned, Range) override
+			Alloc_result alloc_aligned(size_t, unsigned, Range) override
 			{ ASSERT_NEVER_CALLED; }
 
-			Alloc_return alloc_addr(size_t, addr_t) override
-			{ ASSERT_NEVER_CALLED; }
-
 		} _dummy_alloc { };
@@ -88,25 +87,31 @@ class Genode::Platform : public Platform_generic
 		 */
 		struct Pseudo_ram_allocator : Range_allocator
 		{
-			bool alloc(size_t, void **out_addr) override
+			Alloc_result try_alloc(size_t) override
 			{
-				*out_addr = 0;
-				return true;
+				return nullptr;
 			}
 
-			Alloc_return alloc_aligned(size_t, void **out, unsigned, Range) override
+			Alloc_result alloc_aligned(size_t, unsigned, Range) override
 			{
-				*out = 0;
-				return Alloc_return::OK;
+				return nullptr;
 			}
 
-			Alloc_return alloc_addr(size_t, addr_t) override
+			Alloc_result alloc_addr(size_t, addr_t) override
 			{
-				return Alloc_return::OK;
+				return nullptr;
+			}
+
+			Range_result add_range(addr_t, size_t) override
+			{
+				return Range_ok();
+			}
+
+			Range_result remove_range(addr_t, size_t) override
+			{
+				return Range_ok();
 			}
 
-			int    add_range(addr_t, size_t)    override { return 0; }
-			int    remove_range(addr_t, size_t) override { return 0; }
 			void   free(void *)         override { }
 			void   free(void *, size_t) override { }
 			size_t avail() const override { return ram_quota_from_env(); }
@@ -410,11 +410,7 @@ namespace {
 	{
 		typedef Genode::size_t size_t;
 
-		bool alloc(size_t size, void **out_addr) override
-		{
-			*out_addr = malloc(size);
-			return true;
-		}
+		Alloc_result try_alloc(size_t size) override { return malloc(size); }
 
 		void free(void *addr, size_t) override { ::free(addr); }
@@ -51,9 +51,9 @@ Main::Main(Env &env) : heap(env.ram(), env.rm())
 
 	/* induce initial heap expansion to remove RM noise */
 	if (1) {
-		void *addr;
-		heap.alloc(0x100000, &addr);
-		heap.free(addr, 0);
+		heap.try_alloc(0x100000).with_result(
+			[&] (void *ptr) { heap.free(ptr, 0); },
+			[&] (Allocator::Alloc_error) { });
 	}
 
 	addr_t beg((addr_t)&blob_beg);
@@ -39,8 +39,12 @@ static inline void * alloc_region(Dataspace_component &ds, const size_t size)
 	void *virt_addr = 0;
 	size_t align_log2 = log2(ds.size());
 	for (; align_log2 >= get_page_size_log2(); align_log2--) {
-		if (platform().region_alloc().alloc_aligned(size,
-		                                            &virt_addr, align_log2).ok())
+
+		platform().region_alloc().alloc_aligned(size, align_log2).with_result(
+			[&] (void *ptr) { virt_addr = ptr; },
+			[&] (Allocator::Alloc_error) { });
+
+		if (virt_addr)
 			break;
 	}
@@ -69,33 +69,36 @@ static bool msi(Genode::addr_t irq_sel, Genode::addr_t phys_mem,
                 Genode::addr_t &msi_addr, Genode::addr_t &msi_data,
                 Genode::Signal_context_capability sig_cap)
 {
-	void * virt = 0;
-	if (platform().region_alloc().alloc_aligned(4096, &virt, 12).error())
-		return false;
-
-	Genode::addr_t virt_addr = reinterpret_cast<Genode::addr_t>(virt);
-	if (!virt_addr)
-		return false;
-
-	using Nova::Rights;
-	using Nova::Utcb;
-
-	Nova::Mem_crd phys_crd(phys_mem  >> 12, 0, Rights(true, false, false));
-	Nova::Mem_crd virt_crd(virt_addr >> 12, 0, Rights(true, false, false));
-	Utcb &utcb = *reinterpret_cast<Utcb *>(Thread::myself()->utcb());
-
-	if (map_local_phys_to_virt(utcb, phys_crd, virt_crd, platform_specific().core_pd_sel())) {
-		platform().region_alloc().free(virt, 4096);
-		return false;
-	}
-
-	/* try to assign MSI to device */
-	bool res = associate(irq_sel, msi_addr, msi_data, sig_cap, virt_addr);
-
-	unmap_local(Nova::Mem_crd(virt_addr >> 12, 0, Rights(true, true, true)));
-	platform().region_alloc().free(virt, 4096);
-
-	return res;
+	return platform().region_alloc().alloc_aligned(4096, 12).convert<bool>(
+
+		[&] (void *virt_ptr) {
+
+			addr_t const virt_addr = reinterpret_cast<addr_t>(virt_ptr);
+
+			using Nova::Rights;
+			using Nova::Utcb;
+
+			Nova::Mem_crd phys_crd(phys_mem  >> 12, 0, Rights(true, false, false));
+			Nova::Mem_crd virt_crd(virt_addr >> 12, 0, Rights(true, false, false));
+
+			Utcb &utcb = *reinterpret_cast<Utcb *>(Thread::myself()->utcb());
+
+			if (map_local_phys_to_virt(utcb, phys_crd, virt_crd, platform_specific().core_pd_sel())) {
+				platform().region_alloc().free(virt_ptr, 4096);
+				return false;
+			}
+
+			/* try to assign MSI to device */
+			bool res = associate(irq_sel, msi_addr, msi_data, sig_cap, virt_addr);
+
+			unmap_local(Nova::Mem_crd(virt_addr >> 12, 0, Rights(true, true, true)));
+			platform().region_alloc().free(virt_ptr, 4096);
+
+			return res;
+		},
+		[&] (Range_allocator::Alloc_error) {
+			return false;
+		});
 }
@@ -217,7 +220,7 @@ Irq_session_component::Irq_session_component(Range_allocator &irq_alloc,
 		throw Service_denied();
 	}
 
-	if (irq_alloc.alloc_addr(1, irq_number).error()) {
+	if (irq_alloc.alloc_addr(1, irq_number).failed()) {
 		error("unavailable IRQ ", irq_number, " requested");
 		throw Service_denied();
 	}
@@ -80,18 +80,21 @@ addr_t Platform::_map_pages(addr_t const phys_addr, addr_t const pages,
 	addr_t const size = pages << get_page_size_log2();
 
 	/* try to reserve contiguous virtual area */
-	void *core_local_ptr = nullptr;
-	if (region_alloc().alloc_aligned(size + (guard_page ? get_page_size() : 0),
-	                                 &core_local_ptr, get_page_size_log2()).error())
-		return 0;
-
-	addr_t const core_local_addr = reinterpret_cast<addr_t>(core_local_ptr);
-
-	int res = map_local(_core_pd_sel, *__main_thread_utcb, phys_addr,
-	                    core_local_addr, pages,
-	                    Nova::Rights(true, true, false), true);
-
-	return res ? 0 : core_local_addr;
+	return region_alloc().alloc_aligned(size + (guard_page ? get_page_size() : 0),
+	                                    get_page_size_log2()).convert<addr_t>(
+		[&] (void *core_local_ptr) {
+
+			addr_t const core_local_addr = reinterpret_cast<addr_t>(core_local_ptr);
+
+			int res = map_local(_core_pd_sel, *__main_thread_utcb, phys_addr,
+			                    core_local_addr, pages,
+			                    Nova::Rights(true, true, false), true);
+
+			return res ? 0 : core_local_addr;
+		},
+
+		[&] (Allocator::Alloc_error) {
+			return 0UL; });
 }
@@ -661,126 +664,113 @@ Platform::Platform()
 
 	_init_rom_modules();
 
-	{
-		/* export x86 platform specific infos */
-
-		unsigned const pages = 1;
-		void * phys_ptr = nullptr;
-		if (ram_alloc().alloc_aligned(get_page_size(), &phys_ptr,
-		                              get_page_size_log2()).ok()) {
-
-			addr_t const phys_addr = reinterpret_cast<addr_t>(phys_ptr);
-			addr_t const core_local_addr = _map_pages(phys_addr, pages);
-
-			if (!core_local_addr) {
-				ram_alloc().free(phys_ptr);
-			} else {
-
-				Genode::Xml_generator xml(reinterpret_cast<char *>(core_local_addr),
-				                          pages << get_page_size_log2(),
-				                          "platform_info", [&] ()
-				{
+	auto export_pages_as_rom_module = [&] (auto rom_name, size_t pages, auto content_fn)
+	{
+		size_t const bytes = pages << get_page_size_log2();
+		ram_alloc().alloc_aligned(bytes, get_page_size_log2()).with_result(
+
+			[&] (void *phys_ptr) {
+
+				addr_t const phys_addr      = reinterpret_cast<addr_t>(phys_ptr);
+				char * const core_local_ptr = (char *)_map_pages(phys_addr, pages);
+
+				if (!core_local_ptr) {
+					warning("failed to export ", rom_name, " as ROM module");
+					ram_alloc().free(phys_ptr, bytes);
+					return;
+				}
+
+				memset(core_local_ptr, 0, bytes);
+				content_fn(core_local_ptr, bytes);
+
+				_rom_fs.insert(new (core_mem_alloc())
+				               Rom_module(phys_addr, bytes, rom_name));
+
+				/* leave the ROM backing store mapped within core */
+			},
+
+			[&] (Range_allocator::Alloc_error) {
+				warning("failed to allocate physical memory for exporting ",
+				        rom_name, " as ROM module"); });
+	};
+
+	export_pages_as_rom_module("platform_info", 1,
+		[&] (char * const ptr, size_t const size) {
+			Xml_generator xml(ptr, size, "platform_info", [&] ()
+			{
 				xml.node("kernel", [&] () {
 					xml.attribute("name", "nova");
 					xml.attribute("acpi", true);
 					xml.attribute("msi" , true);
 				});
 				if (efi_sys_tab_phy) {
 					xml.node("efi-system-table", [&] () {
 						xml.attribute("address", String<32>(Hex(efi_sys_tab_phy)));
 					});
 				}
 				xml.node("acpi", [&] () {
 
 					xml.attribute("revision", 2); /* XXX */
 
 					if (rsdt)
 						xml.attribute("rsdt", String<32>(Hex(rsdt)));
 
 					if (xsdt)
 						xml.attribute("xsdt", String<32>(Hex(xsdt)));
 				});
 				xml.node("affinity-space", [&] () {
 					xml.attribute("width", _cpus.width());
 					xml.attribute("height", _cpus.height());
 				});
 				xml.node("boot", [&] () {
 					if (!boot_fb)
 						return;
 
 					if (!efi_boot && (Resolution::Type::get(boot_fb->size) != Resolution::Type::VGA_TEXT))
 						return;
 
 					xml.node("framebuffer", [&] () {
 						xml.attribute("phys",   String<32>(Hex(boot_fb->addr)));
 						xml.attribute("width",  Resolution::Width::get(boot_fb->size));
 						xml.attribute("height", Resolution::Height::get(boot_fb->size));
 						xml.attribute("bpp",    Resolution::Bpp::get(boot_fb->size));
 						xml.attribute("type",   Resolution::Type::get(boot_fb->size));
 						xml.attribute("pitch",  boot_fb->aux);
 					});
 				});
 				xml.node("hardware", [&] () {
 					xml.node("features", [&] () {
 						xml.attribute("svm", hip.has_feature_svm());
 						xml.attribute("vmx", hip.has_feature_vmx());
 					});
 					xml.node("tsc", [&] () {
 						xml.attribute("invariant", cpuid_invariant_tsc());
 						xml.attribute("freq_khz" , hip.tsc_freq);
 					});
 					xml.node("cpus", [&] () {
 						hip.for_each_enabled_cpu([&](Hip::Cpu_desc const &cpu, unsigned i) {
 							xml.node("cpu", [&] () {
 								xml.attribute("id",       i);
 								xml.attribute("package",  cpu.package);
 								xml.attribute("core",     cpu.core);
 								xml.attribute("thread",   cpu.thread);
 								xml.attribute("family",   String<5>(Hex(cpu.family)));
 								xml.attribute("model",    String<5>(Hex(cpu.model)));
 								xml.attribute("stepping", String<5>(Hex(cpu.stepping)));
 								xml.attribute("platform", String<5>(Hex(cpu.platform)));
 								xml.attribute("patch",    String<12>(Hex(cpu.patch)));
 							});
 						});
 					});
 				});
 			});
+		}
+	);
 
-				unmap_local(*__main_thread_utcb, core_local_addr, pages);
-				region_alloc().free(reinterpret_cast<void *>(core_local_addr),
-				                    pages * get_page_size());
-
-				_rom_fs.insert(new (core_mem_alloc())
-				               Rom_module(phys_addr, pages * get_page_size(),
-				                          "platform_info"));
-			}
-		}
-	}
-
-	/* core log as ROM module */
-	{
-		void * phys_ptr = nullptr;
-		unsigned const pages  = 4;
-		size_t const log_size = pages << get_page_size_log2();
-
-		if (ram_alloc().alloc_aligned(log_size, &phys_ptr,
-		                              get_page_size_log2()).ok()) {
-
-			addr_t const phys_addr = reinterpret_cast<addr_t>(phys_ptr);
-
-			addr_t const virt = _map_pages(phys_addr, pages, true);
-			if (virt) {
-				memset(reinterpret_cast<void *>(virt), 0, log_size);
-
-				_rom_fs.insert(new (core_mem_alloc()) Rom_module(phys_addr, log_size,
-				                                                 "core_log"));
-
-				init_core_log( Core_log_range { virt, log_size } );
-			} else
-				ram_alloc().free(phys_ptr);
-		}
-	}
+	export_pages_as_rom_module("core_log", 4,
+		[&] (char * const ptr, size_t const size) {
+			init_core_log( Core_log_range { (addr_t)ptr, size } );
+		});
 
 	/* export hypervisor log memory */
 	if (hyp_log && hyp_log_size)
@@ -831,8 +821,12 @@ Platform::Platform()
 	for (unsigned i = 0; i < 32; i++)
 	{
 		void * phys_ptr = nullptr;
-		if (ram_alloc().alloc_aligned(get_page_size(), &phys_ptr,
-		                              get_page_size_log2()).error())
+		ram_alloc().alloc_aligned(get_page_size(), get_page_size_log2()).with_result(
+			[&] (void *ptr) { phys_ptr = ptr; },
+			[&] (Range_allocator::Alloc_error) { /* covered by nullptr test below */ });
+
+		if (phys_ptr == nullptr)
 			break;

 		addr_t phys_addr = reinterpret_cast<addr_t>(phys_ptr);
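Note: the hunk above is the template for most call-site conversions in this
patch. 'with_result' applies one lambda to the success value and one to the
'Alloc_error', so the failure is handled locally instead of propagating as an
exception. A minimal sketch of the idiom (allocator reference and size are
illustrative, not part of the patch):

    void *ptr = nullptr;
    alloc.try_alloc(4096).with_result(
        [&] (void *p) { ptr = p; },
        [&] (Allocator::Alloc_error e) { error("allocation failed: ", e); });

    if (!ptr)
        return;  /* error already reported, proceed without the buffer */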
@@ -40,12 +40,17 @@ static inline void * alloc_region(Dataspace_component &ds, const size_t size)
 	void *virt_addr = 0;
 	size_t align_log2 = log2(ds.size());
 	for (; align_log2 >= get_page_size_log2(); align_log2--) {
-		if (platform().region_alloc().alloc_aligned(size,
-		                                            &virt_addr, align_log2).ok())
-			break;
+		platform().region_alloc().alloc_aligned(size, align_log2).with_result(
+			[&] (void *ptr) { virt_addr = ptr; },
+			[&] (Range_allocator::Alloc_error) { /* try next iteration */ }
+		);
+		if (virt_addr)
+			return virt_addr;
 	}

-	return virt_addr;
+	error("alloc_region of size ", size, " unexpectedly failed");
+	return nullptr;
 }
@@ -24,10 +24,8 @@ Core_region_map::attach(Dataspace_capability ds_cap, size_t size,
                         off_t offset, bool use_local_addr,
                         Region_map::Local_addr, bool, bool)
 {
-	using namespace Okl4;
-
-	auto lambda = [&] (Dataspace_component *ds) -> void *
-	{
+	return _ep.apply(ds_cap, [&] (Dataspace_component *ds) -> void * {
+
 		if (!ds)
 			throw Invalid_dataspace();

@@ -48,21 +46,25 @@ Core_region_map::attach(Dataspace_capability ds_cap, size_t size,
 		}

 		/* allocate range in core's virtual address space */
-		void *virt_addr;
-		if (!platform().region_alloc().alloc(page_rounded_size, &virt_addr)) {
-			error("could not allocate virtual address range in core of size ",
-			      page_rounded_size);
-			return nullptr;
-		}
-
-		/* map the dataspace's physical pages to corresponding virtual addresses */
-		unsigned num_pages = page_rounded_size >> get_page_size_log2();
-		if (!map_local(ds->phys_addr(), (addr_t)virt_addr, num_pages))
-			return nullptr;
-
-		return virt_addr;
-	};
-
-	return _ep.apply(ds_cap, lambda);
+		Range_allocator &virt_alloc = platform().region_alloc();
+		return virt_alloc.try_alloc(page_rounded_size).convert<void *>(
+
+			[&] (void *virt_addr) -> void * {
+
+				/* map the dataspace's physical pages to virtual memory */
+				unsigned num_pages = page_rounded_size >> get_page_size_log2();
+				if (!map_local(ds->phys_addr(), (addr_t)virt_addr, num_pages))
+					return nullptr;
+
+				return virt_addr;
+			},
+
+			[&] (Range_allocator::Alloc_error) -> void * {
+				error("could not allocate virtual address range in core of size ",
+				      page_rounded_size);
+				return nullptr;
+			});
+	});
 }
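Note: where the surrounding function must produce a value, the patch uses
'convert<T>' instead of 'with_result'. Both lambdas return a 'T', so the
allocation outcome folds directly into the function's return value. A reduced
sketch of the shape used above (names illustrative):

    void *attach_or_null(Range_allocator &alloc, size_t bytes)
    {
        return alloc.try_alloc(bytes).convert<void *>(
            [&] (void *ptr)                              { return ptr; },
            [&] (Range_allocator::Alloc_error) -> void * { return nullptr; });
    }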
@@ -138,7 +138,7 @@ Irq_session_component::Irq_session_component(Range_allocator &irq_alloc,
 	if (msi)
 		throw Service_denied();

-	if (irq_alloc.alloc_addr(1, _irq_number).error()) {
+	if (irq_alloc.alloc_addr(1, _irq_number).failed()) {
 		error("unavailable IRQ ", Hex(_irq_number), " requested");
 		throw Service_denied();
 	}
@@ -189,52 +189,66 @@ Platform::Platform()

 	/* core log as ROM module */
 	{
-		void * core_local_ptr = nullptr;
-		void * phys_ptr       = nullptr;
 		unsigned const pages  = 1;
 		size_t const log_size = pages << get_page_size_log2();
+		unsigned const align  = get_page_size_log2();

-		ram_alloc().alloc_aligned(log_size, &phys_ptr, get_page_size_log2());
-		addr_t const phys_addr = reinterpret_cast<addr_t>(phys_ptr);
-
-		/* let one page free after the log buffer */
-		region_alloc().alloc_aligned(log_size, &core_local_ptr, get_page_size_log2());
-		addr_t const core_local_addr = reinterpret_cast<addr_t>(core_local_ptr);
-
-		map_local(phys_addr, core_local_addr, pages);
-		memset(core_local_ptr, 0, log_size);
-
-		_rom_fs.insert(new (core_mem_alloc()) Rom_module(phys_addr, log_size,
-		                                                 "core_log"));
-
-		init_core_log(Core_log_range { core_local_addr, log_size } );
+		ram_alloc().alloc_aligned(log_size, align).with_result(
+
+			[&] (void *phys) {
+				addr_t const phys_addr = reinterpret_cast<addr_t>(phys);
+
+				region_alloc().alloc_aligned(log_size, align).with_result(
+
+					[&] (void *ptr) {
+
+						map_local(phys_addr, (addr_t)ptr, pages);
+						memset(ptr, 0, log_size);
+
+						_rom_fs.insert(new (core_mem_alloc())
+						               Rom_module(phys_addr, log_size, "core_log"));
+
+						init_core_log(Core_log_range { (addr_t)ptr, log_size } );
+					},
+					[&] (Range_allocator::Alloc_error) { }
+				);
+			},
+			[&] (Range_allocator::Alloc_error) { }
+		);
 	}

 	/* export platform specific infos */
 	{
-		void * core_local_ptr = nullptr;
-		void * phys_ptr       = nullptr;
 		unsigned const pages = 1;
 		size_t const size    = pages << get_page_size_log2();

-		if (ram_alloc().alloc_aligned(size, &phys_ptr, get_page_size_log2()).ok()) {
-			addr_t const phys_addr = reinterpret_cast<addr_t>(phys_ptr);
-
-			/* let one page free after the log buffer */
-			region_alloc().alloc_aligned(size, &core_local_ptr, get_page_size_log2());
-			addr_t const core_local_addr = reinterpret_cast<addr_t>(core_local_ptr);
-
-			if (map_local(phys_addr, core_local_addr, pages)) {
-
-				Genode::Xml_generator xml(reinterpret_cast<char *>(core_local_addr),
-				                          size, "platform_info", [&] () {
-					xml.node("kernel", [&] () { xml.attribute("name", "okl4"); });
-				});
-
-				_rom_fs.insert(new (core_mem_alloc()) Rom_module(phys_addr, size,
-				                                                 "platform_info"));
-			}
-		}
+		ram_alloc().alloc_aligned(size, get_page_size_log2()).with_result(
+
+			[&] (void *phys_ptr) {
+				addr_t const phys_addr = reinterpret_cast<addr_t>(phys_ptr);
+
+				/* let one page free after the log buffer */
+				region_alloc().alloc_aligned(size, get_page_size_log2()).with_result(
+
+					[&] (void *core_local_ptr) {
+						addr_t const core_local_addr = reinterpret_cast<addr_t>(core_local_ptr);
+
+						if (map_local(phys_addr, core_local_addr, pages)) {
+
+							Xml_generator xml(reinterpret_cast<char *>(core_local_addr),
+							                  size, "platform_info", [&] () {
+								xml.node("kernel", [&] () { xml.attribute("name", "okl4"); });
+							});
+
+							_rom_fs.insert(new (core_mem_alloc()) Rom_module(phys_addr, size,
+							                                                 "platform_info"));
+						}
+					},
+					[&] (Range_allocator::Alloc_error) { }
+				);
+			},
+			[&] (Range_allocator::Alloc_error) { }
+		);
 	}
@@ -38,31 +38,41 @@ void Ram_dataspace_factory::_clear_ds (Dataspace_component &ds)
 {
 	size_t page_rounded_size = (ds.size() + get_page_size() - 1) & get_page_mask();

+	struct Guard
+	{
+		Range_allocator &virt_alloc;
+		struct { void *virt_ptr = nullptr; };
+
+		Guard(Range_allocator &virt_alloc) : virt_alloc(virt_alloc) { }
+
+		~Guard() { if (virt_ptr) virt_alloc.free(virt_ptr); }
+
+	} guard(platform().region_alloc());
+
 	/* allocate range in core's virtual address space */
-	void *virt_addr;
-	if (!platform().region_alloc().alloc(page_rounded_size, &virt_addr)) {
-		error("could not allocate virtual address range in core of size ",
-		      page_rounded_size);
+	platform().region_alloc().try_alloc(page_rounded_size).with_result(
+		[&] (void *ptr) { guard.virt_ptr = ptr; },
+		[&] (Range_allocator::Alloc_error e) {
+			error("could not allocate virtual address range in core of size ",
+			      page_rounded_size, ", error=", e); });
+
+	if (!guard.virt_ptr)
 		return;
-	}

 	/* map the dataspace's physical pages to corresponding virtual addresses */
 	size_t num_pages = page_rounded_size >> get_page_size_log2();
-	if (!map_local(ds.phys_addr(), (addr_t)virt_addr, num_pages)) {
-		error("core-local memory mapping failed, error=", Okl4::L4_ErrorCode());
+	if (!map_local(ds.phys_addr(), (addr_t)guard.virt_ptr, num_pages)) {
+		error("core-local memory mapping failed");
 		return;
 	}

 	/* clear dataspace */
 	size_t num_longwords = page_rounded_size/sizeof(long);
-	for (long *dst = (long *)virt_addr; num_longwords--;)
+	for (long *dst = (long *)guard.virt_ptr; num_longwords--;)
 		*dst++ = 0;

 	/* unmap dataspace from core */
-	if (!unmap_local((addr_t)virt_addr, num_pages))
-		error("could not unmap core-local address range at ", virt_addr, ", "
+	if (!unmap_local((addr_t)guard.virt_ptr, num_pages))
+		error("could not unmap core-local address range at ", guard.virt_ptr, ", "
 		      "error=", Okl4::L4_ErrorCode());
-
-	/* free core's virtual address space */
-	platform().region_alloc().free(virt_addr, page_rounded_size);
 }
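Note: without exceptions, the early 'return' statements above would leak the
allocated virtual range, which is why the hunk introduces the 'Guard'. The
general shape of such a scope guard, assuming 'free' is the matching release
operation, looks like this (hypothetical reduction of the code above):

    struct Alloc_guard
    {
        Range_allocator &alloc;
        void *ptr = nullptr;   /* assigned on allocation success */

        Alloc_guard(Range_allocator &alloc) : alloc(alloc) { }

        /* releases the range on every exit path of the enclosing scope */
        ~Alloc_guard() { if (ptr) alloc.free(ptr); }
    };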
@@ -60,28 +60,30 @@ addr_t Io_mem_session_component::_map_local(addr_t base, size_t size)
 {
 	using namespace Pistachio;

-	addr_t local_base;
+	auto alloc_virt_range = [&] ()
+	{
+		/* special case for the null page */
+		if (is_conventional_memory(base))
+			return base;

 	/* align large I/O dataspaces on a super-page boundary within core */
-	size_t alignment = (size >= get_super_page_size()) ? get_super_page_size_log2()
-	                                                   : get_page_size_log2();
+		size_t const align = (size >= get_super_page_size())
+		                   ? get_super_page_size_log2()
+		                   : get_page_size_log2();

-	/* special case for the null page */
-	if (is_conventional_memory(base))
-		local_base = base;
-
-	else {
-
-		/* find appropriate region for mapping */
-		void *result = 0;
-		if (platform().region_alloc().alloc_aligned(size, &result, alignment).error())
-			error(__func__, ": alloc_aligned failed!");
-
-		local_base = (addr_t)result;
-	}
-
-	unsigned offset = 0;
-	while (size) {
+		return platform().region_alloc().alloc_aligned(size, align).convert<addr_t>(
+			[&] (void *ptr) { return (addr_t)ptr; },
+			[&] (Range_allocator::Alloc_error) -> addr_t {
+				error(__func__, ": alloc_aligned failed!");
+				return 0; });
+	};
+
+	addr_t const local_base = (addr_t)alloc_virt_range();
+
+	if (!local_base)
+		return 0;
+
+	for (unsigned offset = 0; size; ) {

 		size_t page_size = get_page_size();
 		if (can_use_super_page(base + offset, size))
@@ -133,7 +133,7 @@ Irq_session_component::Irq_session_component(Range_allocator &irq_alloc,
 	if (msi)
 		throw Service_denied();

-	if (irq_alloc.alloc_addr(1, _irq_number).error()) {
+	if (irq_alloc.alloc_addr(1, _irq_number).failed()) {
 		error("unavailable IRQ ", Hex(_irq_number), " requested");
 		throw Service_denied();
 	}
@@ -605,51 +605,42 @@ Platform::Platform()

 	core_pd().bind_thread(core_thread);

-	/* core log as ROM module */
+	auto export_page_as_rom_module = [&] (auto rom_name, auto content_fn)
 	{
-		void * phys_ptr = nullptr;
-		unsigned const pages = 1;
-		size_t const log_size = pages << get_page_size_log2();
-
-		ram_alloc().alloc_aligned(log_size, &phys_ptr, get_page_size_log2());
-		addr_t const phys_addr = reinterpret_cast<addr_t>(phys_ptr);
-
-		void * const core_local_ptr = phys_ptr;
-		addr_t const core_local_addr = phys_addr;
-
-		/* let one page free after the log buffer */
-		region_alloc().remove_range(core_local_addr, log_size + get_page_size());
-
-		memset(core_local_ptr, 0, log_size);
-
-		_rom_fs.insert(new (core_mem_alloc()) Rom_module(phys_addr, log_size,
-		                                                 "core_log"));
-
-		init_core_log(Core_log_range { core_local_addr, log_size } );
-	}
+		size_t const size = 1 << get_page_size_log2();
+		ram_alloc().alloc_aligned(size, get_page_size_log2()).with_result(
+
+			[&] (void *phys_ptr) {
+
+				/* core-local memory is one-to-one mapped physical RAM */
+				addr_t const phys_addr      = reinterpret_cast<addr_t>(phys_ptr);
+				void * const core_local_ptr = phys_ptr;
+
+				region_alloc().remove_range((addr_t)core_local_ptr, size);
+				memset(core_local_ptr, 0, size);
+				content_fn(core_local_ptr, size);
+
+				_rom_fs.insert(new (core_mem_alloc())
+				               Rom_module(phys_addr, size, rom_name));
+			},
+			[&] (Range_allocator::Alloc_error) {
+				warning("failed to export ", rom_name, " as ROM module"); }
+		);
+	};
+
+	/* core log as ROM module */
+	export_page_as_rom_module("core_log",
+		[&] (void *core_local_ptr, size_t size) {
+			init_core_log(Core_log_range { (addr_t)core_local_ptr, size } ); });

 	/* export platform specific infos */
-	{
-		void * phys_ptr = nullptr;
-		size_t const size = 1 << get_page_size_log2();
-
-		if (ram_alloc().alloc_aligned(size, &phys_ptr,
-		                              get_page_size_log2()).ok()) {
-			addr_t const phys_addr = reinterpret_cast<addr_t>(phys_ptr);
-			addr_t const core_local_addr = phys_addr;
-
-			region_alloc().remove_range(core_local_addr, size);
-
-			Genode::Xml_generator xml(reinterpret_cast<char *>(core_local_addr),
-			                          size, "platform_info", [&] () {
-				xml.node("kernel", [&] () { xml.attribute("name", "pistachio"); });
-			});
-
-			_rom_fs.insert(new (core_mem_alloc()) Rom_module(phys_addr, size,
-			                                                 "platform_info"));
-		}
-	}
+	export_page_as_rom_module("platform_info",
+		[&] (void *core_local_ptr, size_t size) {
+			Xml_generator xml(reinterpret_cast<char *>(core_local_ptr),
+			                  size, "platform_info",
+				[&] () {
+					xml.node("kernel", [&] () {
+						xml.attribute("name", "pistachio"); }); }); });

 	}
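Note: 'export_page_as_rom_module' separates the fallible plumbing (allocate,
map, zero, insert) from the per-ROM content generation, which is injected as a
generic 'content_fn'. The same factoring can be sketched independently of core
(helper name and diagnostics are hypothetical):

    auto with_zeroed_page = [&] (auto const &fn)
    {
        ram_alloc().alloc_aligned(get_page_size(), get_page_size_log2()).with_result(
            [&] (void *ptr) {
                memset(ptr, 0, get_page_size());
                fn(ptr, get_page_size());   /* caller fills in the content */
            },
            [&] (Range_allocator::Alloc_error e) {
                warning("page allocation failed: ", e); });
    };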
@@ -26,7 +26,8 @@ Region_map::Local_addr
 Core_region_map::attach(Dataspace_capability ds_cap, size_t size, off_t offset,
                         bool use_local_addr, Region_map::Local_addr, bool, bool)
 {
-	auto lambda = [&] (Dataspace_component *ds) -> Local_addr {
+	return _ep.apply(ds_cap, [&] (Dataspace_component *ds) -> Local_addr {
+
 		if (!ds)
 			throw Invalid_dataspace();

@@ -46,21 +47,22 @@ Core_region_map::attach(Dataspace_capability ds_cap, size_t size, off_t offset,
 		}

 		/* allocate range in core's virtual address space */
-		void *virt_addr;
-		if (!platform().region_alloc().alloc(page_rounded_size, &virt_addr)) {
-			error("could not allocate virtual address range in core of size ",
-			      page_rounded_size);
-			return nullptr;
-		}
-
-		/* map the dataspace's physical pages to core-local virtual addresses */
-		size_t num_pages = page_rounded_size >> get_page_size_log2();
-		map_local(ds->phys_addr(), (addr_t)virt_addr, num_pages);
-
-		return virt_addr;
-	};
-
-	return _ep.apply(ds_cap, lambda);
+		return platform().region_alloc().try_alloc(page_rounded_size).convert<Local_addr>(
+			[&] (void *virt_ptr) {
+
+				/* map the dataspace's physical pages to core-local virtual addresses */
+				size_t num_pages = page_rounded_size >> get_page_size_log2();
+				map_local(ds->phys_addr(), (addr_t)virt_ptr, num_pages);
+
+				return virt_ptr;
+			},
+			[&] (Range_allocator::Alloc_error) -> Local_addr {
+				error("could not allocate virtual address range in core of size ",
+				      page_rounded_size);
+				return nullptr;
+			}
+		);
+	});
 }
@@ -51,21 +51,17 @@ class Genode::Static_allocator : public Allocator

 		class Alloc_failed { };

-		bool alloc(size_t size, void **out_addr) override
+		Alloc_result try_alloc(size_t size) override
 		{
-			*out_addr = nullptr;
-
 			if (size > sizeof(Elem_space)) {
 				error("unexpected allocation size of ", size);
-				return false;
+				return Alloc_error::DENIED;
 			}

 			try {
-				*out_addr = &_elements[_used.alloc()]; }
+				return &_elements[_used.alloc()]; }
 			catch (typename Bit_allocator<MAX>::Out_of_indices) {
-				return false; }
-
-			return true;
+				return Alloc_error::DENIED; }
 		}

 		size_t overhead(size_t) const override { return 0; }
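Note: from an implementor's perspective, 'Alloc_result' converts implicitly
from both a 'void *' (success) and an 'Alloc_error' (failure), which is why
'Static_allocator::try_alloc' above can simply return either. A toy bump
allocator written against the new interface, as a sketch (all names
hypothetical):

    struct Bump_allocator : Genode::Allocator
    {
        char   _buf[4096];
        size_t _used = 0;

        Alloc_result try_alloc(size_t size) override
        {
            if (_used + size > sizeof(_buf))
                return Alloc_error::DENIED;   /* no exception thrown */

            void * const ptr = &_buf[_used];
            _used += size;
            return ptr;
        }

        void   free(void *, size_t)       override { }
        size_t consumed()           const override { return _used; }
        size_t overhead(size_t)     const override { return 0; }
        bool   need_size_for_free() const override { return false; }
    };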
@@ -33,17 +33,17 @@ struct Genode::Untyped_memory

 	static inline addr_t alloc_pages(Range_allocator &phys_alloc, size_t num_pages)
 	{
-		void *out_ptr = nullptr;
-		Range_allocator::Alloc_return alloc_ret =
-			phys_alloc.alloc_aligned(num_pages*get_page_size(), &out_ptr,
-			                         get_page_size_log2());
-
-		if (alloc_ret.error()) {
-			error(__PRETTY_FUNCTION__, ": allocation of untyped memory failed");
-			throw Phys_alloc_failed();
-		}
-
-		return (addr_t)out_ptr;
+		size_t   const size  = num_pages*get_page_size();
+		unsigned const align = get_page_size_log2();
+
+		return phys_alloc.alloc_aligned(size, align).convert<addr_t>(
+
+			[&] (void *ptr) {
+				return (addr_t)ptr; },
+
+			[&] (Range_allocator::Alloc_error) -> addr_t {
+				error(__PRETTY_FUNCTION__, ": allocation of untyped memory failed");
+				throw Phys_alloc_failed(); });
 	}
@@ -109,7 +109,7 @@ Irq_session_component::Irq_session_component(Range_allocator &irq_alloc,
 	if (msi)
 		throw Service_denied();

-	if (irq_alloc.alloc_addr(1, _irq_number).error()) {
+	if (irq_alloc.alloc_addr(1, _irq_number).failed()) {
 		error("unavailable IRQ ", _irq_number, " requested");
 		throw Service_denied();
 	}
@@ -285,27 +285,29 @@ void Platform::_init_rom_modules()
 	static Tslab<Rom_module, sizeof(slab_block)>
 		rom_module_slab(core_mem_alloc(), &slab_block);

-	/*
-	 * Allocate unused range of phys CNode address space where to make the
-	 * boot modules available.
-	 */
-	void *out_ptr = nullptr;
-	size_t const modules_size = (addr_t)&_boot_modules_binaries_end
-	                          - (addr_t)&_boot_modules_binaries_begin + 1;
-
-	Range_allocator::Alloc_return const alloc_ret =
-		_unused_phys_alloc.alloc_aligned(modules_size, &out_ptr, get_page_size_log2());
-
-	if (alloc_ret.error()) {
-		error("could not reserve phys CNode space for boot modules");
-		struct Init_rom_modules_failed { };
-		throw Init_rom_modules_failed();
-	}
+	auto alloc_modules_range = [&] () -> addr_t
+	{
+		/*
+		 * Allocate unused range of phys CNode address space where to make the
+		 * boot modules available.
+		 */
+		size_t const size  = (addr_t)&_boot_modules_binaries_end
+		                   - (addr_t)&_boot_modules_binaries_begin + 1;
+		size_t const align = get_page_size_log2();
+
+		return _unused_phys_alloc.alloc_aligned(size, align).convert<addr_t>(
+			[&] (void *ptr) { return (addr_t)ptr; },
+			[&] (Range_allocator::Alloc_error) -> addr_t {
+				error("could not reserve phys CNode space for boot modules");
+				struct Init_rom_modules_failed { };
+				throw Init_rom_modules_failed();
+			});
+	};

 	/*
 	 * Calculate frame frame selector used to back the boot modules
 	 */
-	addr_t const unused_range_start     = (addr_t)out_ptr;
+	addr_t const unused_range_start     = alloc_modules_range();
 	addr_t const unused_first_frame_sel = unused_range_start >> get_page_size_log2();
 	addr_t const modules_start          = (addr_t)&_boot_modules_binaries_begin;
 	addr_t const modules_core_offset    = modules_start
@@ -349,36 +351,10 @@ void Platform::_init_rom_modules()
 			                           (const char*)header->name);

 			_rom_fs.insert(rom_module);
-	}
+	};

-	/* export x86 platform specific infos via 'platform_info' ROM */
-	{
-		unsigned const pages    = 1;
-		addr_t const rom_size   = pages << get_page_size_log2();
-		void *virt_ptr          = nullptr;
-		const char *rom_name    = "platform_info";
-
-		addr_t const phys_addr = Untyped_memory::alloc_page(ram_alloc());
-		Untyped_memory::convert_to_page_frames(phys_addr, pages);
-
-		if (region_alloc().alloc_aligned(rom_size, &virt_ptr, get_page_size_log2()).error()) {
-			error("could not setup platform_info ROM - region allocation error");
-			Untyped_memory::free_page(ram_alloc(), phys_addr);
-			return;
-		}
-		addr_t const virt_addr = reinterpret_cast<addr_t>(virt_ptr);
-
-		if (!map_local(phys_addr, virt_addr, pages, this)) {
-			error("could not setup platform_info ROM - map error");
-			region_alloc().free(virt_ptr);
-			Untyped_memory::free_page(ram_alloc(), phys_addr);
-			return;
-		}
-
-		Genode::Xml_generator xml(reinterpret_cast<char *>(virt_addr),
-		                          rom_size, rom_name, [&] ()
+	auto gen_platform_info = [&] (Xml_generator &xml)
 		{

 			if (!bi.extraLen)
 				return;
@@ -489,16 +465,69 @@ void Platform::_init_rom_modules()
 			}
 		});
 	}
-	});
-
-	if (!unmap_local(virt_addr, pages, this)) {
-		error("could not setup platform_info ROM - unmap error");
-		return;
-	}
-	region_alloc().free(virt_ptr);
-
-	_rom_fs.insert(
-		new (core_mem_alloc()) Rom_module(phys_addr, rom_size, rom_name));
+	};
+
+	/* export x86 platform specific infos via 'platform_info' ROM */
+	auto export_page_as_rom_module = [&] (auto rom_name, auto content_fn)
+	{
+		constexpr unsigned const pages = 1;
+
+		struct Phys_alloc_guard
+		{
+			Range_allocator &_alloc;
+
+			addr_t const addr = Untyped_memory::alloc_page(_alloc);
+
+			bool keep = false;
+
+			Phys_alloc_guard(Range_allocator &alloc) :_alloc(alloc)
+			{
+				Untyped_memory::convert_to_page_frames(addr, pages);
+			}
+
+			~Phys_alloc_guard()
+			{
+				if (keep)
+					return;
+
+				Untyped_memory::free_page(_alloc, addr);
+			}
+		} phys { ram_alloc() };
+
+		addr_t const size  = pages << get_page_size_log2();
+		size_t const align = get_page_size_log2();
+
+		region_alloc().alloc_aligned(size, align).with_result(
+
+			[&] (void *core_local_ptr) {
+
+				if (!map_local(phys.addr, (addr_t)core_local_ptr, pages, this)) {
+					error("could not setup platform_info ROM - map error");
+					region_alloc().free(core_local_ptr);
+					return;
+				}
+
+				memset(core_local_ptr, 0, size);
+				content_fn((char *)core_local_ptr, size);
+
+				_rom_fs.insert(
+					new (core_mem_alloc()) Rom_module(phys.addr, size, rom_name));
+
+				phys.keep = true;
+			},
+
+			[&] (Range_allocator::Alloc_error) {
+				error("could not setup platform_info ROM - region allocation error");
+			}
+		);
+	};
+
+	export_page_as_rom_module("platform_info", [&] (char *ptr, size_t size) {
+		Xml_generator xml(ptr, size, "platform_info", [&] () {
+			gen_platform_info(xml); }); });
+
+	export_page_as_rom_module("core_log", [&] (char *ptr, size_t size) {
+		init_core_log(Core_log_range { (addr_t)ptr, size } ); });
 }
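Note: 'Phys_alloc_guard' extends the plain scope guard with a commit flag.
Every early exit of the success lambda releases the physical page; only the
fully successful path sets 'keep'. Stripped to its essence (hypothetical
reduction of the code above):

    struct Page_guard
    {
        Range_allocator &_alloc;
        addr_t const     addr;
        bool             keep = false;   /* set only after full success */

        Page_guard(Range_allocator &alloc)
        : _alloc(alloc), addr(Untyped_memory::alloc_page(alloc)) { }

        /* roll back the acquisition unless the caller committed it */
        ~Page_guard() { if (!keep) Untyped_memory::free_page(_alloc, addr); }
    };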
@@ -560,17 +589,21 @@ Platform::Platform()

 	/* add some minor virtual region for dynamic usage by core */
 	addr_t const virt_size = 32 * 1024 * 1024;
-	void * virt_ptr = nullptr;
-	if (_unused_virt_alloc.alloc_aligned(virt_size, &virt_ptr, get_page_size_log2()).ok()) {
+	_unused_virt_alloc.alloc_aligned(virt_size, get_page_size_log2()).with_result(

-		addr_t const virt_addr = (addr_t)virt_ptr;
+		[&] (void *virt_ptr) {
+			addr_t const virt_addr = (addr_t)virt_ptr;

-		/* add to available virtual region of core */
-		_core_mem_alloc.virt_alloc().add_range(virt_addr, virt_size);
+			/* add to available virtual region of core */
+			_core_mem_alloc.virt_alloc().add_range(virt_addr, virt_size);

-		/* back region by page tables */
-		_core_vm_space.unsynchronized_alloc_page_tables(virt_addr, virt_size);
-	}
+			/* back region by page tables */
+			_core_vm_space.unsynchronized_alloc_page_tables(virt_addr, virt_size);
+		},
+
+		[&] (Range_allocator::Alloc_error) {
+			warning("failed to reserve core virtual memory for dynamic use"); }
+	);

 	/* add idle thread trace subjects */
 	for (unsigned cpu_id = 0; cpu_id < affinity_space().width(); cpu_id ++) {
@@ -623,28 +656,6 @@ Platform::Platform()
 	/* I/O port allocator (only meaningful for x86) */
 	_io_port_alloc.add_range(0, 0x10000);

-	/* core log as ROM module */
-	{
-		void * core_local_ptr = nullptr;
-		unsigned const pages  = 1;
-		size_t const log_size = pages << get_page_size_log2();
-
-		addr_t const phys_addr = Untyped_memory::alloc_page(ram_alloc());
-		Untyped_memory::convert_to_page_frames(phys_addr, pages);
-
-		/* let one page free after the log buffer */
-		region_alloc().alloc_aligned(log_size + get_page_size(), &core_local_ptr, get_page_size_log2());
-		addr_t const core_local_addr = reinterpret_cast<addr_t>(core_local_ptr);
-
-		map_local(phys_addr, core_local_addr, pages, this);
-		memset(core_local_ptr, 0, log_size);
-
-		_rom_fs.insert(new (core_mem_alloc()) Rom_module(phys_addr, log_size,
-		                                                 "core_log"));
-
-		init_core_log(Core_log_range { core_local_addr, log_size } );
-	}
-
 	_init_rom_modules();

 	platform_in_construction = nullptr;
@@ -105,30 +105,34 @@ static void prepopulate_ipc_buffer(addr_t ipc_buffer_phys, Cap_sel ep_sel,
 	size_t const page_rounded_size = get_page_size();

 	/* allocate range in core's virtual address space */
-	void *virt_addr;
-	if (!platform().region_alloc().alloc(page_rounded_size, &virt_addr)) {
-		error("could not allocate virtual address range in core of size ",
-		      page_rounded_size);
-		return;
-	}
+	platform().region_alloc().try_alloc(page_rounded_size).with_result(

-	/* map the IPC buffer to core-local virtual addresses */
-	map_local(ipc_buffer_phys, (addr_t)virt_addr, 1);
+		[&] (void *virt_ptr) {

-	/* populate IPC buffer with thread information */
-	Native_utcb &utcb = *(Native_utcb *)virt_addr;
-	utcb.ep_sel (ep_sel .value());
-	utcb.lock_sel(lock_sel.value());
+			/* map the IPC buffer to core-local virtual addresses */
+			map_local(ipc_buffer_phys, (addr_t)virt_ptr, 1);

-	/* unmap IPC buffer from core */
-	if (!unmap_local((addr_t)virt_addr, 1)) {
-		error("could not unmap core virtual address ",
-		      virt_addr, " in ", __PRETTY_FUNCTION__);
-		return;
-	}
+			/* populate IPC buffer with thread information */
+			Native_utcb &utcb = *(Native_utcb *)virt_ptr;
+			utcb.ep_sel (ep_sel .value());
+			utcb.lock_sel(lock_sel.value());

-	/* free core's virtual address space */
-	platform().region_alloc().free(virt_addr, page_rounded_size);
+			/* unmap IPC buffer from core */
+			if (!unmap_local((addr_t)virt_ptr, 1)) {
+				error("could not unmap core virtual address ",
+				      virt_ptr, " in ", __PRETTY_FUNCTION__);
+				return;
+			}
+
+			/* free core's virtual address space */
+			platform().region_alloc().free(virt_ptr, page_rounded_size);
+		},
+
+		[&] (Range_allocator::Alloc_error) {
+			error("could not allocate virtual address range in core of size ",
+			      page_rounded_size);
+		}
+	);
 }
@@ -42,14 +42,15 @@ void Ram_dataspace_factory::_clear_ds (Dataspace_component &ds)
 	size_t const page_rounded_size = (ds.size() + get_page_size() - 1) & get_page_mask();

 	/* allocate one page in core's virtual address space */
-	void *virt_addr_ptr = nullptr;
-	if (!platform().region_alloc().alloc(get_page_size(), &virt_addr_ptr))
-		ASSERT_NEVER_CALLED;
-
-	if (!virt_addr_ptr)
-		ASSERT_NEVER_CALLED;
-
-	addr_t const virt_addr = reinterpret_cast<addr_t>(virt_addr_ptr);
+	auto alloc_one_virt_page = [&] () -> void *
+	{
+		return platform().region_alloc().try_alloc(get_page_size()).convert<void *>(
+			[&] (void *ptr) { return ptr; },
+			[&] (Range_allocator::Alloc_error) -> void * {
+				ASSERT_NEVER_CALLED; });
+	};
+
+	addr_t const virt_addr = (addr_t)alloc_one_virt_page();

 	/* map each page of dataspace one at a time and clear it */
 	for (addr_t offset = 0; offset < page_rounded_size; offset += get_page_size())
@@ -72,5 +73,5 @@ void Ram_dataspace_factory::_clear_ds (Dataspace_component &ds)
 	}

 	/* free core's virtual address space */
-	platform().region_alloc().free(virt_addr_ptr, get_page_size());
+	platform().region_alloc().free((void *)virt_addr, get_page_size());
 }
@@ -18,6 +18,7 @@
 #include <base/stdint.h>
 #include <base/exception.h>
 #include <base/quota_guard.h>
+#include <base/ram_allocator.h>

 namespace Genode {

@@ -61,7 +62,14 @@ struct Genode::Allocator : Deallocator
 	/**
 	 * Exception type
 	 */
-	typedef Out_of_ram Out_of_memory;
+	using Out_of_memory = Out_of_ram;
+	using Denied        = Ram_allocator::Denied;
+
+	/**
+	 * Return type of 'try_alloc'
+	 */
+	using Alloc_error  = Ram_allocator::Alloc_error;
+	using Alloc_result = Attempt<void *, Alloc_error>;

 	/**
 	 * Destructor
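Note: these aliases are the heart of the interface change. A caller receives
an 'Attempt' that carries either a pointer or an 'Alloc_error' and must take a
stance on both outcomes before the value can be used. The three idiomatic
consumer patterns, sketched against a hypothetical 'alloc' reference:

    /* 1. handle both outcomes in place */
    alloc.try_alloc(size).with_result(
        [&] (void *ptr)                    { /* use ptr */ },
        [&] (Allocator::Alloc_error e)     { /* report or degrade */ });

    /* 2. fold the outcome into a value */
    void *p = alloc.try_alloc(size).convert<void *>(
        [&] (void *ptr)                          { return ptr; },
        [&] (Allocator::Alloc_error) -> void *   { return nullptr; });

    /* 3. fall back to the exception-based 'alloc' wrapper (see below) */
    void *q = alloc.alloc(size);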
@@ -74,32 +82,8 @@ struct Genode::Allocator : Deallocator
 	 * \param size      block size to allocate
 	 * \param out_addr  resulting pointer to the new block,
 	 *                  undefined in the error case
-	 *
-	 * \throw Out_of_ram
-	 * \throw Out_of_caps
-	 *
-	 * \return true on success
 	 */
-	virtual bool alloc(size_t size, void **out_addr) = 0;
-
-	/**
-	 * Allocate typed block
-	 *
-	 * This template allocates a typed block returned as a pointer to
-	 * a non-void type. By providing this method, we prevent the
-	 * compiler from warning us about "dereferencing type-punned
-	 * pointer will break strict-aliasing rules".
-	 *
-	 * \throw Out_of_ram
-	 * \throw Out_of_caps
-	 */
-	template <typename T> bool alloc(size_t size, T **out_addr)
-	{
-		void *addr = 0;
-		bool ret = alloc(size, &addr);
-		*out_addr = (T *)addr;
-		return ret;
-	}
+	virtual Alloc_result try_alloc(size_t size) = 0;

 	/**
 	 * Return total amount of backing store consumed by the allocator
@@ -111,6 +95,19 @@ struct Genode::Allocator : Deallocator
 	 */
 	virtual size_t overhead(size_t size) const = 0;

+	/**
+	 * Raise exception according to the 'error' value
+	 */
+	static void throw_alloc_error(Alloc_error error) __attribute__((noreturn))
+	{
+		switch (error) {
+		case Alloc_error::OUT_OF_RAM:  throw Out_of_ram();
+		case Alloc_error::OUT_OF_CAPS: throw Out_of_caps();
+		case Alloc_error::DENIED:      break;
+		}
+		throw Denied();
+	}
+
 	/**
 	 * Allocate block and signal error as an exception
 	 *
@@ -118,16 +115,16 @@ struct Genode::Allocator : Deallocator
 	 *
 	 * \throw Out_of_ram
	 * \throw Out_of_caps
+	 * \throw Denied
 	 *
 	 * \return pointer to the new block
 	 */
 	void *alloc(size_t size)
 	{
-		void *result = 0;
-		if (!alloc(size, &result))
-			throw Out_of_memory();
-		return result;
+		return try_alloc(size).convert<void *>(
+			[&] (void *ptr) { return ptr; },
+			[&] (Alloc_error error) -> void * {
+				throw_alloc_error(error); });
 	}
 };
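Note: as the hunk shows, 'alloc' is no longer pure virtual but a thin shim
over 'try_alloc' that rethrows via 'throw_alloc_error'. Exception-based
callers therefore keep working, including placement 'new' over an allocator,
e.g. (type and allocator name illustrative):

    Session *s = new (md_alloc) Session(args);   /* may throw Out_of_ram,
                                                    Out_of_caps, or Denied */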
@@ -139,32 +136,21 @@ struct Genode::Range_allocator : Allocator
 	 */
 	virtual ~Range_allocator() { }

+	/**
+	 * Return type of range-management operations
+	 */
+	struct Range_ok { };
+	using Range_result = Attempt<Range_ok, Alloc_error>;
+
 	/**
 	 * Add free address range to allocator
 	 */
-	virtual int add_range(addr_t base, size_t size) = 0;
+	virtual Range_result add_range(addr_t base, size_t size) = 0;

 	/**
 	 * Remove address range from allocator
 	 */
-	virtual int remove_range(addr_t base, size_t size) = 0;
-
-	/**
-	 * Return value of allocation functons
-	 *
-	 * 'OK' on success, or
-	 * 'OUT_OF_METADATA' if meta-data allocation failed, or
-	 * 'RANGE_CONFLICT' if no fitting address range is found
-	 */
-	struct Alloc_return
-	{
-		enum Value { OK = 0, OUT_OF_METADATA = -1, RANGE_CONFLICT = -2 };
-		Value const value;
-		Alloc_return(Value value) : value(value) { }
-
-		bool ok()    const { return value == OK; }
-		bool error() const { return !ok(); }
-	};
+	virtual Range_result remove_range(addr_t base, size_t size) = 0;

 	struct Range { addr_t start, end; };

@@ -172,21 +158,18 @@ struct Genode::Range_allocator : Allocator
 	 * Allocate block
 	 *
 	 * \param size   size of new block
-	 * \param out_addr  start address of new block,
-	 *                  undefined in the error case
 	 * \param align  alignment of new block specified
 	 *               as the power of two
 	 * \param range  address-range constraint for the allocation
 	 */
-	virtual Alloc_return alloc_aligned(size_t size, void **out_addr,
-	                                   unsigned align, Range range) = 0;
+	virtual Alloc_result alloc_aligned(size_t size, unsigned align, Range range) = 0;

 	/**
 	 * Allocate block without constraining the address range
 	 */
-	Alloc_return alloc_aligned(size_t size, void **out_addr, unsigned align)
+	Alloc_result alloc_aligned(size_t size, unsigned align)
 	{
-		return alloc_aligned(size, out_addr, align, Range { .start = 0, .end = ~0UL });
+		return alloc_aligned(size, align, Range { .start = 0, .end = ~0UL });
 	}

 	/**
@@ -194,12 +177,8 @@ struct Genode::Range_allocator : Allocator
 	 *
 	 * \param size  size of new block
 	 * \param addr  desired address of block
-	 *
-	 * \return 'ALLOC_OK' on success, or
-	 *         'OUT_OF_METADATA' if meta-data allocation failed, or
-	 *         'RANGE_CONFLICT' if specified range is occupied
 	 */
-	virtual Alloc_return alloc_addr(size_t size, addr_t addr) = 0;
+	virtual Alloc_result alloc_addr(size_t size, addr_t addr) = 0;

 	/**
 	 * Free a previously allocated block
@@ -326,4 +305,32 @@ void Genode::destroy(DEALLOC && dealloc, T *obj)
 	operator delete (obj, dealloc);
 }

+
+namespace Genode {
+
+	void static inline print(Output &out, Allocator::Alloc_error error)
+	{
+		using Error = Allocator::Alloc_error;
+
+		auto name = [] (Error error)
+		{
+			switch (error) {
+			case Error::OUT_OF_RAM:  return "OUT_OF_RAM";
+			case Error::OUT_OF_CAPS: return "OUT_OF_CAPS";
+			case Error::DENIED:      return "DENIED";
+			}
+			return "<unknown>";
+		};
+
+		Genode::print(out, name(error));
+	}
+
+	void static inline print(Output &out, Allocator::Alloc_result result)
+	{
+		result.with_result(
+			[&] (void *ptr) { Genode::print(out, ptr); },
+			[&] (auto error) { Genode::print(out, error); });
+	}
+}
+
 #endif /* _INCLUDE__BASE__ALLOCATOR_H_ */
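Note: the 'print' overloads make both the error codes and whole results
directly loggable, so converted call sites can forward the error value instead
of flattening it to a generic string. For example (sketch):

    alloc.try_alloc(size).with_result(
        [&] (void *ptr) { /* ... */ },
        [&] (Allocator::Alloc_error e) {
            Genode::error("backing-store allocation failed: ", e); });

prints 'OUT_OF_RAM', 'OUT_OF_CAPS', or 'DENIED' according to the error.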
@@ -163,25 +163,29 @@ class Genode::Allocator_avl_base : public Range_allocator

 	private:

 		Avl_tree<Block> _addr_tree { };  /* blocks sorted by base address */
-		Allocator *_md_alloc { nullptr };  /* meta-data allocator */
+		Allocator &_md_alloc;  /* meta-data allocator */
 		size_t _md_entry_size { 0 };  /* size of block meta-data entry */

+		struct Two_blocks { Block *b1_ptr, *b2_ptr; };
+
+		using Alloc_md_result     = Attempt<Block *, Alloc_error>;
+		using Alloc_md_two_result = Attempt<Two_blocks, Alloc_error>;
+
 		/**
 		 * Alloc meta-data block
 		 */
-		Block *_alloc_block_metadata();
+		Alloc_md_result _alloc_block_metadata();

 		/**
 		 * Alloc two meta-data blocks in a transactional way
 		 */
-		bool _alloc_two_blocks_metadata(Block **dst1, Block **dst2);
+		Alloc_md_two_result _alloc_two_blocks_metadata();

 		/**
 		 * Create new block
 		 */
-		int _add_block(Block *block_metadata,
-		               addr_t base, size_t size, bool used);
+		void _add_block(Block &block_metadata, addr_t base, size_t size, bool used);

 		Block *_find_any_used_block(Block *sub_tree);
 		Block *_find_any_unused_block(Block *sub_tree);
@@ -189,7 +193,7 @@ class Genode::Allocator_avl_base : public Range_allocator
 		/**
 		 * Destroy block
 		 */
-		void _destroy_block(Block *b);
+		void _destroy_block(Block &b);

 		/**
 		 * Cut specified area from block
@@ -197,8 +201,13 @@ class Genode::Allocator_avl_base : public Range_allocator
 		 * The original block gets replaced by (up to) two smaller blocks
 		 * with remaining space.
 		 */
-		void _cut_from_block(Block *b, addr_t cut_addr, size_t cut_size,
-		                     Block *dst1, Block *dst2);
+		void _cut_from_block(Block &b, addr_t cut_addr, size_t cut_size, Two_blocks);
+
+		template <typename ANY_BLOCK_FN>
+		void _revert_block_ranges(ANY_BLOCK_FN const &);
+
+		template <typename SEARCH_FN>
+		Alloc_result _allocate(size_t, unsigned, Range, SEARCH_FN const &);

 	protected:

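Note: returning 'Attempt<Two_blocks, Alloc_error>' makes the transactional
contract of '_alloc_two_blocks_metadata' explicit: the caller observes either
two valid meta-data blocks or a single error, never a half-allocated pair. A
consumer sketch:

    _alloc_two_blocks_metadata().with_result(
        [&] (Two_blocks two) { /* use two.b1_ptr and two.b2_ptr */ },
        [&] (Alloc_error)    { /* nothing to roll back */ });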
@@ -234,7 +243,7 @@ class Genode::Allocator_avl_base : public Range_allocator
 		 * we can attach custom information to block meta data.
 		 */
 		Allocator_avl_base(Allocator *md_alloc, size_t md_entry_size) :
-			_md_alloc(md_alloc), _md_entry_size(md_entry_size) { }
+			_md_alloc(*md_alloc), _md_entry_size(md_entry_size) { }

 		~Allocator_avl_base() { _revert_allocations_and_ranges(); }

@@ -258,10 +267,10 @@ class Genode::Allocator_avl_base : public Range_allocator
 		 ** Range allocator interface **
 		 *******************************/

-		int add_range(addr_t base, size_t size) override;
-		int remove_range(addr_t base, size_t size) override;
-		Alloc_return alloc_aligned(size_t, void **, unsigned, Range) override;
-		Alloc_return alloc_addr(size_t size, addr_t addr) override;
+		Range_result add_range(addr_t base, size_t size) override;
+		Range_result remove_range(addr_t base, size_t size) override;
+		Alloc_result alloc_aligned(size_t, unsigned, Range) override;
+		Alloc_result alloc_addr(size_t size, addr_t addr) override;
 		void free(void *addr) override;
 		size_t avail() const override;
 		bool valid_addr(addr_t addr) const override;
@@ -273,10 +282,9 @@ class Genode::Allocator_avl_base : public Range_allocator
 		 ** Allocator interface **
 		 *************************/

-		bool alloc(size_t size, void **out_addr) override
+		Alloc_result try_alloc(size_t size) override
 		{
-			return (Allocator_avl_base::alloc_aligned(
-				size, out_addr, log2(sizeof(addr_t))).ok());
+			return Allocator_avl_base::alloc_aligned(size, log2(sizeof(addr_t)));
 		}

 		void free(void *addr, size_t) override { free(addr); }
@@ -385,7 +393,7 @@ class Genode::Allocator_avl_tpl : public Allocator_avl_base
 			return b && b->used() ? b : 0;
 		}

-		int add_range(addr_t base, size_t size) override
+		Range_result add_range(addr_t base, size_t size) override
 		{
 			/*
 			 * We disable the slab block allocation while
@@ -395,9 +403,9 @@ class Genode::Allocator_avl_tpl : public Allocator_avl_base
 			 */
 			Allocator *md_bs = _metadata.backing_store();
 			_metadata.backing_store(0);
-			int ret = Allocator_avl_base::add_range(base, size);
+			Range_result result = Allocator_avl_base::add_range(base, size);
 			_metadata.backing_store(md_bs);
-			return ret;
+			return result;
 		}

 		/**
@@ -96,32 +96,26 @@ class Genode::Heap : public Allocator
 		size_t _quota_used { 0 };
 		size_t _chunk_size { 0 };

+		using Alloc_ds_result = Attempt<Dataspace *, Alloc_error>;
+
 		/**
 		 * Allocate a new dataspace of the specified size
 		 *
 		 * \param size                       number of bytes to allocate
 		 * \param enforce_separate_metadata  if true, the new dataspace
 		 *                                   will not contain any meta data
-		 * \throw                            Region_map::Invalid_dataspace,
-		 *                                   Region_map::Region_conflict
-		 * \return                           0 on success or negative error code
 		 */
-		Heap::Dataspace *_allocate_dataspace(size_t size, bool enforce_separate_metadata);
+		Alloc_ds_result _allocate_dataspace(size_t size, bool enforce_separate_metadata);

 		/**
 		 * Try to allocate block at our local allocator
-		 *
-		 * \return true on success
-		 *
-		 * This method is a utility used by '_unsynchronized_alloc' to
-		 * avoid code duplication.
 		 */
-		bool _try_local_alloc(size_t size, void **out_addr);
+		Alloc_result _try_local_alloc(size_t size);

 		/**
-		 * Unsynchronized implementation of 'alloc'
+		 * Unsynchronized implementation of 'try_alloc'
 		 */
-		bool _unsynchronized_alloc(size_t size, void **out_addr);
+		Alloc_result _unsynchronized_alloc(size_t size);

 	public:

@@ -167,11 +161,11 @@ class Genode::Heap : public Allocator
 		 ** Allocator interface **
 		 *************************/

-		bool alloc(size_t, void **) override;
+		Alloc_result try_alloc(size_t) override;
 		void free(void *, size_t) override;
 		size_t consumed() const override { return _quota_used; }
 		size_t overhead(size_t size) const override { return _alloc->overhead(size); }
 		bool need_size_for_free() const override { return false; }
 };


@@ -222,11 +216,11 @@ class Genode::Sliced_heap : public Allocator
 		 ** Allocator interface **
 		 *************************/

-		bool alloc(size_t, void **) override;
+		Alloc_result try_alloc(size_t) override;
 		void free(void *, size_t) override;
 		size_t consumed() const override { return _consumed; }
 		size_t overhead(size_t size) const override;
 		bool need_size_for_free() const override { return false; }
 };

 #endif /* _INCLUDE__BASE__HEAP_H_ */
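Note: with 'Heap' and 'Sliced_heap' now implementing 'try_alloc', a component
can probe for memory without installing exception handlers. A caller-side
sketch (construction details and reaction functions are hypothetical):

    Genode::Heap heap(env.ram(), env.rm());

    heap.try_alloc(bytes).with_result(
        [&] (void *ptr)                      { consume(ptr, bytes); },
        [&] (Genode::Allocator::Alloc_error) { reduce_workload(); });

'consume' and 'reduce_workload' stand in for application-specific reactions.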
@@ -44,10 +44,12 @@ class Genode::Slab : public Allocator

 		Allocator *_backing_store;

+		using New_slab_block_result = Attempt<Block *, Alloc_error>;
+
 		/**
 		 * Allocate and initialize new slab block
 		 */
-		Block *_new_slab_block();
+		New_slab_block_result _new_slab_block();


 		/*****************************
@@ -58,11 +60,17 @@ class Genode::Slab : public Allocator

 		/**
 		 * Insert block into slab block ring
-		 *
-		 * \noapi
 		 */
 		void _insert_sb(Block *);

+		struct Expand_ok { };
+		using Expand_result = Attempt<Expand_ok, Alloc_error>;
+
+		/**
+		 * Expand slab by one block
+		 */
+		Expand_result _expand();
+
 		/**
 		 * Release slab block
 		 */
@@ -88,6 +96,10 @@ class Genode::Slab : public Allocator
 		 * block that is used for the first couple of allocations,
 		 * especially for the allocation of the second slab
 		 * block.
+		 *
+		 * \throw Out_of_ram
+		 * \throw Out_of_caps
+		 * \throw Allocator::Denied  failed to obtain initial slab block
 		 */
 		Slab(size_t slab_size, size_t block_size, void *initial_sb,
 		     Allocator *backing_store = 0);
@@ -154,7 +166,7 @@ class Genode::Slab : public Allocator
 		 * The 'size' parameter is ignored as only slab entries with
 		 * preconfigured slab-entry size are allocated.
 		 */
-		bool alloc(size_t size, void **addr) override;
+		Alloc_result try_alloc(size_t size) override;
 		void free(void *addr, size_t) override { _free(addr); }
 		size_t consumed() const override;
 		size_t overhead(size_t) const override { return _block_size/_entries_per_block; }
@@ -56,8 +56,8 @@ class Genode::Synced_allocator : public Allocator
 		 ** Allocator interface **
 		 *************************/

-		bool alloc(size_t size, void **out_addr) override {
-			return _synced_object()->alloc(size, out_addr); }
+		Alloc_result try_alloc(size_t size) override {
+			return _synced_object()->try_alloc(size); }

 		void free(void *addr, size_t size) override {
 			_synced_object()->free(addr, size); }
|
@ -64,7 +64,7 @@ _ZN6Genode10Ipc_serverC2Ev T
 _ZN6Genode10Ipc_serverD1Ev T
 _ZN6Genode10Ipc_serverD2Ev T
 _ZN6Genode11Sliced_heap4freeEPvm T
-_ZN6Genode11Sliced_heap5allocEmPPv T
+_ZN6Genode11Sliced_heap9try_allocEm T
 _ZN6Genode11Sliced_heapC1ERNS_13Ram_allocatorERNS_10Region_mapE T
 _ZN6Genode11Sliced_heapC2ERNS_13Ram_allocatorERNS_10Region_mapE T
 _ZN6Genode11Sliced_heapD0Ev T
@ -165,23 +165,15 @@ _ZN6Genode17Timeout_schedulerC2ERNS_11Time_sourceENS_12MicrosecondsE T
 _ZN6Genode17Timeout_schedulerD0Ev T
 _ZN6Genode17Timeout_schedulerD1Ev T
 _ZN6Genode17Timeout_schedulerD2Ev T
-_ZN6Genode18Allocator_avl_base10_add_blockEPNS0_5BlockEmmb T
+_ZN6Genode18Allocator_avl_base10_add_blockERNS0_5BlockEmmb T
 _ZN6Genode18Allocator_avl_base10alloc_addrEmm T
 _ZN6Genode18Allocator_avl_base12remove_rangeEmm T
-_ZN6Genode18Allocator_avl_base13alloc_alignedEmPPvjNS_15Range_allocator5RangeE T
-_ZN6Genode18Allocator_avl_base14_destroy_blockEPNS0_5BlockE T
+_ZN6Genode18Allocator_avl_base13alloc_alignedEmjNS_15Range_allocator5RangeE T
 _ZN6Genode18Allocator_avl_base14any_block_addrEPm T
-_ZN6Genode18Allocator_avl_base15_cut_from_blockEPNS0_5BlockEmmS2_S2_ T
-_ZN6Genode18Allocator_avl_base20_find_any_used_blockEPNS0_5BlockE T
-_ZN6Genode18Allocator_avl_base21_alloc_block_metadataEv T
 _ZN6Genode18Allocator_avl_base21_revert_unused_rangesEv T
-_ZN6Genode18Allocator_avl_base22_find_any_unused_blockEPNS0_5BlockE T
-_ZN6Genode18Allocator_avl_base26_alloc_two_blocks_metadataEPPNS0_5BlockES3_ T
 _ZN6Genode18Allocator_avl_base30_revert_allocations_and_rangesEv T
 _ZN6Genode18Allocator_avl_base4freeEPv T
-_ZN6Genode18Allocator_avl_base5Block13find_best_fitEmjmm T
 _ZN6Genode18Allocator_avl_base5Block15find_by_addressEmmb T
-_ZN6Genode18Allocator_avl_base5Block16avail_in_subtreeEv T
 _ZN6Genode18Allocator_avl_base5Block9recomputeEv T
 _ZN6Genode18Allocator_avl_base9add_rangeEmm T
 _ZN6Genode18Signal_transmitter6submitEj T
@ -201,8 +193,10 @@ _ZN6Genode3Raw7_outputEv T
 _ZN6Genode3Raw8_acquireEv T
 _ZN6Genode3Raw8_releaseEv T
 _ZN6Genode4Heap11quota_limitEm T
+_ZN6Genode4Heap14Dataspace_poolD1Ev T
+_ZN6Genode4Heap14Dataspace_poolD2Ev T
 _ZN6Genode4Heap4freeEPvm T
-_ZN6Genode4Heap5allocEmPPv T
+_ZN6Genode4Heap9try_allocEm T
 _ZN6Genode4HeapC1EPNS_13Ram_allocatorEPNS_10Region_mapEmPvm T
 _ZN6Genode4HeapC2EPNS_13Ram_allocatorEPNS_10Region_mapEmPvm T
 _ZN6Genode4HeapD0Ev T
@ -213,13 +207,9 @@ _ZN6Genode4Lock6unlockEv T
 _ZN6Genode4LockC1ENS0_5StateE T
 _ZN6Genode4Slab13any_used_elemEv T
 _ZN6Genode4Slab17free_empty_blocksEv T
-_ZN6Genode4Slab5Block11_slab_entryEi T
-_ZN6Genode4Slab5Block14any_used_entryEv T
-_ZN6Genode4Slab5Block5allocEv T
-_ZN6Genode4Slab5Block9inc_availERNS0_5EntryE T
 _ZN6Genode4Slab5_freeEPv T
-_ZN6Genode4Slab5allocEmPPv T
 _ZN6Genode4Slab9insert_sbEPv T
+_ZN6Genode4Slab9try_allocEm T
 _ZN6Genode4SlabC1EmmPvPNS_9AllocatorE T
 _ZN6Genode4SlabC2EmmPvPNS_9AllocatorE T
 _ZN6Genode4SlabD0Ev T
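
For readability, the key ABI symbol changes demangle (e.g., via 'c++filt') as
follows; the old signatures carried the 'void **' out parameter that the
'Attempt'-based return values make obsolete:

	_ZN6Genode4Heap5allocEmPPv    ->  Genode::Heap::alloc(unsigned long, void**)
	_ZN6Genode4Heap9try_allocEm   ->  Genode::Heap::try_alloc(unsigned long)
	_ZN6Genode18Allocator_avl_base13alloc_alignedEmjNS_15Range_allocator5RangeE
	    ->  Genode::Allocator_avl_base::alloc_aligned(unsigned long, unsigned int,
	                                                  Genode::Range_allocator::Range)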
@ -33,42 +33,46 @@ void * Mapped_avl_allocator::map_addr(void * addr)
 }


-Range_allocator::Alloc_return
-Mapped_mem_allocator::alloc_aligned(size_t size, void **out_addr,
-                                    unsigned align, Range range)
+Range_allocator::Alloc_result
+Mapped_mem_allocator::alloc_aligned(size_t size, unsigned align, Range range)
 {
 	size_t page_rounded_size = align_addr(size, get_page_size_log2());
-	void *phys_addr = 0;
 	align = max((size_t)align, get_page_size_log2());

 	/* allocate physical pages */
-	Alloc_return ret1 = _phys_alloc->alloc_aligned(page_rounded_size,
-	                                               &phys_addr, align, range);
-	if (!ret1.ok()) {
-		error("Could not allocate physical memory region of size ",
-		      page_rounded_size);
-		return ret1;
-	}
-
-	/* allocate range in core's virtual address space */
-	Alloc_return ret2 = _virt_alloc->alloc_aligned(page_rounded_size,
-	                                               out_addr, align);
-	if (!ret2.ok()) {
-		error("Could not allocate virtual address range in core of size ",
-		      page_rounded_size);
-
-		/* revert physical allocation */
-		_phys_alloc->free(phys_addr);
-		return ret2;
-	}
-
-	_phys_alloc->metadata(phys_addr, { *out_addr });
-	_virt_alloc->metadata(*out_addr, { phys_addr });
-
-	/* make physical page accessible at the designated virtual address */
-	_map_local((addr_t)*out_addr, (addr_t)phys_addr, page_rounded_size);
-
-	return Alloc_return::OK;
+	return _phys_alloc->alloc_aligned(page_rounded_size, align, range)
+		.convert<Alloc_result>(
+
+			[&] (void *phys_addr) -> Alloc_result {
+
+				/* allocate range in core's virtual address space */
+				return _virt_alloc->alloc_aligned(page_rounded_size, align)
+					.convert<Alloc_result>(
+
+						[&] (void *virt_addr) {
+
+							_phys_alloc->metadata(phys_addr, { virt_addr });
+							_virt_alloc->metadata(virt_addr, { phys_addr });
+
+							/* make physical page accessible at the designated virtual address */
+							_map_local((addr_t)virt_addr, (addr_t)phys_addr, page_rounded_size);
+
+							return virt_addr;
+						},
+						[&] (Alloc_error e) {
+							error("Could not allocate virtual address range in core of size ",
+							      page_rounded_size, " (error ", (int)e, ")");
+
+							/* revert physical allocation */
+							_phys_alloc->free(phys_addr);
+							return e;
+						});
+			},
+			[&] (Alloc_error e) {
+				error("Could not allocate physical memory region of size ",
+				      page_rounded_size, " (error ", (int)e, ")");
+				return e;
+			});
 }
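
The nested 'convert' calls above implement a transactional two-step
allocation: the outer error lambda handles a failed physical allocation, the
inner one reverts the physical allocation when the virtual allocation fails.
Reduced to its skeleton, with 'first_step', 'second_step', and 'revert_first'
as placeholder names rather than Genode API:

	/* sketch of the nested-convert idiom; each step yields an 'Attempt' value */
	return first_step().convert<Alloc_result>(
		[&] (void *a) -> Alloc_result {
			return second_step().convert<Alloc_result>(
				[&] (void *b)       { /* both steps succeeded */ return b; },
				[&] (Alloc_error e) { revert_first(a); return e; });
		},
		[&] (Alloc_error e) { return e; });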
@ -47,7 +47,7 @@ class Genode::Constrained_core_ram : public Allocator
 			      " in core !");
 		}

-		bool alloc(size_t const size, void **ptr) override
+		Alloc_result try_alloc(size_t const size) override
 		{
 			size_t const page_aligned_size = align_addr(size, 12);

@ -56,15 +56,16 @@ class Genode::Constrained_core_ram : public Allocator
 			/* on some kernels we require a cap, on some not XXX */
 			Cap_quota_guard::Reservation caps(_cap_guard, Cap_quota{1});

-			if (!_core_mem.alloc(page_aligned_size, ptr))
-				return false;
-
-			ram.acknowledge();
-			caps.acknowledge();
-
-			core_mem_allocated += page_aligned_size;
-
-			return true;
+			return _core_mem.try_alloc(page_aligned_size).convert<Alloc_result>(
+
+				[&] (void *ptr) {
+					ram.acknowledge();
+					caps.acknowledge();
+					core_mem_allocated += page_aligned_size;
+					return ptr; },
+
+				[&] (Alloc_error error) {
+					return error; });
 		}

 		void free(void *ptr, size_t const size) override
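
Both quota reservations here ('ram', 'caps') follow the same
acknowledge-on-success discipline: they roll back in their destructors unless
acknowledged, so the success lambda is the only place where the reservations
are committed, and the error path needs no manual cleanup. Restating the
pattern from the hunk above in isolation:

	/* sketch: a reservation that reverts unless explicitly acknowledged */
	Cap_quota_guard::Reservation caps(_cap_guard, Cap_quota{1});
	/* ... an early return on allocation failure lets 'caps' revert ... */
	caps.acknowledge();   /* success path only: keep the reserved quota */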
@ -163,11 +163,10 @@ class Genode::Mapped_mem_allocator : public Genode::Core_mem_translator
 		 ** Range allocator interface **
 		 *******************************/

-		int add_range(addr_t, size_t) override { return -1; }
-		int remove_range(addr_t, size_t) override { return -1; }
-		Alloc_return alloc_aligned(size_t, void **, unsigned, Range) override;
-		Alloc_return alloc_addr(size_t, addr_t) override {
-			return Alloc_return::RANGE_CONFLICT; }
+		Range_result add_range(addr_t, size_t) override { return Alloc_error::DENIED; }
+		Range_result remove_range(addr_t, size_t) override { return Alloc_error::DENIED; }
+		Alloc_result alloc_aligned(size_t, unsigned, Range) override;
+		Alloc_result alloc_addr(size_t, addr_t) override { return Alloc_error::DENIED; }
 		void free(void *) override;
 		size_t avail() const override { return _phys_alloc->avail(); }
 		bool valid_addr(addr_t addr) const override {
@ -180,8 +179,8 @@ class Genode::Mapped_mem_allocator : public Genode::Core_mem_translator
 		 ** Allocator interface **
 		 *************************/

-		bool alloc(size_t size, void **out_addr) override {
-			return alloc_aligned(size, out_addr, log2(sizeof(addr_t))).ok(); }
+		Alloc_result try_alloc(size_t size) override {
+			return alloc_aligned(size, log2(sizeof(addr_t))); }
 		void free(void *addr, size_t) override;
 		size_t consumed() const override { return _phys_alloc->consumed(); }
 		size_t overhead(size_t size) const override {
@ -276,16 +275,14 @@ class Genode::Core_mem_allocator : public Genode::Core_mem_translator
 		 ** Range allocator interface **
 		 *******************************/

-		int add_range(addr_t, size_t) override { return -1; }
-		int remove_range(addr_t, size_t) override { return -1; }
-		Alloc_return alloc_addr(size_t, addr_t) override {
-			return Alloc_return::RANGE_CONFLICT; }
+		Range_result add_range(addr_t, size_t) override { return Alloc_error::DENIED; }
+		Range_result remove_range(addr_t, size_t) override { return Alloc_error::DENIED; }
+		Alloc_result alloc_addr(size_t, addr_t) override { return Alloc_error::DENIED; }

-		Alloc_return alloc_aligned(size_t size, void **out_addr,
-		                           unsigned align, Range range) override
+		Alloc_result alloc_aligned(size_t size, unsigned align, Range range) override
 		{
 			Mutex::Guard lock_guard(_mutex);
-			return _mem_alloc.alloc_aligned(size, out_addr, align, range);
+			return _mem_alloc.alloc_aligned(size, align, range);
 		}

 		void free(void *addr) override
@ -305,8 +302,10 @@ class Genode::Core_mem_allocator : public Genode::Core_mem_translator
 		 ** Allocator interface **
 		 *************************/

-		bool alloc(size_t size, void **out_addr) override {
-			return alloc_aligned(size, out_addr, log2(sizeof(addr_t))).ok(); }
+		Alloc_result try_alloc(size_t size) override
+		{
+			return alloc_aligned(size, log2(sizeof(addr_t)));
+		}

 		void free(void *addr, size_t size) override
 		{
@ -40,7 +40,7 @@ class Genode::Synced_range_allocator : public Range_allocator
 		friend class Mapped_mem_allocator;

 		Mutex          _default_mutex { };
-		Mutex         &_mutex;
+		Mutex         &_mutex { _default_mutex };
 		ALLOC          _alloc;
 		Synced_interface<ALLOC, Mutex> _synced_object;
@ -54,8 +54,7 @@ class Genode::Synced_range_allocator : public Range_allocator

 		template <typename... ARGS>
 		Synced_range_allocator(ARGS &&... args)
-		: _mutex(_default_mutex), _alloc(args...),
-		  _synced_object(_mutex, &_alloc) { }
+		: _alloc(args...), _synced_object(_mutex, &_alloc) { }

 		Guard operator () ()       { return _synced_object(); }
 		Guard operator () () const { return _synced_object(); }
@ -67,8 +66,8 @@ class Genode::Synced_range_allocator : public Range_allocator
 		 ** Allocator interface **
 		 *************************/

-		bool alloc(size_t size, void **out_addr) override {
-			return _synced_object()->alloc(size, out_addr); }
+		Alloc_result try_alloc(size_t size) override {
+			return _synced_object()->try_alloc(size); }

 		void free(void *addr, size_t size) override {
 			_synced_object()->free(addr, size); }
@ -87,17 +86,16 @@ class Genode::Synced_range_allocator : public Range_allocator
 		 ** Range-allocator interface **
 		 *******************************/

-		int add_range(addr_t base, size_t size) override {
+		Range_result add_range(addr_t base, size_t size) override {
 			return _synced_object()->add_range(base, size); }

-		int remove_range(addr_t base, size_t size) override {
+		Range_result remove_range(addr_t base, size_t size) override {
 			return _synced_object()->remove_range(base, size); }

-		Alloc_return alloc_aligned(size_t size, void **out_addr,
-		                           unsigned align, Range range) override {
-			return _synced_object()->alloc_aligned(size, out_addr, align, range); }
+		Alloc_result alloc_aligned(size_t size, unsigned align, Range range) override {
+			return _synced_object()->alloc_aligned(size, align, range); }

-		Alloc_return alloc_addr(size_t size, addr_t addr) override {
+		Alloc_result alloc_addr(size_t size, addr_t addr) override {
 			return _synced_object()->alloc_addr(size, addr); }

 		void free(void *addr) override {
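
The '_mutex' reference now carries a default member initializer, so the
variadic constructor no longer has to mention it and can forward all
arguments straight to '_alloc'. A generic sketch of the pattern, with
hypothetical names:

	struct Example
	{
		Genode::Mutex  _default_mutex { };
		Genode::Mutex &_mutex { _default_mutex };   /* bound at construction */

		template <typename... ARGS>
		Example(ARGS &&...) { /* '_mutex' needs no mention here */ }
	};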
@ -41,24 +41,16 @@ Io_mem_session_component::_prepare_io_mem(const char *args,
 		_cacheable = WRITE_COMBINED;

 	/* check for RAM collision */
-	int ret;
-	if ((ret = ram_alloc.remove_range(base, size))) {
+	if (ram_alloc.remove_range(base, size).failed()) {
 		error("I/O memory ", Hex_range<addr_t>(base, size), " "
-		      "used by RAM allocator (", ret, ")");
+		      "used by RAM allocator");
 		return Dataspace_attr();
 	}

 	/* allocate region */
-	switch (_io_mem_alloc.alloc_addr(req_size, req_base).value) {
-	case Range_allocator::Alloc_return::RANGE_CONFLICT:
+	if (_io_mem_alloc.alloc_addr(req_size, req_base).failed()) {
 		error("I/O memory ", Hex_range<addr_t>(req_base, req_size), " not available");
 		return Dataspace_attr();
-
-	case Range_allocator::Alloc_return::OUT_OF_METADATA:
-		error("I/O memory allocator ran out of meta data");
-		return Dataspace_attr();
-
-	case Range_allocator::Alloc_return::OK: break;
 	}

 	/* request local mapping */
@ -38,8 +38,7 @@ Ram_dataspace_factory::try_alloc(size_t ds_size, Cache cache)
 	 * If this does not work, we subsequently weaken the alignment constraint
 	 * until the allocation succeeds.
 	 */
-	void *ds_addr = nullptr;
-	bool alloc_succeeded = false;
+	Range_allocator::Alloc_result allocated_range = Allocator::Alloc_error::DENIED;

 	/*
 	 * If no physical constraint exists, try to allocate physical memory at
@ -53,63 +52,57 @@ Ram_dataspace_factory::try_alloc(size_t ds_size, Cache cache)
 		Phys_range const range { .start = high_start, .end = _phys_range.end };

 		for (size_t align_log2 = log2(ds_size); align_log2 >= 12; align_log2--) {
-			if (_phys_alloc.alloc_aligned(ds_size, &ds_addr, align_log2, range).ok()) {
-				alloc_succeeded = true;
+			allocated_range = _phys_alloc.alloc_aligned(ds_size, align_log2, range);
+			if (allocated_range.ok())
 				break;
-			}
 		}
 	}

 	/* apply constraints, or retry if larger memory allocation failed */
-	if (!alloc_succeeded) {
+	if (!allocated_range.ok()) {
 		for (size_t align_log2 = log2(ds_size); align_log2 >= 12; align_log2--) {
-			if (_phys_alloc.alloc_aligned(ds_size, &ds_addr, align_log2,
-			                              _phys_range).ok()) {
-				alloc_succeeded = true;
+			allocated_range = _phys_alloc.alloc_aligned(ds_size, align_log2, _phys_range);
+			if (allocated_range.ok())
 				break;
-			}
 		}
 	}

-	/*
-	 * Helper to release the allocated physical memory whenever we leave the
-	 * scope via an exception.
-	 */
-	class Phys_alloc_guard
-	{
-		private:
-
-			/*
-			 * Noncopyable
-			 */
-			Phys_alloc_guard(Phys_alloc_guard const &);
-			Phys_alloc_guard &operator = (Phys_alloc_guard const &);
-
-		public:
-
-			Range_allocator &phys_alloc;
-			void * const ds_addr;
-			bool ack = false;
-
-			Phys_alloc_guard(Range_allocator &phys_alloc, void *ds_addr)
-			: phys_alloc(phys_alloc), ds_addr(ds_addr) { }
-
-			~Phys_alloc_guard() { if (!ack) phys_alloc.free(ds_addr); }
-
-	} phys_alloc_guard(_phys_alloc, ds_addr);
-
 	/*
 	 * Normally, init's quota equals the size of physical memory and this quota
 	 * is distributed among the processes. As we check the quota before
 	 * allocating, the allocation should always succeed in theory. However,
 	 * fragmentation could cause a failing allocation.
 	 */
-	if (!alloc_succeeded) {
+	if (allocated_range.failed()) {
 		error("out of physical memory while allocating ", ds_size, " bytes ",
 		      "in range [", Hex(_phys_range.start), "-", Hex(_phys_range.end), "]");
-		return Alloc_error::OUT_OF_RAM;
+
+		return allocated_range.convert<Ram_allocator::Alloc_result>(
+			[&] (void *)            { return Alloc_error::DENIED; },
+			[&] (Alloc_error error) { return error; });
 	}

+	/*
+	 * Helper to release the allocated physical memory whenever we leave the
+	 * scope via an exception.
+	 */
+	struct Phys_alloc_guard
+	{
+		Range_allocator &phys_alloc;
+		struct { void * ds_addr = nullptr; };
+		bool keep = false;
+
+		Phys_alloc_guard(Range_allocator &phys_alloc)
+		: phys_alloc(phys_alloc) { }
+
+		~Phys_alloc_guard() { if (!keep && ds_addr) phys_alloc.free(ds_addr); }
+
+	} phys_alloc_guard(_phys_alloc);
+
+	allocated_range.with_result(
+		[&] (void *ptr)   { phys_alloc_guard.ds_addr = ptr; },
+		[&] (Alloc_error) { /* already checked above */ });
+
 	/*
 	 * For non-cached RAM dataspaces, we mark the dataspace as write
 	 * combined and expect the pager to evaluate this dataspace property
@ -118,7 +111,8 @@ Ram_dataspace_factory::try_alloc(size_t ds_size, Cache cache)
 	Dataspace_component *ds_ptr = nullptr;
 	try {
 		ds_ptr = new (_ds_slab)
-			Dataspace_component(ds_size, (addr_t)ds_addr, cache, true, this);
+			Dataspace_component(ds_size, (addr_t)phys_alloc_guard.ds_addr,
+			                    cache, true, this);
 	}
 	catch (Out_of_ram)  { return Alloc_error::OUT_OF_RAM; }
 	catch (Out_of_caps) { return Alloc_error::OUT_OF_CAPS; }
@ -145,7 +139,7 @@ Ram_dataspace_factory::try_alloc(size_t ds_size, Cache cache)

 	Dataspace_capability ds_cap = _ep.manage(&ds);

-	phys_alloc_guard.ack = true;
+	phys_alloc_guard.keep = true;

 	return static_cap_cast<Ram_dataspace>(ds_cap);
 }
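
'Phys_alloc_guard' is the exception-safety backbone of this function: its
destructor frees the physical range unless 'keep' is set once the dataspace
is fully registered. The same idiom, reduced to a minimal generic sketch with
hypothetical names ('Guard', 'some_alloc'):

	/* sketch: release a resource on scope exit unless explicitly kept */
	struct Guard
	{
		Genode::Range_allocator &alloc;
		void *ptr  = nullptr;
		bool  keep = false;

		~Guard() { if (!keep && ptr) alloc.free(ptr); }
	} guard { some_alloc };

	/* ... exceptions or early returns free 'guard.ptr' automatically ... */
	guard.keep = true;   /* success: ownership has been passed on */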
@ -365,8 +365,14 @@ Region_map_component::attach(Dataspace_capability ds_cap, size_t size,
 		throw Region_conflict();

 	auto lambda = [&] (Dataspace_component *dsc) {
+
+		using Alloc_error = Range_allocator::Alloc_error;
+
 		/* check dataspace validity */
-		if (!dsc) throw Invalid_dataspace();
+		if (!dsc)
+			throw Invalid_dataspace();
+
+		unsigned const min_align_log2 = get_page_size_log2();

 		size_t const off = offset;
 		if (off >= dsc->size())
@ -376,27 +382,25 @@ Region_map_component::attach(Dataspace_capability ds_cap, size_t size,
 			size = dsc->size() - offset;

 		/* work with page granularity */
-		size = align_addr(size, get_page_size_log2());
+		size = align_addr(size, min_align_log2);

 		/* deny creation of regions larger then the actual dataspace */
 		if (dsc->size() < size + offset)
 			throw Region_conflict();

 		/* allocate region for attachment */
-		void *attach_at = 0;
+		void *attach_at = nullptr;
 		if (use_local_addr) {
-			switch (_map.alloc_addr(size, local_addr).value) {
-
-			case Range_allocator::Alloc_return::OUT_OF_METADATA:
-				throw Out_of_ram();
-
-			case Range_allocator::Alloc_return::RANGE_CONFLICT:
-				throw Region_conflict();
-
-			case Range_allocator::Alloc_return::OK:
-				attach_at = local_addr;
-				break;
-			}
+			_map.alloc_addr(size, local_addr).with_result(
+				[&] (void *ptr) { attach_at = ptr; },
+				[&] (Range_allocator::Alloc_error error) {
+					switch (error) {
+					case Alloc_error::OUT_OF_RAM:  throw Out_of_ram();
+					case Alloc_error::OUT_OF_CAPS: throw Out_of_caps();
+					case Alloc_error::DENIED:      break;
+					}
+					throw Region_conflict();
+				});
 		} else {

 			/*
@ -406,9 +410,10 @@ Region_map_component::attach(Dataspace_capability ds_cap, size_t size,
 			 */
 			size_t align_log2 = log2(size);
 			if (align_log2 >= sizeof(void *)*8)
-				align_log2 = get_page_size_log2();
+				align_log2 = min_align_log2;

-			for (; align_log2 >= get_page_size_log2(); align_log2--) {
+			bool done = false;
+			for (; !done && (align_log2 >= min_align_log2); align_log2--) {

 				/*
 				 * Don't use an alignment higher than the alignment of the backing
@ -419,21 +424,23 @@ Region_map_component::attach(Dataspace_capability ds_cap, size_t size,
 					continue;

 				/* try allocating the align region */
-				Range_allocator::Alloc_return alloc_return =
-					_map.alloc_aligned(size, &attach_at, align_log2);
-
-				typedef Range_allocator::Alloc_return Alloc_return;
-
-				switch (alloc_return.value) {
-				case Alloc_return::OK:              break; /* switch */
-				case Alloc_return::OUT_OF_METADATA: throw Out_of_ram();
-				case Alloc_return::RANGE_CONFLICT:  continue; /* for loop */
-				}
-				break; /* for loop */
+				_map.alloc_aligned(size, align_log2).with_result(
+
+					[&] (void *ptr) {
+						attach_at = ptr;
+						done      = true; },
+
+					[&] (Range_allocator::Alloc_error error) {
+						switch (error) {
+						case Alloc_error::OUT_OF_RAM:  throw Out_of_ram();
+						case Alloc_error::OUT_OF_CAPS: throw Out_of_caps();
+						case Alloc_error::DENIED:      break; /* no fit */
+						}
+						/* try smaller alignment in next iteration... */
+					});
 			}

-			if (align_log2 < get_page_size_log2())
+			if (!done)
 				throw Region_conflict();
 		}
|
|||||||
unsigned size = Arg_string::find_arg(args, "io_port_size").ulong_value(0);
|
unsigned size = Arg_string::find_arg(args, "io_port_size").ulong_value(0);
|
||||||
|
|
||||||
/* allocate region (also checks out-of-bounds regions) */
|
/* allocate region (also checks out-of-bounds regions) */
|
||||||
switch (io_port_alloc.alloc_addr(size, base).value) {
|
io_port_alloc.alloc_addr(size, base).with_error(
|
||||||
|
[&] (Allocator::Alloc_error e) {
|
||||||
|
|
||||||
case Range_allocator::Alloc_return::RANGE_CONFLICT:
|
switch (e) {
|
||||||
error("I/O port ", Hex_range<uint16_t>(base, size), " not available");
|
case Range_allocator::Alloc_error::DENIED:
|
||||||
throw Service_denied();
|
error("I/O port ", Hex_range<uint16_t>(base, size), " not available");
|
||||||
|
throw Service_denied();
|
||||||
|
|
||||||
case Range_allocator::Alloc_return::OUT_OF_METADATA:
|
case Range_allocator::Alloc_error::OUT_OF_RAM:
|
||||||
error("I/O port allocator ran out of meta data");
|
error("I/O port allocator ran out of RAM");
|
||||||
throw Service_denied();
|
throw Service_denied();
|
||||||
|
|
||||||
case Range_allocator::Alloc_return::OK: break;
|
case Range_allocator::Alloc_error::OUT_OF_CAPS:
|
||||||
}
|
error("I/O port allocator ran out of caps");
|
||||||
|
throw Service_denied();
|
||||||
|
}
|
||||||
|
});
|
||||||
|
|
||||||
/* store information */
|
/* store information */
|
||||||
_base = base;
|
_base = base;
|
||||||
|
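
'with_error' is the counterpart of 'with_result' for cases where only the
error branch matters: here the success case needs no action because the
allocation's side effect (reserving the port range) is all that is wanted.
The shape, with a hypothetical 'attempt' value:

	/* sketch: inspect only the error case of an 'Attempt' value */
	attempt.with_error([&] (Genode::Allocator::Alloc_error e) {
		/* log or rethrow 'e'; nothing to do on success */ });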
@ -71,29 +71,37 @@ class Stack_area_region_map : public Region_map
 		{
 			/* allocate physical memory */
 			size = round_page(size);
-			void *phys_base = nullptr;
-			Range_allocator &ra = platform_specific().ram_alloc();
-			if (ra.alloc_aligned(size, &phys_base,
-			                     get_page_size_log2()).error()) {
-				error("could not allocate backing store for new stack");
-				return (addr_t)0;
-			}

-			Dataspace_component &ds = *new (&_ds_slab)
-				Dataspace_component(size, 0, (addr_t)phys_base, CACHED, true, 0);
+			Range_allocator &phys = platform_specific().ram_alloc();

-			addr_t const core_local_addr = stack_area_virtual_base() + (addr_t)local_addr;
+			return phys.alloc_aligned(size, get_page_size_log2()).convert<Local_addr>(

-			if (!map_local(ds.phys_addr(), core_local_addr,
-			               ds.size() >> get_page_size_log2())) {
-				error("could not map phys ", Hex(ds.phys_addr()),
-				      " at local ", Hex(core_local_addr));
-				return (addr_t)0;
-			}
+				[&] (void *phys_ptr) {

-			ds.assign_core_local_addr((void*)core_local_addr);
+					addr_t const phys_base = (addr_t)phys_ptr;

-			return local_addr;
+					Dataspace_component &ds = *new (&_ds_slab)
+						Dataspace_component(size, 0, (addr_t)phys_base, CACHED, true, 0);
+
+					addr_t const core_local_addr = stack_area_virtual_base()
+					                             + (addr_t)local_addr;
+
+					if (!map_local(ds.phys_addr(), core_local_addr,
+					               ds.size() >> get_page_size_log2())) {
+						error("could not map phys ", Hex(ds.phys_addr()),
+						      " at local ", Hex(core_local_addr));
+
+						phys.free(phys_ptr);
+						return Local_addr { (addr_t)0 };
+					}
+
+					ds.assign_core_local_addr((void*)core_local_addr);
+
+					return local_addr;
+				},
+				[&] (Range_allocator::Alloc_error) {
+					error("could not allocate backing store for new stack");
+					return (addr_t)0; });
 		}

 		void detach(Local_addr local_addr) override
@ -63,48 +63,57 @@ void Vm_session_component::attach(Dataspace_capability const cap,
 	    attribute.offset > dsc.size() - attribute.size)
 		throw Invalid_dataspace();

-	switch (_map.alloc_addr(attribute.size, guest_phys).value) {
-	case Range_allocator::Alloc_return::OUT_OF_METADATA:
-		throw Out_of_ram();
-	case Range_allocator::Alloc_return::RANGE_CONFLICT:
-	{
-		Rm_region *region_ptr = _map.metadata((void *)guest_phys);
-		if (!region_ptr)
-			throw Region_conflict();
-
-		Rm_region &region = *region_ptr;
-
-		if (!(cap == region.dataspace().cap()))
-			throw Region_conflict();
-		if (guest_phys < region.base() ||
-		    guest_phys > region.base() + region.size() - 1)
-			throw Region_conflict();
-
-		/* re-attach all */
-		break;
-	}
-	case Range_allocator::Alloc_return::OK:
-	{
-		/* store attachment info in meta data */
-		try {
-			_map.construct_metadata((void *)guest_phys,
-			                        guest_phys, attribute.size,
-			                        dsc.writable() && attribute.writeable,
-			                        dsc, attribute.offset, *this,
-			                        attribute.executable);
-		} catch (Allocator_avl_tpl<Rm_region>::Assign_metadata_failed) {
-			error("failed to store attachment info");
-			throw Invalid_dataspace();
-		}
-
-		Rm_region &region = *_map.metadata((void *)guest_phys);
-
-		/* inform dataspace about attachment */
-		dsc.attached_to(region);
-
-		break;
-	}
-	};
+	using Alloc_error = Range_allocator::Alloc_error;
+
+	_map.alloc_addr(attribute.size, guest_phys).with_result(
+
+		[&] (void *) {
+
+			/* store attachment info in meta data */
+			try {
+				_map.construct_metadata((void *)guest_phys,
+				                        guest_phys, attribute.size,
+				                        dsc.writable() && attribute.writeable,
+				                        dsc, attribute.offset, *this,
+				                        attribute.executable);
+			} catch (Allocator_avl_tpl<Rm_region>::Assign_metadata_failed) {
+				error("failed to store attachment info");
+				throw Invalid_dataspace();
+			}
+
+			Rm_region &region = *_map.metadata((void *)guest_phys);
+
+			/* inform dataspace about attachment */
+			dsc.attached_to(region);
+		},
+
+		[&] (Alloc_error error) {
+
+			switch (error) {
+
+			case Alloc_error::OUT_OF_RAM:  throw Out_of_ram();
+			case Alloc_error::OUT_OF_CAPS: throw Out_of_caps();
+			case Alloc_error::DENIED:
+				{
+					/*
+					 * Handle attach after partial detach
+					 */
+					Rm_region *region_ptr = _map.metadata((void *)guest_phys);
+					if (!region_ptr)
+						throw Region_conflict();
+
+					Rm_region &region = *region_ptr;
+
+					if (!(cap == region.dataspace().cap()))
+						throw Region_conflict();
+
+					if (guest_phys < region.base() ||
+					    guest_phys > region.base() + region.size() - 1)
+						throw Region_conflict();
+				}
+			};
+		}
+	);

 	/* kernel specific code to attach memory to guest */
 	_attach_vm_memory(dsc, guest_phys, attribute);
@ -95,110 +95,106 @@ void Allocator_avl_base::Block::recompute()
 ** Allocator_avl implementation **
 **********************************/

-Allocator_avl_base::Block *Allocator_avl_base::_alloc_block_metadata()
+Allocator_avl_base::Alloc_md_result Allocator_avl_base::_alloc_block_metadata()
 {
-	void *b = nullptr;
-	if (_md_alloc->alloc(sizeof(Block), &b))
-		return construct_at<Block>(b, 0, 0, 0);
-
-	return nullptr;
+	return _md_alloc.try_alloc(sizeof(Block)).convert<Alloc_md_result>(
+		[&] (void *ptr) {
+			return construct_at<Block>(ptr, 0, 0, 0); },
+		[&] (Alloc_error error) {
+			return error; });
 }


-bool Allocator_avl_base::_alloc_two_blocks_metadata(Block **dst1, Block **dst2)
+Allocator_avl_base::Alloc_md_two_result
+Allocator_avl_base::_alloc_two_blocks_metadata()
 {
-	Block * const b1 = _alloc_block_metadata();
-	Block * b2 = nullptr;
-
-	try {
-		b2 = _alloc_block_metadata();
-	} catch (...) {
-		if (b1) _md_alloc->free(b1, sizeof(Block));
-		throw;
-	}
-
-	if (b1 && b2) {
-		*dst1 = b1;
-		*dst2 = b2;
-		return true;
-	}
-
-	*dst1 = *dst2 = nullptr;
-
-	if (b2) _md_alloc->free(b2, sizeof(Block));
-	if (b1) _md_alloc->free(b1, sizeof(Block));
-
-	return false;
+	return _alloc_block_metadata().convert<Alloc_md_two_result>(
+
+		[&] (Block *b1_ptr) {
+			return _alloc_block_metadata().convert<Alloc_md_two_result>(
+
+				[&] (Block *b2_ptr) {
+					return Two_blocks { b1_ptr, b2_ptr }; },
+
+				[&] (Alloc_error error) {
+					_md_alloc.free(b1_ptr, sizeof(Block));
+					return error; });
+		},
+		[&] (Alloc_error error) {
+			return error; });
 }


-int Allocator_avl_base::_add_block(Block *block_metadata,
-                                   addr_t base, size_t size, bool used)
+void Allocator_avl_base::_add_block(Block &block_metadata,
+                                    addr_t base, size_t size, bool used)
 {
-	if (!block_metadata)
-		return -1;
-
 	/* call constructor for new block */
-	construct_at<Block>(block_metadata, base, size, used);
+	construct_at<Block>(&block_metadata, base, size, used);

 	/* insert block into avl tree */
-	_addr_tree.insert(block_metadata);
-
-	return 0;
+	_addr_tree.insert(&block_metadata);
 }


-void Allocator_avl_base::_destroy_block(Block *b)
+void Allocator_avl_base::_destroy_block(Block &b)
 {
-	if (!b) return;
-
-	/* remove block from both avl trees */
-	_addr_tree.remove(b);
-	_md_alloc->free(b, _md_entry_size);
+	_addr_tree.remove(&b);
+	_md_alloc.free(&b, _md_entry_size);
 }


-void Allocator_avl_base::_cut_from_block(Block *b, addr_t addr, size_t size,
-                                         Block *dst1, Block *dst2)
+void Allocator_avl_base::_cut_from_block(Block &b, addr_t addr, size_t size, Two_blocks blocks)
 {
-	size_t const padding   = addr > b->addr() ? addr - b->addr() : 0;
-	size_t const b_size    = b->size() > padding ? b->size() - padding : 0;
+	size_t const padding   = addr > b.addr() ? addr - b.addr() : 0;
+	size_t const b_size    = b.size() > padding ? b.size() - padding : 0;
 	size_t       remaining = b_size > size ? b_size - size : 0;

 	/* case that a block contains the whole addressable range */
-	if (!b->addr() && !b->size())
-		remaining = b->size() - size - padding;
+	if (!b.addr() && !b.size())
+		remaining = b.size() - size - padding;

-	addr_t orig_addr = b->addr();
+	addr_t orig_addr = b.addr();

 	_destroy_block(b);

 	/* create free block containing the alignment padding */
 	if (padding > 0)
-		_add_block(dst1, orig_addr, padding, Block::FREE);
+		_add_block(*blocks.b1_ptr, orig_addr, padding, Block::FREE);
 	else
-		_md_alloc->free(dst1, sizeof(Block));
+		_md_alloc.free(blocks.b1_ptr, sizeof(Block));

 	/* create free block for remaining space of original block */
 	if (remaining > 0)
-		_add_block(dst2, addr + size, remaining, Block::FREE);
+		_add_block(*blocks.b2_ptr, addr + size, remaining, Block::FREE);
 	else
-		_md_alloc->free(dst2, sizeof(Block));
+		_md_alloc.free(blocks.b2_ptr, sizeof(Block));
 }


+template <typename FN>
+void Allocator_avl_base::_revert_block_ranges(FN const &any_block_fn)
+{
+	for (bool loop = true; loop; ) {
+
+		Block *block_ptr = any_block_fn();
+		if (!block_ptr)
+			break;
+
+		remove_range(block_ptr->addr(), block_ptr->size()).with_error(
+			[&] (Alloc_error error) {
+				if (error == Alloc_error::DENIED) /* conflict */
+					_destroy_block(*block_ptr);
+				else
+					loop = false; /* give up on OUT_OF_RAM or OUT_OF_CAPS */
+			});
+	}
+}
+
+
 void Allocator_avl_base::_revert_unused_ranges()
 {
-	do {
-		Block * const block = _find_any_unused_block(_addr_tree.first());
-		if (!block)
-			break;
-
-		int const error = remove_range(block->addr(), block->size());
-		if (error && block == _find_any_unused_block(_addr_tree.first()))
-			/* if the invocation fails, release the block to break endless loop */
-			_destroy_block(block);
-	} while (true);
+	_revert_block_ranges([&] () {
+		return _find_any_unused_block(_addr_tree.first()); });
 }
|
|||||||
" at allocator destruction time");
|
" at allocator destruction time");
|
||||||
|
|
||||||
/* destroy all remaining blocks */
|
/* destroy all remaining blocks */
|
||||||
while (Block *block = _addr_tree.first()) {
|
_revert_block_ranges([&] () { return _addr_tree.first(); });
|
||||||
if (remove_range(block->addr(), block->size())) {
|
|
||||||
/* if the invocation fails, release the block to break endless loop */
|
|
||||||
_destroy_block(block);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
int Allocator_avl_base::add_range(addr_t new_addr, size_t new_size)
|
Allocator_avl_base::Range_result Allocator_avl_base::add_range(addr_t new_addr, size_t new_size)
|
||||||
{
|
{
|
||||||
Block *b;
|
if (!new_size)
|
||||||
|
return Alloc_error::DENIED;
|
||||||
/* sanity check for insane users ;-) */
|
|
||||||
if (!new_size) return -2;
|
|
||||||
|
|
||||||
/* check for conflicts with existing blocks */
|
/* check for conflicts with existing blocks */
|
||||||
if (_find_by_address(new_addr, new_size, true))
|
if (_find_by_address(new_addr, new_size, true))
|
||||||
return -3;
|
return Alloc_error::DENIED;
|
||||||
|
|
||||||
Block *new_block = _alloc_block_metadata();
|
return _alloc_block_metadata().convert<Range_result>(
|
||||||
if (!new_block) return -4;
|
|
||||||
|
|
||||||
/* merge with predecessor */
|
[&] (Block *new_block_ptr) {
|
||||||
if (new_addr != 0 && (b = _find_by_address(new_addr - 1)) && !b->used()) {
|
|
||||||
|
|
||||||
new_size += b->size();
|
/* merge with predecessor */
|
||||||
new_addr = b->addr();
|
Block *b = nullptr;
|
||||||
|
if (new_addr != 0 && (b = _find_by_address(new_addr - 1)) && !b->used()) {
|
||||||
|
|
||||||
_destroy_block(b);
|
new_size += b->size();
|
||||||
}
|
new_addr = b->addr();
|
||||||
|
_destroy_block(*b);
|
||||||
|
}
|
||||||
|
|
||||||
/* merge with successor */
|
/* merge with successor */
|
||||||
if ((b = _find_by_address(new_addr + new_size)) && !b->used()) {
|
if ((b = _find_by_address(new_addr + new_size)) && !b->used()) {
|
||||||
|
|
||||||
new_size += b->size();
|
new_size += b->size();
|
||||||
|
_destroy_block(*b);
|
||||||
|
}
|
||||||
|
|
||||||
_destroy_block(b);
|
/* create new block that spans over all merged blocks */
|
||||||
}
|
_add_block(*new_block_ptr, new_addr, new_size, Block::FREE);
|
||||||
|
|
||||||
/* create new block that spans over all merged blocks */
|
return Range_ok();
|
||||||
return _add_block(new_block, new_addr, new_size, Block::FREE);
|
},
|
||||||
|
[&] (Alloc_error error) {
|
||||||
|
return error; });
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
int Allocator_avl_base::remove_range(addr_t base, size_t size)
|
Allocator_avl_base::Range_result Allocator_avl_base::remove_range(addr_t base, size_t size)
|
||||||
{
|
{
|
||||||
/* sanity check for insane users ;-) */
|
Range_result result = Alloc_error::DENIED;
|
||||||
if (!size) return -1;
|
|
||||||
|
|
||||||
Block *dst1, *dst2;
|
if (!size)
|
||||||
if (!_alloc_two_blocks_metadata(&dst1, &dst2))
|
return result;
|
||||||
return -2;
|
|
||||||
|
|
||||||
/* FIXME removing ranges from allocators with used blocks is not safe! */
|
for (bool done = false; !done; ) {
|
||||||
while (1) {
|
|
||||||
|
|
||||||
/* find block overlapping the specified range */
|
_alloc_two_blocks_metadata().with_result(
|
||||||
Block *b = _addr_tree.first();
|
[&] (Two_blocks blocks) {
|
||||||
b = b ? b->find_by_address(base, size, 1) : 0;
|
|
||||||
|
|
||||||
/*
|
/* find block overlapping the specified range */
|
||||||
* If there are no overlappings with any existing blocks (b == 0), we
|
Block *b = _addr_tree.first();
|
||||||
* are done. If however, the overlapping block is in use, we have a
|
b = b ? b->find_by_address(base, size, 1) : 0;
|
||||||
* problem. In both cases, return.
|
|
||||||
*/
|
|
||||||
if (!b || !b->avail()) {
|
|
||||||
_md_alloc->free(dst1, sizeof(Block));
|
|
||||||
_md_alloc->free(dst2, sizeof(Block));
|
|
||||||
return !b ? 0 : -3;
|
|
||||||
}
|
|
||||||
|
|
||||||
/* cut intersecting address range */
|
/*
|
||||||
addr_t intersect_beg = max(base, b->addr());
|
* If there are no overlappings with any existing blocks (b == 0), we
|
||||||
size_t intersect_end = min(base + size - 1, b->addr() + b->size() - 1);
|
* are done. If however, the overlapping block is in use, we have a
|
||||||
|
* problem. Stop iterating in both cases.
|
||||||
|
*/
|
||||||
|
if (!b || !b->avail()) {
|
||||||
|
_md_alloc.free(blocks.b1_ptr, sizeof(Block));
|
||||||
|
_md_alloc.free(blocks.b2_ptr, sizeof(Block));
|
||||||
|
|
||||||
_cut_from_block(b, intersect_beg, intersect_end - intersect_beg + 1, dst1, dst2);
|
if (b == 0)
|
||||||
if (!_alloc_two_blocks_metadata(&dst1, &dst2))
|
result = Range_ok();
|
||||||
return -4;
|
|
||||||
};
|
done = true;
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
/* cut intersecting address range */
|
||||||
|
addr_t intersect_beg = max(base, b->addr());
|
||||||
|
size_t intersect_end = min(base + size - 1, b->addr() + b->size() - 1);
|
||||||
|
|
||||||
|
_cut_from_block(*b, intersect_beg, intersect_end - intersect_beg + 1, blocks);
|
||||||
|
},
|
||||||
|
[&] (Alloc_error error) {
|
||||||
|
result = error;
|
||||||
|
done = true;
|
||||||
|
});
|
||||||
|
}
|
||||||
|
return result;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
Range_allocator::Alloc_return
|
template <typename SEARCH_FN>
|
||||||
Allocator_avl_base::alloc_aligned(size_t size, void **out_addr, unsigned align,
|
Allocator::Alloc_result
|
||||||
Range range)
|
Allocator_avl_base::_allocate(size_t const size, unsigned align, Range range,
|
||||||
|
SEARCH_FN const &search_fn)
|
||||||
{
|
{
|
||||||
Block *dst1, *dst2;
|
return _alloc_two_blocks_metadata().convert<Alloc_result>(
|
||||||
if (!_alloc_two_blocks_metadata(&dst1, &dst2))
|
|
||||||
return Alloc_return(Alloc_return::OUT_OF_METADATA);
|
|
||||||
|
|
||||||
/* find best fitting block */
|
[&] (Two_blocks two_blocks) -> Alloc_result {
|
||||||
Block *b = _addr_tree.first();
|
|
||||||
b = b ? b->find_best_fit(size, align, range) : 0;
|
|
||||||
|
|
||||||
if (!b) {
|
/* find block according to the policy implemented by 'search_fn' */
|
||||||
_md_alloc->free(dst1, sizeof(Block));
|
Block *b_ptr = _addr_tree.first();
|
||||||
_md_alloc->free(dst2, sizeof(Block));
|
b_ptr = b_ptr ? search_fn(*b_ptr) : 0;
|
||||||
return Alloc_return(Alloc_return::RANGE_CONFLICT);
|
|
||||||
}
|
|
||||||
|
|
||||||
/* calculate address of new (aligned) block */
|
if (!b_ptr) {
|
||||||
addr_t new_addr = align_addr(max(b->addr(), range.start), align);
|
/* range conflict */
|
||||||
|
_md_alloc.free(two_blocks.b1_ptr, sizeof(Block));
|
||||||
|
_md_alloc.free(two_blocks.b2_ptr, sizeof(Block));
|
||||||
|
return Alloc_error::DENIED;
|
||||||
|
}
|
||||||
|
Block &b = *b_ptr;
|
||||||
|
|
||||||
/* remove new block from containing block */
|
/* calculate address of new (aligned) block */
|
||||||
_cut_from_block(b, new_addr, size, dst1, dst2);
|
addr_t const new_addr = align_addr(max(b.addr(), range.start), align);
|
||||||
|
|
||||||
/* create allocated block */
|
/* remove new block from containing block, consume two_blocks */
|
||||||
Block *new_block = _alloc_block_metadata();
|
_cut_from_block(b, new_addr, size, two_blocks);
|
||||||
if (!new_block) {
|
|
||||||
_md_alloc->free(new_block, sizeof(Block));
|
|
||||||
return Alloc_return(Alloc_return::OUT_OF_METADATA);
|
|
||||||
}
|
|
||||||
_add_block(new_block, new_addr, size, Block::USED);
|
|
||||||
|
|
||||||
*out_addr = reinterpret_cast<void *>(new_addr);
|
/* create allocated block */
|
||||||
return Alloc_return(Alloc_return::OK);
|
return _alloc_block_metadata().convert<Alloc_result>(
|
||||||
|
|
||||||
|
[&] (Block *new_block_ptr) {
|
||||||
|
_add_block(*new_block_ptr, new_addr, size, Block::USED);
|
||||||
|
return reinterpret_cast<void *>(new_addr); },
|
||||||
|
|
||||||
|
[&] (Alloc_error error) {
|
||||||
|
return error; });
|
||||||
|
},
|
||||||
|
[&] (Alloc_error error) {
|
||||||
|
return error; });
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
Range_allocator::Alloc_return Allocator_avl_base::alloc_addr(size_t size, addr_t addr)
|
Allocator::Alloc_result
|
||||||
|
Allocator_avl_base::alloc_aligned(size_t size, unsigned align, Range range)
|
||||||
{
|
{
|
||||||
/* sanity check */
|
return _allocate(size, align, range, [&] (Block &first) {
|
||||||
|
return first.find_best_fit(size, align, range); });
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
Range_allocator::Alloc_result Allocator_avl_base::alloc_addr(size_t size, addr_t addr)
|
||||||
|
{
|
||||||
|
/* check for integer overflow */
|
||||||
|
if (addr + size - 1 < addr)
|
||||||
|
return Alloc_error::DENIED;
|
||||||
|
|
||||||
|
/* check for range conflict */
|
||||||
if (!_sum_in_range(addr, size))
|
if (!_sum_in_range(addr, size))
|
||||||
return Alloc_return(Alloc_return::RANGE_CONFLICT);
|
return Alloc_error::DENIED;
|
||||||
|
|
||||||
Block *dst1, *dst2;
|
Range const range { .start = addr, .end = addr + size - 1 };
|
||||||
if (!_alloc_two_blocks_metadata(&dst1, &dst2))
|
unsigned const align_any = 0;
|
||||||
return Alloc_return(Alloc_return::OUT_OF_METADATA);
|
|
||||||
|
|
||||||
/* find block at specified address */
|
return _allocate(size, align_any, range, [&] (Block &first) {
|
||||||
Block *b = _addr_tree.first();
|
return first.find_by_address(addr, size); });
|
||||||
b = b ? b->find_by_address(addr, size) : 0;
|
|
||||||
|
|
||||||
/* skip if there's no block or block is used */
|
|
||||||
if (!b || b->used()) {
|
|
||||||
_md_alloc->free(dst1, sizeof(Block));
|
|
||||||
_md_alloc->free(dst2, sizeof(Block));
|
|
||||||
return Alloc_return(Alloc_return::RANGE_CONFLICT);
|
|
||||||
}
|
|
||||||
|
|
||||||
/* remove new block from containing block */
|
|
||||||
_cut_from_block(b, addr, size, dst1, dst2);
|
|
||||||
|
|
||||||
/* create allocated block */
|
|
||||||
Block *new_block = _alloc_block_metadata();
|
|
||||||
if (!new_block) {
|
|
||||||
_md_alloc->free(new_block, sizeof(Block));
|
|
||||||
return Alloc_return(Alloc_return::OUT_OF_METADATA);
|
|
||||||
}
|
|
||||||
_add_block(new_block, addr, size, Block::USED);
|
|
||||||
|
|
||||||
return Alloc_return(Alloc_return::OK);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
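
The '_allocate' template folds the formerly duplicated bodies of
'alloc_aligned' and 'alloc_addr' into one routine that differs only in the
block-search policy: any callable taking the tree's first 'Block &' and
returning a candidate block (or nullptr) fits. For illustration, a
hypothetical instrumented policy that logs before delegating could be passed
the same way:

	/* hypothetical policy: log each search before delegating to the best-fit search */
	return _allocate(size, align, range, [&] (Block &first) {
		Genode::log("searching fit for ", size, " bytes");
		return first.find_best_fit(size, align, range); });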
@ -383,14 +381,14 @@ void Allocator_avl_base::free(void *addr)

 	if (!b || !(b->used())) return;

-	addr_t new_addr = b->addr();
-	size_t new_size = b->size();
+	addr_t const new_addr = b->addr();
+	size_t const new_size = b->size();

 	if (new_addr != (addr_t)addr)
 		error(__PRETTY_FUNCTION__, ": given address (", addr, ") "
 		      "is not the block start address (", (void *)new_addr, ")");

-	_destroy_block(b);
+	_destroy_block(*b);

 	add_range(new_addr, new_size);
 }
@ -77,117 +77,129 @@ int Heap::quota_limit(size_t new_quota_limit)
|
|||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
Heap::Dataspace *Heap::_allocate_dataspace(size_t size, bool enforce_separate_metadata)
|
Heap::Alloc_ds_result
|
||||||
|
Heap::_allocate_dataspace(size_t size, bool enforce_separate_metadata)
|
||||||
{
|
{
|
||||||
-	Ram_dataspace_capability new_ds_cap;
-	void *ds_addr = 0;
-	void *ds_meta_data_addr = 0;
-	Heap::Dataspace *ds = 0;
-
-	/* make new ram dataspace available at our local address space */
-	try {
-		new_ds_cap = _ds_pool.ram_alloc->alloc(size);
-		try { ds_addr = _ds_pool.region_map->attach(new_ds_cap); }
-		catch (Out_of_ram) {
-			_ds_pool.ram_alloc->free(new_ds_cap);
-			return nullptr;
-		}
-		catch (Out_of_caps) {
-			_ds_pool.ram_alloc->free(new_ds_cap);
-			throw;
-		}
-		catch (Region_map::Invalid_dataspace) {
-			warning("heap: attempt to attach invalid dataspace");
-			_ds_pool.ram_alloc->free(new_ds_cap);
-			return nullptr;
-		}
-		catch (Region_map::Region_conflict) {
-			warning("heap: region conflict while allocating dataspace");
-			_ds_pool.ram_alloc->free(new_ds_cap);
-			return nullptr;
-		}
-	}
-	catch (Out_of_ram) { return nullptr; }
-
-	if (enforce_separate_metadata) {
-
-		/* allocate the Dataspace structure */
-		if (!_unsynchronized_alloc(sizeof(Heap::Dataspace), &ds_meta_data_addr)) {
-			warning("could not allocate dataspace meta data");
-			return 0;
-		}
-
-	} else {
-
-		/* add new local address range to our local allocator */
-		_alloc->add_range((addr_t)ds_addr, size);
-
-		/* allocate the Dataspace structure */
-		if (_alloc->alloc_aligned(sizeof(Heap::Dataspace), &ds_meta_data_addr, log2(16)).error()) {
-			warning("could not allocate dataspace meta data - this should never happen");
-			return 0;
-		}
-	}
-
-	ds = construct_at<Dataspace>(ds_meta_data_addr, new_ds_cap, ds_addr, size);
-	_ds_pool.insert(ds);
-	return ds;
+	using Result = Alloc_ds_result;
+
+	return _ds_pool.ram_alloc->try_alloc(size).convert<Result>(
+
+		[&] (Ram_dataspace_capability ds_cap) -> Result {
+
+			struct Alloc_guard
+			{
+				Ram_allocator &ram;
+				Ram_dataspace_capability ds;
+				bool keep = false;
+
+				Alloc_guard(Ram_allocator &ram, Ram_dataspace_capability ds)
+				: ram(ram), ds(ds) { }
+
+				~Alloc_guard() { if (!keep) ram.free(ds); }
+
+			} alloc_guard(*_ds_pool.ram_alloc, ds_cap);
+
+			struct Attach_guard
+			{
+				Region_map &rm;
+				struct { void *ptr = nullptr; };
+				bool keep = false;
+
+				Attach_guard(Region_map &rm) : rm(rm) { }
+
+				~Attach_guard() { if (!keep && ptr) rm.detach(ptr); }
+
+			} attach_guard(*_ds_pool.region_map);
+
+			try {
+				attach_guard.ptr = _ds_pool.region_map->attach(ds_cap);
+			}
+			catch (Out_of_ram)                    { return Alloc_error::OUT_OF_RAM; }
+			catch (Out_of_caps)                   { return Alloc_error::OUT_OF_CAPS; }
+			catch (Region_map::Invalid_dataspace) { return Alloc_error::DENIED; }
+			catch (Region_map::Region_conflict)   { return Alloc_error::DENIED; }
+
+			Alloc_result metadata = Alloc_error::DENIED;
+
+			/* allocate the 'Dataspace' structure */
+			if (enforce_separate_metadata) {
+				metadata = _unsynchronized_alloc(sizeof(Heap::Dataspace));
+
+			} else {
+
+				/* add new local address range to our local allocator */
+				_alloc->add_range((addr_t)attach_guard.ptr, size).with_result(
+					[&] (Range_allocator::Range_ok) {
+						metadata = _alloc->alloc_aligned(sizeof(Heap::Dataspace), log2(16)); },
+					[&] (Alloc_error error) {
+						metadata = error; });
+			}
+
+			return metadata.convert<Result>(
+				[&] (void *md_ptr) -> Result {
+					Dataspace &ds = *construct_at<Dataspace>(md_ptr, ds_cap,
+					                                         attach_guard.ptr, size);
+					_ds_pool.insert(&ds);
+					alloc_guard.keep = attach_guard.keep = true;
+					return &ds;
+				},
+				[&] (Alloc_error error) {
+					return error; });
+		},
+		[&] (Alloc_error error) {
+			return error; });
 }

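The new control flow threads every outcome through 'Attempt' values instead of exceptions: 'try_alloc' yields either a capability or an 'Alloc_error', and 'convert' folds both branches into the single return type. The same fold in isolation, outside the heap (a minimal sketch; the 'first_byte' helper is hypothetical and not part of this patch):

	#include <base/allocator.h>

	using namespace Genode;

	/* read one byte from a fresh allocation, or report the failure as '\0' */
	static char first_byte(Allocator &alloc)
	{
		return alloc.try_alloc(1).convert<char>(
			[&] (void *ptr) {
				char const c = *(char *)ptr;   /* success branch */
				alloc.free(ptr, 1);
				return c; },
			[&] (Allocator::Alloc_error) {
				return '\0'; });               /* error branch */
	}
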
-bool Heap::_try_local_alloc(size_t size, void **out_addr)
+Allocator::Alloc_result Heap::_try_local_alloc(size_t size)
 {
-	if (_alloc->alloc_aligned(size, out_addr, log2(16)).error())
-		return false;
-
-	_quota_used += size;
-	return true;
+	return _alloc->alloc_aligned(size, log2(16)).convert<Alloc_result>(
+
+		[&] (void *ptr) {
+			_quota_used += size;
+			return ptr; },
+
+		[&] (Alloc_error error) {
+			return error; });
 }

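Both lambdas above return different types, a pointer and an 'Alloc_error', yet 'convert<Alloc_result>' accepts either because an 'Attempt' is implicitly constructible from its result type as well as from its error type. A hedged illustration of that property (standalone, not taken from the patch):

	#include <base/allocator.h>

	using Genode::Allocator;

	static Allocator::Alloc_result pass_through(Allocator::Alloc_result r)
	{
		return r.convert<Allocator::Alloc_result>(
			[&] (void *ptr)                    { return ptr; },    /* wraps the pointer */
			[&] (Allocator::Alloc_error error) { return error; }); /* wraps the error   */
	}
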
-bool Heap::_unsynchronized_alloc(size_t size, void **out_addr)
+Allocator::Alloc_result Heap::_unsynchronized_alloc(size_t size)
 {
-	size_t dataspace_size;
-
 	if (size >= BIG_ALLOCATION_THRESHOLD) {

 		/*
 		 * big allocation
 		 *
-		 * in this case, we allocate one dataspace without any meta data in it
+		 * In this case, we allocate one dataspace without any meta data in it
 		 * and return its local address without going through the allocator.
 		 */

 		/* align to 4K page */
-		dataspace_size = align_addr(size, 12);
+		size_t const dataspace_size = align_addr(size, 12);

-		Heap::Dataspace *ds = _allocate_dataspace(dataspace_size, true);
+		return _allocate_dataspace(dataspace_size, true).convert<Alloc_result>(

-		if (!ds) {
-			warning("could not allocate dataspace");
-			return false;
-		}
-
-		_quota_used += ds->size;
-
-		*out_addr = ds->local_addr;
-
-		return true;
+			[&] (Dataspace *ds_ptr) {
+				_quota_used += ds_ptr->size;
+				return ds_ptr->local_addr; },
+
+			[&] (Alloc_error error) {
+				return error; });
 	}

 	/* try allocation at our local allocator */
-	if (_try_local_alloc(size, out_addr))
-		return true;
+	{
+		Alloc_result result = _try_local_alloc(size);
+		if (result.ok())
+			return result;
+	}

-	/*
-	 * Calculate block size of needed backing store. The block must hold the
-	 * requested 'size' and we add some space for meta data
-	 * ('Dataspace' structures, AVL-node slab blocks).
-	 * Finally, we align the size to a 4K page.
-	 */
-	dataspace_size = size + Allocator_avl::slab_block_size() + sizeof(Heap::Dataspace);
+	size_t dataspace_size = size
+	                      + Allocator_avl::slab_block_size()
+	                      + sizeof(Heap::Dataspace);
+	/* align to 4K page */
+	dataspace_size = align_addr(dataspace_size, 12);

 	/*
 	 * '_chunk_size' is a multiple of 4K, so 'dataspace_size' becomes
@@ -195,29 +207,34 @@ bool Heap::_unsynchronized_alloc(size_t size, void **out_addr)
 	 */
 	size_t const request_size = _chunk_size * sizeof(umword_t);

-	if ((dataspace_size < request_size) &&
-	    _allocate_dataspace(request_size, false)) {
+	Alloc_ds_result result = Alloc_error::DENIED;

-		/*
-		 * Exponentially increase chunk size with each allocated chunk until
-		 * we hit 'MAX_CHUNK_SIZE'.
-		 */
-		_chunk_size = min(2*_chunk_size, (size_t)MAX_CHUNK_SIZE);
+	if (dataspace_size < request_size) {

+		result = _allocate_dataspace(request_size, false);
+		if (result.ok()) {
+
+			/*
+			 * Exponentially increase chunk size with each allocated chunk until
+			 * we hit 'MAX_CHUNK_SIZE'.
+			 */
+			_chunk_size = min(2*_chunk_size, (size_t)MAX_CHUNK_SIZE);
+		}
 	} else {
-
-		/* align to 4K page */
-		dataspace_size = align_addr(dataspace_size, 12);
-		if (!_allocate_dataspace(dataspace_size, false))
-			return false;
+		result = _allocate_dataspace(dataspace_size, false);
 	}

+	if (result.failed())
+		return result.convert<Alloc_result>(
+			[&] (Dataspace *)       { return Alloc_error::DENIED; },
+			[&] (Alloc_error error) { return error; });
+
 	/* allocate originally requested block */
-	return _try_local_alloc(size, out_addr);
+	return _try_local_alloc(size);
 }

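When only the success/failure distinction matters at a given point, an 'Attempt' can be queried with 'ok()' or 'failed()' before its value is consumed, exactly as '_unsynchronized_alloc' does with the dataspace result. A small sketch of the same querying style (assumes only the post-patch 'Allocator' interface):

	#include <base/allocator.h>

	static bool can_allocate(Genode::Allocator &alloc, Genode::size_t size)
	{
		Genode::Allocator::Alloc_result const result = alloc.try_alloc(size);

		/* give a successful allocation straight back */
		result.with_result(
			[&] (void *ptr) { alloc.free(ptr, size); },
			[&] (Genode::Allocator::Alloc_error) { });

		return result.ok();
	}
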
-bool Heap::alloc(size_t size, void **out_addr)
+Allocator::Alloc_result Heap::try_alloc(size_t size)
 {
 	if (size == 0)
 		error("attempt to allocate zero-size block from heap");
@@ -227,9 +244,9 @@ bool Heap::alloc(size_t size, void **out_addr)

 	/* check requested allocation against quota limit */
 	if (size + _quota_used > _quota_limit)
-		return false;
+		return Alloc_error::DENIED;

-	return _unsynchronized_alloc(size, out_addr);
+	return _unsynchronized_alloc(size);
 }

@@ -223,10 +223,13 @@ Slab::Slab(size_t slab_size, size_t block_size, void *initial_sb,
 {
 	/* if no initial slab block was specified, try to get one */
 	if (!_curr_sb && _backing_store)
-		_curr_sb = _new_slab_block();
+		_new_slab_block().with_result(
+			[&] (Block *sb) { _curr_sb = sb; },
+			[&] (Alloc_error error) {
+				Allocator::throw_alloc_error(error); });

 	if (!_curr_sb)
-		throw Out_of_memory();
+		throw Allocator::Denied();

 	/* init first slab block */
 	construct_at<Block>(_curr_sb, *this);
@@ -253,13 +256,19 @@ Slab::~Slab()
 }


-Slab::Block *Slab::_new_slab_block()
+Slab::New_slab_block_result Slab::_new_slab_block()
 {
-	void *sb = nullptr;
-	if (!_backing_store || !_backing_store->alloc(_block_size, &sb))
-		return nullptr;
+	using Result = New_slab_block_result;

-	return construct_at<Block>(sb, *this);
+	if (!_backing_store)
+		return Alloc_error::DENIED;
+
+	Slab &this_slab = *this;
+	return _backing_store->try_alloc(_block_size).convert<Result>(
+		[&] (void *sb) {
+			return construct_at<Block>(sb, this_slab); },
+		[&] (Alloc_error error) {
+			return error; });
 }

@@ -313,19 +322,51 @@ void Slab::_insert_sb(Block *sb)
 }


+Slab::Expand_result Slab::_expand()
+{
+	if (!_backing_store || _nested)
+		return Expand_ok();
+
+	/* allocate new block for slab */
+	_nested = true;
+
+	/* reset '_nested' when leaving the scope */
+	struct Nested_guard {
+		bool &_nested;
+		Nested_guard(bool &nested) : _nested(nested) { }
+		~Nested_guard() { _nested = false; }
+	} guard(_nested);
+
+	return _new_slab_block().convert<Expand_result>(
+
+		[&] (Block *sb_ptr) {
+
+			/*
+			 * The new block has the maximum number of available slots.
+			 * Hence, we can insert it at the beginning of the sorted block
+			 * list.
+			 */
+			_insert_sb(sb_ptr);
+			return Expand_ok(); },
+
+		[&] (Alloc_error error) {
+			return error; });
+}
+
+
 void Slab::insert_sb(void *ptr)
 {
 	_insert_sb(construct_at<Block>(ptr, *this));
 }


-bool Slab::alloc(size_t size, void **out_addr)
+Allocator::Alloc_result Slab::try_alloc(size_t size)
 {
 	/* too large for us ? */
 	if (size > _slab_size) {
 		error("requested size ", size, " is larger then slab size ",
 		      _slab_size);
-		return false;
+		return Alloc_error::DENIED;
 	}

 	/*
@@ -336,29 +377,12 @@ bool Slab::alloc(size_t size, void **out_addr)
 	 * new slab block early enough - that is if there are only three free slab
 	 * entries left.
 	 */
-	if (_backing_store && (_total_avail <= 3) && !_nested) {
-
-		/* allocate new block for slab */
-		_nested = true;
-
-		try {
-			Block * const sb = _new_slab_block();
-
-			_nested = false;
-
-			if (!sb) return false;
-
-			/*
-			 * The new block has the maximum number of available slots and
-			 * so we can insert it at the beginning of the sorted block
-			 * list.
-			 */
-			_insert_sb(sb);
-		}
-		catch (...) {
-			_nested = false;
-			throw;
-		}
+	if (_total_avail <= 3) {
+		Expand_result expand_result = _expand();
+		if (expand_result.failed())
+			return expand_result.convert<Alloc_result>(
+				[&] (Expand_ok)         { return Alloc_error::DENIED; },
+				[&] (Alloc_error error) { return error; });
 	}

 	/* skip completely occupied slab blocks, detect cycles */
@@ -367,13 +391,13 @@ bool Slab::alloc(size_t size, void **out_addr)
 	if (_curr_sb->next == orig_curr_sb)
 		break;

-	*out_addr = _curr_sb->alloc();
-
-	if (*out_addr == nullptr)
-		return false;
+	void *ptr = _curr_sb->alloc();
+	if (!ptr)
+		return Alloc_error::DENIED;

 	_total_avail--;
-	return true;
+
+	return ptr;
 }

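Constructors cannot return an 'Attempt', so the slab constructor bridges back into the exception world through 'Allocator::throw_alloc_error', which re-raises an 'Alloc_error' as the matching 'Out_of_ram', 'Out_of_caps', or 'Denied' exception. The same bridge as a free-standing helper (a sketch, assuming the post-patch 'base/allocator.h' interface):

	#include <base/allocator.h>

	/* allocate like the legacy 'alloc', propagating errors as exceptions */
	static void *alloc_or_throw(Genode::Allocator &alloc, Genode::size_t size)
	{
		return alloc.try_alloc(size).convert<void *>(
			[&] (void *ptr) { return ptr; },
			[&] (Genode::Allocator::Alloc_error e) -> void * {
				Genode::Allocator::throw_alloc_error(e); });
	}
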
@@ -38,56 +38,64 @@ Sliced_heap::~Sliced_heap()
 }


-bool Sliced_heap::alloc(size_t size, void **out_addr)
+Allocator::Alloc_result Sliced_heap::try_alloc(size_t size)
 {
 	/* allocation includes space for block meta data and is page-aligned */
 	size = align_addr(size + sizeof(Block), 12);

-	Ram_dataspace_capability ds_cap;
-	Block *block = nullptr;
-
-	_ram_alloc.try_alloc(size).with_result(
-		[&] (Ram_dataspace_capability cap) { ds_cap = cap; },
-		[&] (Ram_allocator::Alloc_error error) {
-			switch (error) {
-			case Ram_allocator::Alloc_error::OUT_OF_CAPS: throw Out_of_caps();
-			case Ram_allocator::Alloc_error::OUT_OF_RAM:  break;
-			case Ram_allocator::Alloc_error::DENIED:      break;
-			}
-		});
-
-	if (!ds_cap.valid())
-		return false;
-
-	try {
-		block = _region_map.attach(ds_cap);
-	}
-	catch (Region_map::Region_conflict) {
-		error("sliced_heap: region conflict while attaching dataspace");
-		_ram_alloc.free(ds_cap);
-		return false;
-	}
-	catch (Region_map::Invalid_dataspace) {
-		error("sliced_heap: attempt to attach invalid dataspace");
-		_ram_alloc.free(ds_cap);
-		return false;
-	}
-	catch (Out_of_ram) {
-		return false;
-	}
-
-	/* serialize access to block list */
-	Mutex::Guard guard(_mutex);
-
-	construct_at<Block>(block, ds_cap, size);
-
-	_consumed += size;
-	_blocks.insert(block);
-
-	/* skip meta data prepended to the payload portion of the block */
-	*out_addr = block + 1;
-
-	return true;
+	return _ram_alloc.try_alloc(size).convert<Alloc_result>(
+
+		[&] (Ram_dataspace_capability ds_cap) -> Alloc_result {
+
+			struct Alloc_guard
+			{
+				Ram_allocator &ram;
+				Ram_dataspace_capability ds;
+				bool keep = false;
+
+				Alloc_guard(Ram_allocator &ram, Ram_dataspace_capability ds)
+				: ram(ram), ds(ds) { }
+
+				~Alloc_guard() { if (!keep) ram.free(ds); }
+
+			} alloc_guard(_ram_alloc, ds_cap);
+
+			struct Attach_guard
+			{
+				Region_map &rm;
+				struct { void *ptr = nullptr; };
+				bool keep = false;
+
+				Attach_guard(Region_map &rm) : rm(rm) { }
+
+				~Attach_guard() { if (!keep && ptr) rm.detach(ptr); }
+
+			} attach_guard(_region_map);
+
+			try {
+				attach_guard.ptr = _region_map.attach(ds_cap);
+			}
+			catch (Out_of_ram)                    { return Alloc_error::OUT_OF_RAM; }
+			catch (Out_of_caps)                   { return Alloc_error::OUT_OF_CAPS; }
+			catch (Region_map::Invalid_dataspace) { return Alloc_error::DENIED; }
+			catch (Region_map::Region_conflict)   { return Alloc_error::DENIED; }
+
+			/* serialize access to block list */
+			Mutex::Guard guard(_mutex);
+
+			Block * const block = construct_at<Block>(attach_guard.ptr, ds_cap, size);
+
+			_consumed += size;
+			_blocks.insert(block);
+
+			alloc_guard.keep = attach_guard.keep = true;
+
+			/* skip meta data prepended to the payload portion of the block */
+			void *ptr = block + 1;
+			return ptr;
+		},
+		[&] (Alloc_error error) {
+			return error; });
 }

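The 'Alloc_guard'/'Attach_guard' pair replaces the repetitive catch-and-free blocks of the old code: each guard releases its resource in the destructor unless 'keep' is set once the whole chain has succeeded. The idiom reduced to its core (a generic sketch, not the patch's exact types):

	#include <base/allocator.h>

	struct Release_guard
	{
		Genode::Allocator    &alloc;
		void          * const ptr;
		Genode::size_t  const size;
		bool                  keep = false;

		Release_guard(Genode::Allocator &alloc, void *ptr, Genode::size_t size)
		: alloc(alloc), ptr(ptr), size(size) { }

		/* roll back the allocation on any early exit */
		~Release_guard() { if (!keep) alloc.free(ptr, size); }
	};
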
@@ -171,16 +171,18 @@ extern "C" void *__emutls_get_address(void *obj)

 	/* the heap allocates 16-byte aligned */
 	if ((16 % emutls_object->align) != 0)
-		Genode::warning(__func__, ": cannot ensure alignment of ",
-		                emutls_object->align, " bytes");
+		warning(__func__, ": cannot ensure alignment of ",
+		        emutls_object->align, " bytes");

 	void *address = nullptr;

-	if (!cxx_heap().alloc(emutls_object->size, &address)) {
-		Genode::error(__func__,
-		              ": could not allocate thread-local variable instance");
+	cxx_heap().try_alloc(emutls_object->size).with_result(
+		[&] (void *ptr) { address = ptr; },
+		[&] (Allocator::Alloc_error e) {
+			error(__func__,
+			      ": could not allocate thread-local variable, error ", (int)e); });
+
+	if (!address)
 		return nullptr;
-	}

 	if (emutls_object->templ)
 		memcpy(address, emutls_object->templ, emutls_object->size);

@@ -76,9 +76,15 @@ extern "C" void *malloc(size_t size)
 	 * the size information when freeing the block.
 	 */
 	unsigned long real_size = size + sizeof(Block_header);
-	void *addr = 0;
-	if (!cxx_heap().alloc(real_size, &addr))
-		return 0;
+
+	void *addr = nullptr;
+	cxx_heap().try_alloc(real_size).with_result(
+		[&] (void *ptr) { addr = ptr; },
+		[&] (Allocator::Alloc_error error) {
+			Genode::error(__func__,
+			              ": cxx_heap allocation failed with error ", (int)error); });
+	if (!addr)
+		return nullptr;

 	*(Block_header *)addr = real_size;
 	return (Block_header *)addr + 1;

@@ -75,15 +75,14 @@ class Linker::Region_map

 	/**
 	 * Allocate region anywhere within the region map
+	 *
+	 * XXX propagate OUT_OF_RAM, OUT_OF_CAPS
 	 */
 	addr_t alloc_region(size_t size)
 	{
-		addr_t result = 0;
-		if (_range.alloc_aligned(size, (void **)&result,
-		                         get_page_size_log2()).error())
-			throw Region_conflict();
-
-		return result;
+		return _range.alloc_aligned(size, get_page_size_log2()).convert<addr_t>(
+			[&] (void *ptr) { return (addr_t)ptr; },
+			[&] (Allocator::Alloc_error) -> addr_t { throw Region_conflict(); });
 	}

 	/**
@@ -91,7 +90,7 @@ class Linker::Region_map
 	 */
 	void alloc_region_at(size_t size, addr_t vaddr)
 	{
-		if (_range.alloc_addr(size, vaddr).error())
+		if (_range.alloc_addr(size, vaddr).failed())
 			throw Region_conflict();
 	}

@@ -56,13 +56,13 @@ struct Allocator : Genode::Allocator
 	bool need_size_for_free() const override {
 		return a.need_size_for_free(); }

-	bool alloc(Genode::size_t size, void **p) override
+	Alloc_result try_alloc(Genode::size_t size) override
 	{
-		*p = a.alloc(size);
+		Alloc_result const result = a.try_alloc(size);

 		log("Allocator::alloc()");

-		return *p != 0;
+		return result;
 	}

 	void free(void *p, Genode::size_t size) override

@@ -357,7 +357,7 @@ class Audio_in::In
 	float const scale = 32768.0f * 2;

 	float * const content = p->content();
-	for (int i = 0; i < 2*Audio_in::PERIOD; i += 2) {
+	for (unsigned long i = 0; i < 2*Audio_in::PERIOD; i += 2) {
 		float sample = data[i] + data[i+1];
 		content[i/2] = sample / scale;
 	}

@@ -59,24 +59,51 @@ class Bsd::Slab_backend_alloc : public Genode::Allocator,
 	Genode::Allocator_avl  _range; /* manage allocations */
 	Genode::Ram_allocator &_ram;   /* allocator to allocate ds from */

-	bool _alloc_block()
+	struct Extend_ok { };
+	using Extend_result = Genode::Attempt<Extend_ok, Alloc_error>;
+
+	Extend_result _extend_one_block()
 	{
+		using namespace Genode;
+
 		if (_index == ELEMENTS) {
-			Genode::error("Slab-backend exhausted!");
-			return false;
+			error("Slab-backend exhausted!");
+			return Alloc_error::DENIED;
 		}

-		try {
-			_ds_cap[_index] = _ram.alloc(BLOCK_SIZE);
-			Region_map_client::attach_at(_ds_cap[_index], _index * BLOCK_SIZE, BLOCK_SIZE, 0);
-		} catch (...) { return false; }
-
-		/* return base + offset in VM area */
-		addr_t block_base = _base + (_index * BLOCK_SIZE);
-		++_index;
-
-		_range.add_range(block_base, BLOCK_SIZE);
-		return true;
+		return _ram.try_alloc(BLOCK_SIZE).convert<Extend_result>(
+
+			[&] (Ram_dataspace_capability ds) -> Extend_result {
+
+				_ds_cap[_index] = ds;
+
+				Alloc_error alloc_error = Alloc_error::DENIED;
+
+				try {
+					Region_map_client::attach_at(_ds_cap[_index],
+					                             _index * BLOCK_SIZE,
+					                             BLOCK_SIZE, 0);
+
+					/* return base + offset in VM area */
+					addr_t block_base = _base + (_index * BLOCK_SIZE);
+					++_index;
+
+					_range.add_range(block_base, BLOCK_SIZE);
+
+					return Extend_ok();
+				}
+				catch (Out_of_ram)  { alloc_error = Alloc_error::OUT_OF_RAM; }
+				catch (Out_of_caps) { alloc_error = Alloc_error::OUT_OF_CAPS; }
+				catch (...)         { alloc_error = Alloc_error::DENIED; }
+
+				error("Slab_backend_alloc: local attach_at failed");
+
+				_ram.free(ds);
+				_ds_cap[_index] = { };
+
+				return alloc_error;
+			},
+			[&] (Alloc_error e) -> Extend_result { return e; });
 	}

 	public:

|
|||||||
** Allocator interface **
|
** Allocator interface **
|
||||||
*************************/
|
*************************/
|
||||||
|
|
||||||
bool alloc(Genode::size_t size, void **out_addr)
|
Alloc_result try_alloc(Genode::size_t size) override
|
||||||
{
|
{
|
||||||
bool done = _range.alloc(size, out_addr);
|
Alloc_result result = _range.try_alloc(size);
|
||||||
|
if (result.ok())
|
||||||
|
return result;
|
||||||
|
|
||||||
if (done)
|
return _extend_one_block().convert<Alloc_result>(
|
||||||
return done;
|
[&] (Extend_ok) {
|
||||||
|
return _range.try_alloc(size); },
|
||||||
|
|
||||||
done = _alloc_block();
|
[&] (Alloc_error e) {
|
||||||
if (!done) {
|
Genode::error("Backend allocator exhausted\n");
|
||||||
Genode::error("Backend allocator exhausted\n");
|
return e; });
|
||||||
return false;
|
|
||||||
}
|
|
||||||
|
|
||||||
return _range.alloc(size, out_addr);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
void free(void *addr, Genode::size_t size) { _range.free(addr, size); }
|
void free(void *addr, Genode::size_t size) { _range.free(addr, size); }
|
||||||
@ -147,8 +173,9 @@ class Bsd::Slab_alloc : public Genode::Slab
|
|||||||
|
|
||||||
Genode::addr_t alloc()
|
Genode::addr_t alloc()
|
||||||
{
|
{
|
||||||
Genode::addr_t result;
|
return Slab::try_alloc(_object_size).convert<Genode::addr_t>(
|
||||||
return (Slab::alloc(_object_size, (void **)&result) ? result : 0);
|
[&] (void *ptr) { return (Genode::addr_t)ptr; },
|
||||||
|
[&] (Alloc_error) -> Genode::addr_t { return 0; });
|
||||||
}
|
}
|
||||||
|
|
||||||
void free(void *ptr) { Slab::free(ptr, _object_size); }
|
void free(void *ptr) { Slab::free(ptr, _object_size); }
|
||||||
|
@@ -94,10 +94,9 @@ class Pci_driver : public Bsd::Bus_driver
 			_dma_initialized = true;
 		}

-		void *ptr = nullptr;
-		bool err = Allocator_avl::alloc_aligned(size, &ptr, align).error();
-
-		return err ? 0 : (addr_t)ptr;
+		return Allocator_avl::alloc_aligned(size, align).convert<Genode::addr_t>(
+			[&] (void *ptr)   { return (addr_t)ptr; },
+			[&] (Alloc_error) { return 0UL; });
 	}

 	void free(Genode::addr_t virt, Genode::size_t size) {

@@ -389,15 +389,16 @@ static Genode::Allocator_avl& allocator()
 extern "C" void *dde_dma_alloc(dde_size_t size, dde_size_t align,
                                dde_size_t offset)
 {
-	void *ptr;
-	if (allocator().alloc_aligned(size, &ptr, Genode::log2(align)).error()) {
-		Genode::error("memory allocation failed in alloc_memblock ("
-		              "size=",   size, " "
-		              "align=",  Genode::Hex(align), " "
-		              "offset=", Genode::Hex(offset), ")");
-		return 0;
-	}
-	return ptr;
+	return allocator().alloc_aligned(size, Genode::log2(align)).convert<void *>(
+
+		[&] (void *ptr) { return ptr; },
+
+		[&] (Genode::Range_allocator::Alloc_error) -> void * {
+			Genode::error("memory allocation failed in alloc_memblock ("
+			              "size=",   size, " "
+			              "align=",  Genode::Hex(align), " "
+			              "offset=", Genode::Hex(offset), ")");
+			return nullptr; });
 }

@@ -477,26 +478,53 @@ struct Slab_backend_alloc : public Genode::Allocator,
 	Genode::Allocator_avl  _range;
 	Genode::Ram_allocator &_ram;

-	bool _alloc_block()
+	struct Extend_ok { };
+	using Extend_result = Genode::Attempt<Extend_ok, Alloc_error>;
+
+	Extend_result _extend_one_block()
 	{
 		using namespace Genode;

 		if (_index == ELEMENTS) {
 			error("slab backend exhausted!");
-			return false;
+			return Alloc_error::DENIED;
 		}

-		try {
-			_ds_cap[_index] = _ram.alloc(BLOCK_SIZE);
-			Region_map_client::attach_at(_ds_cap[_index], _index * BLOCK_SIZE, BLOCK_SIZE, 0);
-		} catch (...) { return false; }
-
-		/* return base + offset in VM area */
-		Genode::addr_t block_base = _base + (_index * BLOCK_SIZE);
-		++_index;
-
-		_range.add_range(block_base, BLOCK_SIZE);
-		return true;
+		return _ram.try_alloc(BLOCK_SIZE).convert<Extend_result>(
+
+			[&] (Ram_dataspace_capability ds) -> Extend_result {
+
+				_ds_cap[_index] = ds;
+
+				Alloc_error error = Alloc_error::DENIED;
+
+				try {
+					Region_map_client::attach_at(_ds_cap[_index],
+					                             _index * BLOCK_SIZE,
+					                             BLOCK_SIZE, 0);
+					/* return base + offset in VM area */
+					addr_t block_base = _base + (_index * BLOCK_SIZE);
+					++_index;
+
+					_range.add_range(block_base, BLOCK_SIZE);
+
+					return Extend_ok();
+				}
+				catch (Out_of_ram)  { error = Alloc_error::OUT_OF_RAM; }
+				catch (Out_of_caps) { error = Alloc_error::OUT_OF_CAPS; }
+				catch (...)         { error = Alloc_error::DENIED; }
+
+				Genode::error("Slab_backend_alloc: local attach_at failed");
+
+				_ram.free(ds);
+				_ds_cap[_index] = { };
+
+				return error;
+			},
+
+			[&] (Alloc_error e) -> Extend_result {
+				error("Slab_backend_alloc: backend allocator exhausted");
+				return e; });
 	}

 	Slab_backend_alloc(Genode::Env &env, Genode::Region_map &rm,

|
|||||||
** Allocator interface **
|
** Allocator interface **
|
||||||
*************************/
|
*************************/
|
||||||
|
|
||||||
bool alloc(Genode::size_t size, void **out_addr)
|
Alloc_result try_alloc(Genode::size_t size) override
|
||||||
{
|
{
|
||||||
bool done = _range.alloc(size, out_addr);
|
Alloc_result result = _range.try_alloc(size);
|
||||||
|
if (result.ok())
|
||||||
|
return result;
|
||||||
|
|
||||||
if (done)
|
return _extend_one_block().convert<Alloc_result>(
|
||||||
return done;
|
[&] (Extend_ok) { return _range.try_alloc(size); },
|
||||||
|
[&] (Alloc_error error) { return error; });
|
||||||
done = _alloc_block();
|
|
||||||
if (!done) {
|
|
||||||
Genode::error("backend allocator exhausted");
|
|
||||||
return false;
|
|
||||||
}
|
|
||||||
|
|
||||||
return _range.alloc(size, out_addr);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
void free(void *addr, Genode::size_t size) { _range.free(addr, size); }
|
void free(void *addr, Genode::size_t size) { _range.free(addr, size); }
|
||||||
@ -562,8 +585,9 @@ class Slab_alloc : public Genode::Slab
|
|||||||
|
|
||||||
Genode::addr_t alloc()
|
Genode::addr_t alloc()
|
||||||
{
|
{
|
||||||
Genode::addr_t result;
|
return Slab::try_alloc(_object_size).convert<Genode::addr_t>(
|
||||||
return (Slab::alloc(_object_size, (void **)&result) ? result : 0);
|
[&] (void *ptr) { return (Genode::addr_t)ptr; },
|
||||||
|
[&] (Alloc_error) -> Genode::addr_t { return 0; });
|
||||||
}
|
}
|
||||||
|
|
||||||
void free(void *ptr) { Slab::free(ptr, _object_size); }
|
void free(void *ptr) { Slab::free(ptr, _object_size); }
|
||||||
|
@@ -565,7 +565,7 @@ void *dmam_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handl
 		addr = Lx::Malloc::dma().alloc_large(size);
 		dma_addr = (dma_addr_t) Lx::Malloc::dma().phys_addr(addr);
 	} else
-		addr = Lx::Malloc::dma().alloc(size, 12, &dma_addr);
+		addr = Lx::Malloc::dma().malloc(size, 12, &dma_addr);

 	*dma_handle = dma_addr;
 	return addr;
@@ -702,7 +702,7 @@ int register_netdev(struct net_device * d)

 void *kmem_cache_alloc_node(struct kmem_cache *cache, gfp_t, int)
 {
-	return (void*)cache->alloc();
+	return (void*)cache->alloc_element();
 }


@@ -491,7 +491,7 @@ void dma_pool_free(struct dma_pool *d, void *vaddr, dma_addr_t a)

 void *dma_alloc_coherent(struct device *, size_t size, dma_addr_t *dma, gfp_t)
 {
-	void *addr = Lx::Malloc::dma().alloc(size, PAGE_SHIFT, dma);
+	void *addr = Lx::Malloc::dma().malloc(size, PAGE_SHIFT, dma);

 	if (!addr)
 		return 0;
@@ -509,7 +509,7 @@ int netif_carrier_ok(const struct net_device *dev)

 void *kmem_cache_alloc_node(struct kmem_cache *cache, gfp_t gfp_flags, int arg)
 {
-	return (void*)cache->alloc();
+	return (void*)cache->alloc_element();
 }


@@ -448,7 +448,7 @@ int netif_carrier_ok(const struct net_device *dev)

 void *kmem_cache_alloc_node(struct kmem_cache *cache, gfp_t gfp_flags, int arg)
 {
-	return (void*)cache->alloc();
+	return (void*)cache->alloc_element();
 }


@@ -199,9 +199,12 @@ void kmem_cache_destroy(struct kmem_cache *cache)

 void * kmem_cache_alloc(struct kmem_cache *cache, gfp_t flags)
 {
-	void *addr = (void *)cache->alloc();
-	if (addr && cache->ctor) { cache->ctor(addr); }
-	return addr;
+	void * const ptr = cache->alloc_element();
+
+	if (ptr && cache->ctor)
+		cache->ctor(ptr);
+
+	return ptr;
 }

@@ -55,10 +55,11 @@ class Lx::Slab_alloc : public Genode::Slab
 		_object_size(object_size)
 	{ }

-	Genode::addr_t alloc()
+	void *alloc_element()
 	{
-		Genode::addr_t result;
-		return (Slab::alloc(_object_size, (void **)&result) ? result : 0);
+		return Slab::try_alloc(_object_size).convert<void *>(
+			[&] (void *ptr)   { return ptr; },
+			[&] (Alloc_error) { return (void *)nullptr; });
 	}

 	void free(void *ptr)
@@ -39,7 +39,7 @@ class Lx::Slab_backend_alloc : public Genode::Allocator
 	/**
 	 * Allocate
 	 */
-	virtual bool alloc(Genode::size_t size, void **out_addr) = 0;
+	virtual Alloc_result try_alloc(Genode::size_t size) = 0;
 	virtual void free(void *addr) = 0;

 	/**
@@ -41,7 +41,7 @@ class Lx::Malloc : public Genode::Allocator
 	/**
 	 * Alloc in slabs
 	 */
-	virtual void *alloc(Genode::size_t size, int align = 0, Genode::addr_t *phys = 0) = 0;
+	virtual void *malloc(Genode::size_t size, int align = 0, Genode::addr_t *phys = 0) = 0;

 	virtual void free(void const *a) = 0;

@@ -67,11 +67,7 @@ class Lx::Malloc : public Genode::Allocator

 	size_t overhead(size_t size) const override { return 0; }

-	bool alloc(size_t size, void **out_addr) override
-	{
-		*out_addr = alloc(size);
-		return *out_addr ? true : false;
-	}
+	Alloc_result try_alloc(size_t size) override { return malloc(size); }

 	void free(void *addr, size_t size) override { free(addr); }

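Note how 'try_alloc' can simply 'return malloc(size)': an 'Alloc_result' is constructible directly from the 'void *' success value, so legacy pointer-returning helpers slot into the new interface without ceremony. A null pointer would however be wrapped as a valid result, so a wrapper that can fail should return an 'Alloc_error' explicitly, as sketched here (the 'legacy_alloc' helper is an assumption for illustration):

	#include <base/allocator.h>

	void *legacy_alloc(Genode::size_t);  /* hypothetical legacy helper */

	Genode::Allocator::Alloc_result wrap_legacy(Genode::size_t size)
	{
		void * const ptr = legacy_alloc(size);

		if (!ptr)
			return Genode::Allocator::Alloc_error::DENIED;  /* explicit failure */

		return ptr;  /* implicit success conversion */
	}
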
@@ -96,20 +96,18 @@ class Lx_kit::Slab_backend_alloc : public Lx::Slab_backend_alloc,
 	 ** Lx::Slab_backend_alloc interface **
 	 **************************************/

-	bool alloc(size_t size, void **out_addr) override
+	Alloc_result try_alloc(size_t size) override
 	{
-		bool done = _range.alloc(size, out_addr);
+		Alloc_result result = _range.try_alloc(size);
+		if (result.ok())
+			return result;

-		if (done)
-			return done;
-
-		done = _alloc_block();
-		if (!done) {
+		if (!_alloc_block()) {
 			Genode::error("backend allocator exhausted");
-			return false;
+			return Alloc_error::DENIED;
 		}

-		return _range.alloc(size, out_addr);
+		return _range.alloc(size);
 	}

 	void free(void *addr) {
@@ -232,7 +230,7 @@ class Lx_kit::Malloc : public Lx::Malloc
 	 ** Lx::Malloc interface **
 	 **************************/

-	void *alloc(Genode::size_t size, int align = 0, Genode::addr_t *phys = 0)
+	void *malloc(Genode::size_t size, int align = 0, Genode::addr_t *phys = 0)
 	{
 		using namespace Genode;

@@ -257,7 +255,7 @@ class Lx_kit::Malloc : public Lx::Malloc
 		return 0;
 	}

-	addr_t addr = _allocator[msb - SLAB_START_LOG2]->alloc();
+	addr_t addr = (addr_t)_allocator[msb - SLAB_START_LOG2]->alloc_element();
 	if (!addr) {
 		Genode::error("failed to get slab for ", 1 << msb);
 		return 0;
@@ -298,13 +296,14 @@ class Lx_kit::Malloc : public Lx::Malloc

 	void *alloc_large(size_t size)
 	{
-		void *addr;
-		if (!_back_allocator.alloc(size, &addr)) {
-			Genode::error("large back end allocation failed (", size, " bytes)");
-			return nullptr;
-		}
-
-		return addr;
+		return _back_allocator.try_alloc(size).convert<void *>(
+
+			[&] (void *ptr) {
+				return ptr; },
+
+			[&] (Alloc_error) {
+				Genode::error("large back end allocation failed (", size, " bytes)");
+				return (void *)nullptr; });
 	}

 	void free_large(void *ptr)

@@ -95,42 +95,48 @@ void * Lx_kit::Mem_allocator::alloc(size_t size, size_t align)
 	if (!size)
 		return nullptr;

-	void * out_addr = nullptr;
-
-	if (_mem.alloc_aligned(size, &out_addr, log2(align)).error()) {
-
-		/*
-		 * Restrict the minimum buffer size to avoid the creation of
-		 * a separate dataspaces for tiny allocations.
-		 */
-		size_t const min_buffer_size = 256*1024;
-
-		/*
-		 * Allocate one excess byte that is not officially registered at
-		 * the '_mem' ranges. This way, two virtual consecutive ranges
-		 * (that must be assumed to belong to non-contiguous physical
-		 * ranges) can never be merged when freeing an allocation. Such
-		 * a merge would violate the assumption that a both the virtual
-		 * and physical addresses of a multi-page allocation are always
-		 * contiguous.
-		 */
-		Attached_dataspace & ds = alloc_dataspace(max(size + 1,
-		                                              min_buffer_size));
-
-		_mem.add_range((addr_t)ds.local_addr<void>(), ds.size() - 1);
-
-		/* re-try allocation */
-		_mem.alloc_aligned(size, &out_addr, log2(align));
-	}
-
-	if (!out_addr) {
-		error("memory allocation failed for ", size, " align ", align);
-		backtrace();
-	}
-	else
-		memset(out_addr, 0, size);
-
-	return out_addr;
+	return _mem.alloc_aligned(size, log2(align)).convert<void *>(
+
+		[&] (void *ptr) {
+			memset(ptr, 0, size);
+			return ptr; },
+
+		[&] (Range_allocator::Alloc_error) {
+
+			/*
+			 * Restrict the minimum buffer size to avoid the creation of
+			 * a separate dataspaces for tiny allocations.
+			 */
+			size_t const min_buffer_size = 256*1024;
+
+			/*
+			 * Allocate one excess byte that is not officially registered at
+			 * the '_mem' ranges. This way, two virtual consecutive ranges
+			 * (that must be assumed to belong to non-contiguous physical
+			 * ranges) can never be merged when freeing an allocation. Such
+			 * a merge would violate the assumption that a both the virtual
+			 * and physical addresses of a multi-page allocation are always
+			 * contiguous.
+			 */
+			Attached_dataspace & ds = alloc_dataspace(max(size + 1,
+			                                              min_buffer_size));
+			_mem.add_range((addr_t)ds.local_addr<void>(), ds.size() - 1);
+
+			/* re-try allocation */
+			return _mem.alloc_aligned(size, log2(align)).convert<void *>(
+
+				[&] (void *ptr) {
+					memset(ptr, 0, size);
+					return ptr; },
+
+				[&] (Range_allocator::Alloc_error) -> void * {
+					error("memory allocation failed for ", size, " align ", align);
+					backtrace();
+					return nullptr; }
+			);
+		}
+	);
 }

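The error branch above does not give up immediately: it first grows the backing store ('alloc_dataspace' plus 'add_range') and then re-runs the same 'alloc_aligned' inside the error lambda, nesting one 'convert' inside another. The retry skeleton stripped of the Lx_kit specifics (a hedged sketch; 'grow' stands in for whatever replenishes the range allocator):

	#include <base/allocator.h>

	template <typename GROW_FN>
	void *alloc_with_retry(Genode::Range_allocator &range, Genode::size_t size,
	                       GROW_FN const &grow)
	{
		return range.try_alloc(size).convert<void *>(
			[&] (void *ptr) { return ptr; },
			[&] (Genode::Allocator::Alloc_error) {
				grow();                                    /* extend backing store */
				return range.try_alloc(size).convert<void *>(
					[&] (void *ptr) { return ptr; },       /* second attempt */
					[&] (Genode::Allocator::Alloc_error) -> void * {
						return nullptr; });                /* definitive failure */
			});
	}
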
@@ -123,15 +123,25 @@ void *alloc_large_system_hash(const char *tablename,
 	unsigned long nlog2 = ilog2(elements);
 	nlog2 <<= (1 << nlog2) < elements ? 1 : 0;

-	void *table;
-	lx_env->heap().alloc(elements * bucketsize, &table);
-
-	if (_hash_mask)
-		*_hash_mask = (1 << nlog2) - 1;
-	if (_hash_shift)
-		*_hash_shift = nlog2;
-
-	return table;
+	return lx_env->heap().try_alloc(elements * bucketsize).convert<void *>(
+
+		[&] (void *table_ptr) {
+
+			if (_hash_mask)
+				*_hash_mask = (1 << nlog2) - 1;
+
+			if (_hash_shift)
+				*_hash_shift = nlog2;
+
+			return table_ptr;
+		},
+
+		[&] (Genode::Allocator::Alloc_error) -> void * {
+			Genode::error("alloc_large_system_hash allocation failed");
+			return nullptr;
+		}
+	);
 }

@@ -148,12 +158,12 @@ void *kmalloc_array(size_t n, size_t size, gfp_t flags)

 void *kmem_cache_alloc_node(struct kmem_cache *cache, gfp_t flags, int node)
 {
-	return (void*)cache->alloc();
+	return (void*)cache->alloc_element();
 }

 void *kmem_cache_zalloc(struct kmem_cache *cache, gfp_t flags)
 {
-	void *addr = (void*)cache->alloc();
+	void *addr = (void*)cache->alloc_element();
 	if (addr) { memset(addr, 0, cache->size()); }

 	return addr;
@@ -87,8 +87,12 @@ int request_firmware_nowait(struct module *module, bool uevent,
 	}

 	/* use allocator because fw is too big for slab */
-	if (!Lx_kit::env().heap().alloc(fwl->size, (void**)&fw->data)) {
-		Genode::error("Could not allocate memory for firmware image");
+	Lx_kit::env().heap().try_alloc(fwl->size).with_result(
+		[&] (void *ptr) { fw->data = (u8 *)ptr; },
+		[&] (Genode::Allocator::Alloc_error) {
+			Genode::error("Could not allocate memory for firmware image"); });
+
+	if (!fw->data) {
 		kfree(fw);
 		return -1;
 	}

@@ -427,14 +427,17 @@ void *devm_kzalloc(struct device *dev, size_t size, gfp_t gfp)
 void *vmalloc(unsigned long size)
 {
 	size_t real_size = size + sizeof(size_t);
-	size_t *addr;
-
-	if (!Lx_kit::env().heap().alloc(real_size, (void**)&addr)) {
-		return nullptr;
-	}
-
-	*addr = real_size;
-	return addr + 1;
+
+	return Lx_kit::env().heap().try_alloc(real_size).convert<void *>(
+
+		[&] (void *ptr) -> void * {
+			size_t * const base = (size_t *)ptr;
+			*base = real_size;
+			return base + 1;
+		},
+
+		[&] (Genode::Allocator::Alloc_error) -> void * {
+			return nullptr; });
 }

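vmalloc's bookkeeping trick, prefixing the block with its own size so that the matching free routine can recover it, carries over unchanged; only the allocation step now folds an 'Attempt'. The prefix scheme in isolation (a sketch mirroring the patch's layout assumptions):

	#include <base/allocator.h>

	static void *alloc_with_size_prefix(Genode::Allocator &alloc, Genode::size_t size)
	{
		Genode::size_t const real_size = size + sizeof(Genode::size_t);

		return alloc.try_alloc(real_size).convert<void *>(
			[&] (void *ptr) -> void * {
				Genode::size_t * const base = (Genode::size_t *)ptr;
				*base = real_size;  /* remember the size in front of the payload */
				return base + 1;    /* hand out the address after the header */
			},
			[&] (Genode::Allocator::Alloc_error) -> void * { return nullptr; });
	}
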
@@ -744,7 +747,7 @@ void *dma_alloc_coherent(struct device *dev, size_t size,
 	bool const large_alloc = size >= DMA_LARGE_ALLOC_SIZE;
 	dma_addr_t dma_addr = 0;
 	void *addr = large_alloc ? Lx::Malloc::dma().alloc_large(size)
-	                         : Lx::Malloc::dma().alloc(size, 12, &dma_addr);
+	                         : Lx::Malloc::dma().malloc(size, 12, &dma_addr);

 	if (addr) {
 		*dma_handle = large_alloc ? Lx::Malloc::dma().phys_addr(addr)
@@ -924,7 +927,7 @@ struct page *alloc_pages(gfp_t gfp_mask, unsigned int order)

 	size_t size = PAGE_SIZE << order;

-	page->addr = Lx::Malloc::dma().alloc(size, 12);
+	page->addr = Lx::Malloc::dma().malloc(size, 12);

 	if (!page->addr) {
 		Genode::error("alloc_pages: ", size, " failed");

@@ -128,37 +128,40 @@ namespace Allocator {
 	/**
 	 * Allocate
 	 */
-	bool alloc(size_t size, void **out_addr)
+	Alloc_result try_alloc(size_t size) override
 	{
-		bool done = _range.alloc(size, out_addr);
+		Alloc_result result = _range.try_alloc(size);
+		if (result.ok())
+			return result;

-		if (done)
-			return done;
+		if (!_alloc_block())
+			return Alloc_error::DENIED;

-		done = _alloc_block();
-		if (!done)
-			return false;
-
-		return _range.alloc(size, out_addr);
+		return _range.try_alloc(size);
 	}

 	void *alloc_aligned(size_t size, unsigned align = 0)
 	{
-		void *addr;
-
-		if (!_range.alloc_aligned(size, &addr, align).error())
-			return addr;
+		Alloc_result result = _range.alloc_aligned(size, align);
+		if (result.ok())
+			return result.convert<void *>(
+				[&] (void *ptr)           { return ptr; },
+				[&] (Alloc_error) -> void * { return nullptr; });

 		if (!_alloc_block())
 			return 0;

-		if (_range.alloc_aligned(size, &addr, align).error()) {
-			error("backend allocator: Unable to allocate memory "
-			      "(size: ", size, " align: ", align, ")");
-			return 0;
-		}
-
-		return addr;
+		return _range.alloc_aligned(size, align).convert<void *>(
+
+			[&] (void *ptr) {
+				return ptr; },
+
+			[&] (Alloc_error e) -> void * {
+				error("backend allocator: Unable to allocate memory "
+				      "(size: ", size, " align: ", align, ")");
+				return nullptr;
+			}
+		);
 	}

 	void free(void *addr, size_t size) override { _range.free(addr, size); }

@@ -40,13 +40,17 @@ extern "C" void *malloc(size_t size)
 	 * the subsequent address. This way, we can retrieve
 	 * the size information when freeing the block.
 	 */
-	unsigned long real_size = size + sizeof(unsigned long);
-	void *addr = 0;
-	if (!alloc().alloc(real_size, &addr))
-		return 0;
-
-	*(unsigned long *)addr = real_size;
-	return (unsigned long *)addr + 1;
+	unsigned long const real_size = size + sizeof(unsigned long);
+
+	return alloc().try_alloc(real_size).convert<void *>(
+
+		[&] (void *ptr) {
+			*(unsigned long *)ptr = real_size;
+			return (unsigned long *)ptr + 1; },
+
+		[&] (Allocator::Alloc_error) {
+			return nullptr; });
 }

@@ -73,18 +73,19 @@ class Genode::Cached_font : public Text_painter::Font

 	size_t consumed_bytes() const { return _consumed_bytes; }

-	bool alloc(size_t size, void **out_addr) override
+	Alloc_result try_alloc(size_t size) override
 	{
 		size = _padded(size);

-		bool const result = _alloc.alloc(size, out_addr);
+		return _alloc.try_alloc(size).convert<Alloc_result>(

-		if (result) {
-			memset(*out_addr, 0, size);
-			_consumed_bytes += size + overhead(size);
-		}
+			[&] (void *ptr) {
+				memset(ptr, 0, size);
+				_consumed_bytes += size + overhead(size);
+				return ptr; },

-		return result;
+			[&] (Alloc_error error) {
+				return error; });
 	}

 	size_t consumed() const override { return _alloc.consumed(); }

@@ -98,24 +98,28 @@ struct Text_area::Dynamic_array
 	size_t const new_capacity =
 		2 * max(_capacity, max(8U, at.value));

-	Element *new_array = nullptr;
-	try {
-		(void)_alloc.alloc(sizeof(Element)*new_capacity, &new_array);
-
-		for (unsigned i = 0; i < new_capacity; i++)
-			construct_at<Element>(&new_array[i]);
-	}
-	catch (... /* Out_of_ram, Out_of_caps */ ) { throw; }
-
-	if (_array) {
-		for (unsigned i = 0; i < _upper_bound; i++)
-			new_array[i].construct(*_array[i]);
-
-		_alloc.free(_array, sizeof(Element)*_capacity);
-	}
-
-	_array    = new_array;
-	_capacity = new_capacity;
+	_alloc.try_alloc(sizeof(Element)*new_capacity).with_result(
+
+		[&] (void *ptr) {
+
+			Element *new_array = (Element *)ptr;
+
+			for (unsigned i = 0; i < new_capacity; i++)
+				construct_at<Element>(&new_array[i]);
+
+			if (_array) {
+				for (unsigned i = 0; i < _upper_bound; i++)
+					new_array[i].construct(*_array[i]);
+
+				_alloc.free(_array, sizeof(Element)*_capacity);
+			}
+
+			_array    = new_array;
+			_capacity = new_capacity;
+		},
+		[&] (Allocator::Alloc_error e) {
+			Allocator::throw_alloc_error(e); }
+	);
 }

 /* make room for new element */

|
|||||||
throw Http::Uri_error();
|
throw Http::Uri_error();
|
||||||
}
|
}
|
||||||
|
|
||||||
_heap.alloc(sizeof(struct addrinfo), (void**)&_info);
|
_heap.try_alloc(sizeof(struct addrinfo)).with_result(
|
||||||
Genode::memcpy(_info, info, sizeof(struct addrinfo));
|
|
||||||
|
[&] (void *ptr) {
|
||||||
|
_info = (struct addrinfo *)ptr;
|
||||||
|
Genode::memcpy(_info, info, sizeof(struct addrinfo));
|
||||||
|
},
|
||||||
|
|
||||||
|
[&] (Allocator::Alloc_error) {
|
||||||
|
throw Http::Uri_error();
|
||||||
|
}
|
||||||
|
);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
@ -180,7 +189,9 @@ void Http::do_read(void * buf, size_t size)
|
|||||||
Http::Http(Genode::Heap &heap, ::String const &uri)
|
Http::Http(Genode::Heap &heap, ::String const &uri)
|
||||||
: _heap(heap), _port((char *)"80")
|
: _heap(heap), _port((char *)"80")
|
||||||
{
|
{
|
||||||
_heap.alloc(HTTP_BUF, (void**)&_http_buf);
|
_heap.try_alloc(HTTP_BUF).with_result(
|
||||||
|
[&] (void *ptr) { _http_buf = (char *)ptr; },
|
||||||
|
[&] (Allocator::Alloc_error) { });
|
||||||
|
|
||||||
/* parse URI */
|
/* parse URI */
|
||||||
parse_uri(uri);
|
parse_uri(uri);
|
||||||
@ -221,11 +232,31 @@ void Http::parse_uri(::String const &u)
|
|||||||
size_t i;
|
size_t i;
|
||||||
for (i = 0; i < length && uri[i] != '/'; i++) ;
|
for (i = 0; i < length && uri[i] != '/'; i++) ;
|
||||||
|
|
||||||
_heap.alloc(i + 1, (void**)&_host);
|
/*
|
||||||
copy_cstring(_host, uri, i + 1);
|
* \param len number of cstring bytes w/o null-termination
|
||||||
|
*/
|
||||||
|
auto copied_cstring = [&] (char const *src, size_t len) -> char *
|
||||||
|
{
|
||||||
|
size_t const bytes = len + 1;
|
||||||
|
|
||||||
_heap.alloc(length - i + 1, (void**)&_path);
|
return _heap.try_alloc(bytes).convert<char *>(
|
||||||
copy_cstring(_path, uri + i, length - i + 1);
|
|
||||||
|
[&] (void *ptr) {
|
||||||
|
char *dst = (char *)ptr;
|
||||||
|
copy_cstring(dst, src, bytes);
|
||||||
|
return dst; },
|
||||||
|
|
||||||
|
[&] (Allocator::Alloc_error) -> char * {
|
||||||
|
return nullptr; });
|
||||||
|
};
|
||||||
|
|
||||||
|
_host = copied_cstring(uri, i);
|
||||||
|
_path = copied_cstring(uri + i, length - i);
|
||||||
|
|
||||||
|
if (!_host || !_path) {
|
||||||
|
error("allocation failure during Http::parse_uri");
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
/* look for port */
|
/* look for port */
|
||||||
size_t len = Genode::strlen(_host);
|
size_t len = Genode::strlen(_host);
|
||||||
|
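The 'copied_cstring' lambda gives the two host/path allocations one shared failure policy: each returns nullptr on error and the caller checks both afterwards. The same duplicate-a-string building block as a free function (a sketch; the failure policy is left to the caller):

	#include <base/allocator.h>
	#include <util/string.h>

	static char *copied_cstring(Genode::Allocator &alloc, char const *src,
	                            Genode::size_t len /* w/o null-termination */)
	{
		Genode::size_t const bytes = len + 1;

		return alloc.try_alloc(bytes).convert<char *>(
			[&] (void *ptr) {
				char *dst = (char *)ptr;
				Genode::copy_cstring(dst, src, bytes);  /* includes termination */
				return dst; },
			[&] (Genode::Allocator::Alloc_error) -> char * {
				return nullptr; });
	}
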
@ -27,11 +27,7 @@ struct Libc::Allocator : Genode::Allocator
|
|||||||
{
|
{
|
||||||
typedef Genode::size_t size_t;
|
typedef Genode::size_t size_t;
|
||||||
|
|
||||||
bool alloc(size_t size, void **out_addr) override
|
Alloc_result try_alloc(size_t size) override { return malloc(size); }
|
||||||
{
|
|
||||||
*out_addr = malloc(size);
|
|
||||||
return true;
|
|
||||||
}
|
|
||||||
|
|
||||||
void free(void *addr, size_t size) override { ::free(addr); }
|
void free(void *addr, size_t size) override { ::free(addr); }
|
||||||
|
|
||||||
|
@ -1 +1 @@
|
|||||||
58ab991bee9d68f213ae71f04c796de0490f3b0e
|
68b8eb5bfa950adf094fe9e6c579e6d542dd6c63
|
||||||
|
@ -261,9 +261,9 @@ extern "C" int getpid()
|
|||||||
|
|
||||||
extern "C" void *malloc(size_t size)
|
extern "C" void *malloc(size_t size)
|
||||||
{
|
{
|
||||||
void *res = nullptr;
|
return gcov_env->heap.try_alloc(size).convert<void *>(
|
||||||
gcov_env->heap.alloc(size, &res);
|
[&] (void *ptr) { return ptr; },
|
||||||
return res;
|
[&] (Genode::Allocator::Alloc_error) -> void * { return nullptr; });
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
|
@ -50,7 +50,7 @@ Libc::Mem_alloc_impl::Dataspace_pool::~Dataspace_pool()
|
|||||||
int Libc::Mem_alloc_impl::Dataspace_pool::expand(size_t size, Range_allocator *alloc)
|
int Libc::Mem_alloc_impl::Dataspace_pool::expand(size_t size, Range_allocator *alloc)
|
||||||
{
|
{
|
||||||
Ram_dataspace_capability new_ds_cap;
|
Ram_dataspace_capability new_ds_cap;
|
||||||
void *local_addr, *ds_addr = 0;
|
void *local_addr;
|
||||||
|
|
||||||
/* make new ram dataspace available at our local address space */
|
/* make new ram dataspace available at our local address space */
|
||||||
try {
|
try {
|
||||||
@ -71,16 +71,17 @@ int Libc::Mem_alloc_impl::Dataspace_pool::expand(size_t size, Range_allocator *a
|
|||||||
alloc->add_range((addr_t)local_addr, size);
|
alloc->add_range((addr_t)local_addr, size);
|
||||||
|
|
||||||
/* now that we have new backing store, allocate Dataspace structure */
|
/* now that we have new backing store, allocate Dataspace structure */
|
||||||
if (alloc->alloc_aligned(sizeof(Dataspace), &ds_addr, 2).error()) {
|
return alloc->alloc_aligned(sizeof(Dataspace), 2).convert<int>(
|
||||||
warning("libc: could not allocate meta data - this should never happen");
|
|
||||||
return -1;
|
|
||||||
}
|
|
||||||
|
|
||||||
/* add dataspace information to list of dataspaces */
|
[&] (void *ptr) {
|
||||||
Dataspace *ds = construct_at<Dataspace>(ds_addr, new_ds_cap, local_addr);
|
/* add dataspace information to list of dataspaces */
|
||||||
insert(ds);
|
Dataspace *ds = construct_at<Dataspace>(ptr, new_ds_cap, local_addr);
|
||||||
|
insert(ds);
|
||||||
|
return 0; },
|
||||||
|
|
||||||
return 0;
|
[&] (Allocator::Alloc_error) {
|
||||||
|
warning("libc: could not allocate meta data - this should never happen");
|
||||||
|
return -1; });
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
@ -89,9 +90,14 @@ void *Libc::Mem_alloc_impl::alloc(size_t size, size_t align_log2)
|
|||||||
/* serialize access of heap functions */
|
/* serialize access of heap functions */
|
||||||
Mutex::Guard guard(_mutex);
|
Mutex::Guard guard(_mutex);
|
||||||
|
|
||||||
|
void *out_addr = nullptr;
|
||||||
|
|
||||||
/* try allocation at our local allocator */
|
/* try allocation at our local allocator */
|
||||||
void *out_addr = 0;
|
_alloc.alloc_aligned(size, align_log2).with_result(
|
||||||
if (_alloc.alloc_aligned(size, &out_addr, align_log2).ok())
|
[&] (void *ptr) { out_addr = ptr; },
|
||||||
|
[&] (Allocator::Alloc_error) { });
|
||||||
|
|
||||||
|
if (out_addr)
|
||||||
return out_addr;
|
return out_addr;
|
||||||
|
|
||||||
/*
|
/*
|
||||||
@@ -119,7 +125,11 @@ void *Libc::Mem_alloc_impl::alloc(size_t size, size_t align_log2)
     }
 
     /* allocate originally requested block */
-    return _alloc.alloc_aligned(size, &out_addr, align_log2).ok() ? out_addr : 0;
+    _alloc.alloc_aligned(size, align_log2).with_result(
+        [&] (void *ptr) { out_addr = ptr; },
+        [&] (Allocator::Alloc_error) { });
+
+    return out_addr;
 }
 
 
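Where no value needs to be produced, 'with_result' is the side-effect counterpart of 'convert': one callback per outcome, and the caller keeps its own out-variable. A small sketch under the same stand-in assumptions as the previous example ('Result' is not Genode's real type):

/*
 * Sketch of the 'with_result' idiom from the hunk above.
 */
#include <cstddef>
#include <cstdio>
#include <cstdlib>

enum class Alloc_error { OUT_OF_RAM, OUT_OF_CAPS, DENIED };

struct Result
{
	void *ptr; bool ok;

	/* invoke exactly one of the two callbacks, return nothing */
	template <typename OK_FN, typename ERR_FN>
	void with_result(OK_FN const &ok_fn, ERR_FN const &err_fn) const
	{
		if (ok) ok_fn(ptr); else err_fn(Alloc_error::OUT_OF_RAM);
	}
};

static Result try_alloc(std::size_t size)
{
	void * const p = std::malloc(size);
	return Result { p, p != nullptr };
}

int main()
{
	/* mirrors the patched Mem_alloc_impl::alloc control flow */
	void *out_addr = nullptr;

	try_alloc(32).with_result(
		[&] (void *ptr)   { out_addr = ptr; },
		[&] (Alloc_error) { /* leave out_addr as nullptr */ });

	if (out_addr)
		std::printf("first attempt succeeded\n");
}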
@@ -63,8 +63,9 @@ class Libc::Slab_alloc : public Slab
 
         void *alloc()
         {
-            void *result;
-            return (Slab::alloc(_object_size, &result) ? result : 0);
+            return Slab::try_alloc(_object_size).convert<void *>(
+                [&] (void *ptr)   { return ptr; },
+                [&] (Alloc_error) { return nullptr; });
         }
 
         void free(void *ptr) { Slab::free(ptr, _object_size); }
@@ -167,7 +168,9 @@ class Libc::Malloc
 
         /* use backing store if requested memory is larger than largest slab */
         if (msb > SLAB_STOP)
-            _backing_store.alloc(real_size, &alloc_addr);
+            _backing_store.try_alloc(real_size).with_result(
+                [&] (void *ptr) { alloc_addr = ptr; },
+                [&] (Allocator::Alloc_error) { });
         else
             alloc_addr = _slabs[msb - SLAB_START]->alloc();
 
@@ -89,19 +89,20 @@ extern "C" {
 
     void *genode_malloc(unsigned long size)
     {
-        void *ptr = nullptr;
-        return Lwip::_heap->alloc(size, &ptr) ? ptr : 0;
+        return Lwip::_heap->try_alloc(size).convert<void *>(
+            [&] (void *ptr) { return ptr; },
+            [&] (Genode::Allocator::Alloc_error) -> void * { return nullptr; });
     }
 
     void *genode_calloc(unsigned long number, unsigned long size)
     {
-        void *ptr = nullptr;
         size *= number;
-        if (Lwip::_heap->alloc(size, &ptr)) {
+
+        void * const ptr = genode_malloc(size);
+        if (ptr)
             Genode::memset(ptr, 0x00, size);
-            return ptr;
-        }
-        return nullptr;
+
+        return ptr;
     }
 
     u32_t sys_now(void) {
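Note that the rewritten genode_calloc multiplies 'number' and 'size' unchecked, matching the previous behavior. For illustration only, a hedged sketch of an overflow-checked variant built on the same malloc-then-memset pattern - the check is an editorial aside, not part of this commit, and 'genode_malloc' below is a local stand-in:

#include <cstring>
#include <cstdlib>

static void *genode_malloc(unsigned long size) { return std::malloc(size); }

void *genode_calloc_checked(unsigned long number, unsigned long size)
{
	/* reject number*size overflow instead of allocating a short buffer */
	if (size != 0 && number > ~0UL / size)
		return nullptr;

	unsigned long const bytes = number * size;

	void * const ptr = genode_malloc(bytes);
	if (ptr)
		std::memset(ptr, 0, bytes);

	return ptr;
}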
@@ -23,7 +23,7 @@ new file mode 100644
 index 0000000..03b1740
 --- /dev/null
 +++ b/sanitizer_common/sanitizer_genode.cc
-@@ -0,0 +1,331 @@
+@@ -0,0 +1,335 @@
 +/*
 + * \brief Genode-specific functions from sanitizer_common.h
 + *        and sanitizer_libc.h
@@ -205,14 +205,18 @@ index 0000000..03b1740
 +{
 +  size = RoundUpTo(size, GetPageSizeCached());
 +
-+  void *res = nullptr;
-+  heap().alloc(size, &res);
++  return heap().try_alloc(size).convert<void *>(
 +
-+  if (res == nullptr)
-+    ReportMmapFailureAndDie(size, mem_type, "allocate", 0, raw_report);
++    [&] (void *ptr) {
++      IncreaseTotalMmap(size);
++      return ptr;
++    },
 +
-+  IncreaseTotalMmap(size);
-+  return res;
++    [&] (Genode::Allocator::Alloc_error) -> void * {
++      ReportMmapFailureAndDie(size, mem_type, "allocate", 0, raw_report);
++      return nullptr;
++    }
++  );
 +}
 +
 +
@@ -77,29 +77,50 @@ class Genode::Packet_allocator : public Genode::Range_allocator
          ** Range-allocator interface **
          *******************************/
 
-        int add_range(addr_t const base, size_t const size) override
+        Range_result add_range(addr_t const base, size_t const size) override
         {
-            if (_base || _array) return -1;
+            if (_base || _array)
+                return Alloc_error::DENIED;
 
             size_t const bits_cnt = _bits_cnt(size);
 
             _base = base;
-            _bits = (addr_t *)_md_alloc->alloc(bits_cnt / 8);
-            memset(_bits, 0, bits_cnt / 8);
 
-            _array = new (_md_alloc) Bit_array_base(bits_cnt, _bits);
+            Alloc_error error = Alloc_error::DENIED;
 
-            /* reserve bits which are unavailable */
-            size_t const max_cnt = size / _block_size;
-            if (bits_cnt > max_cnt)
-                _array->set(max_cnt, bits_cnt - max_cnt);
+            size_t const bits_bytes = bits_cnt / 8;
 
-            return 0;
+            try {
+                _bits = (addr_t *)_md_alloc->alloc(bits_bytes);
+                memset(_bits, 0, bits_cnt / 8);
+
+                _array = new (_md_alloc) Bit_array_base(bits_cnt, _bits);
+
+                /* reserve bits which are unavailable */
+                size_t const max_cnt = size / _block_size;
+                if (bits_cnt > max_cnt)
+                    _array->set(max_cnt, bits_cnt - max_cnt);
+
+                return Range_ok();
+
+            }
+            catch (Out_of_ram)  { error = Alloc_error::OUT_OF_RAM; }
+            catch (Out_of_caps) { error = Alloc_error::OUT_OF_CAPS; }
+            catch (...)         { error = Alloc_error::DENIED; }
+
+            if (_bits)
+                _md_alloc->free(_bits, bits_bytes);
+
+            if (_array)
+                destroy(_md_alloc, _array);
+
+            return error;
         }
 
-        int remove_range(addr_t base, size_t size) override
+        Range_result remove_range(addr_t base, size_t size) override
         {
-            if (_base != base) return -1;
+            if (_base != base)
+                return Alloc_error::DENIED;
 
             _base = _next = 0;
 
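add_range illustrates the bridge between the two worlds: the meta-data allocator behind '_md_alloc' is still exception-based, so the rewritten method traps Out_of_ram/Out_of_caps and maps them onto 'Alloc_error' codes, undoing partial work on the way out. A condensed sketch of that translation, with stand-in types in place of Genode's real interfaces:

/*
 * Sketch of mapping exception-based failures to 'Alloc_error' codes.
 * 'legacy_alloc' and the exception types are local stand-ins.
 */
#include <cstddef>
#include <cstdlib>

enum class Alloc_error { OUT_OF_RAM, OUT_OF_CAPS, DENIED };

struct Out_of_ram  { };
struct Out_of_caps { };

static void *legacy_alloc(std::size_t size)
{
	void * const p = std::malloc(size);
	if (!p) throw Out_of_ram();   /* old-style error propagation */
	return p;
}

struct Reserve_result { bool ok; Alloc_error error; };

Reserve_result reserve(std::size_t size, void **out)
{
	try {
		*out = legacy_alloc(size);
		return { true, Alloc_error::DENIED };
	}
	catch (Out_of_ram)  { return { false, Alloc_error::OUT_OF_RAM  }; }
	catch (Out_of_caps) { return { false, Alloc_error::OUT_OF_CAPS }; }
	catch (...)         { return { false, Alloc_error::DENIED      }; }
}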
@@ -113,17 +134,15 @@ class Genode::Packet_allocator : public Genode::Range_allocator
                 _bits = nullptr;
             }
 
-            return 0;
+            return Range_ok();
         }
 
-        Alloc_return alloc_aligned(size_t size, void **out_addr, unsigned,
-                                   Range) override
+        Alloc_result alloc_aligned(size_t size, unsigned, Range) override
         {
-            return alloc(size, out_addr) ? Alloc_return::OK
-                                         : Alloc_return::RANGE_CONFLICT;
+            return try_alloc(size);
         }
 
-        bool alloc(size_t size, void **out_addr) override
+        Alloc_result try_alloc(size_t size) override
         {
             addr_t const cnt = (size % _block_size) ? size / _block_size + 1
                                                     : size / _block_size;
@@ -138,9 +157,8 @@ class Genode::Packet_allocator : public Genode::Range_allocator
 
                     _array->set(i, cnt);
                     _next = i + cnt;
-                    *out_addr = reinterpret_cast<void *>(i * _block_size
+                    return reinterpret_cast<void *>(i * _block_size
                                                          + _base);
-                    return true;
                 }
             } catch (typename Bit_array_base::Invalid_index_access) { }
 
@@ -149,7 +167,7 @@ class Genode::Packet_allocator : public Genode::Range_allocator
 
             } while (max != 0);
 
-            return false;
+            return Alloc_error::DENIED;
         }
 
         void free(void *addr, size_t size) override
@@ -171,8 +189,8 @@ class Genode::Packet_allocator : public Genode::Range_allocator
         size_t overhead(size_t) const override { return 0;}
         size_t avail() const override { return 0; }
         bool valid_addr(addr_t) const override { return 0; }
-        Alloc_return alloc_addr(size_t, addr_t) override {
-            return Alloc_return(Alloc_return::OUT_OF_METADATA); }
+        Alloc_result alloc_addr(size_t, addr_t) override {
+            return Alloc_error::DENIED; }
 };
 
 #endif /* _INCLUDE__OS__PACKET_ALLOCATOR__ */
@@ -769,11 +769,16 @@ class Genode::Packet_stream_source : private Packet_stream_base
          */
         Packet_descriptor alloc_packet(Genode::size_t size, unsigned align = PACKET_ALIGNMENT)
         {
-            void *base = 0;
-            if (size && _packet_alloc.alloc_aligned(size, &base, align).error())
-                throw Packet_alloc_failed();
+            if (size == 0)
+                return Packet_descriptor(0, 0);
 
-            return Packet_descriptor((Genode::off_t)base, size);
+            return _packet_alloc.alloc_aligned(size, align).convert<Packet_descriptor>(
+
+                [&] (void *base) {
+                    return Packet_descriptor((Genode::off_t)base, size); },
+
+                [&] (Allocator::Alloc_error) -> Packet_descriptor {
+                    throw Packet_alloc_failed(); });
         }
 
         /**
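The error lambda re-throws Packet_alloc_failed, so alloc_packet keeps its historical throwing contract and existing callers stay valid even though the allocator underneath no longer throws. A sketch of such a caller, with stand-in types for illustration only:

#include <cstdio>

struct Packet_alloc_failed { };

struct Packet_descriptor { long offset; unsigned long size; };

struct Source
{
	Packet_descriptor alloc_packet(unsigned long size)
	{
		if (size > 4096) throw Packet_alloc_failed(); /* stand-in policy */
		return Packet_descriptor { 0, size };
	}
};

int main()
{
	Source source;
	try {
		Packet_descriptor const pd = source.alloc_packet(1024);
		std::printf("packet of %lu bytes allocated\n", pd.size);
	}
	catch (Packet_alloc_failed) {
		std::printf("submit queue exhausted, retry later\n");
	}
}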
@@ -55,13 +55,36 @@ class Igd::Ppgtt_allocator : public Genode::Translation_table_allocator
          ** Allocator interface **
          *************************/
 
-        bool alloc(size_t size, void **out_addr) override
+        Alloc_result try_alloc(size_t size) override
         {
-            Genode::Ram_dataspace_capability ds =
-                _backend.alloc(size, _caps_guard, _ram_guard);
+            Genode::Ram_dataspace_capability ds { };
 
-            *out_addr = _rm.attach(ds);
-            return _map.add(ds, *out_addr);
+            try {
+                ds = _backend.alloc(size, _caps_guard, _ram_guard);
+            }
+            catch (Genode::Out_of_ram)  { return Alloc_error::OUT_OF_RAM; }
+            catch (Genode::Out_of_caps) { return Alloc_error::OUT_OF_CAPS; }
+            catch (...)                 { return Alloc_error::DENIED; }
+
+            Alloc_error alloc_error = Alloc_error::DENIED;
+
+            try {
+                void * const ptr = _rm.attach(ds);
+
+                if (_map.add(ds, ptr))
+                    return ptr;
+
+                /* _map.add failed, roll back _rm.attach */
+                _rm.detach(ptr);
+            }
+            catch (Genode::Out_of_ram)  { alloc_error = Alloc_error::OUT_OF_RAM; }
+            catch (Genode::Out_of_caps) { alloc_error = Alloc_error::OUT_OF_CAPS; }
+            catch (...)                 { alloc_error = Alloc_error::DENIED; }
+
+            /* roll back allocation */
+            _backend.free(ds);
+
+            return alloc_error;
         }
 
         void free(void *addr, size_t) override
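The new try_alloc is careful to undo partial work: a failed _map.add detaches the attachment again, and any failure after the backend allocation frees the dataspace before the error is returned. That acquire-then-roll-back shape, distilled into a stand-alone sketch with stand-in helpers ('acquire_a/b' are not Genode calls):

#include <cstddef>
#include <cstdlib>

enum class Alloc_error { OUT_OF_RAM, DENIED };

struct Alloc_result { void *ptr; Alloc_error error; bool ok; };

static void *acquire_a(std::size_t n) { return std::malloc(n); }
static void  release_a(void *p)       { std::free(p); }
static bool  acquire_b(void *)        { return true; } /* e.g. map registration */

Alloc_result try_alloc(std::size_t size)
{
	void * const a = acquire_a(size);
	if (!a)
		return { nullptr, Alloc_error::OUT_OF_RAM, false };

	if (acquire_b(a))
		return { a, Alloc_error::DENIED, true };

	/* second step failed, roll back the first */
	release_a(a);
	return { nullptr, Alloc_error::DENIED, false };
}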
@@ -311,14 +311,15 @@ class Volume_descriptor : public Iso::Iso_base
         /* copy the root record */
         Directory_record *copy_root_record(Genode::Allocator &alloc)
         {
-            Directory_record *buf;
+            return alloc.try_alloc(ROOT_SIZE).convert<Directory_record *>(
 
-            if (!(alloc.alloc(ROOT_SIZE, &buf)))
-                throw Insufficient_ram_quota();
+                [&] (void *ptr) -> Directory_record * {
+                    memcpy(ptr, root_record(), ROOT_SIZE);
+                    return (Directory_record *)ptr; },
 
-            memcpy(buf, root_record(), ROOT_SIZE);
-
-            return buf;
+                [&] (Allocator::Alloc_error e) -> Directory_record * {
+                    Allocator::throw_alloc_error(e); }
+            );
         }
 };
 
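copy_root_record still reports failure by throwing, but now delegates the choice of exception to Allocator::throw_alloc_error. A sketch of what such a helper plausibly does - one switch that turns an error code back into the matching exception; the exception types are declared locally and this is not the verbatim Genode implementation:

enum class Alloc_error { OUT_OF_RAM, OUT_OF_CAPS, DENIED };

struct Out_of_ram  { };
struct Out_of_caps { };
struct Denied      { };

[[noreturn]] void throw_alloc_error(Alloc_error e)
{
	switch (e) {
	case Alloc_error::OUT_OF_RAM:  throw Out_of_ram();
	case Alloc_error::OUT_OF_CAPS: throw Out_of_caps();
	case Alloc_error::DENIED:      break;
	}
	throw Denied();
}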
@@ -39,8 +39,13 @@ struct Test
     {
         log("\nTEST ", id, ": ", brief, "\n");
         for (unsigned i = 0; i < 2; i++) {
-            if (!heap.alloc(fb_ds.size(), (void **)&buf[i])) {
-                env.parent().exit(-1); }
+            heap.try_alloc(fb_ds.size()).with_result(
+                [&] (void *ptr) { buf[i] = (char *)ptr; },
+                [&] (Allocator::Alloc_error e) {
+                    env.parent().exit(-1);
+                    Allocator::throw_alloc_error(e);
+                }
+            );
         }
         /* fill one memory buffer with white pixels */
         memset(buf[1], ~0, fb_ds.size());
@@ -67,12 +67,20 @@ struct Allocator_tracer : Allocator
 
     Allocator_tracer(Allocator &wrapped) : wrapped(wrapped) { }
 
-    bool alloc(size_t size, void **out_addr) override
+    Alloc_result try_alloc(size_t size) override
     {
-        sum += size;
-        bool result = wrapped.alloc(size, out_addr);
-        new (wrapped) Alloc(allocs, Alloc::Id { (addr_t)*out_addr }, size);
-        return result;
+        return wrapped.try_alloc(size).convert<Alloc_result>(
+
+            [&] (void *ptr) {
+                sum += size;
+                new (wrapped) Alloc(allocs, Alloc::Id { (addr_t)ptr }, size);
+                return ptr;
+            },
+
+            [&] (Allocator::Alloc_error error) {
+                return error;
+            }
+        );
     }
 
     void free(void *addr, size_t size) override
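Allocator_tracer shows how the Attempt style composes for wrappers: the success lambda performs the accounting and passes the pointer through, while the error lambda forwards the error unchanged. A self-contained sketch of the same decorator shape, again with a local 'Result' stand-in rather than Genode's real types:

#include <cstddef>
#include <cstdio>
#include <cstdlib>

enum class Alloc_error { OUT_OF_RAM, DENIED };

struct Result
{
	void *ptr; Alloc_error error; bool ok;

	template <typename T, typename OK_FN, typename ERR_FN>
	T convert(OK_FN const &ok_fn, ERR_FN const &err_fn) const
	{
		return ok ? ok_fn(ptr) : err_fn(error);
	}
};

struct Allocator
{
	virtual Result try_alloc(std::size_t size) = 0;
	virtual ~Allocator() { }
};

struct Malloc_allocator : Allocator
{
	Result try_alloc(std::size_t size) override
	{
		void * const p = std::malloc(size);
		return p ? Result { p,       Alloc_error::DENIED,     true  }
		         : Result { nullptr, Alloc_error::OUT_OF_RAM, false };
	}
};

struct Tracer : Allocator
{
	Allocator  &wrapped;
	std::size_t sum = 0;

	Tracer(Allocator &wrapped) : wrapped(wrapped) { }

	Result try_alloc(std::size_t size) override
	{
		return wrapped.try_alloc(size).convert<Result>(
			[&] (void *ptr) {
				sum += size;  /* bookkeeping only on success */
				return Result { ptr, Alloc_error::DENIED, true };
			},
			[&] (Alloc_error error) {
				return Result { nullptr, error, false }; /* pass error through */
			});
	}
};

int main()
{
	Malloc_allocator backend;
	Tracer tracer(backend);
	tracer.try_alloc(128);
	std::printf("traced bytes: %zu\n", tracer.sum);
}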