base: use 'Allocation' interface for mem alloc

This patch converts the memory-allocator interfaces ('Allocator',
'Range_allocator') and their implementations ('Heap', 'Sliced_heap',
'Slab', 'Allocator_avl', 'Synced_allocator') to the new 'Allocation'
utility. The new interface resides at base/memory.h whereas the
traditional allocators implement the new interface. Down the road, the
traditional allocators can successively be decoupled from the
traditional 'Allocator' and 'Range_allocator' interfaces.

Issue #5502
Issue #5245

Parent commit: 30e200f4ae
This commit:   e380d0da95
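
The gist of the conversion, visible in every hunk below: 'try_alloc'
and friends no longer return a raw 'void *' but an attempt value that
carries an 'Allocation' guard. On success, the lambda receives the
guard; the memory is released automatically when the guard goes out of
scope unless the caller clears its 'deallocate' flag. A minimal
consumer-side sketch of the pattern (the allocator instance and size
are placeholders, not code from this commit):

    heap.try_alloc(4096).with_result(

        [&] (Memory::Allocation &a) {

            /* use a.ptr / a.num_bytes; released when 'a' leaves scope */
            memset(a.ptr, 0, a.num_bytes);

            /* opt out of the automatic release to keep the block */
            a.deallocate = false;
        },
        [&] (Alloc_error e) { error("allocation failed: ", e); });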
@@ -96,13 +96,14 @@ Io_mem_session_component::Map_local_result Io_mem_session_component::_map_local(
 	return platform().region_alloc().alloc_aligned(size, align).convert<Map_local_result>(

-		[&] (void *ptr) {
-			addr_t const core_local_base = (addr_t)ptr;
+		[&] (Range_allocator::Allocation &core_local) {
+			addr_t const core_local_base = (addr_t)core_local.ptr;
 			map_io_region(phys_base, core_local_base, size);
+			core_local.deallocate = false;
 			return Map_local_result { .core_local_addr = core_local_base, .success = true };
 		},
-		[&] (Range_allocator::Alloc_error) {
+		[&] (Alloc_error) {
 			error("core-local mapping of memory-mapped I/O range failed");
 			return Map_local_result();
 		});
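Where the call site must compute a value from the allocation, the patch
uses 'convert<T>' instead of 'with_result', as in the hunk above. The
shape, reduced to a sketch (names are illustrative only):

    addr_t const addr = alloc.try_alloc(size).convert<addr_t>(
        [&] (Range_allocator::Allocation &a) {
            a.deallocate = false;       /* keep the block past the lambda */
            return addr_t(a.ptr); },
        [&] (Alloc_error) { return 0UL; });   /* 0 denotes failure here */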
@@ -444,11 +444,11 @@ Core::Platform::Platform()
 		size_t const size = 1 << get_page_size_log2();
 		ram_alloc().alloc_aligned(size, get_page_size_log2()).with_result(

-			[&] (void *phys_ptr) {
+			[&] (Range_allocator::Allocation &phys) {

 				/* core-local memory is one-to-one mapped physical RAM */
-				addr_t const phys_addr = reinterpret_cast<addr_t>(phys_ptr);
-				void * const core_local_ptr = phys_ptr;
+				addr_t const phys_addr = reinterpret_cast<addr_t>(phys.ptr);
+				void * const core_local_ptr = phys.ptr;

 				region_alloc().remove_range((addr_t)core_local_ptr, size);
 				memset(core_local_ptr, 0, size);
@@ -456,8 +456,10 @@ Core::Platform::Platform()

 				new (core_mem_alloc())
 					Rom_module(_rom_fs, rom_name, phys_addr, size);
+
+				phys.deallocate = false;
 			},
-			[&] (Range_allocator::Alloc_error) {
+			[&] (Alloc_error) {
 				warning("failed to export ", rom_name, " as ROM module"); }
 		);
 	};
@@ -41,17 +41,17 @@ Io_mem_session_component::Map_local_result Io_mem_session_component::_map_local(
 	/* find appropriate region and map it locally */
 	return platform().region_alloc().alloc_aligned(size, (unsigned)alignment).convert<Map_local_result>(

-		[&] (void *local_base) {
-			if (!map_local_io(base, (addr_t)local_base, size >> get_page_size_log2())) {
+		[&] (Range_allocator::Allocation &local) {
+			if (!map_local_io(base, (addr_t)local.ptr, size >> get_page_size_log2())) {
 				error("map_local_io failed ", Hex_range(base, size));
-				platform().region_alloc().free(local_base, base);
 				return Map_local_result();
 			}
-			return Map_local_result { .core_local_addr = addr_t(local_base),
+			local.deallocate = false;
+			return Map_local_result { .core_local_addr = addr_t(local.ptr),
 			                          .success = true };
 		},

-		[&] (Range_allocator::Alloc_error) {
+		[&] (Alloc_error) {
 			error("allocation of virtual memory for local I/O mapping failed");
 			return Map_local_result(); });
 }
@@ -203,10 +203,12 @@ Irq_session_component::Irq_session_component(Range_allocator &irq_alloc,
 		}
 		msi_alloc().set(_irq_number, 1);
 	} else {
-		if (irq_alloc.alloc_addr(1, _irq_number).failed()) {
-			error("unavailable IRQ ", _irq_number, " requested");
-			throw Service_denied();
-		}
+		irq_alloc.alloc_addr(1, _irq_number).with_result(
+			[&] (Range_allocator::Allocation &irq_number) {
+				irq_number.deallocate = false; },
+			[&] (Alloc_error) {
+				error("unavailable interrupt ", _irq_number, " requested");
+				throw Service_denied(); });
 	}

 	if (_irq_object.associate(_irq_number, msi, irq_args.trigger(),
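The IRQ-session constructors across all kernels follow one recipe: the
former 'failed()' test on 'alloc_addr' becomes a 'with_result' whose
success branch merely disarms the guard, so the interrupt number stays
reserved for the lifetime of the session. Sketch of the recurring
shape (variable names vary per kernel):

    irq_alloc.alloc_addr(1, _irq_number).with_result(
        [&] (Range_allocator::Allocation &a) { a.deallocate = false; },
        [&] (Alloc_error) {
            error("unavailable interrupt ", _irq_number, " requested");
            throw Service_denied(); });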
@@ -381,31 +381,31 @@ static void add_acpi_rsdp(auto &region_alloc, auto &xml)
 		auto offset = desc[i].start() & 0xffful;
 		auto pages  = align_addr(offset + desc[i].size(), 12) >> 12;

-		region_alloc.alloc_aligned(pages * 4096, 12).with_result([&] (void *core_local_ptr) {
+		region_alloc.alloc_aligned(pages * 4096, 12).with_result(
+			[&] (Range_allocator::Allocation &core_local) {

-			if (!map_local_io(desc[i].start(), (addr_t)core_local_ptr, pages))
-				return;
+				if (!map_local_io(desc[i].start(), (addr_t)core_local.ptr, pages))
+					return;

-			Byte_range_ptr const ptr((char *)(addr_t(core_local_ptr) + offset),
-			                         pages * 4096 - offset);
-			auto const rsdp = Acpi_rsdp(ptr);
+				Byte_range_ptr const ptr((char *)(addr_t(core_local.ptr) + offset),
+				                         pages * 4096 - offset);
+				auto const rsdp = Acpi_rsdp(ptr);

-			if (!rsdp.valid())
-				return;
+				if (!rsdp.valid())
+					return;

-			xml.node("acpi", [&] {
-				xml.attribute("revision", rsdp.read<Acpi_rsdp::Revision>());
-				if (rsdp.read<Acpi_rsdp::Rsdt>())
-					xml.attribute("rsdt", String<32>(Hex(rsdp.read<Acpi_rsdp::Rsdt>())));
-				if (rsdp.read<Acpi_rsdp::Xsdt>())
-					xml.attribute("xsdt", String<32>(Hex(rsdp.read<Acpi_rsdp::Xsdt>())));
-			});
+				xml.node("acpi", [&] {
+					xml.attribute("revision", rsdp.read<Acpi_rsdp::Revision>());
+					if (rsdp.read<Acpi_rsdp::Rsdt>())
+						xml.attribute("rsdt", String<32>(Hex(rsdp.read<Acpi_rsdp::Rsdt>())));
+					if (rsdp.read<Acpi_rsdp::Xsdt>())
+						xml.attribute("xsdt", String<32>(Hex(rsdp.read<Acpi_rsdp::Xsdt>())));
+				});

-			unmap_local(addr_t(core_local_ptr), pages);
-			region_alloc.free(core_local_ptr);
+				unmap_local(addr_t(core_local.ptr), pages);

-			pages = 0;
-		}, [&] (Range_allocator::Alloc_error) { });
+				pages = 0;
+			}, [&] (Alloc_error) { });

 		if (!pages)
 			return;
@@ -545,35 +545,31 @@ Core::Platform::Platform()
 		size_t const bytes = pages << get_page_size_log2();
 		ram_alloc().alloc_aligned(bytes, align).with_result(

-			[&] (void *phys_ptr) {
-
-				addr_t const phys_addr = reinterpret_cast<addr_t>(phys_ptr);
+			[&] (Range_allocator::Allocation &phys) {
+				addr_t const phys_addr = reinterpret_cast<addr_t>(phys.ptr);

 				region_alloc().alloc_aligned(bytes, align).with_result(
-					[&] (void *core_local_ptr) {
+					[&] (Range_allocator::Allocation &core_local) {

-						if (!map_local(phys_addr, (addr_t)core_local_ptr, pages)) {
+						if (!map_local(phys_addr, (addr_t)core_local.ptr, pages)) {
 							warning("map_local failed while exporting ",
 							        rom_name, " as ROM module");
-							ram_alloc().free(phys_ptr, bytes);
-							region_alloc().free(core_local_ptr, bytes);
-							return;
-						}
+							return; }

-						memset(core_local_ptr, 0, bytes);
-						content_fn((char *)core_local_ptr, bytes);
+						memset(core_local.ptr, 0, bytes);
+						content_fn((char *)core_local.ptr, bytes);

 						new (core_mem_alloc())
 							Rom_module(_rom_fs, rom_name, phys_addr, bytes);
+
+						phys.deallocate = core_local.deallocate = false;
 					},
-					[&] (Range_allocator::Alloc_error) {
+					[&] (Alloc_error) {
 						warning("failed allocate virtual memory to export ",
-						        rom_name, " as ROM module");
-						ram_alloc().free(phys_ptr, bytes);
-					}
+						        rom_name, " as ROM module"); }
 				);
 			},
-			[&] (Range_allocator::Alloc_error) {
+			[&] (Alloc_error) {
 				warning("failed to export ", rom_name, " as ROM module"); }
 		);
 	};
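Nested allocations benefit the most: if the inner allocation or the
mapping fails, returning from the lambda rolls back whatever succeeded
so far, which removes the hand-written 'free' calls of the old error
paths. Sketch of the keep-both pattern used above ('map' and the two
allocator names are placeholders):

    phys_alloc.alloc_aligned(bytes, align).with_result(
        [&] (Allocation &phys) {
            virt_alloc.alloc_aligned(bytes, align).with_result(
                [&] (Allocation &virt) {
                    if (!map(phys.ptr, virt.ptr))
                        return;          /* both guards roll back */

                    /* success: keep both allocations */
                    phys.deallocate = virt.deallocate = false;
                },
                [&] (Alloc_error) { });  /* phys guard rolls back */
        },
        [&] (Alloc_error) { });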
@@ -200,10 +200,10 @@ Cap_id_allocator::id_t Cap_id_allocator::alloc()

 	return _id_alloc.try_alloc(CAP_ID_OFFSET).convert<id_t>(

-		[&] (void *id) -> id_t {
-			return (addr_t)id & ID_MASK; },
+		[&] (Range_allocator::Allocation &id) -> id_t {
+			id.deallocate = false; return (addr_t)id.ptr & ID_MASK; },

-		[&] (Range_allocator::Alloc_error) -> id_t {
+		[&] (Alloc_error) -> id_t {
 			throw Out_of_ids(); });
 }
@@ -36,8 +36,8 @@ void * Platform::Ram_allocator::alloc_aligned(size_t size, unsigned align)

 	return Base::alloc_aligned(Hw::round_page(size), align).convert<void *>(

-		[&] (void *ptr) { return ptr; },
-		[&] (Ram_allocator::Alloc_error e) -> void *
+		[&] (Range_allocator::Allocation &a) { a.deallocate = false; return a.ptr; },
+		[&] (Alloc_error e) -> void *
 		{
 			error("bootstrap RAM allocation failed, error=", e);
 			assert(false);
@@ -26,6 +26,8 @@ using namespace Core;
 Region_map::Attach_result
 Core_region_map::attach(Dataspace_capability ds_cap, Attr const &attr)
 {
+	using Virt_allocation = Range_allocator::Allocation;
+
 	return _ep.apply(ds_cap, [&] (Dataspace_component *ds_ptr) -> Attach_result {

 		if (!ds_ptr)
@@ -43,7 +45,7 @@ Core_region_map::attach(Dataspace_capability ds_cap, Attr const &attr)
 		unsigned const align = get_page_size_log2();

 		/* allocate range in core's virtual address space */
-		Allocator::Alloc_result const virt =
+		Allocator::Alloc_result virt =
 			platform().region_alloc().alloc_aligned(page_rounded_size, align);

 		if (virt.failed()) {
@@ -68,15 +70,15 @@ Core_region_map::attach(Dataspace_capability ds_cap, Attr const &attr)

 		return virt.convert<Attach_result>(

-			[&] (void *virt_addr) -> Attach_result {
-				if (map_local(ds.phys_addr(), (addr_t)virt_addr, num_pages, flags))
-					return Range { .start = addr_t(virt_addr),
-					               .num_bytes = page_rounded_size };
-
-				platform().region_alloc().free(virt_addr, page_rounded_size);
-				return Attach_error::REGION_CONFLICT; },
-
-			[&] (Allocator::Alloc_error) {
+			[&] (Virt_allocation &a) -> Attach_result {
+				if (!map_local(ds.phys_addr(), (addr_t)a.ptr, num_pages, flags))
+					return Attach_error::REGION_CONFLICT;
+
+				a.deallocate = false;
+				return Range { .start = addr_t(a.ptr),
+				               .num_bytes = page_rounded_size };
+			},
+			[&] (Alloc_error) {
 				return Attach_error::REGION_CONFLICT; });
 	});
 }
@@ -52,12 +52,24 @@ class Core::Cpu_thread_allocator : public Allocator
 		Cpu_thread_allocator(Allocator &alloc) : _alloc(alloc) { }


-		/*************************
-		 ** Allocator interface **
-		 *************************/
+		/*********************************
+		 ** Memory::Allocator interface **
+		 *********************************/

-		Alloc_result try_alloc(size_t size) override {
-			return _alloc.alloc(size); }
+		Alloc_result try_alloc(size_t size) override
+		{
+			return _alloc.try_alloc(size).convert<Alloc_result>(
+				[&] (Allocation &a) -> Alloc_result {
+					a.deallocate = false; return { *this, a }; },
+				[&] (Alloc_error e) { return e; });
+		}
+
+		void _free(Allocation &a) override { _alloc.free(a.ptr, a.num_bytes); }
+
+
+		/****************************************
+		 ** Legacy Genode::Allocator interface **
+		 ****************************************/

 		void free(void *addr, size_t size) override {
 			_alloc.free(addr, size); }
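'Cpu_thread_allocator' shows the wrapper idiom: a forwarding allocator
disarms the inner guard and re-binds the allocation attributes to
itself so that the eventual '_free' is routed back through the wrapper.
Distilled from the hunk above:

    Alloc_result try_alloc(size_t size) override
    {
        return _alloc.try_alloc(size).convert<Alloc_result>(
            [&] (Allocation &a) -> Alloc_result {
                a.deallocate = false;    /* ownership moves to new guard */
                return { *this, a }; },  /* now deallocated via *this */
            [&] (Alloc_error e) { return e; });
    }

    void _free(Allocation &a) override { _alloc.free(a.ptr, a.num_bytes); }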
@@ -72,11 +72,11 @@ class Core::Guest_memory
 			Attach_attr attr,
 			auto const &map_fn)
 		{
 			/*
 			 * unsupported - deny otherwise arbitrary physical
 			 * memory can be mapped to a VM
 			 */
 			if (dsc.managed())
 				return Attach_result::INVALID_DS;

 			if (guest_phys & 0xffful || attr.offset & 0xffful ||
@@ -97,11 +97,9 @@ class Core::Guest_memory
 			    attr.offset > dsc.size() - attr.size)
 				return Attach_result::INVALID_DS;

-			using Alloc_error = Range_allocator::Alloc_error;
-
 			Attach_result const retval = _map.alloc_addr(attr.size, guest_phys).convert<Attach_result>(

-				[&] (void *) {
+				[&] (Range_allocator::Allocation &allocation) {

 					Rm_region::Attr const region_attr
 					{
@@ -131,6 +129,7 @@ class Core::Guest_memory
 					/* inform dataspace about attachment */
 					dsc.attached_to(region);

+					allocation.deallocate = false;
 					return Attach_result::OK;
 				},
@@ -77,8 +77,10 @@ Irq_session_component::Irq_session_component(Range_allocator &irq_alloc,
 	}

 	/* allocate interrupt */
-	if (_irq_alloc.alloc_addr(1, _irq_number).failed()) {
-		error("unavailable interrupt ", _irq_number, " requested");
-		throw Service_denied();
-	}
+	_irq_alloc.alloc_addr(1, _irq_number).with_result(
+		[&] (Range_allocator::Allocation &irq_number) {
+			irq_number.deallocate = false; },
+		[&] (Alloc_error) {
+			error("unavailable interrupt ", _irq_number, " requested");
+			throw Service_denied(); });
 }
@@ -35,8 +35,9 @@ using namespace Kernel;

 Thread::Ipc_alloc_result Thread::_ipc_alloc_recv_caps(unsigned cap_count)
 {
-	using Allocator = Genode::Allocator;
-	using Result = Ipc_alloc_result;
+	using Allocator   = Genode::Allocator;
+	using Result      = Ipc_alloc_result;
+	using Alloc_error = Genode::Alloc_error;

 	Allocator &slab = pd().platform_pd().capability_slab();
 	for (unsigned i = 0; i < cap_count; i++) {
@@ -46,11 +47,12 @@ Thread::Ipc_alloc_result Thread::_ipc_alloc_recv_caps(unsigned cap_count)
 		Result const result =
 			slab.try_alloc(sizeof(Object_identity_reference)).convert<Result>(

-				[&] (void *ptr) {
-					_obj_id_ref_ptr[i] = ptr;
-					return Result::OK; },
-
-				[&] (Allocator::Alloc_error e) {
+				[&] (Genode::Memory::Allocation &a) {
+					_obj_id_ref_ptr[i] = a.ptr;
+					a.deallocate = false;
+					return Result::OK;
+				},
+				[&] (Alloc_error e) {

 					/*
 					 * Conditions other than DENIED cannot happen because the slab
@@ -58,7 +60,7 @@ Thread::Ipc_alloc_result Thread::_ipc_alloc_recv_caps(unsigned cap_count)
 					 * expanded by the client as response to the EXHAUSTED return
 					 * value.
 					 */
-					if (e != Allocator::Alloc_error::DENIED)
+					if (e != Alloc_error::DENIED)
 						Genode::raw("unexpected recv_caps allocation failure");

 					return Result::EXHAUSTED;
@@ -116,41 +116,26 @@ void Platform::_init_platform_info()
 	size_t const rom_size = pages << get_page_size_log2();
 	const char *rom_name = "platform_info";

-	struct Guard
-	{
-		Range_allocator &phys_alloc;
-		Range_allocator &virt_alloc;
-
-		struct {
-			void *phys_ptr = nullptr;
-			void *virt_ptr = nullptr;
-		};
-
-		Guard(Range_allocator &phys_alloc, Range_allocator &virt_alloc)
-		: phys_alloc(phys_alloc), virt_alloc(virt_alloc) { }
-
-		~Guard()
-		{
-			if (phys_ptr) phys_alloc.free(phys_ptr);
-			if (virt_ptr) virt_alloc.free(phys_ptr);
-		}
-	} guard { ram_alloc(), region_alloc() };
-
-	ram_alloc().try_alloc(get_page_size()).with_result(
-		[&] (void *ptr) { guard.phys_ptr = ptr; },
-		[&] (Allocator::Alloc_error) {
-			error("could not setup platform_info ROM - RAM allocation error"); });
-
-	region_alloc().try_alloc(rom_size).with_result(
-		[&] (void *ptr) { guard.virt_ptr = ptr; },
-		[&] (Allocator::Alloc_error) {
-			error("could not setup platform_info ROM - region allocation error"); });
-
-	if (!guard.phys_ptr || !guard.virt_ptr)
+	Range_allocator::Result phys = ram_alloc().try_alloc(get_page_size());
+	Range_allocator::Result virt = region_alloc().try_alloc(rom_size);
+
+	if (phys.failed())
+		error("could not setup platform_info ROM - RAM allocation error");
+	if (virt.failed())
+		error("could not setup platform_info ROM - region allocation error");
+
+	if (phys.failed() || virt.failed())
 		return;

-	addr_t const phys_addr = reinterpret_cast<addr_t>(guard.phys_ptr);
-	addr_t const virt_addr = reinterpret_cast<addr_t>(guard.virt_ptr);
+	auto start_addr = [&] (Range_allocator::Result &range)
+	{
+		return range.convert<addr_t>(
+			[&] (Range_allocator::Allocation &a) { return addr_t(a.ptr); },
+			[]  (Alloc_error) { return 0UL; });
+	};
+
+	addr_t const phys_addr = start_addr(phys);
+	addr_t const virt_addr = start_addr(virt);

 	if (!map_local(phys_addr, virt_addr, pages, Hw::PAGE_FLAGS_KERN_DATA)) {
 		error("could not setup platform_info ROM - map error");
@@ -182,7 +167,8 @@ void Platform::_init_platform_info()
 	new (core_mem_alloc()) Rom_module(_rom_fs, rom_name, phys_addr, rom_size);

 	/* keep phys allocation but let guard revert virt allocation */
-	guard.phys_ptr = nullptr;
+	phys.with_result([&] (auto &allocation) { allocation.deallocate = false; },
+	                 []  (Alloc_error) { });
 }
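The 'platform_info' hunk above illustrates the main simplification of
this commit: a 'Range_allocator::Result' is itself the cleanup guard,
so the ad-hoc 'Guard' struct disappears. Reduced to a sketch:

    Range_allocator::Result phys = ram_alloc().try_alloc(page_size);
    Range_allocator::Result virt = region_alloc().try_alloc(rom_size);

    if (phys.failed() || virt.failed())
        return;   /* whichever allocation succeeded is reverted */

    /* ... on success, selectively keep an allocation ... */
    phys.with_result([&] (auto &a) { a.deallocate = false; },
                     []  (Alloc_error) { });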
@@ -226,32 +212,31 @@ Platform::Platform()

 	/* core log as ROM module */
 	{
-		unsigned const pages  = 1;
-		size_t const log_size = pages << get_page_size_log2();
-		unsigned const align  = get_page_size_log2();
+		unsigned const pages    = 1;
+		size_t   const log_size = pages << get_page_size_log2();
+		unsigned const align    = get_page_size_log2();
+
+		using Allocation = Range_allocator::Allocation;

 		ram_alloc().alloc_aligned(log_size, align).with_result(

-			[&] (void *phys) {
-				addr_t const phys_addr = reinterpret_cast<addr_t>(phys);
-
-				region_alloc().alloc_aligned(log_size, align).with_result(
-
-					[&] (void *ptr) {
-
-						map_local(phys_addr, (addr_t)ptr, pages);
-						memset(ptr, 0, log_size);
+			[&] (Allocation &phys) {
+				region_alloc().alloc_aligned(log_size, align).with_result(
+					[&] (Allocation &virt) {
+						addr_t const phys_addr = addr_t(phys.ptr);
+						addr_t const virt_addr = addr_t(virt.ptr);
+
+						map_local(phys_addr, virt_addr, pages);
+						memset(virt.ptr, 0, log_size);

 						new (core_mem_alloc())
 							Rom_module(_rom_fs, "core_log", phys_addr, log_size);

-						init_core_log(Core_log_range { (addr_t)ptr, log_size } );
+						init_core_log(Core_log_range { virt_addr, log_size } );
+						phys.deallocate = virt.deallocate = false;
 					},
-					[&] (Range_allocator::Alloc_error) { /* ignored */ }
-				);
+					[&] (Alloc_error) { });
 			},
-			[&] (Range_allocator::Alloc_error) { }
-			);
+			[&] (Alloc_error) { });
 	}

 	unsigned const cpus = _boot_info().cpus;
@@ -41,8 +41,8 @@ void *Hw::Address_space::_table_alloc()

 	return _cma().alloc_aligned(sizeof(Page_table), align).convert<void *>(

-		[&] (void *ptr) {
-			return ptr; },
+		[&] (Range_allocator::Allocation &a) {
+			a.deallocate = false; return a.ptr; },

 		[&] (Range_allocator::Alloc_result) -> void * {
 			/* XXX distinguish error conditions */
@@ -147,13 +147,11 @@ Cap_space::Cap_space() : _slab(nullptr, &_initial_sb) { }

 void Cap_space::upgrade_slab(Allocator &alloc)
 {
 	alloc.try_alloc(SLAB_SIZE).with_result(

-		[&] (void *ptr) {
-			_slab.insert_sb(ptr); },
-
-		[&] (Allocator::Alloc_error error) {
-			Allocator::throw_alloc_error(error);
-		});
+		[&] (Memory::Allocation &a) {
+			a.deallocate = false;
+			_slab.insert_sb(a.ptr);
+		},
+		[&] (Alloc_error e) { raise(e); });
 }
@@ -58,12 +58,13 @@ static addr_t _alloc_core_local_utcb(addr_t core_addr)
 	 */
 	return platform().ram_alloc().try_alloc(sizeof(Native_utcb)).convert<addr_t>(

-		[&] (void *utcb_phys) {
-			map_local((addr_t)utcb_phys, core_addr,
+		[&] (Range_allocator::Allocation &utcb_phys) {
+			map_local((addr_t)utcb_phys.ptr, core_addr,
 			          sizeof(Native_utcb) / get_page_size());
-			return addr_t(utcb_phys);
+			utcb_phys.deallocate = false;
+			return addr_t(utcb_phys.ptr);
 		},
-		[&] (Range_allocator::Alloc_error) {
+		[&] (Alloc_error) {
 			error("failed to allocate UTCB for core/kernel thread!");
 			return 0ul;
 		});
@@ -12,6 +12,9 @@
 * under the terms of the GNU Affero General Public License version 3.
 */

+/* Genode includes */
+#include <base/error.h>
+
 /* core includes */
 #include <ram_dataspace_factory.h>
 #include <platform.h>
@@ -29,28 +32,23 @@ void Ram_dataspace_factory::_revoke_ram_ds(Dataspace_component &) { }

 void Ram_dataspace_factory::_clear_ds (Dataspace_component &ds)
 {
+	using Virt_allocation = Range_allocator::Allocation;
+
 	size_t page_rounded_size = (ds.size() + get_page_size() - 1) & get_page_mask();

-	struct Guard
-	{
-		Range_allocator &virt_alloc;
-		struct { void *virt_ptr = nullptr; };
-
-		Guard(Range_allocator &virt_alloc) : virt_alloc(virt_alloc) { }
-
-		~Guard() { if (virt_ptr) virt_alloc.free(virt_ptr); }
-
-	} guard(platform().region_alloc());
-
 	/* allocate range in core's virtual address space */
-	platform().region_alloc().try_alloc(page_rounded_size).with_result(
-		[&] (void *ptr) { guard.virt_ptr = ptr; },
-		[&] (Range_allocator::Alloc_error e) {
-			error("could not allocate virtual address range in core of size ",
-			      page_rounded_size, ", error=", e); });
+	Range_allocator::Result const virt =
+		platform().region_alloc().try_alloc(page_rounded_size);

-	if (!guard.virt_ptr)
+	if (virt.failed()) {
+		error("clear_ds could not allocate virtual address range of size ",
+		      page_rounded_size);
 		return;
+	}
+
+	addr_t const virt_addr = virt.convert<addr_t>(
+		[&] (Virt_allocation const &a) { return addr_t(a.ptr); },
+		[]  (Alloc_error) { return 0UL; });

 	/*
 	 * Map and clear dataspaces in chucks of 128MiB max to prevent large
@@ -62,22 +60,22 @@ void Ram_dataspace_factory::_clear_ds (Dataspace_component &ds)
 	addr_t chunk_phys_addr = ds.phys_addr();

 	while (size_remaining) {
-		size_t chunk_size = min(size_remaining, max_chunk_size);
-		size_t num_pages  = chunk_size >> get_page_size_log2();
+		size_t const chunk_size = min(size_remaining, max_chunk_size);
+		size_t const num_pages  = chunk_size >> get_page_size_log2();

 		/* map the dataspace's physical pages to corresponding virtual addresses */
-		if (!map_local(chunk_phys_addr, (addr_t)guard.virt_ptr, num_pages)) {
+		if (!map_local(chunk_phys_addr, virt_addr, num_pages)) {
 			error("core-local memory mapping failed");
 			return;
 		}

 		/* dependent on the architecture, cache maintainance might be necessary */
-		Cpu::clear_memory_region((addr_t)guard.virt_ptr, chunk_size,
+		Cpu::clear_memory_region(virt_addr, chunk_size,
 		                         ds.cacheability() != CACHED);

 		/* unmap dataspace from core */
-		if (!unmap_local((addr_t)guard.virt_ptr, num_pages))
-			error("could not unmap core-local address range at ", guard.virt_ptr);
+		if (!unmap_local(virt_addr, num_pages))
+			error("could not unmap core-local address range at ", Hex(virt_addr));

 		size_remaining -= chunk_size;
 		chunk_phys_addr += chunk_size;
@@ -86,10 +86,10 @@ class Core::Rpc_cap_factory

 			return _slab.try_alloc(sizeof(Kobject)).convert<Native_capability>(

-				[&] (void *ptr) {
+				[&] (Memory::Allocation &a) {

 					/* create kernel object */
-					Kobject &obj = *construct_at<Kobject>(ptr, ep);
+					Kobject &obj = *construct_at<Kobject>(a.ptr, ep);

 					if (!obj.cap.valid()) {
 						raw("Invalid entrypoint ", (addr_t)Capability_space::capid(ep),
@@ -100,9 +100,10 @@ class Core::Rpc_cap_factory

 					/* store it in the list and return result */
 					_list.insert(&obj);
+					a.deallocate = false;
 					return obj.cap;
 				},
-				[&] (Allocator::Alloc_error) -> Native_capability {
+				[&] (Alloc_error) -> Native_capability {
 					/* XXX distinguish error conditions */
 					throw Allocator::Out_of_memory();
 				});
@@ -64,6 +64,7 @@ class Core::Platform : public Platform_generic

 			struct Dummy_allocator : Range_allocator
 			{
+				void   _free(Allocation &)         override { ASSERT_NEVER_CALLED; }
 				void   free(void *, size_t)        override { ASSERT_NEVER_CALLED; }
 				bool   need_size_for_free()  const override { ASSERT_NEVER_CALLED; }
 				size_t consumed()            const override { ASSERT_NEVER_CALLED; }
@@ -87,9 +88,10 @@ class Core::Platform : public Platform_generic
 			 */
 			struct Pseudo_ram_allocator : Range_allocator
 			{
-				Alloc_result try_alloc(size_t) override { return nullptr; }
-				Alloc_result alloc_aligned(size_t, unsigned, Range) override { return nullptr; }
-				Alloc_result alloc_addr(size_t, addr_t) override { return nullptr; }
+				void _free(Allocation &) override { }
+				Alloc_result try_alloc(size_t) override { return { *this, { } }; }
+				Alloc_result alloc_aligned(size_t, unsigned, Range) override { return { *this, { } }; }
+				Alloc_result alloc_addr(size_t, addr_t) override { return { *this, { } }; }
 				Range_result add_range(addr_t, size_t) override { return Ok(); }
 				Range_result remove_range(addr_t, size_t) override { return Ok(); }
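Stub allocators that never hand out usable memory now return an empty
allocation bound to themselves rather than a null pointer, keeping the
'Attempt' contract intact. An abbreviated sketch mirroring
'Pseudo_ram_allocator' above (the name 'Null_allocator' is made up;
the remaining pure-virtual methods are omitted):

    struct Null_allocator : Genode::Range_allocator
    {
        void _free(Allocation &) override { }

        Alloc_result try_alloc(size_t) override { return { *this, { } }; }
        Alloc_result alloc_aligned(size_t, unsigned, Range) override {
            return { *this, { } }; }
        Alloc_result alloc_addr(size_t, Genode::addr_t) override {
            return { *this, { } }; }
    };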
@@ -430,7 +430,10 @@ namespace {
 		{
 			using size_t = Genode::size_t;

-			Alloc_result try_alloc(size_t size) override { return malloc(size); }
+			Alloc_result try_alloc(size_t size) override {
+				return { *this, { malloc(size), size } }; }
+
+			void _free(Allocation &a) override { ::free(a.ptr); }

 			void free(void *addr, size_t) override { ::free(addr); }
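Host-side allocators can carry the new interface directly on the C
heap. The hunk above wraps 'malloc' unconditionally; the variant below
adds a null check and maps it to 'Alloc_error::DENIED', which is an
assumption for illustration, not part of the commit:

    Alloc_result try_alloc(size_t size) override
    {
        void * const ptr = malloc(size);
        if (!ptr)
            return Genode::Alloc_error::DENIED;  /* assumed error mapping */
        return { *this, { ptr, size } };
    }

    void _free(Allocation &a) override { ::free(a.ptr); }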
@@ -52,8 +52,8 @@ Main::Main(Env &env) : heap(env.ram(), env.rm())
 	/* induce initial heap expansion to remove RM noise */
 	if (1) {
 		heap.try_alloc(0x100000).with_result(
-			[&] (void *ptr) { heap.free(ptr, 0); },
-			[&] (Allocator::Alloc_error) { });
+			[&] (Heap::Allocation &) { /* deallocate == true */ },
+			[&] (Alloc_error) { });
 	}

 	addr_t beg((addr_t)&blob_beg);
@@ -28,6 +28,8 @@ using namespace Core;
 */
 static inline void * alloc_region(Dataspace_component &ds, const size_t size)
 {
+	using Region_allocation = Range_allocator::Allocation;
+
 	/*
 	 * Allocate range in core's virtual address space
 	 *
@@ -39,8 +41,8 @@ static inline void * alloc_region(Dataspace_component &ds, const size_t size)
 	for (; align_log2 >= get_page_size_log2(); align_log2--) {

 		platform().region_alloc().alloc_aligned(size, (unsigned)align_log2).with_result(
-			[&] (void *ptr) { virt_addr = ptr; },
-			[&] (Allocator::Alloc_error) { });
+			[&] (Region_allocation &a) { a.deallocate = false; virt_addr = a.ptr; },
+			[&] (Alloc_error) { });

 		if (virt_addr)
 			break;
@@ -64,14 +64,16 @@ static void deassociate(addr_t irq_sel)
 static bool associate_msi(addr_t irq_sel, addr_t phys_mem, addr_t &msi_addr,
                           addr_t &msi_data, Signal_context_capability sig_cap)
 {
+	using Virt_allocation = Range_allocator::Allocation;
+
 	if (!phys_mem)
 		return irq_ctrl(irq_sel, msi_addr, msi_data, sig_cap.local_name(), Nova::Gsi_flags(), 0);

 	return platform().region_alloc().alloc_aligned(4096, 12).convert<bool>(

-		[&] (void *virt_ptr) {
+		[&] (Virt_allocation &virt) {

-			addr_t const virt_addr = reinterpret_cast<addr_t>(virt_ptr);
+			addr_t const virt_addr = reinterpret_cast<addr_t>(virt.ptr);

 			using Nova::Rights;
 			using Nova::Utcb;
@@ -81,20 +83,17 @@ static bool associate_msi(addr_t irq_sel, addr_t phys_mem, addr_t &msi_addr,

 			Utcb &utcb = *reinterpret_cast<Utcb *>(Thread::myself()->utcb());

-			if (map_local_phys_to_virt(utcb, phys_crd, virt_crd, platform_specific().core_pd_sel())) {
-				platform().region_alloc().free(virt_ptr, 4096);
+			if (map_local_phys_to_virt(utcb, phys_crd, virt_crd, platform_specific().core_pd_sel()))
 				return false;
-			}

 			/* try to assign MSI to device */
 			bool res = irq_ctrl(irq_sel, msi_addr, msi_data, sig_cap.local_name(), Nova::Gsi_flags(), virt_addr);

 			unmap_local(Nova::Mem_crd(virt_addr >> 12, 0, Rights(true, true, true)));
-			platform().region_alloc().free(virt_ptr, 4096);

 			return res;
 		},
-		[&] (Range_allocator::Alloc_error) {
+		[&] (Alloc_error) {
 			return false;
 		});
 }
@@ -231,10 +230,11 @@ Irq_session_component::Irq_session_component(Range_allocator &irq_alloc,
 		throw Service_denied();
 	}

-	if (irq_alloc.alloc_addr(1, irq_number).failed()) {
-		error("unavailable IRQ ", irq_number, " requested");
-		throw Service_denied();
-	}
+	irq_alloc.alloc_addr(1, irq_number).with_result(
+		[&] (Allocator::Allocation &a) { a.deallocate = false; },
+		[&] (Alloc_error) {
+			error("unavailable IRQ ", irq_number, " requested");
+			throw Service_denied(); });

 	_irq_number = (unsigned)irq_number;
@@ -77,23 +77,28 @@ extern unsigned _prog_img_beg, _prog_img_end;
 addr_t Core::Platform::_map_pages(addr_t const phys_addr, addr_t const pages,
                                   bool guard_page)
 {
+	using Region_allocation = Range_allocator::Allocation;
+
 	addr_t const size = pages << get_page_size_log2();

 	/* try to reserve contiguous virtual area */
 	return region_alloc().alloc_aligned(size + (guard_page ? get_page_size() : 0),
 	                                    get_page_size_log2()).convert<addr_t>(
-		[&] (void *core_local_ptr) {
+		[&] (Region_allocation &core_local) {

-			addr_t const core_local_addr = reinterpret_cast<addr_t>(core_local_ptr);
+			addr_t const core_local_addr = reinterpret_cast<addr_t>(core_local.ptr);

 			int res = map_local(_core_pd_sel, *__main_thread_utcb, phys_addr,
 			                    core_local_addr, pages,
 			                    Nova::Rights(true, true, false), true);
+			if (res)
+				return 0UL;

-			return res ? 0 : core_local_addr;
+			core_local.deallocate = false;
+			return core_local_addr;
 		},

-		[&] (Allocator::Alloc_error) {
+		[&] (Alloc_error) {
 			return 0UL; });
 }
@@ -619,17 +624,18 @@ Core::Platform::Platform()

 	auto export_pages_as_rom_module = [&] (auto rom_name, size_t pages, auto content_fn)
 	{
+		using Phys_allocation = Range_allocator::Allocation;
+
 		size_t const bytes = pages << get_page_size_log2();
 		ram_alloc().alloc_aligned(bytes, get_page_size_log2()).with_result(

-			[&] (void *phys_ptr) {
+			[&] (Phys_allocation &phys) {

-				addr_t const phys_addr = reinterpret_cast<addr_t>(phys_ptr);
+				addr_t const phys_addr = reinterpret_cast<addr_t>(phys.ptr);
 				char * const core_local_ptr = (char *)_map_pages(phys_addr, pages);

 				if (!core_local_ptr) {
 					warning("failed to export ", rom_name, " as ROM module");
-					ram_alloc().free(phys_ptr, bytes);
 					return;
 				}

@@ -639,10 +645,12 @@ Core::Platform::Platform()
 				new (core_mem_alloc())
 					Rom_module(_rom_fs, rom_name, phys_addr, bytes);

+				phys.deallocate = false;
+
 				/* leave the ROM backing store mapped within core */
 			},

-			[&] (Range_allocator::Alloc_error) {
+			[&] (Alloc_error) {
 				warning("failed to allocate physical memory for exporting ",
 				        rom_name, " as ROM module"); });
 	};
@@ -811,29 +819,29 @@ Core::Platform::Platform()
 		unsigned index = first_index;
 		for (unsigned i = 0; i < 32; i++)
 		{
-			void * phys_ptr = nullptr;
+			using Phys_allocation = Range_allocator::Allocation;

-			ram_alloc().alloc_aligned(get_page_size(), get_page_size_log2()).with_result(
-				[&] (void *ptr) { phys_ptr = ptr; },
-				[&] (Range_allocator::Alloc_error) { /* covered by nullptr test below */ });
+			bool ok = ram_alloc().alloc_aligned(get_page_size(),
+			                                    get_page_size_log2()).convert<bool>(
+				[&] (Phys_allocation &phys) {
+					addr_t phys_addr       = reinterpret_cast<addr_t>(phys.ptr);
+					addr_t core_local_addr = _map_pages(phys_addr, 1);

-			if (phys_ptr == nullptr)
-				break;
+					if (!core_local_addr)
+						return false;

-			addr_t phys_addr = reinterpret_cast<addr_t>(phys_ptr);
-			addr_t core_local_addr = _map_pages(phys_addr, 1);
+					Cap_range &range = *reinterpret_cast<Cap_range *>(core_local_addr);
+					construct_at<Cap_range>(&range, index);

-			if (!core_local_addr) {
-				ram_alloc().free(phys_ptr);
-				break;
-			}
+					cap_map().insert(range);
+					index = (unsigned)(range.base() + range.elements());
+					phys.deallocate = false;
+					return true;
+				},
+				[&] (Alloc_error) { log("Alloc_error"); return false; });

-			Cap_range &range = *reinterpret_cast<Cap_range *>(core_local_addr);
-			construct_at<Cap_range>(&range, index);
-
-			cap_map().insert(range);
-
-			index = (unsigned)(range.base() + range.elements());
+			if (!ok)
+				break;
 		}
 		_max_caps = index - first_index;
|
@ -31,6 +31,8 @@ void Ram_dataspace_factory::_revoke_ram_ds(Dataspace_component &) { }
|
||||
|
||||
static inline void * alloc_region(Dataspace_component &ds, const size_t size)
|
||||
{
|
||||
using Region_allocation = Range_allocator::Allocation;
|
||||
|
||||
/*
|
||||
* Allocate range in core's virtual address space
|
||||
*
|
||||
@ -42,8 +44,8 @@ static inline void * alloc_region(Dataspace_component &ds, const size_t size)
|
||||
for (; align_log2 >= get_page_size_log2(); align_log2--) {
|
||||
|
||||
platform().region_alloc().alloc_aligned(size, (unsigned)align_log2).with_result(
|
||||
[&] (void *ptr) { virt_addr = ptr; },
|
||||
[&] (Range_allocator::Alloc_error) { /* try next iteration */ }
|
||||
[&] (Region_allocation &a) { a.deallocate = false; virt_addr = a.ptr; },
|
||||
[&] (Alloc_error) { /* try next iteration */ }
|
||||
);
|
||||
if (virt_addr)
|
||||
return virt_addr;
|
||||
|
@@ -38,17 +38,18 @@ Core_region_map::attach(Dataspace_capability ds_cap, Attr const &attr)
 		Range_allocator &virt_alloc = platform().region_alloc();
 		return virt_alloc.try_alloc(page_rounded_size).convert<Attach_result>(

-			[&] (void *virt_addr) -> Attach_result {
+			[&] (Range_allocator::Allocation &virt) -> Attach_result {

 				/* map the dataspace's physical pages to virtual memory */
 				unsigned num_pages = page_rounded_size >> get_page_size_log2();
-				if (!map_local(ds->phys_addr(), (addr_t)virt_addr, num_pages))
+				if (!map_local(ds->phys_addr(), (addr_t)virt.ptr, num_pages))
 					return Attach_error::INVALID_DATASPACE;

-				return Range { .start = addr_t(virt_addr), .num_bytes = page_rounded_size };
+				virt.deallocate = false;
+				return Range { .start = addr_t(virt.ptr), .num_bytes = page_rounded_size };
 			},

-			[&] (Range_allocator::Alloc_error) {
+			[&] (Alloc_error) {
 				error("could not allocate virtual address range in core of size ",
 				      page_rounded_size);
 				return Attach_error::REGION_CONFLICT;
@@ -140,10 +140,12 @@ Irq_session_component::Irq_session_component(Range_allocator &irq_alloc,
 	if (msi)
 		throw Service_denied();

-	if (irq_alloc.alloc_addr(1, _irq_number).failed()) {
-		error("unavailable IRQ ", Hex(_irq_number), " requested");
-		throw Service_denied();
-	}
+	_irq_alloc.alloc_addr(1, _irq_number).with_result(
+		[&] (Range_allocator::Allocation &irq_number) {
+			irq_number.deallocate = false; },
+		[&] (Alloc_error) {
+			error("unavailable interrupt ", _irq_number, " requested");
+			throw Service_denied(); });

 	_irq_object.start();
 }
@@ -184,32 +184,31 @@ Core::Platform::Platform()

 	/* core log as ROM module */
 	{
-		unsigned const pages  = 1;
-		size_t const log_size = pages << get_page_size_log2();
-		unsigned const align  = get_page_size_log2();
+		unsigned const pages    = 1;
+		size_t   const log_size = pages << get_page_size_log2();
+		unsigned const align    = get_page_size_log2();

 		ram_alloc().alloc_aligned(log_size, align).with_result(

-			[&] (void *phys) {
-				addr_t const phys_addr = reinterpret_cast<addr_t>(phys);
+			[&] (Range_allocator::Allocation &phys) {
+				addr_t const phys_addr = reinterpret_cast<addr_t>(phys.ptr);

 				region_alloc().alloc_aligned(log_size, align).with_result(
+					[&] (Range_allocator::Allocation &virt) {

-					[&] (void *ptr) {
-
-						map_local(phys_addr, (addr_t)ptr, pages);
-						memset(ptr, 0, log_size);
+						map_local(phys_addr, (addr_t)virt.ptr, pages);
+						memset(virt.ptr, 0, log_size);

 						new (core_mem_alloc())
 							Rom_module(_rom_fs, "core_log", phys_addr, log_size);

-						init_core_log(Core_log_range { (addr_t)ptr, log_size } );
+						init_core_log(Core_log_range { (addr_t)virt.ptr, log_size } );
+
+						phys.deallocate = virt.deallocate = false;
 					},
-					[&] (Range_allocator::Alloc_error) { }
-				);
+					[&] (Alloc_error) { });
 			},
-			[&] (Range_allocator::Alloc_error) { }
-			);
+			[&] (Alloc_error) { });
 	}

 	/* export platform-specific infos */
@@ -219,14 +218,14 @@ Core::Platform::Platform()

 		ram_alloc().alloc_aligned(size, get_page_size_log2()).with_result(

-			[&] (void *phys_ptr) {
-				addr_t const phys_addr = reinterpret_cast<addr_t>(phys_ptr);
+			[&] (Range_allocator::Allocation &phys) {
+				addr_t const phys_addr = reinterpret_cast<addr_t>(phys.ptr);

 				/* let one page free after the log buffer */
 				region_alloc().alloc_aligned(size, get_page_size_log2()).with_result(

-					[&] (void *core_local_ptr) {
-						addr_t const core_local_addr = reinterpret_cast<addr_t>(core_local_ptr);
+					[&] (Range_allocator::Allocation &core_local) {
+						addr_t const core_local_addr = reinterpret_cast<addr_t>(core_local.ptr);

 						if (map_local(phys_addr, core_local_addr, pages)) {

@@ -237,13 +236,13 @@ Core::Platform::Platform()

 							new (core_mem_alloc())
 								Rom_module(_rom_fs, "platform_info", phys_addr, size);
+
+							phys.deallocate = core_local.deallocate = false;
 						}
 					},
-					[&] (Range_allocator::Alloc_error) { }
-				);
+					[&] (Alloc_error) { });
 			},
-			[&] (Range_allocator::Alloc_error) { }
-			);
+			[&] (Alloc_error) { });
 	}
 }
@@ -38,41 +38,28 @@ void Ram_dataspace_factory::_clear_ds (Dataspace_component &ds)
 {
 	size_t page_rounded_size = (ds.size() + get_page_size() - 1) & get_page_mask();

-	struct Guard
-	{
-		Range_allocator &virt_alloc;
-		struct { void *virt_ptr = nullptr; };
-
-		Guard(Range_allocator &virt_alloc) : virt_alloc(virt_alloc) { }
-
-		~Guard() { if (virt_ptr) virt_alloc.free(virt_ptr); }
-
-	} guard(platform().region_alloc());
-
 	/* allocate range in core's virtual address space */
 	platform().region_alloc().try_alloc(page_rounded_size).with_result(
-		[&] (void *ptr) { guard.virt_ptr = ptr; },
-		[&] (Range_allocator::Alloc_error e) {
-			error("could not allocate virtual address range in core of size ",
-			      page_rounded_size, ", error=", e); });
+		[&] (Range_allocator::Allocation &virt) {

-	if (!guard.virt_ptr)
-		return;
+			/* map the dataspace's physical pages to corresponding virtual addresses */
+			size_t num_pages = page_rounded_size >> get_page_size_log2();
+			if (!map_local(ds.phys_addr(), (addr_t)virt.ptr, num_pages)) {
+				error("core-local memory mapping failed");
+				return;
+			}

-	/* map the dataspace's physical pages to corresponding virtual addresses */
-	size_t num_pages = page_rounded_size >> get_page_size_log2();
-	if (!map_local(ds.phys_addr(), (addr_t)guard.virt_ptr, num_pages)) {
-		error("core-local memory mapping failed");
-		return;
-	}
+			/* clear dataspace */
+			size_t num_longwords = page_rounded_size/sizeof(long);
+			for (long *dst = (long *)virt.ptr; num_longwords--;)
+				*dst++ = 0;

-	/* clear dataspace */
-	size_t num_longwords = page_rounded_size/sizeof(long);
-	for (long *dst = (long *)guard.virt_ptr; num_longwords--;)
-		*dst++ = 0;
-
-	/* unmap dataspace from core */
-	if (!unmap_local((addr_t)guard.virt_ptr, num_pages))
-		error("could not unmap core-local address range at ", guard.virt_ptr, ", "
-		      "error=", Okl4::L4_ErrorCode());
+			/* unmap dataspace from core */
+			if (!unmap_local((addr_t)virt.ptr, num_pages))
+				error("could not unmap core-local address range at ", virt.ptr, ", "
+				      "error=", Okl4::L4_ErrorCode());
+		},
+		[&] (Alloc_error) {
+			error("could not allocate virtual address range of size ", page_rounded_size);
+		});
 }
@@ -75,8 +75,9 @@ Io_mem_session_component::Map_local_result Io_mem_session_component::_map_local(
 	                  : log2(size);

 	return platform().region_alloc().alloc_aligned(size, align).convert<addr_t>(
-		[&] (void *ptr) { return (addr_t)ptr; },
-		[&] (Range_allocator::Alloc_error) -> addr_t {
+		[&] (Range_allocator::Allocation &a) {
+			a.deallocate = false; return (addr_t)a.ptr; },
+		[&] (Alloc_error) -> addr_t {
 			error(__func__, ": alloc_aligned failed!");
 			return 0; });
 };
@@ -133,10 +133,12 @@ Irq_session_component::Irq_session_component(Range_allocator &irq_alloc,
 	if (msi)
 		throw Service_denied();

-	if (irq_alloc.alloc_addr(1, _irq_number).failed()) {
-		error("unavailable IRQ ", Hex(_irq_number), " requested");
-		throw Service_denied();
-	}
+	_irq_alloc.alloc_addr(1, _irq_number).with_result(
+		[&] (Range_allocator::Allocation &irq_number) {
+			irq_number.deallocate = false; },
+		[&] (Alloc_error) {
+			error("unavailable interrupt ", _irq_number, " requested");
+			throw Service_denied(); });

 	_irq_object.start();
 }
@@ -602,11 +602,11 @@ Core::Platform::Platform()
 		size_t const size = 1 << get_page_size_log2();
 		ram_alloc().alloc_aligned(size, get_page_size_log2()).with_result(

-			[&] (void *phys_ptr) {
+			[&] (Range_allocator::Allocation &phys) {

 				/* core-local memory is one-to-one mapped physical RAM */
-				addr_t const phys_addr = reinterpret_cast<addr_t>(phys_ptr);
-				void * const core_local_ptr = phys_ptr;
+				addr_t const phys_addr = reinterpret_cast<addr_t>(phys.ptr);
+				void * const core_local_ptr = phys.ptr;

 				region_alloc().remove_range((addr_t)core_local_ptr, size);
 				memset(core_local_ptr, 0, size);
@@ -614,8 +614,10 @@ Core::Platform::Platform()

 				new (core_mem_alloc())
 					Rom_module(_rom_fs, rom_name, phys_addr, size);
+
+				phys.deallocate = false;
 			},
-			[&] (Range_allocator::Alloc_error) {
+			[&] (Alloc_error) {
 				warning("failed to export ", rom_name, " as ROM module"); }
 		);
 	};
@@ -35,15 +35,16 @@ Core_region_map::attach(Dataspace_capability ds_cap, Attr const &attr)

 		/* allocate range in core's virtual address space */
 		return platform().region_alloc().try_alloc(page_rounded_size).convert<Attach_result>(
-			[&] (void *virt_ptr) {
+			[&] (Range_allocator::Allocation &virt) {

 				/* map the dataspace's physical pages to core-local virtual addresses */
 				size_t num_pages = page_rounded_size >> get_page_size_log2();
-				map_local(ds->phys_addr(), (addr_t)virt_ptr, num_pages);
+				map_local(ds->phys_addr(), (addr_t)virt.ptr, num_pages);

-				return Range { .start = addr_t(virt_ptr), .num_bytes = page_rounded_size };
+				virt.deallocate = false;
+				return Range { .start = addr_t(virt.ptr), .num_bytes = page_rounded_size };
 			},
-			[&] (Range_allocator::Alloc_error) -> Attach_result {
+			[&] (Alloc_error) -> Attach_result {
 				error("could not allocate virtual address range in core of size ",
 				      page_rounded_size);
 				return Attach_error::REGION_CONFLICT;
@@ -60,11 +60,13 @@ class Core::Static_allocator : public Allocator
 			}

 			try {
-				return &_elements[_used.alloc()]; }
+				return { *this, { &_elements[_used.alloc()], size } }; }
 			catch (typename Bit_allocator<MAX>::Out_of_indices) {
 				return Alloc_error::DENIED; }
 		}

+		void _free(Allocation &a) override { free(a.ptr, a.num_bytes); }
+
 		size_t overhead(size_t) const override { return 0; }

 		void free(void *ptr, size_t) override
|
||||
|
||||
return phys_alloc.alloc_aligned(size, align).convert<addr_t>(
|
||||
|
||||
[&] (void *ptr) {
|
||||
return (addr_t)ptr; },
|
||||
[&] (Range_allocator::Allocation &phys) {
|
||||
phys.deallocate = false;
|
||||
return (addr_t)phys.ptr; },
|
||||
|
||||
[&] (Range_allocator::Alloc_error) -> addr_t {
|
||||
[&] (Alloc_error) -> addr_t {
|
||||
error(__PRETTY_FUNCTION__, ": allocation of untyped memory failed");
|
||||
throw Phys_alloc_failed(); });
|
||||
}
|
||||
|
@@ -107,10 +107,12 @@ Irq_session_component::Irq_session_component(Range_allocator &irq_alloc,
 {
 	Irq_args const irq_args(args);

-	if (irq_alloc.alloc_addr(1, _irq_number).failed()) {
-		error("unavailable IRQ ", _irq_number, " requested");
-		throw Service_denied();
-	}
+	_irq_alloc.alloc_addr(1, _irq_number).with_result(
+		[&] (Range_allocator::Allocation &irq_number) {
+			irq_number.deallocate = false; },
+		[&] (Alloc_error) {
+			error("unavailable interrupt ", _irq_number, " requested");
+			throw Service_denied(); });

 	if (!_irq_object.associate(irq_args)) {
 		error("could not associate with IRQ ", irq_args.irq_number());
@@ -295,8 +295,9 @@ void Core::Platform::_init_rom_modules()
 		size_t const align = get_page_size_log2();

 		return _unused_phys_alloc.alloc_aligned(size, align).convert<addr_t>(
-			[&] (void *ptr) { return (addr_t)ptr; },
-			[&] (Range_allocator::Alloc_error) -> addr_t {
+			[&] (Range_allocator::Allocation &a) {
+				a.deallocate = false; return (addr_t)a.ptr; },
+			[&] (Alloc_error) -> addr_t {
 				error("could not reserve phys CNode space for boot modules");
 				struct Init_rom_modules_failed { };
 				throw Init_rom_modules_failed();
@@ -476,7 +477,7 @@ void Core::Platform::_init_rom_modules()

 			addr_t const addr = Untyped_memory::alloc_page(_alloc);

-			bool keep = false;
+			bool deallocate = true;

 			Phys_alloc_guard(Range_allocator &alloc) :_alloc(alloc)
 			{
@@ -485,10 +486,8 @@ void Core::Platform::_init_rom_modules()

 			~Phys_alloc_guard()
 			{
-				if (keep)
-					return;
-
-				Untyped_memory::free_page(_alloc, addr);
+				if (deallocate)
+					Untyped_memory::free_page(_alloc, addr);
 			}
 		} phys { ram_alloc() };
@@ -497,24 +496,23 @@ void Core::Platform::_init_rom_modules()

 		region_alloc().alloc_aligned(size, align).with_result(

-			[&] (void *core_local_ptr) {
+			[&] (Range_allocator::Allocation &core_local) {

-				if (!map_local(phys.addr, (addr_t)core_local_ptr, pages, this)) {
+				if (!map_local(phys.addr, (addr_t)core_local.ptr, pages, this)) {
 					error("could not setup platform_info ROM - map error");
-					region_alloc().free(core_local_ptr);
 					return;
 				}

-				memset(core_local_ptr, 0, size);
-				content_fn((char *)core_local_ptr, size);
+				memset(core_local.ptr, 0, size);
+				content_fn((char *)core_local.ptr, size);

 				new (core_mem_alloc())
 					Rom_module(_rom_fs, rom_name, phys.addr, size);

-				phys.keep = true;
+				phys.deallocate = core_local.deallocate = false;
 			},

-			[&] (Range_allocator::Alloc_error) {
+			[&] (Alloc_error) {
 				error("could not setup platform_info ROM - region allocation error");
 			}
 		);
@@ -588,17 +586,19 @@ Core::Platform::Platform()
 	addr_t const virt_size = 32 * 1024 * 1024;
 	_unused_virt_alloc.alloc_aligned(virt_size, get_page_size_log2()).with_result(

-		[&] (void *virt_ptr) {
-			addr_t const virt_addr = (addr_t)virt_ptr;
+		[&] (Range_allocator::Allocation &virt) {
+			addr_t const virt_addr = (addr_t)virt.ptr;

 			/* add to available virtual region of core */
 			_core_mem_alloc.virt_alloc().add_range(virt_addr, virt_size);

 			/* back region by page tables */
 			_core_vm_space.unsynchronized_alloc_page_tables(virt_addr, virt_size);
+
+			virt.deallocate = false;
 		},

-		[&] (Range_allocator::Alloc_error) {
+		[&] (Alloc_error) {
 			warning("failed to reserve core virtual memory for dynamic use"); }
 	);
@@ -105,29 +105,24 @@ static void prepopulate_ipc_buffer(Ipc_buffer_phys const ipc_buffer_phys,
 	/* allocate range in core's virtual address space */
 	platform().region_alloc().try_alloc(page_rounded_size).with_result(

-		[&] (void *virt_ptr) {
+		[&] (Range_allocator::Allocation &virt) {

 			/* map the IPC buffer to core-local virtual addresses */
-			map_local(ipc_buffer_phys.addr, (addr_t)virt_ptr, 1);
+			map_local(ipc_buffer_phys.addr, (addr_t)virt.ptr, 1);

 			/* populate IPC buffer with thread information */
-			Native_utcb &utcb = *(Native_utcb *)virt_ptr;
+			Native_utcb &utcb = *(Native_utcb *)virt.ptr;
 			utcb.ep_sel  (ep_sel  .value());
 			utcb.lock_sel(lock_sel.value());
 			utcb.ipcbuffer(utcb_virt);

 			/* unmap IPC buffer from core */
-			if (!unmap_local((addr_t)virt_ptr, 1)) {
+			if (!unmap_local((addr_t)virt.ptr, 1))
 				error("could not unmap core virtual address ",
-				      virt_ptr, " in ", __PRETTY_FUNCTION__);
-				return;
-			}
-
-			/* free core's virtual address space */
-			platform().region_alloc().free(virt_ptr, page_rounded_size);
+				      virt.ptr, " in ", __PRETTY_FUNCTION__);
 		},

-		[&] (Range_allocator::Alloc_error) {
+		[&] (Alloc_error) {
 			error("could not allocate virtual address range in core of size ",
 			      page_rounded_size);
 		}
@@ -45,8 +45,9 @@ void Ram_dataspace_factory::_clear_ds (Dataspace_component &ds)
 	auto alloc_one_virt_page = [&] () -> void *
 	{
 		return platform().region_alloc().try_alloc(get_page_size()).convert<void *>(
-			[&] (void *ptr) { return ptr; },
-			[&] (Range_allocator::Alloc_error) -> void * {
+			[&] (Range_allocator::Allocation &a) {
+				a.deallocate = false; return a.ptr; },
+			[&] (Alloc_error) -> void * {
 				ASSERT_NEVER_CALLED; });
 	};
@@ -16,6 +16,7 @@

 #include <util/interface.h>
 #include <util/attempt.h>
+#include <base/memory.h>
 #include <base/stdint.h>
 #include <base/exception.h>
 #include <base/quota_guard.h>
@@ -58,30 +59,12 @@ struct Genode::Deallocator : Interface
 };


-struct Genode::Allocator : Deallocator
+struct Genode::Allocator : Deallocator, Memory::Constrained_allocator
 {
 	using Out_of_memory = Out_of_ram;
 	using Denied        = Genode::Denied;

-	/**
-	 * Return type of 'try_alloc'
-	 */
 	using Alloc_error  = Genode::Alloc_error;
-	using Alloc_result = Attempt<void *, Alloc_error>;
-
-	/**
-	 * Destructor
-	 */
-	virtual ~Allocator() { }
-
-	/**
-	 * Allocate block
-	 *
-	 * \param size      block size to allocate
-	 * \param out_addr  resulting pointer to the new block,
-	 *                  undefined in the error case
-	 */
-	virtual Alloc_result try_alloc(size_t size) = 0;
+	using Alloc_result = Memory::Constrained_allocator::Result;
+	using Allocation   = Memory::Allocation;

 	/**
 	 * Return total amount of backing store consumed by the allocator
@@ -118,7 +101,7 @@ struct Genode::Allocator : Deallocator
 	void *alloc(size_t size)
 	{
 		return try_alloc(size).convert<void *>(
-			[&] (void *ptr) { return ptr; },
+			[&] (Allocation &a) { a.deallocate = false; return a.ptr; },
			[&] (Alloc_error e) -> void * { raise(e); });
 	}
 };
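The legacy 'alloc' shim above preserves the raw-pointer contract by
disarming the guard before returning, so existing call sites remain
valid and stay responsible for calling 'free' themselves. Usage at a
legacy call site (illustrative):

    void *ptr = alloc.alloc(256);   /* raises Alloc_error on failure */
    /* ... */
    alloc.free(ptr, 256);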
@@ -126,10 +109,7 @@ struct Genode::Allocator : Deallocator

 struct Genode::Range_allocator : Allocator
 {
-	/**
-	 * Destructor
-	 */
-	virtual ~Range_allocator() { }
+	using Allocation = Allocator::Allocation;

 	/**
 	 * Return type of range-management operations
@@ -156,12 +136,12 @@ struct Genode::Range_allocator : Allocator
 	 *               as the power of two
 	 * \param range  address-range constraint for the allocation
 	 */
-	virtual Alloc_result alloc_aligned(size_t size, unsigned align, Range range) = 0;
+	virtual Result alloc_aligned(size_t size, unsigned align, Range range) = 0;

 	/**
 	 * Allocate block without constraining the address range
 	 */
-	Alloc_result alloc_aligned(size_t size, unsigned align)
+	Result alloc_aligned(size_t size, unsigned align)
 	{
 		return alloc_aligned(size, align, Range { .start = 0, .end = ~0UL });
 	}
@@ -172,7 +152,7 @@ struct Genode::Range_allocator : Allocator
 	 * \param size  size of new block
 	 * \param addr  desired address of block
 	 */
-	virtual Alloc_result alloc_addr(size_t size, addr_t addr) = 0;
+	virtual Result alloc_addr(size_t size, addr_t addr) = 0;

 	/**
 	 * Free a previously allocated block
@@ -299,15 +279,4 @@ void Genode::destroy(auto && dealloc, T *obj)
 	operator delete (obj, dealloc);
 }

-
-namespace Genode {
-
-	void static inline print(Output &out, Allocator::Alloc_result result)
-	{
-		result.with_result(
-			[&] (void *ptr)  { Genode::print(out, ptr); },
-			[&] (auto error) { Genode::print(out, error); });
-	}
-}
-
 #endif /* _INCLUDE__BASE__ALLOCATOR_H_ */
@@ -280,15 +280,22 @@ class Genode::Allocator_avl_base : public Range_allocator
 		using Range_allocator::alloc_aligned; /* import overloads */


-		/*************************
-		 ** Allocator interface **
-		 *************************/
+		/*********************************
+		 ** Memory::Allocator interface **
+		 *********************************/

 		Alloc_result try_alloc(size_t size) override
 		{
 			return Allocator_avl_base::alloc_aligned(size, (unsigned)log2(sizeof(addr_t)));
 		}

+		void _free(Allocation &a) override { free(a.ptr, a.num_bytes); }
+
+
+		/****************************************
+		 ** Legacy Genode::Allocator interface **
+		 ****************************************/
+
 		void free(void *addr, size_t) override { free(addr); }

 		/**
@@ -155,12 +155,19 @@ class Genode::Heap : public Allocator
 			fn(ds->local_addr, ds->size);
 		}

+		/*********************************
+		 ** Memory::Allocator interface **
+		 *********************************/
+
+		Alloc_result try_alloc(size_t) override;
+
+		void _free(Allocation &a) override { free(a.ptr, a.num_bytes); }

-		/*************************
-		 ** Allocator interface **
-		 *************************/

-		Alloc_result try_alloc(size_t) override;
+		/****************************************
+		 ** Legacy Genode::Allocator interface **
+		 ****************************************/
+
 		void free(void *, size_t) override;
 		size_t consumed() const override { return _quota_used; }
 		size_t overhead(size_t size) const override { return _alloc->overhead(size); }
@ -211,11 +218,19 @@ class Genode::Sliced_heap : public Allocator
|
||||
~Sliced_heap();
|
||||
|
||||
|
||||
/*************************
|
||||
** Allocator interface **
|
||||
*************************/
|
||||
/*********************************
|
||||
** Memory::Allocator interface **
|
||||
*********************************/
|
||||
|
||||
Alloc_result try_alloc(size_t size) override;
|
||||
|
||||
void _free(Allocation &a) override { free(a.ptr, a.num_bytes); }
|
||||
|
||||
|
||||
/****************************************
|
||||
** Legacy Genode::Allocator interface **
|
||||
****************************************/
|
||||
|
||||
Alloc_result try_alloc(size_t) override;
|
||||
void free(void *, size_t) override;
|
||||
size_t consumed() const override { return _consumed; }
|
||||
size_t overhead(size_t size) const override;
62
repos/base/include/base/memory.h
Normal file
@ -0,0 +1,62 @@
/*
 * \brief  Interfaces for byte-wise local memory allocations
 * \author Norman Feske
 * \date   2025-04-05
 */

/*
 * Copyright (C) 2025 Genode Labs GmbH
 *
 * This file is part of the Genode OS framework, which is distributed
 * under the terms of the GNU Affero General Public License version 3.
 */

#ifndef _INCLUDE__BASE__MEMORY_H_
#define _INCLUDE__BASE__MEMORY_H_

#include <base/error.h>
#include <util/allocation.h>

namespace Genode::Memory { struct Constrained_allocator; }


/**
 * Allocator of bytes that reflects allocation errors
 */
struct Genode::Memory::Constrained_allocator : Interface, Noncopyable
{
	struct Attr
	{
		void *ptr; size_t num_bytes;

		void print(Output &out) const
		{
			Genode::print(out, "ptr=", ptr, " num_bytes=", num_bytes);
		}
	};

	using Error      = Alloc_error;
	using Allocation = Genode::Allocation<Constrained_allocator>;
	using Result     = Allocation::Attempt;

	/**
	 * Allocate memory block
	 */
	virtual Result try_alloc(size_t num_bytes) = 0;

	/**
	 * Release allocation
	 *
	 * \noapi
	 */
	virtual void _free(Allocation &) = 0;
};


namespace Genode::Memory {

	/* shortcut for the most commonly used type allocation */
	using Allocation = Constrained_allocator::Allocation;
}

#endif /* _INCLUDE__BASE__MEMORY_H_ */
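
The interface is deliberately small: an implementor provides 'try_alloc' and the '_free' hook that the 'Allocation' guard invokes on destruction. A sketch of a trivial bump allocator against this interface, assuming (as the call sites in this commit suggest) that a successful 'Result' is constructed from the allocator reference and an 'Attr':

	/* sketch only, not part of the commit */
	struct Bump_allocator : Genode::Memory::Constrained_allocator
	{
		char           _buf[4096];
		Genode::size_t _used = 0;

		Result try_alloc(Genode::size_t num_bytes) override
		{
			if (_used + num_bytes > sizeof(_buf))
				return Error::DENIED;      /* reflect exhaustion as error */
			void * const ptr = _buf + _used;
			_used += num_bytes;
			return { *this, { ptr, num_bytes } };
		}

		void _free(Allocation &) override { /* bump memory is not reclaimed */ }
	};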
@ -145,9 +145,9 @@ class Genode::Slab : public Allocator
	void free_empty_blocks();


	/*************************
	 ** Allocator interface **
	 *************************/
	/*********************************
	 ** Memory::Allocator interface **
	 *********************************/

	/**
	 * Allocate slab entry
@ -156,6 +156,14 @@ class Genode::Slab : public Allocator
	 * preconfigured slab-entry size are allocated.
	 */
	Alloc_result try_alloc(size_t size) override;

	void _free(Allocation &a) override { _free(a.ptr); }


	/****************************************
	 ** Legacy Genode::Allocator interface **
	 ****************************************/

	void free(void *addr, size_t) override { _free(addr); }
	size_t consumed() const override;
	size_t overhead(size_t) const override { return _block_size/_entries_per_block; }
@ -51,13 +51,20 @@ class Genode::Synced_allocator : public Allocator
	Guard operator () () const { return _synced_object(); }


	/*************************
	 ** Allocator interface **
	 *************************/
	/*********************************
	 ** Memory::Allocator interface **
	 *********************************/

	Alloc_result try_alloc(size_t size) override {
		return _synced_object()->try_alloc(size); }

	void _free(Allocation &a) override { free(a.ptr, a.num_bytes); }


	/****************************************
	 ** Legacy Genode::Allocator interface **
	 ****************************************/

	void free(void *addr, size_t size) override {
		_synced_object()->free(addr, size); }
@ -367,15 +367,15 @@ _ZTSN6Genode6ThreadE R 17
_ZTSN6Genode7ConsoleE R 18
_ZTVN5Timer10ConnectionE D 320
_ZTVN6Genode10Vm_sessionE D 56
_ZTVN6Genode11Sliced_heapE D 72
_ZTVN6Genode11Sliced_heapE D 128
_ZTVN6Genode14Rpc_entrypointE D 80
_ZTVN6Genode14Signal_contextE D 32
_ZTVN6Genode17Region_map_clientE D 72
_ZTVN6Genode17Rm_session_clientE D 48
_ZTVN6Genode17Timeout_schedulerE D 112
_ZTVN6Genode18Allocator_avl_baseE D 128
_ZTVN6Genode4HeapE D 72
_ZTVN6Genode4SlabE D 72
_ZTVN6Genode18Allocator_avl_baseE D 184
_ZTVN6Genode4HeapE D 128
_ZTVN6Genode4SlabE D 128
_ZTVN6Genode5Child14Initial_threadE D 48
_ZTVN6Genode5ChildE D 440
_ZTVN6Genode6OutputE D 48
@ -383,11 +383,22 @@ _ZTVN6Genode6ThreadE D 48
_ZTVN6Genode7ConsoleE D 48
_ZThn104_N5Timer10Connection11set_timeoutEN6Genode12MicrosecondsERNS1_15Timeout_handlerE T
_ZThn104_N5Timer10Connection9curr_timeEv T
_ZThn4_N6Genode11Sliced_heap9try_allocEm T
_ZThn4_N6Genode4Heap9try_allocEm T
_ZThn4_N6Genode4HeapD0Ev T
_ZThn4_N6Genode4HeapD1Ev T
_ZThn4_N6Genode4Slab9try_allocEm T
_ZThn4_N6Genode4SlabD0Ev T
_ZThn4_N6Genode4SlabD1Ev T
_ZThn52_N5Timer10Connection11set_timeoutEN6Genode12MicrosecondsERNS1_15Timeout_handlerE T
_ZThn52_N5Timer10Connection9curr_timeEv T
_ZThn8_N6Genode11Sliced_heap9try_allocEm T
_ZThn8_N6Genode17Timeout_scheduler14handle_timeoutENS_8DurationE T
_ZThn8_N6Genode17Timeout_schedulerD0Ev T
_ZThn8_N6Genode17Timeout_schedulerD1Ev T
_ZThn8_N6Genode18Allocator_avl_base9try_allocEm T
_ZThn8_N6Genode4Heap9try_allocEm T
_ZThn8_N6Genode4Slab9try_allocEm T
_ZdlPvPN6Genode11DeallocatorE T
_ZdlPvPN6Genode9AllocatorE W
_ZdlPvRN6Genode11DeallocatorE T
@ -42,28 +42,27 @@ Mapped_mem_allocator::alloc_aligned(size_t size, unsigned align, Range range)
	return _phys_alloc->alloc_aligned(page_rounded_size, align, range)
		.convert<Alloc_result>(

		[&] (void *phys_addr) -> Alloc_result {
		[&] (Allocation &phys) -> Alloc_result {

			/* allocate range in core's virtual address space */
			return _virt_alloc->alloc_aligned(page_rounded_size, align)
				.convert<Alloc_result>(

				[&] (void *virt_addr) {
				[&] (Allocation &virt) -> Alloc_result {

					_phys_alloc->metadata(phys_addr, { virt_addr });
					_virt_alloc->metadata(virt_addr, { phys_addr });
					_phys_alloc->metadata(phys.ptr, { virt.ptr });
					_virt_alloc->metadata(virt.ptr, { phys.ptr });

					/* make physical page accessible at the designated virtual address */
					_map_local((addr_t)virt_addr, (addr_t)phys_addr, page_rounded_size);
					_map_local((addr_t)virt.ptr, (addr_t)phys.ptr, page_rounded_size);

					return virt_addr;
					phys.deallocate = false;
					virt.deallocate = false;
					return { *this, { virt.ptr, page_rounded_size } };
				},
				[&] (Alloc_error e) {
					error("Could not allocate virtual address range in core of size ",
					      page_rounded_size, " (error ", (int)e, ")");

					/* revert physical allocation */
					_phys_alloc->free(phys_addr);
					return e;
				});
		},
@ -1,6 +1,5 @@
/*
 * \brief  Quota-bounds-checking implementation of the 'Ram_allocator'
 *         interface specifically for core
 * \brief  Quota-bounds-checking implementation of the 'Allocator' for core
 * \author Norman Feske
 * \date   2017-05-02
 */
@ -63,16 +62,19 @@ class Core::Accounted_core_ram : public Allocator

	return _core_mem.try_alloc(page_aligned_size).convert<Alloc_result>(

		[&] (void *ptr) {
		[&] (Allocation &a) -> Alloc_result {
			a.deallocate = false;
			ram.acknowledge();
			caps.acknowledge();
			core_mem_allocated += page_aligned_size;
			return ptr; },
			return { *this, { a.ptr, page_aligned_size } }; },

		[&] (Alloc_error error) {
			return error; });
}

void _free(Allocation &a) override { free(a.ptr, a.num_bytes); }

void free(void *ptr, size_t const size) override
{
	size_t const page_aligned_size = align_addr(size, 12);
@ -123,7 +123,6 @@ class Core::Mapped_mem_allocator : public Core_mem_translator
	 * \param phys_alloc  allocator of physical memory
	 * \param virt_alloc  allocator of core-local virtual memory ranges
	 */

	Mapped_mem_allocator(Synced_mapped_allocator &phys_alloc,
	                     Synced_mapped_allocator &virt_alloc)
	: _phys_alloc(&phys_alloc._alloc), _virt_alloc(&virt_alloc._alloc) { }
@ -178,12 +177,20 @@ class Core::Mapped_mem_allocator : public Core_mem_translator
	using Range_allocator::alloc_aligned; /* import overloads */


	/*************************
	 ** Allocator interface **
	 *************************/
	/*********************************
	 ** Memory::Allocator interface **
	 *********************************/

	Alloc_result try_alloc(size_t size) override {
		return alloc_aligned(size, (unsigned)log2(sizeof(addr_t))); }

	void _free(Allocation &a) override { free(a.ptr, a.num_bytes); }


	/****************************************
	 ** Legacy Genode::Allocator interface **
	 ****************************************/

	void free(void *addr, size_t) override;
	size_t consumed() const override { return _phys_alloc->consumed(); }
	size_t overhead(size_t size) const override {
@ -301,15 +308,22 @@ class Core::Core_mem_allocator : public Core_mem_translator
	using Range_allocator::alloc_aligned; /* import overloads */


	/*************************
	 ** Allocator interface **
	 *************************/
	/*********************************
	 ** Memory::Allocator interface **
	 *********************************/

	Alloc_result try_alloc(size_t size) override
	{
		return alloc_aligned(size, (unsigned)log2(sizeof(addr_t)));
	}

	void _free(Allocation &a) override { free(a.ptr, a.num_bytes); }


	/****************************************
	 ** Legacy Genode::Allocator interface **
	 ****************************************/

	void free(void *addr, size_t size) override
	{
		Mutex::Guard lock_guard(_mutex);
@ -63,13 +63,20 @@ class Core::Synced_range_allocator : public Range_allocator
	void print(Output &out) const { _synced_object()->print(out); }


	/*************************
	 ** Allocator interface **
	 *************************/
	/*********************************
	 ** Memory::Allocator interface **
	 *********************************/

	Alloc_result try_alloc(size_t size) override {
		return _synced_object()->try_alloc(size); }

	void _free(Allocation &a) override { free(a.ptr, a.num_bytes); }


	/****************************************
	 ** Legacy Genode::Allocator interface **
	 ****************************************/

	void free(void *addr, size_t size) override {
		_synced_object()->free(addr, size); }
@ -69,9 +69,19 @@ Io_mem_session_component::_prepare_io_mem(const char *args,
	(req_base == 0xfd6e0000ull && req_size == 4096);

/* allocate region */
if (!skip_iomem_check && _io_mem_alloc.alloc_addr(req_size, req_base).failed()) {
	error("I/O memory ", Hex_range<addr_t>(req_base, req_size), " not available");
	return Dataspace_attr();
if (!skip_iomem_check) {

	bool const ok = _io_mem_alloc.alloc_addr(req_size, req_base).convert<bool>(

		[&] (Range_allocator::Allocation &io_mem) {
			io_mem.deallocate = false; return true; },

		[&] (Alloc_error) { return false; });

	if (!ok) {
		error("I/O memory ", Hex_range<addr_t>(req_base, req_size), " not available");
		return Dataspace_attr();
	}
}

/* request local mapping */
@ -20,6 +20,8 @@ using namespace Core;
Ram_dataspace_factory::Alloc_ram_result
Ram_dataspace_factory::alloc_ram(size_t ds_size, Cache cache)
{
	using Range_allocation = Range_allocator::Allocation;

	/* zero-sized dataspaces are not allowed */
	if (!ds_size)
		return Alloc_ram_error::DENIED;
@ -35,7 +37,7 @@ Ram_dataspace_factory::alloc_ram(size_t ds_size, Cache cache)
	 * If this does not work, we subsequently weaken the alignment constraint
	 * until the allocation succeeds.
	 */
	Range_allocator::Alloc_result allocated_range = Allocator::Alloc_error::DENIED;
	Range_allocator::Alloc_result allocated_range = Alloc_error::DENIED;

	/* apply constraints */
	if (_phys_range.start != 0 || _phys_range.end != ~0UL) {
@ -82,36 +84,15 @@ Ram_dataspace_factory::alloc_ram(size_t ds_size, Cache cache)
		error("out of physical memory while allocating ", ds_size, " bytes ",
		      "in range [", Hex(_phys_range.start), "-", Hex(_phys_range.end), "]");

		if (allocated_range == Allocator::Alloc_error::OUT_OF_RAM)
		if (allocated_range == Alloc_error::OUT_OF_RAM)
			return Alloc_ram_error::OUT_OF_RAM;

		if (allocated_range == Allocator::Alloc_error::OUT_OF_CAPS)
		if (allocated_range == Alloc_error::OUT_OF_CAPS)
			return Alloc_ram_error::OUT_OF_CAPS;

		return Alloc_ram_error::DENIED;
	}

	/*
	 * Helper to release the allocated physical memory whenever we leave the
	 * scope via an exception.
	 */
	struct Phys_alloc_guard
	{
		Range_allocator &phys_alloc;
		struct { void * ds_addr = nullptr; };
		bool keep = false;

		Phys_alloc_guard(Range_allocator &phys_alloc)
		: phys_alloc(phys_alloc) { }

		~Phys_alloc_guard() { if (!keep && ds_addr) phys_alloc.free(ds_addr); }

	} phys_alloc_guard(_phys_alloc);

	allocated_range.with_result(
		[&] (void *ptr) { phys_alloc_guard.ds_addr = ptr; },
		[&] (Allocator::Alloc_error) { /* already checked above */ });

	/*
	 * For non-cached RAM dataspaces, we mark the dataspace as write
	 * combined and expect the pager to evaluate this dataspace property
@ -119,9 +100,12 @@ Ram_dataspace_factory::alloc_ram(size_t ds_size, Cache cache)
	 */
	Dataspace_component *ds_ptr = nullptr;
	try {
		ds_ptr = new (_ds_slab)
			Dataspace_component(ds_size, (addr_t)phys_alloc_guard.ds_addr,
			                    cache, true, this);
		allocated_range.with_result(
			[&] (Range_allocation &range) {
				ds_ptr = new (_ds_slab)
					Dataspace_component(ds_size, (addr_t)range.ptr,
					                    cache, true, this); },
			[] (Alloc_error) { });
	}
	catch (Out_of_ram)  { return Alloc_ram_error::OUT_OF_RAM; }
	catch (Out_of_caps) { return Alloc_ram_error::OUT_OF_CAPS; }
@ -148,7 +132,9 @@ Ram_dataspace_factory::alloc_ram(size_t ds_size, Cache cache)

	Dataspace_capability ds_cap = _ep.manage(&ds);

	phys_alloc_guard.keep = true;
	allocated_range.with_result(
		[&] (Range_allocation &a) { a.deallocate = false; },
		[]  (Alloc_error) { });

	return static_cap_cast<Ram_dataspace>(ds_cap);
}
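
Note how the 'Allocation' guard subsumes the hand-written 'Phys_alloc_guard' removed above: each early return releases the pending physical allocation automatically, and only the success path pins it by clearing 'deallocate'. A condensed sketch of that ownership-transfer idiom ('setup' stands in for arbitrary follow-up work that may fail; it is not part of the commit):

	/* sketch of the idiom used above */
	alloc.try_alloc(n).with_result(
		[&] (Range_allocator::Allocation &a) {
			if (!setup(a.ptr))     /* hypothetical failure path  */
				return;            /* 'a' is freed automatically */
			a.deallocate = false;  /* success: keep the block    */
		},
		[&] (Alloc_error) { /* reflected to the caller */ });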
@ -165,6 +165,8 @@ void Rm_faulter::continue_after_resolved_fault()
Region_map::Attach_result
Region_map_component::_attach(Dataspace_capability ds_cap, Attach_attr const core_attr)
{
	using Region_allocation = Range_allocator::Allocation;

	Attr const attr = core_attr.attr;

	/* serialize access */
@ -176,8 +178,6 @@ Region_map_component::_attach(Dataspace_capability ds_cap, Attach_attr const cor

	auto lambda = [&] (Dataspace_component *dsc) -> Attach_result {

		using Alloc_error = Range_allocator::Alloc_error;

		/* check dataspace validity */
		if (!dsc)
			return Attach_error::INVALID_DATASPACE;
@ -204,7 +204,10 @@ Region_map_component::_attach(Dataspace_capability ds_cap, Attach_attr const cor
		if (attr.use_at) {
			Alloc_error error = Alloc_error::DENIED;
			_map.alloc_addr(size, attr.at).with_result(
				[&] (void *ptr) { at = addr_t(ptr); at_defined = true; },
				[&] (Region_allocation &a) {
					a.deallocate = false;
					at = addr_t(a.ptr);
					at_defined = true; },
				[&] (Alloc_error e) { error = e; });

			if (error == Alloc_error::OUT_OF_RAM) return Attach_error::OUT_OF_RAM;
@ -234,7 +237,10 @@ Region_map_component::_attach(Dataspace_capability ds_cap, Attach_attr const cor
			/* try allocating the aligned region */
			Alloc_error error = Alloc_error::DENIED;
			_map.alloc_aligned(size, unsigned(align_log2)).with_result(
				[&] (void *ptr) { at = addr_t(ptr); at_defined = true; },
				[&] (Region_allocation &a) {
					a.deallocate = false;
					at = addr_t(a.ptr);
					at_defined = true; },
				[&] (Alloc_error e) { error = e; });

			if (error == Alloc_error::OUT_OF_RAM) return Attach_error::OUT_OF_RAM;
@ -32,18 +32,18 @@ Io_port_session_component::Io_port_session_component(Range_allocator &io_port_al

/* allocate region (also checks out-of-bounds regions) */
io_port_alloc.alloc_addr(size, base).with_error(
	[&] (Allocator::Alloc_error e) {
	[&] (Alloc_error e) {

		switch (e) {
		case Range_allocator::Alloc_error::DENIED:
		case Alloc_error::DENIED:
			error("I/O port ", Hex_range<uint16_t>(base, size), " not available");
			throw Service_denied();

		case Range_allocator::Alloc_error::OUT_OF_RAM:
		case Alloc_error::OUT_OF_RAM:
			error("I/O port allocator ran out of RAM");
			throw Service_denied();

		case Range_allocator::Alloc_error::OUT_OF_CAPS:
		case Alloc_error::OUT_OF_CAPS:
			error("I/O port allocator ran out of caps");
			throw Service_denied();
		}
@ -67,6 +67,8 @@ class Stack_area_region_map : public Region_map
	 */
	Attach_result attach(Dataspace_capability, Attr const &attr) override
	{
		using Phys_allocation = Range_allocator::Allocation;

		/* allocate physical memory */
		size_t const size = round_page(attr.size);

@ -74,10 +76,10 @@ class Stack_area_region_map : public Region_map

		return phys.alloc_aligned(size, get_page_size_log2()).convert<Attach_result>(

			[&] (void *phys_ptr) -> Attach_result {
			[&] (Phys_allocation &phys) -> Attach_result {

				try {
					addr_t const phys_base = (addr_t)phys_ptr;
					addr_t const phys_base = (addr_t)phys.ptr;

					Dataspace_component &ds = *new (&_ds_slab)
						Dataspace_component(size, 0, (addr_t)phys_base, CACHED, true, 0);
@ -90,18 +92,18 @@ class Stack_area_region_map : public Region_map
						error("could not map phys ", Hex(ds.phys_addr()),
						      " at local ", Hex(core_local_addr));

						phys.free(phys_ptr);
						return Attach_error::INVALID_DATASPACE;
					}

					ds.assign_core_local_addr((void*)core_local_addr);
					phys.deallocate = false;

					return Range { .start = attr.at, .num_bytes = size };
				}
				catch (Out_of_ram)  { return Attach_error::OUT_OF_RAM; }
				catch (Out_of_caps) { return Attach_error::OUT_OF_CAPS; }
			},
			[&] (Range_allocator::Alloc_error) {
			[&] (Alloc_error) {
				error("could not allocate backing store for new stack");
				return Attach_error::REGION_CONFLICT; });
	}
@ -63,13 +63,13 @@ void Vm_session_component::attach(Dataspace_capability const cap,
	attribute.offset > dsc.size() - attribute.size)
		throw Invalid_dataspace();

	using Alloc_error = Range_allocator::Alloc_error;

	Region_map_detach &rm_detach = *this;

	using Guest_phys_allocation = Range_allocator::Allocation;

	_map.alloc_addr(attribute.size, guest_phys).with_result(

		[&] (void *) {
		[&] (Guest_phys_allocation &allocation) {

			Rm_region::Attr const region_attr
			{
@ -93,6 +93,8 @@ void Vm_session_component::attach(Dataspace_capability const cap,

			Rm_region &region = *_map.metadata((void *)guest_phys);

			allocation.deallocate = false;

			/* inform dataspace about attachment */
			dsc.attached_to(region);
		},
@ -98,8 +98,9 @@ void Allocator_avl_base::Block::recompute()
Allocator_avl_base::Alloc_md_result Allocator_avl_base::_alloc_block_metadata()
{
	return _md_alloc.try_alloc(sizeof(Block)).convert<Alloc_md_result>(
		[&] (void *ptr) {
			return construct_at<Block>(ptr, 0U, 0U, 0); },
		[&] (Range_allocator::Allocation &a) {
			a.deallocate = false;
			return construct_at<Block>(a.ptr, 0U, 0U, 0); },
		[&] (Alloc_error error) {
			return error; });
}
@ -337,10 +338,10 @@ Allocator_avl_base::_allocate(size_t const size, unsigned align, Range range,
	/* create allocated block */
	return _alloc_block_metadata().convert<Alloc_result>(

		[&] (Block *new_block_ptr) {
		[&] (Block *new_block_ptr) -> Alloc_result {
			_add_block(*new_block_ptr, new_addr, size, Block::USED);
			return reinterpret_cast<void *>(new_addr); },

			return { *this, { reinterpret_cast<void *>(new_addr), size } };
		},
		[&] (Alloc_error error) {
			return error; });
},
@ -145,8 +145,9 @@ Heap::_allocate_dataspace(size_t size, bool enforce_separate_metadata)
	}

	return metadata.convert<Result>(
		[&] (void *md_ptr) -> Result {
			Dataspace &ds = *construct_at<Dataspace>(md_ptr, allocation.cap,
		[&] (Allocation &md) -> Result {
			md.deallocate = false;
			Dataspace &ds = *construct_at<Dataspace>(md.ptr, allocation.cap,
			                                         (void *)attach_guard.range.start, size);
			_ds_pool.insert(&ds);
			return &ds;
@ -163,9 +164,10 @@ Allocator::Alloc_result Heap::_try_local_alloc(size_t size)
{
	return _alloc->alloc_aligned(size, log2(16U)).convert<Alloc_result>(

		[&] (void *ptr) {
		[&] (Allocation &a) -> Alloc_result {
			a.deallocate = false;
			_quota_used += size;
			return ptr; },
			return { *this, { a.ptr, size } }; },

		[&] (Alloc_error error) {
			return error; });
@ -188,9 +190,9 @@ Allocator::Alloc_result Heap::_unsynchronized_alloc(size_t size)

	return _allocate_dataspace(dataspace_size, true).convert<Alloc_result>(

		[&] (Dataspace *ds_ptr) {
		[&] (Dataspace *ds_ptr) -> Alloc_result {
			_quota_used += ds_ptr->size;
			return ds_ptr->local_addr; },
			return { *this, { ds_ptr->local_addr, size } }; },

		[&] (Alloc_error error) {
			return error; });
@ -200,7 +202,11 @@ Allocator::Alloc_result Heap::_unsynchronized_alloc(size_t size)
{
	Alloc_result result = _try_local_alloc(size);
	if (result.ok())
		return result;
	return result.convert<Alloc_result>(
		[&] (Allocation &a) -> Alloc_result {
			a.deallocate = false;
			return { *this, a }; },
		[&] (Alloc_error e) { return e; });
}

size_t dataspace_size = size
@ -255,8 +255,9 @@ Slab::New_slab_block_result Slab::_new_slab_block()

	Slab &this_slab = *this;
	return _backing_store->try_alloc(_block_size).convert<Result>(
		[&] (void *sb) {
			return construct_at<Block>(sb, this_slab); },
		[&] (Allocation &sb) {
			sb.deallocate = false;
			return construct_at<Block>(sb.ptr, this_slab); },
		[&] (Alloc_error error) {
			return error; });
}
@ -408,7 +409,7 @@ Allocator::Alloc_result Slab::try_alloc(size_t size)

	_total_avail--;

	return ptr;
	return { *this, { ptr, _slab_size } };
}
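
A small subtlety in the hunk above: a slab always hands out entries of its preconfigured '_slab_size', so the returned 'Allocation' reports '_slab_size' as 'num_bytes' regardless of the (possibly smaller) requested size, and the slab's '_free' simply forwards 'a.ptr' to its internal '_free(void *)'. For instance:

	/* illustration: with _slab_size = 64,
	 * slab.try_alloc(24) -> Allocation { .ptr = entry, .num_bytes = 64 }
	 */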
@ -38,10 +38,10 @@ Sliced_heap::~Sliced_heap()
}


Allocator::Alloc_result Sliced_heap::try_alloc(size_t size)
Allocator::Alloc_result Sliced_heap::try_alloc(size_t requested_size)
{
	/* allocation includes space for block meta data and is page-aligned */
	size = align_addr(size + sizeof(Block), 12);
	size_t const size = align_addr(requested_size + sizeof(Block), 12);

	return _ram_alloc.try_alloc(size).convert<Alloc_result>(

@ -93,8 +93,7 @@ Allocator::Alloc_result Sliced_heap::try_alloc(size_t size)
			attach_guard.keep = true;

			/* skip meta data prepended to the payload portion of the block */
			void *ptr = block + 1;
			return ptr;
			return { *this, { .ptr = block + 1, .num_bytes = requested_size } };
		},
		[&] (Ram::Error e) { return e; });
}
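
Worth noting: the returned allocation now advertises the caller's 'requested_size', not the page-padded slice. For example, a 100-byte request yields

	/* requested_size = 100
	 * size           = align_addr(100 + sizeof(Block), 12) = 4096 bytes of backing store
	 * result         = { .ptr = block + 1, .num_bytes = 100 }
	 */

so 'free(a.ptr, a.num_bytes)' in '_free' receives the originally requested size rather than the padded one.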
@ -177,10 +177,9 @@ extern "C" void *__emutls_get_address(void *obj)
	void *address = nullptr;

	cxx_heap().try_alloc(emutls_object->size).with_result(
		[&] (void *ptr) { address = ptr; },
		[&] (Allocator::Alloc_error e) {
			error(__func__,
			      ": could not allocate thread-local variable, error ", (int)e); });
		[&] (Memory::Allocation &a) { a.deallocate = false; address = a.ptr; },
		[&] (Alloc_error e) {
			error(__func__, ": could not allocate thread-local variable, ", e); });
	if (!address)
		return nullptr;

@ -80,10 +80,9 @@ extern "C" void *malloc(size_t size)

	void *addr = nullptr;
	cxx_heap().try_alloc(real_size).with_result(
		[&] (void *ptr) { addr = ptr; },
		[&] (Allocator::Alloc_error error) {
			Genode::error(__func__,
			              ": cxx_heap allocation failed with error ", (int)error); });
		[&] (Memory::Allocation &a) { a.deallocate = false; addr = a.ptr; },
		[&] (Alloc_error e) {
			Genode::error(__func__, ": cxx_heap allocation failed with ", e); });
	if (!addr)
		return nullptr;
@ -84,6 +84,7 @@ class Linker::Region_map
	using Alloc_region_result = Attempt<addr_t, Alloc_region_error>;
	using Attach_result       = Genode::Region_map::Attach_result;
	using Attr                = Genode::Region_map::Attr;
	using Range_allocation    = Range_allocator::Allocation;

	/**
	 * Allocate region anywhere within the region map
@ -91,8 +92,11 @@ class Linker::Region_map
	Alloc_region_result alloc_region(size_t size)
	{
		return _range.alloc_aligned(size, get_page_size_log2()).convert<Alloc_region_result>(
			[&] (void *ptr) { return (addr_t)ptr; },
			[&] (Allocator::Alloc_error e) { return e; });
			[&] (Range_allocation &a) {
				a.deallocate = false;
				return (addr_t)a.ptr;
			},
			[&] (Alloc_error e) { return e; });
	}

	/**
@ -101,8 +105,11 @@ class Linker::Region_map
	Alloc_region_result alloc_region_at(size_t size, addr_t vaddr)
	{
		return _range.alloc_addr(size, vaddr).convert<Alloc_region_result>(
			[&] (void *ptr) { return (addr_t)ptr; },
			[&] (Allocator::Alloc_error e) { return e; });
			[&] (Range_allocation &a) {
				a.deallocate = false;
				return (addr_t)a.ptr;
			},
			[&] (Alloc_error e) { return e; });
	}

	Alloc_region_result alloc_region_at_end(size_t size)
@ -41,34 +41,33 @@ struct E : C, D

struct Allocator : Genode::Allocator
{
	Genode::Heap heap;
	Genode::Allocator & a { heap };
	Genode::Heap _heap;
	Genode::Allocator &_a { _heap };

	Allocator(Genode::Env & env) : heap(env.ram(), env.rm()) { }
	Allocator(Genode::Env &env) : _heap(env.ram(), env.rm()) { }
	virtual ~Allocator() { }

	Genode::size_t consumed() const override {
		return a.consumed(); }
		return _a.consumed(); }

	Genode::size_t overhead(Genode::size_t size) const override {
		return a.overhead(size); }
		return _a.overhead(size); }

	bool need_size_for_free() const override {
		return a.need_size_for_free(); }
		return _a.need_size_for_free(); }

	Alloc_result try_alloc(Genode::size_t size) override
	{
		Alloc_result const result = a.try_alloc(size);

		log("Allocator::alloc()");

		return result;
		return _a.try_alloc(size);
	}

	void _free(Allocation &a) override { free(a.ptr, a.num_bytes); }

	void free(void *p, Genode::size_t size) override
	{
		log("Allocator::free()");
		a.free(p, size);
		_a.free(p, size);
	}
};
@ -227,8 +227,10 @@ class Pci_driver
	using namespace Genode;

	return _alloc.alloc_aligned(size, align).convert<Genode::addr_t>(
		[&] (void *ptr) { return (addr_t)ptr; },
		[&] (Allocator_avl::Alloc_error) { return 0UL; });
		[&] (Range_allocator::Allocation &a) {
			a.deallocate = false;
			return (addr_t)a.ptr; },
		[&] (Alloc_error) { return 0UL; });
}

void free(Genode::addr_t virt, Genode::size_t size) {
@ -321,9 +321,10 @@ extern "C" void *dde_dma_alloc(dde_size_t size, dde_size_t align,
{
	return allocator().alloc_aligned(size, Genode::log2(align)).convert<void *>(

		[&] (void *ptr) { return ptr; },
		[&] (Genode::Allocator::Allocation &a) {
			a.deallocate = false; return a.ptr; },

		[&] (Genode::Range_allocator::Alloc_error) -> void * {
		[&] (Genode::Alloc_error) -> void * {
			Genode::error("memory allocation failed in alloc_memblock ("
			              "size=", size, " "
			              "align=", Genode::Hex(align), " "
@ -467,21 +468,32 @@ struct Slab_backend_alloc : public Genode::Allocator,
	Genode::addr_t start() const { return _base; }
	Genode::addr_t end()   const { return _base + VM_SIZE - 1; }

	/*************************
	 ** Allocator interface **
	 *************************/

	/*********************************
	 ** Memory::Allocator interface **
	 *********************************/

	Alloc_result try_alloc(Genode::size_t size) override
	{
		Alloc_result result = _range.try_alloc(size);
		if (result.ok())
			return result;

		return _extend_one_block().convert<Alloc_result>(
			[&] (Ok) { return _range.try_alloc(size); },
			[&] (Alloc_error e) { return e; });
		return _range.try_alloc(size).convert<Alloc_result>(
			[&] (Allocation &a) -> Alloc_result {
				a.deallocate = false;
				return { *this, a };
			},
			[&] (Alloc_error) {
				return _extend_one_block().convert<Alloc_result>(
					[&] (Ok) { return _range.try_alloc(size); },
					[&] (Alloc_error e) { return e; });
			});
	}

	void _free(Allocation &a) override { free(a.ptr, a.num_bytes); }


	/*************************
	 ** Allocator interface **
	 *************************/

	void free(void *addr, Genode::size_t size) { _range.free(addr, size); }
	Genode::size_t overhead(Genode::size_t size) const { return 0; }
	bool need_size_for_free() const { return false; }
@ -511,7 +523,8 @@ class Slab_alloc : public Genode::Slab
	Genode::addr_t alloc()
	{
		return Slab::try_alloc(_object_size).convert<Genode::addr_t>(
			[&] (void *ptr) { return (Genode::addr_t)ptr; },
			[&] (Allocator::Allocation &a) {
				a.deallocate = false; return (Genode::addr_t)a.ptr; },
			[&] (Alloc_error) -> Genode::addr_t { return 0; });
	}
@ -71,9 +71,11 @@ void * Lx_kit::Mem_allocator::alloc(size_t const size, size_t const align,

	return _mem.alloc_aligned(size, (unsigned)log2(align)).convert<void *>(

		[&] (void *ptr) { return cleared_allocation(ptr, size); },
		[&] (Allocator::Allocation &a) {
			a.deallocate = false;
			return cleared_allocation(a.ptr, size); },

		[&] (Range_allocator::Alloc_error) {
		[&] (Alloc_error) {

			/*
			 * Restrict the minimum buffer size to avoid the creation of
@ -97,9 +99,11 @@ void * Lx_kit::Mem_allocator::alloc(size_t const size, size_t const align,
	/* re-try allocation */
	void * const virt_addr = _mem.alloc_aligned(size, (unsigned)log2(align)).convert<void *>(

		[&] (void *ptr) { return cleared_allocation(ptr, size); },
		[&] (Allocator::Allocation &a) {
			a.deallocate = false;
			return cleared_allocation(a.ptr, size); },

		[&] (Range_allocator::Alloc_error) -> void * {
		[&] (Alloc_error) -> void * {
			error("memory allocation failed for ", size, " align ", align);
			return nullptr; }
	);
@ -103,7 +103,7 @@ namespace Allocator {
	_ds_cap[_index] = Rump::env().env().ram().try_alloc(BLOCK_SIZE, _cache)
		.template convert<Ram_dataspace_capability>(
			[&] (Ram::Allocation &a) { a.deallocate = false; return a.cap; },
			[&] (Allocator::Alloc_error) { return Ram_dataspace_capability(); }
			[&] (Alloc_error) { return Ram_dataspace_capability(); }
		);

	if (!_ds_cap[_index].valid()) {
@ -147,27 +147,34 @@ namespace Allocator {
		_range(&Rump::env().heap())
	{ }

	/**
	 * Allocate
	 */
	Alloc_result try_alloc(size_t size) override
	{
		Alloc_result result = _range.try_alloc(size);
		if (result.ok())
			return result;
		return _range.try_alloc(size).convert<Alloc_result>(
			[&] (Allocation &a) -> Alloc_result {
				a.deallocate = false;
				return { *this, a };
			},
			[&] (Alloc_error) -> Alloc_result {
				if (!_alloc_block())
					return Alloc_error::DENIED;

		if (!_alloc_block())
			return Alloc_error::DENIED;

		return _range.try_alloc(size);
				return _range.try_alloc(size).convert<Alloc_result>(
					[&] (Allocation &a) -> Alloc_result {
						a.deallocate = false;
						return { *this, a };
					},
					[&] (Alloc_error e) { return e; });
			});
	}

	void _free(Allocation &a) override { free(a.ptr, a.num_bytes); }

	void *alloc_aligned(size_t size, unsigned align = 0)
	{
		Alloc_result result = _range.alloc_aligned(size, align);
		if (result.ok())
			return result.convert<void *>(
				[&] (void *ptr) { return ptr; },
				[&] (Allocation &a) { a.deallocate = false; return a.ptr; },
				[&] (Alloc_error) -> void * { return nullptr; });

		if (!_alloc_block())
@ -175,8 +182,9 @@ namespace Allocator {

		return _range.alloc_aligned(size, align).convert<void *>(

			[&] (void *ptr) {
				return ptr; },
			[&] (Allocation &a) {
				a.deallocate = false;
				return a.ptr; },

			[&] (Alloc_error e) -> void * {
				error("backend allocator: Unable to allocate memory "
@ -44,12 +44,12 @@ extern "C" void *malloc(size_t size)

	return alloc().try_alloc(real_size).convert<void *>(

		[&] (void *ptr) {
		[&] (Allocator::Allocation &a) {
			a.deallocate = false;
			*(unsigned long *)a.ptr = real_size;
			return (unsigned long *)a.ptr + 1; },

			*(unsigned long *)ptr = real_size;
			return (unsigned long *)ptr + 1; },

		[&] (Allocator::Alloc_error) {
		[&] (Alloc_error) {
			return nullptr; });
}
@ -79,15 +79,17 @@ class Genode::Cached_font : public Text_painter::Font

	return _alloc.try_alloc(size).convert<Alloc_result>(

		[&] (void *ptr) {
			memset(ptr, 0, size);
		[&] (Allocation &a) -> Result {
			memset(a.ptr, 0, size);
			_consumed_bytes += size + overhead(size);
			return ptr; },
			a.deallocate = false;
			return { *this, { a.ptr, size } }; },

		[&] (Alloc_error error) {
			return error; });
		[&] (Error e) { return e; });
}

void _free(Allocation &a) override { free(a.ptr, a.num_bytes); }

size_t consumed() const override { return _alloc.consumed(); }

size_t overhead(size_t size) const override { return _alloc.overhead(size); };
@ -92,9 +92,9 @@ struct Genode::Dynamic_array

	_alloc.try_alloc(sizeof(Element)*new_capacity).with_result(

		[&] (void *ptr) {
		[&] (Allocator::Allocation &a) {

			Element *new_array = (Element *)ptr;
			Element *new_array = (Element *)a.ptr;

			for (unsigned i = 0; i < new_capacity; i++)
				construct_at<Element>(&new_array[i]);
@ -108,9 +108,10 @@ struct Genode::Dynamic_array

			_array    = new_array;
			_capacity = new_capacity;

			a.deallocate = false;
		},
		[&] (Allocator::Alloc_error e) {
			Allocator::throw_alloc_error(e); }
		[&] (Alloc_error e) { raise(e); }
	);
}
@ -27,7 +27,12 @@ struct Libc::Allocator : Genode::Allocator
{
	typedef Genode::size_t size_t;

	Alloc_result try_alloc(size_t size) override { return malloc(size); }
	Alloc_result try_alloc(size_t size) override
	{
		return { *this, { malloc(size), size } };
	}

	void _free(Allocation &a) override { ::free(a.ptr); }

	void free(void *addr, size_t size) override { ::free(addr); }
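
With this change, the libc allocator itself owns the result: '_free' releases via '::free', so an 'Allocation' that goes out of scope is returned to the C heap unless the caller pins it. A usage sketch (not part of the commit):

	/* sketch only */
	Libc::Allocator a { };
	a.try_alloc(64).with_result(
		[&] (Genode::Allocator::Allocation &alloc) {
			alloc.deallocate = false;   /* keep; release later via free() */
		},
		[&] (Genode::Alloc_error) { });

Note that the wrapper above constructs a success result unconditionally; a failed malloc() yields an 'Allocation' with a null 'ptr' rather than an 'Alloc_error'.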
@ -1 +1 @@
235417ce6c12f84a121ac094c155b883b7abaf68
12ee36349c907f2c90d59bb3717ced1d9ff5b78a
@ -384,8 +384,8 @@ extern "C" int getpid()
extern "C" void *malloc(size_t size)
{
	return gcov_env->heap.try_alloc(size).convert<void *>(
		[&] (void *ptr) { return ptr; },
		[&] (Allocator::Alloc_error) -> void * { return nullptr; });
		[&] (Allocator::Allocation &a) { a.deallocate = false; return a.ptr; },
		[&] (Alloc_error) -> void * { return nullptr; });
}
@ -99,13 +99,14 @@ int Libc::Mem_alloc_impl::Dataspace_pool::expand(size_t size, Range_allocator *a
	/* now that we have new backing store, allocate Dataspace structure */
	return alloc->alloc_aligned(sizeof(Dataspace), 2).convert<int>(

		[&] (void *ptr) {
		[&] (Range_allocator::Allocation &a) {
			/* add dataspace information to list of dataspaces */
			Dataspace *ds = construct_at<Dataspace>(ptr, new_ds_cap, range);
			Dataspace *ds = construct_at<Dataspace>(a.ptr, new_ds_cap, range);
			insert(ds);
			a.deallocate = false;
			return 0; },

		[&] (Allocator::Alloc_error) {
		[&] (Alloc_error) {
			warning("libc: could not allocate meta data - this should never happen");
			return -1; });
}
@ -116,13 +117,16 @@ void *Libc::Mem_alloc_impl::alloc(size_t size, size_t align_log2)
	/* serialize access of heap functions */
	Mutex::Guard guard(_mutex);

	void *out_addr = nullptr;
	auto alloc_or_nullptr = [&]
	{
		return _alloc.alloc_aligned(size, align_log2).convert<void *>(
			[&] (Range_allocator::Allocation &a) {
				a.deallocate = false; return a.ptr; },
			[&] (Alloc_error) { return nullptr; });
	};

	/* try allocation at our local allocator */
	_alloc.alloc_aligned(size, align_log2).with_result(
		[&] (void *ptr) { out_addr = ptr; },
		[&] (Allocator::Alloc_error) { });

	void * const out_addr = alloc_or_nullptr();
	if (out_addr)
		return out_addr;

@ -151,11 +155,7 @@ void *Libc::Mem_alloc_impl::alloc(size_t size, size_t align_log2)
	}

	/* allocate originally requested block */
	_alloc.alloc_aligned(size, align_log2).with_result(
		[&] (void *ptr) { out_addr = ptr; },
		[&] (Allocator::Alloc_error) { });

	return out_addr;
	return alloc_or_nullptr();
}
@ -61,8 +61,8 @@ class Libc::Slab_alloc : public Slab
	void *alloc()
	{
		return Slab::try_alloc(_object_size).convert<void *>(
			[&] (void *ptr) { return ptr; },
			[&] (Alloc_error) { return nullptr; });
			[&] (Allocation &a) { a.deallocate = false; return a.ptr; },
			[&] (Alloc_error) { return nullptr; });
	}

	void free(void *ptr) { Slab::free(ptr, _object_size); }
@ -166,8 +166,9 @@ class Libc::Malloc
	/* use backing store if requested memory is larger than largest slab */
	if (msb > SLAB_STOP)
		_backing_store.try_alloc(real_size).with_result(
			[&] (void *ptr) { alloc_addr = ptr; },
			[&] (Allocator::Alloc_error) { });
			[&] (Range_allocator::Allocation &a) {
				a.deallocate = false; alloc_addr = a.ptr; },
			[&] (Alloc_error) { });
	else
		alloc_addr = _slabs[msb - SLAB_START]->alloc();
@ -245,10 +245,11 @@ struct Gpu::Vram
	Allocation alloc(Genode::size_t size)
	{
		return _alloc.alloc_aligned(size, 12).convert<Allocation>(
			[&] (void *offset) {
				return Allocation { _elem.id(), _cap, Genode::off_t(offset), size };
			[&] (Genode::Range_allocator::Allocation &a) {
				a.deallocate = false;
				return Allocation { _elem.id(), _cap, Genode::off_t(a.ptr), size };
			},
			[&] (Genode::Allocator::Alloc_error err) -> Allocation {
			[&] (Genode::Alloc_error err) -> Allocation {
				return Allocation();
			});
	}
@ -309,8 +309,11 @@ class Lima::Call
	Gpu::Virtual_address alloc(uint32_t size)
	{
		return Gpu::Virtual_address { _alloc.alloc_aligned(size, 12).convert<::uint64_t>(
			[&] (void *ptr) { return (::uint64_t)ptr; },
			[&] (Range_allocator::Alloc_error) -> ::uint64_t {
			[&] (Range_allocator::Allocation &a) {
				a.deallocate = false;
				return ::uint64_t(a.ptr);
			},
			[&] (Alloc_error) -> ::uint64_t {
				error("Could not allocate GPU virtual address for size: ", size);
				return 0;
			}) };
@ -83,8 +83,9 @@ extern "C" {
void *genode_malloc(unsigned long size)
{
	return Lwip::_heap->try_alloc(size).convert<void *>(
		[&] (void *ptr) { return ptr; },
		[&] (Genode::Allocator::Alloc_error) -> void * { return nullptr; });
		[&] (Genode::Allocator::Allocation &a) {
			a.deallocate = false; return a.ptr; },
		[&] (Genode::Alloc_error) -> void * { return nullptr; });
}

void *genode_calloc(unsigned long number, unsigned long size)
@ -183,12 +183,12 @@ index 0000000..78ee830
+
+  return heap().try_alloc(size).convert<void *>(
+
+    [&] (void *ptr) {
+    [&] (Genode::Range_allocator::Allocation &a) {
+      IncreaseTotalMmap(size);
+      return ptr;
+      a.deallocate = false; return a.ptr;
+    },
+
+    [&] (Genode::Allocator::Alloc_error) -> void * {
+    [&] (Genode::Alloc_error) -> void * {
+      ReportMmapFailureAndDie(size, mem_type, "allocate", 0, raw_report);
+      return nullptr;
+    }
@ -57,32 +57,30 @@ struct Nic::Packet_allocator : Genode::Packet_allocator
	Packet_allocator(Genode::Allocator *md_alloc)
	: Genode::Packet_allocator(md_alloc, DEFAULT_PACKET_SIZE) {}

	Alloc_result try_alloc(size_t size) override
	Result try_alloc(size_t size) override
	{
		if (!size || size > OFFSET_PACKET_SIZE) {
			Genode::error("unsupported NIC packet size ", size);
			return Alloc_result { Alloc_error::DENIED };
			return Error::DENIED;
		}

		Alloc_result result = Genode::Packet_allocator::try_alloc(size + OFFSET);

		result.with_result(
			[&] (void *content) {
		return Genode::Packet_allocator::try_alloc(size + OFFSET).convert<Result>(
			[&] (Allocation &a) -> Result {
				/* assume word-aligned packet buffer and offset packet by 2 bytes */
				if ((Genode::addr_t)content & 0b11) {
				if ((Genode::addr_t)a.ptr & 0b11) {
					Genode::error("NIC packet allocation not word-aligned");
					result = { Alloc_error::DENIED };
				} else {
					result = Alloc_result {
						reinterpret_cast<void *>((Genode::uint8_t *)content + OFFSET) };
					return Error::DENIED;
				}
				a.deallocate = false;
				return { *this, {
					.ptr       = reinterpret_cast<void *>((Genode::uint8_t *)a.ptr + OFFSET),
					.num_bytes = size } };
			},
			[] (Alloc_error) { }
		);

		return result;
			[] (Error e) { return e; });
	}

	void _free(Allocation &a) override { free(a.ptr, a.num_bytes); }

	void free(void *addr, size_t size) override
	{
		if (!size || size > OFFSET_PACKET_SIZE) {
@ -157,8 +157,10 @@ class Genode::Packet_allocator : public Genode::Range_allocator

	_array->set(i, cnt);
	_next = i + cnt;
	return reinterpret_cast<void *>(i * _block_size
	                                + _base);
	return { *this, {
		.ptr       = reinterpret_cast<void *>(i * _block_size + _base),
		.num_bytes = size } };

}
} catch (typename Bit_array_base::Invalid_index_access) { }

@ -170,6 +172,8 @@ class Genode::Packet_allocator : public Genode::Range_allocator
	return Alloc_error::DENIED;
}

void _free(Allocation &a) override { free(a.ptr, a.num_bytes); }

void free(void *addr, size_t size) override
{
	addr_t i = (((addr_t)addr) - _base) / _block_size;
@ -699,10 +699,11 @@ class Genode::Packet_stream_source : private Packet_stream_base

	return _packet_alloc.alloc_aligned(size, align).convert<Packet_descriptor>(

		[&] (void *base) {
			return Packet_descriptor((Genode::off_t)base, size); },
		[&] (Range_allocator::Allocation &a) {
			a.deallocate = false;
			return Packet_descriptor((Genode::off_t)a.ptr, size); },

		[&] (Allocator::Alloc_error) -> Packet_descriptor {
		[&] (Alloc_error) -> Packet_descriptor {
			throw Packet_alloc_failed(); });
}

@ -722,10 +723,11 @@ class Genode::Packet_stream_source : private Packet_stream_base

	return _packet_alloc.alloc_aligned(size, align).convert<Alloc_packet_result>(

		[&] (void *base) {
			return Packet_descriptor((Genode::off_t)base, size); },
		[&] (Range_allocator::Allocation &a) {
			a.deallocate = false;
			return Packet_descriptor((Genode::off_t)a.ptr, size); },

		[&] (Allocator::Alloc_error) {
		[&] (Alloc_error) {
			return Alloc_packet_error::FAILED; });
}
@ -176,7 +176,10 @@ class Acpi::Memory
	}

	/* allocate ACPI range as I/O memory */
	_range.alloc_addr(loop_region.size(), loop_region.base());
	_range.alloc_addr(loop_region.size(), loop_region.base()).with_result(
		[&] (Range_allocator::Allocation &a) { a.deallocate = false; },
		[&] (Alloc_error e) { raise(e); });

	_range.construct_metadata((void *)loop_region.base(), _env, loop_region);

	/*
@ -63,8 +63,17 @@ class Igd::Ppgtt_allocator : public Genode::Translation_table_allocator

	Alloc_result try_alloc(size_t size) override
	{
		Alloc_result result = _range.alloc_aligned(size, 12);
		if (result.ok()) return result;
		return _range.alloc_aligned(size, 12).convert<Alloc_result>(
			[&] (Allocation &a) -> Alloc_result {
				a.deallocate = false;
				return { *this, a }; },
			[&] (Genode::Alloc_error) {
				return _grow_and_alloc(size); });
	}

	Alloc_result _grow_and_alloc(size_t size)
	{
		using Alloc_error = Genode::Alloc_error;

		Genode::Ram_dataspace_capability ds { };

@ -117,6 +126,8 @@ class Igd::Ppgtt_allocator : public Genode::Translation_table_allocator
		);
	}

	void _free(Allocation &a) override { free(a.ptr, a.num_bytes); }

	void free(void *addr, size_t size) override
	{
		if (addr == nullptr) { return; }
@ -23,14 +23,14 @@ addr_t Dma_allocator::_alloc_dma_addr(addr_t const phys_addr,
                                      size_t const size,
                                      bool   const force_phys_addr)
{
	using Alloc_error = Allocator::Alloc_error;

	/*
	 * 1:1 mapping (allocate at specified range from DMA memory allocator)
	 */
	if (force_phys_addr || !_remapping) {
		return _dma_alloc.alloc_addr(size, phys_addr).convert<addr_t>(
			[&] (void *) -> addr_t { return phys_addr; },
			[&] (Range_allocator::Allocation &a) -> addr_t {
				a.deallocate = false;
				return addr_t(a.ptr); },
			[&] (Alloc_error err) -> addr_t {
				switch (err) {
				case Alloc_error::OUT_OF_RAM: throw Out_of_ram();
@ -57,7 +57,9 @@ addr_t Dma_allocator::_alloc_dma_addr(addr_t const phys_addr,
	guarded_size += 0x1000; /* 4 kB */

	return _dma_alloc.alloc_aligned(guarded_size, size_align_log2).convert<addr_t>(
		[&] (void *ptr) { return (addr_t)ptr; },
		[&] (Range_allocator::Allocation &a) {
			a.deallocate = false;
			return (addr_t)a.ptr; },
		[&] (Alloc_error err) -> addr_t {
			switch (err) {
			case Alloc_error::OUT_OF_RAM: throw Out_of_ram();
@ -42,8 +42,11 @@ struct Test
	log("\nTEST ", id, ": ", brief, "\n");
	for (unsigned i = 0; i < 2; i++) {
		heap.try_alloc(fb_ds.size()).with_result(
			[&] (void *ptr) { buf[i] = (char *)ptr; },
			[&] (Allocator::Alloc_error e) {
			[&] (Heap::Allocation &a) {
				buf[i] = (char *)a.ptr;
				a.deallocate = false;
			},
			[&] (Alloc_error e) {
				env.parent().exit(-1);
				Allocator::throw_alloc_error(e);
			}
@ -71,18 +71,18 @@ struct Allocator_tracer : Allocator
	{
		return wrapped.try_alloc(size).convert<Alloc_result>(

			[&] (void *ptr) {
			[&] (Allocation &a) -> Alloc_result {
				sum += size;
				new (wrapped) Alloc(allocs, Alloc::Id { (addr_t)ptr }, size);
				return ptr;
				new (wrapped) Alloc(allocs, Alloc::Id { (addr_t)a.ptr }, size);
				a.deallocate = false;
				return { *this, { a.ptr, size } };
			},

			[&] (Allocator::Alloc_error error) {
				return error;
			}
			[&] (Alloc_error error) { return error; }
		);
	}

	void _free(Allocation &a) override { free(a.ptr, a.num_bytes); }

	void free(void *addr, size_t size) override
	{
		allocs.apply<Alloc>(Alloc::Id { (addr_t)addr }, [&] (Alloc &alloc) {
@ -49,10 +49,11 @@ struct Array_of_slab_elements
	for (size_t i = 0; i < num_elem; i++) {
		slab.try_alloc(slab_size).with_result(

			[&] (void *ptr) {
				elem[i] = ptr; },
			[&] (Genode::Allocator::Allocation &a) {
				a.deallocate = false;
				elem[i] = a.ptr; },

			[&] (Genode::Allocator::Alloc_error) {
			[&] (Genode::Alloc_error) {
				throw Alloc_failed(); }
		);
	}
@ -96,14 +97,17 @@ void Component::construct(Genode::Env & env)

	Alloc_result try_alloc(size_t size) override
	{
		Alloc_result const result = _alloc.try_alloc(size);

		if (result.ok())
			_consumed += size;

		return result;
		return _alloc.try_alloc(size).convert<Result>(
			[&] (Allocation &a) -> Alloc_result {
				_consumed += size;
				a.deallocate = false;
				return { *this, { a.ptr, size } };
			},
			[&] (Error e) { return e; });
	}

	void _free(Allocation &a) override { free(a.ptr, a.num_bytes); }

	void free(void *addr, size_t size) override
	{
		_alloc.free(addr, size);
@ -38,7 +38,7 @@ class Driver::Expanding_page_table_allocator
	private:

		using Alloc_result = Allocator::Alloc_result;
		using Alloc_error  = Allocator::Alloc_error;
		using Alloc_error  = Genode::Alloc_error;

		enum { MAX_CHUNK_SIZE = 2*1024*1024 };

@ -237,7 +237,9 @@ Genode::addr_t Driver::Expanding_page_table_allocator<TABLE_SIZE>::_alloc()
	}

	return result.convert<addr_t>(
		[&] (void * ptr) -> addr_t { return (addr_t)ptr; },
		[&] (Allocator::Allocation &a) -> addr_t {
			a.deallocate = false;
			return (addr_t)a.ptr; },
		[&] (Alloc_error) -> addr_t { throw Alloc_failed(); });
}
@ -101,10 +101,11 @@ Sup::Gmm::Vmm_addr Sup::Gmm::_alloc_pages(Pages pages)

	return _alloc.alloc_aligned(bytes, align).convert<Vmm_addr>(

		[&] (void *ptr) {
			return Vmm_addr { _map.base.value + (addr_t)ptr }; },
		[&] (Range_allocator::Allocation &a) {
			a.deallocate = false;
			return Vmm_addr { _map.base.value + (addr_t)a.ptr }; },

		[&] (Range_allocator::Alloc_error) -> Vmm_addr {
		[&] (Alloc_error) -> Vmm_addr {
			error("Gmm allocation failed");
			throw Allocation_failed();
		}