base-sel4: handle PTE selectors exceeded

In Vm_space::map, when allocating a new page-table-entry selector, the
allocator may throw an exception signalling that no selectors are left,
which was not caught so far. Now we catch this exception, flush the
mapping cache to free all selectors again, and retry the allocation.

Fixes #2781
Authored by Martin Stein on 2018-04-19 15:09:30 +02:00, committed by Christian Helmuth
parent c02ef3ec94
commit 52a69b8a6f

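The following is a minimal, self-contained sketch (not Genode code) of the
allocate/flush/retry pattern this commit introduces. 'Sel_allocator',
'flush_cache()', and 'alloc_pte_selector()' are illustrative stand-ins for
the Selector_allocator, Vm_space::_flush(), and the allocation site in
_map_frame shown in the diff below; the real _flush() only releases
selectors of mappings that may safely be re-established.

/* sketch only: simplified model of the allocate/flush/retry pattern */
#include <bitset>
#include <cstddef>
#include <exception>
#include <iostream>

struct Sel_allocator
{
    struct Out_of_indices : std::exception { };  /* all selectors in use */

    static constexpr std::size_t NUM = 8;
    std::bitset<NUM> used { };

    std::size_t alloc()
    {
        for (std::size_t i = 0; i < NUM; i++)
            if (!used[i]) { used[i] = true; return i; }
        throw Out_of_indices();
    }

    void free(std::size_t i) { used[i] = false; }
};

/* stand-in for _flush(): drop all cached mappings, freeing their selectors */
static void flush_cache(Sel_allocator &alloc)
{
    for (std::size_t i = 0; i < Sel_allocator::NUM; i++)
        alloc.free(i);
}

static std::size_t alloc_pte_selector(Sel_allocator &alloc)
{
    /* on exhaustion, flush the cache once and retry the allocation */
    try { return alloc.alloc(); }
    catch (Sel_allocator::Out_of_indices &) {
        flush_cache(alloc);
        return alloc.alloc();  /* a second failure propagates to the caller */
    }
}

int main()
{
    Sel_allocator alloc { };
    for (int i = 0; i < 20; i++)  /* more requests than available selectors */
        std::cout << alloc_pte_selector(alloc) << ' ';
    std::cout << '\n';
}
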
@@ -130,7 +130,8 @@ class Genode::Vm_space
 /**
  * Allocator for the selectors within '_vm_cnodes'
  */
-Bit_allocator<1UL << NUM_VM_SEL_LOG2> _sel_alloc { };
+using Selector_allocator = Bit_allocator<1UL << NUM_VM_SEL_LOG2>;
+Selector_allocator _sel_alloc { };

 /**
  * Return leaf CNode that contains an index allocated from '_sel_alloc'
@@ -155,6 +156,43 @@ class Genode::Vm_space
  */
 addr_t _idx_to_sel(addr_t idx) const { return (_id << 20) | idx; }

+void _flush(bool const flush_support)
+{
+    if (!flush_support) {
+        warning("mapping cache full, but can't flush");
+        throw;
+    }
+
+    warning("flush page table entries - mapping cache full - PD: ",
+            _pd_label.string());
+
+    _page_table_registry.flush_pages([&] (Cap_sel const &idx,
+                                          addr_t const v_addr)
+    {
+        /* XXX - INITIAL_IPC_BUFFER can't be re-mapped currently */
+        if (v_addr == 0x1000)
+            return false;
+
+        /* XXX - UTCB can't be re-mapped currently */
+        if (stack_area_virtual_base() <= v_addr
+            && (v_addr < stack_area_virtual_base() +
+                         stack_area_virtual_size())
+            && !((v_addr + 0x1000) & (stack_virtual_size() - 1)))
+            return false;
+
+        long err = _unmap_page(idx);
+        if (err != seL4_NoError)
+            error("unmap failed, idx=", idx, " res=", err);
+
+        _leaf_cnode(idx.value()).remove(_leaf_cnode_entry(idx.value()));
+
+        _sel_alloc.free(idx.value());
+
+        return true;
+    });
+}
+
 bool _map_frame(addr_t const from_phys, addr_t const to_virt,
                 Cache_attribute const cacheability,
                 bool const writable, bool const executable,
@@ -171,9 +209,15 @@ class Genode::Vm_space
  */
 return false;
 }

-/* allocate page-table entry selector */
-addr_t pte_idx = _sel_alloc.alloc();
+/* allocate page-table-entry selector */
+addr_t pte_idx;
+try { pte_idx = _sel_alloc.alloc(); }
+catch (Selector_allocator::Out_of_indices) {
+    /* free all page-table-entry selectors and retry once */
+    _flush(flush_support);
+    pte_idx = _sel_alloc.alloc();
+}

 /*
  * Copy page-frame selector to pte_sel
@@ -186,42 +230,11 @@ class Genode::Vm_space
                  Cnode_index(_leaf_cnode_entry(pte_idx)));

 /* remember relationship between pte_sel and the virtual address */
-try {
-    _page_table_registry.insert_page_frame(to_virt, Cap_sel(pte_idx));
-} catch (Page_table_registry::Mapping_cache_full) {
-    if (!flush_support) {
-        warning("mapping cache full, but can't flush");
-        throw;
-    }
+try { _page_table_registry.insert_page_frame(to_virt, Cap_sel(pte_idx)); }
+catch (Page_table_registry::Mapping_cache_full) {
-
-    warning("flush page table entries - mapping cache full - PD: ",
-            _pd_label.string());
-
-    _page_table_registry.flush_pages([&] (Cap_sel const &idx,
-                                          addr_t const v_addr)
-    {
-        /* XXX - INITIAL_IPC_BUFFER can't be re-mapped currently */
-        if (v_addr == 0x1000)
-            return false;
-
-        /* XXX - UTCB can't be re-mapped currently */
-        if (stack_area_virtual_base() <= v_addr
-            && (v_addr < stack_area_virtual_base() +
-                         stack_area_virtual_size())
-            && !((v_addr + 0x1000) & (stack_virtual_size() - 1)))
-            return false;
-
-        long err = _unmap_page(idx);
-        if (err != seL4_NoError)
-            error("unmap failed, idx=", idx, " res=", err);
-
-        _leaf_cnode(idx.value()).remove(_leaf_cnode_entry(idx.value()));
-
-        _sel_alloc.free(idx.value());
-
-        return true;
-    });
-    /* re-try once */
+
+    /* free all entries of mapping cache and re-try once */
+    _flush(flush_support);
+
     _page_table_registry.insert_page_frame(to_virt, Cap_sel(pte_idx));
 }
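
After this change, both failure points in _map_frame — Selector_allocator::Out_of_indices
from _sel_alloc.alloc() and Page_table_registry::Mapping_cache_full from
insert_page_frame() — follow the same recipe: call _flush() once, then retry.
As a rough illustration only, the shared shape could be written as the helper
below; 'retry_once_after_flush' is hypothetical and not part of the commit.

/* hypothetical helper (sketch): "flush once, then retry" shape */
template <typename EXC, typename FN, typename FLUSH>
auto retry_once_after_flush(FN const &fn, FLUSH const &flush) -> decltype(fn())
{
    try { return fn(); }
    catch (EXC &) {
        flush();      /* free all cached selectors / mapping-cache entries */
        return fn();  /* a second failure propagates to the caller */
    }
}

With such a helper, the selector allocation in the diff above would read roughly as
retry_once_after_flush<Selector_allocator::Out_of_indices>(
    [&] { return _sel_alloc.alloc(); }, [&] { _flush(flush_support); });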