Repository: genodelabs/genode (https://github.com/genodelabs/genode.git)
commit 9a049789de (parent f57da7e645)
@@ -115,8 +115,9 @@ class Core::Vm_session_component
         *********************************/

        /* used on destruction of attached dataspaces */
        void detach(Region_map::Local_addr) override;            /* vm_session_common.cc */
        void unmap_region(addr_t, size_t) override;              /* vm_session_common.cc */
+       void reserve_and_flush(Region_map::Local_addr) override; /* vm_session_common.cc */


        /**************************
@@ -122,8 +122,9 @@ class Core::Vm_session_component
         ** Region_map_detach interface **
         *********************************/

-       void detach(Region_map::Local_addr) override;
-       void unmap_region(addr_t, size_t) override;
+       void detach(Region_map::Local_addr) override;            /* vm_session_common.cc */
+       void unmap_region(addr_t, size_t) override;              /* vm_session_common.cc */
+       void reserve_and_flush(Region_map::Local_addr) override; /* vm_session_common.cc */

        /**************************
        ** Vm session interface **
@@ -24,7 +24,7 @@ set config {
 	<default-route>
 		<any-service> <parent/> </any-service>
 	</default-route>
-	<start name="test-platform" caps="12000" ld="no">
+	<start name="test-platform" caps="12350" ld="no">
 		<resource name="RAM" quantum="128M"/>}

append config "
@@ -162,8 +162,9 @@ class Core::Vm_session_component
         *********************************/

        /* used on destruction of attached dataspaces */
        void detach(Region_map::Local_addr) override;            /* vm_session_common.cc */
        void unmap_region(addr_t, size_t) override;              /* vm_session_common.cc */
+       void reserve_and_flush(Region_map::Local_addr) override; /* vm_session_common.cc */

        /**************************
        ** Vm session interface **
@@ -106,8 +106,9 @@ class Core::Vm_session_component
         *********************************/

        /* used on destruction of attached dataspaces */
        void detach(Region_map::Local_addr) override;            /* vm_session_common.cc */
        void unmap_region(addr_t, size_t) override;              /* vm_session_common.cc */
+       void reserve_and_flush(Region_map::Local_addr) override; /* vm_session_common.cc */

        /**************************
        ** Vm session interface **
@@ -39,11 +39,12 @@ void Dataspace_component::detach_from_rm_sessions()
 	while (Rm_region *r = _regions.first()) {

 		/*
-		 * The 'detach' function calls 'Dataspace_component::detached_from'
-		 * and thereby removes the current region from the '_regions' list.
+		 * The 'reserve_and_flush' function calls
+		 * 'Dataspace_component::detached_from' and thereby
+		 * removes the current region from the '_regions' list.
 		 */
 		_mutex.release();
-		r->rm().detach((void *)r->base());
+		r->rm().reserve_and_flush((void *)r->base());
 		_mutex.acquire();
 	}
@@ -58,6 +58,7 @@ class Core::Region_map_detach : Interface

        virtual void detach(Region_map::Local_addr) = 0;
        virtual void unmap_region(addr_t base, size_t size) = 0;
+       virtual void reserve_and_flush(Region_map::Local_addr) = 0;
};

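The new pure virtual splits region teardown into two steps: reserve_and_flush() revokes the mappings of an attached region while keeping its address range occupied, whereas detach() finally releases the range. The hunk above in Dataspace_component::detach_from_rm_sessions() relies on exactly this split, so a dataspace can disappear while its regions stay reserved. The following is a rough, self-contained sketch of that contract only; the type and class names are simplified stand-ins, not the Genode interface itself.

    #include <cstddef>

    /* sketch only: 'addr_t' and 'Detach_sketch' are invented stand-ins */
    using addr_t = unsigned long;

    struct Detach_sketch
    {
        /* release the address range of the region attached at 'addr' */
        virtual void detach(addr_t addr) = 0;

        /* revoke mappings of [base, base+size) in all maps referencing it */
        virtual void unmap_region(addr_t base, std::size_t size) = 0;

        /* new step: revoke mappings at 'addr' but keep the range reserved */
        virtual void reserve_and_flush(addr_t addr) = 0;

        virtual ~Detach_sketch() { }
    };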
@@ -93,7 +94,7 @@ class Core::Rm_region : public List<Rm_region>::Element

    private:

-       Dataspace_component &_dsc;
+       Dataspace_component *_dsc;
        Region_map_detach   &_rm;

        Attr const _attr;
@@ -102,21 +103,33 @@ class Core::Rm_region : public List<Rm_region>::Element

        Rm_region(Dataspace_component &dsc, Region_map_detach &rm, Attr attr)
        :
-           _dsc(dsc), _rm(rm), _attr(attr)
+           _dsc(&dsc), _rm(rm), _attr(attr)
        { }

-       addr_t               base() const { return _attr.base;  }
-       size_t               size() const { return _attr.size;  }
-       bool                write() const { return _attr.write; }
-       bool           executable() const { return _attr.exec;  }
-       off_t              offset() const { return _attr.off;   }
-       bool                  dma() const { return _attr.dma;   }
-       Dataspace_component &dataspace() const { return _dsc; }
-       Region_map_detach   &rm()        const { return _rm;  }
+       addr_t        base() const { return _attr.base;  }
+       size_t        size() const { return _attr.size;  }
+       bool         write() const { return _attr.write; }
+       bool    executable() const { return _attr.exec;  }
+       off_t       offset() const { return _attr.off;   }
+       bool           dma() const { return _attr.dma;   }
+       Region_map_detach &rm() const { return _rm; }
+
+       void mark_as_reserved() { _dsc = nullptr; }
+       bool reserved() const   { return !_dsc; }
+
+       Addr_range range() const { return { .start = _attr.base,
+                                           .end   = _attr.base + _attr.size - 1 }; }
+
+       void with_dataspace(auto const &fn) const
+       {
+           if (!_dsc) {
+               Genode::error(__func__, ": invalid dataspace");
+               return;
+           }
+
+           fn(*_dsc);
+       }

        void print(Output &out) const { Genode::print(out, _attr); }
};

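With _dsc demoted to a pointer that mark_as_reserved() clears, callers can no longer hold on to a Dataspace_component reference obtained from the removed dataspace() accessor. They touch the dataspace only inside the with_dataspace() scope and must cope with the region being reserved. The following self-contained miniature only illustrates that accessor pattern; 'Mini_region' and 'Payload' are invented names, and the 'auto' function parameter needs C++20, as in the Genode code.

    #include <cstdio>

    struct Payload { int value; };

    class Mini_region
    {
        private:

            Payload *_payload;   /* becomes nullptr once the region is reserved */

        public:

            Mini_region(Payload &p) : _payload(&p) { }

            void mark_as_reserved() { _payload = nullptr; }
            bool reserved() const   { return !_payload; }

            /* callers never obtain a long-lived reference to the payload */
            void with_payload(auto const &fn) const
            {
                if (!_payload) {
                    std::puts("invalid payload");
                    return;
                }
                fn(*_payload);
            }
    };

    int main()
    {
        Payload     p { 42 };
        Mini_region r { p };

        r.with_payload([] (Payload &pl) { std::printf("%d\n", pl.value); });

        r.mark_as_reserved();
        r.with_payload([] (Payload &)   { /* skipped: region is reserved */ });
    }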
@@ -422,7 +435,7 @@ class Core::Region_map_component : private Weak_object<Region_map_component>,
                return Result::REFLECTED; /* omit diagnostics */
            };

-           if (!region_ptr)
+           if (!region_ptr || region_ptr->reserved())
                return reflect_fault(Result::NO_REGION);

            Rm_region const &region = *region_ptr;
@@ -430,39 +443,50 @@ class Core::Region_map_component : private Weak_object<Region_map_component>,
            /* fault information relative to 'region' */
            Fault const relative_fault = fault.within_region(region);

-           Dataspace_component &dataspace = region.dataspace();
-
-           Native_capability managed_ds_cap = dataspace.sub_rm();
-
-           /* region refers to a regular dataspace */
-           if (!managed_ds_cap.valid()) {
-
-               bool const writeable = relative_fault.rwx.w
-                                   && dataspace.writeable();
-
-               bool const write_violation = relative_fault.write_access()
-                                         && !writeable;
-
-               bool const exec_violation = relative_fault.exec_access()
-                                        && !relative_fault.rwx.x;
-
-               if (write_violation) return reflect_fault(Result::WRITE_VIOLATION);
-               if (exec_violation)  return reflect_fault(Result::EXEC_VIOLATION);
-
-               return resolved_fn(region, relative_fault);
-           }
-
-           /* traverse into managed dataspace */
-           Fault const sub_region_map_relative_fault =
-               relative_fault.within_sub_region_map(region.offset(),
-                                                    dataspace.size());
-
            Result result = Result::NO_REGION;
-           _session_ep.apply(managed_ds_cap, [&] (Region_map_component *rmc_ptr) {
-               if (rmc_ptr)
-                   result = rmc_ptr->_with_region_at_fault({ recursion_limit.value - 1 },
-                                                           sub_region_map_relative_fault,
-                                                           resolved_fn, reflect_fn); });
+
+           region.with_dataspace([&] (Dataspace_component &dataspace) {
+
+               Native_capability managed_ds_cap = dataspace.sub_rm();
+
+               /* region refers to a regular dataspace */
+               if (!managed_ds_cap.valid()) {
+
+                   bool const writeable = relative_fault.rwx.w
+                                       && dataspace.writeable();
+
+                   bool const write_violation = relative_fault.write_access()
+                                             && !writeable;
+
+                   bool const exec_violation = relative_fault.exec_access()
+                                            && !relative_fault.rwx.x;
+
+                   if (write_violation) {
+                       result = reflect_fault(Result::WRITE_VIOLATION);
+                       return;
+                   }
+
+                   if (exec_violation) {
+                       result = reflect_fault(Result::EXEC_VIOLATION);
+                       return;
+                   }
+
+                   result = resolved_fn(region, relative_fault);
+                   return;
+               }
+
+               /* traverse into managed dataspace */
+               Fault const sub_region_map_relative_fault =
+                   relative_fault.within_sub_region_map(region.offset(),
+                                                        dataspace.size());
+
+               _session_ep.apply(managed_ds_cap, [&] (Region_map_component *rmc_ptr) {
+                   if (rmc_ptr)
+                       result = rmc_ptr->_with_region_at_fault({ recursion_limit.value - 1 },
+                                                               sub_region_map_relative_fault,
+                                                               resolved_fn, reflect_fn); });
+           });

            return result;
        }
@@ -484,6 +508,26 @@ class Core::Region_map_component : private Weak_object<Region_map_component>,

        Local_addr _attach(Dataspace_capability, Attach_attr);

+       void _with_region(Local_addr local_addr, auto const &fn)
+       {
+           /* read meta data for address */
+           Rm_region *region_ptr = _map.metadata(local_addr);
+
+           if (!region_ptr) {
+               if (_diag.enabled)
+                   warning("_with_region: no attachment at ", (void *)local_addr);
+               return;
+           }
+
+           if ((region_ptr->base() != static_cast<addr_t>(local_addr)) && _diag.enabled)
+               warning("_with_region: ", static_cast<void *>(local_addr), " is not "
+                       "the beginning of the region ", Hex(region_ptr->base()));
+
+           fn(*region_ptr);
+       }
+
+       void _reserve_and_flush_unsynchronized(Rm_region &);
+
    public:

        /*
@@ -494,6 +538,8 @@ class Core::Region_map_component : private Weak_object<Region_map_component>,
         */
        void unmap_region(addr_t base, size_t size) override;

+       void reserve_and_flush(Local_addr) override;
+
        /**
         * Constructor
         *
@@ -557,43 +603,46 @@ class Core::Region_map_component : private Weak_object<Region_map_component>,
            return _with_region_at_fault(Recursion_limit { 5 }, fault,
                [&] (Rm_region const &region, Fault const &region_relative_fault)
                {
-                   Dataspace_component &dataspace = region.dataspace();
-
-                   Fault const ram_relative_fault =
-                       region_relative_fault.within_ram(region.offset(), dataspace.attr());
-
-                   Log2_range src_range { ram_relative_fault.hotspot };
-                   Log2_range dst_range { fault.hotspot };
-
-                   src_range = src_range.constrained(ram_relative_fault.bounds);
-
-                   Log2 const common_size = Log2_range::common_log2(dst_range,
-                                                                    src_range);
-                   Log2 const map_size = kernel_constrained_map_size(common_size);
-
-                   src_range = src_range.constrained(map_size);
-                   dst_range = dst_range.constrained(map_size);
-
-                   if (!src_range.valid() || !dst_range.valid()) {
-                       error("invalid mapping");
-                       return With_mapping_result::NO_REGION;
-                   }
-
-                   Mapping const mapping {
-                       .dst_addr       = dst_range.base.value,
-                       .src_addr       = src_range.base.value,
-                       .size_log2      = map_size.log2,
-                       .cached         = dataspace.cacheability() == CACHED,
-                       .io_mem         = dataspace.io_mem(),
-                       .dma_buffer     = region.dma(),
-                       .write_combined = dataspace.cacheability() == WRITE_COMBINED,
-                       .writeable      = ram_relative_fault.rwx.w,
-                       .executable     = ram_relative_fault.rwx.x
-                   };
-
-                   apply_fn(mapping);
-
-                   return With_mapping_result::RESOLVED;
+                   With_mapping_result result = With_mapping_result::NO_REGION;
+
+                   region.with_dataspace([&] (Dataspace_component &dataspace) {
+
+                       Fault const ram_relative_fault =
+                           region_relative_fault.within_ram(region.offset(), dataspace.attr());
+
+                       Log2_range src_range { ram_relative_fault.hotspot };
+                       Log2_range dst_range { fault.hotspot };
+
+                       src_range = src_range.constrained(ram_relative_fault.bounds);
+
+                       Log2 const common_size = Log2_range::common_log2(dst_range,
+                                                                        src_range);
+                       Log2 const map_size = kernel_constrained_map_size(common_size);
+
+                       src_range = src_range.constrained(map_size);
+                       dst_range = dst_range.constrained(map_size);
+
+                       if (!src_range.valid() || !dst_range.valid()) {
+                           error("invalid mapping");
+                           return;
+                       }
+
+                       Mapping const mapping {
+                           .dst_addr       = dst_range.base.value,
+                           .src_addr       = src_range.base.value,
+                           .size_log2      = map_size.log2,
+                           .cached         = dataspace.cacheability() == CACHED,
+                           .io_mem         = dataspace.io_mem(),
+                           .dma_buffer     = region.dma(),
+                           .write_combined = dataspace.cacheability() == WRITE_COMBINED,
+                           .writeable      = ram_relative_fault.rwx.w,
+                           .executable     = ram_relative_fault.rwx.x
+                       };
+
+                       apply_fn(mapping);
+
+                       result = With_mapping_result::RESOLVED;
+                   });
+
+                   return result;
                },
                reflect_fn
            );
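Note how both reworked fault-handling paths turn their former early 'return <value>' statements into assignments to a captured result variable: inside the void with_dataspace() functor, a plain return only leaves the functor, so the enclosing function's result has to travel through a capture that is pre-initialized to the "nothing resolved" value. A minimal stand-alone illustration of that idiom, with all names invented for this sketch:

    #include <cstdio>

    enum class Result { NO_REGION, RESOLVED };

    /* stand-in for with_dataspace(): runs 'fn' only if a payload is present */
    void with_payload(bool present, auto const &fn)
    {
        if (present)
            fn(42);
    }

    Result lookup(bool present)
    {
        Result result = Result::NO_REGION;   /* value used when 'fn' never runs */

        with_payload(present, [&] (int value) {
            if (value < 0)
                return;                      /* leaves 'result' at NO_REGION */
            result = Result::RESOLVED;
        });

        return result;
    }

    int main()
    {
        std::printf("%d %d\n", int(lookup(true)), int(lookup(false)));
    }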
@@ -295,26 +295,33 @@ Region_map_component::_attach(Dataspace_capability ds_cap, Attach_attr const att

addr_t Region_map_component::_core_local_addr(Rm_region & region)
{
-	/**
-	 * If this region references a managed dataspace,
-	 * we have to recursively request the core-local address
-	 */
-	if (region.dataspace().sub_rm().valid()) {
-		auto lambda = [&] (Region_map_component * rmc) -> addr_t
-		{
-			/**
-			 * It is possible that there is no dataspace attached
-			 * inside the managed dataspace, in that case return zero.
-			 */
-			Rm_region * r = rmc ? rmc->_map.metadata((void*)region.offset())
-			                    : nullptr;;
-			return r ? rmc->_core_local_addr(*r) : 0;
-		};
-		return _session_ep.apply(region.dataspace().sub_rm(), lambda);
-	}
-
-	/* return core-local address of dataspace + region offset */
-	return region.dataspace().core_local_addr() + region.offset();
+	addr_t result = 0;
+
+	region.with_dataspace([&] (Dataspace_component &dataspace) {
+		/**
+		 * If this region references a managed dataspace,
+		 * we have to recursively request the core-local address
+		 */
+		if (dataspace.sub_rm().valid()) {
+			auto lambda = [&] (Region_map_component * rmc) -> addr_t
+			{
+				/**
+				 * It is possible that there is no dataspace attached
+				 * inside the managed dataspace, in that case return zero.
+				 */
+				Rm_region * r = rmc ? rmc->_map.metadata((void*)region.offset())
+				                    : nullptr;;
+				return (r && !r->reserved()) ? rmc->_core_local_addr(*r) : 0;
+			};
+			result = _session_ep.apply(dataspace.sub_rm(), lambda);
+			return;
+		}
+
+		/* return core-local address of dataspace + region offset */
+		result = dataspace.core_local_addr() + region.offset();
+	});
+
+	return result;
}

@@ -390,41 +397,12 @@ Region_map_component::attach_dma(Dataspace_capability ds_cap, addr_t at)
}


-void Region_map_component::detach(Local_addr local_addr)
+void Region_map_component::_reserve_and_flush_unsynchronized(Rm_region &region)
{
-	/* serialize access */
-	Mutex::Guard lock_guard(_mutex);
-
-	/* read meta data for address */
-	Rm_region *region_ptr = _map.metadata(local_addr);
-
-	if (!region_ptr) {
-		if (_diag.enabled)
-			warning("detach: no attachment at ", (void *)local_addr);
-		return;
-	}
-
-	if ((region_ptr->base() != static_cast<addr_t>(local_addr)) && _diag.enabled)
-		warning("detach: ", static_cast<void *>(local_addr), " is not "
-		        "the beginning of the region ", Hex(region_ptr->base()));
-
-	Dataspace_component &dsc = region_ptr->dataspace();
-
-	/* inform dataspace about detachment */
-	dsc.detached_from(*region_ptr);
-
-	/*
-	 * Create local copy of region data because the '_map.metadata' of the
-	 * region will become unavailable as soon as we call '_map.free' below.
-	 */
-	Rm_region region = *region_ptr;
-
-	/*
-	 * We unregister the region from region map prior unmapping the pages to
-	 * make sure that page faults occurring immediately after the unmap
-	 * refer to an empty region not to the dataspace, which we just removed.
-	 */
-	_map.free(reinterpret_cast<void *>(region.base()));
+	region.with_dataspace([&] (Dataspace_component &dsc) {
+		dsc.detached_from(region);
+	});

	if (!platform().supports_direct_unmap()) {

@@ -438,10 +416,27 @@ void Region_map_component::detach(Local_addr local_addr)
		 * of core memory (reference issue #3082)
		 */
		Address_space::Core_local_addr core_local { _core_local_addr(region) };

+		/*
+		 * We mark the region as reserved prior unmapping the pages to
+		 * make sure that page faults occurring immediately after the unmap
+		 * do not refer to the dataspace, which we just removed. Since
+		 * 'mark_as_reserved()' invalidates the dataspace pointer, it
+		 * must be called after '_core_local_addr()'.
+		 */
+		region.mark_as_reserved();
+
		if (core_local.value)
			platform_specific().core_pd().flush(0, region.size(), core_local);
	} else {

+		/*
+		 * We mark the region as reserved prior unmapping the pages to
+		 * make sure that page faults occurring immediately after the unmap
+		 * do not refer to the dataspace, which we just removed.
+		 */
+		region.mark_as_reserved();
+
		/*
		 * Unmap this memory region from all region maps referencing it.
		 */
@@ -450,6 +445,34 @@ void Region_map_component::detach(Local_addr local_addr)
}


+/*
+ * Flush the region, but keep it reserved until 'detach()' is called.
+ */
+void Region_map_component::reserve_and_flush(Local_addr local_addr)
+{
+	/* serialize access */
+	Mutex::Guard lock_guard(_mutex);
+
+	_with_region(local_addr, [&] (Rm_region &region) {
+		_reserve_and_flush_unsynchronized(region);
+	});
+}
+
+
+void Region_map_component::detach(Local_addr local_addr)
+{
+	/* serialize access */
+	Mutex::Guard lock_guard(_mutex);
+
+	_with_region(local_addr, [&] (Rm_region &region) {
+		if (!region.reserved())
+			_reserve_and_flush_unsynchronized(region);
+
+		/* free the reserved region */
+		_map.free(reinterpret_cast<void *>(region.base()));
+	});
+}
+
+
void Region_map_component::add_client(Rm_client &rm_client)
{
	Mutex::Guard lock_guard(_mutex);
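Taken together, reserve_and_flush() and the new detach() form a two-phase teardown: the first phase flushes the mappings but keeps the metadata entry, marked as reserved, so that page faults arriving right after the unmap hit a reserved region instead of a dangling dataspace; the second phase frees the entry. The following self-contained miniature only mimics that flow; std::map stands in for the allocator-backed '_map', and all names are invented for this sketch.

    #include <cstdio>
    #include <map>

    struct Region { bool reserved = false; };

    /* address -> region metadata, standing in for '_map' */
    std::map<unsigned long, Region> regions;

    void reserve_and_flush(unsigned long addr)
    {
        auto it = regions.find(addr);
        if (it == regions.end())
            return;

        /* phase 1: revoke the mappings, keep the entry but mark it reserved */
        it->second.reserved = true;
    }

    void detach(unsigned long addr)
    {
        auto it = regions.find(addr);
        if (it == regions.end())
            return;

        if (!it->second.reserved)
            reserve_and_flush(addr);   /* not flushed yet: do it now */

        /* phase 2: free the reserved entry */
        regions.erase(it);
    }

    int main()
    {
        regions[0x1000] = Region { };

        reserve_and_flush(0x1000);   /* mappings gone, range still reserved */
        detach(0x1000);              /* reservation released */

        std::printf("entries left: %zu\n", regions.size());
    }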
@@ -112,8 +112,10 @@ void Vm_session_component::attach(Dataspace_capability const cap,

	Rm_region &region = *region_ptr;

-	if (!(cap == region.dataspace().cap()))
-		throw Region_conflict();
+	region.with_dataspace([&] (Dataspace_component &dataspace) {
+		if (!(cap == dataspace.cap()))
+			throw Region_conflict();
+	});

	if (guest_phys < region.base() ||
	    guest_phys > region.base() + region.size() - 1)
@@ -149,7 +151,9 @@ void Vm_session_component::detach(addr_t guest_phys, size_t size)
		iteration_size = region->size();

		/* inform dataspace */
-		region->dataspace().detached_from(*region);
+		region->with_dataspace([&] (Dataspace_component &dataspace) {
+			dataspace.detached_from(*region);
+		});

		/* cleanup metadata */
		_map.free(reinterpret_cast<void *>(region->base()));
@@ -180,3 +184,9 @@ void Vm_session_component::unmap_region(addr_t base, size_t size)
{
	error(__func__, " unimplemented ", base, " ", size);
}
+
+
+void Vm_session_component::reserve_and_flush(Region_map::Local_addr)
+{
+	error(__func__, " unimplemented");
+}