base: Pd_session::dma_addr, Pd_session::attach_dma

This patch enhances the PD-session interface with the support needed for
user-level device drivers performing DMA. Both new RPC functions are
intended for direct use by the platform driver only. If invoked for a PD
that lacks the managing-system role, the operations have no effect.

The 'dma_addr()' RPC function allows the platform driver to request the
DMA address of a given RAM dataspace. It is meant to replace the
'Dataspace::phys_addr' RPC function.
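
A minimal sketch of the intended call pattern, assuming the platform
driver holds a 'Pd_session' reference for the device PD; the helper name
'query_dma_addr' and the surrounding code are illustrative and not part
of this patch:

  #include <base/log.h>
  #include <pd_session/pd_session.h>

  using namespace Genode;

  /* illustrative helper: ask core for the DMA address of 'ds',
     'device_pd' stands for the device PD's session (assumed name) */
  static addr_t query_dma_addr(Pd_session &device_pd,
                               Ram_dataspace_capability ds)
  {
      addr_t const dma = device_pd.dma_addr(ds);

      /* core returns 0 if the caller lacks the managing-system role
         or if 'ds' cannot be resolved to a RAM dataspace */
      if (dma == 0)
          warning("dma_addr request was not granted");

      return dma;
  }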

The 'attach_dma' RPC function adds the given dataspace to the device
PD's I/O page table. It replaces the former heuristic of marking DMA
buffers as uncached RAM on x86.
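
The result can be inspected as sketched below, assuming that
'Attach_dma_result' follows Genode's usual attempt-style error handling;
the helper 'attach_dma_buffer' and its surroundings are illustrative
only:

  #include <base/log.h>
  #include <pd_session/pd_session.h>

  using namespace Genode;

  /* illustrative helper: make 'ds' visible to the device at DMA address 'at' */
  static bool attach_dma_buffer(Pd_session &device_pd,
                                Dataspace_capability ds, addr_t at)
  {
      using Error = Pd_session::Attach_dma_error;

      bool ok = false;

      /* assumes an attempt-style result with a 'with_result' interface */
      device_pd.attach_dma(ds, at).with_result(
          [&] (Pd_session::Attach_dma_ok) { ok = true; },
          [&] (Error e) {
              switch (e) {
              case Error::OUT_OF_RAM:  warning("attach_dma: out of RAM quota"); break;
              case Error::OUT_OF_CAPS: warning("attach_dma: out of cap quota"); break;
              case Error::DENIED:      warning("attach_dma: denied");           break;
              }
          });
      return ok;
  }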

With this patch, the UNCACHED attribute of RAM dataspaces is no longer
used to distinguish DMA buffers from regular RAM dataspaces.
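
For driver code, this means a DMA buffer can be allocated as ordinary
cached RAM, as in the following sketch (buffer size and helper name are
illustrative):

  #include <base/env.h>

  using namespace Genode;

  /* sketch: allocate a DMA buffer as plain cached RAM; no UNCACHED
     hint is needed anymore to mark it as DMA memory (size is illustrative) */
  static Ram_dataspace_capability alloc_dma_buffer(Env &env)
  {
      return env.ram().alloc(16*4096);
  }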

Issue #2243
commit e4f62380d7
parent db3a647c6d
Author: Norman Feske
Date:   2022-02-01 15:17:29 +01:00

16 changed files with 264 additions and 66 deletions


@@ -197,6 +197,7 @@ class Genode::Pd_session_component : public Session_object<Pd_session>
void map(addr_t, addr_t) override;
/****************
** Signalling **
****************/
@@ -334,6 +335,15 @@ class Genode::Pd_session_component : public Session_object<Pd_session>
*******************************/
Managing_system_state managing_system(Managing_system_state const &) override;
/*******************************************
** Support for user-level device drivers **
*******************************************/
addr_t dma_addr(Ram_dataspace_capability) override;
Attach_dma_result attach_dma(Dataspace_capability, addr_t) override;
};
#endif /* _CORE__INCLUDE__PD_SESSION_COMPONENT_H_ */


@@ -101,6 +101,8 @@ class Genode::Ram_dataspace_factory : public Ram_allocator,
static_cap_cast<Dataspace>(ds->cap())));
}
addr_t dataspace_dma_addr(Ram_dataspace_capability);
/*****************************
** Ram_allocator interface **


@@ -28,6 +28,7 @@
#include <base/heap.h>
#include <util/list.h>
#include <util/fifo.h>
#include <pd_session/pd_session.h>
/* core includes */
#include <platform.h>
@@ -70,39 +71,40 @@ class Genode::Region_map_detach : Genode::Interface
*/
class Genode::Rm_region : public List<Rm_region>::Element
{
private:
public:
addr_t const _base;
size_t const _size;
bool const _write;
bool const _exec;
off_t const _off;
struct Attr
{
addr_t base;
size_t size;
bool write;
bool exec;
off_t off;
bool dma;
};
private:
Dataspace_component &_dsc;
Region_map_detach &_rm;
Attr const _attr;
public:
Rm_region(addr_t base, size_t size, bool write,
Dataspace_component &dsc, off_t offset,
Region_map_detach &rm, bool exec)
Rm_region(Dataspace_component &dsc, Region_map_detach &rm, Attr attr)
:
_base(base), _size(size), _write(write), _exec(exec), _off(offset),
_dsc(dsc), _rm(rm)
_dsc(dsc), _rm(rm), _attr(attr)
{ }
/***************
** Accessors **
***************/
addr_t base() const { return _base; }
size_t size() const { return _size; }
bool write() const { return _write; }
bool executable() const { return _exec; }
Dataspace_component &dataspace() const { return _dsc; }
off_t offset() const { return _off; }
Region_map_detach &rm() const { return _rm; }
addr_t base() const { return _attr.base; }
size_t size() const { return _attr.size; }
bool write() const { return _attr.write; }
bool executable() const { return _attr.exec; }
off_t offset() const { return _attr.off; }
bool dma() const { return _attr.dma; }
Dataspace_component &dataspace() const { return _dsc; }
Region_map_detach &rm() const { return _rm; }
};
@@ -358,6 +360,19 @@ class Genode::Region_map_component : private Weak_object<Region_map_component>,
*/
addr_t _core_local_addr(Rm_region & r);
struct Attach_attr
{
size_t size;
off_t offset;
bool use_local_addr;
addr_t local_addr;
bool executable;
bool writeable;
bool dma;
};
Local_addr _attach(Dataspace_capability, Attach_attr);
public:
/*
@@ -451,6 +466,11 @@ class Genode::Region_map_component : private Weak_object<Region_map_component>,
Dataspace_component &dsc,
addr_t, addr_t);
using Attach_dma_result = Pd_session::Attach_dma_result;
Attach_dma_result attach_dma(Dataspace_capability, addr_t);
/**************************
** Region map interface **
**************************/


@@ -176,3 +176,27 @@ void Pd_session_component::transfer_quota(Capability<Pd_session> pd_cap,
});
}
addr_t Pd_session_component::dma_addr(Ram_dataspace_capability ds_cap)
{
if (_managing_system == Managing_system::DENIED)
return 0;
if (this->cap() == ds_cap)
return 0;
return _ram_ds_factory.dataspace_dma_addr(ds_cap);
}
Pd_session::Attach_dma_result
Pd_session_component::attach_dma(Dataspace_capability ds_cap, addr_t at)
{
if (_managing_system == Managing_system::DENIED)
return Attach_dma_error::DENIED;
if (this->cap() == ds_cap)
return Attach_dma_error::DENIED;
return _address_space.attach_dma(ds_cap, at);
}


@@ -185,3 +185,14 @@ size_t Ram_dataspace_factory::dataspace_size(Ram_dataspace_capability ds_cap) co
return result;
}
addr_t Ram_dataspace_factory::dataspace_dma_addr(Ram_dataspace_capability ds_cap)
{
addr_t result = 0;
_ep.apply(ds_cap, [&] (Dataspace_component *c) {
if (c && c->owner(*this))
result = c->phys_addr(); });
return result;
}


@@ -344,7 +344,7 @@ Mapping Region_map_component::create_map_item(Region_map_component *,
.size_log2 = map_size_log2,
.cached = dataspace.cacheability() == CACHED,
.io_mem = dataspace.io_mem(),
.dma_buffer = dataspace.cacheability() != CACHED,
.dma_buffer = region.dma(),
.write_combined = dataspace.cacheability() == WRITE_COMBINED,
.writeable = region.write() && dataspace.writable(),
.executable = region.executable() };
@@ -352,16 +352,13 @@ Mapping Region_map_component::create_map_item(Region_map_component *,
Region_map::Local_addr
Region_map_component::attach(Dataspace_capability ds_cap, size_t size,
off_t offset, bool use_local_addr,
Region_map::Local_addr local_addr,
bool executable, bool writeable)
Region_map_component::_attach(Dataspace_capability ds_cap, Attach_attr const attr)
{
/* serialize access */
Mutex::Guard lock_guard(_mutex);
/* offset must be positive and page-aligned */
if (offset < 0 || align_addr(offset, get_page_size_log2()) != offset)
if (attr.offset < 0 || align_addr(attr.offset, get_page_size_log2()) != attr.offset)
throw Region_conflict();
auto lambda = [&] (Dataspace_component *dsc) {
@@ -374,24 +371,26 @@ Region_map_component::attach(Dataspace_capability ds_cap, size_t size,
unsigned const min_align_log2 = get_page_size_log2();
size_t const off = offset;
size_t const off = attr.offset;
if (off >= dsc->size())
throw Region_conflict();
size_t size = attr.size;
if (!size)
size = dsc->size() - offset;
size = dsc->size() - attr.offset;
/* work with page granularity */
size = align_addr(size, min_align_log2);
/* deny creation of regions larger than the actual dataspace */
if (dsc->size() < size + offset)
if (dsc->size() < size + attr.offset)
throw Region_conflict();
/* allocate region for attachment */
void *attach_at = nullptr;
if (use_local_addr) {
_map.alloc_addr(size, local_addr).with_result(
if (attr.use_local_addr) {
_map.alloc_addr(size, attr.local_addr).with_result(
[&] (void *ptr) { attach_at = ptr; },
[&] (Range_allocator::Alloc_error error) {
switch (error) {
@@ -420,7 +419,7 @@ Region_map_component::attach(Dataspace_capability ds_cap, size_t size,
* store. The backing store would constrain the mapping size
* anyway such that a higher alignment of the region is of no use.
*/
if (((dsc->map_src_addr() + offset) & ((1UL << align_log2) - 1)) != 0)
if (((dsc->map_src_addr() + attr.offset) & ((1UL << align_log2) - 1)) != 0)
continue;
/* try allocating the align region */
@@ -444,12 +443,19 @@ Region_map_component::attach(Dataspace_capability ds_cap, size_t size,
throw Region_conflict();
}
Rm_region::Attr const region_attr
{
.base = (addr_t)attach_at,
.size = size,
.write = attr.writeable,
.exec = attr.executable,
.off = attr.offset,
.dma = attr.dma,
};
/* store attachment info in meta data */
try {
_map.construct_metadata(attach_at,
(addr_t)attach_at, size,
dsc->writable() && writeable,
*dsc, offset, *this, executable);
_map.construct_metadata(attach_at, *dsc, *this, region_attr);
}
catch (Allocator_avl_tpl<Rm_region>::Assign_metadata_failed) {
error("failed to store attachment info");
@@ -527,6 +533,52 @@ void Region_map_component::unmap_region(addr_t base, size_t size)
}
Region_map::Local_addr
Region_map_component::attach(Dataspace_capability ds_cap, size_t size,
off_t offset, bool use_local_addr,
Region_map::Local_addr local_addr,
bool executable, bool writeable)
{
Attach_attr const attr {
.size = size,
.offset = offset,
.use_local_addr = use_local_addr,
.local_addr = local_addr,
.executable = executable,
.writeable = writeable,
.dma = false,
};
return _attach(ds_cap, attr);
}
Region_map_component::Attach_dma_result
Region_map_component::attach_dma(Dataspace_capability ds_cap, addr_t at)
{
Attach_attr const attr {
.size = 0,
.offset = 0,
.use_local_addr = true,
.local_addr = at,
.executable = false,
.writeable = true,
.dma = true,
};
using Attach_dma_error = Pd_session::Attach_dma_error;
try {
_attach(ds_cap, attr);
return Pd_session::Attach_dma_ok();
}
catch (Invalid_dataspace) { return Attach_dma_error::DENIED; }
catch (Region_conflict) { return Attach_dma_error::DENIED; }
catch (Out_of_ram) { return Attach_dma_error::OUT_OF_RAM; }
catch (Out_of_caps) { return Attach_dma_error::OUT_OF_CAPS; }
}
void Region_map_component::detach(Local_addr local_addr)
{
/* serialize access */


@@ -69,13 +69,21 @@ void Vm_session_component::attach(Dataspace_capability const cap,
[&] (void *) {
Rm_region::Attr const region_attr
{
.base = guest_phys,
.size = attribute.size,
.write = dsc.writable() && attribute.writeable,
.exec = attribute.executable,
.off = (off_t)attribute.offset,
.dma = false,
};
/* store attachment info in meta data */
try {
_map.construct_metadata((void *)guest_phys,
guest_phys, attribute.size,
dsc.writable() && attribute.writeable,
dsc, attribute.offset, *this,
attribute.executable);
dsc, *this, region_attr);
} catch (Allocator_avl_tpl<Rm_region>::Assign_metadata_failed) {
error("failed to store attachment info");
throw Invalid_dataspace();