base: Pd_session::dma_addr, Pd_session::attach_dma

This patch enhances the PD-session interface with the support needed for
user-level device drivers performing DMA. Both RPC functions are
intended for direct use by the platform driver only. If invoked for
PDs that lack the managing-system role, the operations have no effect.

The 'dma_addr()' RPC function allows the platform driver to request the
DMA address of a given RAM dataspace. It is meant to replace the
'Dataspace::phys_addr' RPC function.

The 'attach_dma' RPC function adds the given dataspace to the device
PD's I/O page table. It replaces the former heuristics of marking DMA
buffers as uncached RAM on x86.

With this patch, the UNCACHED attribute of RAM dataspaces is no longer
used to distinguish DMA buffers from regular RAM dataspaces.

Issue #2243
This commit is contained in:
Norman Feske 2022-02-01 15:17:29 +01:00
parent db3a647c6d
commit e4f62380d7
16 changed files with 264 additions and 66 deletions

View File

@ -66,6 +66,11 @@ class Genode::Region_map_component : public Rpc_object<Region_map>,
Rm_dataspace_component *dataspace_component() { return nullptr; }
void address_space(Platform_pd *) { }
using Attach_dma_result = Pd_session::Attach_dma_result;
Attach_dma_result attach_dma(Dataspace_capability, addr_t) {
return Pd_session::Attach_dma_error::DENIED; };
};

View File

@ -95,6 +95,11 @@ struct Genode::Pd_session_client : Rpc_client<Pd_session>
Managing_system_state managing_system(Managing_system_state const & state) override {
return call<Rpc_managing_system>(state); }
addr_t dma_addr(Ram_dataspace_capability ds) override { return call<Rpc_dma_addr>(ds); }
Attach_dma_result attach_dma(Dataspace_capability ds, addr_t at) override {
return call<Rpc_attach_dma>(ds, at); }
};
#endif /* _INCLUDE__PD_SESSION__CLIENT_H_ */

View File

@ -37,6 +37,21 @@ struct Genode::Pd_connection : Connection<Pd_session>, Pd_session_client
RAM_QUOTA, CAP_QUOTA, label, space)),
Pd_session_client(cap())
{ }
struct Device_pd { };
/**
* Constructor used for creating device protection domains
*/
Pd_connection(Env &env, Device_pd)
:
Connection<Pd_session>(env, session(env.parent(),
"ram_quota=%u, cap_quota=%u, "
"label=\"device PD\", virt_space=%u, "
"managing_system=yes",
RAM_QUOTA, CAP_QUOTA, UNCONSTRAIN)),
Pd_session_client(cap())
{ }
};
#endif /* _INCLUDE__PD_SESSION__CONNECTION_H_ */

View File

@ -312,6 +312,37 @@ struct Genode::Pd_session : Session, Ram_allocator
virtual Managing_system_state managing_system(Managing_system_state const &) = 0;
/*******************************************
** Support for user-level device drivers **
*******************************************/
/**
* Return start address of the dataspace to be used for DMA transfers
*
* This function supports the use of RAM dataspaces as DMA
* buffers. On systems without IOMMU, device drivers need to know the
* physical address of DMA buffers for issuing DMA transfers.
*
* \return DMA address, or 0 if the dataspace is invalid or the
* PD lacks the permission to obtain the information
*/
virtual addr_t dma_addr(Ram_dataspace_capability) = 0;
enum class Attach_dma_error { OUT_OF_RAM, OUT_OF_CAPS, DENIED };
struct Attach_dma_ok { };
using Attach_dma_result = Attempt<Attach_dma_ok, Attach_dma_error>;
/**
* Attach dataspace to I/O page table at specified address 'at'
*
* This operation is reserved for privileged system-management components
* like the platform driver to assign DMA buffers to device protection
* domains. The attach can be reverted by using 'address_space().detach()'.
*/
virtual Attach_dma_result attach_dma(Dataspace_capability, addr_t at) = 0;
/*********************
** RPC declaration **
*********************/
@ -361,6 +392,9 @@ struct Genode::Pd_session : Session, Ram_allocator
GENODE_RPC(Rpc_managing_system, Managing_system_state, managing_system,
Managing_system_state const &);
GENODE_RPC(Rpc_dma_addr, addr_t, dma_addr, Ram_dataspace_capability);
GENODE_RPC(Rpc_attach_dma, Attach_dma_result, attach_dma,
Dataspace_capability, addr_t);
GENODE_RPC_INTERFACE(Rpc_assign_parent, Rpc_assign_pci, Rpc_map,
Rpc_alloc_signal_source, Rpc_free_signal_source,
@ -370,7 +404,8 @@ struct Genode::Pd_session : Session, Ram_allocator
Rpc_transfer_cap_quota, Rpc_cap_quota, Rpc_used_caps,
Rpc_try_alloc, Rpc_free,
Rpc_transfer_ram_quota, Rpc_ram_quota, Rpc_used_ram,
Rpc_native_pd, Rpc_managing_system);
Rpc_native_pd, Rpc_managing_system,
Rpc_dma_addr, Rpc_attach_dma);
};
#endif /* _INCLUDE__PD_SESSION__PD_SESSION_H_ */

View File

@ -197,6 +197,7 @@ class Genode::Pd_session_component : public Session_object<Pd_session>
void map(addr_t, addr_t) override;
/****************
** Signalling **
****************/
@ -334,6 +335,15 @@ class Genode::Pd_session_component : public Session_object<Pd_session>
*******************************/
Managing_system_state managing_system(Managing_system_state const &) override;
/*******************************************
** Support for user-level device drivers **
*******************************************/
addr_t dma_addr(Ram_dataspace_capability) override;
Attach_dma_result attach_dma(Dataspace_capability, addr_t) override;
};
#endif /* _CORE__INCLUDE__PD_SESSION_COMPONENT_H_ */

View File

@ -101,6 +101,8 @@ class Genode::Ram_dataspace_factory : public Ram_allocator,
static_cap_cast<Dataspace>(ds->cap())));
}
addr_t dataspace_dma_addr(Ram_dataspace_capability);
/*****************************
** Ram_allocator interface **

View File

@ -28,6 +28,7 @@
#include <base/heap.h>
#include <util/list.h>
#include <util/fifo.h>
#include <pd_session/pd_session.h>
/* core includes */
#include <platform.h>
@ -70,39 +71,40 @@ class Genode::Region_map_detach : Genode::Interface
*/
class Genode::Rm_region : public List<Rm_region>::Element
{
private:
public:
addr_t const _base;
size_t const _size;
bool const _write;
bool const _exec;
off_t const _off;
struct Attr
{
addr_t base;
size_t size;
bool write;
bool exec;
off_t off;
bool dma;
};
private:
Dataspace_component &_dsc;
Region_map_detach &_rm;
Attr const _attr;
public:
Rm_region(addr_t base, size_t size, bool write,
Dataspace_component &dsc, off_t offset,
Region_map_detach &rm, bool exec)
Rm_region(Dataspace_component &dsc, Region_map_detach &rm, Attr attr)
:
_base(base), _size(size), _write(write), _exec(exec), _off(offset),
_dsc(dsc), _rm(rm)
_dsc(dsc), _rm(rm), _attr(attr)
{ }
/***************
** Accessors **
***************/
addr_t base() const { return _base; }
size_t size() const { return _size; }
bool write() const { return _write; }
bool executable() const { return _exec; }
Dataspace_component &dataspace() const { return _dsc; }
off_t offset() const { return _off; }
Region_map_detach &rm() const { return _rm; }
addr_t base() const { return _attr.base; }
size_t size() const { return _attr.size; }
bool write() const { return _attr.write; }
bool executable() const { return _attr.exec; }
off_t offset() const { return _attr.off; }
bool dma() const { return _attr.dma; }
Dataspace_component &dataspace() const { return _dsc; }
Region_map_detach &rm() const { return _rm; }
};
@ -358,6 +360,19 @@ class Genode::Region_map_component : private Weak_object<Region_map_component>,
*/
addr_t _core_local_addr(Rm_region & r);
struct Attach_attr
{
size_t size;
off_t offset;
bool use_local_addr;
addr_t local_addr;
bool executable;
bool writeable;
bool dma;
};
Local_addr _attach(Dataspace_capability, Attach_attr);
public:
/*
@ -451,6 +466,11 @@ class Genode::Region_map_component : private Weak_object<Region_map_component>,
Dataspace_component &dsc,
addr_t, addr_t);
using Attach_dma_result = Pd_session::Attach_dma_result;
Attach_dma_result attach_dma(Dataspace_capability, addr_t);
/**************************
** Region map interface **
**************************/

View File

@ -176,3 +176,27 @@ void Pd_session_component::transfer_quota(Capability<Pd_session> pd_cap,
});
}
addr_t Pd_session_component::dma_addr(Ram_dataspace_capability ds_cap)
{
if (_managing_system == Managing_system::DENIED)
return 0;
if (this->cap() == ds_cap)
return 0;
return _ram_ds_factory.dataspace_dma_addr(ds_cap);
}
Pd_session::Attach_dma_result
Pd_session_component::attach_dma(Dataspace_capability ds_cap, addr_t at)
{
if (_managing_system == Managing_system::DENIED)
return Attach_dma_error::DENIED;
if (this->cap() == ds_cap)
return Attach_dma_error::DENIED;
return _address_space.attach_dma(ds_cap, at);
}

View File

@ -185,3 +185,14 @@ size_t Ram_dataspace_factory::dataspace_size(Ram_dataspace_capability ds_cap) co
return result;
}
addr_t Ram_dataspace_factory::dataspace_dma_addr(Ram_dataspace_capability ds_cap)
{
addr_t result = 0;
_ep.apply(ds_cap, [&] (Dataspace_component *c) {
if (c && c->owner(*this))
result = c->phys_addr(); });
return result;
}

View File

@ -344,7 +344,7 @@ Mapping Region_map_component::create_map_item(Region_map_component *,
.size_log2 = map_size_log2,
.cached = dataspace.cacheability() == CACHED,
.io_mem = dataspace.io_mem(),
.dma_buffer = dataspace.cacheability() != CACHED,
.dma_buffer = region.dma(),
.write_combined = dataspace.cacheability() == WRITE_COMBINED,
.writeable = region.write() && dataspace.writable(),
.executable = region.executable() };
@ -352,16 +352,13 @@ Mapping Region_map_component::create_map_item(Region_map_component *,
Region_map::Local_addr
Region_map_component::attach(Dataspace_capability ds_cap, size_t size,
off_t offset, bool use_local_addr,
Region_map::Local_addr local_addr,
bool executable, bool writeable)
Region_map_component::_attach(Dataspace_capability ds_cap, Attach_attr const attr)
{
/* serialize access */
Mutex::Guard lock_guard(_mutex);
/* offset must be positive and page-aligned */
if (offset < 0 || align_addr(offset, get_page_size_log2()) != offset)
if (attr.offset < 0 || align_addr(attr.offset, get_page_size_log2()) != attr.offset)
throw Region_conflict();
auto lambda = [&] (Dataspace_component *dsc) {
@ -374,24 +371,26 @@ Region_map_component::attach(Dataspace_capability ds_cap, size_t size,
unsigned const min_align_log2 = get_page_size_log2();
size_t const off = offset;
size_t const off = attr.offset;
if (off >= dsc->size())
throw Region_conflict();
size_t size = attr.size;
if (!size)
size = dsc->size() - offset;
size = dsc->size() - attr.offset;
/* work with page granularity */
size = align_addr(size, min_align_log2);
/* deny creation of regions larger then the actual dataspace */
if (dsc->size() < size + offset)
if (dsc->size() < size + attr.offset)
throw Region_conflict();
/* allocate region for attachment */
void *attach_at = nullptr;
if (use_local_addr) {
_map.alloc_addr(size, local_addr).with_result(
if (attr.use_local_addr) {
_map.alloc_addr(size, attr.local_addr).with_result(
[&] (void *ptr) { attach_at = ptr; },
[&] (Range_allocator::Alloc_error error) {
switch (error) {
@ -420,7 +419,7 @@ Region_map_component::attach(Dataspace_capability ds_cap, size_t size,
* store. The backing store would constrain the mapping size
* anyway such that a higher alignment of the region is of no use.
*/
if (((dsc->map_src_addr() + offset) & ((1UL << align_log2) - 1)) != 0)
if (((dsc->map_src_addr() + attr.offset) & ((1UL << align_log2) - 1)) != 0)
continue;
/* try allocating the align region */
@ -444,12 +443,19 @@ Region_map_component::attach(Dataspace_capability ds_cap, size_t size,
throw Region_conflict();
}
Rm_region::Attr const region_attr
{
.base = (addr_t)attach_at,
.size = size,
.write = attr.writeable,
.exec = attr.executable,
.off = attr.offset,
.dma = attr.dma,
};
/* store attachment info in meta data */
try {
_map.construct_metadata(attach_at,
(addr_t)attach_at, size,
dsc->writable() && writeable,
*dsc, offset, *this, executable);
_map.construct_metadata(attach_at, *dsc, *this, region_attr);
}
catch (Allocator_avl_tpl<Rm_region>::Assign_metadata_failed) {
error("failed to store attachment info");
@ -527,6 +533,52 @@ void Region_map_component::unmap_region(addr_t base, size_t size)
}
Region_map::Local_addr
Region_map_component::attach(Dataspace_capability ds_cap, size_t size,
off_t offset, bool use_local_addr,
Region_map::Local_addr local_addr,
bool executable, bool writeable)
{
Attach_attr const attr {
.size = size,
.offset = offset,
.use_local_addr = use_local_addr,
.local_addr = local_addr,
.executable = executable,
.writeable = writeable,
.dma = false,
};
return _attach(ds_cap, attr);
}
Region_map_component::Attach_dma_result
Region_map_component::attach_dma(Dataspace_capability ds_cap, addr_t at)
{
Attach_attr const attr {
.size = 0,
.offset = 0,
.use_local_addr = true,
.local_addr = at,
.executable = false,
.writeable = true,
.dma = true,
};
using Attach_dma_error = Pd_session::Attach_dma_error;
try {
_attach(ds_cap, attr);
return Pd_session::Attach_dma_ok();
}
catch (Invalid_dataspace) { return Attach_dma_error::DENIED; }
catch (Region_conflict) { return Attach_dma_error::DENIED; }
catch (Out_of_ram) { return Attach_dma_error::OUT_OF_RAM; }
catch (Out_of_caps) { return Attach_dma_error::OUT_OF_CAPS; }
}
void Region_map_component::detach(Local_addr local_addr)
{
/* serialize access */

View File

@ -69,13 +69,21 @@ void Vm_session_component::attach(Dataspace_capability const cap,
[&] (void *) {
Rm_region::Attr const region_attr
{
.base = guest_phys,
.size = attribute.size,
.write = dsc.writable() && attribute.writeable,
.exec = attribute.executable,
.off = (off_t)attribute.offset,
.dma = false,
};
/* store attachment info in meta data */
try {
_map.construct_metadata((void *)guest_phys,
guest_phys, attribute.size,
dsc.writable() && attribute.writeable,
dsc, attribute.offset, *this,
attribute.executable);
dsc, *this, region_attr);
} catch (Allocator_avl_tpl<Rm_region>::Assign_metadata_failed) {
error("failed to store attachment info");
throw Invalid_dataspace();

View File

@ -31,21 +31,28 @@ void Platform::Device_pd::attach_dma_mem(Dataspace_capability ds_cap,
addr_t page = ~0UL;
try {
page = _address_space.attach_at(ds_cap, dma_addr);
/* trigger eager mapping of memory */
_pd.map(page, size);
}
catch (Out_of_ram) { throw; }
catch (Out_of_caps) { throw; }
catch (Region_map::Region_conflict) {
/*
* DMA memory already attached before.
*/
page = dma_addr;
} catch (...) {
error(_label, ": attach_at or map failed");
}
using Attach_dma_error = Pd_session::Attach_dma_error;
_pd.attach_dma(ds_cap, dma_addr).with_result(
[&] (Pd_session::Attach_dma_ok) {
page = dma_addr;
/* trigger eager mapping of memory */
_pd.map(page, size);
},
[&] (Attach_dma_error e) {
switch (e) {
case Attach_dma_error::OUT_OF_RAM: throw Out_of_ram();
case Attach_dma_error::OUT_OF_CAPS: throw Out_of_caps();
case Attach_dma_error::DENIED:
warning("Pd_session::attach_dma denied");
page = dma_addr;
break;
}
});
/* sanity check */
if ((page == ~0UL) || (page != dma_addr)) {

View File

@ -106,7 +106,7 @@ class Platform::Device_pd
Ram_quota_guard &ram_guard,
Cap_quota_guard &cap_guard)
:
_pd(env, "device PD", Pd_connection::Virt_space::UNCONSTRAIN),
_pd(env, Pd_connection::Device_pd()),
_label(label),
_address_space(env, _pd, ram_guard, cap_guard)
{

View File

@ -836,7 +836,7 @@ class Platform::Session_component : public Rpc_object<Session>
throw Out_of_ram();
Ram_dataspace_capability ram_cap = _env_ram.alloc(size, cache);
addr_t const dma_addr = Dataspace_client(ram_cap).phys_addr();
addr_t const dma_addr = _env.pd().dma_addr(ram_cap);
if (!ram_cap.valid())
return ram_cap;
@ -868,7 +868,7 @@ class Platform::Session_component : public Rpc_object<Session>
if (!ram_cap.valid() || !_owned(ram_cap))
return 0;
return Dataspace_client(ram_cap).phys_addr();
return _env.pd().dma_addr(ram_cap);
}
Device_capability device(Device_name const &name) override;

View File

@ -225,10 +225,8 @@ Genode::addr_t Session_component::dma_addr(Ram_dataspace_capability ram_cap)
return ret;
_buffer_registry.for_each([&] (Dma_buffer & buf) {
if (buf.cap.local_name() == ram_cap.local_name()) {
Dataspace_client dsc(buf.cap);
ret = dsc.phys_addr();
} });
if (buf.cap.local_name() == ram_cap.local_name())
ret = _env.pd().dma_addr(buf.cap); });
return ret;
}

View File

@ -148,6 +148,12 @@ class Gdb_monitor::Pd_session_component : public Rpc_object<Pd_session>
Managing_system_state
managing_system(Managing_system_state const & state) override {
return _pd.managing_system(state); }
addr_t dma_addr(Ram_dataspace_capability ds) override {
return _pd.dma_addr(ds); }
Attach_dma_result attach_dma(Dataspace_capability ds, addr_t at) override {
return _pd.attach_dma(ds, at); }
};
#endif /* _PD_SESSION_COMPONENT_H_ */