pc/platform: enable IOMMU with default mappings

genodelabs/genode#5002
Johannes Schlatow 2023-11-21 18:13:08 +01:00
parent bc09ff7498
commit 3f1e1323f0
7 changed files with 336 additions and 24 deletions

@@ -122,6 +122,10 @@ void Driver::Common::acquire_io_mmu_devices()
io_mmu_dev.add_default_range(range, range.start);
has_reserved_mem = true;
});
if (!has_reserved_mem)
return;
/* enable default mappings for corresponding pci devices */
device.for_pci_config([&] (Device::Pci_config const & cfg) {
io_mmu_dev.enable_default_mappings(
@@ -135,11 +139,11 @@ void Driver::Common::acquire_io_mmu_devices()
bool kernel_iommu_present { false };
_io_mmu_devices.for_each([&] (Io_mmu & io_mmu_dev) {
io_mmu_dev.default_mappings_complete();
if (io_mmu_dev.name() == "kernel_iommu")
kernel_iommu_present = true;
});
/* if kernel implements iommu, instantiate Kernel_iommu */
if (_iommu() && !kernel_iommu_present)
new (_heap) Kernel_iommu(_env, _io_mmu_devices, "kernel_iommu");
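In short, these two hunks establish the bring-up protocol per IOMMU device: identity-map every reserved-memory region, attach each PCI device to the default mappings, and finally signal completion (which, in the Intel implementation below, enables the hardware unit). A condensed sketch of the resulting call order (`range` and `bdf` are placeholders here, not names from the driver):

/* sketch: call order per IOMMU device */
io_mmu_dev.add_default_range(range, range.start);  /* identity-map reserved memory */
io_mmu_dev.enable_default_mappings(bdf);           /* once per PCI device */
io_mmu_dev.default_mappings_complete();            /* after all devices were seen */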

src/drivers/platform/intel/default_mappings.cc (new)

@@ -0,0 +1,96 @@
/*
* \brief Default translation table structures
* \author Johannes Schlatow
* \date 2023-11-15
*/
/*
* Copyright (C) 2023 Genode Labs GmbH
*
* This file is part of the Genode OS framework, which is distributed
* under the terms of the GNU Affero General Public License version 3.
*/
/* local includes */
#include <intel/default_mappings.h>
void Intel::Default_mappings::_insert_context(Managed_root_table & root,
Pci::Bdf bdf,
addr_t paddr,
Domain_id domain_id)
{
using L3_table = Level_3_translation_table;
using L4_table = Level_4_translation_table;
switch (_levels) {
case Translation_levels::LEVEL3:
root.insert_context<L3_table::address_width()>(bdf, paddr, domain_id);
break;
case Translation_levels::LEVEL4:
root.insert_context<L4_table::address_width()>(bdf, paddr, domain_id);
break;
}
}
void Intel::Default_mappings::insert_translation(addr_t va, addr_t pa,
size_t size, Page_flags flags,
uint32_t page_sizes)
{
using L3_table = Level_3_translation_table;
using L4_table = Level_4_translation_table;
switch (_levels)
{
case Translation_levels::LEVEL3:
_table_allocator.with_table<L3_table>(_default_table_phys,
[&] (L3_table & t) {
t.insert_translation(va, pa, size, flags, _table_allocator,
_force_flush, page_sizes);
}, [&] () {});
break;
case Translation_levels::LEVEL4:
_table_allocator.with_table<L4_table>(_default_table_phys,
[&] (L4_table & t) {
t.insert_translation(va, pa, size, flags, _table_allocator,
_force_flush, page_sizes);
}, [&] () {});
break;
}
}
void Intel::Default_mappings::enable_device(Pci::Bdf bdf, Domain_id domain_id)
{
_insert_context(_root_table, bdf, _default_table_phys, domain_id);
}
void Intel::Default_mappings::copy_stage2(Managed_root_table & dst_root,
Pci::Bdf bdf)
{
_root_table.with_stage2_pointer(bdf, [&] (addr_t phys_addr, Domain_id domain) {
_insert_context(dst_root, bdf, phys_addr, domain); });
}
void Intel::Default_mappings::copy_stage2(Managed_root_table & dst_root)
{
_root_table.for_each_stage2_pointer(
[&] (Pci::Bdf bdf, addr_t phys_addr, Domain_id domain) {
_insert_context(dst_root, bdf, phys_addr, domain); });
}
Intel::Default_mappings::~Default_mappings()
{
/* destruct default translation table */
switch (_levels) {
case Translation_levels::LEVEL3:
_table_allocator.destruct<Level_3_translation_table>(_default_table_phys);
break;
case Translation_levels::LEVEL4:
_table_allocator.destruct<Level_4_translation_table>(_default_table_phys);
break;
}
}

src/drivers/platform/intel/default_mappings.h (new)

@@ -0,0 +1,89 @@
/*
* \brief Default translation table structures
* \author Johannes Schlatow
* \date 2023-11-15
*/
/*
* Copyright (C) 2023 Genode Labs GmbH
*
* This file is part of the Genode OS framework, which is distributed
* under the terms of the GNU Affero General Public License version 3.
*/
#ifndef _SRC__DRIVERS__PLATFORM__INTEL__DEFAULT_MAPPINGS_H_
#define _SRC__DRIVERS__PLATFORM__INTEL__DEFAULT_MAPPINGS_H_
/* local includes */
#include <intel/managed_root_table.h>
#include <intel/report_helper.h>
#include <intel/page_table.h>
namespace Intel {
using namespace Genode;
class Default_mappings;
}
class Intel::Default_mappings
{
public:
using Allocator = Managed_root_table::Allocator;
enum Translation_levels { LEVEL3, LEVEL4 };
private:
Allocator & _table_allocator;
Managed_root_table _root_table;
bool _force_flush;
Translation_levels _levels;
addr_t _default_table_phys;
static addr_t _construct_default_table(Allocator & alloc,
Translation_levels levels)
{
switch (levels) {
case Translation_levels::LEVEL3:
return alloc.construct<Level_3_translation_table>();
case Translation_levels::LEVEL4:
return alloc.construct<Level_4_translation_table>();
}
return 0;
}
void _insert_context(Managed_root_table &, Pci::Bdf, addr_t, Domain_id);
public:
void insert_translation(addr_t, addr_t, size_t, Page_flags, uint32_t);
void enable_device(Pci::Bdf, Domain_id);
void copy_stage2(Managed_root_table &, Pci::Bdf);
void copy_stage2(Managed_root_table &);
Default_mappings(Env & env,
Allocator & table_allocator,
Translation_table_registry & registry,
bool force_flush,
Translation_levels levels)
: _table_allocator(table_allocator),
_root_table(env, table_allocator, registry, force_flush),
_force_flush(force_flush),
_levels(levels),
_default_table_phys(_construct_default_table(_table_allocator, _levels))
{ }
~Default_mappings();
};
#endif /* _SRC__DRIVERS__PLATFORM__INTEL__DEFAULT_MAPPINGS_H_ */
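Judging from this interface alone, a typical lifecycle could look as follows. This is a sketch, not code from the commit: `env`, `alloc`, `registry`, `root_table`, and `default_domain` are assumed to exist, and the addresses are illustrative.

/* construct the default mappings with a 4-level default translation table */
Intel::Default_mappings mappings { env, alloc, registry,
                                   /* force_flush */ false,
                                   Intel::Default_mappings::Translation_levels::LEVEL4 };

/* identity-map a reserved region into the default table (4K pages only) */
Page_flags const flags { RW, NO_EXEC, USER, NO_GLOBAL, RAM, Genode::CACHED };
mappings.insert_translation(0xfeb00000, 0xfeb00000, 0x10000, flags, 1u << 12);

/* let a device translate via the default table */
mappings.enable_device(Pci::Bdf { 0, 2, 0 }, default_domain);

/* transfer all context entries into the hardware-visible root table */
mappings.copy_stage2(root_table);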

src/drivers/platform/intel/io_mmu.cc

@@ -27,12 +27,25 @@ template <typename TABLE>
void Intel::Io_mmu::Domain<TABLE>::enable_pci_device(Io_mem_dataspace_capability const,
Pci::Bdf const & bdf)
{
_intel_iommu.root_table().insert_context<TABLE::address_width()>(
bdf, _translation_table_phys, _domain_id);
Domain_id cur_domain =
_intel_iommu.root_table().insert_context<TABLE::address_width()>(
bdf, _translation_table_phys, _domain_id);
/* invalidate translation caches only if failed requests are cached */
if (_intel_iommu.caching_mode())
_intel_iommu.invalidate_all(_domain_id, Pci::Bdf::rid(bdf));
/**
* We need to invalidate the context-cache entry for this device and
* IOTLB entries for the previously used domain id.
*
* If the IOMMU caches unresolved requests, we must invalidate those. In
* legacy translation mode, these are cached with domain id 0. This is
* currently implemented as global invalidation, though.
*
* Some older architectures also require explicit write-buffer flushing
* unless invalidation takes place.
*/
if (cur_domain.valid())
_intel_iommu.invalidate_all(cur_domain, Pci::Bdf::rid(bdf));
else if (_intel_iommu.caching_mode())
_intel_iommu.invalidate_context(Domain_id(), Pci::Bdf::rid(bdf));
else
_intel_iommu.flush_write_buffer();
}
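The reasoning in the comment above reduces to three mutually exclusive cases. As a standalone distillation (simplified types, not the driver's API):

/* sketch: which invalidation the above logic selects */
enum class Invalidation { all, context_only, write_buffer_flush };

static Invalidation select(bool had_valid_domain, bool caching_mode)
{
	if (had_valid_domain) return Invalidation::all;           /* stale context and IOTLB entries */
	if (caching_mode)     return Invalidation::context_only;  /* failed requests cached (domain id 0) */
	return Invalidation::write_buffer_flush;                  /* older hardware without caching mode */
}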
@@ -43,6 +56,9 @@ void Intel::Io_mmu::Domain<TABLE>::disable_pci_device(Pci::Bdf const bdf)
{
_intel_iommu.root_table().remove_context(bdf, _translation_table_phys);
/* lookup default mappings and insert instead */
_intel_iommu.apply_default_mappings(bdf);
_intel_iommu.invalidate_all(_domain_id);
}
@@ -147,7 +163,7 @@ void Intel::Io_mmu::invalidate_iotlb(Domain_id domain_id, addr_t, size_t)
}
/**
* Clear context cache and IOTLB.
* Clear context cache
*
* By default, we perform a global invalidation. When provided with a valid
* Domain_id, a domain-specific invalidation is conducted. When a rid is
@@ -155,7 +171,7 @@ void Intel::Io_mmu::invalidate_iotlb(Domain_id domain_id, addr_t, size_t)
*
* See Table 25 for required invalidation scopes.
*/
void Intel::Io_mmu::invalidate_all(Domain_id domain_id, Pci::rid_t rid)
void Intel::Io_mmu::invalidate_context(Domain_id domain_id, Pci::rid_t rid)
{
/**
* We are using the register-based invalidation interface for the
@@ -196,6 +212,12 @@ void Intel::Io_mmu::invalidate_all(Domain_id domain_id, Pci::rid_t rid)
else if (_verbose && actual_scope < requested_scope)
warning("Performed context-cache invalidation with different granularity ",
"(requested=", requested_scope, ", actual=", actual_scope, ")");
}
void Intel::Io_mmu::invalidate_all(Domain_id domain_id, Pci::rid_t rid)
{
invalidate_context(domain_id, rid);
/* XXX clear PASID cache if we ever switch from legacy mode translation */
@@ -337,6 +359,48 @@ void Intel::Io_mmu::generate(Xml_generator & xml)
}
void Intel::Io_mmu::add_default_range(Range const & range, addr_t paddr)
{
addr_t const vaddr { range.start };
size_t const size { range.size };
Page_flags flags { RW, NO_EXEC, USER, NO_GLOBAL,
RAM, Genode::CACHED };
try {
_default_mappings.insert_translation(vaddr, paddr, size, flags,
supported_page_sizes());
} catch (...) { /* catch any double insertions */ }
}
void Intel::Io_mmu::default_mappings_complete()
{
/* skip if already enabled */
if (read<Global_status::Enabled>())
return;
/* caches must be cleared if Esrtps is not set (see 6.6) */
if (!read<Capability::Esrtps>())
invalidate_all();
/* insert contexts into managed root table */
_default_mappings.copy_stage2(_managed_root_table);
/* set root table address */
write<Root_table_address>(
Root_table_address::Address::masked(_managed_root_table.phys_addr()));
/* issue set root table pointer command */
_global_command<Global_command::Srtp>(1);
/* enable IOMMU */
_global_command<Global_command::Enable>(1);
log("enabled IOMMU ", name(), " with default mappings");
}
Intel::Io_mmu::Io_mmu(Env & env,
Io_mmu_devices & io_mmu_devices,
Device::Name const & name,
@@ -347,6 +411,8 @@ Intel::Io_mmu::Io_mmu(Env & env,
Driver::Io_mmu(io_mmu_devices, name),
_env(env),
_managed_root_table(_env, table_allocator, *this, !coherent_page_walk()),
_default_mappings(_env, table_allocator, *this, !coherent_page_walk(),
_sagaw_to_levels()),
_domain_allocator(_max_domains()-1)
{
if (_broken_device()) {
@@ -359,10 +425,7 @@ Intel::Io_mmu::Io_mmu(Env & env,
return;
}
/* caches must be cleared if Esrtps is not set (see 6.6) */
if (!read<Capability::Esrtps>())
invalidate_all();
else if (read<Global_status::Enabled>()) {
if (read<Global_status::Enabled>()) {
error("IOMMU already enabled");
return;
}
@@ -383,11 +446,4 @@ Intel::Io_mmu::Io_mmu(Env & env,
write<Fault_event_control::Mask>(0);
}
}
/* set root table address */
write<Root_table_address>(
Root_table_address::Address::masked(_managed_root_table.phys_addr()));
/* issue set root table pointer command */
_global_command<Global_command::Srtp>(1);
}

src/drivers/platform/intel/io_mmu.h

@@ -30,6 +30,7 @@
#include <intel/report_helper.h>
#include <intel/page_table.h>
#include <intel/domain_allocator.h>
#include <intel/default_mappings.h>
#include <expanding_page_table_allocator.h>
namespace Intel {
@@ -176,11 +177,16 @@ class Intel::Io_mmu : private Attached_mmio,
* controlling all hardware units. Otherwise, the session component will
* create separate Domain objects that receive identical modification
* instructions.
*
* The default root table holds default mappings (e.g. for reserved memory)
* that need to be accessible even if devices have not been acquired yet.
*/
bool _verbose { false };
Managed_root_table _managed_root_table;
Default_mappings _default_mappings;
Report_helper _report_helper { *this };
Domain_allocator _domain_allocator;
Domain_id _default_domain { _domain_allocator.alloc() };
Constructible<Irq_connection> _fault_irq { };
Signal_handler<Io_mmu> _fault_handler {
_env.ep(), *this, &Io_mmu::_handle_faults };
@@ -467,8 +473,9 @@ class Intel::Io_mmu : private Attached_mmio,
* Io_mmu interface
*/
void _enable() override {
_global_command<Global_command::Enable>(1);
void _enable() override
{
/* IOMMU gets enabled already after default mappings are complete */
if (_verbose)
log("enabled IOMMU ", name());
@@ -476,10 +483,13 @@ class Intel::Io_mmu : private Attached_mmio,
void _disable() override
{
_global_command<Global_command::Enable>(0);
/**
* Ideally, we would block all DMA here; however, we must preserve
* some default mappings to allow access to reserved memory.
*/
if (_verbose)
log("disabled IOMMU ", name());
log("no enabled device for IOMMU ", name(), " anymore");
}
/**
@@ -497,6 +507,19 @@ class Intel::Io_mmu : private Attached_mmio,
read<Extended_capability>() == (Extended_capability::access_t)0;
}
Default_mappings::Translation_levels _sagaw_to_levels()
{
using Levels = Default_mappings::Translation_levels;
if (read<Capability::Sagaw_4_level>())
return Levels::LEVEL4;
if (!read<Capability::Sagaw_3_level>() && read<Capability::Sagaw_5_level>())
error("IOMMU requires 5-level translation tables (not implemented)");
return Levels::LEVEL3;
}
const uint32_t _supported_page_sizes {
read<Capability::Page_1GB>() << 30 |
read<Capability::Page_2MB>() << 21 | 1u << 12 };
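The mask sets one bit per supported page size at that size's log2 position: bit 12 (4 KiB) is always set, while bits 21 (2 MiB) and 30 (1 GiB) mirror the corresponding capability bits. Testing a candidate size against such a mask is then a single bit operation (illustrative values):

/* sketch: 1 GiB, 2 MiB, and 4 KiB pages supported */
uint32_t const page_sizes  = (1u << 30) | (1u << 21) | (1u << 12);
bool     const supports_2m = page_sizes & (1u << 21);   /* true */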
@@ -508,6 +531,7 @@ class Intel::Io_mmu : private Attached_mmio,
void generate(Xml_generator &) override;
void invalidate_iotlb(Domain_id, addr_t, size_t);
void invalidate_context(Domain_id domain, Pci::rid_t);
void invalidate_all(Domain_id domain = Domain_id { Domain_id::INVALID }, Pci::rid_t = 0);
bool coherent_page_walk() const { return read<Extended_capability::Page_walk_coherency>(); }
@@ -516,6 +540,16 @@ class Intel::Io_mmu : private Attached_mmio,
void flush_write_buffer();
/**
* Io_mmu interface for default mappings
*/
void add_default_range(Range const &, addr_t) override;
void default_mappings_complete() override;
void enable_default_mappings(Pci::Bdf bdf) override {
_default_mappings.enable_device(bdf, _default_domain); }
void apply_default_mappings(Pci::Bdf bdf) {
_default_mappings.copy_stage2(_managed_root_table, bdf); }
/**
* Io_mmu interface
@@ -559,7 +593,11 @@ class Intel::Io_mmu : private Attached_mmio,
Context_table_allocator & table_allocator,
unsigned irq_number);
~Io_mmu() { _destroy_domains(); }
~Io_mmu()
{
_domain_allocator.free(_default_domain);
_destroy_domains();
}
};

src/drivers/platform/intel/managed_root_table.h

@@ -82,6 +82,34 @@ class Intel::Managed_root_table : public Registered_translation_table
addr_t phys_addr() { return _root_table_phys; }
template <typename FN>
void for_each_stage2_pointer(FN && fn)
{
Root_table::for_each([&] (uint8_t bus) {
_with_context_table(bus, [&] (Context_table & ctx) {
Pci::rid_t start_rid = Pci::Bdf::rid(Pci::Bdf { bus, 0, 0 });
Context_table::for_each(start_rid, [&] (Pci::rid_t rid) {
if (!ctx.present(rid))
return;
fn(Pci::Bdf::bdf(rid), ctx.stage2_pointer(rid), ctx.domain(rid));
});
});
});
}
template <typename FN>
void with_stage2_pointer(Pci::Bdf bdf, FN && fn)
{
_with_context_table(bdf.bus, [&] (Context_table & ctx) {
Pci::rid_t rid = Pci::Bdf::rid(bdf);
if (ctx.present(rid))
fn(ctx.stage2_pointer(rid), ctx.domain(rid));
});
}
/* add second-stage table */
template <unsigned ADDRESS_WIDTH>
Domain_id insert_context(Pci::Bdf bdf, addr_t phys_addr, Domain_id domain)

@@ -8,6 +8,7 @@ SRC_CC += intel/context_table.cc
SRC_CC += intel/managed_root_table.cc
SRC_CC += intel/io_mmu.cc
SRC_CC += intel/page_table.cc
SRC_CC += intel/default_mappings.cc
INC_DIR += $(PRG_DIR)/../../