platform/pc: move register-based invalidation

This is in preparation for implementing the queued-invalidation
interface.

genodelabs/genode#5066
Johannes Schlatow 2024-03-21 09:49:53 +01:00 committed by Christian Helmuth
parent c767c2b129
commit 4790bc03fc
5 changed files with 276 additions and 164 deletions

intel/invalidator.cc (new file):

@@ -0,0 +1,120 @@
/*
 * \brief  Intel IOMMU invalidation interfaces
 * \author Johannes Schlatow
 * \date   2024-03-21
 */

/*
 * Copyright (C) 2024 Genode Labs GmbH
 *
 * This file is part of the Genode OS framework, which is distributed
 * under the terms of the GNU Affero General Public License version 3.
 */

/* local includes */
#include <intel/invalidator.h>
#include <intel/io_mmu.h>


/**
 * Clear IOTLB.
 *
 * By default, we perform a global invalidation. When provided with a valid
 * Domain_id, a domain-specific invalidation is conducted.
 *
 * See Table 25 for required invalidation scopes.
 */
void Intel::Register_invalidator::invalidate_iotlb(Domain_id domain_id)
{
	using Context_command = Context_mmio::Context_command;
	using Iotlb           = Iotlb_mmio::Iotlb;

	unsigned requested_scope = Context_command::Cirg::GLOBAL;
	if (domain_id.valid())
		requested_scope = Context_command::Cirg::DOMAIN;

	/* wait for ongoing invalidation request to be completed */
	while (_iotlb_mmio.read<Iotlb::Invalidate>());

	/* invalidate IOTLB */
	_iotlb_mmio.write<Iotlb>(Iotlb::Invalidate::bits(1) |
	                         Iotlb::Iirg::bits(requested_scope) |
	                         Iotlb::Dr::bits(1) | Iotlb::Dw::bits(1) |
	                         Iotlb::Did::bits(domain_id.value));

	/* wait for completion */
	while (_iotlb_mmio.read<Iotlb::Invalidate>());

	/* check for errors */
	unsigned actual_scope = _iotlb_mmio.read<Iotlb::Iaig>();
	if (!actual_scope)
		error("IOTLB invalidation failed (scope=", requested_scope, ")");
	else if (_verbose && actual_scope < requested_scope)
		warning("Performed IOTLB invalidation with different granularity ",
		        "(requested=", requested_scope, ", actual=", actual_scope, ")");

	/*
	 * Note: At the moment we have no practical benefit from implementing
	 * page-selective invalidation, because
	 * a) When adding a DMA buffer range, invalidation is only required if
	 *    caching mode is set. This is not supposed to occur on real hardware
	 *    but only in emulators.
	 * b) Removal of DMA buffer ranges typically occurs only when a domain is
	 *    destructed. In this case, invalidation is not issued for individual
	 *    buffers but for the entire domain once all buffer ranges have been
	 *    removed.
	 * c) We do not use the register-based invalidation interface if queued
	 *    invalidation is available.
	 */
}


/**
 * Clear context cache
 *
 * By default, we perform a global invalidation. When provided with a valid
 * Domain_id, a domain-specific invalidation is conducted. When a rid is
 * provided, a device-specific invalidation is done.
 *
 * See Table 25 for required invalidation scopes.
 */
void Intel::Register_invalidator::invalidate_context(Domain_id domain_id, Pci::rid_t rid)
{
	using Context_command = Context_mmio::Context_command;

	/* make sure that there is no context invalidation ongoing */
	while (_context_mmio.read<Context_command::Invalidate>());

	unsigned requested_scope = Context_command::Cirg::GLOBAL;
	if (domain_id.valid())
		requested_scope = Context_command::Cirg::DOMAIN;

	if (rid != 0)
		requested_scope = Context_command::Cirg::DEVICE;

	/* clear context cache */
	_context_mmio.write<Context_command>(Context_command::Invalidate::bits(1) |
	                                     Context_command::Cirg::bits(requested_scope) |
	                                     Context_command::Sid::bits(rid) |
	                                     Context_command::Did::bits(domain_id.value));

	/* wait for completion */
	while (_context_mmio.read<Context_command::Invalidate>());

	/* check for errors */
	unsigned actual_scope = _context_mmio.read<Context_command::Caig>();
	if (!actual_scope)
		error("Context-cache invalidation failed (scope=", requested_scope, ")");
	else if (_verbose && actual_scope < requested_scope)
		warning("Performed context-cache invalidation with different granularity ",
		        "(requested=", requested_scope, ", actual=", actual_scope, ")");
}


void Intel::Register_invalidator::invalidate_all(Domain_id domain_id, Pci::rid_t rid)
{
	invalidate_context(domain_id, rid);

	/* XXX clear PASID cache if we ever switch from legacy mode translation */
	invalidate_iotlb(domain_id);
}
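
Note that invalidate_iotlb reuses the Context_command::Cirg constants for its
requested scope: Cirg and Iirg share the same granularity encoding (GLOBAL=1,
DOMAIN=2, DEVICE=3). For illustration, the 64-bit value composed for a
domain-selective IOTLB invalidation can be checked by hand with the following
standalone sketch (not part of the commit; domain id 5 is an arbitrary example):

    #include <cstdint>
    #include <cstdio>

    int main()
    {
        /* IOTLB register value for a domain-selective invalidation of domain 5 */
        uint64_t v = (1ULL << 63)   /* Invalidate               (bit  63)    */
                   | (2ULL << 60)   /* Iirg = DOMAIN            (bits 61:60) */
                   | (1ULL << 49)   /* Dr: drain read requests  (bit  49)    */
                   | (1ULL << 48)   /* Dw: drain write requests (bit  48)    */
                   | (5ULL << 32);  /* Did = 5                  (bits 47:32) */

        printf("0x%016llx\n", (unsigned long long)v); /* 0xa003000500000000 */
    }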

intel/invalidator.h (new file):

@@ -0,0 +1,130 @@
/*
 * \brief  Intel IOMMU invalidation interfaces
 * \author Johannes Schlatow
 * \date   2024-03-21
 */

/*
 * Copyright (C) 2024 Genode Labs GmbH
 *
 * This file is part of the Genode OS framework, which is distributed
 * under the terms of the GNU Affero General Public License version 3.
 */

#ifndef _SRC__DRIVERS__PLATFORM__INTEL__INVALIDATOR_H_
#define _SRC__DRIVERS__PLATFORM__INTEL__INVALIDATOR_H_

/* Genode includes */
#include <util/mmio.h>
#include <pci/types.h>

/* local includes */
#include <intel/domain_allocator.h>

namespace Intel {
	using namespace Genode;

	class Io_mmu; /* forward declaration */

	class Invalidator;
	class Register_invalidator;
}


class Intel::Invalidator
{
	public:

		virtual ~Invalidator() { }

		virtual void invalidate_iotlb(Domain_id) = 0;
		virtual void invalidate_context(Domain_id domain, Pci::rid_t) = 0;
		virtual void invalidate_all(Domain_id domain = Domain_id { Domain_id::INVALID },
		                            Pci::rid_t = 0) = 0;
};


class Intel::Register_invalidator : public Invalidator
{
	private:

		struct Context_mmio : Mmio<8>
		{
			struct Context_command : Register<0x0, 64>
			{
				struct Invalidate : Bitfield<63,1> { };

				/* invalidation request granularity */
				struct Cirg : Bitfield<61,2>
				{
					enum {
						GLOBAL = 0x1,
						DOMAIN = 0x2,
						DEVICE = 0x3
					};
				};

				/* actual invalidation granularity */
				struct Caig : Bitfield<59,2> { };

				/* source id */
				struct Sid : Bitfield<16,16> { };

				/* domain id */
				struct Did : Bitfield<0,16> { };
			};

			Context_mmio(Byte_range_ptr const &range)
			: Mmio<8>(range)
			{ }
		} _context_mmio;

		struct Iotlb_mmio : Mmio<16>
		{
			struct Iotlb : Register<0x8, 64>
			{
				struct Invalidate : Bitfield<63,1> { };

				/* IOTLB invalidation request granularity */
				struct Iirg : Bitfield<60,2>
				{
					enum {
						GLOBAL = 0x1,
						DOMAIN = 0x2,
						DEVICE = 0x3
					};
				};

				/* IOTLB actual invalidation granularity */
				struct Iaig : Bitfield<57,2> { };

				/* drain reads/writes */
				struct Dr : Bitfield<49,1> { };
				struct Dw : Bitfield<48,1> { };

				/* domain id */
				struct Did : Bitfield<32,16> { };
			};

			Iotlb_mmio(Byte_range_ptr const &range)
			: Mmio<16>(range)
			{ }
		} _iotlb_mmio;

		bool _verbose;

	public:

		void invalidate_iotlb(Domain_id) override;
		void invalidate_context(Domain_id domain, Pci::rid_t) override;
		void invalidate_all(Domain_id domain = Domain_id { Domain_id::INVALID },
		                    Pci::rid_t = 0) override;

		Register_invalidator(addr_t context_reg_base, addr_t iotlb_reg_base, bool verbose)
		: _context_mmio({(char*)context_reg_base, 8}),
		  _iotlb_mmio  ({(char*)iotlb_reg_base,  16}),
		  _verbose(verbose)
		{ }
};

#endif /* _SRC__DRIVERS__PLATFORM__INTEL__INVALIDATOR_H_ */
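
The abstract Invalidator base class is the hook for the queued-invalidation
interface announced in the commit message: a second implementation merely
overrides the three virtual methods, and call sites remain untouched. A
hypothetical skeleton of such a sibling class (name and comments are
illustrative only, not part of this commit):

    /* hypothetical sketch of the planned queued-invalidation sibling */
    class Intel::Queued_invalidator : public Invalidator
    {
        public:

            /*
             * Instead of polling per-register completion bits, a queued
             * implementation appends descriptors to an in-memory
             * invalidation queue and synchronizes via a wait descriptor.
             */
            void invalidate_iotlb  (Domain_id)             override;
            void invalidate_context(Domain_id, Pci::rid_t) override;
            void invalidate_all    (Domain_id domain = Domain_id { Domain_id::INVALID },
                                    Pci::rid_t = 0)        override;
    };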

intel/io_mmu.cc:

@@ -43,9 +43,9 @@ void Intel::Io_mmu::Domain<TABLE>::enable_pci_device(Io_mem_dataspace_capability
 	 * unless invalidation takes place.
 	 */
 	if (cur_domain.valid())
-		_intel_iommu.invalidate_all(cur_domain, Pci::Bdf::rid(bdf));
+		_intel_iommu.invalidator().invalidate_all(cur_domain, Pci::Bdf::rid(bdf));
 	else if (_intel_iommu.caching_mode())
-		_intel_iommu.invalidate_context(Domain_id(), Pci::Bdf::rid(bdf));
+		_intel_iommu.invalidator().invalidate_context(Domain_id(), Pci::Bdf::rid(bdf));
 	else
 		_intel_iommu.flush_write_buffer();
 }
@@ -59,7 +59,7 @@ void Intel::Io_mmu::Domain<TABLE>::disable_pci_device(Pci::Bdf const & bdf)
 	/* lookup default mappings and insert instead */
 	_intel_iommu.apply_default_mappings(bdf);
 
-	_intel_iommu.invalidate_all(_domain_id);
+	_intel_iommu.invalidator().invalidate_all(_domain_id);
 }
@@ -97,7 +97,7 @@ void Intel::Io_mmu::Domain<TABLE>::add_range(Range const & range,
 	/* only invalidate iotlb if failed requests are cached */
 	if (_intel_iommu.caching_mode())
-		_intel_iommu.invalidate_iotlb(_domain_id, vaddr, size);
+		_intel_iommu.invalidator().invalidate_iotlb(_domain_id);
 	else
 		_intel_iommu.flush_write_buffer();
 }
@@ -111,7 +111,7 @@ void Intel::Io_mmu::Domain<TABLE>::remove_range(Range const & range)
 	                !_intel_iommu.coherent_page_walk());
 
 	if (!_skip_invalidation)
-		_intel_iommu.invalidate_iotlb(_domain_id, range.start, range.size);
+		_intel_iommu.invalidator().invalidate_iotlb(_domain_id);
 }
@@ -136,104 +136,9 @@ void Intel::Io_mmu::flush_write_buffer()
 }
 
 
-/**
- * Clear IOTLB.
- *
- * By default, we perform a global invalidation. When provided with a valid
- * Domain_id, a domain-specific invalidation is conducted. If provided with
- * a DMA address and size, a page-selective invalidation is performed.
- *
- * See Table 25 for required invalidation scopes.
- */
-void Intel::Io_mmu::invalidate_iotlb(Domain_id domain_id, addr_t, size_t)
+Intel::Invalidator & Intel::Io_mmu::invalidator()
 {
-	unsigned requested_scope = Context_command::Cirg::GLOBAL;
-	if (domain_id.valid())
-		requested_scope = Context_command::Cirg::DOMAIN;
-
-	/* wait for ongoing invalidation request to be completed */
-	while (Iotlb::Invalidate::get(read_iotlb_reg()));
-
-	/* invalidate IOTLB */
-	write_iotlb_reg(Iotlb::Invalidate::bits(1) |
-	                Iotlb::Iirg::bits(requested_scope) |
-	                Iotlb::Dr::bits(1) | Iotlb::Dw::bits(1) |
-	                Iotlb::Did::bits(domain_id.value));
-
-	/* wait for completion */
-	while (Iotlb::Invalidate::get(read_iotlb_reg()));
-
-	/* check for errors */
-	unsigned actual_scope = Iotlb::Iaig::get(read_iotlb_reg());
-	if (!actual_scope)
-		error("IOTLB invalidation failed (scope=", requested_scope, ")");
-	else if (_verbose && actual_scope < requested_scope)
-		warning("Performed IOTLB invalidation with different granularity ",
-		        "(requested=", requested_scope, ", actual=", actual_scope, ")");
-
-	/* XXX implement page-selective-within-domain IOTLB invalidation */
-}
-
-
-/**
- * Clear context cache
- *
- * By default, we perform a global invalidation. When provided with a valid
- * Domain_id, a domain-specific invalidation is conducted. When a rid is
- * provided, a device-specific invalidation is done.
- *
- * See Table 25 for required invalidation scopes.
- */
-void Intel::Io_mmu::invalidate_context(Domain_id domain_id, Pci::rid_t rid)
-{
-	/**
-	 * We are using the register-based invalidation interface for the
-	 * moment. This is only supported in legacy mode and for major
-	 * architecture version 5 and lower (cf. 6.5).
-	 */
-	if (read<Version::Major>() > 5) {
-		error("Unable to invalidate caches: Register-based invalidation only ",
-		      "supported in architecture versions 5 and lower");
-		return;
-	}
-
-	/* make sure that there is no context invalidation ongoing */
-	while (read<Context_command::Invalidate>());
-
-	unsigned requested_scope = Context_command::Cirg::GLOBAL;
-	if (domain_id.valid())
-		requested_scope = Context_command::Cirg::DOMAIN;
-
-	if (rid != 0)
-		requested_scope = Context_command::Cirg::DEVICE;
-
-	/* clear context cache */
-	write<Context_command>(Context_command::Invalidate::bits(1) |
-	                       Context_command::Cirg::bits(requested_scope) |
-	                       Context_command::Sid::bits(rid) |
-	                       Context_command::Did::bits(domain_id.value));
-
-	/* wait for completion */
-	while (read<Context_command::Invalidate>());
-
-	/* check for errors */
-	unsigned actual_scope = read<Context_command::Caig>();
-	if (!actual_scope)
-		error("Context-cache invalidation failed (scope=", requested_scope, ")");
-	else if (_verbose && actual_scope < requested_scope)
-		warning("Performed context-cache invalidation with different granularity ",
-		        "(requested=", requested_scope, ", actual=", actual_scope, ")");
-}
-
-
-void Intel::Io_mmu::invalidate_all(Domain_id domain_id, Pci::rid_t rid)
-{
-	invalidate_context(domain_id, rid);
-
-	/* XXX clear PASID cache if we ever switch from legacy mode translation */
-	invalidate_iotlb(domain_id, 0, 0);
+	return *_register_invalidator;
 }
@@ -406,7 +311,7 @@ void Intel::Io_mmu::default_mappings_complete()
 	/* caches must be cleared if Esrtps is not set (see 6.6) */
 	if (!read<Capability::Esrtps>())
-		invalidate_all();
+		invalidator().invalidate_all();
 
 	/* enable IOMMU */
 	if (!read<Global_status::Enabled>())
@@ -443,7 +348,7 @@ void Intel::Io_mmu::resume()
 	_global_command<Global_command::Srtp>(1);
 
 	if (!read<Capability::Esrtps>())
-		invalidate_all();
+		invalidator().invalidate_all();
 
 	/* enable IOMMU */
 	if (!read<Global_status::Enabled>())
@@ -481,6 +386,15 @@ Intel::Io_mmu::Io_mmu(Env & env,
 		/* disable queued invalidation interface */
 		if (read<Global_status::Qies>())
 			_global_command<Global_command::Qie>(false);
 
+		if (read<Version::Major>() > 5) {
+			error("Register-based invalidation only ",
+			      "supported in architecture versions 5 and lower");
+		}
+
+		addr_t context_reg_base = base() + 0x28;
+		addr_t iotlb_reg_base   = base() + 8*_offset<Extended_capability::Iro>();
+		_register_invalidator.construct(context_reg_base, iotlb_reg_base, _verbose);
 	}
 
 	/* enable fault event interrupts (if not already enabled by kernel) */
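
After this change, every call site obtains the invalidation interface via the
invalidator() accessor instead of invoking Io_mmu methods directly. A minimal
caller-side sketch mirroring the patched call sites (the detach() helper is
hypothetical, assuming a constructed Intel::Io_mmu):

    /* hypothetical helper, not part of the commit */
    static void detach(Intel::Io_mmu &iommu, Intel::Domain_id domain, Pci::rid_t rid)
    {
        /* drop the device's context-cache entry, then the domain's IOTLB entries */
        iommu.invalidator().invalidate_all(domain, rid);
    }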

intel/io_mmu.h:

@@ -31,6 +31,7 @@
 #include <intel/page_table.h>
 #include <intel/domain_allocator.h>
 #include <intel/default_mappings.h>
+#include <intel/invalidator.h>
 #include <expanding_page_table_allocator.h>
 
 namespace Intel {
@@ -50,6 +51,8 @@ class Intel::Io_mmu : private Attached_mmio<0x800>,
 {
 	public:
 
+		friend class Register_invalidator;
+
 		/* Use derived domain class to store reference to buffer registry */
 		template <typename TABLE>
 		class Domain : public Driver::Io_mmu::Domain,
@@ -92,7 +95,7 @@ class Intel::Io_mmu : private Attached_mmio<0x800>,
 					_domain._skip_invalidation = false;
 
 					if (_requires_invalidation)
-						_domain._intel_iommu.invalidate_all(_domain._domain_id);
+						_domain._intel_iommu.invalidator().invalidate_all(_domain._domain_id);
 					else
 						_domain._intel_iommu.flush_write_buffer();
 				}
@@ -191,6 +194,8 @@ class Intel::Io_mmu : private Attached_mmio<0x800>,
 		Signal_handler<Io_mmu> _fault_handler {
 			_env.ep(), *this, &Io_mmu::_handle_faults };
 
+		Constructible<Register_invalidator> _register_invalidator { };
+
 		/**
 		 * Registers
 		 */
@@ -287,30 +292,6 @@ class Intel::Io_mmu : private Attached_mmio<0x800>,
 			struct Address : Bitfield<12,52> { };
 		};
 
-		struct Context_command : Register<0x28, 64>
-		{
-			struct Invalidate : Bitfield<63,1> { };
-
-			/* invalidation request granularity */
-			struct Cirg : Bitfield<61,2>
-			{
-				enum {
-					GLOBAL = 0x1,
-					DOMAIN = 0x2,
-					DEVICE = 0x3
-				};
-			};
-
-			/* actual invalidation granularity */
-			struct Caig : Bitfield<59,2> { };
-
-			/* source id */
-			struct Sid : Bitfield<16,16> { };
-
-			/* domain id */
-			struct Did : Bitfield<0,16> { };
-		};
-
 		struct Fault_status : Register<0x34, 32>
 		{
 			/* fault record index */
@@ -383,32 +364,6 @@ class Intel::Io_mmu : private Attached_mmio<0x800>,
 			struct Info : Bitfield<12,52> { };
 		};
 
-		struct Iotlb : Genode::Register<64>
-		{
-			struct Invalidate : Bitfield<63,1> { };
-
-			/* IOTLB invalidation request granularity */
-			struct Iirg : Bitfield<60,2>
-			{
-				enum {
-					GLOBAL = 0x1,
-					DOMAIN = 0x2,
-					DEVICE = 0x3
-				};
-			};
-
-			/* IOTLB actual invalidation granularity */
-			struct Iaig : Bitfield<57,2> { };
-
-			/* drain reads/writes */
-			struct Dr : Bitfield<49,1> { };
-			struct Dw : Bitfield<48,1> { };
-
-			/* domain id */
-			struct Did : Bitfield<32,16> { };
-		};
-
 		/* saved registers during suspend */
 		Fault_event_control::access_t _s3_fec    { };
 		Fault_event_data   ::access_t _s3_fedata { };
@@ -461,12 +416,6 @@ class Intel::Io_mmu : private Attached_mmio<0x800>,
 		All_registers::access_t read_offset_register(unsigned index) {
 			return read<All_registers>(_offset<OFFSET_BITFIELD>() + index); }
 
-		void write_iotlb_reg(Iotlb::access_t v) {
-			write_offset_register<Extended_capability::Iro>(1, v); }
-
-		Iotlb::access_t read_iotlb_reg() {
-			return read_offset_register<Extended_capability::Iro>(1); }
-
 		template <typename REG>
 		REG::access_t read_fault_record(unsigned index) {
 			return read_offset_register<Capability::Fro>(index*2 + REG::offset()); }
@@ -539,9 +488,7 @@ class Intel::Io_mmu : private Attached_mmio<0x800>,
 		void generate(Xml_generator &) override;
 
-		void invalidate_iotlb(Domain_id, addr_t, size_t);
-		void invalidate_context(Domain_id domain, Pci::rid_t);
-		void invalidate_all(Domain_id domain = Domain_id { Domain_id::INVALID }, Pci::rid_t = 0);
+		Invalidator & invalidator();
 
 		bool coherent_page_walk() const { return read<Extended_capability::Page_walk_coherency>(); }
 		bool caching_mode()       const { return read<Capability::Caching_mode>(); }
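
The _register_invalidator member is wrapped in a Constructible because the
IOTLB register base is only known once the constructor has evaluated
Extended_capability::Iro. A minimal sketch of this deferred-construction
pattern (Invalidator_demo and Inner are illustrative names, assuming Genode's
util/reconstructible.h):

    #include <util/reconstructible.h>

    struct Invalidator_demo
    {
        struct Inner { Inner(unsigned long /* reg_base */) { } };

        /* remains unconstructed until the register base is known */
        Genode::Constructible<Inner> _inner { };

        Invalidator_demo(unsigned long reg_base) { _inner.construct(reg_base); }
    };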

build rules:

@@ -9,6 +9,7 @@ SRC_CC += intel/managed_root_table.cc
 SRC_CC += intel/io_mmu.cc
 SRC_CC += intel/page_table.cc
 SRC_CC += intel/default_mappings.cc
+SRC_CC += intel/invalidator.cc
 SRC_CC += ioapic.cc
 
 INC_DIR += $(PRG_DIR)/../../