base_hw & arm: Memory attributes and light caching.

Enable caches. Core is not cached for now, because the kernel
creates the core space without knowing any memory attributes yet.
Cacheable pages are cached write-through without write-allocate.

Caching and write-combining are not yet supported for IOMEM.
This commit is contained in:
Martin Stein 2012-11-05 18:32:04 +01:00 committed by Norman Feske
parent 91c27f3f72
commit 53156de9f4
6 changed files with 120 additions and 48 deletions

View File

@ -56,9 +56,17 @@ namespace Arm
{
struct M : Bitfield<0,1> { }; /* enable MMU */
struct A : Bitfield<1,1> { }; /* strict data addr. alignment on */
struct C : Bitfield<2,1> { }; /* enable L1 data cache */
struct C : Bitfield<2,1> { }; /* enable data cache */
struct Z : Bitfield<11,1> { }; /* enable program flow prediction */
struct I : Bitfield<12,1> { }; /* enable L1 instruction-cache */
struct I : Bitfield<12,1> { }; /* enable instruction caches */
/*
* These must be set all ones
*/
struct Static1 : Bitfield<3,4> { };
struct Static2 : Bitfield<16,1> { };
struct Static3 : Bitfield<18,1> { };
struct Static4 : Bitfield<22,2> { };
struct V : Bitfield<13,1> /* select exception-entry base */
{
@ -80,15 +88,18 @@ namespace Arm
struct Ee : Bitfield<25,1> { }; /* raise CPSR.E on exceptions */
/**
* Value for the switch to virtual mode in kernel
* Common bitfield values for all modes
*/
static access_t init_virt_kernel()
static access_t common()
{
return M::bits(1) |
return Static1::bits(~0) |
Static2::bits(~0) |
Static3::bits(~0) |
Static4::bits(~0) |
A::bits(0) |
C::bits(0) |
C::bits(1) |
Z::bits(0) |
I::bits(0) |
I::bits(1) |
V::bits(V::XFFFF0000) |
Rr::bits(Rr::RANDOM) |
Fi::bits(0) |
@ -96,22 +107,17 @@ namespace Arm
Ee::bits(0);
}
/**
* Value for the switch to virtual mode in kernel
*/
static access_t init_virt_kernel() {
return common() | M::bits(1); }
/**
* Value for the initial kernel entry
*/
static access_t init_phys_kernel()
{
return M::bits(0) |
A::bits(0) |
C::bits(0) |
Z::bits(0) |
I::bits(0) |
V::bits(V::XFFFF0000) |
Rr::bits(Rr::RANDOM) |
Fi::bits(0) |
Ve::bits(Ve::FIXED) |
Ee::bits(0);
}
static access_t init_phys_kernel() {
return common() | M::bits(0); }
/**
* Read register value
@ -126,10 +132,8 @@ namespace Arm
/**
* Write register value
*/
static void write(access_t const v)
{
asm volatile ("mcr p15, 0, %[v], c1, c0, 0" :: [v]"r"(v) : );
}
static void write(access_t const v) {
asm volatile ("mcr p15, 0, %[v], c1, c0, 0" :: [v]"r"(v) : ); }
};
/**

View File

@ -90,6 +90,35 @@ namespace Arm
return Xn::bits(!x) | ap_bits[w][k];
}
/**
* Whether support for caching is already enabled
*
* FIXME: Normally all ARM platforms should support caching,
* but 'base_hw' still lacks support for some of them.
*/
inline bool cache_support();
/**
* Memory region attributes for the translation descriptor 'T'
*/
template <typename T>
static typename T::access_t memory_region_attr(bool const d, bool const c)
{
typedef typename T::Tex Tex;
typedef typename T::C C;
typedef typename T::B B;
/*
* FIXME: upgrade to write-back & write-allocate when !d & c
*/
if(d) return Tex::bits(2) | C::bits(0) | B::bits(0);
if(cache_support()) {
if(c) return Tex::bits(6) | C::bits(1) | B::bits(0);
return Tex::bits(4) | C::bits(0) | B::bits(0);
}
return Tex::bits(4) | C::bits(0) | B::bits(0);
}
/**
* Second level translation table
*
@ -219,6 +248,23 @@ namespace Arm
struct S : Bitfield<10, 1> { }; /* shareable bit */
struct Ng : Bitfield<11, 1> { }; /* not global bit */
struct Pa_31_12 : Bitfield<12, 20> { }; /* physical base */
/**
* Compose descriptor value
*/
static access_t create(bool const w, bool const x,
bool const k, bool const g,
bool const d, bool const c,
addr_t const pa)
{
access_t v = access_permission_bits<Small_page>(w, x, k) |
memory_region_attr<Small_page>(d, c) |
Ng::bits(!g) |
S::bits(0) |
Pa_31_12::masked(pa);
Descriptor::type(v, Descriptor::SMALL_PAGE);
return v;
}
};
/*
@ -292,6 +338,8 @@ namespace Arm
* \param x see 'Section_table::insert_translation'
* \param k see 'Section_table::insert_translation'
* \param g see 'Section_table::insert_translation'
* \param d see 'Section_table::insert_translation'
* \param c see 'Section_table::insert_translation'
*
* This method overrides an existing translation in case
that it spans the same virtual range and is not
@ -300,7 +348,8 @@ namespace Arm
void insert_translation(addr_t const vo, addr_t const pa,
unsigned long const size_log2,
bool const w, bool const x,
bool const k, bool const g)
bool const k, bool const g,
bool const d, bool const c)
{
/* validate virtual address */
unsigned long i;
@ -312,11 +361,8 @@ namespace Arm
if (size_log2 == Small_page::VIRT_SIZE_LOG2)
{
/* compose new descriptor value */
Descriptor::access_t entry =
access_permission_bits<Small_page>(w, x, k) |
Small_page::Ng::bits(!g) |
Small_page::Pa_31_12::masked(pa);
Descriptor::type(entry, Descriptor::SMALL_PAGE);
Descriptor::access_t const entry =
Small_page::create(w, x, k, g, d, c, pa);
/* check if we can write to the targeted entry */
if (Descriptor::valid(_entries[i]))
@ -552,10 +598,13 @@ namespace Arm
*/
static access_t create(bool const w, bool const x,
bool const k, bool const g,
bool const d, bool const c,
addr_t const pa)
{
access_t v = access_permission_bits<Section>(w, x, k) |
memory_region_attr<Section>(d, c) |
Domain::bits(DOMAIN) |
S::bits(0) |
Ng::bits(!g) |
Pa_31_20::masked(pa);
Descriptor::type(v, Descriptor::SECTION);
@ -635,6 +684,8 @@ namespace Arm
* in user mode, while in kernel mode this
* translation grants any type of access.
* \param g if the translation applies to all spaces
* \param d whether 'pa' addresses device IO-memory
* \param c if access shall be cacheable
* \param extra_space If > 0, it must point to a portion of
size-aligned memory space which may be used
* furthermore by the table for the incurring
@ -660,6 +711,7 @@ namespace Arm
unsigned long const size_log2,
bool const w, bool const x,
bool const k, bool const g,
bool const d, bool const c,
ST * const st,
void * const extra_space = 0)
{
@ -698,14 +750,14 @@ namespace Arm
/* insert translation */
pt->insert_translation(vo - Section::Pa_31_20::masked(vo),
pa, size_log2, w, x, k, g);
pa, size_log2, w, x, k, g, d, c);
return 0;
}
if (size_log2 == Section::VIRT_SIZE_LOG2)
{
/* compose section descriptor */
Descriptor::access_t entry = Section::create(w, x, k,
g, pa, st);
Descriptor::access_t const entry =
Section::create(w, x, k, g, d, c, pa, st);
/* check if we can write to the targeted entry */
if (Descriptor::valid(_entries[i]))

View File

@ -55,10 +55,12 @@ namespace Arm_v6
*/
static access_t create(bool const w, bool const x,
bool const k, bool const g,
bool const d, bool const c,
addr_t const pa,
Section_table *)
{
return Arm::Section_table::Section::create(w, x, k, g, pa) |
return Arm::Section_table::Section::create(w, x, k, g,
d, c, pa) |
P::bits(0);
}
};
@ -72,14 +74,20 @@ namespace Arm_v6
unsigned long const size_log2,
bool const w, bool const x,
bool const k, bool const g,
bool const d, bool const c,
void * const extra_space = 0)
{
return Arm::Section_table::
insert_translation<Section_table>(vo, pa, size_log2, w,
x, k, g, this, extra_space);
x, k, g, d, c, this,
extra_space);
}
};
}
bool Arm::cache_support() { return 0; }
#endif /* _INCLUDE__ARM_V6__SECTION_TABLE_H_ */

View File

@ -59,10 +59,11 @@ namespace Arm_v7
*/
static access_t create(bool const w, bool const x,
bool const k, bool const g,
bool const d, bool const c,
addr_t const pa,
Section_table * const st)
{
return Arm::Section_table::Section::create(w, x, k, g, pa) |
return Arm::Section_table::Section::create(w, x, k, g, d, c, pa) |
Ns::bits(!st->secure());
}
};
@ -88,11 +89,13 @@ namespace Arm_v7
unsigned long const size_log2,
bool const w, bool const x,
bool const k, bool const g,
bool const d, bool const c,
void * const extra_space = 0)
{
return Arm::Section_table::
insert_translation<Section_table>(vo, pa, size_log2, w,
x, k, g, this, extra_space);
x, k, g, d, c, this,
extra_space);
}
/***************
@ -103,5 +106,9 @@ namespace Arm_v7
};
}
bool Arm::cache_support() { return 1; }
#endif /* _INCLUDE__ARM_V7__SECTION_TABLE_H_ */

View File

@ -801,11 +801,11 @@ namespace Kernel
Pd()
{
/* try to add translation for mode transition region */
enum Mtc_attributes { W = 1, X = 1, K = 1, G = 1 };
enum Mtc_attributes { W = 1, X = 1, K = 1, G = 1, D = 0, C = 1 };
unsigned const slog2 = insert_translation(mtc()->VIRT_BASE,
mtc()->phys_base(),
mtc()->SIZE_LOG2,
W, X, K, G);
W, X, K, G, D, C);
/* extra space needed to translate mode transition region */
if (slog2)
@ -822,7 +822,7 @@ namespace Kernel
/* translate mode transition region globally */
insert_translation(mtc()->VIRT_BASE, mtc()->phys_base(),
mtc()->SIZE_LOG2, W, X, K, G,
mtc()->SIZE_LOG2, W, X, K, G, D, C,
(void *)aligned_es);
}
}
@ -2109,11 +2109,8 @@ extern "C" void kernel()
SIZE = 1 << SIZE_LOG2,
};
if (mtc()->VIRT_END <= a || mtc()->VIRT_BASE > (a + SIZE - 1))
{
/* map 1:1 with rwx permissions */
if (core()->insert_translation(a, a, SIZE_LOG2, 1, 1, 0, 0))
assert(0);
}
assert(!core()->insert_translation(a, a, SIZE_LOG2, 1, 1, 0, 0, 1, 0));
/* check condition to continue */
addr_t const next_a = a + SIZE;
if (next_a > a) a = next_a;

View File

@ -59,10 +59,14 @@ void Ipc_pager::resolve_and_wait_for_fault()
/* do we need extra space to resolve pagefault? */
Software_tlb * const tlb = _pagefault.software_tlb;
enum Mapping_attributes { X = 1, K = 0, G = 0 };
enum { X = 1, K = 0, G = 0 };
bool c = !_mapping.write_combined && !_mapping.io_mem;
bool d = _mapping.io_mem;
/* insert mapping into TLB */
unsigned sl2 = tlb->insert_translation(_mapping.virt_address,
_mapping.phys_address, _mapping.size_log2,
_mapping.writable, X, K, G);
_mapping.writable, X, K, G, d, c);
if (sl2)
{
/* try to get some natural aligned space */
@ -73,7 +77,7 @@ void Ipc_pager::resolve_and_wait_for_fault()
sl2 = tlb->insert_translation(_mapping.virt_address,
_mapping.phys_address,
_mapping.size_log2,
_mapping.writable, X, K, G, space);
_mapping.writable, X, K, G, d, c, space);
assert(!sl2);
}
/* try to wake up faulter */