dde_linux: x86 requirements for SMP

Prepare shadow implementations of spinlocks, page-table defines, and
irq_stack assembler macros so that SMP can be enabled on x86/PC.

Ref genodelabs/genode#4562
Author:    Stefan Kalkowski, 2022-07-19 11:03:00 +02:00
Committer: Christian Helmuth
Commit:    ec1b060fc5 (parent c898a4770b)

3 changed files with 55 additions and 12 deletions
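
For context, the "shadow" mechanism referenced above lets dde_linux override individual kernel definitions: a shadow header includes the original kernel header and then redefines selected pieces, as the irq_stack.h file below does. A minimal sketch of the pattern, using a placeholder header name and macro that are not taken from this commit:

    /* shadow header: pull in the original kernel header first ... */
    #include_next <asm/example.h>

    /* ... then override the definition that must differ under lx_emul */
    #undef  EXAMPLE_MACRO
    #define EXAMPLE_MACRO(x) /* SMP-aware replacement goes here */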

New file: shadow copy of asm/irq_stack.h

@@ -0,0 +1,39 @@
/**
* \brief Shadow copy of asm/irq_stack.h
* \author Stefan Kalkowski
* \date 2022-06-29
*/

#ifndef _LX_EMUL__SHADOW__ARCH__X86__INCLUDE__ASM__IRQ_STACK_H_
#define _LX_EMUL__SHADOW__ARCH__X86__INCLUDE__ASM__IRQ_STACK_H_

#include_next <asm/irq_stack.h>

#undef call_on_stack
#undef ASM_CALL_ARG0

#define call_on_stack(stack, func, asm_call, argconstr...)            \
{                                                                      \
	register void *tos asm("r11");                                 \
	                                                               \
	tos = ((void *)(stack));                                       \
	                                                               \
	asm_inline volatile(                                           \
	"movq %%rsp, (%[tos])  \n"                                     \
	"movq %[tos], %%rsp    \n"                                     \
	                                                               \
	asm_call                                                       \
	                                                               \
	"popq %%rsp            \n"                                     \
	                                                               \
	: "+r" (tos), ASM_CALL_CONSTRAINT                              \
	: [__func] "r" (func), [tos] "r" (tos) argconstr               \
	: "cc", "rax", "rcx", "rdx", "rsi", "rdi", "r8", "r9", "r10",  \
	  "memory"                                                     \
	);                                                             \
}

#define ASM_CALL_ARG0          \
	"call *%P[__func]  \n"

#endif /* _LX_EMUL__SHADOW__ARCH__X86__INCLUDE__ASM__IRQ_STACK_H_ */
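
For orientation, a usage sketch of the redefined macro, assuming a hypothetical zero-argument handler and stack pointer (none of the names below come from the commit): ASM_CALL_ARG0 supplies the call instruction for a function without arguments, and call_on_stack() switches %rsp to the given stack around that call and restores it afterwards.

    #include <asm/irq_stack.h>

    static void my_irq_handler(void)              /* hypothetical handler */
    {
    	/* work that must execute on the dedicated stack */
    }

    static void run_on_dedicated_stack(void *stack_top)   /* hypothetical helper */
    {
    	/* run my_irq_handler with %rsp pointing into stack_top */
    	call_on_stack(stack_top, my_irq_handler, ASM_CALL_ARG0);
    }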

File: shadow asm/pgtable.h (x86)

@@ -53,5 +53,7 @@ static inline int pud_none(pud_t pud)
#endif

#define pmd_page(pmd) NULL

#endif /* _ASM__X86__PGTABLE_H */
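
As an illustration only, code compiled against this shadow define simply receives a null page from pmd_page(); the caller below is hypothetical and not part of the commit:

    static struct page *lookup_page(pmd_t pmd)   /* hypothetical caller */
    {
    	return pmd_page(pmd);   /* always NULL under the shadow define above */
    }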

File: shadow spinlock implementation

@@ -36,12 +36,6 @@ unsigned long __lockfunc _raw_spin_lock_irqsave(raw_spinlock_t * lock)
}

void __lockfunc _raw_spin_unlock(raw_spinlock_t * lock)
{
	arch_spin_unlock(&lock->raw_lock);
}

void __lockfunc _raw_spin_unlock_irqrestore(raw_spinlock_t * lock,
                                            unsigned long flags)
{
@@ -56,12 +50,6 @@ void __lockfunc _raw_spin_lock_irq(raw_spinlock_t * lock)
}

void __lockfunc _raw_spin_unlock_irq(raw_spinlock_t * lock)
{
	_raw_spin_unlock_irqrestore(lock, 0);
}

int __lockfunc _raw_spin_trylock(raw_spinlock_t * lock)
{
	return arch_spin_trylock(&lock->raw_lock);
@@ -74,7 +62,21 @@ void __lockfunc _raw_write_lock(rwlock_t * lock)
}

#ifndef CONFIG_X86

void __lockfunc __raw_spin_unlock(raw_spinlock_t * lock)
{
	arch_spin_unlock(&lock->raw_lock);
}

void __lockfunc _raw_spin_unlock_irq(raw_spinlock_t * lock)
{
	_raw_spin_unlock_irqrestore(lock, 0);
}

void __lockfunc _raw_write_unlock(rwlock_t * lock)
{
	arch_write_unlock(&(lock)->raw_lock);
}

#endif
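
To illustrate how the shadowed functions are reached, a minimal sketch of ordinary kernel-style locking code; my_lock, my_count, and update_count are hypothetical names. The spin_lock_irqsave()/spin_unlock_irqrestore() wrappers expand into the _raw_spin_* functions implemented in this shadow file.

    #include <linux/spinlock.h>

    static DEFINE_SPINLOCK(my_lock);   /* hypothetical lock */
    static unsigned long my_count;     /* hypothetical shared state */

    static void update_count(void)
    {
    	unsigned long flags;

    	spin_lock_irqsave(&my_lock, flags);      /* ends up in _raw_spin_lock_irqsave() */
    	my_count++;
    	spin_unlock_irqrestore(&my_lock, flags); /* ends up in _raw_spin_unlock_irqrestore() */
    }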