diff --git a/repos/dde_linux/src/include/lx_emul/shadow/arch/x86/include/asm/irq_stack.h b/repos/dde_linux/src/include/lx_emul/shadow/arch/x86/include/asm/irq_stack.h
new file mode 100644
index 0000000000..26f321cd7e
--- /dev/null
+++ b/repos/dde_linux/src/include/lx_emul/shadow/arch/x86/include/asm/irq_stack.h
@@ -0,0 +1,39 @@
+/**
+ * \brief Shadow copy of asm/irq_stack.h
+ * \author Stefan Kalkowski
+ * \date 2022-06-29
+ */
+
+#ifndef _LX_EMUL__SHADOW__ARCH__X86__INCLUDE__ASM__IRQ_STACK_H_
+#define _LX_EMUL__SHADOW__ARCH__X86__INCLUDE__ASM__IRQ_STACK_H_
+
+#include_next <asm/irq_stack.h>
+
+#undef call_on_stack
+#undef ASM_CALL_ARG0
+
+#define call_on_stack(stack, func, asm_call, argconstr...)            \
+{                                                                     \
+	register void *tos asm("r11");                                \
+	                                                              \
+	tos = ((void *)(stack));                                      \
+	                                                              \
+	asm_inline volatile(                                          \
+		"movq %%rsp, (%[tos])      \n"                        \
+		"movq %[tos], %%rsp        \n"                        \
+		                                                      \
+		asm_call                                              \
+		                                                      \
+		"popq %%rsp                \n"                        \
+		                                                      \
+		: "+r" (tos), ASM_CALL_CONSTRAINT                     \
+		: [__func] "r" (func), [tos] "r" (tos) argconstr      \
+		: "cc", "rax", "rcx", "rdx", "rsi", "rdi", "r8", "r9", "r10", \
+		  "memory"                                            \
+	);                                                            \
+}
+
+#define ASM_CALL_ARG0 \
+	"call *%P[__func] \n"
+
+#endif /* _LX_EMUL__SHADOW__ARCH__X86__INCLUDE__ASM__IRQ_STACK_H_ */
diff --git a/repos/dde_linux/src/include/lx_emul/shadow/arch/x86/include/asm/pgtable.h b/repos/dde_linux/src/include/lx_emul/shadow/arch/x86/include/asm/pgtable.h
index a1c3db06c3..08608cb80d 100644
--- a/repos/dde_linux/src/include/lx_emul/shadow/arch/x86/include/asm/pgtable.h
+++ b/repos/dde_linux/src/include/lx_emul/shadow/arch/x86/include/asm/pgtable.h
@@ -53,5 +53,7 @@ static inline int pud_none(pud_t pud)
 
 #endif
 
 
+#define pmd_page(pmd) NULL
+
 #endif /*_ASM__X86__PGTABLE_H */
diff --git a/repos/dde_linux/src/lib/lx_emul/shadow/kernel/locking/spinlock.c b/repos/dde_linux/src/lib/lx_emul/shadow/kernel/locking/spinlock.c
index 2e3104d54c..e3c7fcf73a 100644
--- a/repos/dde_linux/src/lib/lx_emul/shadow/kernel/locking/spinlock.c
+++ b/repos/dde_linux/src/lib/lx_emul/shadow/kernel/locking/spinlock.c
@@ -36,12 +36,6 @@ unsigned long __lockfunc _raw_spin_lock_irqsave(raw_spinlock_t * lock)
 }
 
 
-void __lockfunc _raw_spin_unlock(raw_spinlock_t * lock)
-{
-	arch_spin_unlock(&lock->raw_lock);
-}
-
-
 void __lockfunc _raw_spin_unlock_irqrestore(raw_spinlock_t * lock,
                                             unsigned long flags)
 {
@@ -56,12 +50,6 @@ void __lockfunc _raw_spin_lock_irq(raw_spinlock_t * lock)
 }
 
 
-void __lockfunc _raw_spin_unlock_irq(raw_spinlock_t * lock)
-{
-	_raw_spin_unlock_irqrestore(lock, 0);
-}
-
-
 int __lockfunc _raw_spin_trylock(raw_spinlock_t * lock)
 {
 	return arch_spin_trylock(&lock->raw_lock);
@@ -74,7 +62,21 @@ void __lockfunc _raw_write_lock(rwlock_t * lock)
 }
 
 
+#ifndef CONFIG_X86
+void __lockfunc __raw_spin_unlock(raw_spinlock_t * lock)
+{
+	arch_spin_unlock(&lock->raw_lock);
+}
+
+
+void __lockfunc _raw_spin_unlock_irq(raw_spinlock_t * lock)
+{
+	_raw_spin_unlock_irqrestore(lock, 0);
+}
+
+
 void __lockfunc _raw_write_unlock(rwlock_t * lock)
 {
 	arch_write_unlock(&(lock)->raw_lock);
 }
+#endif