pc: enable SMP and softirq/tasklets in lx_emul

Enables symmetric multi-processor (SMP) support in the Linux kernel
configuration used as the basis for the PC driver ports, matching how
x86 drivers are commonly configured today.
Moreover, this commit uses the original kernel sources for the softirq/tasklet
implementation to get rid of the insufficient shadow implementation
in the lx_emul sources.

Ref genodelabs/genode#4562
Stefan Kalkowski
2022-07-19 11:06:52 +02:00
committed by Christian Helmuth
parent ec1b060fc5
commit 596c20c199
35 changed files with 485 additions and 165 deletions
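
For context: lx_emul "shadow" files override selected definitions of an
original kernel file by sitting earlier on the include path and pulling in
the genuine header via include_next, as the diffs below illustrate. A minimal
sketch of the pattern, with a made-up header name and macro:

/*
 * Illustrative shadow of a hypothetical <linux/example.h>;
 * not part of this commit.
 */
#ifndef _SHADOW__LINUX__EXAMPLE_H_
#define _SHADOW__LINUX__EXAMPLE_H_

/* pull in the original header first */
#include_next <linux/example.h>

/* then override one definition, keeping everything else intact */
#undef  EXAMPLE_MACRO
#define EXAMPLE_MACRO 1

#endif /* _SHADOW__LINUX__EXAMPLE_H_ */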

View File

@@ -11,6 +11,7 @@
#undef call_on_stack
#undef ASM_CALL_ARG0
#undef do_softirq_own_stack
#define call_on_stack(stack, func, asm_call, argconstr...) \
{ \
@@ -36,4 +37,9 @@
#define ASM_CALL_ARG0 \
        "call *%P[__func] \n"

#define do_softirq_own_stack() \
{ \
        __do_softirq(); \
}
#endif /* _LX_EMUL__SHADOW__ARCH__X86__INCLUDE__ASM__IRQ_STACK_H_ */
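
The shadow above reduces do_softirq_own_stack() to a direct __do_softirq()
call because lx_emul provides no dedicated softirq stack to switch to. For
orientation, the generic caller looks roughly like this (simplified from the
kernel's kernel/softirq.c; the ksoftirqd check of recent kernels is omitted):

asmlinkage __visible void do_softirq(void)
{
        __u32 pending;
        unsigned long flags;

        /* softirqs must not recurse into interrupt context */
        if (in_interrupt())
                return;

        local_irq_save(flags);

        pending = local_softirq_pending();
        if (pending)
                do_softirq_own_stack(); /* expands to plain __do_softirq() here */

        local_irq_restore(flags);
}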

View File

@@ -0,0 +1,21 @@
/*
 * \brief  Shadow copy of asm/percpu.h
 * \author Stefan Kalkowski
 * \date   2022-06-29
 */
#ifndef _LX_EMUL__SHADOW__ARCH__X86__INCLUDE__ASM__PERCPU_H_
#define _LX_EMUL__SHADOW__ARCH__X86__INCLUDE__ASM__PERCPU_H_
#include_next <asm/percpu.h>
/*
 * The original implementation uses the gs or fs register to hold
 * a CPU offset, which is not set correctly in our use case,
 * where we use only one CPU anyway.
 */
#undef __percpu_prefix
#define __percpu_prefix ""
#endif /* _LX_EMUL__SHADOW__ARCH__X86__INCLUDE__ASM__PERCPU_H_ */
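
With the empty prefix, the x86 per-CPU accessors lose their gs/fs segment
override and turn into ordinary memory accesses to the CPU-0 instance of each
variable. A hedged sketch of the effect, using a made-up variable name:

#include <linux/percpu.h>

/* 'my_counter' is illustrative, not from this commit */
DEFINE_PER_CPU(unsigned long, my_counter);

unsigned long read_counter(void)
{
        /*
         * original prefix (x86_64): roughly  mov %gs:my_counter, %rax
         * empty prefix:             roughly  mov my_counter, %rax
         */
        return this_cpu_read(my_counter);
}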

View File

@@ -0,0 +1,25 @@
/*
 * \brief  Replaces arch/x86/kernel/irq_32.c
 * \author Stefan Kalkowski
 * \date   2022-07-20
 */

/*
 * Copyright (C) 2022 Genode Labs GmbH
 *
 * This file is distributed under the terms of the GNU General Public License
 * version 2.
 */
#include <linux/interrupt.h>
void do_softirq_own_stack(void)
{
        /*
         * In contrast to the original implementation, we stay on the
         * current stack, because we have no separate IRQ stack to
         * switch to anyway.
         */
        __do_softirq();
}
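
With the original softirq machinery now compiled in, deferred-work APIs
behave as in a vanilla kernel. A sketch of the typical driver pattern that
ends up in __do_softirq() above; all names are hypothetical:

#include <linux/interrupt.h>

struct my_dev {
        struct tasklet_struct rx_task;   /* hypothetical driver state */
};

static void rx_task_fn(struct tasklet_struct *t)
{
        struct my_dev *dev = from_tasklet(dev, t, rx_task);

        /* runs in softirq context, dispatched by __do_softirq() */
        (void)dev;
}

static irqreturn_t my_irq_handler(int irq, void *data)
{
        struct my_dev *dev = data;

        tasklet_schedule(&dev->rx_task); /* raises TASKLET_SOFTIRQ */
        return IRQ_HANDLED;
}

static void my_dev_init(struct my_dev *dev)
{
        tasklet_setup(&dev->rx_task, rx_task_fn);
}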

View File

@@ -21,12 +21,32 @@
#include <lx_emul/task.h>
#ifndef CONFIG_INLINE_SPIN_LOCK
void __lockfunc _raw_spin_lock(raw_spinlock_t * lock)
{
        arch_spin_lock(&lock->raw_lock);
}
#endif

#ifndef CONFIG_INLINE_SPIN_LOCK_BH
void __lockfunc _raw_spin_lock_bh(raw_spinlock_t * lock)
{
        __local_bh_disable_ip(_RET_IP_, SOFTIRQ_LOCK_OFFSET);
        _raw_spin_lock(lock);
}
#endif

#ifndef CONFIG_INLINE_SPIN_LOCK_IRQ
void __lockfunc _raw_spin_lock_irq(raw_spinlock_t * lock)
{
        _raw_spin_lock_irqsave(lock);
}
#endif

#ifndef CONFIG_INLINE_SPIN_LOCK_IRQSAVE
unsigned long __lockfunc _raw_spin_lock_irqsave(raw_spinlock_t * lock)
{
        unsigned long flags;
@@ -34,49 +54,131 @@ unsigned long __lockfunc _raw_spin_lock_irqsave(raw_spinlock_t * lock)
        _raw_spin_lock(lock);
        return flags;
}
#endif

#ifndef CONFIG_INLINE_SPIN_TRYLOCK
int __lockfunc _raw_spin_trylock(raw_spinlock_t * lock)
{
        return arch_spin_trylock(&lock->raw_lock);
}
#endif

#ifdef CONFIG_UNINLINE_SPIN_UNLOCK
void __lockfunc _raw_spin_unlock(raw_spinlock_t * lock)
{
        arch_spin_unlock(&lock->raw_lock);
}
#endif

#ifndef CONFIG_INLINE_SPIN_UNLOCK_BH
void __lockfunc _raw_spin_unlock_bh(raw_spinlock_t * lock)
{
        _raw_spin_unlock(lock);
        __local_bh_enable_ip(_RET_IP_, SOFTIRQ_LOCK_OFFSET);
}
#endif

#ifndef CONFIG_INLINE_SPIN_UNLOCK_IRQ
void __lockfunc _raw_spin_unlock_irq(raw_spinlock_t * lock)
{
        _raw_spin_unlock_irqrestore(lock, 0);
}
#endif

#ifndef CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE
void __lockfunc _raw_spin_unlock_irqrestore(raw_spinlock_t * lock,
                                            unsigned long flags)
{
        _raw_spin_unlock(lock);
        local_irq_restore(flags);
}
#endif
#ifndef CONFIG_INLINE_READ_LOCK
void __lockfunc _raw_read_lock(rwlock_t * lock)
{
        lx_emul_trace_and_stop(__func__);
}
#endif

#ifndef CONFIG_INLINE_READ_LOCK_IRQSAVE
unsigned long __lockfunc _raw_read_lock_irqsave(rwlock_t * lock)
{
        unsigned long flags;
        local_irq_save(flags);
        arch_read_lock(&(lock)->raw_lock);
        return flags;
}
#endif
#ifndef CONFIG_INLINE_WRITE_UNLOCK_BH
void __lockfunc _raw_write_unlock_bh(rwlock_t * lock)
{
        arch_write_unlock(&(lock)->raw_lock);
}
#endif

#ifndef CONFIG_INLINE_READ_UNLOCK_BH
void __lockfunc _raw_read_unlock_bh(rwlock_t * lock)
{
        arch_read_unlock(&(lock)->raw_lock);
}
#endif

#ifndef CONFIG_INLINE_WRITE_LOCK
void __lockfunc _raw_write_lock(rwlock_t * lock)
{
        arch_write_lock(&(lock)->raw_lock);
}
#endif
#ifndef CONFIG_INLINE_WRITE_UNLOCK
void __lockfunc _raw_write_unlock(rwlock_t * lock)
{
        arch_write_unlock(&(lock)->raw_lock);
}
#endif
#ifndef CONFIG_INLINE_READ_UNLOCK_IRQRESTORE
void __lockfunc _raw_read_unlock_irqrestore(rwlock_t * lock, unsigned long flags)
{
        arch_read_unlock(&(lock)->raw_lock);
        local_irq_restore(flags);
}
#endif

#ifndef CONFIG_INLINE_WRITE_LOCK_BH
void __lockfunc _raw_write_lock_bh(rwlock_t * lock)
{
        arch_write_lock(&(lock)->raw_lock);
}
#endif

#ifndef CONFIG_INLINE_WRITE_LOCK_IRQ
void __lockfunc _raw_write_lock_irq(rwlock_t * lock)
{
        arch_write_lock(&(lock)->raw_lock);
}
#endif

#ifndef CONFIG_INLINE_READ_LOCK_BH
void __lockfunc _raw_read_lock_bh(rwlock_t * lock)
{
        arch_read_lock(&(lock)->raw_lock);
}
#endif
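
The _bh variants above show why the spinlock shadow must cooperate with the
softirq code: __local_bh_disable_ip() keeps bottom halves from preempting the
critical section, and __local_bh_enable_ip() may run pending softirqs on
release. A hedged sketch of the classic pattern these functions serve, with
hypothetical names:

#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/interrupt.h>

static DEFINE_SPINLOCK(queue_lock);  /* hypothetical shared state */
static LIST_HEAD(queue);

/* process context: block the tasklet below while touching 'queue' */
static void enqueue(struct list_head *item)
{
        spin_lock_bh(&queue_lock);   /* ends up in _raw_spin_lock_bh() */
        list_add_tail(item, &queue);
        spin_unlock_bh(&queue_lock); /* may process pending softirqs */
}

/* softirq context: local bottom halves are already excluded */
static void drain_task_fn(struct tasklet_struct *t)
{
        spin_lock(&queue_lock);
        /* ... drain 'queue' ... */
        spin_unlock(&queue_lock);
}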