asm-generic: SOFTIRQ_ON_OWN_STACK rework
Merge tag 'asm-generic-fixes-6.0-rc4' of git://git.kernel.org/pub/scm/linux/kernel/git/arnd/asm-generic

Pull SOFTIRQ_ON_OWN_STACK rework from Arnd Bergmann:
 "Just one fixup patch, reworking the softirq_on_own_stack logic for
  preempt-rt kernels as discussed in
  https://lore.kernel.org/all/CAHk-=wgZSD3W2y6yczad2Am=EfHYyiPzTn3CfXxrriJf9i5W5w@mail.gmail.com/"

* tag 'asm-generic-fixes-6.0-rc4' of git://git.kernel.org/pub/scm/linux/kernel/git/arnd/asm-generic:
  asm-generic: Conditionally enable do_softirq_own_stack() via Kconfig
commit f448dda895
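For context, do_softirq_own_stack() is the hook this pull reworks: the generic softirq code calls it when pending softirqs should be processed on a dedicated per-CPU stack rather than on a possibly deep task stack, and PREEMPT_RT builds (which process softirqs in task context) now simply end up with the inline fallback instead of every architecture open-coding an #ifndef CONFIG_PREEMPT_RT guard. A simplified sketch of the generic call site, modelled on kernel/softirq.c's do_softirq() rather than quoted verbatim:

asmlinkage __visible void do_softirq(void)
{
	__u32 pending;
	unsigned long flags;

	if (in_interrupt())
		return;

	local_irq_save(flags);

	pending = local_softirq_pending();

	/*
	 * With CONFIG_SOFTIRQ_ON_OWN_STACK the architecture switches to a
	 * dedicated stack here; otherwise the asm-generic stub just runs
	 * __do_softirq() on the current stack.
	 */
	if (pending)
		do_softirq_own_stack();

	local_irq_restore(flags);
}

The per-file hunks of the merged patch follow.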
arch/Kconfig
@@ -923,6 +923,9 @@ config HAVE_SOFTIRQ_ON_OWN_STACK
 	  Architecture provides a function to run __do_softirq() on a
 	  separate stack.
 
+config SOFTIRQ_ON_OWN_STACK
+	def_bool HAVE_SOFTIRQ_ON_OWN_STACK && !PREEMPT_RT
+
 config ALTERNATE_USER_ADDRESS_SPACE
 	bool
 	help
arch/arm/kernel/irq.c
@@ -70,7 +70,7 @@ static void __init init_irq_stacks(void)
 	}
 }
 
-#ifndef CONFIG_PREEMPT_RT
+#ifdef CONFIG_SOFTIRQ_ON_OWN_STACK
 static void ____do_softirq(void *arg)
 {
 	__do_softirq();
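This and the remaining architecture hunks all have the same shape: a do_softirq_own_stack() implementation (or a small helper feeding it) that switches to a per-CPU stack, runs __do_softirq() and switches back, with the open-coded #ifndef CONFIG_PREEMPT_RT guard replaced by the single new Kconfig symbol. Purely as an illustration of that pattern, with arch_call_on_stack() and softirq_stack_ptr as made-up stand-ins for the real per-architecture primitives such as arm's call_with_stack() or s390's call_on_stack():

#ifdef CONFIG_SOFTIRQ_ON_OWN_STACK
/* Hypothetical stand-ins for the per-architecture stack-switch primitive. */
extern void arch_call_on_stack(void *sp, void (*fn)(void));
DECLARE_PER_CPU(void *, softirq_stack_ptr);

void do_softirq_own_stack(void)
{
	/* Run the softirq handler on the dedicated per-CPU stack. */
	arch_call_on_stack(__this_cpu_read(softirq_stack_ptr), __do_softirq);
}
#endif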
arch/parisc/kernel/irq.c
@@ -480,7 +480,7 @@ static void execute_on_irq_stack(void *func, unsigned long param1)
 	*irq_stack_in_use = 1;
 }
 
-#ifndef CONFIG_PREEMPT_RT
+#ifdef CONFIG_SOFTIRQ_ON_OWN_STACK
 void do_softirq_own_stack(void)
 {
 	execute_on_irq_stack(__do_softirq, 0);
arch/powerpc/kernel/irq.c
@@ -199,7 +199,7 @@ static inline void check_stack_overflow(unsigned long sp)
 	}
 }
 
-#ifndef CONFIG_PREEMPT_RT
+#ifdef CONFIG_SOFTIRQ_ON_OWN_STACK
 static __always_inline void call_do_softirq(const void *sp)
 {
 	/* Temporarily switch r1 to sp, call __do_softirq() then restore r1. */
arch/powerpc/kernel/irq.c
@@ -335,7 +335,7 @@ void *mcheckirq_ctx[NR_CPUS] __read_mostly;
 void *softirq_ctx[NR_CPUS] __read_mostly;
 void *hardirq_ctx[NR_CPUS] __read_mostly;
 
-#ifndef CONFIG_PREEMPT_RT
+#ifdef CONFIG_SOFTIRQ_ON_OWN_STACK
 void do_softirq_own_stack(void)
 {
 	call_do_softirq(softirq_ctx[smp_processor_id()]);
arch/s390/include/asm/softirq_stack.h
@@ -5,7 +5,7 @@
 #include <asm/lowcore.h>
 #include <asm/stacktrace.h>
 
-#ifndef CONFIG_PREEMPT_RT
+#ifdef CONFIG_SOFTIRQ_ON_OWN_STACK
 static inline void do_softirq_own_stack(void)
 {
 	call_on_stack(0, S390_lowcore.async_stack, void, __do_softirq);
arch/sh/kernel/irq.c
@@ -149,7 +149,7 @@ void irq_ctx_exit(int cpu)
 	hardirq_ctx[cpu] = NULL;
 }
 
-#ifndef CONFIG_PREEMPT_RT
+#ifdef CONFIG_SOFTIRQ_ON_OWN_STACK
 void do_softirq_own_stack(void)
 {
 	struct thread_info *curctx;
arch/sparc/kernel/irq_64.c
@@ -855,7 +855,7 @@ void __irq_entry handler_irq(int pil, struct pt_regs *regs)
 	set_irq_regs(old_regs);
 }
 
-#ifndef CONFIG_PREEMPT_RT
+#ifdef CONFIG_SOFTIRQ_ON_OWN_STACK
 void do_softirq_own_stack(void)
 {
 	void *orig_sp, *sp = softirq_stack[smp_processor_id()];
arch/x86/include/asm/irq_stack.h
@@ -203,7 +203,7 @@
 			      IRQ_CONSTRAINTS, regs, vector);		\
 }
 
-#ifndef CONFIG_PREEMPT_RT
+#ifdef CONFIG_SOFTIRQ_ON_OWN_STACK
 /*
  * Macro to invoke __do_softirq on the irq stack. This is only called from
  * task context when bottom halves are about to be reenabled and soft
arch/x86/kernel/irq_32.c
@@ -132,7 +132,7 @@ int irq_init_percpu_irqstack(unsigned int cpu)
 	return 0;
 }
 
-#ifndef CONFIG_PREEMPT_RT
+#ifdef CONFIG_SOFTIRQ_ON_OWN_STACK
 void do_softirq_own_stack(void)
 {
 	struct irq_stack *irqstk;
include/asm-generic/softirq_stack.h
@@ -2,7 +2,7 @@
 #ifndef __ASM_GENERIC_SOFTIRQ_STACK_H
 #define __ASM_GENERIC_SOFTIRQ_STACK_H
 
-#if defined(CONFIG_HAVE_SOFTIRQ_ON_OWN_STACK) && !defined(CONFIG_PREEMPT_RT)
+#ifdef CONFIG_SOFTIRQ_ON_OWN_STACK
 void do_softirq_own_stack(void);
 #else
 static inline void do_softirq_own_stack(void)
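The hunk above is cut short by the diff context; for reference, the resulting generic header is expected to look roughly as sketched below (a reconstruction, not a verbatim quote of include/asm-generic/softirq_stack.h). Architectures that end up with SOFTIRQ_ON_OWN_STACK enabled must provide do_softirq_own_stack(), and everyone else, including PREEMPT_RT builds, gets an inline stub that simply runs __do_softirq() on the current stack.

/* include/asm-generic/softirq_stack.h, reconstructed for illustration */
#ifndef __ASM_GENERIC_SOFTIRQ_STACK_H
#define __ASM_GENERIC_SOFTIRQ_STACK_H

#ifdef CONFIG_SOFTIRQ_ON_OWN_STACK
void do_softirq_own_stack(void);
#else
static inline void do_softirq_own_stack(void)
{
	/* No dedicated stack: process softirqs right here. */
	__do_softirq();
}
#endif

#endif /* __ASM_GENERIC_SOFTIRQ_STACK_H */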