Merge branch 'core/urgent' into core/locking
Prepare for dependent patch.

Signed-off-by: Ingo Molnar <mingo@kernel.org>
commit 7d5b158310
kernel/softirq.c
@@ -211,14 +211,52 @@ EXPORT_SYMBOL(local_bh_enable_ip);
 #define MAX_SOFTIRQ_TIME msecs_to_jiffies(2)
 #define MAX_SOFTIRQ_RESTART 10
 
+#ifdef CONFIG_TRACE_IRQFLAGS
+/*
+ * Convoluted means of passing __do_softirq() a message through the various
+ * architecture execute_on_stack() bits.
+ *
+ * When we run softirqs from irq_exit() and thus on the hardirq stack we need
+ * to keep the lockdep irq context tracking as tight as possible in order to
+ * not miss-qualify lock contexts and miss possible deadlocks.
+ */
+static DEFINE_PER_CPU(int, softirq_from_hardirq);
+
+static inline void lockdep_softirq_from_hardirq(void)
+{
+	this_cpu_write(softirq_from_hardirq, 1);
+}
+
+static inline void lockdep_softirq_start(void)
+{
+	if (this_cpu_read(softirq_from_hardirq))
+		trace_hardirq_exit();
+	lockdep_softirq_enter();
+}
+
+static inline void lockdep_softirq_end(void)
+{
+	lockdep_softirq_exit();
+	if (this_cpu_read(softirq_from_hardirq)) {
+		this_cpu_write(softirq_from_hardirq, 0);
+		trace_hardirq_enter();
+	}
+}
+
+#else
+static inline void lockdep_softirq_from_hardirq(void) { }
+static inline void lockdep_softirq_start(void) { }
+static inline void lockdep_softirq_end(void) { }
+#endif
+
 asmlinkage void __do_softirq(void)
 {
-	struct softirq_action *h;
-	__u32 pending;
 	unsigned long end = jiffies + MAX_SOFTIRQ_TIME;
-	int cpu;
 	unsigned long old_flags = current->flags;
 	int max_restart = MAX_SOFTIRQ_RESTART;
+	struct softirq_action *h;
+	__u32 pending;
+	int cpu;
 
 	/*
 	 * Mask out PF_MEMALLOC s current task context is borrowed for the
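The comment in the hunk above describes a cross-function handshake: code running on the hardirq stack raises a per-CPU flag, and __do_softirq() consumes it when adjusting lockdep's context tracking. Below is a hypothetical userspace model of that control flow, with the per-CPU flag reduced to a plain int and the lockdep/tracing hooks replaced by printf stubs; it is a sketch for orientation, not the kernel implementation.

#include <stdio.h>

static int softirq_from_hardirq;              /* stands in for the per-CPU flag */

static void trace_hardirq_enter(void)   { puts("lockdep: enter hardirq ctx"); }
static void trace_hardirq_exit(void)    { puts("lockdep: exit hardirq ctx"); }
static void lockdep_softirq_enter(void) { puts("lockdep: enter softirq ctx"); }
static void lockdep_softirq_exit(void)  { puts("lockdep: exit softirq ctx"); }

static void lockdep_softirq_from_hardirq(void)
{
	softirq_from_hardirq = 1;             /* message: softirq comes from the hardirq stack */
}

static void lockdep_softirq_start(void)
{
	if (softirq_from_hardirq)
		trace_hardirq_exit();         /* close hardirq tracking first */
	lockdep_softirq_enter();
}

static void lockdep_softirq_end(void)
{
	lockdep_softirq_exit();
	if (softirq_from_hardirq) {
		softirq_from_hardirq = 0;
		trace_hardirq_enter();        /* reopen hardirq tracking */
	}
}

static void __do_softirq(void)
{
	lockdep_softirq_start();
	puts("  ...run pending softirq handlers...");
	lockdep_softirq_end();
}

static void invoke_softirq(void)              /* called from irq_exit() in the kernel */
{
	lockdep_softirq_from_hardirq();
	__do_softirq();
}

int main(void)
{
	invoke_softirq();
	return 0;
}

Compiled and run, the model prints the enter/exit ordering lockdep would observe when softirqs are handled from irq_exit(): the hardirq context is closed for exactly the span of the softirq context and reopened afterwards.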
@@ -231,7 +269,7 @@ asmlinkage void __do_softirq(void)
 	account_irq_enter_time(current);
 
 	__local_bh_disable(_RET_IP_, SOFTIRQ_OFFSET);
-	lockdep_softirq_enter();
+	lockdep_softirq_start();
 
 	cpu = smp_processor_id();
 restart:
@@ -278,16 +316,13 @@ asmlinkage void __do_softirq(void)
 		wakeup_softirqd();
 	}
 
-	lockdep_softirq_exit();
-
+	lockdep_softirq_end();
 	account_irq_exit_time(current);
 	__local_bh_enable(SOFTIRQ_OFFSET);
 	WARN_ON_ONCE(in_interrupt());
 	tsk_restore_flags(current, old_flags, PF_MEMALLOC);
 }
 
-
-
 asmlinkage void do_softirq(void)
 {
 	__u32 pending;
@@ -330,6 +365,7 @@ void irq_enter(void)
 static inline void invoke_softirq(void)
 {
 	if (!force_irqthreads) {
+		lockdep_softirq_from_hardirq();
 #ifdef CONFIG_HAVE_IRQ_EXIT_ON_IRQ_STACK
 		/*
 		 * We can safely execute softirq on the current stack if
@@ -375,13 +411,13 @@ void irq_exit(void)
 #endif
 
 	account_irq_exit_time(current);
-	trace_hardirq_exit();
 	preempt_count_sub(HARDIRQ_OFFSET);
 	if (!in_interrupt() && local_softirq_pending())
 		invoke_softirq();
 
 	tick_irq_exit();
 	rcu_irq_exit();
+	trace_hardirq_exit(); /* must be last! */
 }
 
 /*
lib/Kconfig
@@ -342,7 +342,8 @@ config CHECK_SIGNATURE
 	bool
 
 config CPUMASK_OFFSTACK
-	bool "Force CPU masks off stack" if DEBUG_PER_CPU_MAPS
+	bool "Force CPU masks off stack"
+	depends on SMP
 	help
 	  Use dynamic allocation for cpumask_var_t, instead of putting
 	  them on the stack. This is a bit more expensive, but avoids
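For context on the help text above: CPUMASK_OFFSTACK decides whether cpumask_var_t is a pointer that alloc_cpumask_var() must actually allocate (=y) or an on-stack array for which the call is a no-op (=n). A minimal sketch of the caller pattern the option affects, assuming ordinary process context and a hypothetical helper walk_online_cpus(), looks like this:

#include <linux/cpumask.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/printk.h>

/* Hypothetical helper: walk the online CPUs through a private cpumask copy. */
static int walk_online_cpus(void)
{
	cpumask_var_t mask;
	int cpu;

	/* With CPUMASK_OFFSTACK=y this allocates the mask; with =n it is a no-op. */
	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;

	cpumask_copy(mask, cpu_online_mask);
	for_each_cpu(cpu, mask)
		pr_info("cpu %d is online\n", cpu);

	free_cpumask_var(mask);
	return 0;
}

Either way the calling code is identical; only the storage behind cpumask_var_t (and thus kernel stack usage on large-NR_CPUS configs) changes.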