[SPARC64]: Send all device interrupts via one PIL.
This is the first in a series of cleanups that will hopefully allow a seamless attempt at using the generic IRQ handling infrastructure in the Linux kernel.

Define PIL_DEVICE_IRQ and vector all device interrupts through there.

Get rid of the ugly pil0_dummy_{bucket,desc}, instead vector the timer interrupt directly to a specific handler since the timer interrupt is the only event that will be signaled on PIL 14.

The irq_worklist is now in the per-cpu trap_block[].

Signed-off-by: David S. Miller <davem@davemloft.net>
commit fd0504c321 (parent 3185d4d287)
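To make the new scheme concrete before reading the diff, here is a minimal user-space C sketch of what the patch does (illustrative only: the real kernel chains buckets through 32-bit __irq() cookies, empties the list with xchg32() from trap context, and set_softint() is a privileged register write, so plain pointers and printf() stand in for both):

#include <stdio.h>

#define PIL_DEVICE_IRQ 5	/* mirrors the new define in pil.h */

/* Simplified stand-ins for the kernel's ino_bucket and trap_block[]. */
struct ino_bucket {
	struct ino_bucket *irq_chain;	/* next bucket in the worklist */
	int pil;			/* source PIL, kept for statistics */
};

struct trap_per_cpu {
	struct ino_bucket *irq_worklist;	/* the single per-cpu list head */
};

static struct trap_per_cpu trap_block[1];	/* one "cpu" for the demo */

/* What do_ivec and sun4v_dev_mondo now do: push the bucket onto the
 * one worklist and signal the one device PIL. */
static void ivec_deliver(struct trap_per_cpu *tb, struct ino_bucket *bucket)
{
	bucket->irq_chain = tb->irq_worklist;
	tb->irq_worklist = bucket;
	printf("set_softint(1 << %d)\n", PIL_DEVICE_IRQ);
}

/* What handler_irq now does: grab the whole list, then walk it. */
static void handler_irq_model(struct trap_per_cpu *tb)
{
	struct ino_bucket *bp = tb->irq_worklist;

	tb->irq_worklist = NULL;	/* the kernel uses xchg32() here */
	while (bp) {
		struct ino_bucket *nbp = bp->irq_chain;

		bp->irq_chain = NULL;
		printf("process bucket from PIL %d\n", bp->pil);
		bp = nbp;
	}
}

int main(void)
{
	struct ino_bucket a = { .pil = 7 }, b = { .pil = 9 };

	ivec_deliver(&trap_block[0], &a);
	ivec_deliver(&trap_block[0], &b);
	handler_irq_model(&trap_block[0]);	/* drains LIFO: 9, then 7 */
	return 0;
}

Every trap vector now pushes onto the same list and raises PIL_DEVICE_IRQ; the drain loop uses each bucket's own pil field only for per-source statistics.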
--- a/arch/sparc64/kernel/entry.S
+++ b/arch/sparc64/kernel/entry.S
@@ -22,6 +22,7 @@
 #include <asm/estate.h>
 #include <asm/auxio.h>
 #include <asm/sfafsr.h>
+#include <asm/pil.h>

 #define curptr      g6

@@ -434,17 +435,13 @@ do_ivec:
	sllx	%g3, 5, %g3
	or	%g2, %lo(ivector_table), %g2
	add	%g2, %g3, %g3
-	ldub	[%g3 + 0x04], %g4	/* pil */
-	mov	1, %g2
-	sllx	%g2, %g4, %g2
-	sllx	%g4, 2, %g4

	TRAP_LOAD_IRQ_WORK(%g6, %g1)

-	lduw	[%g6 + %g4], %g5	/* g5 = irq_work(cpu, pil) */
+	lduw	[%g6], %g5		/* g5 = irq_work(cpu) */
	stw	%g5, [%g3 + 0x00]	/* bucket->irq_chain = g5 */
-	stw	%g3, [%g6 + %g4]	/* irq_work(cpu, pil) = bucket */
-	wr	%g2, 0x0, %set_softint
+	stw	%g3, [%g6]		/* irq_work(cpu) = bucket */
+	wr	%g0, 1 << PIL_DEVICE_IRQ, %set_softint
	retry
do_ivec_xcall:
	mov	0x50, %g1
--- a/arch/sparc64/kernel/irq.c
+++ b/arch/sparc64/kernel/irq.c
@@ -68,11 +68,7 @@ struct ino_bucket ivector_table[NUM_IVECS] __attribute__ ((aligned (SMP_CACHE_BY
  * access to this structure takes a TLB miss it could cause
  * the 5-level sparc v9 trap stack to overflow.
  */
-struct irq_work_struct {
-	unsigned int	irq_worklists[16];
-};
-struct irq_work_struct __irq_work[NR_CPUS];
-#define irq_work(__cpu, __pil)	&(__irq_work[(__cpu)].irq_worklists[(__pil)])
+#define irq_work(__cpu)	&(trap_block[(__cpu)].irq_worklist)

 static struct irqaction *irq_action[NR_IRQS+1];

@@ -91,10 +87,8 @@ static void register_irq_proc (unsigned int irq);
  */
 #define put_ino_in_irqaction(action, irq) \
	action->flags &= 0xffffffffffffUL; \
-	if (__bucket(irq) == &pil0_dummy_bucket) \
-		action->flags |= 0xdeadUL << 48;  \
-	else \
-		action->flags |= __irq_ino(irq) << 48;
+	action->flags |= __irq_ino(irq) << 48;
+
 #define get_ino_in_irqaction(action)	(action->flags >> 48)

 #define put_smpaff_in_irqaction(action, smpaff)	((action)->mask = (smpaff))
@@ -251,15 +245,6 @@ void disable_irq(unsigned int irq)
	}
 }

-/* The timer is the one "weird" interrupt which is generated by
- * the CPU %tick register and not by some normal vectored interrupt
- * source.  To handle this special case, we use this dummy INO bucket.
- */
-static struct irq_desc pil0_dummy_desc;
-static struct ino_bucket pil0_dummy_bucket = {
-	.irq_info = &pil0_dummy_desc,
-};
-
 static void build_irq_error(const char *msg, unsigned int ino, int pil, int inofixup,
			    unsigned long iclr, unsigned long imap,
			    struct ino_bucket *bucket)
@@ -276,15 +261,7 @@ unsigned int build_irq(int pil, int inofixup, unsigned long iclr, unsigned long
	struct ino_bucket *bucket;
	int ino;

-	if (pil == 0) {
-		if (iclr != 0UL || imap != 0UL) {
-			prom_printf("Invalid dummy bucket for PIL0 (%lx:%lx)\n",
-				    iclr, imap);
-			prom_halt();
-		}
-		return __irq(&pil0_dummy_bucket);
-	}
-
+	BUG_ON(pil == 0);
	BUG_ON(tlb_type == hypervisor);

	/* RULE: Both must be specified in all other cases. */
@@ -371,7 +348,7 @@ static void atomic_bucket_insert(struct ino_bucket *bucket)
	__asm__ __volatile__("rdpr %%pstate, %0" : "=r" (pstate));
	__asm__ __volatile__("wrpr %0, %1, %%pstate"
			     : : "r" (pstate), "i" (PSTATE_IE));
-	ent = irq_work(smp_processor_id(), bucket->pil);
+	ent = irq_work(smp_processor_id());
	bucket->irq_chain = *ent;
	*ent = __irq(bucket);
	__asm__ __volatile__("wrpr %0, 0x0, %%pstate" : : "r" (pstate));
@@ -437,7 +414,7 @@ int request_irq(unsigned int irq, irqreturn_t (*handler)(int, void *, struct pt_
	if (unlikely(!bucket->irq_info))
		return -ENODEV;

-	if ((bucket != &pil0_dummy_bucket) && (irqflags & SA_SAMPLE_RANDOM)) {
+	if (irqflags & SA_SAMPLE_RANDOM) {
		/*
		 * This function might sleep, we want to call it first,
		 * outside of the atomic block.  In SA_STATIC_ALLOC case,
@@ -465,12 +442,9 @@ int request_irq(unsigned int irq, irqreturn_t (*handler)(int, void *, struct pt_
	}

	bucket->flags |= IBF_ACTIVE;
-	pending = 0;
-	if (bucket != &pil0_dummy_bucket) {
-		pending = bucket->pending;
-		if (pending)
-			bucket->pending = 0;
-	}
+	pending = bucket->pending;
+	if (pending)
+		bucket->pending = 0;

	action->handler = handler;
	action->flags = irqflags;
@@ -487,13 +461,12 @@ int request_irq(unsigned int irq, irqreturn_t (*handler)(int, void *, struct pt_
	/* We ate the IVEC already, this makes sure it does not get lost. */
	if (pending) {
		atomic_bucket_insert(bucket);
-		set_softint(1 << bucket->pil);
+		set_softint(1 << PIL_DEVICE_IRQ);
	}

	spin_unlock_irqrestore(&irq_action_lock, flags);

-	if (bucket != &pil0_dummy_bucket)
-		register_irq_proc(__irq_ino(irq));
+	register_irq_proc(__irq_ino(irq));

 #ifdef CONFIG_SMP
	distribute_irqs();
@@ -533,7 +506,9 @@ void free_irq(unsigned int irq, void *dev_id)
 {
	struct irqaction *action;
	struct ino_bucket *bucket;
+	struct irq_desc *desc;
	unsigned long flags;
+	int ent, i;

	spin_lock_irqsave(&irq_action_lock, flags);

@@ -549,42 +524,39 @@ void free_irq(unsigned int irq, void *dev_id)
	spin_lock_irqsave(&irq_action_lock, flags);

	bucket = __bucket(irq);
-	if (bucket != &pil0_dummy_bucket) {
-		struct irq_desc *desc = bucket->irq_info;
-		int ent, i;
+	desc = bucket->irq_info;

-		for (i = 0; i < MAX_IRQ_DESC_ACTION; i++) {
-			struct irqaction *p = &desc->action[i];
+	for (i = 0; i < MAX_IRQ_DESC_ACTION; i++) {
+		struct irqaction *p = &desc->action[i];

-			if (p == action) {
-				desc->action_active_mask &= ~(1 << i);
-				break;
-			}
+		if (p == action) {
+			desc->action_active_mask &= ~(1 << i);
+			break;
		}
+	}

-		if (!desc->action_active_mask) {
-			unsigned long imap = bucket->imap;
-
-			/* This unique interrupt source is now inactive. */
-			bucket->flags &= ~IBF_ACTIVE;
-
-			/* See if any other buckets share this bucket's IMAP
-			 * and are still active.
-			 */
-			for (ent = 0; ent < NUM_IVECS; ent++) {
-				struct ino_bucket *bp = &ivector_table[ent];
-				if (bp != bucket	&&
-				    bp->imap == imap	&&
-				    (bp->flags & IBF_ACTIVE) != 0)
-					break;
-			}
-
-			/* Only disable when no other sub-irq levels of
-			 * the same IMAP are active.
-			 */
-			if (ent == NUM_IVECS)
-				disable_irq(irq);
-		}
+	if (!desc->action_active_mask) {
+		unsigned long imap = bucket->imap;
+
+		/* This unique interrupt source is now inactive. */
+		bucket->flags &= ~IBF_ACTIVE;
+
+		/* See if any other buckets share this bucket's IMAP
+		 * and are still active.
+		 */
+		for (ent = 0; ent < NUM_IVECS; ent++) {
+			struct ino_bucket *bp = &ivector_table[ent];
+			if (bp != bucket	&&
+			    bp->imap == imap	&&
+			    (bp->flags & IBF_ACTIVE) != 0)
+				break;
+		}
+
+		/* Only disable when no other sub-irq levels of
+		 * the same IMAP are active.
+		 */
+		if (ent == NUM_IVECS)
+			disable_irq(irq);
	}

	spin_unlock_irqrestore(&irq_action_lock, flags);

@@ -625,7 +597,7 @@ void synchronize_irq(unsigned int irq)
 }
 #endif /* CONFIG_SMP */

-static void process_bucket(int irq, struct ino_bucket *bp, struct pt_regs *regs)
+static void process_bucket(struct ino_bucket *bp, struct pt_regs *regs)
 {
	struct irq_desc *desc = bp->irq_info;
	unsigned char flags = bp->flags;

@@ -676,51 +648,54 @@ static void process_bucket(int irq, struct ino_bucket *bp, struct pt_regs *regs)

		/* Test and add entropy */
		if (random & SA_SAMPLE_RANDOM)
-			add_interrupt_randomness(irq);
+			add_interrupt_randomness(bp->pil);
	}
 out:
	bp->flags &= ~IBF_INPROGRESS;
 }

+#ifndef CONFIG_SMP
+extern irqreturn_t timer_interrupt(int, void *, struct pt_regs *);
+
+void timer_irq(int irq, struct pt_regs *regs)
+{
+	unsigned long clr_mask = 1 << irq;
+	unsigned long tick_mask = tick_ops->softint_mask;
+
+	if (get_softint() & tick_mask) {
+		irq = 0;
+		clr_mask = tick_mask;
+	}
+	clear_softint(clr_mask);
+
+	irq_enter();
+	kstat_this_cpu.irqs[irq]++;
+	timer_interrupt(irq, NULL, regs);
+	irq_exit();
+}
+#endif
+
 void handler_irq(int irq, struct pt_regs *regs)
 {
	struct ino_bucket *bp;
	int cpu = smp_processor_id();

-#ifndef CONFIG_SMP
-	/*
-	 * Check for TICK_INT on level 14 softint.
+	/* XXX at this point we should be able to assert that
+	 * XXX irq is PIL_DEVICE_IRQ...
	 */
-	{
-		unsigned long clr_mask = 1 << irq;
-		unsigned long tick_mask = tick_ops->softint_mask;
-
-		if ((irq == 14) && (get_softint() & tick_mask)) {
-			irq = 0;
-			clr_mask = tick_mask;
-		}
-		clear_softint(clr_mask);
-	}
-#else
	clear_softint(1 << irq);
-#endif

	irq_enter();
-	kstat_this_cpu.irqs[irq]++;

	/* Sliiiick... */
-#ifndef CONFIG_SMP
-	bp = ((irq != 0) ?
-	      __bucket(xchg32(irq_work(cpu, irq), 0)) :
-	      &pil0_dummy_bucket);
-#else
-	bp = __bucket(xchg32(irq_work(cpu, irq), 0));
-#endif
+	bp = __bucket(xchg32(irq_work(cpu), 0));
	while (bp) {
		struct ino_bucket *nbp = __bucket(bp->irq_chain);

+		kstat_this_cpu.irqs[bp->pil]++;
+
		bp->irq_chain = 0;
-		process_bucket(irq, bp, regs);
+		process_bucket(bp, regs);
		bp = nbp;
	}
	irq_exit();

@@ -929,7 +904,7 @@ void init_irqwork_curcpu(void)
 {
	int cpu = hard_smp_processor_id();

-	memset(__irq_work + cpu, 0, sizeof(struct irq_work_struct));
+	trap_block[cpu].irq_worklist = 0;
 }

 static void __cpuinit register_one_mondo(unsigned long paddr, unsigned long type)
--- a/arch/sparc64/kernel/sun4v_ivec.S
+++ b/arch/sparc64/kernel/sun4v_ivec.S
@@ -5,6 +5,7 @@

 #include <asm/cpudata.h>
 #include <asm/intr_queue.h>
+#include <asm/pil.h>

	.text
	.align	32

@@ -106,19 +107,13 @@ sun4v_dev_mondo:
	or	%g4, %lo(ivector_table), %g4
	add	%g4, %g3, %g4

-	/* Load IRQ %pil into %g5.  */
-	ldub	[%g4 + 0x04], %g5
-
	/* Insert ivector_table[] entry into __irq_work[] queue.  */
-	sllx	%g5, 2, %g3
-	lduw	[%g1 + %g3], %g2	/* g2 = irq_work(cpu, pil) */
+	lduw	[%g1], %g2		/* g2 = irq_work(cpu) */
	stw	%g2, [%g4 + 0x00]	/* bucket->irq_chain = g2 */
-	stw	%g4, [%g1 + %g3]	/* irq_work(cpu, pil) = bucket */
+	stw	%g4, [%g1]		/* irq_work(cpu) = bucket */

	/* Signal the interrupt by setting (1 << pil) in %softint.  */
-	mov	1, %g2
-	sllx	%g2, %g5, %g2
-	wr	%g2, 0x0, %set_softint
+	wr	%g0, 1 << PIL_DEVICE_IRQ, %set_softint

 sun4v_dev_mondo_queue_empty:
	retry
--- a/arch/sparc64/kernel/time.c
+++ b/arch/sparc64/kernel/time.c
@@ -457,7 +457,7 @@ static inline void timer_check_rtc(void)
	}
 }

-static irqreturn_t timer_interrupt(int irq, void *dev_id, struct pt_regs * regs)
+irqreturn_t timer_interrupt(int irq, void *dev_id, struct pt_regs * regs)
 {
	unsigned long ticks, compare, pstate;

@@ -1020,19 +1020,9 @@ static unsigned long sparc64_init_timers(void)
	return clock;
 }

-static void sparc64_start_timers(irqreturn_t (*cfunc)(int, void *, struct pt_regs *))
+static void sparc64_start_timers(void)
 {
	unsigned long pstate;
-	int err;
-
-	/* Register IRQ handler. */
-	err = request_irq(build_irq(0, 0, 0UL, 0UL), cfunc, 0,
-			  "timer", NULL);
-
-	if (err) {
-		prom_printf("Serious problem, cannot register TICK_INT\n");
-		prom_halt();
-	}

	/* Guarantee that the following sequences execute
	 * uninterrupted.

@@ -1116,7 +1106,7 @@ void __init time_init(void)
	/* Now that the interpolator is registered, it is
	 * safe to start the timer ticking.
	 */
-	sparc64_start_timers(timer_interrupt);
+	sparc64_start_timers();

	timer_ticks_per_nsec_quotient =
		(((NSEC_PER_SEC << SPARC64_NSEC_PER_CYC_SHIFT) +
--- a/arch/sparc64/kernel/traps.c
+++ b/arch/sparc64/kernel/traps.c
@@ -2544,7 +2544,9 @@ void __init trap_init(void)
	    (TRAP_PER_CPU_TSB_HUGE !=
	     offsetof(struct trap_per_cpu, tsb_huge)) ||
	    (TRAP_PER_CPU_TSB_HUGE_TEMP !=
-	     offsetof(struct trap_per_cpu, tsb_huge_temp)))
+	     offsetof(struct trap_per_cpu, tsb_huge_temp)) ||
+	    (TRAP_PER_CPU_IRQ_WORKLIST !=
+	     offsetof(struct trap_per_cpu, irq_worklist)))
		trap_per_cpu_offsets_are_bolixed_dave();

	if ((TSB_CONFIG_TSB !=
--- a/arch/sparc64/kernel/ttable.S
+++ b/arch/sparc64/kernel/ttable.S
@@ -58,13 +58,11 @@ tl0_irq2: BTRAP(0x42)
 tl0_irq3:	BTRAP(0x43)
 tl0_irq4:	BTRAP(0x44)
 #endif
-tl0_irq5:	TRAP_IRQ(handler_irq, 5)  TRAP_IRQ(handler_irq, 6)
-tl0_irq7:	TRAP_IRQ(handler_irq, 7)  TRAP_IRQ(handler_irq, 8)
-tl0_irq9:	TRAP_IRQ(handler_irq, 9)  TRAP_IRQ(handler_irq, 10)
-tl0_irq11:	TRAP_IRQ(handler_irq, 11) TRAP_IRQ(handler_irq, 12)
-tl0_irq13:	TRAP_IRQ(handler_irq, 13)
+tl0_irq5:	TRAP_IRQ(handler_irq, 5)
+tl0_irq6:	BTRAP(0x46) BTRAP(0x47) BTRAP(0x48) BTRAP(0x49)
+tl0_irq10:	BTRAP(0x4a) BTRAP(0x4b) BTRAP(0x4c) BTRAP(0x4d)
 #ifndef CONFIG_SMP
-tl0_irq14:	TRAP_IRQ(handler_irq, 14)
+tl0_irq14:	TRAP_IRQ(timer_irq, 14)
 #else
 tl0_irq14:	TICK_SMP_IRQ
 #endif
--- a/include/asm-sparc64/cpudata.h
+++ b/include/asm-sparc64/cpudata.h
@@ -74,8 +74,10 @@ struct trap_per_cpu {
	unsigned long		tsb_huge;
	unsigned long		tsb_huge_temp;

-/* Dcache line 8: Unused, needed to keep trap_block a power-of-2 in size.  */
-	unsigned long		__pad2[4];
+/* Dcache line 8: IRQ work list, and keep trap_block a power-of-2 in size.  */
+	unsigned int		irq_worklist;
+	unsigned int		__pad1;
+	unsigned long		__pad2[3];
 } __attribute__((aligned(64)));
 extern struct trap_per_cpu trap_block[NR_CPUS];
 extern void init_cur_cpu_trap(struct thread_info *);

@@ -119,6 +121,7 @@ extern struct sun4v_2insn_patch_entry __sun4v_2insn_patch,
 #define TRAP_PER_CPU_CPU_LIST_PA	0xc8
 #define TRAP_PER_CPU_TSB_HUGE		0xd0
 #define TRAP_PER_CPU_TSB_HUGE_TEMP	0xd8
+#define TRAP_PER_CPU_IRQ_WORKLIST	0xe0

 #define TRAP_BLOCK_SZ_SHIFT		8

@@ -171,11 +174,8 @@ extern struct sun4v_2insn_patch_entry __sun4v_2insn_patch,

 /* Clobbers TMP, loads local processor's IRQ work area into DEST.  */
 #define TRAP_LOAD_IRQ_WORK(DEST, TMP)		\
-	__GET_CPUID(TMP)			\
-	sethi	%hi(__irq_work), DEST;		\
-	sllx	TMP, 6, TMP;			\
-	or	DEST, %lo(__irq_work), DEST;	\
-	add	DEST, TMP, DEST;
+	TRAP_LOAD_TRAP_BLOCK(DEST, TMP)		\
+	add	DEST, TRAP_PER_CPU_IRQ_WORKLIST, DEST;

 /* Clobbers TMP, loads DEST with current thread info pointer.  */
 #define TRAP_LOAD_THREAD_REG(DEST, TMP)		\

@@ -211,9 +211,10 @@ extern struct sun4v_2insn_patch_entry __sun4v_2insn_patch,
	TRAP_LOAD_TRAP_BLOCK(DEST, TMP)		\
	ldx	[DEST + TRAP_PER_CPU_PGD_PADDR], DEST;

+/* Clobbers TMP, loads local processor's IRQ work area into DEST.  */
 #define TRAP_LOAD_IRQ_WORK(DEST, TMP)		\
-	sethi	%hi(__irq_work), DEST;		\
-	or	DEST, %lo(__irq_work), DEST;
+	TRAP_LOAD_TRAP_BLOCK(DEST, TMP)		\
+	add	DEST, TRAP_PER_CPU_IRQ_WORKLIST, DEST;

 #define TRAP_LOAD_THREAD_REG(DEST, TMP)		\
	TRAP_LOAD_TRAP_BLOCK(DEST, TMP)		\
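The layout arithmetic above is easy to sanity-check in isolation. Here is a hedged stand-alone model (not kernel code; it assumes an LP64 target and collapses the first seven 32-byte dcache lines of the real struct into one pad array): irq_worklist must land at the asm-visible offset 0xe0, and the whole entry must stay 2^TRAP_BLOCK_SZ_SHIFT = 256 bytes so trap handlers can index trap_block[] with a shift.

#include <stddef.h>

#define TRAP_PER_CPU_IRQ_WORKLIST	0xe0	/* asm-visible offset */
#define TRAP_BLOCK_SZ_SHIFT		8	/* each entry is 256 bytes */

/* Collapsed model: dcache lines 1-7 (0x00-0xdf) as one pad array, then
 * dcache line 8 exactly as the patch lays it out. */
struct trap_per_cpu {
	unsigned long	__lines_1_to_7[28];	/* 28 * 8 = 224 = 0xe0 bytes */
	unsigned int	irq_worklist;
	unsigned int	__pad1;
	unsigned long	__pad2[3];
} __attribute__((aligned(64)));

int main(void)
{
	/* trap_init() performs the same comparison at boot and calls
	 * trap_per_cpu_offsets_are_bolixed_dave() on a mismatch. */
	if (offsetof(struct trap_per_cpu, irq_worklist) !=
	    TRAP_PER_CPU_IRQ_WORKLIST)
		return 1;
	if (sizeof(struct trap_per_cpu) != (1UL << TRAP_BLOCK_SZ_SHIFT))
		return 1;
	return 0;
}

Swapping __pad2[4] for irq_worklist + __pad1 + __pad2[3] is size-neutral, which is why only the offset check in trap_init() needed a new clause.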
--- a/include/asm-sparc64/pil.h
+++ b/include/asm-sparc64/pil.h
@@ -5,9 +5,9 @@
 /* To avoid some locking problems, we hard allocate certain PILs
  * for SMP cross call messages that must do a etrap/rtrap.
  *
- * A cli() does not block the cross call delivery, so when SMP
- * locking is an issue we reschedule the event into a PIL interrupt
- * which is blocked by cli().
+ * A local_irq_disable() does not block the cross call delivery, so
+ * when SMP locking is an issue we reschedule the event into a PIL
+ * interrupt which is blocked by local_irq_disable().
  *
  * In fact any XCALL which has to etrap/rtrap has a problem because
  * it is difficult to prevent rtrap from running BH's, and that would

@@ -17,6 +17,7 @@
 #define PIL_SMP_RECEIVE_SIGNAL	2
 #define PIL_SMP_CAPTURE		3
 #define PIL_SMP_CTX_NEW_VERSION	4
+#define PIL_DEVICE_IRQ		5

 #ifndef __ASSEMBLY__
 #define PIL_RESERVED(PIL)	((PIL) == PIL_SMP_CALL_FUNC || \