forked from luck/tmp_suning_uos_patched
3cd99ac355
When I had lockdep turned on and dropped into kgdb I got a nice splat on my system. Specifically, it hit:

DEBUG_LOCKS_WARN_ON(current->hardirq_context)

The full splat looked like this:

sysrq: SysRq : DEBUG
------------[ cut here ]------------
DEBUG_LOCKS_WARN_ON(current->hardirq_context)
WARNING: CPU: 0 PID: 0 at .../kernel/locking/lockdep.c:2875 lockdep_hardirqs_on+0xf0/0x160
CPU: 0 PID: 0 Comm: swapper/0 Not tainted 4.19.0 #27
pstate: 604003c9 (nZCv DAIF +PAN -UAO)
pc : lockdep_hardirqs_on+0xf0/0x160
...
Call trace:
 lockdep_hardirqs_on+0xf0/0x160
 trace_hardirqs_on+0x188/0x1ac
 kgdb_roundup_cpus+0x14/0x3c
 kgdb_cpu_enter+0x53c/0x5cc
 kgdb_handle_exception+0x180/0x1d4
 kgdb_compiled_brk_fn+0x30/0x3c
 brk_handler+0x134/0x178
 do_debug_exception+0xfc/0x178
 el1_dbg+0x18/0x78
 kgdb_breakpoint+0x34/0x58
 sysrq_handle_dbg+0x54/0x5c
 __handle_sysrq+0x114/0x21c
 handle_sysrq+0x30/0x3c
 qcom_geni_serial_isr+0x2dc/0x30c
...
...
irq event stamp: ...45
hardirqs last enabled at (...44): [...] __do_softirq+0xd8/0x4e4
hardirqs last disabled at (...45): [...] el1_irq+0x74/0x130
softirqs last enabled at (...42): [...] _local_bh_enable+0x2c/0x34
softirqs last disabled at (...43): [...] irq_exit+0xa8/0x100
---[ end trace adf21f830c46e638 ]---

Looking closely at it, it seems like a really bad idea to be calling local_irq_enable() in kgdb_roundup_cpus(). If nothing else, that could violate spinlock semantics and cause a deadlock.

Instead, let's use a private csd alongside smp_call_function_single_async() to round up the other CPUs. smp_call_function_single_async() doesn't require interrupts to be enabled, so we can remove the offending bit of code.

In order to avoid duplicating this across all the architectures that use the default kgdb_roundup_cpus(), we'll add a "weak" implementation to debug_core.c.

Looking at all the architectures that previously had copies of this code, there were a few variants. I've attempted to keep the variants working like they used to. Specifically:

* For arch/arc we passed NULL to kgdb_nmicallback() instead of get_irq_regs().
* For arch/mips there was a bit of extra code around kgdb_nmicallback().

NOTE: In this patch we will still get into trouble if we try to round up a CPU that failed to round up before. We'll try to round it up again and potentially hang when we try to grab the csd lock. That's not new behavior, but we should still try to do better in a future patch.

Suggested-by: Daniel Thompson <daniel.thompson@linaro.org>
Signed-off-by: Douglas Anderson <dianders@chromium.org>
Cc: Vineet Gupta <vgupta@synopsys.com>
Cc: Russell King <linux@armlinux.org.uk>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Will Deacon <will.deacon@arm.com>
Cc: Richard Kuo <rkuo@codeaurora.org>
Cc: Ralf Baechle <ralf@linux-mips.org>
Cc: Paul Burton <paul.burton@mips.com>
Cc: James Hogan <jhogan@kernel.org>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Yoshinori Sato <ysato@users.sourceforge.jp>
Cc: Rich Felker <dalias@libc.org>
Cc: "David S. Miller" <davem@davemloft.net>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Borislav Petkov <bp@alien8.de>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Acked-by: Will Deacon <will.deacon@arm.com>
Signed-off-by: Daniel Thompson <daniel.thompson@linaro.org>
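For reference, here is a minimal sketch of the kind of generic round-up the message above describes: one private csd per CPU plus smp_call_function_single_async(), so no local_irq_enable() is needed. This is illustrative only; names such as kgdb_roundup_csd and the exact prototypes are assumptions, not necessarily the patch verbatim, and architectures like MIPS (the file below) keep their own kgdb_call_nmi_hook() override.

#include <linux/kgdb.h>
#include <linux/percpu.h>
#include <linux/smp.h>
#include <asm/irq_regs.h>

/* One private csd per CPU so we never need to enable interrupts to queue it. */
static DEFINE_PER_CPU(call_single_data_t, kgdb_roundup_csd);

void __weak kgdb_call_nmi_hook(void *ignored)
{
        /* Default hook: report this CPU in with the interrupted registers. */
        kgdb_nmicallback(raw_smp_processor_id(), get_irq_regs());
}

void __weak kgdb_roundup_cpus(void)
{
        call_single_data_t *csd;
        int this_cpu = raw_smp_processor_id();
        int cpu;

        for_each_online_cpu(cpu) {
                /* No need to round up ourselves */
                if (cpu == this_cpu)
                        continue;

                csd = &per_cpu(kgdb_roundup_csd, cpu);
                csd->func = kgdb_call_nmi_hook;
                smp_call_function_single_async(cpu, csd);
        }
}

Because smp_call_function_single_async() only queues the csd and kicks an IPI, it is safe with interrupts disabled; the NOTE above about re-rounding up a CPU that never responded is exactly the case where that CPU's csd would still be locked.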
417 lines | 11 KiB | C
/*
 * Originally written by Glenn Engel, Lake Stevens Instrument Division
 *
 * Contributed by HP Systems
 *
 * Modified for Linux/MIPS (and MIPS in general) by Andreas Busse
 * Send complaints, suggestions etc. to <andy@waldorf-gmbh.de>
 *
 * Copyright (C) 1995 Andreas Busse
 *
 * Copyright (C) 2003 MontaVista Software Inc.
 * Author: Jun Sun, jsun@mvista.com or jsun@junsun.net
 *
 * Copyright (C) 2004-2005 MontaVista Software Inc.
 * Author: Manish Lachwani, mlachwani@mvista.com or manish@koffee-break.com
 *
 * Copyright (C) 2007-2008 Wind River Systems, Inc.
 * Author/Maintainer: Jason Wessel, jason.wessel@windriver.com
 *
 * This file is licensed under the terms of the GNU General Public License
 * version 2. This program is licensed "as is" without any warranty of any
 * kind, whether express or implied.
 */

#include <linux/ptrace.h>	/* for linux pt_regs struct */
#include <linux/kgdb.h>
#include <linux/kdebug.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <asm/inst.h>
#include <asm/fpu.h>
#include <asm/cacheflush.h>
#include <asm/processor.h>
#include <asm/sigcontext.h>
#include <linux/uaccess.h>

static struct hard_trap_info {
        unsigned char tt;	/* Trap type code for MIPS R3xxx and R4xxx */
        unsigned char signo;	/* Signal that we map this trap into */
} hard_trap_info[] = {
        { 6, SIGBUS },		/* instruction bus error */
        { 7, SIGBUS },		/* data bus error */
        { 9, SIGTRAP },		/* break */
        /* { 11, SIGILL }, */	/* CPU unusable */
        { 12, SIGFPE },		/* overflow */
        { 13, SIGTRAP },	/* trap */
        { 14, SIGSEGV },	/* virtual instruction cache coherency */
        { 15, SIGFPE },		/* floating point exception */
        { 23, SIGSEGV },	/* watch */
        { 31, SIGSEGV },	/* virtual data cache coherency */
        { 0, 0}			/* Must be last */
};

struct dbg_reg_def_t dbg_reg_def[DBG_MAX_REG_NUM] =
{
        { "zero", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[0]) },
        { "at", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[1]) },
        { "v0", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[2]) },
        { "v1", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[3]) },
        { "a0", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[4]) },
        { "a1", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[5]) },
        { "a2", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[6]) },
        { "a3", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[7]) },
        { "t0", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[8]) },
        { "t1", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[9]) },
        { "t2", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[10]) },
        { "t3", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[11]) },
        { "t4", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[12]) },
        { "t5", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[13]) },
        { "t6", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[14]) },
        { "t7", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[15]) },
        { "s0", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[16]) },
        { "s1", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[17]) },
        { "s2", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[18]) },
        { "s3", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[19]) },
        { "s4", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[20]) },
        { "s5", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[21]) },
        { "s6", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[22]) },
        { "s7", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[23]) },
        { "t8", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[24]) },
        { "t9", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[25]) },
        { "k0", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[26]) },
        { "k1", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[27]) },
        { "gp", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[28]) },
        { "sp", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[29]) },
        { "s8", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[30]) },
        { "ra", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[31]) },
        { "sr", GDB_SIZEOF_REG, offsetof(struct pt_regs, cp0_status) },
        { "lo", GDB_SIZEOF_REG, offsetof(struct pt_regs, lo) },
        { "hi", GDB_SIZEOF_REG, offsetof(struct pt_regs, hi) },
        { "bad", GDB_SIZEOF_REG, offsetof(struct pt_regs, cp0_badvaddr) },
        { "cause", GDB_SIZEOF_REG, offsetof(struct pt_regs, cp0_cause) },
        { "pc", GDB_SIZEOF_REG, offsetof(struct pt_regs, cp0_epc) },
        { "f0", GDB_SIZEOF_REG, 0 },
        { "f1", GDB_SIZEOF_REG, 1 },
        { "f2", GDB_SIZEOF_REG, 2 },
        { "f3", GDB_SIZEOF_REG, 3 },
        { "f4", GDB_SIZEOF_REG, 4 },
        { "f5", GDB_SIZEOF_REG, 5 },
        { "f6", GDB_SIZEOF_REG, 6 },
        { "f7", GDB_SIZEOF_REG, 7 },
        { "f8", GDB_SIZEOF_REG, 8 },
        { "f9", GDB_SIZEOF_REG, 9 },
        { "f10", GDB_SIZEOF_REG, 10 },
        { "f11", GDB_SIZEOF_REG, 11 },
        { "f12", GDB_SIZEOF_REG, 12 },
        { "f13", GDB_SIZEOF_REG, 13 },
        { "f14", GDB_SIZEOF_REG, 14 },
        { "f15", GDB_SIZEOF_REG, 15 },
        { "f16", GDB_SIZEOF_REG, 16 },
        { "f17", GDB_SIZEOF_REG, 17 },
        { "f18", GDB_SIZEOF_REG, 18 },
        { "f19", GDB_SIZEOF_REG, 19 },
        { "f20", GDB_SIZEOF_REG, 20 },
        { "f21", GDB_SIZEOF_REG, 21 },
        { "f22", GDB_SIZEOF_REG, 22 },
        { "f23", GDB_SIZEOF_REG, 23 },
        { "f24", GDB_SIZEOF_REG, 24 },
        { "f25", GDB_SIZEOF_REG, 25 },
        { "f26", GDB_SIZEOF_REG, 26 },
        { "f27", GDB_SIZEOF_REG, 27 },
        { "f28", GDB_SIZEOF_REG, 28 },
        { "f29", GDB_SIZEOF_REG, 29 },
        { "f30", GDB_SIZEOF_REG, 30 },
        { "f31", GDB_SIZEOF_REG, 31 },
        { "fsr", GDB_SIZEOF_REG, 0 },
        { "fir", GDB_SIZEOF_REG, 0 },
};

int dbg_set_reg(int regno, void *mem, struct pt_regs *regs)
{
        int fp_reg;

        if (regno < 0 || regno >= DBG_MAX_REG_NUM)
                return -EINVAL;

        if (dbg_reg_def[regno].offset != -1 && regno < 38) {
                memcpy((void *)regs + dbg_reg_def[regno].offset, mem,
                       dbg_reg_def[regno].size);
        } else if (current && dbg_reg_def[regno].offset != -1 && regno < 72) {
                /* FP registers 38 -> 69 */
                if (!(regs->cp0_status & ST0_CU1))
                        return 0;
                if (regno == 70) {
                        /* Process the fcr31/fsr (register 70) */
                        memcpy((void *)&current->thread.fpu.fcr31, mem,
                               dbg_reg_def[regno].size);
                        goto out_save;
                } else if (regno == 71) {
                        /* Ignore the fir (register 71) */
                        goto out_save;
                }
                fp_reg = dbg_reg_def[regno].offset;
                memcpy((void *)&current->thread.fpu.fpr[fp_reg], mem,
                       dbg_reg_def[regno].size);
out_save:
                restore_fp(current);
        }

        return 0;
}

char *dbg_get_reg(int regno, void *mem, struct pt_regs *regs)
{
        int fp_reg;

        if (regno >= DBG_MAX_REG_NUM || regno < 0)
                return NULL;

        if (dbg_reg_def[regno].offset != -1 && regno < 38) {
                /* First 38 registers */
                memcpy(mem, (void *)regs + dbg_reg_def[regno].offset,
                       dbg_reg_def[regno].size);
        } else if (current && dbg_reg_def[regno].offset != -1 && regno < 72) {
                /* FP registers 38 -> 69 */
                if (!(regs->cp0_status & ST0_CU1))
                        goto out;
                save_fp(current);
                if (regno == 70) {
                        /* Process the fcr31/fsr (register 70) */
                        memcpy(mem, (void *)&current->thread.fpu.fcr31,
                               dbg_reg_def[regno].size);
                        goto out;
                } else if (regno == 71) {
                        /* Ignore the fir (register 71) */
                        memset(mem, 0, dbg_reg_def[regno].size);
                        goto out;
                }
                fp_reg = dbg_reg_def[regno].offset;
                memcpy(mem, (void *)&current->thread.fpu.fpr[fp_reg],
                       dbg_reg_def[regno].size);
        }

out:
        return dbg_reg_def[regno].name;

}

void arch_kgdb_breakpoint(void)
{
        __asm__ __volatile__(
                ".globl breakinst\n\t"
                ".set\tnoreorder\n\t"
                "nop\n"
                "breakinst:\tbreak\n\t"
                "nop\n\t"
                ".set\treorder");
}

/*
 * MIPS overrides the generic weak kgdb_call_nmi_hook(); the "extra code"
 * mentioned in the commit message above is the set_fs() handling around
 * kgdb_nmicallback().
 */
void kgdb_call_nmi_hook(void *ignored)
{
        mm_segment_t old_fs;

        old_fs = get_fs();
        set_fs(get_ds());

        kgdb_nmicallback(raw_smp_processor_id(), NULL);

        set_fs(old_fs);
}

static int compute_signal(int tt)
{
        struct hard_trap_info *ht;

        for (ht = hard_trap_info; ht->tt && ht->signo; ht++)
                if (ht->tt == tt)
                        return ht->signo;

        return SIGHUP;		/* default for things we don't know about */
}

/*
 * Similar to regs_to_gdb_regs() except that process is sleeping and so
 * we may not be able to get all the info.
 */
void sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *p)
{
        int reg;
#if (KGDB_GDB_REG_SIZE == 32)
        u32 *ptr = (u32 *)gdb_regs;
#else
        u64 *ptr = (u64 *)gdb_regs;
#endif

        for (reg = 0; reg < 16; reg++)
                *(ptr++) = 0;

        /* S0 - S7 */
        *(ptr++) = p->thread.reg16;
        *(ptr++) = p->thread.reg17;
        *(ptr++) = p->thread.reg18;
        *(ptr++) = p->thread.reg19;
        *(ptr++) = p->thread.reg20;
        *(ptr++) = p->thread.reg21;
        *(ptr++) = p->thread.reg22;
        *(ptr++) = p->thread.reg23;

        for (reg = 24; reg < 28; reg++)
                *(ptr++) = 0;

        /* GP, SP, FP, RA */
        *(ptr++) = (long)p;
        *(ptr++) = p->thread.reg29;
        *(ptr++) = p->thread.reg30;
        *(ptr++) = p->thread.reg31;

        *(ptr++) = p->thread.cp0_status;

        /* lo, hi */
        *(ptr++) = 0;
        *(ptr++) = 0;

        /*
         * BadVAddr, Cause
         * Ideally these would come from the last exception frame up the stack
         * but that requires unwinding, otherwise we can't know much for sure.
         */
        *(ptr++) = 0;
        *(ptr++) = 0;

        /*
         * PC
         * use return address (RA), i.e. the moment after return from resume()
         */
        *(ptr++) = p->thread.reg31;
}

void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long pc)
{
        regs->cp0_epc = pc;
}

/*
 * Calls linux_debug_hook before the kernel dies. If KGDB is enabled,
 * then try to fall into the debugger
 */
static int kgdb_mips_notify(struct notifier_block *self, unsigned long cmd,
                            void *ptr)
{
        struct die_args *args = (struct die_args *)ptr;
        struct pt_regs *regs = args->regs;
        int trap = (regs->cp0_cause & 0x7c) >> 2;
        mm_segment_t old_fs;

#ifdef CONFIG_KPROBES
        /*
         * Return immediately if the kprobes fault notifier has set
         * DIE_PAGE_FAULT.
         */
        if (cmd == DIE_PAGE_FAULT)
                return NOTIFY_DONE;
#endif /* CONFIG_KPROBES */

        /* Userspace events, ignore. */
        if (user_mode(regs))
                return NOTIFY_DONE;

        /* Kernel mode. Set correct address limit */
        old_fs = get_fs();
        set_fs(get_ds());

        if (atomic_read(&kgdb_active) != -1)
                kgdb_nmicallback(smp_processor_id(), regs);

        if (kgdb_handle_exception(trap, compute_signal(trap), cmd, regs)) {
                set_fs(old_fs);
                return NOTIFY_DONE;
        }

        if (atomic_read(&kgdb_setting_breakpoint))
                if ((trap == 9) && (regs->cp0_epc == (unsigned long)breakinst))
                        regs->cp0_epc += 4;

        /* In SMP mode, __flush_cache_all does IPI */
        local_irq_enable();
        __flush_cache_all();

        set_fs(old_fs);
        return NOTIFY_STOP;
}

#ifdef CONFIG_KGDB_LOW_LEVEL_TRAP
int kgdb_ll_trap(int cmd, const char *str,
                 struct pt_regs *regs, long err, int trap, int sig)
{
        struct die_args args = {
                .regs = regs,
                .str = str,
                .err = err,
                .trapnr = trap,
                .signr = sig,

        };

        if (!kgdb_io_module_registered)
                return NOTIFY_DONE;

        return kgdb_mips_notify(NULL, cmd, &args);
}
#endif /* CONFIG_KGDB_LOW_LEVEL_TRAP */

static struct notifier_block kgdb_notifier = {
        .notifier_call = kgdb_mips_notify,
};

/*
 * Handle the 'c' command
 */
int kgdb_arch_handle_exception(int vector, int signo, int err_code,
                               char *remcom_in_buffer, char *remcom_out_buffer,
                               struct pt_regs *regs)
{
        char *ptr;
        unsigned long address;

        switch (remcom_in_buffer[0]) {
        case 'c':
                /* handle the optional parameter */
                ptr = &remcom_in_buffer[1];
                if (kgdb_hex2long(&ptr, &address))
                        regs->cp0_epc = address;

                return 0;
        }

        return -1;
}

struct kgdb_arch arch_kgdb_ops;

int kgdb_arch_init(void)
{
        union mips_instruction insn = {
                .r_format = {
                        .opcode = spec_op,
                        .func = break_op,
                }
        };
        memcpy(arch_kgdb_ops.gdb_bpt_instr, insn.byte, BREAK_INSTR_SIZE);

        register_die_notifier(&kgdb_notifier);

        return 0;
}

/*
 * kgdb_arch_exit - Perform any architecture specific uninitialization.
 *
 * This function will handle the uninitialization of any architecture
 * specific callbacks, for dynamic registration and unregistration.
 */
void kgdb_arch_exit(void)
{
        unregister_die_notifier(&kgdb_notifier);
}