/*
 * Originally written by Glenn Engel, Lake Stevens Instrument Division
 *
 * Contributed by HP Systems
 *
 * Modified for Linux/MIPS (and MIPS in general) by Andreas Busse
 * Send complaints, suggestions etc. to <[email protected]>
 *
 * Copyright (C) 1995 Andreas Busse
 *
 * Copyright (C) 2003 MontaVista Software Inc.
 *
 * Copyright (C) 2004-2005 MontaVista Software Inc.
 *
 * Copyright (C) 2007-2008 Wind River Systems, Inc.
 * Author/Maintainer: Jason Wessel, [email protected]
 *
 * This file is licensed under the terms of the GNU General Public License
 * version 2. This program is licensed "as is" without any warranty of any
 * kind, whether express or implied.
 */

#include <linux/ptrace.h>		/* for linux pt_regs struct */
#include <linux/kgdb.h>
#include <linux/kdebug.h>
#include <linux/sched.h>
#include <asm/inst.h>
#include <asm/fpu.h>
#include <asm/cacheflush.h>
#include <asm/processor.h>
#include <asm/sigcontext.h>

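/*
 * Map MIPS exception codes (Cause.ExcCode) to the signal numbers that are
 * reported back to GDB for the corresponding trap.
 */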
static struct hard_trap_info {
	unsigned char tt;	/* Trap type code for MIPS R3xxx and R4xxx */
	unsigned char signo;	/* Signal that we map this trap into */
} hard_trap_info[] = {
	{ 6, SIGBUS },		/* instruction bus error */
	{ 7, SIGBUS },		/* data bus error */
	{ 9, SIGTRAP },		/* break */
/*	{ 11, SIGILL }, */	/* CPU unusable */
	{ 12, SIGFPE },		/* overflow */
	{ 13, SIGTRAP },	/* trap */
	{ 14, SIGSEGV },	/* virtual instruction cache coherency */
	{ 15, SIGFPE },		/* floating point exception */
	{ 23, SIGSEGV },	/* watch */
	{ 31, SIGSEGV },	/* virtual data cache coherency */
	{ 0, 0 }		/* Must be last */
};

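/*
 * Compiled-in breakpoint used when the kgdb core traps into the debugger
 * via kgdb_breakpoint(). The global label "breakinst" marks the break
 * instruction itself so kgdb_mips_notify() below can recognize it and
 * advance the EPC past it on resume.
 */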
void arch_kgdb_breakpoint(void)
{
	__asm__ __volatile__(
		".globl breakinst\n\t"
		".set\tnoreorder\n\t"
		"nop\n"
		"breakinst:\tbreak\n\t"
		"nop\n\t"
		".set\treorder");
}

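/*
 * When one CPU drops into the debugger, kgdb_roundup_cpus() pulls the other
 * CPUs in as well: each one takes the IPI below and parks itself in the
 * kgdb core via kgdb_nmicallback().
 */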
static void kgdb_call_nmi_hook(void *ignored)
{
	kgdb_nmicallback(raw_smp_processor_id(), NULL);
}

void kgdb_roundup_cpus(unsigned long flags)
{
	local_irq_enable();
	smp_call_function(kgdb_call_nmi_hook, NULL, 0);
	local_irq_disable();
}

static int compute_signal(int tt)
{
	struct hard_trap_info *ht;

	for (ht = hard_trap_info; ht->tt && ht->signo; ht++)
		if (ht->tt == tt)
			return ht->signo;

	return SIGHUP;		/* default for things we don't know about */
}

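/*
 * Copy the exception frame into the flat register buffer in the order the
 * GDB remote protocol expects for MIPS: r0-r31, then CP0 status, lo, hi,
 * badvaddr, cause and epc, followed by the 32 FP registers (only filled in
 * when the FPU was in use, i.e. ST0_CU1 set in the status register).
 */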
void pt_regs_to_gdb_regs(unsigned long *gdb_regs, struct pt_regs *regs)
{
	int reg;

#if (KGDB_GDB_REG_SIZE == 32)
	u32 *ptr = (u32 *)gdb_regs;
#else
	u64 *ptr = (u64 *)gdb_regs;
#endif

	for (reg = 0; reg < 32; reg++)
		*(ptr++) = regs->regs[reg];

	*(ptr++) = regs->cp0_status;
	*(ptr++) = regs->lo;
	*(ptr++) = regs->hi;
	*(ptr++) = regs->cp0_badvaddr;
	*(ptr++) = regs->cp0_cause;
	*(ptr++) = regs->cp0_epc;

	/* FP REGS */
	if (!(current && (regs->cp0_status & ST0_CU1)))
		return;

	save_fp(current);
	for (reg = 0; reg < 32; reg++)
		*(ptr++) = current->thread.fpu.fpr[reg];
}

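/*
 * The inverse of pt_regs_to_gdb_regs(): write GDB's view of the registers
 * back into the exception frame (and into the FPU context when it is live).
 */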
void gdb_regs_to_pt_regs(unsigned long *gdb_regs, struct pt_regs *regs)
{
	int reg;

#if (KGDB_GDB_REG_SIZE == 32)
	const u32 *ptr = (u32 *)gdb_regs;
#else
	const u64 *ptr = (u64 *)gdb_regs;
#endif

	for (reg = 0; reg < 32; reg++)
		regs->regs[reg] = *(ptr++);

	regs->cp0_status = *(ptr++);
	regs->lo = *(ptr++);
	regs->hi = *(ptr++);
	regs->cp0_badvaddr = *(ptr++);
	regs->cp0_cause = *(ptr++);
	regs->cp0_epc = *(ptr++);

	/* FP REGS from current */
	if (!(current && (regs->cp0_status & ST0_CU1)))
		return;

	for (reg = 0; reg < 32; reg++)
		current->thread.fpu.fpr[reg] = *(ptr++);
	restore_fp(current);
}

/*
 * Similar to pt_regs_to_gdb_regs() except that the process is sleeping and
 * so we may not be able to get all the info.
 */
void sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *p)
{
	int reg;
	struct thread_info *ti = task_thread_info(p);
	unsigned long ksp = (unsigned long)ti + THREAD_SIZE - 32;
	struct pt_regs *regs = (struct pt_regs *)ksp - 1;
#if (KGDB_GDB_REG_SIZE == 32)
	u32 *ptr = (u32 *)gdb_regs;
#else
	u64 *ptr = (u64 *)gdb_regs;
#endif

	for (reg = 0; reg < 16; reg++)
		*(ptr++) = regs->regs[reg];

	/* S0 - S7 */
	for (reg = 16; reg < 24; reg++)
		*(ptr++) = regs->regs[reg];

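	/* t8, t9, k0 and k1 are not preserved for a sleeping task; report 0 */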
	for (reg = 24; reg < 28; reg++)
		*(ptr++) = 0;

	/* GP, SP, FP, RA */
	for (reg = 28; reg < 32; reg++)
		*(ptr++) = regs->regs[reg];

	*(ptr++) = regs->cp0_status;
	*(ptr++) = regs->lo;
	*(ptr++) = regs->hi;
	*(ptr++) = regs->cp0_badvaddr;
	*(ptr++) = regs->cp0_cause;
	*(ptr++) = regs->cp0_epc;
}

/*
 * Called from the die notifier chain when the kernel takes an exception.
 * If KGDB is enabled, try to fall into the debugger.
 */
static int kgdb_mips_notify(struct notifier_block *self, unsigned long cmd,
			    void *ptr)
{
	struct die_args *args = (struct die_args *)ptr;
	struct pt_regs *regs = args->regs;
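	/* ExcCode lives in bits 6:2 of the CP0 Cause register */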
	int trap = (regs->cp0_cause & 0x7c) >> 2;

	if (fixup_exception(regs))
		return NOTIFY_DONE;

	/* Userspace events, ignore. */
	if (user_mode(regs))
		return NOTIFY_DONE;

	if (atomic_read(&kgdb_active) != -1)
		kgdb_nmicallback(smp_processor_id(), regs);

	if (kgdb_handle_exception(trap, compute_signal(trap), 0, regs))
		return NOTIFY_DONE;

	if (atomic_read(&kgdb_setting_breakpoint))
		if ((trap == 9) && (regs->cp0_epc == (unsigned long)breakinst))
			regs->cp0_epc += 4;

	/* In SMP mode, __flush_cache_all does IPI */
	local_irq_enable();
	__flush_cache_all();

	return NOTIFY_STOP;
}

static struct notifier_block kgdb_notifier = {
	.notifier_call = kgdb_mips_notify,
};

/*
 * Handle the 's' (single step) and 'c' (continue) commands.
 */
int kgdb_arch_handle_exception(int vector, int signo, int err_code,
			       char *remcom_in_buffer, char *remcom_out_buffer,
			       struct pt_regs *regs)
{
	char *ptr;
	unsigned long address;
	int cpu = smp_processor_id();

	switch (remcom_in_buffer[0]) {
	case 's':
	case 'c':
		/* handle the optional resume address parameter */
		ptr = &remcom_in_buffer[1];
		if (kgdb_hex2long(&ptr, &address))
			regs->cp0_epc = address;

		atomic_set(&kgdb_cpu_doing_single_step, -1);
		if (remcom_in_buffer[0] == 's')
			if (kgdb_contthread)
				atomic_set(&kgdb_cpu_doing_single_step, cpu);

		return 0;
	}

	return -1;
}

struct kgdb_arch arch_kgdb_ops;

/*
 * We use kgdb_early_setup so that functions we need to call now don't
 * cause trouble when called again later.
 */
int kgdb_arch_init(void)
{
	union mips_instruction insn = {
		.r_format = {
			.opcode = spec_op,
			.func	= break_op,
		}
	};
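	/*
	 * Build the MIPS "break" opcode (R-format, SPECIAL opcode with the
	 * BREAK function field) and hand it to the kgdb core, which plants
	 * it at software breakpoint addresses.
	 */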
	memcpy(arch_kgdb_ops.gdb_bpt_instr, insn.byte, BREAK_INSTR_SIZE);

	register_die_notifier(&kgdb_notifier);

	return 0;
}

/*
 * kgdb_arch_exit - Perform any architecture specific uninitialization.
 *
 * This function will handle the uninitialization of any architecture
 * specific callbacks, for dynamic registration and unregistration.
 */
void kgdb_arch_exit(void)
{
	unregister_die_notifier(&kgdb_notifier);
}