/*
 * SuperH KGDB support
 *
 * Copyright (C) 2008 - 2009  Paul Mundt
 *
 * Single stepping taken from the old stub by Henry Bell and Jeremy Siegel.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/kgdb.h>
#include <linux/kdebug.h>
#include <linux/irq.h>
#include <linux/io.h>
#include <linux/sched.h>
#include <asm/cacheflush.h>
#include <asm/traps.h>

/* Macros for single step instruction identification */
#define OPCODE_BT(op)		(((op) & 0xff00) == 0x8900)
#define OPCODE_BF(op)		(((op) & 0xff00) == 0x8b00)
#define OPCODE_BTF_DISP(op)	(((op) & 0x80) ? (((op) | 0xffffff80) << 1) : \
				 (((op) & 0x7f ) << 1))
#define OPCODE_BFS(op)		(((op) & 0xff00) == 0x8f00)
#define OPCODE_BTS(op)		(((op) & 0xff00) == 0x8d00)
#define OPCODE_BRA(op)		(((op) & 0xf000) == 0xa000)
#define OPCODE_BRA_DISP(op)	(((op) & 0x800) ? (((op) | 0xfffff800) << 1) : \
				 (((op) & 0x7ff) << 1))
#define OPCODE_BRAF(op)		(((op) & 0xf0ff) == 0x0023)
#define OPCODE_BRAF_REG(op)	(((op) & 0x0f00) >> 8)
#define OPCODE_BSR(op)		(((op) & 0xf000) == 0xb000)
#define OPCODE_BSR_DISP(op)	(((op) & 0x800) ? (((op) | 0xfffff800) << 1) : \
				 (((op) & 0x7ff) << 1))
#define OPCODE_BSRF(op)		(((op) & 0xf0ff) == 0x0003)
#define OPCODE_BSRF_REG(op)	(((op) >> 8) & 0xf)
#define OPCODE_JMP(op)		(((op) & 0xf0ff) == 0x402b)
#define OPCODE_JMP_REG(op)	(((op) >> 8) & 0xf)
#define OPCODE_JSR(op)		(((op) & 0xf0ff) == 0x400b)
#define OPCODE_JSR_REG(op)	(((op) >> 8) & 0xf)
#define OPCODE_RTS(op)		((op) == 0xb)
#define OPCODE_RTE(op)		((op) == 0x2b)

#define SR_T_BIT_MASK           0x1
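/* trapa #0x3d, serviced by the singlestep debug trap handler below */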
#define STEP_OPCODE             0xc33d

/* Calculate the new address for after a step */
static short *get_step_address(struct pt_regs *linux_regs)
{
	insn_size_t op = __raw_readw(linux_regs->pc);
	long addr;

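	/* Branch targets below are computed relative to PC + 4, per the SH ISA */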
	/* BT */
	if (OPCODE_BT(op)) {
		if (linux_regs->sr & SR_T_BIT_MASK)
			addr = linux_regs->pc + 4 + OPCODE_BTF_DISP(op);
		else
			addr = linux_regs->pc + 2;
	}

	/* BTS */
	else if (OPCODE_BTS(op)) {
		if (linux_regs->sr & SR_T_BIT_MASK)
			addr = linux_regs->pc + 4 + OPCODE_BTF_DISP(op);
		else
			addr = linux_regs->pc + 4;	/* Not in delay slot */
	}

	/* BF */
	else if (OPCODE_BF(op)) {
		if (!(linux_regs->sr & SR_T_BIT_MASK))
			addr = linux_regs->pc + 4 + OPCODE_BTF_DISP(op);
		else
			addr = linux_regs->pc + 2;
	}

	/* BFS */
	else if (OPCODE_BFS(op)) {
		if (!(linux_regs->sr & SR_T_BIT_MASK))
			addr = linux_regs->pc + 4 + OPCODE_BTF_DISP(op);
		else
			addr = linux_regs->pc + 4;	/* Not in delay slot */
	}

	/* BRA */
	else if (OPCODE_BRA(op))
		addr = linux_regs->pc + 4 + OPCODE_BRA_DISP(op);

	/* BRAF */
	else if (OPCODE_BRAF(op))
		addr = linux_regs->pc + 4
		    + linux_regs->regs[OPCODE_BRAF_REG(op)];

	/* BSR */
	else if (OPCODE_BSR(op))
		addr = linux_regs->pc + 4 + OPCODE_BSR_DISP(op);

	/* BSRF */
	else if (OPCODE_BSRF(op))
		addr = linux_regs->pc + 4
		    + linux_regs->regs[OPCODE_BSRF_REG(op)];

	/* JMP */
	else if (OPCODE_JMP(op))
		addr = linux_regs->regs[OPCODE_JMP_REG(op)];

	/* JSR */
	else if (OPCODE_JSR(op))
		addr = linux_regs->regs[OPCODE_JSR_REG(op)];

	/* RTS */
	else if (OPCODE_RTS(op))
		addr = linux_regs->pr;

	/* RTE */
	else if (OPCODE_RTE(op))
		addr = linux_regs->regs[15];

	/* Other */
	else
		addr = linux_regs->pc + instruction_size(op);

	flush_icache_range(addr, addr + instruction_size(op));
	return (short *)addr;
}

/*
 * Replace the instruction immediately after the current instruction
 * (i.e. next in the expected flow of control) with a trap instruction,
 * so that returning will cause only a single instruction to be executed.
 * Note that this model is slightly broken for instructions with delay
 * slots (e.g. B[TF]S, BSR, BRA etc), where both the branch and the
 * instruction in the delay slot will be executed.
 */

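/* Original opcode and its address, saved by do_single_step() and restored by undo_single_step() */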
static unsigned long stepped_address;
static insn_size_t stepped_opcode;

static void do_single_step(struct pt_regs *linux_regs)
{
	/* Determine where the target instruction will send us to */
	unsigned short *addr = get_step_address(linux_regs);

	stepped_address = (int)addr;

	/* Replace it */
	stepped_opcode = __raw_readw((long)addr);
	*addr = STEP_OPCODE;

	/* Flush and return */
	flush_icache_range((long)addr, (long)addr +
			   instruction_size(stepped_opcode));
}

/* Undo a single step */
static void undo_single_step(struct pt_regs *linux_regs)
{
	/* If we have stepped, put back the old instruction */
	/* Use stepped_address in case we stopped elsewhere */
	if (stepped_opcode != 0) {
		__raw_writew(stepped_opcode, stepped_address);
		flush_icache_range(stepped_address, stepped_address + 2);
	}

	stepped_opcode = 0;
}

void pt_regs_to_gdb_regs(unsigned long *gdb_regs, struct pt_regs *regs)
{
	int i;

	for (i = 0; i < 16; i++)
		gdb_regs[GDB_R0 + i] = regs->regs[i];

	gdb_regs[GDB_PC] = regs->pc;
	gdb_regs[GDB_PR] = regs->pr;
	gdb_regs[GDB_SR] = regs->sr;
	gdb_regs[GDB_GBR] = regs->gbr;
	gdb_regs[GDB_MACH] = regs->mach;
	gdb_regs[GDB_MACL] = regs->macl;

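	/* VBR is not saved in pt_regs, so read it directly from the CPU */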
	__asm__ __volatile__ ("stc vbr, %0" : "=r" (gdb_regs[GDB_VBR]));
}

void gdb_regs_to_pt_regs(unsigned long *gdb_regs, struct pt_regs *regs)
{
	int i;

	for (i = 0; i < 16; i++)
		regs->regs[GDB_R0 + i] = gdb_regs[GDB_R0 + i];

	regs->pc = gdb_regs[GDB_PC];
	regs->pr = gdb_regs[GDB_PR];
	regs->sr = gdb_regs[GDB_SR];
	regs->gbr = gdb_regs[GDB_GBR];
	regs->mach = gdb_regs[GDB_MACH];
	regs->macl = gdb_regs[GDB_MACL];
}

void sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *p)
{
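	/* Only the saved SP and PC of a sleeping task are available here */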
	gdb_regs[GDB_R15] = p->thread.sp;
	gdb_regs[GDB_PC] = p->thread.pc;
}

int kgdb_arch_handle_exception(int e_vector, int signo, int err_code,
			       char *remcomInBuffer, char *remcomOutBuffer,
			       struct pt_regs *linux_regs)
{
	unsigned long addr;
	char *ptr;

	/* Undo any stepping we may have done */
	undo_single_step(linux_regs);

	switch (remcomInBuffer[0]) {
	case 'c':
	case 's':
		/* try to read optional parameter, pc unchanged if no parm */
		ptr = &remcomInBuffer[1];
		if (kgdb_hex2long(&ptr, &addr))
			linux_regs->pc = addr;
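		/* fall through */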
	case 'D':
	case 'k':
		atomic_set(&kgdb_cpu_doing_single_step, -1);

		if (remcomInBuffer[0] == 's') {
			do_single_step(linux_regs);
			kgdb_single_step = 1;

			atomic_set(&kgdb_cpu_doing_single_step,
				   raw_smp_processor_id());
		}

		return 0;
	}

	/* Anything else means we stay in the debugger rather than resuming */
	return -1;
}

unsigned long kgdb_arch_pc(int exception, struct pt_regs *regs)
{
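	/*
	 * Exception 60 (0x3c) is the breakpoint trapa; the saved PC points
	 * past the 2-byte trap instruction, so report the breakpoint
	 * address itself.
	 */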
	if (exception == 60)
		return instruction_pointer(regs) - 2;
	return instruction_pointer(regs);
}

void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long ip)
{
	regs->pc = ip;
}

/*
 * The primary entry points for the kgdb debug trap table entries.
 */
BUILD_TRAP_HANDLER(singlestep)
{
	unsigned long flags;
	TRAP_HANDLER_DECL;

	local_irq_save(flags);
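	/* Rewind the PC to the trapa slot so execution resumes at the restored instruction */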
	regs->pc -= instruction_size(__raw_readw(regs->pc - 4));
	kgdb_handle_exception(0, SIGTRAP, 0, regs);
	local_irq_restore(flags);
}

static int __kgdb_notify(struct die_args *args, unsigned long cmd)
{
	int ret;

	switch (cmd) {
	case DIE_BREAKPOINT:
		/*
		 * This means a user thread is single stepping
		 * a system call, which should be ignored.
		 */
		if (test_thread_flag(TIF_SINGLESTEP))
			return NOTIFY_DONE;

		ret = kgdb_handle_exception(args->trapnr & 0xff, args->signr,
					    args->err, args->regs);
		if (ret)
			return NOTIFY_DONE;

		break;
	}

	return NOTIFY_STOP;
}

static int
kgdb_notify(struct notifier_block *self, unsigned long cmd, void *ptr)
{
	unsigned long flags;
	int ret;

	local_irq_save(flags);
	ret = __kgdb_notify(ptr, cmd);
	local_irq_restore(flags);

	return ret;
}

static struct notifier_block kgdb_notifier = {
	.notifier_call	= kgdb_notify,

	/*
	 * Lowest-priority notifier: we want to be notified last.
	 */
	.priority	= -INT_MAX,
};

int kgdb_arch_init(void)
{
	return register_die_notifier(&kgdb_notifier);
}

void kgdb_arch_exit(void)
{
	unregister_die_notifier(&kgdb_notifier);
}

struct kgdb_arch arch_kgdb_ops = {
	/* Breakpoint instruction: trapa #0x3c */
#ifdef CONFIG_CPU_LITTLE_ENDIAN
	.gdb_bpt_instr		= { 0x3c, 0xc3 },
#else
	.gdb_bpt_instr		= { 0xc3, 0x3c },
#endif
};