/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 *   This program is free software; you can redistribute it and/or
 *   modify it under the terms of the GNU General Public License
 *   as published by the Free Software Foundation, version 2.
 *
 *   This program is distributed in the hope that it will be useful, but
 *   WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 *   NON INFRINGEMENT.  See the GNU General Public License for
 *   more details.
 *
 * A code-rewriter that enables instruction single-stepping.
 * Derived from iLib's single-stepping code.
 */

#ifndef __tilegx__   /* Hardware support for single step unavailable. */

/* These functions are only used on the TILE platform */
#include <linux/slab.h>
#include <linux/thread_info.h>
#include <linux/uaccess.h>
#include <linux/mman.h>
#include <linux/types.h>
#include <linux/err.h>
#include <asm/cacheflush.h>
#include <asm/opcode-tile.h>
#include <asm/opcode_constants.h>
#include <arch/abi.h>

#define signExtend17(val) sign_extend((val), 17)
#define TILE_X1_MASK (0xffffffffULL << 31)
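/*
 * TILE_X1_MASK covers the X1 slot in the high bits of the 64-bit bundle;
 * the helpers below use it to clear that slot before OR-ing in a
 * replacement instruction.
 */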

int unaligned_printk;

static int __init setup_unaligned_printk(char *str)
{
	long val;
	if (strict_strtol(str, 0, &val) != 0)
		return 0;
	unaligned_printk = val;
	pr_info("Printk for each unaligned data access is %s\n",
		unaligned_printk ? "enabled" : "disabled");
	return 1;
}
__setup("unaligned_printk=", setup_unaligned_printk);
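/*
 * "unaligned_printk=1" on the kernel command line enables the per-access
 * message at boot; the same flag can also be changed at run time via
 * /proc/sys/tile/unaligned_fixup/printk (see the help text printed below).
 */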

unsigned int unaligned_fixup_count;

enum mem_op {
	MEMOP_NONE,
	MEMOP_LOAD,
	MEMOP_STORE,
	MEMOP_LOAD_POSTINCR,
	MEMOP_STORE_POSTINCR
};

static inline tile_bundle_bits set_BrOff_X1(tile_bundle_bits n, s32 offset)
{
	tile_bundle_bits result;

	/* mask out the old offset */
	tile_bundle_bits mask = create_BrOff_X1(-1);
	result = n & (~mask);

	/* or in the new offset */
	result |= create_BrOff_X1(offset);

	return result;
}

static inline tile_bundle_bits move_X1(tile_bundle_bits n, int dest, int src)
{
	tile_bundle_bits result;
	tile_bundle_bits op;

	result = n & (~TILE_X1_MASK);

	op = create_Opcode_X1(SPECIAL_0_OPCODE_X1) |
		create_RRROpcodeExtension_X1(OR_SPECIAL_0_OPCODE_X1) |
		create_Dest_X1(dest) |
		create_SrcB_X1(TREG_ZERO) |
		create_SrcA_X1(src);

	result |= op;
	return result;
}

static inline tile_bundle_bits nop_X1(tile_bundle_bits n)
{
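	/* "move zero, zero" is encoded as "or zero, zero, zero", a no-op. */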
	return move_X1(n, TREG_ZERO, TREG_ZERO);
}

static inline tile_bundle_bits addi_X1(
	tile_bundle_bits n, int dest, int src, int imm)
{
	n &= ~TILE_X1_MASK;

	n |=  (create_SrcA_X1(src) |
	       create_Dest_X1(dest) |
	       create_Imm8_X1(imm) |
	       create_S_X1(0) |
	       create_Opcode_X1(IMM_0_OPCODE_X1) |
	       create_ImmOpcodeExtension_X1(ADDI_IMM_0_OPCODE_X1));

	return n;
}

static tile_bundle_bits rewrite_load_store_unaligned(
	struct single_step_state *state,
	tile_bundle_bits bundle,
	struct pt_regs *regs,
	enum mem_op mem_op,
	int size, int sign_ext)
{
	unsigned char __user *addr;
	int val_reg, addr_reg, err, val;

	/* Get address and value registers */
	if (bundle & TILE_BUNDLE_Y_ENCODING_MASK) {
		addr_reg = get_SrcA_Y2(bundle);
		val_reg = get_SrcBDest_Y2(bundle);
	} else if (mem_op == MEMOP_LOAD || mem_op == MEMOP_LOAD_POSTINCR) {
		addr_reg = get_SrcA_X1(bundle);
		val_reg  = get_Dest_X1(bundle);
	} else {
		addr_reg = get_SrcA_X1(bundle);
		val_reg  = get_SrcB_X1(bundle);
	}

	/*
	 * If registers are not GPRs, don't try to handle it.
	 *
	 * FIXME: we could handle non-GPR loads by getting the real value
	 * from memory, writing it to the single step buffer, using a
	 * temp_reg to hold a pointer to that memory, then executing that
	 * instruction and resetting temp_reg.  For non-GPR stores, it's a
	 * little trickier; we could use the single step buffer for that
	 * too, but we'd have to add some more state bits so that we could
	 * call back in here to copy that value to the real target.  For
	 * now, we just handle the simple case.
	 */
	if ((val_reg >= PTREGS_NR_GPRS &&
	     (val_reg != TREG_ZERO ||
	      mem_op == MEMOP_LOAD ||
	      mem_op == MEMOP_LOAD_POSTINCR)) ||
	    addr_reg >= PTREGS_NR_GPRS)
		return bundle;

	/* If it's aligned, don't handle it specially */
	addr = (void __user *)regs->regs[addr_reg];
	if (((unsigned long)addr % size) == 0)
		return bundle;

#ifndef __LITTLE_ENDIAN
# error We assume little-endian representation with copy_xx_user size 2 here
#endif
	/* Handle unaligned load/store */
	if (mem_op == MEMOP_LOAD || mem_op == MEMOP_LOAD_POSTINCR) {
		unsigned short val_16;
		switch (size) {
		case 2:
			err = copy_from_user(&val_16, addr, sizeof(val_16));
			val = sign_ext ? ((short)val_16) : val_16;
			break;
		case 4:
			err = copy_from_user(&val, addr, sizeof(val));
			break;
		default:
			BUG();
		}
		if (err == 0) {
			state->update_reg = val_reg;
			state->update_value = val;
			state->update = 1;
		}
	} else {
		val = (val_reg == TREG_ZERO) ? 0 : regs->regs[val_reg];
		err = copy_to_user(addr, &val, size);
	}

	if (err) {
		siginfo_t info = {
			.si_signo = SIGSEGV,
			.si_code = SEGV_MAPERR,
			.si_addr = addr
		};
		force_sig_info(info.si_signo, &info, current);
		return (tile_bundle_bits) 0;
	}

	if (unaligned_fixup == 0) {
		siginfo_t info = {
			.si_signo = SIGBUS,
			.si_code = BUS_ADRALN,
			.si_addr = addr
		};
		force_sig_info(info.si_signo, &info, current);
		return (tile_bundle_bits) 0;
	}

	if (unaligned_printk || unaligned_fixup_count == 0) {
		pr_info("Process %d/%s: PC %#lx: Fixup of"
			" unaligned %s at %#lx.\n",
			current->pid, current->comm, regs->pc,
			(mem_op == MEMOP_LOAD ||
			 mem_op == MEMOP_LOAD_POSTINCR) ?
			"load" : "store",
			(unsigned long)addr);
		if (!unaligned_printk) {
#define P pr_info
P("\n");
P("Unaligned fixups in the kernel will slow your application considerably.\n");
P("To find them, write a \"1\" to /proc/sys/tile/unaligned_fixup/printk,\n");
P("which requests the kernel show all unaligned fixups, or write a \"0\"\n");
P("to /proc/sys/tile/unaligned_fixup/enabled, in which case each unaligned\n");
P("access will become a SIGBUS you can debug. No further warnings will be\n");
P("shown so as to avoid additional slowdown, but you can track the number\n");
P("of fixups performed via /proc/sys/tile/unaligned_fixup/count.\n");
P("Use the tile-addr2line command (see \"info addr2line\") to decode PCs.\n");
P("\n");
#undef P
		}
	}
	++unaligned_fixup_count;

	if (bundle & TILE_BUNDLE_Y_ENCODING_MASK) {
		/* Convert the Y2 instruction to a prefetch. */
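		/*
		 * A load whose destination is the zero register acts as a
		 * prefetch; the real access was already emulated above with
		 * copy_{from,to}_user(), so the rewritten Y2 slot is harmless.
		 */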
		bundle &= ~(create_SrcBDest_Y2(-1) |
			    create_Opcode_Y2(-1));
		bundle |= (create_SrcBDest_Y2(TREG_ZERO) |
			   create_Opcode_Y2(LW_OPCODE_Y2));
	/* Replace the load postincr with an addi */
	} else if (mem_op == MEMOP_LOAD_POSTINCR) {
		bundle = addi_X1(bundle, addr_reg, addr_reg,
				 get_Imm8_X1(bundle));
	/* Replace the store postincr with an addi */
	} else if (mem_op == MEMOP_STORE_POSTINCR) {
		bundle = addi_X1(bundle, addr_reg, addr_reg,
				 get_Dest_Imm8_X1(bundle));
	} else {
		/* Convert the X1 instruction to a nop. */
		bundle &= ~(create_Opcode_X1(-1) |
			    create_UnShOpcodeExtension_X1(-1) |
			    create_UnOpcodeExtension_X1(-1));
		bundle |= (create_Opcode_X1(SHUN_0_OPCODE_X1) |
			   create_UnShOpcodeExtension_X1(
				   UN_0_SHUN_0_OPCODE_X1) |
			   create_UnOpcodeExtension_X1(
				   NOP_UN_0_SHUN_0_OPCODE_X1));
	}

	return bundle;
}

/*
 * Called after execve() has started the new image.  This allows us
 * to reset the info state.  Note that the mmap'ed memory, if there
 * was any, has already been unmapped by the exec.
 */
void single_step_execve(void)
{
	struct thread_info *ti = current_thread_info();
	kfree(ti->step_state);
	ti->step_state = NULL;
}

/**
 * single_step_once() - entry point when single stepping has been triggered.
 * @regs: The machine register state
 *
 *  When we arrive at this routine via a trampoline, the single step
 *  engine copies the executing bundle to the single step buffer.
 *  If the instruction is a conditional branch, then the target is
 *  reset to one past the next instruction. If the instruction
 *  sets the lr, then that is noted. If the instruction is a jump
 *  or call, then the new target pc is preserved and the current
 *  bundle instruction set to null.
 *
 *  The necessary post-single-step rewriting information is stored in
 *  the thread's single_step_state.  We use data segment values because
 *  the stack will be rewound when we run the rewritten single-stepped
 *  instruction.
 */
void single_step_once(struct pt_regs *regs)
{
	extern tile_bundle_bits __single_step_ill_insn;
	extern tile_bundle_bits __single_step_j_insn;
	extern tile_bundle_bits __single_step_addli_insn;
	extern tile_bundle_bits __single_step_auli_insn;
	struct thread_info *info = (void *)current_thread_info();
	struct single_step_state *state = info->step_state;
	int is_single_step = test_ti_thread_flag(info, TIF_SINGLESTEP);
	tile_bundle_bits __user *buffer, *pc;
	tile_bundle_bits bundle;
	int temp_reg;
	int target_reg = TREG_LR;
	int err;
	enum mem_op mem_op = MEMOP_NONE;
	int size = 0, sign_ext = 0;  /* happy compiler */

	asm(
"    .pushsection .rodata.single_step\n"
"    .align 8\n"
"    .globl    __single_step_ill_insn\n"
"__single_step_ill_insn:\n"
"    ill\n"
"    .globl    __single_step_addli_insn\n"
"__single_step_addli_insn:\n"
"    { nop; addli r0, zero, 0 }\n"
"    .globl    __single_step_auli_insn\n"
"__single_step_auli_insn:\n"
"    { nop; auli r0, r0, 0 }\n"
"    .globl    __single_step_j_insn\n"
"__single_step_j_insn:\n"
"    j .\n"
"    .popsection\n"
	);
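	/*
	 * The bundles above are templates: the code below patches their
	 * destination registers, immediates, and jump offset and copies
	 * them into the per-thread step buffer.
	 */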

	if (state == NULL) {
		/* allocate a page of writable, executable memory */
		state = kmalloc(sizeof(struct single_step_state), GFP_KERNEL);
		if (state == NULL) {
			pr_err("Out of kernel memory trying to single-step\n");
			return;
		}

		/* allocate a cache line of writable, executable memory */
		down_write(&current->mm->mmap_sem);
		buffer = (void __user *) do_mmap(NULL, 0, 64,
					  PROT_EXEC | PROT_READ | PROT_WRITE,
					  MAP_PRIVATE | MAP_ANONYMOUS,
					  0);
		up_write(&current->mm->mmap_sem);

		if (IS_ERR((void __force *)buffer)) {
			kfree(state);
			pr_err("Out of kernel pages trying to single-step\n");
			return;
		}

		state->buffer = buffer;
		state->is_enabled = 0;

		info->step_state = state;

		/* Validate our stored instruction patterns */
		BUG_ON(get_Opcode_X1(__single_step_addli_insn) !=
		       ADDLI_OPCODE_X1);
		BUG_ON(get_Opcode_X1(__single_step_auli_insn) !=
		       AULI_OPCODE_X1);
		BUG_ON(get_SrcA_X1(__single_step_addli_insn) != TREG_ZERO);
		BUG_ON(get_Dest_X1(__single_step_addli_insn) != 0);
		BUG_ON(get_JOffLong_X1(__single_step_j_insn) != 0);
	}

	/*
	 * If we are returning from a syscall, we still haven't hit the
	 * "ill" for the swint1 instruction.  So back the PC up to be
	 * pointing at the swint1, but we'll actually return directly
	 * back to the "ill" so we come back in via SIGILL as if we
	 * had "executed" the swint1 without ever being in kernel space.
	 */
	if (regs->faultnum == INT_SWINT_1)
		regs->pc -= 8;

	pc = (tile_bundle_bits __user *)(regs->pc);
	if (get_user(bundle, pc) != 0) {
		pr_err("Couldn't read instruction at %p trying to step\n", pc);
		return;
	}

	/* We'll follow the instruction with 2 ill op bundles */
	state->orig_pc = (unsigned long)pc;
	state->next_pc = (unsigned long)(pc + 1);
	state->branch_next_pc = 0;
	state->update = 0;

	if (!(bundle & TILE_BUNDLE_Y_ENCODING_MASK)) {
		/* two wide, check for control flow */
		int opcode = get_Opcode_X1(bundle);

		switch (opcode) {
		/* branches */
		case BRANCH_OPCODE_X1:
		{
			s32 offset = signExtend17(get_BrOff_X1(bundle));

			/*
			 * For branches, we use a rewriting trick to let the
			 * hardware evaluate whether the branch is taken or
			 * untaken.  We record the target offset and then
			 * rewrite the branch instruction to target 1 insn
			 * ahead if the branch is taken.  We then follow the
			 * rewritten branch with two bundles, each containing
			 * an "ill" instruction. The supervisor examines the
			 * pc after the single step code is executed, and if
			 * the pc is the first ill instruction, then the
			 * branch (if any) was not taken.  If the pc is the
			 * second ill instruction, then the branch was
			 * taken. The new pc is computed for these cases, and
			 * inserted into the registers for the thread.  If
			 * the pc is the start of the single step code, then
			 * an exception or interrupt was taken before the
			 * code started processing, and the same "original"
			 * pc is restored.  This change, different from the
			 * original implementation, has the advantage of
			 * executing a single user instruction.
			 */
			state->branch_next_pc = (unsigned long)(pc + offset);

			/* rewrite branch offset to go forward one bundle */
			bundle = set_BrOff_X1(bundle, 2);
		}
		break;

		/* jumps */
		case JALB_OPCODE_X1:
		case JALF_OPCODE_X1:
			state->update = 1;
			state->next_pc =
				(unsigned long) (pc + get_JOffLong_X1(bundle));
			break;

		case JB_OPCODE_X1:
		case JF_OPCODE_X1:
			state->next_pc =
				(unsigned long) (pc + get_JOffLong_X1(bundle));
			bundle = nop_X1(bundle);
			break;

		case SPECIAL_0_OPCODE_X1:
			switch (get_RRROpcodeExtension_X1(bundle)) {
			/* jump-register */
			case JALRP_SPECIAL_0_OPCODE_X1:
			case JALR_SPECIAL_0_OPCODE_X1:
				state->update = 1;
				state->next_pc =
					regs->regs[get_SrcA_X1(bundle)];
				break;

			case JRP_SPECIAL_0_OPCODE_X1:
			case JR_SPECIAL_0_OPCODE_X1:
				state->next_pc =
					regs->regs[get_SrcA_X1(bundle)];
				bundle = nop_X1(bundle);
				break;

			case LNK_SPECIAL_0_OPCODE_X1:
				state->update = 1;
				target_reg = get_Dest_X1(bundle);
				break;

			/* stores */
			case SH_SPECIAL_0_OPCODE_X1:
				mem_op = MEMOP_STORE;
				size = 2;
				break;

			case SW_SPECIAL_0_OPCODE_X1:
				mem_op = MEMOP_STORE;
				size = 4;
				break;
			}
			break;

		/* loads and iret */
		case SHUN_0_OPCODE_X1:
			if (get_UnShOpcodeExtension_X1(bundle) ==
			    UN_0_SHUN_0_OPCODE_X1) {
				switch (get_UnOpcodeExtension_X1(bundle)) {
				case LH_UN_0_SHUN_0_OPCODE_X1:
					mem_op = MEMOP_LOAD;
					size = 2;
					sign_ext = 1;
					break;

				case LH_U_UN_0_SHUN_0_OPCODE_X1:
					mem_op = MEMOP_LOAD;
					size = 2;
					sign_ext = 0;
					break;

				case LW_UN_0_SHUN_0_OPCODE_X1:
					mem_op = MEMOP_LOAD;
					size = 4;
					break;

				case IRET_UN_0_SHUN_0_OPCODE_X1:
				{
					unsigned long ex0_0 = __insn_mfspr(
						SPR_EX_CONTEXT_0_0);
					unsigned long ex0_1 = __insn_mfspr(
						SPR_EX_CONTEXT_0_1);
					/*
					 * Special-case it if we're iret'ing
					 * to PL0 again.  Otherwise just let
					 * it run and it will generate SIGILL.
					 */
					if (EX1_PL(ex0_1) == USER_PL) {
						state->next_pc = ex0_0;
						regs->ex1 = ex0_1;
						bundle = nop_X1(bundle);
					}
				}
				}
			}
			break;

#if CHIP_HAS_WH64()
		/* postincrement operations */
		case IMM_0_OPCODE_X1:
			switch (get_ImmOpcodeExtension_X1(bundle)) {
			case LWADD_IMM_0_OPCODE_X1:
				mem_op = MEMOP_LOAD_POSTINCR;
				size = 4;
				break;

			case LHADD_IMM_0_OPCODE_X1:
				mem_op = MEMOP_LOAD_POSTINCR;
				size = 2;
				sign_ext = 1;
				break;

			case LHADD_U_IMM_0_OPCODE_X1:
				mem_op = MEMOP_LOAD_POSTINCR;
				size = 2;
				sign_ext = 0;
				break;

			case SWADD_IMM_0_OPCODE_X1:
				mem_op = MEMOP_STORE_POSTINCR;
				size = 4;
				break;

			case SHADD_IMM_0_OPCODE_X1:
				mem_op = MEMOP_STORE_POSTINCR;
				size = 2;
				break;

			default:
				break;
			}
			break;
#endif /* CHIP_HAS_WH64() */
		}

		if (state->update) {
			/*
			 * Get an available register.  We start with a
			 * bitmask with 1's for available registers.
			 * We truncate to the low 32 registers since
			 * we are guaranteed to have set bits in the
			 * low 32 bits, then use ctz to pick the first.
			 */
			u32 mask = (u32) ~((1ULL << get_Dest_X0(bundle)) |
					   (1ULL << get_SrcA_X0(bundle)) |
					   (1ULL << get_SrcB_X0(bundle)) |
					   (1ULL << target_reg));
			temp_reg = __builtin_ctz(mask);
			state->update_reg = temp_reg;
			state->update_value = regs->regs[temp_reg];
			regs->regs[temp_reg] = (unsigned long) (pc+1);
			regs->flags |= PT_FLAGS_RESTORE_REGS;
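			/*
			 * temp_reg now carries the link value (pc + 1); the
			 * rewritten bundle copies it into the real target
			 * register, and update_reg/update_value put
			 * temp_reg's original contents back after the step.
			 */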
			bundle = move_X1(bundle, target_reg, temp_reg);
		}
	} else {
		int opcode = get_Opcode_Y2(bundle);

		switch (opcode) {
		/* loads */
		case LH_OPCODE_Y2:
			mem_op = MEMOP_LOAD;
			size = 2;
			sign_ext = 1;
			break;

		case LH_U_OPCODE_Y2:
			mem_op = MEMOP_LOAD;
			size = 2;
			sign_ext = 0;
			break;

		case LW_OPCODE_Y2:
			mem_op = MEMOP_LOAD;
			size = 4;
			break;

		/* stores */
		case SH_OPCODE_Y2:
			mem_op = MEMOP_STORE;
			size = 2;
			break;

		case SW_OPCODE_Y2:
			mem_op = MEMOP_STORE;
			size = 4;
			break;
		}
	}

	/*
	 * Check if we need to rewrite an unaligned load/store.
	 * Returning zero is a special value meaning we need to SIGSEGV.
	 */
	if (mem_op != MEMOP_NONE && unaligned_fixup >= 0) {
		bundle = rewrite_load_store_unaligned(state, bundle, regs,
						      mem_op, size, sign_ext);
		if (bundle == 0)
			return;
	}

	/* write the bundle to our execution area */
	buffer = state->buffer;
	err = __put_user(bundle, buffer++);

	/*
	 * If we're really single-stepping, we take an INT_ILL after.
	 * If we're just handling an unaligned access, we can just
	 * jump directly back to where we were in user code.
	 */
	if (is_single_step) {
		err |= __put_user(__single_step_ill_insn, buffer++);
		err |= __put_user(__single_step_ill_insn, buffer++);
	} else {
		long delta;

		if (state->update) {
			/* We have some state to update; do it inline */
			int ha16;
			bundle = __single_step_addli_insn;
			bundle |= create_Dest_X1(state->update_reg);
			bundle |= create_Imm16_X1(state->update_value);
			err |= __put_user(bundle, buffer++);
			bundle = __single_step_auli_insn;
			bundle |= create_Dest_X1(state->update_reg);
			bundle |= create_SrcA_X1(state->update_reg);
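			/*
			 * addli sign-extends its 16-bit immediate, so bias
			 * the high half by 0x8000 to compensate when auli
			 * adds it back in.
			 */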
			ha16 = (state->update_value + 0x8000) >> 16;
			bundle |= create_Imm16_X1(ha16);
			err |= __put_user(bundle, buffer++);
			state->update = 0;
		}

		/* End with a jump back to the next instruction */
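		/*
		 * The "j" offset is relative to the jump bundle itself and
		 * is counted in bundles, hence the shift by the bundle
		 * alignment; the target is the bundle after the original pc.
		 */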
		delta = ((regs->pc + TILE_BUNDLE_SIZE_IN_BYTES) -
			(unsigned long)buffer) >>
			TILE_LOG2_BUNDLE_ALIGNMENT_IN_BYTES;
		bundle = __single_step_j_insn;
		bundle |= create_JOffLong_X1(delta);
		err |= __put_user(bundle, buffer++);
	}

	if (err) {
		pr_err("Fault when writing to single-step buffer\n");
		return;
	}

	/*
	 * Flush the buffer.
	 * We do a local flush only, since this is a thread-specific buffer.
	 */
	__flush_icache_range((unsigned long)state->buffer,
			     (unsigned long)buffer);

	/* Indicate enabled */
	state->is_enabled = is_single_step;
	regs->pc = (unsigned long)state->buffer;

	/* Fault immediately if we are coming back from a syscall. */
	if (regs->faultnum == INT_SWINT_1)
		regs->pc += 8;
}

#else
#include <linux/smp.h>
#include <linux/ptrace.h>
#include <arch/spr_def.h>

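/*
 * PC saved when single stepping was armed, so the handler can tell
 * whether the stepped instruction actually retired (see below).
 */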
static DEFINE_PER_CPU(unsigned long, ss_saved_pc);


/*
 * Called directly on the occasion of an interrupt.
 *
 * If the process doesn't have single step set, then we use this as an
 * opportunity to turn single step off.
 *
 * It has been mentioned that we could conditionally turn off single stepping
 * on each entry into the kernel and rely on single_step_once to turn it
 * on for the processes that matter (as we already do), but this
 * implementation is somewhat more efficient in that we muck with registers
 * once on a bum interrupt rather than on every entry into the kernel.
 *
 * If SINGLE_STEP_CONTROL_K has CANCELED set, then an interrupt occurred,
 * so we have to run through this process again before we can say that an
 * instruction has executed.
 *
 * swint will set CANCELED, but it's a legitimate instruction.  Fortunately
 * it changes the PC.  If it hasn't changed, then we know that the interrupt
 * wasn't generated by swint and we'll need to run this process again before
 * we can say an instruction has executed.
 *
 * If either CANCELED == 0 or the PC's changed, we send out SIGTRAPs and get
 * on with our lives.
 */

void gx_singlestep_handle(struct pt_regs *regs, int fault_num)
{
	unsigned long *ss_pc = &__get_cpu_var(ss_saved_pc);
	struct thread_info *info = (void *)current_thread_info();
	int is_single_step = test_ti_thread_flag(info, TIF_SINGLESTEP);
	unsigned long control = __insn_mfspr(SPR_SINGLE_STEP_CONTROL_K);

	if (is_single_step == 0) {
		__insn_mtspr(SPR_SINGLE_STEP_EN_K_K, 0);

	} else if ((*ss_pc != regs->pc) ||
		   (!(control & SPR_SINGLE_STEP_CONTROL_1__CANCELED_MASK))) {

		ptrace_notify(SIGTRAP);
		control |= SPR_SINGLE_STEP_CONTROL_1__CANCELED_MASK;
		control |= SPR_SINGLE_STEP_CONTROL_1__INHIBIT_MASK;
		__insn_mtspr(SPR_SINGLE_STEP_CONTROL_K, control);
	}
}


/*
 * Called from need_singlestep.  Set up the control registers and the enable
 * register, then return back.
 */

void single_step_once(struct pt_regs *regs)
{
	unsigned long *ss_pc = &__get_cpu_var(ss_saved_pc);
	unsigned long control = __insn_mfspr(SPR_SINGLE_STEP_CONTROL_K);

	*ss_pc = regs->pc;
	control |= SPR_SINGLE_STEP_CONTROL_1__CANCELED_MASK;
	control |= SPR_SINGLE_STEP_CONTROL_1__INHIBIT_MASK;
	__insn_mtspr(SPR_SINGLE_STEP_CONTROL_K, control);
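	/* Enable single stepping for the user privilege level only. */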
	__insn_mtspr(SPR_SINGLE_STEP_EN_K_K, 1 << USER_PL);
}

void single_step_execve(void)
{
	/* Nothing */
}

#endif /* !__tilegx__ */