// SPDX-License-Identifier: GPL-2.0
/*
 * Author: Huacai Chen <chenhuacai@loongson.cn>
 * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
 *
 * Derived from MIPS:
 * Copyright (C) 1994 - 1999, 2000 by Ralf Baechle and others.
 * Copyright (C) 2005, 2006 by Ralf Baechle (ralf@linux-mips.org)
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 * Copyright (C) 2004 Thiemo Seufer
 * Copyright (C) 2013 Imagination Technologies Ltd.
 */
#include <linux/cpu.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/hw_breakpoint.h>
#include <linux/mm.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/export.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/personality.h>
#include <linux/sys.h>
#include <linux/completion.h>
#include <linux/kallsyms.h>
#include <linux/random.h>
#include <linux/prctl.h>
#include <linux/nmi.h>

#include <asm/asm.h>
#include <asm/bootinfo.h>
#include <asm/cpu.h>
#include <asm/elf.h>
#include <asm/exec.h>
#include <asm/fpu.h>
#include <asm/lbt.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/irq_regs.h>
#include <asm/loongarch.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/reg.h>
#include <asm/unwind.h>
#include <asm/vdso.h>

#ifdef CONFIG_STACKPROTECTOR
#include <linux/stackprotector.h>
unsigned long __stack_chk_guard __read_mostly;
EXPORT_SYMBOL(__stack_chk_guard);
#endif

/*
 * Idle related variables and functions
 */

unsigned long boot_option_idle_override = IDLE_NO_OVERRIDE;
EXPORT_SYMBOL(boot_option_idle_override);

asmlinkage void ret_from_fork(void);
asmlinkage void ret_from_kernel_thread(void);

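/*
 * Prepare a newly exec'ed task to return to user space: drop to user
 * privilege (PLV3), disable the FPU/SIMD/LBT contexts so they are
 * re-enabled lazily on first use, and point the exception return
 * address and stack pointer ($r3) at the new program.
 */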
void start_thread(struct pt_regs *regs, unsigned long pc, unsigned long sp)
{
        unsigned long crmd;
        unsigned long prmd;
        unsigned long euen;

        /* New thread loses kernel privileges. */
        crmd = regs->csr_crmd & ~(PLV_MASK);
        crmd |= PLV_USER;
        regs->csr_crmd = crmd;

        prmd = regs->csr_prmd & ~(PLV_MASK);
        prmd |= PLV_USER;
        regs->csr_prmd = prmd;

        euen = regs->csr_euen & ~(CSR_EUEN_FPEN);
        regs->csr_euen = euen;
        lose_fpu(0);
        lose_lbt(0);
        current->thread.fpu.fcsr = boot_cpu_data.fpu_csr0;

        clear_thread_flag(TIF_LSX_CTX_LIVE);
        clear_thread_flag(TIF_LASX_CTX_LIVE);
        clear_thread_flag(TIF_LBT_CTX_LIVE);
        clear_used_math();
        regs->csr_era = pc;
        regs->regs[3] = sp;
}

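/*
 * Called on exec: drop any ptrace hardware breakpoints inherited
 * from the old program image.
 */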
void flush_thread(void)
{
        flush_ptrace_hw_breakpoint(current);
}

void exit_thread(struct task_struct *tsk)
{
        /* Nothing to do on LoongArch. */
}

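/*
 * Duplicate the parent's architecture-specific task state into the
 * child at fork time.
 */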
int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
{
        /*
         * Save any process state which is live in hardware registers to the
         * parent context prior to duplication. This prevents the new child
         * state becoming stale if the parent is preempted before copy_thread()
         * gets a chance to save the parent's live hardware registers to the
         * child context.
         */
        preempt_disable();

        if (is_fpu_owner()) {
                if (is_lasx_enabled())
                        save_lasx(current);
                else if (is_lsx_enabled())
                        save_lsx(current);
                else
                        save_fp(current);
        }

        preempt_enable();

        /*
         * If the task has never used the FPU, skip copying the stale FP
         * register file; otherwise copy everything up to the LBT area,
         * which is copied separately below.
         */
        if (!used_math())
                memcpy(dst, src, offsetof(struct task_struct, thread.fpu.fpr));
        else
                memcpy(dst, src, offsetof(struct task_struct, thread.lbt.scr0));

#ifdef CONFIG_CPU_HAS_LBT
        memcpy(&dst->thread.lbt, &src->thread.lbt, sizeof(struct loongarch_lbt));
#endif

        return 0;
}

/*
 * Copy architecture-specific thread state
 */
int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
{
        unsigned long childksp;
        unsigned long tls = args->tls;
        unsigned long usp = args->stack;
        unsigned long clone_flags = args->flags;
        struct pt_regs *childregs, *regs = current_pt_regs();

        childksp = (unsigned long)task_stack_page(p) + THREAD_SIZE;

        /* Set up the child's pt_regs at the top of its kernel stack. */
        childregs = (struct pt_regs *) childksp - 1;
        /* Put the stack after the struct pt_regs. */
        childksp = (unsigned long) childregs;
        p->thread.sched_cfa = 0;
        p->thread.csr_euen = 0;
        p->thread.csr_crmd = csr_read32(LOONGARCH_CSR_CRMD);
        p->thread.csr_prmd = csr_read32(LOONGARCH_CSR_PRMD);
        p->thread.csr_ecfg = csr_read32(LOONGARCH_CSR_ECFG);
        if (unlikely(args->fn)) {
                /*
                 * Kernel thread: ret_from_kernel_thread picks up the
                 * function from $s0 (reg23) and its argument from $s1
                 * (reg24).
                 */
                p->thread.reg03 = childksp;
                p->thread.reg23 = (unsigned long)args->fn;
                p->thread.reg24 = (unsigned long)args->fn_arg;
                p->thread.reg01 = (unsigned long)ret_from_kernel_thread;
                p->thread.sched_ra = (unsigned long)ret_from_kernel_thread;
                memset(childregs, 0, sizeof(struct pt_regs));
                childregs->csr_euen = p->thread.csr_euen;
                childregs->csr_crmd = p->thread.csr_crmd;
                childregs->csr_prmd = p->thread.csr_prmd;
                childregs->csr_ecfg = p->thread.csr_ecfg;
                goto out;
        }

        /* user thread */
        *childregs = *regs;
        childregs->regs[4] = 0; /* Child gets zero as return value */
        if (usp)
                childregs->regs[3] = usp;

        p->thread.reg03 = (unsigned long) childregs;
        p->thread.reg01 = (unsigned long) ret_from_fork;
        p->thread.sched_ra = (unsigned long) ret_from_fork;

        /*
         * New tasks lose permission to use the fpu. This accelerates context
         * switching for most programs since they don't use the fpu.
         */
        childregs->csr_euen = 0;

        if (clone_flags & CLONE_SETTLS)
                childregs->regs[2] = tls;

out:
        ptrace_hw_copy_thread(p);
        clear_tsk_thread_flag(p, TIF_USEDFPU);
        clear_tsk_thread_flag(p, TIF_USEDSIMD);
        clear_tsk_thread_flag(p, TIF_USEDLBT);
        clear_tsk_thread_flag(p, TIF_LSX_CTX_LIVE);
        clear_tsk_thread_flag(p, TIF_LASX_CTX_LIVE);
        clear_tsk_thread_flag(p, TIF_LBT_CTX_LIVE);

        return 0;
}

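/*
 * Return the "wait channel" of a blocked task: the first return
 * address on its stack that is not part of the scheduler itself.
 */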
unsigned long __get_wchan(struct task_struct *task)
{
        unsigned long pc = 0;
        struct unwind_state state;

        if (!try_get_task_stack(task))
                return 0;

        for (unwind_start(&state, task, NULL);
             !unwind_done(&state); unwind_next_frame(&state)) {
                pc = unwind_get_return_address(&state);
                if (!pc)
                        break;
                if (in_sched_functions(pc))
                        continue;
                break;
        }

        put_task_stack(task);

        return pc;
}

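/*
 * Check whether an address lies on this CPU's IRQ stack. The word at
 * the top of the IRQ stack is expected to hold the stack pointer of
 * the interrupted context, giving the unwinder a link back to the
 * task stack.
 */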
bool in_irq_stack(unsigned long stack, struct stack_info *info)
{
        unsigned long nextsp;
        unsigned long begin = (unsigned long)this_cpu_read(irq_stack);
        unsigned long end = begin + IRQ_STACK_START;

        if (stack < begin || stack >= end)
                return false;

        nextsp = *(unsigned long *)end;
        if (nextsp & (SZREG - 1))
                return false;

        info->begin = begin;
        info->end = end;
        info->next_sp = nextsp;
        info->type = STACK_TYPE_IRQ;

        return true;
}

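/* Check whether an address lies on @task's own kernel stack. */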
bool in_task_stack(unsigned long stack, struct task_struct *task,
                   struct stack_info *info)
{
        unsigned long begin = (unsigned long)task_stack_page(task);
        unsigned long end = begin + THREAD_SIZE;

        if (stack < begin || stack >= end)
                return false;

        info->begin = begin;
        info->end = end;
        info->next_sp = 0;
        info->type = STACK_TYPE_TASK;

        return true;
}

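/*
 * Classify which stack an address belongs to. Only the current CPU's
 * IRQ stack can be examined, so for other tasks only the task stack
 * is considered.
 */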
int get_stack_info(unsigned long stack, struct task_struct *task,
                   struct stack_info *info)
{
        task = task ? : current;

        if (!stack || stack & (SZREG - 1))
                goto unknown;

        if (in_task_stack(stack, task, info))
                return 0;

        if (task != current)
                goto unknown;

        if (in_irq_stack(stack, info))
                return 0;

unknown:
        info->type = STACK_TYPE_UNKNOWN;
        return -EINVAL;
}

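/*
 * Compute the highest usable address for the user stack, leaving room
 * below TASK_SIZE for the vDSO and its data pages, plus slack for vDSO
 * base randomization when enabled.
 */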
unsigned long stack_top(void)
{
        unsigned long top = TASK_SIZE & PAGE_MASK;

        /* Space for the VDSO & data page */
        top -= PAGE_ALIGN(current->thread.vdso->size);
        top -= VVAR_SIZE;

        /* Space to randomize the VDSO base */
        if (current->flags & PF_RANDOMIZE)
                top -= VDSO_RANDOMIZE_SIZE;

        return top;
}

/*
 * Don't forget that the stack pointer must be aligned on an 8-byte
 * boundary for the 32-bit ABI and on a 16-byte boundary for the
 * 64-bit ABI.
 */
unsigned long arch_align_stack(unsigned long sp)
{
        if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
                sp -= get_random_u32_below(PAGE_SIZE);

        return sp & STACK_ALIGN;
}

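/*
 * Backtrace IPIs for arch_trigger_cpumask_backtrace(): each target CPU
 * dumps its own backtrace from the IPI handler. The per-CPU
 * call_single_data_t is guarded by backtrace_csd_busy so it is never
 * reused while a previous request is still in flight.
 */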
static DEFINE_PER_CPU(call_single_data_t, backtrace_csd);
static struct cpumask backtrace_csd_busy;

static void handle_backtrace(void *info)
{
        nmi_cpu_backtrace(get_irq_regs());
        cpumask_clear_cpu(smp_processor_id(), &backtrace_csd_busy);
}

static void raise_backtrace(cpumask_t *mask)
{
        call_single_data_t *csd;
        int cpu;

        for_each_cpu(cpu, mask) {
                /*
                 * If we previously sent an IPI to the target CPU & it hasn't
                 * cleared its bit in the busy cpumask then it didn't handle
                 * our previous IPI & it's not safe for us to reuse the
                 * call_single_data_t.
                 */
                if (cpumask_test_and_set_cpu(cpu, &backtrace_csd_busy)) {
                        pr_warn("Unable to send backtrace IPI to CPU%u - perhaps it hung?\n",
                                cpu);
                        continue;
                }

                csd = &per_cpu(backtrace_csd, cpu);
                csd->func = handle_backtrace;
                smp_call_function_single_async(cpu, csd);
        }
}

void arch_trigger_cpumask_backtrace(const cpumask_t *mask, int exclude_cpu)
{
        nmi_trigger_cpumask_backtrace(mask, exclude_cpu, raise_backtrace);
}

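/*
 * Dump the general-purpose registers and key CSRs into the ELF
 * core-dump layout (LOONGARCH_EF_*).
 */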
#ifdef CONFIG_64BIT
void loongarch_dump_regs64(u64 *uregs, const struct pt_regs *regs)
{
        unsigned int i;

        for (i = LOONGARCH_EF_R1; i <= LOONGARCH_EF_R31; i++) {
                uregs[i] = regs->regs[i - LOONGARCH_EF_R0];
        }

        uregs[LOONGARCH_EF_ORIG_A0] = regs->orig_a0;
        uregs[LOONGARCH_EF_CSR_ERA] = regs->csr_era;
        uregs[LOONGARCH_EF_CSR_BADV] = regs->csr_badvaddr;
        uregs[LOONGARCH_EF_CSR_CRMD] = regs->csr_crmd;
        uregs[LOONGARCH_EF_CSR_PRMD] = regs->csr_prmd;
        uregs[LOONGARCH_EF_CSR_EUEN] = regs->csr_euen;
        uregs[LOONGARCH_EF_CSR_ECFG] = regs->csr_ecfg;
        uregs[LOONGARCH_EF_CSR_ESTAT] = regs->csr_estat;
}
#endif /* CONFIG_64BIT */