/linux-6.6.21/arch/um/os-Linux/skas/
  mem.c
     26  unsigned long *stack)    in check_init_stack() argument
     28  if (stack == NULL) {    in check_init_stack()
     29  stack = (unsigned long *) mm_idp->stack + 2;    in check_init_stack()
     30  *stack = 0;    in check_init_stack()
     32  return stack;    in check_init_stack()
     80  ret = *((unsigned long *) mm_idp->stack);    in do_syscall_stub()
     81  offset = *((unsigned long *) mm_idp->stack + 1);    in do_syscall_stub()
     83  data = (unsigned long *)(mm_idp->stack + offset - STUB_DATA);    in do_syscall_stub()
    113  unsigned long *stack = check_init_stack(mm_idp, *addr);    in run_syscall_stub() local
    115  *stack += sizeof(long);    in run_syscall_stub()
    [all …]

/linux-6.6.21/drivers/misc/altera-stapl/
  altera.c
    213  long *stack = astate->stack;    in altera_execute() local
    528  stack[stack_ptr] = stack[stack_ptr - 1];    in altera_execute()
    534  swap(stack[stack_ptr - 2], stack[stack_ptr - 1]);    in altera_execute()
    539  stack[stack_ptr - 1] += stack[stack_ptr];    in altera_execute()
    545  stack[stack_ptr - 1] -= stack[stack_ptr];    in altera_execute()
    551  stack[stack_ptr - 1] *= stack[stack_ptr];    in altera_execute()
    557  stack[stack_ptr - 1] /= stack[stack_ptr];    in altera_execute()
    563  stack[stack_ptr - 1] %= stack[stack_ptr];    in altera_execute()
    569  stack[stack_ptr - 1] <<= stack[stack_ptr];    in altera_execute()
    575  stack[stack_ptr - 1] >>= stack[stack_ptr];    in altera_execute()
    [all …]

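The altera.c hits are the arithmetic opcodes of the STAPL bytecode interpreter, which folds each binary operator on an operand stack indexed by stack_ptr: the right operand is at stack[stack_ptr] and the result replaces the left operand at stack[stack_ptr - 1]. Below is a minimal sketch of the same stack-machine pattern; the opcode names and the eval() wrapper are illustrative, not the driver's real definitions, and bounds checks are omitted.

enum { OP_PUSH, OP_ADD, OP_SUB, OP_MUL };

/* Evaluate a tiny postfix program in the style of altera_execute(). */
static long eval(const long *code, int len)
{
	long stack[64];
	int stack_ptr = -1;		/* index of the current top of stack */
	int pc;

	for (pc = 0; pc < len; pc++) {
		switch (code[pc]) {
		case OP_PUSH:
			stack[++stack_ptr] = code[++pc];
			break;
		case OP_ADD:
			stack[stack_ptr - 1] += stack[stack_ptr];
			stack_ptr--;
			break;
		case OP_SUB:
			stack[stack_ptr - 1] -= stack[stack_ptr];
			stack_ptr--;
			break;
		case OP_MUL:
			stack[stack_ptr - 1] *= stack[stack_ptr];
			stack_ptr--;
			break;
		}
	}
	return stack[stack_ptr];
}

For example, the program {OP_PUSH, 2, OP_PUSH, 3, OP_ADD} leaves 5 on top of the stack.
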
/linux-6.6.21/arch/x86/kernel/
  dumpstack_32.c
     38  static bool in_hardirq_stack(unsigned long *stack, struct stack_info *info)    in in_hardirq_stack() argument
     47  if (stack < begin || stack > end)    in in_hardirq_stack()
     63  static bool in_softirq_stack(unsigned long *stack, struct stack_info *info)    in in_softirq_stack() argument
     72  if (stack < begin || stack > end)    in in_softirq_stack()
     88  static bool in_doublefault_stack(unsigned long *stack, struct stack_info *info)    in in_doublefault_stack() argument
     93  void *begin = ss->stack;    in in_doublefault_stack()
     94  void *end = begin + sizeof(ss->stack);    in in_doublefault_stack()
     96  if ((void *)stack < begin || (void *)stack >= end)    in in_doublefault_stack()
    108  int get_stack_info(unsigned long *stack, struct task_struct *task,    in get_stack_info() argument
    111  if (!stack)    in get_stack_info()
    [all …]

  dumpstack.c
     32  bool noinstr in_task_stack(unsigned long *stack, struct task_struct *task,    in in_task_stack() argument
     38  if (stack < begin || stack >= end)    in in_task_stack()
     50  bool noinstr in_entry_stack(unsigned long *stack, struct stack_info *info)    in in_entry_stack() argument
     57  if ((void *)stack < begin || (void *)stack >= end)    in in_entry_stack()
    187  unsigned long *stack, const char *log_lvl)    in show_trace_log_lvl() argument
    197  unwind_start(&state, task, regs, stack);    in show_trace_log_lvl()
    216  for (stack = stack ?: get_stack_pointer(task, regs);    in show_trace_log_lvl()
    217  stack;    in show_trace_log_lvl()
    218  stack = stack_info.next_sp) {    in show_trace_log_lvl()
    221  stack = PTR_ALIGN(stack, sizeof(long));    in show_trace_log_lvl()
    [all …]

  dumpstack_64.c
     94  static __always_inline bool in_exception_stack(unsigned long *stack, struct stack_info *info)    in in_exception_stack() argument
     96  unsigned long begin, end, stk = (unsigned long)stack;    in in_exception_stack()
    135  static __always_inline bool in_irq_stack(unsigned long *stack, struct stack_info *info)    in in_irq_stack() argument
    153  if (stack < begin || stack >= end)    in in_irq_stack()
    170  bool noinstr get_stack_info_noinstr(unsigned long *stack, struct task_struct *task,    in get_stack_info_noinstr() argument
    173  if (in_task_stack(stack, task, info))    in get_stack_info_noinstr()
    179  if (in_exception_stack(stack, info))    in get_stack_info_noinstr()
    182  if (in_irq_stack(stack, info))    in get_stack_info_noinstr()
    185  if (in_entry_stack(stack, info))    in get_stack_info_noinstr()
    191  int get_stack_info(unsigned long *stack, struct task_struct *task,    in get_stack_info() argument
    [all …]

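Taken together, the dumpstack hits show the pattern the x86 backtrace code uses: classify the stack a pointer lives on with get_stack_info(), dump or unwind it, then hop to the adjacent stack through stack_info.next_sp. A condensed sketch of that loop follows; the unwinder calls and error handling are omitted, and the walk_all_stacks() wrapper name is illustrative.

#include <linux/kernel.h>
#include <asm/stacktrace.h>

static void walk_all_stacks(struct task_struct *task, struct pt_regs *regs,
			    unsigned long *stack)
{
	struct stack_info stack_info = {0};
	unsigned long visit_mask = 0;

	for (stack = stack ?: get_stack_pointer(task, regs);
	     stack;
	     stack = stack_info.next_sp) {
		stack = PTR_ALIGN(stack, sizeof(long));
		if (get_stack_info(stack, task, &stack_info, &visit_mask))
			break;		/* unknown stack type: stop walking */
		/* ... print or unwind the words from stack up to stack_info.end ... */
	}
}
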
/linux-6.6.21/tools/testing/selftests/bpf/progs/
  test_global_func_ctx_args.c
     11  static long stack[256];    variable
     19  return bpf_get_stack(ctx, &stack, sizeof(stack), 0);    in kprobe_typedef_ctx_subprog()
     33  return bpf_get_stack((void *)ctx, &stack, sizeof(stack), 0);    in kprobe_struct_ctx_subprog()
     48  return bpf_get_stack(ctx, &stack, sizeof(stack), 0);    in kprobe_workaround_ctx_subprog()
     64  return bpf_get_stack(ctx, &stack, sizeof(stack), 0);    in raw_tp_ctx_subprog()
     80  return bpf_get_stack(ctx, &stack, sizeof(stack), 0);    in raw_tp_writable_ctx_subprog()
     96  return bpf_get_stack(ctx, &stack, sizeof(stack), 0);    in perf_event_ctx_subprog()

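This selftest exercises passing program context into global subprograms; each subprogram simply captures a call stack into the global buffer with the bpf_get_stack() helper. A minimal standalone sketch of that helper's basic use from a kprobe program is below (it does not reproduce the selftest's global-subprogram setup); the attach target and program name are illustrative, and the object is built with clang -target bpf against a generated vmlinux.h.

#include "vmlinux.h"
#include <bpf/bpf_helpers.h>

/* Global buffer the captured kernel stack is written into, as in the
 * selftest: 256 longs = 2 KiB. */
static long stack[256];

SEC("kprobe/do_nanosleep")
int capture_stack(struct pt_regs *ctx)
{
	/* bpf_get_stack() returns the number of bytes written, or a
	 * negative error; flags == 0 requests kernel addresses. */
	long ret = bpf_get_stack(ctx, &stack, sizeof(stack), 0);

	return ret < 0;
}

char _license[] SEC("license") = "GPL";
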
/linux-6.6.21/Documentation/arch/x86/
  shstk.rst
     14  CET introduces shadow stack and indirect branch tracking (IBT). A shadow stack
     15  is a secondary stack allocated from memory which cannot be directly modified by
     17  return address to both the normal stack and the shadow stack. Upon
     18  function return, the processor pops the shadow stack copy and compares it
     19  to the normal stack copy. If the two differ, the processor raises a
     23  shadow stack and kernel IBT are supported.
     28  To use userspace shadow stack you need HW that supports it, a kernel
     34  To build a user shadow stack enabled kernel, Binutils v2.29 or LLVM v6 or later
     38  CET. "user_shstk" means that userspace shadow stack is supported on the current
     98  ARCH_SHSTK_SHSTK - Shadow stack
    [all …]

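shstk.rst goes on to describe a per-thread arch_prctl() interface (ARCH_SHSTK_ENABLE/ARCH_SHSTK_DISABLE with the ARCH_SHSTK_SHSTK feature bit, as in the hit at line 98). A hedged userspace sketch of that interface, assuming the v6.6 uapi constants from <asm/prctl.h> are installed; note the thread disables the feature again before unwinding past the point where it was enabled, since those earlier frames were never pushed onto the (initially empty) shadow stack.

#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <asm/prctl.h>		/* ARCH_SHSTK_ENABLE/DISABLE, ARCH_SHSTK_SHSTK */

int main(void)
{
	/* Ask the kernel to allocate a shadow stack for this thread and
	 * turn the feature on.  Fails on HW/kernels without user_shstk,
	 * or if the feature state has already been locked. */
	if (syscall(SYS_arch_prctl, ARCH_SHSTK_ENABLE, ARCH_SHSTK_SHSTK)) {
		perror("ARCH_SHSTK_ENABLE");
		return 1;
	}

	/* ... calls and returns made here are checked against the shadow
	 * stack; a mismatch raises a control-protection fault ... */

	/* Turn it off before returning out of frames entered before the enable. */
	syscall(SYS_arch_prctl, ARCH_SHSTK_DISABLE, ARCH_SHSTK_SHSTK);
	return 0;
}
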
  kernel-stacks.rst
     14  Like all other architectures, x86_64 has a kernel stack for every
     17  zombie. While the thread is in user space the kernel stack is empty
     25  * Interrupt stack. IRQ_STACK_SIZE
     29  kernel switches from the current task to the interrupt stack. Like
     32  of every per thread stack.
     34  The interrupt stack is also used when processing a softirq.
     36  Switching to the kernel interrupt stack is done by software based on a
     41  to automatically switch to a new stack for designated events such as
     46  point to dedicated stacks; each stack can be a different size.
     50  loads such a descriptor, the hardware automatically sets the new stack
    [all …]

/linux-6.6.21/arch/s390/kernel/
  dumpstack.c
     44  enum stack_type type, unsigned long stack)    in in_stack() argument
     46  if (sp < stack || sp >= stack + THREAD_SIZE)    in in_stack()
     49  info->begin = stack;    in in_stack()
     50  info->end = stack + THREAD_SIZE;    in in_stack()
     57  unsigned long stack = (unsigned long)task_stack_page(task);    in in_task_stack() local
     59  return in_stack(sp, info, STACK_TYPE_TASK, stack);    in in_task_stack()
     64  unsigned long stack = S390_lowcore.async_stack - STACK_INIT_OFFSET;    in in_irq_stack() local
     66  return in_stack(sp, info, STACK_TYPE_IRQ, stack);    in in_irq_stack()
     71  unsigned long stack = S390_lowcore.nodat_stack - STACK_INIT_OFFSET;    in in_nodat_stack() local
     73  return in_stack(sp, info, STACK_TYPE_NODAT, stack);    in in_nodat_stack()
    [all …]

/linux-6.6.21/arch/um/kernel/skas/
  mmu.c
     21  unsigned long stack = 0;    in init_new_context() local
     24  stack = __get_free_pages(GFP_KERNEL | __GFP_ZERO, ilog2(STUB_DATA_PAGES));    in init_new_context()
     25  if (stack == 0)    in init_new_context()
     28  to_mm->id.stack = stack;    in init_new_context()
     34  to_mm->id.u.pid = copy_context_skas0(stack,    in init_new_context()
     36  else to_mm->id.u.pid = start_userspace(stack);    in init_new_context()
     54  if (to_mm->id.stack != 0)    in init_new_context()
     55  free_pages(to_mm->id.stack, ilog2(STUB_DATA_PAGES));    in init_new_context()
     77  free_pages(mmu->id.stack, ilog2(STUB_DATA_PAGES));    in destroy_context()

/linux-6.6.21/Documentation/mm/
  vmalloced-kernel-stacks.rst
     21  Kernel stack overflows are often hard to debug and make the kernel
     25  Virtually-mapped kernel stacks with guard pages causes kernel stack
     31  causes reliable faults when the stack overflows. The usability of
     32  the stack trace after overflow and response to the overflow itself
     49  needs to work while the stack points to a virtual address with
     51  most likely) needs to ensure that the stack's page table entries
     52  are populated before running on a possibly unpopulated stack.
     53  - If the stack overflows into a guard page, something reasonable
     64  with guard pages. This causes kernel stack overflows to be caught
     75  VMAP_STACK is enabled, it is not possible to run DMA on stack
    [all …]

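The last hit is the practical consequence of CONFIG_VMAP_STACK: task stacks live in the vmalloc area, so on-stack buffers cannot be handed to DMA. A small kernel-side sketch (not from the document) that tests this property at runtime using the existing is_vmalloc_addr() and task_stack_page() helpers:

#include <linux/mm.h>
#include <linux/sched/task_stack.h>

/* True when the current task's stack was allocated from the vmalloc
 * area, i.e. the kernel is running with CONFIG_VMAP_STACK. */
static bool current_stack_is_vmapped(void)
{
	return is_vmalloc_addr(task_stack_page(current));
}
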
/linux-6.6.21/lib/
  stackdepot.c
    262  struct stack_record *stack;    in depot_alloc_stack() local
    263  size_t required_size = struct_size(stack, entries, size);    in depot_alloc_stack()
    301  stack = stack_pools[pool_index] + pool_offset;    in depot_alloc_stack()
    302  stack->hash = hash;    in depot_alloc_stack()
    303  stack->size = size;    in depot_alloc_stack()
    304  stack->handle.pool_index = pool_index;    in depot_alloc_stack()
    305  stack->handle.offset = pool_offset >> DEPOT_STACK_ALIGN;    in depot_alloc_stack()
    306  stack->handle.valid = 1;    in depot_alloc_stack()
    307  stack->handle.extra = 0;    in depot_alloc_stack()
    308  memcpy(stack->entries, entries, flex_array_size(stack, entries, size));    in depot_alloc_stack()
    [all …]

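These hits are depot_alloc_stack(), the internal allocator that carves a stack_record out of a pool and packs its location into the returned handle. Callers such as KASAN normally go through the public API instead: capture a trace with stack_trace_save(), deduplicate and store it with stack_depot_save(), and resolve the compact handle later with stack_depot_fetch(). A sketch of that usage (the two wrapper function names are illustrative):

#include <linux/kernel.h>
#include <linux/stackdepot.h>
#include <linux/stacktrace.h>

static depot_stack_handle_t record_current_stack(gfp_t flags)
{
	unsigned long entries[16];
	unsigned int nr = stack_trace_save(entries, ARRAY_SIZE(entries), 0);

	/* Identical traces dedup to the same 32-bit handle. */
	return stack_depot_save(entries, nr, flags);
}

static void print_recorded_stack(depot_stack_handle_t handle)
{
	unsigned long *entries;
	unsigned int nr = stack_depot_fetch(handle, &entries);

	stack_trace_print(entries, nr, 0);
}
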
/linux-6.6.21/arch/nios2/kernel/
  traps.c
     60  void show_stack(struct task_struct *task, unsigned long *stack,    in show_stack() argument
     66  if (!stack) {    in show_stack()
     68  stack = (unsigned long *)task->thread.ksp;    in show_stack()
     70  stack = (unsigned long *)&stack;    in show_stack()
     73  addr = (unsigned long) stack;    in show_stack()
     76  printk("%sStack from %08lx:", loglvl, (unsigned long)stack);    in show_stack()
     78  if (stack + 1 > endstack)    in show_stack()
     82  printk("%s %08lx", loglvl, *stack++);    in show_stack()
     87  while (stack + 1 <= endstack) {    in show_stack()
     88  addr = *stack++;    in show_stack()

/linux-6.6.21/arch/um/os-Linux/
  helper.c
     46  unsigned long stack, sp;    in run_helper() local
     49  stack = alloc_stack(0, __uml_cant_sleep());    in run_helper()
     50  if (stack == 0)    in run_helper()
     68  sp = stack + UM_KERN_PAGE_SIZE;    in run_helper()
    114  free_stack(stack, 0);    in run_helper()
    121  unsigned long stack, sp;    in run_helper_thread() local
    124  stack = alloc_stack(0, __uml_cant_sleep());    in run_helper_thread()
    125  if (stack == 0)    in run_helper_thread()
    128  sp = stack + UM_KERN_PAGE_SIZE;    in run_helper_thread()
    147  free_stack(stack, 0);    in run_helper_thread()
    [all …]

/linux-6.6.21/arch/openrisc/kernel/
  unwinder.c
     60  void unwind_stack(void *data, unsigned long *stack,    in unwind_stack() argument
     67  while (!kstack_end(stack)) {    in unwind_stack()
     68  frameinfo = container_of(stack,    in unwind_stack()
     83  stack++;    in unwind_stack()
     93  void unwind_stack(void *data, unsigned long *stack,    in unwind_stack() argument
     98  while (!kstack_end(stack)) {    in unwind_stack()
     99  addr = *stack++;    in unwind_stack()

/linux-6.6.21/mm/kmsan/
  init.c
    147  static void smallstack_push(struct smallstack *stack, struct page *pages)    in smallstack_push() argument
    149  KMSAN_WARN_ON(stack->index == MAX_BLOCKS);    in smallstack_push()
    150  stack->items[stack->index] = pages;    in smallstack_push()
    151  stack->index++;    in smallstack_push()
    155  static struct page *smallstack_pop(struct smallstack *stack)    in smallstack_pop() argument
    159  KMSAN_WARN_ON(stack->index == 0);    in smallstack_pop()
    160  stack->index--;    in smallstack_pop()
    161  ret = stack->items[stack->index];    in smallstack_pop()
    162  stack->items[stack->index] = NULL;    in smallstack_pop()

/linux-6.6.21/tools/testing/selftests/vDSO/
  vdso_standalone_test_x86.c
     73  __attribute__((externally_visible)) void c_main(void **stack)    in c_main() argument
     76  long argc = (long)*stack;    in c_main()
     77  stack += argc + 2;    in c_main()
     80  while(*stack)    in c_main()
     81  stack++;    in c_main()
     82  stack++;    in c_main()
     85  vdso_init_from_auxv((void *)stack);    in c_main()

/linux-6.6.21/drivers/gpu/drm/i915/
  intel_runtime_pm.c
     77  depot_stack_handle_t stack, *stacks;    in track_intel_runtime_pm_wakeref() local
     83  stack = __save_depot_stack();    in track_intel_runtime_pm_wakeref()
     84  if (!stack)    in track_intel_runtime_pm_wakeref()
     90  rpm->debug.last_acquire = stack;    in track_intel_runtime_pm_wakeref()
     96  stacks[rpm->debug.count++] = stack;    in track_intel_runtime_pm_wakeref()
     99  stack = -1;    in track_intel_runtime_pm_wakeref()
    104  return stack;    in track_intel_runtime_pm_wakeref()
    108  depot_stack_handle_t stack)    in untrack_intel_runtime_pm_wakeref() argument
    116  if (unlikely(stack == -1))    in untrack_intel_runtime_pm_wakeref()
    121  if (rpm->debug.owners[n] == stack) {    in untrack_intel_runtime_pm_wakeref()
    [all …]

/linux-6.6.21/arch/um/kernel/
  sysrq.c
     30  void show_stack(struct task_struct *task, unsigned long *stack,    in show_stack() argument
     42  if (!stack)    in show_stack()
     43  stack = get_stack_pointer(task, segv_regs);    in show_stack()
     47  if (kstack_end(stack))    in show_stack()
     51  pr_cont(" %08lx", READ_ONCE_NOCHECK(*stack));    in show_stack()
     52  stack++;    in show_stack()

/linux-6.6.21/include/linux/sched/
  task_stack.h
     21  return task->stack;    in task_stack_page()
     29  return (unsigned long *)((unsigned long)task->stack + THREAD_SIZE) - 1;    in end_of_stack()
     31  return task->stack;    in end_of_stack()
     37  #define task_stack_page(task) ((void *)(task)->stack)
     89  void *stack = task_stack_page(current);    in object_is_on_stack() local
     91  return (obj >= stack) && (obj < (stack + THREAD_SIZE));    in object_is_on_stack()

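The last two hits show object_is_on_stack() as a plain range check between task_stack_page(current) and THREAD_SIZE. A typical caller uses it to reject stack locals before a buffer is DMA-mapped or kept past the current frame; a hedged sketch (queue_buffer() is an illustrative name, not an existing API):

#include <linux/bug.h>
#include <linux/errno.h>
#include <linux/sched/task_stack.h>

static int queue_buffer(void *buf)
{
	/* Stack locals vanish when the caller returns and cannot be
	 * DMA-mapped, so refuse them early. */
	if (WARN_ON_ONCE(object_is_on_stack(buf)))
		return -EINVAL;

	/* ... safe to keep a reference to buf or map it for DMA ... */
	return 0;
}
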
/linux-6.6.21/tools/perf/scripts/python/
  stackcollapse.py
     97  stack = list()
    103  stack.append(tidy_function_name(entry['sym']['name'],
    108  stack.append(tidy_function_name(param_dict['symbol'],
    119  stack.append(comm)
    121  stack_string = ';'.join(reversed(stack))
    126  for stack in list:
    127  print("%s %d" % (stack, lines[stack]))

/linux-6.6.21/mm/kasan/
  report_tags.c
     36  depot_stack_handle_t stack;    in kasan_complete_mode_report_info() local
     72  stack = READ_ONCE(entry->stack);    in kasan_complete_mode_report_info()
     84  info->free_track.stack = stack;    in kasan_complete_mode_report_info()
     99  info->alloc_track.stack = stack;    in kasan_complete_mode_report_info()

/linux-6.6.21/arch/x86/include/asm/
  stacktrace.h
     31  bool in_task_stack(unsigned long *stack, struct task_struct *task,
     34  bool in_entry_stack(unsigned long *stack, struct stack_info *info);
     36  int get_stack_info(unsigned long *stack, struct task_struct *task,
     38  bool get_stack_info_noinstr(unsigned long *stack, struct task_struct *task,
     42  bool get_stack_guard_info(unsigned long *stack, struct stack_info *info)    in get_stack_guard_info() argument
     45  if (get_stack_info_noinstr(stack, current, info))    in get_stack_guard_info()
     48  return get_stack_info_noinstr((void *)stack + PAGE_SIZE, current, info);    in get_stack_guard_info()

/linux-6.6.21/arch/powerpc/kernel/
  stacktrace.c
     43  unsigned long *stack = (unsigned long *) sp;    in arch_stack_walk() local
     49  newsp = stack[0];    in arch_stack_walk()
     50  ip = stack[STACK_FRAME_LR_SAVE];    in arch_stack_walk()
     95  unsigned long *stack = (unsigned long *) sp;    in arch_stack_walk_reliable() local
    102  newsp = stack[0];    in arch_stack_walk_reliable()
    122  stack[STACK_INT_FRAME_MARKER_LONGS] == STACK_FRAME_REGS_MARKER) {    in arch_stack_walk_reliable()
    127  ip = stack[STACK_FRAME_LR_SAVE];    in arch_stack_walk_reliable()
    135  ip = ftrace_graph_ret_addr(task, &graph_idx, ip, stack);    in arch_stack_walk_reliable()

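On powerpc each stack frame begins with a back-chain word pointing at the caller's frame, and the saved LR sits STACK_FRAME_LR_SAVE words into that frame; arch_stack_walk() simply follows the chain. A condensed sketch with the stack-range validation and the ftrace/interrupt-frame handling of the real code left out (walk_backchain() and the consume callback are illustrative names):

#include <asm/ptrace.h>		/* STACK_FRAME_LR_SAVE */

static void walk_backchain(unsigned long sp,
			   bool (*consume)(void *cookie, unsigned long ip),
			   void *cookie)
{
	while (sp) {
		unsigned long *frame = (unsigned long *)sp;
		unsigned long newsp = frame[0];		/* back chain: caller's SP */
		unsigned long ip = frame[STACK_FRAME_LR_SAVE];

		if (ip && !consume(cookie, ip))
			break;
		sp = newsp;
	}
}
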
/linux-6.6.21/scripts/kconfig/
  symbol.c
    973  static void dep_stack_insert(struct dep_stack *stack, struct symbol *sym)    in dep_stack_insert() argument
    975  memset(stack, 0, sizeof(*stack));    in dep_stack_insert()
    977  check_top->next = stack;    in dep_stack_insert()
    978  stack->prev = check_top;    in dep_stack_insert()
    979  stack->sym = sym;    in dep_stack_insert()
    980  check_top = stack;    in dep_stack_insert()
    997  struct dep_stack *stack;    in sym_check_print_recursive() local
   1008  for (stack = check_top; stack != NULL; stack = stack->prev)    in sym_check_print_recursive()
   1009  if (stack->sym == last_sym)    in sym_check_print_recursive()
   1011  if (!stack) {    in sym_check_print_recursive()
    [all …]