// SPDX-License-Identifier: GPL-2.0+
/*
 * User-space Probes (UProbes)
 *
 * Copyright (C) IBM Corporation, 2008-2012
 * Authors:
 *	Srikar Dronamraju
 *	Jim Keniston
 * Copyright (C) 2011-2012 Red Hat, Inc., Peter Zijlstra
 */

#include <linux/kernel.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>	/* read_mapping_page */
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/coredump.h>
#include <linux/export.h>
#include <linux/rmap.h>		/* anon_vma_prepare */
#include <linux/mmu_notifier.h>	/* set_pte_at_notify */
#include <linux/swap.h>		/* folio_free_swap */
#include <linux/ptrace.h>	/* user_enable_single_step */
#include <linux/kdebug.h>	/* notifier mechanism */
#include "../../mm/internal.h"	/* munlock_vma_page */
#include <linux/percpu-rwsem.h>
#include <linux/task_work.h>
#include <linux/shmem_fs.h>
#include <linux/khugepaged.h>

#include <linux/uprobes.h>

#define UINSNS_PER_PAGE		(PAGE_SIZE/UPROBE_XOL_SLOT_BYTES)
#define MAX_UPROBE_XOL_SLOTS	UINSNS_PER_PAGE
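/*
 * Illustrative arithmetic (editorial note, values are arch-dependent):
 * with PAGE_SIZE == 4096 and UPROBE_XOL_SLOT_BYTES == 128 (the x86
 * value), UINSNS_PER_PAGE is 4096/128 = 32, i.e. one XOL page holds 32
 * out-of-line instruction slots.
 */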

static struct rb_root uprobes_tree = RB_ROOT;
/*
 * allows us to skip the uprobe_mmap if there are no uprobe events active
 * at this time.  Probably a fine-grained per-inode count would be better?
 */
#define no_uprobe_events()	RB_EMPTY_ROOT(&uprobes_tree)

static DEFINE_SPINLOCK(uprobes_treelock);	/* serialize rbtree access */

#define UPROBES_HASH_SZ	13
/* serialize uprobe->pending_list */
static struct mutex uprobes_mmap_mutex[UPROBES_HASH_SZ];
#define uprobes_mmap_hash(v)	(&uprobes_mmap_mutex[((unsigned long)(v)) % UPROBES_HASH_SZ])
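/*
 * Usage sketch (editorial note): the hash spreads lock contention
 * across inodes; callers bracket pending_list manipulation with
 *
 *	mutex_lock(uprobes_mmap_hash(inode));
 *	...
 *	mutex_unlock(uprobes_mmap_hash(inode));
 *
 * as uprobe_mmap() does below.
 */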

DEFINE_STATIC_PERCPU_RWSEM(dup_mmap_sem);

/* Have a copy of original instruction */
#define UPROBE_COPY_INSN	0

struct uprobe {
	struct rb_node		rb_node;	/* node in the rb tree */
	refcount_t		ref;
	struct rw_semaphore	register_rwsem;
	struct rw_semaphore	consumer_rwsem;
	struct list_head	pending_list;
	struct uprobe_consumer	*consumers;
	struct inode		*inode;		/* Also hold a ref to inode */
	loff_t			offset;
	loff_t			ref_ctr_offset;
	unsigned long		flags;

	/*
	 * The generic code assumes that it has two members of unknown type
	 * owned by the arch-specific code:
	 *
	 *	insn -	copy_insn() saves the original instruction here for
	 *		arch_uprobe_analyze_insn().
	 *
	 *	ixol -	potentially modified instruction to execute out of
	 *		line, copied to xol_area by xol_get_insn_slot().
	 */
	struct arch_uprobe	arch;
};

struct delayed_uprobe {
	struct list_head list;
	struct uprobe *uprobe;
	struct mm_struct *mm;
};

static DEFINE_MUTEX(delayed_uprobe_lock);
static LIST_HEAD(delayed_uprobe_list);

/*
 * Execute out of line area: anonymous executable mapping installed
 * by the probed task to execute the copy of the original instruction
 * mangled by set_swbp().
 *
 * On a breakpoint hit, the thread contends for a slot and frees the
 * slot after single-stepping. Currently a fixed number of slots is
 * allocated.
 */
struct xol_area {
	wait_queue_head_t		wq;		/* if all slots are busy */
	atomic_t			slot_count;	/* number of in-use slots */
	unsigned long			*bitmap;	/* 0 = free slot */

	struct vm_special_mapping	xol_mapping;
	struct page			*pages[2];
	/*
	 * We keep the vma's vm_start rather than a pointer to the vma
	 * itself.  The probed process or a naughty kernel module could make
	 * the vma go away, and we must handle that reasonably gracefully.
	 */
	unsigned long			vaddr;		/* Page(s) of instruction slots */
};

/*
 * valid_vma: Verify if the specified vma is an executable vma
 * Relax restrictions while unregistering: vm_flags might have
 * changed after breakpoint was inserted.
 *	- is_register: indicates if we are in register context.
 *	- Return true if the specified virtual address is in an
 *	  executable vma.
 */
static bool valid_vma(struct vm_area_struct *vma, bool is_register)
{
	vm_flags_t flags = VM_HUGETLB | VM_MAYEXEC | VM_MAYSHARE;

	if (is_register)
		flags |= VM_WRITE;

	return vma->vm_file && (vma->vm_flags & flags) == VM_MAYEXEC;
}

static unsigned long offset_to_vaddr(struct vm_area_struct *vma, loff_t offset)
{
	return vma->vm_start + offset - ((loff_t)vma->vm_pgoff << PAGE_SHIFT);
}

static loff_t vaddr_to_offset(struct vm_area_struct *vma, unsigned long vaddr)
{
	return ((loff_t)vma->vm_pgoff << PAGE_SHIFT) + (vaddr - vma->vm_start);
}
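/*
 * Worked example (editorial note, hypothetical numbers): for a vma with
 * vm_start == 0x400000 and vm_pgoff == 0, file offset 0x1234 maps to
 * vaddr 0x401234, and vaddr_to_offset() inverts that. With the same
 * vm_start but vm_pgoff == 1 (4K pages), the vma starts at file offset
 * 0x1000, so offset 0x1234 would map to vaddr 0x400234 instead.
 */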

/**
 * __replace_page - replace page in vma by new page.
 * based on replace_page in mm/ksm.c
 *
 * @vma:      vma that holds the pte pointing to page
 * @addr:     address the old @page is mapped at
 * @old_page: the page to be replaced
 * @new_page: the modified page that replaces @old_page
 *
 * If @new_page is NULL, only unmap @old_page.
 *
 * Returns 0 on success, negative error code otherwise.
 */
static int __replace_page(struct vm_area_struct *vma, unsigned long addr,
			  struct page *old_page, struct page *new_page)
{
	struct folio *old_folio = page_folio(old_page);
	struct folio *new_folio;
	struct mm_struct *mm = vma->vm_mm;
	DEFINE_FOLIO_VMA_WALK(pvmw, old_folio, vma, addr, 0);
	int err;
	struct mmu_notifier_range range;

	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, mm, addr,
				addr + PAGE_SIZE);

	if (new_page) {
		new_folio = page_folio(new_page);
		err = mem_cgroup_charge(new_folio, vma->vm_mm, GFP_KERNEL);
		if (err)
			return err;
	}

	/* For folio_free_swap() below */
	folio_lock(old_folio);

	mmu_notifier_invalidate_range_start(&range);
	err = -EAGAIN;
	if (!page_vma_mapped_walk(&pvmw))
		goto unlock;
	VM_BUG_ON_PAGE(addr != pvmw.address, old_page);

	if (new_page) {
		folio_get(new_folio);
		page_add_new_anon_rmap(new_page, vma, addr);
		folio_add_lru_vma(new_folio, vma);
	} else
		/* no new page, just dec_mm_counter for old_page */
		dec_mm_counter(mm, MM_ANONPAGES);

	if (!folio_test_anon(old_folio)) {
		dec_mm_counter(mm, mm_counter_file(old_page));
		inc_mm_counter(mm, MM_ANONPAGES);
	}

	flush_cache_page(vma, addr, pte_pfn(*pvmw.pte));
	ptep_clear_flush_notify(vma, addr, pvmw.pte);
	if (new_page)
		set_pte_at_notify(mm, addr, pvmw.pte,
				  mk_pte(new_page, vma->vm_page_prot));

	page_remove_rmap(old_page, vma, false);
	if (!folio_mapped(old_folio))
		folio_free_swap(old_folio);
	page_vma_mapped_walk_done(&pvmw);
	folio_put(old_folio);

	err = 0;
unlock:
	mmu_notifier_invalidate_range_end(&range);
	folio_unlock(old_folio);
	return err;
}

/**
 * is_swbp_insn - check if instruction is breakpoint instruction.
 * @insn: instruction to be checked.
 * Default implementation of is_swbp_insn
 * Returns true if @insn is a breakpoint instruction.
 */
bool __weak is_swbp_insn(uprobe_opcode_t *insn)
{
	return *insn == UPROBE_SWBP_INSN;
}

/**
 * is_trap_insn - check if instruction is breakpoint instruction.
 * @insn: instruction to be checked.
 * Default implementation of is_trap_insn
 * Returns true if @insn is a breakpoint instruction.
 *
 * This function is needed for the case where an architecture has multiple
 * trap instructions (like powerpc).
 */
bool __weak is_trap_insn(uprobe_opcode_t *insn)
{
	return is_swbp_insn(insn);
}

static void copy_from_page(struct page *page, unsigned long vaddr, void *dst, int len)
{
	void *kaddr = kmap_atomic(page);
	memcpy(dst, kaddr + (vaddr & ~PAGE_MASK), len);
	kunmap_atomic(kaddr);
}

static void copy_to_page(struct page *page, unsigned long vaddr, const void *src, int len)
{
	void *kaddr = kmap_atomic(page);
	memcpy(kaddr + (vaddr & ~PAGE_MASK), src, len);
	kunmap_atomic(kaddr);
}

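/*
 * Return convention (editorial summary, inferred from the caller):
 * verify_opcode() returns 1 when the opcode at @vaddr should be
 * (re)written and 0 when there is nothing to do (breakpoint already
 * installed on register, original insn already restored on unregister);
 * uprobe_write_opcode() treats any value <= 0 as "skip the write".
 */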
static int verify_opcode(struct page *page, unsigned long vaddr, uprobe_opcode_t *new_opcode)
{
	uprobe_opcode_t old_opcode;
	bool is_swbp;

	/*
	 * Note: We only check if the old_opcode is UPROBE_SWBP_INSN here.
	 * We do not check if it is any other 'trap variant' which could
	 * be conditional trap instruction such as the one powerpc supports.
	 *
	 * The logic is that we do not care if the underlying instruction
	 * is a trap variant; uprobes always win over any other (gdb)
	 * breakpoint.
	 */
	copy_from_page(page, vaddr, &old_opcode, UPROBE_SWBP_INSN_SIZE);
	is_swbp = is_swbp_insn(&old_opcode);

	if (is_swbp_insn(new_opcode)) {
		if (is_swbp)		/* register: already installed? */
			return 0;
	} else {
		if (!is_swbp)		/* unregister: was it changed by us? */
			return 0;
	}

	return 1;
}

static struct delayed_uprobe *
delayed_uprobe_check(struct uprobe *uprobe, struct mm_struct *mm)
{
	struct delayed_uprobe *du;

	list_for_each_entry(du, &delayed_uprobe_list, list)
		if (du->uprobe == uprobe && du->mm == mm)
			return du;
	return NULL;
}

static int delayed_uprobe_add(struct uprobe *uprobe, struct mm_struct *mm)
{
	struct delayed_uprobe *du;

	if (delayed_uprobe_check(uprobe, mm))
		return 0;

	du = kzalloc(sizeof(*du), GFP_KERNEL);
	if (!du)
		return -ENOMEM;

	du->uprobe = uprobe;
	du->mm = mm;
	list_add(&du->list, &delayed_uprobe_list);
	return 0;
}

static void delayed_uprobe_delete(struct delayed_uprobe *du)
{
	if (WARN_ON(!du))
		return;
	list_del(&du->list);
	kfree(du);
}

static void delayed_uprobe_remove(struct uprobe *uprobe, struct mm_struct *mm)
{
	struct list_head *pos, *q;
	struct delayed_uprobe *du;

	if (!uprobe && !mm)
		return;

	list_for_each_safe(pos, q, &delayed_uprobe_list) {
		du = list_entry(pos, struct delayed_uprobe, list);

		if (uprobe && du->uprobe != uprobe)
			continue;
		if (mm && du->mm != mm)
			continue;

		delayed_uprobe_delete(du);
	}
}

static bool valid_ref_ctr_vma(struct uprobe *uprobe,
			      struct vm_area_struct *vma)
{
	unsigned long vaddr = offset_to_vaddr(vma, uprobe->ref_ctr_offset);

	return uprobe->ref_ctr_offset &&
		vma->vm_file &&
		file_inode(vma->vm_file) == uprobe->inode &&
		(vma->vm_flags & (VM_WRITE|VM_SHARED)) == VM_WRITE &&
		vma->vm_start <= vaddr &&
		vma->vm_end > vaddr;
}

static struct vm_area_struct *
find_ref_ctr_vma(struct uprobe *uprobe, struct mm_struct *mm)
{
	VMA_ITERATOR(vmi, mm, 0);
	struct vm_area_struct *tmp;

	for_each_vma(vmi, tmp)
		if (valid_ref_ctr_vma(uprobe, tmp))
			return tmp;

	return NULL;
}

static int
__update_ref_ctr(struct mm_struct *mm, unsigned long vaddr, short d)
{
	void *kaddr;
	struct page *page;
	struct vm_area_struct *vma;
	int ret;
	short *ptr;

	if (!vaddr || !d)
		return -EINVAL;

	ret = get_user_pages_remote(mm, vaddr, 1,
				    FOLL_WRITE, &page, &vma, NULL);
	if (unlikely(ret <= 0)) {
		/*
		 * We are asking for 1 page. If get_user_pages_remote() fails,
		 * it may return 0; in that case we have to return an error.
		 */
		return ret == 0 ? -EBUSY : ret;
	}

	kaddr = kmap_atomic(page);
	ptr = kaddr + (vaddr & ~PAGE_MASK);

	if (unlikely(*ptr + d < 0)) {
		pr_warn("ref_ctr going negative. vaddr: 0x%lx, "
			"curr val: %d, delta: %d\n", vaddr, *ptr, d);
		ret = -EINVAL;
		goto out;
	}

	*ptr += d;
	ret = 0;
out:
	kunmap_atomic(kaddr);
	put_page(page);
	return ret;
}
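/*
 * Background (editorial note): the counter updated here is an SDT/USDT
 * style semaphore, a short living in the probed process's writable data
 * at uprobe->ref_ctr_offset. Userspace that guards a probe site with
 * e.g. "if (my_probe_semaphore) { ... }" (a hypothetical name) only
 * pays for argument setup while at least one consumer is attached, i.e.
 * while the counter is non-zero.
 */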

static void update_ref_ctr_warn(struct uprobe *uprobe,
				struct mm_struct *mm, short d)
{
	pr_warn("ref_ctr %s failed for inode: 0x%lx offset: "
		"0x%llx ref_ctr_offset: 0x%llx of mm: 0x%pK\n",
		d > 0 ? "increment" : "decrement", uprobe->inode->i_ino,
		(unsigned long long) uprobe->offset,
		(unsigned long long) uprobe->ref_ctr_offset, mm);
}

static int update_ref_ctr(struct uprobe *uprobe, struct mm_struct *mm,
			  short d)
{
	struct vm_area_struct *rc_vma;
	unsigned long rc_vaddr;
	int ret = 0;

	rc_vma = find_ref_ctr_vma(uprobe, mm);

	if (rc_vma) {
		rc_vaddr = offset_to_vaddr(rc_vma, uprobe->ref_ctr_offset);
		ret = __update_ref_ctr(mm, rc_vaddr, d);
		if (ret)
			update_ref_ctr_warn(uprobe, mm, d);

		if (d > 0)
			return ret;
	}

	mutex_lock(&delayed_uprobe_lock);
	if (d > 0)
		ret = delayed_uprobe_add(uprobe, mm);
	else
		delayed_uprobe_remove(uprobe, mm);
	mutex_unlock(&delayed_uprobe_lock);

	return ret;
}

/*
 * NOTE:
 * Expect the breakpoint instruction to be the smallest size instruction for
 * the architecture. If an architecture has variable-length instructions and
 * the breakpoint instruction is not the smallest-length instruction
 * supported by that architecture, then we need to modify is_trap_at_addr and
 * uprobe_write_opcode accordingly. This would never be a problem for archs
 * that have fixed length instructions.
 *
 * uprobe_write_opcode - write the opcode at a given virtual address.
 * @auprobe: arch specific probepoint information.
 * @mm: the probed process address space.
 * @vaddr: the virtual address to store the opcode.
 * @opcode: opcode to be written at @vaddr.
 *
 * Called with mm->mmap_lock held for write.
 * Return 0 (success) or a negative errno.
 */
int uprobe_write_opcode(struct arch_uprobe *auprobe, struct mm_struct *mm,
			unsigned long vaddr, uprobe_opcode_t opcode)
{
	struct uprobe *uprobe;
	struct page *old_page, *new_page;
	struct vm_area_struct *vma;
	int ret, is_register, ref_ctr_updated = 0;
	bool orig_page_huge = false;
	unsigned int gup_flags = FOLL_FORCE;

	is_register = is_swbp_insn(&opcode);
	uprobe = container_of(auprobe, struct uprobe, arch);

retry:
	if (is_register)
		gup_flags |= FOLL_SPLIT_PMD;
	/* Read the page with vaddr into memory */
	ret = get_user_pages_remote(mm, vaddr, 1, gup_flags,
				    &old_page, &vma, NULL);
	if (ret <= 0)
		return ret;

	ret = verify_opcode(old_page, vaddr, &opcode);
	if (ret <= 0)
		goto put_old;

	if (WARN(!is_register && PageCompound(old_page),
		 "uprobe unregister should never work on compound page\n")) {
		ret = -EINVAL;
		goto put_old;
	}

	/* We are going to replace instruction, update ref_ctr. */
	if (!ref_ctr_updated && uprobe->ref_ctr_offset) {
		ret = update_ref_ctr(uprobe, mm, is_register ? 1 : -1);
		if (ret)
			goto put_old;

		ref_ctr_updated = 1;
	}

	ret = 0;
	if (!is_register && !PageAnon(old_page))
		goto put_old;

	ret = anon_vma_prepare(vma);
	if (ret)
		goto put_old;

	ret = -ENOMEM;
	new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, vaddr);
	if (!new_page)
		goto put_old;

	__SetPageUptodate(new_page);
	copy_highpage(new_page, old_page);
	copy_to_page(new_page, vaddr, &opcode, UPROBE_SWBP_INSN_SIZE);

	if (!is_register) {
		struct page *orig_page;
		pgoff_t index;

		VM_BUG_ON_PAGE(!PageAnon(old_page), old_page);

		index = vaddr_to_offset(vma, vaddr & PAGE_MASK) >> PAGE_SHIFT;
		orig_page = find_get_page(vma->vm_file->f_inode->i_mapping,
					  index);

		if (orig_page) {
			if (PageUptodate(orig_page) &&
			    pages_identical(new_page, orig_page)) {
				/* let go new_page */
				put_page(new_page);
				new_page = NULL;

				if (PageCompound(orig_page))
					orig_page_huge = true;
			}
			put_page(orig_page);
		}
	}

	ret = __replace_page(vma, vaddr, old_page, new_page);
	if (new_page)
		put_page(new_page);
put_old:
	put_page(old_page);

	if (unlikely(ret == -EAGAIN))
		goto retry;

	/* Revert back reference counter if instruction update failed. */
	if (ret && is_register && ref_ctr_updated)
		update_ref_ctr(uprobe, mm, -1);

	/* try collapse pmd for compound page */
	if (!ret && orig_page_huge)
		collapse_pte_mapped_thp(mm, vaddr, false);

	return ret;
}

/**
 * set_swbp - store breakpoint at a given address.
 * @auprobe: arch specific probepoint information.
 * @mm: the probed process address space.
 * @vaddr: the virtual address to insert the opcode.
 *
 * For mm @mm, store the breakpoint instruction at @vaddr.
 * Return 0 (success) or a negative errno.
 */
int __weak set_swbp(struct arch_uprobe *auprobe, struct mm_struct *mm, unsigned long vaddr)
{
	return uprobe_write_opcode(auprobe, mm, vaddr, UPROBE_SWBP_INSN);
}

/**
 * set_orig_insn - Restore the original instruction.
 * @mm: the probed process address space.
 * @auprobe: arch specific probepoint information.
 * @vaddr: the virtual address to insert the opcode.
 *
 * For mm @mm, restore the original opcode at @vaddr.
 * Return 0 (success) or a negative errno.
 */
int __weak
set_orig_insn(struct arch_uprobe *auprobe, struct mm_struct *mm, unsigned long vaddr)
{
	return uprobe_write_opcode(auprobe, mm, vaddr,
			*(uprobe_opcode_t *)&auprobe->insn);
}

static struct uprobe *get_uprobe(struct uprobe *uprobe)
{
	refcount_inc(&uprobe->ref);
	return uprobe;
}

static void put_uprobe(struct uprobe *uprobe)
{
	if (refcount_dec_and_test(&uprobe->ref)) {
		/*
		 * If the application munmap()s the exec vma before
		 * uprobe_unregister() gets called, we don't get a chance to
		 * remove the uprobe from delayed_uprobe_list via
		 * remove_breakpoint(). Do it here.
		 */
		mutex_lock(&delayed_uprobe_lock);
		delayed_uprobe_remove(uprobe, NULL);
		mutex_unlock(&delayed_uprobe_lock);
		kfree(uprobe);
	}
}

static __always_inline
int uprobe_cmp(const struct inode *l_inode, const loff_t l_offset,
	       const struct uprobe *r)
{
	if (l_inode < r->inode)
		return -1;

	if (l_inode > r->inode)
		return 1;

	if (l_offset < r->offset)
		return -1;

	if (l_offset > r->offset)
		return 1;

	return 0;
}
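/*
 * Editorial note: uprobe_cmp() defines the total order on inode:offset
 * pairs that both rb_find() (via __uprobe_cmp_key()) and rb_find_add()
 * (via __uprobe_cmp()) below rely on. Comparing inode pointers rather
 * than inode numbers is fine here: the rbtree only needs a consistent
 * order, not a meaningful one.
 */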

#define __node_2_uprobe(node) \
	rb_entry((node), struct uprobe, rb_node)

struct __uprobe_key {
	struct inode *inode;
	loff_t offset;
};

static inline int __uprobe_cmp_key(const void *key, const struct rb_node *b)
{
	const struct __uprobe_key *a = key;
	return uprobe_cmp(a->inode, a->offset, __node_2_uprobe(b));
}

static inline int __uprobe_cmp(struct rb_node *a, const struct rb_node *b)
{
	struct uprobe *u = __node_2_uprobe(a);
	return uprobe_cmp(u->inode, u->offset, __node_2_uprobe(b));
}

static struct uprobe *__find_uprobe(struct inode *inode, loff_t offset)
{
	struct __uprobe_key key = {
		.inode = inode,
		.offset = offset,
	};
	struct rb_node *node = rb_find(&key, &uprobes_tree, __uprobe_cmp_key);

	if (node)
		return get_uprobe(__node_2_uprobe(node));

	return NULL;
}

/*
 * Find a uprobe corresponding to a given inode:offset
 * Acquires uprobes_treelock
 */
static struct uprobe *find_uprobe(struct inode *inode, loff_t offset)
{
	struct uprobe *uprobe;

	spin_lock(&uprobes_treelock);
	uprobe = __find_uprobe(inode, offset);
	spin_unlock(&uprobes_treelock);

	return uprobe;
}

static struct uprobe *__insert_uprobe(struct uprobe *uprobe)
{
	struct rb_node *node;

	node = rb_find_add(&uprobe->rb_node, &uprobes_tree, __uprobe_cmp);
	if (node)
		return get_uprobe(__node_2_uprobe(node));

	/* get access + creation ref */
	refcount_set(&uprobe->ref, 2);
	return NULL;
}

/*
 * Acquire uprobes_treelock.
 * Matching uprobe already exists in rbtree;
 *	increment (access refcount) and return the matching uprobe.
 *
 * No matching uprobe; insert the uprobe in rb_tree;
 *	get a double refcount (access + creation) and return NULL.
 */
static struct uprobe *insert_uprobe(struct uprobe *uprobe)
{
	struct uprobe *u;

	spin_lock(&uprobes_treelock);
	u = __insert_uprobe(uprobe);
	spin_unlock(&uprobes_treelock);

	return u;
}

static void
ref_ctr_mismatch_warn(struct uprobe *cur_uprobe, struct uprobe *uprobe)
{
	pr_warn("ref_ctr_offset mismatch. inode: 0x%lx offset: 0x%llx "
		"ref_ctr_offset(old): 0x%llx ref_ctr_offset(new): 0x%llx\n",
		uprobe->inode->i_ino, (unsigned long long) uprobe->offset,
		(unsigned long long) cur_uprobe->ref_ctr_offset,
		(unsigned long long) uprobe->ref_ctr_offset);
}

static struct uprobe *alloc_uprobe(struct inode *inode, loff_t offset,
				   loff_t ref_ctr_offset)
{
	struct uprobe *uprobe, *cur_uprobe;

	uprobe = kzalloc(sizeof(struct uprobe), GFP_KERNEL);
	if (!uprobe)
		return NULL;

	uprobe->inode = inode;
	uprobe->offset = offset;
	uprobe->ref_ctr_offset = ref_ctr_offset;
	init_rwsem(&uprobe->register_rwsem);
	init_rwsem(&uprobe->consumer_rwsem);

	/* add to uprobes_tree, sorted on inode:offset */
	cur_uprobe = insert_uprobe(uprobe);
	/* a uprobe exists for this inode:offset combination */
	if (cur_uprobe) {
		if (cur_uprobe->ref_ctr_offset != uprobe->ref_ctr_offset) {
			ref_ctr_mismatch_warn(cur_uprobe, uprobe);
			put_uprobe(cur_uprobe);
			kfree(uprobe);
			return ERR_PTR(-EINVAL);
		}
		kfree(uprobe);
		uprobe = cur_uprobe;
	}

	return uprobe;
}

static void consumer_add(struct uprobe *uprobe, struct uprobe_consumer *uc)
{
	down_write(&uprobe->consumer_rwsem);
	uc->next = uprobe->consumers;
	uprobe->consumers = uc;
	up_write(&uprobe->consumer_rwsem);
}

/*
 * For uprobe @uprobe, delete the consumer @uc.
 * Return true if @uc was deleted successfully,
 * false otherwise.
 */
static bool consumer_del(struct uprobe *uprobe, struct uprobe_consumer *uc)
{
	struct uprobe_consumer **con;
	bool ret = false;

	down_write(&uprobe->consumer_rwsem);
	for (con = &uprobe->consumers; *con; con = &(*con)->next) {
		if (*con == uc) {
			*con = uc->next;
			ret = true;
			break;
		}
	}
	up_write(&uprobe->consumer_rwsem);

	return ret;
}

static int __copy_insn(struct address_space *mapping, struct file *filp,
			void *insn, int nbytes, loff_t offset)
{
	struct page *page;
	/*
	 * Ensure that the page that has the original instruction is populated
	 * and in page-cache. If ->read_folio == NULL it must be shmem_mapping(),
	 * see uprobe_register().
	 */
	if (mapping->a_ops->read_folio)
		page = read_mapping_page(mapping, offset >> PAGE_SHIFT, filp);
	else
		page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT);
	if (IS_ERR(page))
		return PTR_ERR(page);

	copy_from_page(page, offset, insn, nbytes);
	put_page(page);

	return 0;
}

static int copy_insn(struct uprobe *uprobe, struct file *filp)
{
	struct address_space *mapping = uprobe->inode->i_mapping;
	loff_t offs = uprobe->offset;
	void *insn = &uprobe->arch.insn;
	int size = sizeof(uprobe->arch.insn);
	int len, err = -EIO;

	/* Copy only available bytes, -EIO if nothing was read */
	do {
		if (offs >= i_size_read(uprobe->inode))
			break;

		len = min_t(int, size, PAGE_SIZE - (offs & ~PAGE_MASK));
		err = __copy_insn(mapping, filp, insn, len, offs);
		if (err)
			break;

		insn += len;
		offs += len;
		size -= len;
	} while (size);

	return err;
}

static int prepare_uprobe(struct uprobe *uprobe, struct file *file,
				struct mm_struct *mm, unsigned long vaddr)
{
	int ret = 0;

	if (test_bit(UPROBE_COPY_INSN, &uprobe->flags))
		return ret;

	/* TODO: move this into _register, until then we abuse this sem. */
	down_write(&uprobe->consumer_rwsem);
	if (test_bit(UPROBE_COPY_INSN, &uprobe->flags))
		goto out;

	ret = copy_insn(uprobe, file);
	if (ret)
		goto out;

	ret = -ENOTSUPP;
	if (is_trap_insn((uprobe_opcode_t *)&uprobe->arch.insn))
		goto out;

	ret = arch_uprobe_analyze_insn(&uprobe->arch, mm, vaddr);
	if (ret)
		goto out;

	smp_wmb(); /* pairs with the smp_rmb() in handle_swbp() */
	set_bit(UPROBE_COPY_INSN, &uprobe->flags);

out:
	up_write(&uprobe->consumer_rwsem);

	return ret;
}

static inline bool consumer_filter(struct uprobe_consumer *uc,
				   enum uprobe_filter_ctx ctx, struct mm_struct *mm)
{
	return !uc->filter || uc->filter(uc, ctx, mm);
}

static bool filter_chain(struct uprobe *uprobe,
			 enum uprobe_filter_ctx ctx, struct mm_struct *mm)
{
	struct uprobe_consumer *uc;
	bool ret = false;

	down_read(&uprobe->consumer_rwsem);
	for (uc = uprobe->consumers; uc; uc = uc->next) {
		ret = consumer_filter(uc, ctx, mm);
		if (ret)
			break;
	}
	up_read(&uprobe->consumer_rwsem);

	return ret;
}

static int
install_breakpoint(struct uprobe *uprobe, struct mm_struct *mm,
			struct vm_area_struct *vma, unsigned long vaddr)
{
	bool first_uprobe;
	int ret;

	ret = prepare_uprobe(uprobe, vma->vm_file, mm, vaddr);
	if (ret)
		return ret;

	/*
	 * set MMF_HAS_UPROBES in advance for uprobe_pre_sstep_notifier();
	 * the task can hit this breakpoint right after __replace_page().
	 */
	first_uprobe = !test_bit(MMF_HAS_UPROBES, &mm->flags);
	if (first_uprobe)
		set_bit(MMF_HAS_UPROBES, &mm->flags);

	ret = set_swbp(&uprobe->arch, mm, vaddr);
	if (!ret)
		clear_bit(MMF_RECALC_UPROBES, &mm->flags);
	else if (first_uprobe)
		clear_bit(MMF_HAS_UPROBES, &mm->flags);

	return ret;
}

static int
remove_breakpoint(struct uprobe *uprobe, struct mm_struct *mm, unsigned long vaddr)
{
	set_bit(MMF_RECALC_UPROBES, &mm->flags);
	return set_orig_insn(&uprobe->arch, mm, vaddr);
}

static inline bool uprobe_is_active(struct uprobe *uprobe)
{
	return !RB_EMPTY_NODE(&uprobe->rb_node);
}
/*
 * There could be threads that have already hit the breakpoint. They
 * will recheck the current insn and restart if find_uprobe() fails.
 * See find_active_uprobe().
 */
static void delete_uprobe(struct uprobe *uprobe)
{
	if (WARN_ON(!uprobe_is_active(uprobe)))
		return;

	spin_lock(&uprobes_treelock);
	rb_erase(&uprobe->rb_node, &uprobes_tree);
	spin_unlock(&uprobes_treelock);
	RB_CLEAR_NODE(&uprobe->rb_node); /* for uprobe_is_active() */
	put_uprobe(uprobe);
}

struct map_info {
	struct map_info *next;
	struct mm_struct *mm;
	unsigned long vaddr;
};

static inline struct map_info *free_map_info(struct map_info *info)
{
	struct map_info *next = info->next;
	kfree(info);
	return next;
}

static struct map_info *
build_map_info(struct address_space *mapping, loff_t offset, bool is_register)
{
	unsigned long pgoff = offset >> PAGE_SHIFT;
	struct vm_area_struct *vma;
	struct map_info *curr = NULL;
	struct map_info *prev = NULL;
	struct map_info *info;
	int more = 0;

again:
	i_mmap_lock_read(mapping);
	vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
		if (!valid_vma(vma, is_register))
			continue;

		if (!prev && !more) {
			/*
			 * Needs GFP_NOWAIT to avoid i_mmap_rwsem recursion through
			 * reclaim. This is optimistic, no harm done if it fails.
			 */
			prev = kmalloc(sizeof(struct map_info),
					GFP_NOWAIT | __GFP_NOMEMALLOC | __GFP_NOWARN);
			if (prev)
				prev->next = NULL;
		}
		if (!prev) {
			more++;
			continue;
		}

		if (!mmget_not_zero(vma->vm_mm))
			continue;

		info = prev;
		prev = prev->next;
		info->next = curr;
		curr = info;

		info->mm = vma->vm_mm;
		info->vaddr = offset_to_vaddr(vma, offset);
	}
	i_mmap_unlock_read(mapping);

	if (!more)
		goto out;

	prev = curr;
	while (curr) {
		mmput(curr->mm);
		curr = curr->next;
	}

	do {
		info = kmalloc(sizeof(struct map_info), GFP_KERNEL);
		if (!info) {
			curr = ERR_PTR(-ENOMEM);
			goto out;
		}
		info->next = prev;
		prev = info;
	} while (--more);

	goto again;
out:
	while (prev)
		prev = free_map_info(prev);
	return curr;
}

static int
register_for_each_vma(struct uprobe *uprobe, struct uprobe_consumer *new)
{
	bool is_register = !!new;
	struct map_info *info;
	int err = 0;

	percpu_down_write(&dup_mmap_sem);
	info = build_map_info(uprobe->inode->i_mapping,
					uprobe->offset, is_register);
	if (IS_ERR(info)) {
		err = PTR_ERR(info);
		goto out;
	}

	while (info) {
		struct mm_struct *mm = info->mm;
		struct vm_area_struct *vma;

		if (err && is_register)
			goto free;

		mmap_write_lock(mm);
		vma = find_vma(mm, info->vaddr);
		if (!vma || !valid_vma(vma, is_register) ||
		    file_inode(vma->vm_file) != uprobe->inode)
			goto unlock;

		if (vma->vm_start > info->vaddr ||
		    vaddr_to_offset(vma, info->vaddr) != uprobe->offset)
			goto unlock;

		if (is_register) {
			/* consult only the "caller", new consumer. */
			if (consumer_filter(new,
					UPROBE_FILTER_REGISTER, mm))
				err = install_breakpoint(uprobe, mm, vma, info->vaddr);
		} else if (test_bit(MMF_HAS_UPROBES, &mm->flags)) {
			if (!filter_chain(uprobe,
					UPROBE_FILTER_UNREGISTER, mm))
				err |= remove_breakpoint(uprobe, mm, info->vaddr);
		}

unlock:
		mmap_write_unlock(mm);
free:
		mmput(mm);
		info = free_map_info(info);
	}
out:
	percpu_up_write(&dup_mmap_sem);
	return err;
}

static void
__uprobe_unregister(struct uprobe *uprobe, struct uprobe_consumer *uc)
{
	int err;

	if (WARN_ON(!consumer_del(uprobe, uc)))
		return;

	err = register_for_each_vma(uprobe, NULL);
	/* TODO: can't unregister? schedule a worker thread */
	if (!uprobe->consumers && !err)
		delete_uprobe(uprobe);
}

/*
 * uprobe_unregister - unregister an already registered probe.
 * @inode: the file in which the probe has to be removed.
 * @offset: offset from the start of the file.
 * @uc: identify which probe if multiple probes are colocated.
 */
void uprobe_unregister(struct inode *inode, loff_t offset, struct uprobe_consumer *uc)
{
	struct uprobe *uprobe;

	uprobe = find_uprobe(inode, offset);
	if (WARN_ON(!uprobe))
		return;

	down_write(&uprobe->register_rwsem);
	__uprobe_unregister(uprobe, uc);
	up_write(&uprobe->register_rwsem);
	put_uprobe(uprobe);
}
EXPORT_SYMBOL_GPL(uprobe_unregister);

/*
 * __uprobe_register - register a probe
 * @inode: the file in which the probe has to be placed.
 * @offset: offset from the start of the file.
 * @uc: information on how to handle the probe.
 *
 * Apart from the access refcount, __uprobe_register() takes a creation
 * refcount (through alloc_uprobe) if and only if this @uprobe is getting
 * inserted into the rbtree (i.e first consumer for a @inode:@offset
 * tuple). Creation refcount stops uprobe_unregister from freeing the
 * @uprobe even before the register operation is complete. Creation
 * refcount is released when the last @uc for the @uprobe
 * unregisters. Caller of __uprobe_register() is required to keep @inode
 * (and the containing mount) referenced.
 *
 * Return errno if it cannot successfully install probes
 * else return 0 (success)
 */
static int __uprobe_register(struct inode *inode, loff_t offset,
			     loff_t ref_ctr_offset, struct uprobe_consumer *uc)
{
	struct uprobe *uprobe;
	int ret;

	/* Uprobe must have at least one set consumer */
	if (!uc->handler && !uc->ret_handler)
		return -EINVAL;

	/* copy_insn() uses read_mapping_page() or shmem_read_mapping_page() */
	if (!inode->i_mapping->a_ops->read_folio &&
	    !shmem_mapping(inode->i_mapping))
		return -EIO;
	/* Racy, just to catch the obvious mistakes */
	if (offset > i_size_read(inode))
		return -EINVAL;

	/*
	 * This ensures that copy_from_page(), copy_to_page() and
	 * __update_ref_ctr() can't cross page boundary.
	 */
	if (!IS_ALIGNED(offset, UPROBE_SWBP_INSN_SIZE))
		return -EINVAL;
	if (!IS_ALIGNED(ref_ctr_offset, sizeof(short)))
		return -EINVAL;

retry:
	uprobe = alloc_uprobe(inode, offset, ref_ctr_offset);
	if (!uprobe)
		return -ENOMEM;
	if (IS_ERR(uprobe))
		return PTR_ERR(uprobe);

	/*
	 * We can race with uprobe_unregister()->delete_uprobe().
	 * Check uprobe_is_active() and retry if it is false.
	 */
	down_write(&uprobe->register_rwsem);
	ret = -EAGAIN;
	if (likely(uprobe_is_active(uprobe))) {
		consumer_add(uprobe, uc);
		ret = register_for_each_vma(uprobe, uc);
		if (ret)
			__uprobe_unregister(uprobe, uc);
	}
	up_write(&uprobe->register_rwsem);
	put_uprobe(uprobe);

	if (unlikely(ret == -EAGAIN))
		goto retry;
	return ret;
}

int uprobe_register(struct inode *inode, loff_t offset,
		    struct uprobe_consumer *uc)
{
	return __uprobe_register(inode, offset, 0, uc);
}
EXPORT_SYMBOL_GPL(uprobe_register);
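/*
 * Usage sketch (editorial note, hypothetical names): a consumer embeds
 * a uprobe_consumer and registers it against an inode:offset pair; the
 * handler then runs on every hit of the probed instruction:
 *
 *	static int my_handler(struct uprobe_consumer *uc,
 *			      struct pt_regs *regs)
 *	{
 *		return 0;
 *	}
 *
 *	static struct uprobe_consumer my_uc = { .handler = my_handler };
 *
 *	err = uprobe_register(inode, offset, &my_uc);
 *	...
 *	uprobe_unregister(inode, offset, &my_uc);
 *
 * The caller must keep @inode (and the containing mount) referenced for
 * the lifetime of the registration, as documented above
 * __uprobe_register().
 */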

int uprobe_register_refctr(struct inode *inode, loff_t offset,
			   loff_t ref_ctr_offset, struct uprobe_consumer *uc)
{
	return __uprobe_register(inode, offset, ref_ctr_offset, uc);
}
EXPORT_SYMBOL_GPL(uprobe_register_refctr);

/*
 * uprobe_apply - add or remove the breakpoints for an already
 * registered probe, according to @add.
 * @inode: the file in which the probe resides.
 * @offset: offset from the start of the file.
 * @uc: consumer which wants to add more or remove some breakpoints
 * @add: add or remove the breakpoints
 */
int uprobe_apply(struct inode *inode, loff_t offset,
			struct uprobe_consumer *uc, bool add)
{
	struct uprobe *uprobe;
	struct uprobe_consumer *con;
	int ret = -ENOENT;

	uprobe = find_uprobe(inode, offset);
	if (WARN_ON(!uprobe))
		return ret;

	down_write(&uprobe->register_rwsem);
	for (con = uprobe->consumers; con && con != uc ; con = con->next)
		;
	if (con)
		ret = register_for_each_vma(uprobe, add ? uc : NULL);
	up_write(&uprobe->register_rwsem);
	put_uprobe(uprobe);

	return ret;
}

static int unapply_uprobe(struct uprobe *uprobe, struct mm_struct *mm)
{
	VMA_ITERATOR(vmi, mm, 0);
	struct vm_area_struct *vma;
	int err = 0;

	mmap_read_lock(mm);
	for_each_vma(vmi, vma) {
		unsigned long vaddr;
		loff_t offset;

		if (!valid_vma(vma, false) ||
		    file_inode(vma->vm_file) != uprobe->inode)
			continue;

		offset = (loff_t)vma->vm_pgoff << PAGE_SHIFT;
		if (uprobe->offset < offset ||
		    uprobe->offset >= offset + vma->vm_end - vma->vm_start)
			continue;

		vaddr = offset_to_vaddr(vma, uprobe->offset);
		err |= remove_breakpoint(uprobe, mm, vaddr);
	}
	mmap_read_unlock(mm);

	return err;
}

static struct rb_node *
find_node_in_range(struct inode *inode, loff_t min, loff_t max)
{
	struct rb_node *n = uprobes_tree.rb_node;

	while (n) {
		struct uprobe *u = rb_entry(n, struct uprobe, rb_node);

		if (inode < u->inode) {
			n = n->rb_left;
		} else if (inode > u->inode) {
			n = n->rb_right;
		} else {
			if (max < u->offset)
				n = n->rb_left;
			else if (min > u->offset)
				n = n->rb_right;
			else
				break;
		}
	}

	return n;
}

/*
 * For a given range in vma, build a list of probes that need to be inserted.
 */
static void build_probe_list(struct inode *inode,
				struct vm_area_struct *vma,
				unsigned long start, unsigned long end,
				struct list_head *head)
{
	loff_t min, max;
	struct rb_node *n, *t;
	struct uprobe *u;

	INIT_LIST_HEAD(head);
	min = vaddr_to_offset(vma, start);
	max = min + (end - start) - 1;

	spin_lock(&uprobes_treelock);
	n = find_node_in_range(inode, min, max);
	if (n) {
		for (t = n; t; t = rb_prev(t)) {
			u = rb_entry(t, struct uprobe, rb_node);
			if (u->inode != inode || u->offset < min)
				break;
			list_add(&u->pending_list, head);
			get_uprobe(u);
		}
		for (t = n; (t = rb_next(t)); ) {
			u = rb_entry(t, struct uprobe, rb_node);
			if (u->inode != inode || u->offset > max)
				break;
			list_add(&u->pending_list, head);
			get_uprobe(u);
		}
	}
	spin_unlock(&uprobes_treelock);
}

/* @vma contains reference counter, not the probed instruction. */
static int delayed_ref_ctr_inc(struct vm_area_struct *vma)
{
	struct list_head *pos, *q;
	struct delayed_uprobe *du;
	unsigned long vaddr;
	int ret = 0, err = 0;

	mutex_lock(&delayed_uprobe_lock);
	list_for_each_safe(pos, q, &delayed_uprobe_list) {
		du = list_entry(pos, struct delayed_uprobe, list);

		if (du->mm != vma->vm_mm ||
		    !valid_ref_ctr_vma(du->uprobe, vma))
			continue;

		vaddr = offset_to_vaddr(vma, du->uprobe->ref_ctr_offset);
		ret = __update_ref_ctr(vma->vm_mm, vaddr, 1);
		if (ret) {
			update_ref_ctr_warn(du->uprobe, vma->vm_mm, 1);
			if (!err)
				err = ret;
		}
		delayed_uprobe_delete(du);
	}
	mutex_unlock(&delayed_uprobe_lock);
	return err;
}

/*
 * Called from mmap_region/vma_adjust with mm->mmap_lock acquired.
 *
 * Currently we ignore all errors and always return 0, the callers
 * can't handle the failure anyway.
 */
int uprobe_mmap(struct vm_area_struct *vma)
{
	struct list_head tmp_list;
	struct uprobe *uprobe, *u;
	struct inode *inode;

	if (no_uprobe_events())
		return 0;

	if (vma->vm_file &&
	    (vma->vm_flags & (VM_WRITE|VM_SHARED)) == VM_WRITE &&
	    test_bit(MMF_HAS_UPROBES, &vma->vm_mm->flags))
		delayed_ref_ctr_inc(vma);

	if (!valid_vma(vma, true))
		return 0;

	inode = file_inode(vma->vm_file);
	if (!inode)
		return 0;

	mutex_lock(uprobes_mmap_hash(inode));
	build_probe_list(inode, vma, vma->vm_start, vma->vm_end, &tmp_list);
	/*
	 * We can race with uprobe_unregister(); this uprobe can already be
	 * removed. But in that case filter_chain() must return false, as all
	 * consumers have gone away.
	 */
	list_for_each_entry_safe(uprobe, u, &tmp_list, pending_list) {
		if (!fatal_signal_pending(current) &&
		    filter_chain(uprobe, UPROBE_FILTER_MMAP, vma->vm_mm)) {
			unsigned long vaddr = offset_to_vaddr(vma, uprobe->offset);
			install_breakpoint(uprobe, vma->vm_mm, vma, vaddr);
		}
		put_uprobe(uprobe);
	}
	mutex_unlock(uprobes_mmap_hash(inode));

	return 0;
}

static bool
vma_has_uprobes(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	loff_t min, max;
	struct inode *inode;
	struct rb_node *n;

	inode = file_inode(vma->vm_file);

	min = vaddr_to_offset(vma, start);
	max = min + (end - start) - 1;

	spin_lock(&uprobes_treelock);
	n = find_node_in_range(inode, min, max);
	spin_unlock(&uprobes_treelock);

	return !!n;
}

/*
 * Called in context of a munmap of a vma.
 */
void uprobe_munmap(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	if (no_uprobe_events() || !valid_vma(vma, false))
		return;

	if (!atomic_read(&vma->vm_mm->mm_users)) /* called by mmput() ? */
		return;

	if (!test_bit(MMF_HAS_UPROBES, &vma->vm_mm->flags) ||
	     test_bit(MMF_RECALC_UPROBES, &vma->vm_mm->flags))
		return;

	if (vma_has_uprobes(vma, start, end))
		set_bit(MMF_RECALC_UPROBES, &vma->vm_mm->flags);
}

/* Slot allocation for XOL */
static int xol_add_vma(struct mm_struct *mm, struct xol_area *area)
{
	struct vm_area_struct *vma;
	int ret;

	if (mmap_write_lock_killable(mm))
		return -EINTR;

	if (mm->uprobes_state.xol_area) {
		ret = -EALREADY;
		goto fail;
	}

	if (!area->vaddr) {
		/* Try to map as high as possible, this is only a hint. */
		area->vaddr = get_unmapped_area(NULL, TASK_SIZE - PAGE_SIZE,
						PAGE_SIZE, 0, 0);
		if (IS_ERR_VALUE(area->vaddr)) {
			ret = area->vaddr;
			goto fail;
		}
	}

	vma = _install_special_mapping(mm, area->vaddr, PAGE_SIZE,
				VM_EXEC|VM_MAYEXEC|VM_DONTCOPY|VM_IO,
				&area->xol_mapping);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto fail;
	}

	ret = 0;
	/* pairs with get_xol_area() */
	smp_store_release(&mm->uprobes_state.xol_area, area); /* ^^^ */
fail:
	mmap_write_unlock(mm);

	return ret;
}

static struct xol_area *__create_xol_area(unsigned long vaddr)
{
	struct mm_struct *mm = current->mm;
	uprobe_opcode_t insn = UPROBE_SWBP_INSN;
	struct xol_area *area;

	area = kmalloc(sizeof(*area), GFP_KERNEL);
	if (unlikely(!area))
		goto out;

	area->bitmap = kcalloc(BITS_TO_LONGS(UINSNS_PER_PAGE), sizeof(long),
			       GFP_KERNEL);
	if (!area->bitmap)
		goto free_area;

	area->xol_mapping.name = "[uprobes]";
	area->xol_mapping.fault = NULL;
	area->xol_mapping.pages = area->pages;
	area->pages[0] = alloc_page(GFP_HIGHUSER);
	if (!area->pages[0])
		goto free_bitmap;
	area->pages[1] = NULL;

	area->vaddr = vaddr;
	init_waitqueue_head(&area->wq);
	/* Reserve the 1st slot for get_trampoline_vaddr() */
	set_bit(0, area->bitmap);
	atomic_set(&area->slot_count, 1);
	arch_uprobe_copy_ixol(area->pages[0], 0, &insn, UPROBE_SWBP_INSN_SIZE);

	if (!xol_add_vma(mm, area))
		return area;

	__free_page(area->pages[0]);
free_bitmap:
	kfree(area->bitmap);
free_area:
	kfree(area);
out:
	return NULL;
}

/*
 * get_xol_area - Allocate process's xol_area if necessary.
 * This area will be used for storing instructions for execution out of line.
 *
 * Returns the allocated area or NULL.
 */
static struct xol_area *get_xol_area(void)
{
	struct mm_struct *mm = current->mm;
	struct xol_area *area;

	if (!mm->uprobes_state.xol_area)
		__create_xol_area(0);

	/* Pairs with xol_add_vma() smp_store_release() */
	area = READ_ONCE(mm->uprobes_state.xol_area); /* ^^^ */
	return area;
}

/*
 * uprobe_clear_state - Free the area allocated for slots.
 */
void uprobe_clear_state(struct mm_struct *mm)
{
	struct xol_area *area = mm->uprobes_state.xol_area;

	mutex_lock(&delayed_uprobe_lock);
	delayed_uprobe_remove(NULL, mm);
	mutex_unlock(&delayed_uprobe_lock);

	if (!area)
		return;

	put_page(area->pages[0]);
	kfree(area->bitmap);
	kfree(area);
}

void uprobe_start_dup_mmap(void)
{
	percpu_down_read(&dup_mmap_sem);
}

void uprobe_end_dup_mmap(void)
{
	percpu_up_read(&dup_mmap_sem);
}

void uprobe_dup_mmap(struct mm_struct *oldmm, struct mm_struct *newmm)
{
	if (test_bit(MMF_HAS_UPROBES, &oldmm->flags)) {
		set_bit(MMF_HAS_UPROBES, &newmm->flags);
		/* unconditionally, dup_mmap() skips VM_DONTCOPY vmas */
		set_bit(MMF_RECALC_UPROBES, &newmm->flags);
	}
}

/*
 * - search for a free slot.
 */
static unsigned long xol_take_insn_slot(struct xol_area *area)
{
	unsigned long slot_addr;
	int slot_nr;

	do {
		slot_nr = find_first_zero_bit(area->bitmap, UINSNS_PER_PAGE);
		if (slot_nr < UINSNS_PER_PAGE) {
			if (!test_and_set_bit(slot_nr, area->bitmap))
				break;

			slot_nr = UINSNS_PER_PAGE;
			continue;
		}
		wait_event(area->wq, (atomic_read(&area->slot_count) < UINSNS_PER_PAGE));
	} while (slot_nr >= UINSNS_PER_PAGE);

	slot_addr = area->vaddr + (slot_nr * UPROBE_XOL_SLOT_BYTES);
	atomic_inc(&area->slot_count);

	return slot_addr;
}
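/*
 * Worked example (editorial note, assuming 128-byte slots as on x86):
 * if find_first_zero_bit() returns slot_nr == 3, the slot address is
 * area->vaddr + 3 * 128 == area->vaddr + 0x180. Slot 0 is never handed
 * out here: __create_xol_area() reserved it for the uretprobe
 * trampoline, see get_trampoline_vaddr().
 */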

/*
 * xol_get_insn_slot - allocate a slot for xol.
 * Returns the allocated slot address or 0.
 */
static unsigned long xol_get_insn_slot(struct uprobe *uprobe)
{
	struct xol_area *area;
	unsigned long xol_vaddr;

	area = get_xol_area();
	if (!area)
		return 0;

	xol_vaddr = xol_take_insn_slot(area);
	if (unlikely(!xol_vaddr))
		return 0;

	arch_uprobe_copy_ixol(area->pages[0], xol_vaddr,
			      &uprobe->arch.ixol, sizeof(uprobe->arch.ixol));

	return xol_vaddr;
}

/*
 * xol_free_insn_slot - If slot was earlier allocated by
 * @xol_get_insn_slot(), make the slot available for
 * subsequent requests.
 */
static void xol_free_insn_slot(struct task_struct *tsk)
{
	struct xol_area *area;
	unsigned long vma_end;
	unsigned long slot_addr;

	if (!tsk->mm || !tsk->mm->uprobes_state.xol_area || !tsk->utask)
		return;

	slot_addr = tsk->utask->xol_vaddr;
	if (unlikely(!slot_addr))
		return;

	area = tsk->mm->uprobes_state.xol_area;
	vma_end = area->vaddr + PAGE_SIZE;
	if (area->vaddr <= slot_addr && slot_addr < vma_end) {
		unsigned long offset;
		int slot_nr;

		offset = slot_addr - area->vaddr;
		slot_nr = offset / UPROBE_XOL_SLOT_BYTES;
		if (slot_nr >= UINSNS_PER_PAGE)
			return;

		clear_bit(slot_nr, area->bitmap);
		atomic_dec(&area->slot_count);
		smp_mb__after_atomic(); /* pairs with prepare_to_wait() */
		if (waitqueue_active(&area->wq))
			wake_up(&area->wq);

		tsk->utask->xol_vaddr = 0;
	}
}

void __weak arch_uprobe_copy_ixol(struct page *page, unsigned long vaddr,
				  void *src, unsigned long len)
{
	/* Initialize the slot */
	copy_to_page(page, vaddr, src, len);

	/*
	 * We probably need flush_icache_user_page() but it needs vma.
	 * This should work on most of architectures by default. If
	 * architecture needs to do something different it can define
	 * its own version of the function.
	 */
	flush_dcache_page(page);
}

/**
 * uprobe_get_swbp_addr - compute address of swbp given post-swbp regs
 * @regs: Reflects the saved state of the task after it has hit a breakpoint
 * instruction.
 * Return the address of the breakpoint instruction.
 */
unsigned long __weak uprobe_get_swbp_addr(struct pt_regs *regs)
{
	return instruction_pointer(regs) - UPROBE_SWBP_INSN_SIZE;
}
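/*
 * Example (editorial note): on x86 the breakpoint is the one-byte int3
 * opcode 0xcc, and the trap leaves the instruction pointer just past
 * it, so subtracting UPROBE_SWBP_INSN_SIZE (1 there) recovers the
 * address the probe was planted at. An architecture whose trap reports
 * the breakpoint address itself can override this __weak default.
 */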

unsigned long uprobe_get_trap_addr(struct pt_regs *regs)
{
	struct uprobe_task *utask = current->utask;

	if (unlikely(utask && utask->active_uprobe))
		return utask->vaddr;

	return instruction_pointer(regs);
}

static struct return_instance *free_ret_instance(struct return_instance *ri)
{
	struct return_instance *next = ri->next;
	put_uprobe(ri->uprobe);
	kfree(ri);
	return next;
}

/*
 * Called with no locks held.
 * Called in context of an exiting or an exec-ing thread.
 */
void uprobe_free_utask(struct task_struct *t)
{
	struct uprobe_task *utask = t->utask;
	struct return_instance *ri;

	if (!utask)
		return;

	if (utask->active_uprobe)
		put_uprobe(utask->active_uprobe);

	ri = utask->return_instances;
	while (ri)
		ri = free_ret_instance(ri);

	xol_free_insn_slot(t);
	kfree(utask);
	t->utask = NULL;
}

/*
 * Allocate a uprobe_task object for the task if necessary.
 * Called when the thread hits a breakpoint.
 *
 * Returns:
 * - pointer to new uprobe_task on success
 * - NULL otherwise
 */
static struct uprobe_task *get_utask(void)
{
	if (!current->utask)
		current->utask = kzalloc(sizeof(struct uprobe_task), GFP_KERNEL);
	return current->utask;
}

static int dup_utask(struct task_struct *t, struct uprobe_task *o_utask)
{
	struct uprobe_task *n_utask;
	struct return_instance **p, *o, *n;

	n_utask = kzalloc(sizeof(struct uprobe_task), GFP_KERNEL);
	if (!n_utask)
		return -ENOMEM;
	t->utask = n_utask;

	p = &n_utask->return_instances;
	for (o = o_utask->return_instances; o; o = o->next) {
		n = kmalloc(sizeof(struct return_instance), GFP_KERNEL);
		if (!n)
			return -ENOMEM;

		*n = *o;
		get_uprobe(n->uprobe);
		n->next = NULL;

		*p = n;
		p = &n->next;
		n_utask->depth++;
	}

	return 0;
}

static void uprobe_warn(struct task_struct *t, const char *msg)
{
	pr_warn("uprobe: %s:%d failed to %s\n",
			current->comm, current->pid, msg);
}

static void dup_xol_work(struct callback_head *work)
{
	if (current->flags & PF_EXITING)
		return;

	if (!__create_xol_area(current->utask->dup_xol_addr) &&
			!fatal_signal_pending(current))
		uprobe_warn(current, "dup xol area");
}

/*
 * Called in context of a new clone/fork from copy_process.
 */
void uprobe_copy_process(struct task_struct *t, unsigned long flags)
{
	struct uprobe_task *utask = current->utask;
	struct mm_struct *mm = current->mm;
	struct xol_area *area;

	t->utask = NULL;

	if (!utask || !utask->return_instances)
		return;

	if (mm == t->mm && !(flags & CLONE_VFORK))
		return;

	if (dup_utask(t, utask))
		return uprobe_warn(t, "dup ret instances");

	/* The task can fork() after dup_xol_work() fails */
	area = mm->uprobes_state.xol_area;
	if (!area)
		return uprobe_warn(t, "dup xol area");

	if (mm == t->mm)
		return;

	t->utask->dup_xol_addr = area->vaddr;
	init_task_work(&t->utask->dup_xol_work, dup_xol_work);
	task_work_add(t, &t->utask->dup_xol_work, TWA_RESUME);
}

/*
 * The current area->vaddr notion assumes the trampoline address is
 * always equal to area->vaddr.
 *
 * Returns -1 in case the xol_area is not allocated.
 */
static unsigned long get_trampoline_vaddr(void)
{
	struct xol_area *area;
	unsigned long trampoline_vaddr = -1;

	/* Pairs with xol_add_vma() smp_store_release() */
	area = READ_ONCE(current->mm->uprobes_state.xol_area); /* ^^^ */
	if (area)
		trampoline_vaddr = area->vaddr;

	return trampoline_vaddr;
}
1845
static void cleanup_return_instances(struct uprobe_task *utask, bool chained,
					struct pt_regs *regs)
{
	struct return_instance *ri = utask->return_instances;
	enum rp_check ctx = chained ? RP_CHECK_CHAIN_CALL : RP_CHECK_CALL;

	while (ri && !arch_uretprobe_is_alive(ri, ctx, regs)) {
		ri = free_ret_instance(ri);
		utask->depth--;
	}
	utask->return_instances = ri;
}

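/*
 * Arm a uretprobe for the current function invocation: replace the
 * task's return address with the address of the xol trampoline and push
 * a return_instance recording the original return address.  If the
 * return address already points at the trampoline (a tail call into
 * another probed function), the new instance is marked "chained" and
 * inherits the original return address of the first caller.
 */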
static void prepare_uretprobe(struct uprobe *uprobe, struct pt_regs *regs)
{
	struct return_instance *ri;
	struct uprobe_task *utask;
	unsigned long orig_ret_vaddr, trampoline_vaddr;
	bool chained;

	if (!get_xol_area())
		return;

	utask = get_utask();
	if (!utask)
		return;

	if (utask->depth >= MAX_URETPROBE_DEPTH) {
		printk_ratelimited(KERN_INFO "uprobe: omit uretprobe due to"
				" nestedness limit pid/tgid=%d/%d\n",
				current->pid, current->tgid);
		return;
	}

	ri = kmalloc(sizeof(struct return_instance), GFP_KERNEL);
	if (!ri)
		return;

	trampoline_vaddr = get_trampoline_vaddr();
	orig_ret_vaddr = arch_uretprobe_hijack_return_addr(trampoline_vaddr, regs);
	if (orig_ret_vaddr == -1)
		goto fail;

	/* drop the entries invalidated by longjmp() */
	chained = (orig_ret_vaddr == trampoline_vaddr);
	cleanup_return_instances(utask, chained, regs);

	/*
	 * We don't want to keep the trampoline address on the stack; rather,
	 * we keep the original return address of the first caller through all
	 * the subsequent chained instances. This also makes breakpoint
	 * unwinding easier.
	 */
	if (chained) {
		if (!utask->return_instances) {
			/*
			 * This situation is not possible. Likely we have an
			 * attack from user-space.
			 */
			uprobe_warn(current, "handle tail call");
			goto fail;
		}
		orig_ret_vaddr = utask->return_instances->orig_ret_vaddr;
	}

	ri->uprobe = get_uprobe(uprobe);
	ri->func = instruction_pointer(regs);
	ri->stack = user_stack_pointer(regs);
	ri->orig_ret_vaddr = orig_ret_vaddr;
	ri->chained = chained;

	utask->depth++;
	ri->next = utask->return_instances;
	utask->return_instances = ri;

	return;
 fail:
	kfree(ri);
}

/* Prepare to single-step probed instruction out of line. */
static int
pre_ssout(struct uprobe *uprobe, struct pt_regs *regs, unsigned long bp_vaddr)
{
	struct uprobe_task *utask;
	unsigned long xol_vaddr;
	int err;

	utask = get_utask();
	if (!utask)
		return -ENOMEM;

	xol_vaddr = xol_get_insn_slot(uprobe);
	if (!xol_vaddr)
		return -ENOMEM;

	utask->xol_vaddr = xol_vaddr;
	utask->vaddr = bp_vaddr;

	err = arch_uprobe_pre_xol(&uprobe->arch, regs);
	if (unlikely(err)) {
		xol_free_insn_slot(current);
		return err;
	}

	utask->active_uprobe = uprobe;
	utask->state = UTASK_SSTEP;
	return 0;
}

/*
 * If we are singlestepping, then ensure this thread does not respond to
 * non-fatal signals until the singlestep completes. If the xol insn itself
 * triggers a signal, restart the original insn even if the task is
 * already SIGKILL'ed (since the coredump should report the correct ip).
 * This is even more important if the task has a handler for SIGSEGV etc.:
 * the _same_ instruction must be repeated after return from the signal
 * handler, and SSTEP could never finish otherwise.
 */
bool uprobe_deny_signal(void)
{
	struct task_struct *t = current;
	struct uprobe_task *utask = t->utask;

	if (likely(!utask || !utask->active_uprobe))
		return false;

	WARN_ON_ONCE(utask->state != UTASK_SSTEP);

	if (task_sigpending(t)) {
		spin_lock_irq(&t->sighand->siglock);
		clear_tsk_thread_flag(t, TIF_SIGPENDING);
		spin_unlock_irq(&t->sighand->siglock);

		if (__fatal_signal_pending(t) || arch_uprobe_xol_was_trapped(t)) {
			utask->state = UTASK_SSTEP_TRAPPED;
			set_tsk_thread_flag(t, TIF_UPROBE);
		}
	}

	return true;
}

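/*
 * Clear MMF_HAS_UPROBES unless some VMA in this mm may still contain a
 * breakpoint.  Erring on the side of keeping the flag set is safe: it
 * only means the slow trap path is taken once more.
 */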
static void mmf_recalc_uprobes(struct mm_struct *mm)
{
	VMA_ITERATOR(vmi, mm, 0);
	struct vm_area_struct *vma;

	for_each_vma(vmi, vma) {
		if (!valid_vma(vma, false))
			continue;
		/*
		 * This is not strictly accurate: we can race with
		 * uprobe_unregister() and see the already removed
		 * uprobe if delete_uprobe() was not yet called.
		 * Or this uprobe can be filtered out.
		 */
		if (vma_has_uprobes(vma, vma->vm_start, vma->vm_end))
			return;
	}

	clear_bit(MMF_HAS_UPROBES, &mm->flags);
}

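/*
 * Read the instruction at @vaddr and report whether it is (a variant of)
 * the breakpoint instruction.  Tries a cheap pagefault-disabled
 * __get_user() first and falls back to get_user_pages_remote() when the
 * page is not resident.
 */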
static int is_trap_at_addr(struct mm_struct *mm, unsigned long vaddr)
{
	struct page *page;
	uprobe_opcode_t opcode;
	int result;

	if (WARN_ON_ONCE(!IS_ALIGNED(vaddr, UPROBE_SWBP_INSN_SIZE)))
		return -EINVAL;

	pagefault_disable();
	result = __get_user(opcode, (uprobe_opcode_t __user *)vaddr);
	pagefault_enable();

	if (likely(result == 0))
		goto out;

	/*
	 * Any faults that occur here will not be accounted to the task.
	 * 'mm' *is* current->mm, but we treat this as a 'remote' access
	 * since it is essentially a kernel access to the memory.
	 */
	result = get_user_pages_remote(mm, vaddr, 1, FOLL_FORCE, &page,
			NULL, NULL);
	if (result < 0)
		return result;

	copy_from_page(page, vaddr, &opcode, UPROBE_SWBP_INSN_SIZE);
	put_page(page);
 out:
	/* This needs to return true for any variant of the trap insn */
	return is_trap_insn(&opcode);
}

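/*
 * Map a breakpoint address back to its uprobe: look up the VMA covering
 * @bp_vaddr, translate the address to a file offset, and search the
 * uprobes tree.  When no uprobe matches, *is_swbp tells the caller
 * whether a breakpoint instruction is nevertheless present (> 0),
 * absent (0), or the address was not mapped at all (-EFAULT).
 */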
static struct uprobe *find_active_uprobe(unsigned long bp_vaddr, int *is_swbp)
{
	struct mm_struct *mm = current->mm;
	struct uprobe *uprobe = NULL;
	struct vm_area_struct *vma;

	mmap_read_lock(mm);
	vma = vma_lookup(mm, bp_vaddr);
	if (vma) {
		if (valid_vma(vma, false)) {
			struct inode *inode = file_inode(vma->vm_file);
			loff_t offset = vaddr_to_offset(vma, bp_vaddr);

			uprobe = find_uprobe(inode, offset);
		}

		if (!uprobe)
			*is_swbp = is_trap_at_addr(mm, bp_vaddr);
	} else {
		*is_swbp = -EFAULT;
	}

	if (!uprobe && test_and_clear_bit(MMF_RECALC_UPROBES, &mm->flags))
		mmf_recalc_uprobes(mm);
	mmap_read_unlock(mm);

	return uprobe;
}

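/*
 * Invoke every consumer's ->handler for this breakpoint.  A consumer
 * may return UPROBE_HANDLER_REMOVE to ask that the breakpoint be
 * removed from the current mm; removal happens only if *all* consumers
 * agree (the return codes are AND-ed together).  If any consumer has a
 * ->ret_handler, a uretprobe is armed as well.
 *
 * Illustrative sketch of a consumer (hypothetical names, not part of
 * this file) whose handler unsubscribes this mm after the first hit:
 *
 *	static int my_handler(struct uprobe_consumer *uc, struct pt_regs *regs)
 *	{
 *		pr_info("hit at ip=%lx\n", instruction_pointer(regs));
 *		return UPROBE_HANDLER_REMOVE;
 *	}
 *
 *	static struct uprobe_consumer my_consumer = {
 *		.handler = my_handler,
 *	};
 *
 * (registered elsewhere via uprobe_register(inode, offset, &my_consumer))
 */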
static void handler_chain(struct uprobe *uprobe, struct pt_regs *regs)
{
	struct uprobe_consumer *uc;
	int remove = UPROBE_HANDLER_REMOVE;
	bool need_prep = false; /* prepare return uprobe, when needed */

	down_read(&uprobe->register_rwsem);
	for (uc = uprobe->consumers; uc; uc = uc->next) {
		int rc = 0;

		if (uc->handler) {
			rc = uc->handler(uc, regs);
			WARN(rc & ~UPROBE_HANDLER_MASK,
				"bad rc=0x%x from %ps()\n", rc, uc->handler);
		}

		if (uc->ret_handler)
			need_prep = true;

		remove &= rc;
	}

	if (need_prep && !remove)
		prepare_uretprobe(uprobe, regs); /* put bp at return */

	if (remove && uprobe->consumers) {
		WARN_ON(!uprobe_is_active(uprobe));
		unapply_uprobe(uprobe, current->mm);
	}
	up_read(&uprobe->register_rwsem);
}

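/*
 * Run every consumer's ->ret_handler for the returning function,
 * passing the address the probed function was entered at (ri->func).
 * A sketch of a return handler (hypothetical names):
 *
 *	static int my_ret_handler(struct uprobe_consumer *uc,
 *				  unsigned long func, struct pt_regs *regs)
 *	{
 *		pr_info("return from %lx, retval=%lx\n", func,
 *			regs_return_value(regs));
 *		return 0;
 *	}
 */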
static void
handle_uretprobe_chain(struct return_instance *ri, struct pt_regs *regs)
{
	struct uprobe *uprobe = ri->uprobe;
	struct uprobe_consumer *uc;

	down_read(&uprobe->register_rwsem);
	for (uc = uprobe->consumers; uc; uc = uc->next) {
		if (uc->ret_handler)
			uc->ret_handler(uc, ri->func, regs);
	}
	up_read(&uprobe->register_rwsem);
}

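/*
 * Skip over a run of chained instances, which all share one real return
 * address, and return the first instance of the next chain, or NULL if
 * this chain is the outermost one.
 */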
static struct return_instance *find_next_ret_chain(struct return_instance *ri)
{
	bool chained;

	do {
		chained = ri->chained;
		ri = ri->next; /* can't be NULL if chained */
	} while (chained);

	return ri;
}

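/*
 * Called when the task "returns" into the xol trampoline: pop the
 * return_instance stack chain by chain, run the ret_handlers of every
 * chain that is still alive, and restore the original return address.
 * A task that reaches the trampoline with no pending return instances
 * has a corrupted stack and gets SIGILL.
 */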
static void handle_trampoline(struct pt_regs *regs)
{
	struct uprobe_task *utask;
	struct return_instance *ri, *next;
	bool valid;

	utask = current->utask;
	if (!utask)
		goto sigill;

	ri = utask->return_instances;
	if (!ri)
		goto sigill;

	do {
		/*
		 * We should throw out the frames invalidated by longjmp().
		 * If this chain is valid, then the next one should be alive
		 * or NULL; the latter case means that nobody but ri->func
		 * could hit this trampoline on return. TODO: sigaltstack().
		 */
		next = find_next_ret_chain(ri);
		valid = !next || arch_uretprobe_is_alive(next, RP_CHECK_RET, regs);

		instruction_pointer_set(regs, ri->orig_ret_vaddr);
		do {
			if (valid)
				handle_uretprobe_chain(ri, regs);
			ri = free_ret_instance(ri);
			utask->depth--;
		} while (ri != next);
	} while (!valid);

	utask->return_instances = ri;
	return;

 sigill:
	uprobe_warn(current, "handle uretprobe, sending SIGILL.");
	force_sig(SIGILL);
}

bool __weak arch_uprobe_ignore(struct arch_uprobe *aup, struct pt_regs *regs)
{
	return false;
}

bool __weak arch_uretprobe_is_alive(struct return_instance *ret, enum rp_check ctx,
				struct pt_regs *regs)
{
	return true;
}

/*
 * Run the handler chain and ask the thread to single-step.
 * Ensure that non-fatal signals cannot interrupt the thread while it
 * singlesteps.
 */
static void handle_swbp(struct pt_regs *regs)
{
	struct uprobe *uprobe;
	unsigned long bp_vaddr;
	int is_swbp;

	bp_vaddr = uprobe_get_swbp_addr(regs);
	if (bp_vaddr == get_trampoline_vaddr())
		return handle_trampoline(regs);

	uprobe = find_active_uprobe(bp_vaddr, &is_swbp);
	if (!uprobe) {
		if (is_swbp > 0) {
			/* No matching uprobe; signal SIGTRAP. */
			force_sig(SIGTRAP);
		} else {
			/*
			 * Either we raced with uprobe_unregister() or we can't
			 * access this memory. The latter is only possible if
			 * another thread plays with our ->mm. In both cases
			 * we can simply restart. If this vma was unmapped we
			 * can pretend this insn was not executed yet and get
			 * the (correct) SIGSEGV after restart.
			 */
			instruction_pointer_set(regs, bp_vaddr);
		}
		return;
	}

	/* change it in advance for ->handler() and restart */
	instruction_pointer_set(regs, bp_vaddr);

	/*
	 * TODO: move copy_insn/etc into _register and remove this hack.
	 * After we hit the bp, _unregister + _register can install the
	 * new and not-yet-analyzed uprobe at the same address; restart.
	 */
	if (unlikely(!test_bit(UPROBE_COPY_INSN, &uprobe->flags)))
		goto out;

	/*
	 * Pairs with the smp_wmb() in prepare_uprobe().
	 *
	 * Guarantees that if we see the UPROBE_COPY_INSN bit set, then
	 * we must also see the stores to &uprobe->arch performed by the
	 * prepare_uprobe() call.
	 */
	smp_rmb();

	/* Tracing handlers use ->utask to communicate with fetch methods */
	if (!get_utask())
		goto out;

	if (arch_uprobe_ignore(&uprobe->arch, regs))
		goto out;

	handler_chain(uprobe, regs);

	if (arch_uprobe_skip_sstep(&uprobe->arch, regs))
		goto out;

	if (!pre_ssout(uprobe, regs, bp_vaddr))
		return;

	/* arch_uprobe_skip_sstep() succeeded, or restart if we can't singlestep */
out:
	put_uprobe(uprobe);
}


/*
 * Perform required fix-ups and disable singlestep.
 * Allow pending signals to take effect.
 */
static void handle_singlestep(struct uprobe_task *utask, struct pt_regs *regs)
{
	struct uprobe *uprobe;
	int err = 0;

	uprobe = utask->active_uprobe;
	if (utask->state == UTASK_SSTEP_ACK)
		err = arch_uprobe_post_xol(&uprobe->arch, regs);
	else if (utask->state == UTASK_SSTEP_TRAPPED)
		arch_uprobe_abort_xol(&uprobe->arch, regs);
	else
		WARN_ON_ONCE(1);

	put_uprobe(uprobe);
	utask->active_uprobe = NULL;
	utask->state = UTASK_RUNNING;
	xol_free_insn_slot(current);

	spin_lock_irq(&current->sighand->siglock);
	recalc_sigpending(); /* see uprobe_deny_signal() */
	spin_unlock_irq(&current->sighand->siglock);

	if (unlikely(err)) {
		uprobe_warn(current, "execute the probed insn, sending SIGILL.");
		force_sig(SIGILL);
	}
}


/*
 * On breakpoint hit, breakpoint notifier sets the TIF_UPROBE flag and
 * allows the thread to return from interrupt. After that handle_swbp()
 * sets utask->active_uprobe.
 *
 * On singlestep exception, singlestep notifier sets the TIF_UPROBE flag
 * and allows the thread to return from interrupt.
 *
 * While returning to userspace, the thread notices the TIF_UPROBE flag
 * and calls uprobe_notify_resume().
 */
void uprobe_notify_resume(struct pt_regs *regs)
{
	struct uprobe_task *utask;

	clear_thread_flag(TIF_UPROBE);

	utask = current->utask;
	if (utask && utask->active_uprobe)
		handle_singlestep(utask, regs);
	else
		handle_swbp(regs);
}


/*
 * uprobe_pre_sstep_notifier gets called from interrupt context as part of
 * the notifier mechanism. Set TIF_UPROBE flag and indicate breakpoint hit.
 */
int uprobe_pre_sstep_notifier(struct pt_regs *regs)
{
	if (!current->mm)
		return 0;

	if (!test_bit(MMF_HAS_UPROBES, &current->mm->flags) &&
	    (!current->utask || !current->utask->return_instances))
		return 0;

	set_thread_flag(TIF_UPROBE);
	return 1;
}


/*
 * uprobe_post_sstep_notifier gets called in interrupt context as part of
 * the notifier mechanism. Set TIF_UPROBE flag and indicate completion of
 * singlestep.
 */
int uprobe_post_sstep_notifier(struct pt_regs *regs)
{
	struct uprobe_task *utask = current->utask;

	if (!current->mm || !utask || !utask->active_uprobe)
		/* task is currently not uprobed */
		return 0;

	utask->state = UTASK_SSTEP_ACK;
	set_thread_flag(TIF_UPROBE);
	return 1;
}


static struct notifier_block uprobe_exception_nb = {
	.notifier_call		= arch_uprobe_exception_notify,
	.priority		= INT_MAX-1,	/* notified after kprobes, kgdb */
};

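/*
 * Boot-time setup: initialize the mmap hash locks and hook uprobes into
 * the die-notifier chain so breakpoint/singlestep exceptions reach
 * arch_uprobe_exception_notify().
 */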
void __init uprobes_init(void)
{
	int i;

	for (i = 0; i < UPROBES_HASH_SZ; i++)
		mutex_init(&uprobes_mmap_mutex[i]);

	BUG_ON(register_die_notifier(&uprobe_exception_nb));
}