// SPDX-License-Identifier: GPL-2.0
/*
 * functions to patch RO kernel text during runtime
 *
 * Copyright (c) 2019 Sven Schnelle <svens@stackframe.org>
 */

#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/kprobes.h>
#include <linux/mm.h>
#include <linux/stop_machine.h>

#include <asm/cacheflush.h>
#include <asm/fixmap.h>
#include <asm/patch.h>

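/*
 * Arguments for a single patch request, packaged so they can be handed to
 * the stop_machine() callback below.
 */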
struct patch {
	void *addr;
	u32 *insn;
	unsigned int len;
};

static DEFINE_RAW_SPINLOCK(patch_lock);

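/*
 * Map the page containing @addr through the fixmap slot @fixmap so that
 * read-only kernel or module text can be written. Takes patch_lock with
 * IRQs disabled; *need_unmap tells the caller whether patch_unmap() must be
 * called afterwards. If neither CONFIG_STRICT_KERNEL_RWX nor
 * CONFIG_STRICT_MODULE_RWX applies, the text is writable in place and @addr
 * is returned unchanged.
 */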
static void __kprobes *patch_map(void *addr, int fixmap, unsigned long *flags,
				 int *need_unmap)
{
	unsigned long uintaddr = (uintptr_t) addr;
	bool module = !core_kernel_text(uintaddr);
	struct page *page;

	*need_unmap = 0;
	if (module && IS_ENABLED(CONFIG_STRICT_MODULE_RWX))
		page = vmalloc_to_page(addr);
	else if (!module && IS_ENABLED(CONFIG_STRICT_KERNEL_RWX))
		page = virt_to_page(addr);
	else
		return addr;

	*need_unmap = 1;
	set_fixmap(fixmap, page_to_phys(page));
	raw_spin_lock_irqsave(&patch_lock, *flags);

	return (void *) (__fix_to_virt(fixmap) + (uintaddr & ~PAGE_MASK));
}

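/*
 * Tear down the temporary fixmap mapping created by patch_map() and release
 * patch_lock.
 */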
static void __kprobes patch_unmap(int fixmap, unsigned long *flags)
{
	clear_fixmap(fixmap);

	raw_spin_unlock_irqrestore(&patch_lock, *flags);
}

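/*
 * Copy @len bytes of instructions from @insn to the (possibly read-only)
 * kernel text at @addr, writing through a temporary fixmap alias and
 * remapping whenever the write crosses a page boundary. Caches and TLB for
 * the affected range are flushed so no stale aliases remain.
 *
 * patch_text() and patch_text_multiple() below wrap this in
 * stop_machine_cpuslocked() so that no other CPU executes the text while it
 * is being rewritten.
 */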
void __kprobes __patch_text_multiple(void *addr, u32 *insn, unsigned int len)
{
	unsigned long start = (unsigned long)addr;
	unsigned long end = (unsigned long)addr + len;
	unsigned long flags;
	u32 *p, *fixmap;
	int mapped;

	/* Make sure we don't have any aliases in cache */
	flush_kernel_dcache_range_asm(start, end);
	flush_kernel_icache_range_asm(start, end);
	flush_tlb_kernel_range(start, end);

	p = fixmap = patch_map(addr, FIX_TEXT_POKE0, &flags, &mapped);

	while (len >= 4) {
		*p++ = *insn++;
		addr += sizeof(u32);
		len -= sizeof(u32);
		if (len && offset_in_page(addr) == 0) {
			/*
			 * We're crossing a page boundary, so
			 * need to remap
			 */
			flush_kernel_dcache_range_asm((unsigned long)fixmap,
						      (unsigned long)p);
			flush_tlb_kernel_range((unsigned long)fixmap,
					       (unsigned long)p);
			if (mapped)
				patch_unmap(FIX_TEXT_POKE0, &flags);
			p = fixmap = patch_map(addr, FIX_TEXT_POKE0, &flags,
					       &mapped);
		}
	}

	flush_kernel_dcache_range_asm((unsigned long)fixmap, (unsigned long)p);
	flush_tlb_kernel_range((unsigned long)fixmap, (unsigned long)p);
	if (mapped)
		patch_unmap(FIX_TEXT_POKE0, &flags);
}

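/* Patch a single 32-bit instruction word at @addr, without stop_machine(). */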
void __kprobes __patch_text(void *addr, u32 insn)
{
	__patch_text_multiple(addr, &insn, sizeof(insn));
}

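/*
 * stop_machine() callback: runs on one CPU while all others spin with
 * interrupts disabled.
 */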
static int __kprobes patch_text_stop_machine(void *data)
{
	struct patch *patch = data;

	__patch_text_multiple(patch->addr, patch->insn, patch->len);
	return 0;
}

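/*
 * patch_text - replace one instruction word in live kernel text
 * @addr: address of the instruction to patch
 * @insn: new instruction word
 *
 * Uses stop_machine_cpuslocked() so no other CPU executes the text while it
 * is rewritten. A minimal usage sketch (names and the opcode value are
 * illustrative only; callers supply a valid parisc instruction encoding):
 *
 *	u32 new_insn = ...;
 *	patch_text(site_addr, new_insn);
 */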
void __kprobes patch_text(void *addr, unsigned int insn)
{
	struct patch patch = {
		.addr = addr,
		.insn = &insn,
		.len = sizeof(insn),
	};

	stop_machine_cpuslocked(patch_text_stop_machine, &patch, NULL);
}

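/*
 * Like patch_text(), but replaces @len bytes of instructions starting at
 * @addr in one stop_machine_cpuslocked() critical section.
 */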
void __kprobes patch_text_multiple(void *addr, u32 *insn, unsigned int len)
{
	struct patch patch = {
		.addr = addr,
		.insn = insn,
		.len = len
	};

	stop_machine_cpuslocked(patch_text_stop_machine, &patch, NULL);
}