// SPDX-License-Identifier: GPL-2.0
/*
 * KCSAN core runtime.
 *
 * Copyright (C) 2019, Google LLC.
 */

#define pr_fmt(fmt) "kcsan: " fmt

#include <linux/atomic.h>
#include <linux/bug.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/minmax.h>
#include <linux/moduleparam.h>
#include <linux/percpu.h>
#include <linux/preempt.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/uaccess.h>

#include "encoding.h"
#include "kcsan.h"
#include "permissive.h"

static bool kcsan_early_enable = IS_ENABLED(CONFIG_KCSAN_EARLY_ENABLE);
unsigned int kcsan_udelay_task = CONFIG_KCSAN_UDELAY_TASK;
unsigned int kcsan_udelay_interrupt = CONFIG_KCSAN_UDELAY_INTERRUPT;
static long kcsan_skip_watch = CONFIG_KCSAN_SKIP_WATCH;
static bool kcsan_interrupt_watcher = IS_ENABLED(CONFIG_KCSAN_INTERRUPT_WATCHER);

#ifdef MODULE_PARAM_PREFIX
#undef MODULE_PARAM_PREFIX
#endif
#define MODULE_PARAM_PREFIX "kcsan."
module_param_named(early_enable, kcsan_early_enable, bool, 0);
module_param_named(udelay_task, kcsan_udelay_task, uint, 0644);
module_param_named(udelay_interrupt, kcsan_udelay_interrupt, uint, 0644);
module_param_named(skip_watch, kcsan_skip_watch, long, 0644);
module_param_named(interrupt_watcher, kcsan_interrupt_watcher, bool, 0444);

#ifdef CONFIG_KCSAN_WEAK_MEMORY
static bool kcsan_weak_memory = true;
module_param_named(weak_memory, kcsan_weak_memory, bool, 0644);
#else
#define kcsan_weak_memory false
#endif

bool kcsan_enabled;

/* Per-CPU kcsan_ctx for interrupts */
static DEFINE_PER_CPU(struct kcsan_ctx, kcsan_cpu_ctx) = {
	.scoped_accesses	= {LIST_POISON1, NULL},
};

/*
 * Helper macros to index into adjacent slots, starting from address slot
 * itself, followed by the right and left slots.
 *
 * The purpose is 2-fold:
 *
 * 1. if during insertion the address slot is already occupied, check if
 *    any adjacent slots are free;
 * 2. accesses that straddle a slot boundary due to size that exceeds a
 *    slot's range may check adjacent slots if any watchpoint matches.
 *
 * Note that accesses with very large size may still miss a watchpoint; however,
 * given this should be rare, this is a reasonable trade-off to make, since this
 * will avoid:
 *
 * 1. excessive contention between watchpoint checks and setup;
 * 2. larger number of simultaneous watchpoints without sacrificing
 *    performance.
 *
 * Example: SLOT_IDX values for KCSAN_CHECK_ADJACENT=1, where i is [0, 1, 2]:
 *
 *	slot=0:  [ 1,  2,  0]
 *	slot=9:  [10, 11,  9]
 *	slot=63: [64, 65, 63]
 */
#define SLOT_IDX(slot, i) (slot + ((i + KCSAN_CHECK_ADJACENT) % NUM_SLOTS))

/*
 * SLOT_IDX_FAST is used in the fast-path. Not first checking the address's primary
 * slot (middle) is fine if we assume that races occur rarely. The set of
 * indices {SLOT_IDX(slot, i) | i in [0, NUM_SLOTS)} is equivalent to
 * {SLOT_IDX_FAST(slot, i) | i in [0, NUM_SLOTS)}.
 */
#define SLOT_IDX_FAST(slot, i) (slot + i)
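
/*
 * Worked example, assuming the default KCSAN_CHECK_ADJACENT == 1 (and thus
 * NUM_SLOTS == 3): for slot=9, SLOT_IDX(9, i) with i in [0, 2] yields
 * {10, 11, 9}, while SLOT_IDX_FAST(9, i) yields {9, 10, 11}: the same set of
 * indices, merely probed in a different order. The fast-path may therefore use
 * the cheaper SLOT_IDX_FAST without the modulo, at the cost of not checking
 * the primary (middle) slot first.
 */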

/*
 * Watchpoints, with each entry encoded as defined in encoding.h: in order to be
 * able to safely update and access a watchpoint without introducing locking
 * overhead, we encode each watchpoint as a single atomic long. The initial
 * zero-initialized state matches INVALID_WATCHPOINT.
 *
 * Add NUM_SLOTS-1 entries to account for overflow; this helps avoid having to
 * use more complicated SLOT_IDX_FAST calculation with modulo in the fast-path.
 */
static atomic_long_t watchpoints[CONFIG_KCSAN_NUM_WATCHPOINTS + NUM_SLOTS-1];

/*
 * Instructions to skip watching counter, used in should_watch(). We use a
 * per-CPU counter to avoid excessive contention.
 */
static DEFINE_PER_CPU(long, kcsan_skip);

/* For kcsan_prandom_u32_max(). */
static DEFINE_PER_CPU(u32, kcsan_rand_state);

static __always_inline atomic_long_t *find_watchpoint(unsigned long addr,
						       size_t size,
						       bool expect_write,
						       long *encoded_watchpoint)
{
	const int slot = watchpoint_slot(addr);
	const unsigned long addr_masked = addr & WATCHPOINT_ADDR_MASK;
	atomic_long_t *watchpoint;
	unsigned long wp_addr_masked;
	size_t wp_size;
	bool is_write;
	int i;

	BUILD_BUG_ON(CONFIG_KCSAN_NUM_WATCHPOINTS < NUM_SLOTS);

	for (i = 0; i < NUM_SLOTS; ++i) {
		watchpoint = &watchpoints[SLOT_IDX_FAST(slot, i)];
		*encoded_watchpoint = atomic_long_read(watchpoint);
		if (!decode_watchpoint(*encoded_watchpoint, &wp_addr_masked,
				       &wp_size, &is_write))
			continue;

		if (expect_write && !is_write)
			continue;

		/* Check if the watchpoint matches the access. */
		if (matching_access(wp_addr_masked, wp_size, addr_masked, size))
			return watchpoint;
	}

	return NULL;
}

static inline atomic_long_t *
insert_watchpoint(unsigned long addr, size_t size, bool is_write)
{
	const int slot = watchpoint_slot(addr);
	const long encoded_watchpoint = encode_watchpoint(addr, size, is_write);
	atomic_long_t *watchpoint;
	int i;

	/* Check slot index logic, ensuring we stay within array bounds. */
	BUILD_BUG_ON(SLOT_IDX(0, 0) != KCSAN_CHECK_ADJACENT);
	BUILD_BUG_ON(SLOT_IDX(0, KCSAN_CHECK_ADJACENT+1) != 0);
	BUILD_BUG_ON(SLOT_IDX(CONFIG_KCSAN_NUM_WATCHPOINTS-1, KCSAN_CHECK_ADJACENT) != ARRAY_SIZE(watchpoints)-1);
	BUILD_BUG_ON(SLOT_IDX(CONFIG_KCSAN_NUM_WATCHPOINTS-1, KCSAN_CHECK_ADJACENT+1) != ARRAY_SIZE(watchpoints) - NUM_SLOTS);

	for (i = 0; i < NUM_SLOTS; ++i) {
		long expect_val = INVALID_WATCHPOINT;

		/* Try to acquire this slot. */
		watchpoint = &watchpoints[SLOT_IDX(slot, i)];
		if (atomic_long_try_cmpxchg_relaxed(watchpoint, &expect_val, encoded_watchpoint))
			return watchpoint;
	}

	return NULL;
}

/*
 * Return true if watchpoint was successfully consumed, false otherwise.
 *
 * This may return false if:
 *
 *	1. another thread already consumed the watchpoint;
 *	2. the thread that set up the watchpoint already removed it;
 *	3. the watchpoint was removed and then re-used.
 */
static __always_inline bool
try_consume_watchpoint(atomic_long_t *watchpoint, long encoded_watchpoint)
{
	return atomic_long_try_cmpxchg_relaxed(watchpoint, &encoded_watchpoint, CONSUMED_WATCHPOINT);
}

/* Return true if watchpoint was not touched, false if already consumed. */
static inline bool consume_watchpoint(atomic_long_t *watchpoint)
{
	return atomic_long_xchg_relaxed(watchpoint, CONSUMED_WATCHPOINT) != CONSUMED_WATCHPOINT;
}

/* Remove the watchpoint -- its slot may be reused after. */
static inline void remove_watchpoint(atomic_long_t *watchpoint)
{
	atomic_long_set(watchpoint, INVALID_WATCHPOINT);
}

static __always_inline struct kcsan_ctx *get_ctx(void)
{
	/*
	 * In interrupts, use raw_cpu_ptr to avoid unnecessary checks, that would
	 * also result in calls that generate warnings in uaccess regions.
	 */
	return in_task() ? &current->kcsan_ctx : raw_cpu_ptr(&kcsan_cpu_ctx);
}

static __always_inline void
check_access(const volatile void *ptr, size_t size, int type, unsigned long ip);

/* Check scoped accesses; never inline because this is a slow-path! */
static noinline void kcsan_check_scoped_accesses(void)
{
	struct kcsan_ctx *ctx = get_ctx();
	struct kcsan_scoped_access *scoped_access;

	if (ctx->disable_scoped)
		return;

	ctx->disable_scoped++;
	list_for_each_entry(scoped_access, &ctx->scoped_accesses, list) {
		check_access(scoped_access->ptr, scoped_access->size,
			     scoped_access->type, scoped_access->ip);
	}
	ctx->disable_scoped--;
}

/* Rules for generic atomic accesses. Called from fast-path. */
static __always_inline bool
is_atomic(struct kcsan_ctx *ctx, const volatile void *ptr, size_t size, int type)
{
	if (type & KCSAN_ACCESS_ATOMIC)
		return true;

	/*
	 * Unless explicitly declared atomic, never consider an assertion access
	 * as atomic. This allows using them also in atomic regions, such as
	 * seqlocks, without implicitly changing their semantics.
	 */
	if (type & KCSAN_ACCESS_ASSERT)
		return false;

	if (IS_ENABLED(CONFIG_KCSAN_ASSUME_PLAIN_WRITES_ATOMIC) &&
	    (type & KCSAN_ACCESS_WRITE) && size <= sizeof(long) &&
	    !(type & KCSAN_ACCESS_COMPOUND) && IS_ALIGNED((unsigned long)ptr, size))
		return true; /* Assume aligned writes up to word size are atomic. */

	if (ctx->atomic_next > 0) {
		/*
		 * Because we do not have separate contexts for nested
		 * interrupts, in case atomic_next is set, we simply assume that
		 * the outer interrupt set atomic_next. In the worst case, we
		 * will conservatively consider operations as atomic. This is a
		 * reasonable trade-off to make, since this case should be
		 * extremely rare; however, even if extremely rare, it could
		 * lead to false positives otherwise.
		 */
		if ((hardirq_count() >> HARDIRQ_SHIFT) < 2)
			--ctx->atomic_next; /* in task, or outer interrupt */
		return true;
	}

	return ctx->atomic_nest_count > 0 || ctx->in_flat_atomic;
}
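
/*
 * Example, assuming CONFIG_KCSAN_ASSUME_PLAIN_WRITES_ATOMIC=y: a naturally
 * aligned plain write up to word size, such as "x = 1;" on a 4-byte int, is
 * treated as atomic by the rule above and never selected for a watchpoint,
 * whereas a plain read of the same variable is not and may still be watched.
 * With CONFIG_KCSAN_STRICT=y this assumption is not available, and both sides
 * are treated as plain accesses.
 */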

static __always_inline bool
should_watch(struct kcsan_ctx *ctx, const volatile void *ptr, size_t size, int type)
{
	/*
	 * Never set up watchpoints when memory operations are atomic.
	 *
	 * Need to check this first, before kcsan_skip check below: (1) atomics
	 * should not count towards skipped instructions, and (2) to actually
	 * decrement kcsan_atomic_next for consecutive instruction stream.
	 */
	if (is_atomic(ctx, ptr, size, type))
		return false;

	if (this_cpu_dec_return(kcsan_skip) >= 0)
		return false;

	/*
	 * NOTE: If we get here, kcsan_skip must always be reset in slow path
	 * via reset_kcsan_skip() to avoid underflow.
	 */

	/* this operation should be watched */
	return true;
}

/*
 * Returns a pseudo-random number in interval [0, ep_ro). Simple linear
 * congruential generator, using constants from "Numerical Recipes".
 */
static u32 kcsan_prandom_u32_max(u32 ep_ro)
{
	u32 state = this_cpu_read(kcsan_rand_state);

	state = 1664525 * state + 1013904223;
	this_cpu_write(kcsan_rand_state, state);

	return state % ep_ro;
}

static inline void reset_kcsan_skip(void)
{
	long skip_count = kcsan_skip_watch -
			  (IS_ENABLED(CONFIG_KCSAN_SKIP_WATCH_RANDOMIZE) ?
				   kcsan_prandom_u32_max(kcsan_skip_watch) :
				   0);
	this_cpu_write(kcsan_skip, skip_count);
}

static __always_inline bool kcsan_is_enabled(struct kcsan_ctx *ctx)
{
	return READ_ONCE(kcsan_enabled) && !ctx->disable_count;
}

/* Introduce delay depending on context and configuration. */
static void delay_access(int type)
{
	unsigned int delay = in_task() ? kcsan_udelay_task : kcsan_udelay_interrupt;
	/* For certain access types, skew the random delay to be longer. */
	unsigned int skew_delay_order =
		(type & (KCSAN_ACCESS_COMPOUND | KCSAN_ACCESS_ASSERT)) ? 1 : 0;

	delay -= IS_ENABLED(CONFIG_KCSAN_DELAY_RANDOMIZE) ?
			 kcsan_prandom_u32_max(delay >> skew_delay_order) :
			 0;
	udelay(delay);
}
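
/*
 * Worked example, assuming CONFIG_KCSAN_DELAY_RANDOMIZE=y and
 * kcsan_udelay_task == 80: for a plain access in task context,
 * skew_delay_order is 0 and the subtracted random value lies in [0, 80), so
 * the final delay is in (0, 80] microseconds; for a compound or ASSERT access,
 * skew_delay_order is 1, the random value lies in [0, 40), and the delay stays
 * in (40, 80], biasing such accesses towards longer watch windows.
 */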

/*
 * Reads the instrumented memory for value change detection; value change
 * detection is currently done for accesses up to a size of 8 bytes.
 */
static __always_inline u64 read_instrumented_memory(const volatile void *ptr, size_t size)
{
	switch (size) {
	case 1:  return READ_ONCE(*(const u8 *)ptr);
	case 2:  return READ_ONCE(*(const u16 *)ptr);
	case 4:  return READ_ONCE(*(const u32 *)ptr);
	case 8:  return READ_ONCE(*(const u64 *)ptr);
	default: return 0; /* Ignore; we do not diff the values. */
	}
}

void kcsan_save_irqtrace(struct task_struct *task)
{
#ifdef CONFIG_TRACE_IRQFLAGS
	task->kcsan_save_irqtrace = task->irqtrace;
#endif
}

void kcsan_restore_irqtrace(struct task_struct *task)
{
#ifdef CONFIG_TRACE_IRQFLAGS
	task->irqtrace = task->kcsan_save_irqtrace;
#endif
}

static __always_inline int get_kcsan_stack_depth(void)
{
#ifdef CONFIG_KCSAN_WEAK_MEMORY
	return current->kcsan_stack_depth;
#else
	BUILD_BUG();
	return 0;
#endif
}

static __always_inline void add_kcsan_stack_depth(int val)
{
#ifdef CONFIG_KCSAN_WEAK_MEMORY
	current->kcsan_stack_depth += val;
#else
	BUILD_BUG();
#endif
}

static __always_inline struct kcsan_scoped_access *get_reorder_access(struct kcsan_ctx *ctx)
{
#ifdef CONFIG_KCSAN_WEAK_MEMORY
	return ctx->disable_scoped ? NULL : &ctx->reorder_access;
#else
	return NULL;
#endif
}

static __always_inline bool
find_reorder_access(struct kcsan_ctx *ctx, const volatile void *ptr, size_t size,
		    int type, unsigned long ip)
{
	struct kcsan_scoped_access *reorder_access = get_reorder_access(ctx);

	if (!reorder_access)
		return false;

	/*
	 * Note: If accesses are repeated while reorder_access is identical,
	 * never matches the new access, because !(type & KCSAN_ACCESS_SCOPED).
	 */
	return reorder_access->ptr == ptr && reorder_access->size == size &&
	       reorder_access->type == type && reorder_access->ip == ip;
}

static inline void
set_reorder_access(struct kcsan_ctx *ctx, const volatile void *ptr, size_t size,
		   int type, unsigned long ip)
{
	struct kcsan_scoped_access *reorder_access = get_reorder_access(ctx);

	if (!reorder_access || !kcsan_weak_memory)
		return;

	/*
	 * To avoid nested interrupts or scheduler (which share kcsan_ctx)
	 * reading an inconsistent reorder_access, ensure that the below has
	 * exclusive access to reorder_access by disallowing concurrent use.
	 */
	ctx->disable_scoped++;
	barrier();
	reorder_access->ptr		= ptr;
	reorder_access->size		= size;
	reorder_access->type		= type | KCSAN_ACCESS_SCOPED;
	reorder_access->ip		= ip;
	reorder_access->stack_depth	= get_kcsan_stack_depth();
	barrier();
	ctx->disable_scoped--;
}
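
/*
 * Sketch of what the reorder_access machinery models, assuming
 * CONFIG_KCSAN_WEAK_MEMORY=y (variable names purely illustrative): given a
 * writer such as
 *
 *	shared_data = 42;	// plain write, remembered as reorder_access
 *	flag = 1;		// required smp_wmb()/release before this is missing
 *
 * the plain write to shared_data is carried along as reorder_access and
 * re-checked by later check_access() calls and at function exit, simulating
 * its reordering past the missing barrier. A barrier instrumented via
 * __kcsan_mb/wmb/release() invalidates it (by setting size to 0), so correctly
 * ordered code does not produce such reports.
 */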

/*
 * Pull everything together: check_access() below contains the performance
 * critical operations; the fast-path (including check_access) functions should
 * all be inlinable by the instrumentation functions.
 *
 * The slow-path functions (kcsan_found_watchpoint, kcsan_setup_watchpoint) are
 * non-inlinable -- note that we prefix these with "kcsan_" to ensure they can
 * be filtered from the stacktrace, as well as give them unique names for the
 * UACCESS whitelist of objtool. Each function uses user_access_save/restore(),
 * since they do not access any user memory, but instrumentation is still
 * emitted in UACCESS regions.
 */

static noinline void kcsan_found_watchpoint(const volatile void *ptr,
					    size_t size,
					    int type,
					    unsigned long ip,
					    atomic_long_t *watchpoint,
					    long encoded_watchpoint)
{
	const bool is_assert = (type & KCSAN_ACCESS_ASSERT) != 0;
	struct kcsan_ctx *ctx = get_ctx();
	unsigned long flags;
	bool consumed;

	/*
	 * We know a watchpoint exists. Let's try to keep the race-window
	 * between here and finally consuming the watchpoint below as small as
	 * possible -- avoid unnecessarily complex code until consumed.
	 */

	if (!kcsan_is_enabled(ctx))
		return;

	/*
	 * The access_mask check relies on value-change comparison. To avoid
	 * reporting a race where e.g. the writer set up the watchpoint, but the
	 * reader has access_mask!=0, we have to ignore the found watchpoint.
	 *
	 * reorder_access is never created from an access with access_mask set.
	 */
	if (ctx->access_mask && !find_reorder_access(ctx, ptr, size, type, ip))
		return;

	/*
	 * If the other thread does not want to ignore the access, and there was
	 * a value change as a result of this thread's operation, we will still
	 * generate a report of unknown origin.
	 *
	 * Use CONFIG_KCSAN_REPORT_RACE_UNKNOWN_ORIGIN=n to filter.
	 */
	if (!is_assert && kcsan_ignore_address(ptr))
		return;

	/*
	 * Consuming the watchpoint must be guarded by kcsan_is_enabled() to
	 * avoid erroneously triggering reports if the context is disabled.
	 */
	consumed = try_consume_watchpoint(watchpoint, encoded_watchpoint);

	/* keep this after try_consume_watchpoint */
	flags = user_access_save();

	if (consumed) {
		kcsan_save_irqtrace(current);
		kcsan_report_set_info(ptr, size, type, ip, watchpoint - watchpoints);
		kcsan_restore_irqtrace(current);
	} else {
		/*
		 * The other thread may not print any diagnostics, as it has
		 * already removed the watchpoint, or another thread consumed
		 * the watchpoint before this thread.
		 */
		atomic_long_inc(&kcsan_counters[KCSAN_COUNTER_REPORT_RACES]);
	}

	if (is_assert)
		atomic_long_inc(&kcsan_counters[KCSAN_COUNTER_ASSERT_FAILURES]);
	else
		atomic_long_inc(&kcsan_counters[KCSAN_COUNTER_DATA_RACES]);

	user_access_restore(flags);
}

static noinline void
kcsan_setup_watchpoint(const volatile void *ptr, size_t size, int type, unsigned long ip)
{
	const bool is_write = (type & KCSAN_ACCESS_WRITE) != 0;
	const bool is_assert = (type & KCSAN_ACCESS_ASSERT) != 0;
	atomic_long_t *watchpoint;
	u64 old, new, diff;
	enum kcsan_value_change value_change = KCSAN_VALUE_CHANGE_MAYBE;
	bool interrupt_watcher = kcsan_interrupt_watcher;
	unsigned long ua_flags = user_access_save();
	struct kcsan_ctx *ctx = get_ctx();
	unsigned long access_mask = ctx->access_mask;
	unsigned long irq_flags = 0;
	bool is_reorder_access;

	/*
	 * Always reset kcsan_skip counter in slow-path to avoid underflow; see
	 * should_watch().
	 */
	reset_kcsan_skip();

	if (!kcsan_is_enabled(ctx))
		goto out;

	/*
	 * Check to-ignore addresses after kcsan_is_enabled(), as we may access
	 * memory that is not yet initialized during early boot.
	 */
	if (!is_assert && kcsan_ignore_address(ptr))
		goto out;

	if (!check_encodable((unsigned long)ptr, size)) {
		atomic_long_inc(&kcsan_counters[KCSAN_COUNTER_UNENCODABLE_ACCESSES]);
		goto out;
	}

	/*
	 * The local CPU cannot observe reordering of its own accesses, and
	 * therefore we need to take care of 2 cases to avoid false positives:
	 *
	 *	1. Races of the reordered access with interrupts. To avoid, if
	 *	   the current access is reorder_access, disable interrupts.
	 *	2. Avoid races of scoped accesses from nested interrupts (below).
	 */
	is_reorder_access = find_reorder_access(ctx, ptr, size, type, ip);
	if (is_reorder_access)
		interrupt_watcher = false;
	/*
	 * Avoid races of scoped accesses from nested interrupts (or scheduler).
	 * Assume setting up a watchpoint for a non-scoped (normal) access that
	 * also conflicts with a current scoped access. In a nested interrupt,
	 * which shares the context, it would check a conflicting scoped access.
	 * To avoid, disable scoped access checking.
	 */
	ctx->disable_scoped++;

	/*
	 * Save and restore the IRQ state trace touched by KCSAN, since KCSAN's
	 * runtime is entered for every memory access, and potentially useful
	 * information is lost if dirtied by KCSAN.
	 */
	kcsan_save_irqtrace(current);
	if (!interrupt_watcher)
		local_irq_save(irq_flags);

	watchpoint = insert_watchpoint((unsigned long)ptr, size, is_write);
	if (watchpoint == NULL) {
		/*
		 * Out of capacity: the size of 'watchpoints', and the frequency
		 * with which should_watch() returns true should be tweaked so
		 * that this case happens very rarely.
		 */
		atomic_long_inc(&kcsan_counters[KCSAN_COUNTER_NO_CAPACITY]);
		goto out_unlock;
	}

	atomic_long_inc(&kcsan_counters[KCSAN_COUNTER_SETUP_WATCHPOINTS]);
	atomic_long_inc(&kcsan_counters[KCSAN_COUNTER_USED_WATCHPOINTS]);

	/*
	 * Read the current value, to later check and infer a race if the data
	 * was modified via a non-instrumented access, e.g. from a device.
	 */
	old = is_reorder_access ? 0 : read_instrumented_memory(ptr, size);

	/*
	 * Delay this thread, to increase probability of observing a racy
	 * conflicting access.
	 */
	delay_access(type);

	/*
	 * Re-read value, and check if it is as expected; if not, we infer a
	 * racy access.
	 */
	if (!is_reorder_access) {
		new = read_instrumented_memory(ptr, size);
	} else {
		/*
		 * Reordered accesses cannot be used for value change detection,
		 * because the memory location may no longer be accessible and
		 * could result in a fault.
		 */
		new = 0;
		access_mask = 0;
	}

	diff = old ^ new;
	if (access_mask)
		diff &= access_mask;

	/*
	 * Check if we observed a value change.
	 *
	 * Also check if the data race should be ignored (the rules depend on
	 * non-zero diff); if it is to be ignored, the below rules for
	 * KCSAN_VALUE_CHANGE_MAYBE apply.
	 */
	if (diff && !kcsan_ignore_data_race(size, type, old, new, diff))
		value_change = KCSAN_VALUE_CHANGE_TRUE;

	/* Check if this access raced with another. */
	if (!consume_watchpoint(watchpoint)) {
		/*
		 * Depending on the access type, map a value_change of MAYBE to
		 * TRUE (always report) or FALSE (never report).
		 */
		if (value_change == KCSAN_VALUE_CHANGE_MAYBE) {
			if (access_mask != 0) {
				/*
				 * For access with access_mask, we require a
				 * value-change, as it is likely that races on
				 * ~access_mask bits are expected.
				 */
				value_change = KCSAN_VALUE_CHANGE_FALSE;
			} else if (size > 8 || is_assert) {
				/* Always assume a value-change. */
				value_change = KCSAN_VALUE_CHANGE_TRUE;
			}
		}

		/*
		 * No need to increment 'data_races' counter, as the racing
		 * thread already did.
		 *
		 * Count 'assert_failures' for each failed ASSERT access,
		 * therefore both this thread and the racing thread may
		 * increment this counter.
		 */
		if (is_assert && value_change == KCSAN_VALUE_CHANGE_TRUE)
			atomic_long_inc(&kcsan_counters[KCSAN_COUNTER_ASSERT_FAILURES]);

		kcsan_report_known_origin(ptr, size, type, ip,
					  value_change, watchpoint - watchpoints,
					  old, new, access_mask);
	} else if (value_change == KCSAN_VALUE_CHANGE_TRUE) {
		/* Inferring a race, since the value should not have changed. */

		atomic_long_inc(&kcsan_counters[KCSAN_COUNTER_RACES_UNKNOWN_ORIGIN]);
		if (is_assert)
			atomic_long_inc(&kcsan_counters[KCSAN_COUNTER_ASSERT_FAILURES]);

		if (IS_ENABLED(CONFIG_KCSAN_REPORT_RACE_UNKNOWN_ORIGIN) || is_assert) {
			kcsan_report_unknown_origin(ptr, size, type, ip,
						    old, new, access_mask);
		}
	}

	/*
	 * Remove watchpoint; must be after reporting, since the slot may be
	 * reused after this point.
	 */
	remove_watchpoint(watchpoint);
	atomic_long_dec(&kcsan_counters[KCSAN_COUNTER_USED_WATCHPOINTS]);

out_unlock:
	if (!interrupt_watcher)
		local_irq_restore(irq_flags);
	kcsan_restore_irqtrace(current);
	ctx->disable_scoped--;

	/*
	 * Reordered accesses cannot be used for value change detection,
	 * therefore never consider for reordering if access_mask is set.
	 * ASSERT_EXCLUSIVE are not real accesses, ignore them as well.
	 */
	if (!access_mask && !is_assert)
		set_reorder_access(ctx, ptr, size, type, ip);
out:
	user_access_restore(ua_flags);
}

static __always_inline void
check_access(const volatile void *ptr, size_t size, int type, unsigned long ip)
{
	atomic_long_t *watchpoint;
	long encoded_watchpoint;

	/*
	 * Do nothing for 0 sized check; this comparison will be optimized out
	 * for constant sized instrumentation (__tsan_{read,write}N).
	 */
	if (unlikely(size == 0))
		return;

again:
	/*
	 * Avoid user_access_save in fast-path: find_watchpoint is safe without
	 * user_access_save, as the address that ptr points to is only used to
	 * check if a watchpoint exists; ptr is never dereferenced.
	 */
	watchpoint = find_watchpoint((unsigned long)ptr, size,
				     !(type & KCSAN_ACCESS_WRITE),
				     &encoded_watchpoint);
	/*
	 * It is safe to check kcsan_is_enabled() after find_watchpoint in the
	 * slow-path, as long as no state changes that cause a race to be
	 * detected and reported have occurred until kcsan_is_enabled() is
	 * checked.
	 */

	if (unlikely(watchpoint != NULL))
		kcsan_found_watchpoint(ptr, size, type, ip, watchpoint, encoded_watchpoint);
	else {
		struct kcsan_ctx *ctx = get_ctx(); /* Call only once in fast-path. */

		if (unlikely(should_watch(ctx, ptr, size, type))) {
			kcsan_setup_watchpoint(ptr, size, type, ip);
			return;
		}

		if (!(type & KCSAN_ACCESS_SCOPED)) {
			struct kcsan_scoped_access *reorder_access = get_reorder_access(ctx);

			if (reorder_access) {
				/*
				 * reorder_access check: simulates reordering of
				 * the access after subsequent operations.
				 */
				ptr = reorder_access->ptr;
				type = reorder_access->type;
				ip = reorder_access->ip;
				/*
				 * Upon a nested interrupt, this context's
				 * reorder_access can be modified (shared ctx).
				 * We know that upon return, reorder_access is
				 * always invalidated by setting size to 0 via
				 * __tsan_func_exit(). Therefore we must read
				 * and check size after the other fields.
				 */
				barrier();
				size = READ_ONCE(reorder_access->size);
				if (size)
					goto again;
			}
		}

		/*
		 * Always checked last, right before returning from runtime;
		 * if reorder_access is valid, checked after it was checked.
		 */
		if (unlikely(ctx->scoped_accesses.prev))
			kcsan_check_scoped_accesses();
	}
}

/* === Public interface ===================================================== */

void __init kcsan_init(void)
{
	int cpu;

	BUG_ON(!in_task());

	for_each_possible_cpu(cpu)
		per_cpu(kcsan_rand_state, cpu) = (u32)get_cycles();

	/*
	 * We are in the init task, and no other tasks should be running;
	 * WRITE_ONCE without memory barrier is sufficient.
	 */
	if (kcsan_early_enable) {
		pr_info("enabled early\n");
		WRITE_ONCE(kcsan_enabled, true);
	}

	if (IS_ENABLED(CONFIG_KCSAN_REPORT_VALUE_CHANGE_ONLY) ||
	    IS_ENABLED(CONFIG_KCSAN_ASSUME_PLAIN_WRITES_ATOMIC) ||
	    IS_ENABLED(CONFIG_KCSAN_PERMISSIVE) ||
	    IS_ENABLED(CONFIG_KCSAN_IGNORE_ATOMICS)) {
		pr_warn("non-strict mode configured - use CONFIG_KCSAN_STRICT=y to see all data races\n");
	} else {
		pr_info("strict mode configured\n");
	}
}

/* === Exported interface =================================================== */

void kcsan_disable_current(void)
{
	++get_ctx()->disable_count;
}
EXPORT_SYMBOL(kcsan_disable_current);

void kcsan_enable_current(void)
{
	if (get_ctx()->disable_count-- == 0) {
		/*
		 * Warn if kcsan_enable_current() calls are unbalanced with
		 * kcsan_disable_current() calls, which causes disable_count to
		 * become negative and should not happen.
		 */
		kcsan_disable_current(); /* restore to 0, KCSAN still enabled */
		kcsan_disable_current(); /* disable to generate warning */
		WARN(1, "Unbalanced %s()", __func__);
		kcsan_enable_current();
	}
}
EXPORT_SYMBOL(kcsan_enable_current);

void kcsan_enable_current_nowarn(void)
{
	if (get_ctx()->disable_count-- == 0)
		kcsan_disable_current();
}
EXPORT_SYMBOL(kcsan_enable_current_nowarn);

void kcsan_nestable_atomic_begin(void)
{
	/*
	 * Do *not* check and warn if we are in a flat atomic region: nestable
	 * and flat atomic regions are independent from each other.
	 * See include/linux/kcsan.h: struct kcsan_ctx comments for more
	 * comments.
	 */

	++get_ctx()->atomic_nest_count;
}
EXPORT_SYMBOL(kcsan_nestable_atomic_begin);

void kcsan_nestable_atomic_end(void)
{
	if (get_ctx()->atomic_nest_count-- == 0) {
		/*
		 * Warn if kcsan_nestable_atomic_end() calls are unbalanced with
		 * kcsan_nestable_atomic_begin() calls, which causes
		 * atomic_nest_count to become negative and should not happen.
		 */
		kcsan_nestable_atomic_begin(); /* restore to 0 */
		kcsan_disable_current(); /* disable to generate warning */
		WARN(1, "Unbalanced %s()", __func__);
		kcsan_enable_current();
	}
}
EXPORT_SYMBOL(kcsan_nestable_atomic_end);

void kcsan_flat_atomic_begin(void)
{
	get_ctx()->in_flat_atomic = true;
}
EXPORT_SYMBOL(kcsan_flat_atomic_begin);

void kcsan_flat_atomic_end(void)
{
	get_ctx()->in_flat_atomic = false;
}
EXPORT_SYMBOL(kcsan_flat_atomic_end);

void kcsan_atomic_next(int n)
{
	get_ctx()->atomic_next = n;
}
EXPORT_SYMBOL(kcsan_atomic_next);

void kcsan_set_access_mask(unsigned long mask)
{
	get_ctx()->access_mask = mask;
}
EXPORT_SYMBOL(kcsan_set_access_mask);

struct kcsan_scoped_access *
kcsan_begin_scoped_access(const volatile void *ptr, size_t size, int type,
			  struct kcsan_scoped_access *sa)
{
	struct kcsan_ctx *ctx = get_ctx();

	check_access(ptr, size, type, _RET_IP_);

	ctx->disable_count++; /* Disable KCSAN, in case list debugging is on. */

	INIT_LIST_HEAD(&sa->list);
	sa->ptr = ptr;
	sa->size = size;
	sa->type = type;
	sa->ip = _RET_IP_;

	if (!ctx->scoped_accesses.prev) /* Lazy initialize list head. */
		INIT_LIST_HEAD(&ctx->scoped_accesses);
	list_add(&sa->list, &ctx->scoped_accesses);

	ctx->disable_count--;
	return sa;
}
EXPORT_SYMBOL(kcsan_begin_scoped_access);

void kcsan_end_scoped_access(struct kcsan_scoped_access *sa)
{
	struct kcsan_ctx *ctx = get_ctx();

	if (WARN(!ctx->scoped_accesses.prev, "Unbalanced %s()?", __func__))
		return;

	ctx->disable_count++; /* Disable KCSAN, in case list debugging is on. */

	list_del(&sa->list);
	if (list_empty(&ctx->scoped_accesses))
		/*
		 * Ensure we do not enter kcsan_check_scoped_accesses()
		 * slow-path if unnecessary, and avoids requiring list_empty()
		 * in the fast-path (to avoid a READ_ONCE() and potential
		 * uaccess warning).
		 */
		ctx->scoped_accesses.prev = NULL;

	ctx->disable_count--;

	check_access(sa->ptr, sa->size, sa->type, sa->ip);
}
EXPORT_SYMBOL(kcsan_end_scoped_access);

void __kcsan_check_access(const volatile void *ptr, size_t size, int type)
{
	check_access(ptr, size, type, _RET_IP_);
}
EXPORT_SYMBOL(__kcsan_check_access);

#define DEFINE_MEMORY_BARRIER(name, order_before_cond)				\
	void __kcsan_##name(void)						\
	{									\
		struct kcsan_scoped_access *sa = get_reorder_access(get_ctx());	\
		if (!sa)							\
			return;							\
		if (order_before_cond)						\
			sa->size = 0;						\
	}									\
	EXPORT_SYMBOL(__kcsan_##name)

DEFINE_MEMORY_BARRIER(mb, true);
DEFINE_MEMORY_BARRIER(wmb, sa->type & (KCSAN_ACCESS_WRITE | KCSAN_ACCESS_COMPOUND));
DEFINE_MEMORY_BARRIER(rmb, !(sa->type & KCSAN_ACCESS_WRITE) || (sa->type & KCSAN_ACCESS_COMPOUND));
DEFINE_MEMORY_BARRIER(release, true);
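
/*
 * For illustration, DEFINE_MEMORY_BARRIER(release, true) above expands to
 * roughly:
 *
 *	void __kcsan_release(void)
 *	{
 *		struct kcsan_scoped_access *sa = get_reorder_access(get_ctx());
 *		if (!sa)
 *			return;
 *		if (true)
 *			sa->size = 0;
 *	}
 *
 * that is, a full barrier or release unconditionally invalidates the pending
 * reorder_access, while __kcsan_wmb() and __kcsan_rmb() only do so if the
 * pending access is of a type the corresponding barrier actually orders.
 */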

/*
 * KCSAN uses the same instrumentation that is emitted by supported compilers
 * for ThreadSanitizer (TSAN).
 *
 * When enabled, the compiler emits instrumentation calls (the functions
 * prefixed with "__tsan" below) for all loads and stores that it generated;
 * inline asm is not instrumented.
 *
 * Note that not all supported compiler versions distinguish aligned/unaligned
 * accesses, but e.g. recent versions of Clang do. We simply alias the unaligned
 * version to the generic version, which can handle both.
 */

#define DEFINE_TSAN_READ_WRITE(size)						\
	void __tsan_read##size(void *ptr);					\
	void __tsan_read##size(void *ptr)					\
	{									\
		check_access(ptr, size, 0, _RET_IP_);				\
	}									\
	EXPORT_SYMBOL(__tsan_read##size);					\
	void __tsan_unaligned_read##size(void *ptr)				\
		__alias(__tsan_read##size);					\
	EXPORT_SYMBOL(__tsan_unaligned_read##size);				\
	void __tsan_write##size(void *ptr);					\
	void __tsan_write##size(void *ptr)					\
	{									\
		check_access(ptr, size, KCSAN_ACCESS_WRITE, _RET_IP_);		\
	}									\
	EXPORT_SYMBOL(__tsan_write##size);					\
	void __tsan_unaligned_write##size(void *ptr)				\
		__alias(__tsan_write##size);					\
	EXPORT_SYMBOL(__tsan_unaligned_write##size);				\
	void __tsan_read_write##size(void *ptr);				\
	void __tsan_read_write##size(void *ptr)					\
	{									\
		check_access(ptr, size,						\
			     KCSAN_ACCESS_COMPOUND | KCSAN_ACCESS_WRITE,	\
			     _RET_IP_);						\
	}									\
	EXPORT_SYMBOL(__tsan_read_write##size);					\
	void __tsan_unaligned_read_write##size(void *ptr)			\
		__alias(__tsan_read_write##size);				\
	EXPORT_SYMBOL(__tsan_unaligned_read_write##size)

DEFINE_TSAN_READ_WRITE(1);
DEFINE_TSAN_READ_WRITE(2);
DEFINE_TSAN_READ_WRITE(4);
DEFINE_TSAN_READ_WRITE(8);
DEFINE_TSAN_READ_WRITE(16);
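
/*
 * For example, a plain 4-byte load such as "int v = x;" in an instrumented
 * translation unit is compiled into a call to __tsan_read4(&x) emitted before
 * the load itself, which funnels into check_access(&x, 4, 0, _RET_IP_) above;
 * a plain store goes through __tsan_write4(), and, with compilers that support
 * compound instrumentation, a read-modify-write such as "x++" goes through
 * __tsan_read_write4().
 */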

void __tsan_read_range(void *ptr, size_t size);
void __tsan_read_range(void *ptr, size_t size)
{
	check_access(ptr, size, 0, _RET_IP_);
}
EXPORT_SYMBOL(__tsan_read_range);

void __tsan_write_range(void *ptr, size_t size);
void __tsan_write_range(void *ptr, size_t size)
{
	check_access(ptr, size, KCSAN_ACCESS_WRITE, _RET_IP_);
}
EXPORT_SYMBOL(__tsan_write_range);

/*
 * Use of explicit volatile is generally disallowed [1], however, volatile is
 * still used in various concurrent contexts, whether in low-level
 * synchronization primitives or for legacy reasons.
 * [1] https://lwn.net/Articles/233479/
 *
 * We only consider volatile accesses atomic if they are aligned and would pass
 * the size-check of compiletime_assert_rwonce_type().
 */
#define DEFINE_TSAN_VOLATILE_READ_WRITE(size)					\
	void __tsan_volatile_read##size(void *ptr);				\
	void __tsan_volatile_read##size(void *ptr)				\
	{									\
		const bool is_atomic = size <= sizeof(long long) &&		\
				       IS_ALIGNED((unsigned long)ptr, size);	\
		if (IS_ENABLED(CONFIG_KCSAN_IGNORE_ATOMICS) && is_atomic)	\
			return;							\
		check_access(ptr, size, is_atomic ? KCSAN_ACCESS_ATOMIC : 0,	\
			     _RET_IP_);						\
	}									\
	EXPORT_SYMBOL(__tsan_volatile_read##size);				\
	void __tsan_unaligned_volatile_read##size(void *ptr)			\
		__alias(__tsan_volatile_read##size);				\
	EXPORT_SYMBOL(__tsan_unaligned_volatile_read##size);			\
	void __tsan_volatile_write##size(void *ptr);				\
	void __tsan_volatile_write##size(void *ptr)				\
	{									\
		const bool is_atomic = size <= sizeof(long long) &&		\
				       IS_ALIGNED((unsigned long)ptr, size);	\
		if (IS_ENABLED(CONFIG_KCSAN_IGNORE_ATOMICS) && is_atomic)	\
			return;							\
		check_access(ptr, size,						\
			     KCSAN_ACCESS_WRITE |				\
				     (is_atomic ? KCSAN_ACCESS_ATOMIC : 0),	\
			     _RET_IP_);						\
	}									\
	EXPORT_SYMBOL(__tsan_volatile_write##size);				\
	void __tsan_unaligned_volatile_write##size(void *ptr)			\
		__alias(__tsan_volatile_write##size);				\
	EXPORT_SYMBOL(__tsan_unaligned_volatile_write##size)

DEFINE_TSAN_VOLATILE_READ_WRITE(1);
DEFINE_TSAN_VOLATILE_READ_WRITE(2);
DEFINE_TSAN_VOLATILE_READ_WRITE(4);
DEFINE_TSAN_VOLATILE_READ_WRITE(8);
DEFINE_TSAN_VOLATILE_READ_WRITE(16);
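
/*
 * This is also how marked accesses are typically recognized (assuming the
 * compiler distinguishes volatile accesses): READ_ONCE() and WRITE_ONCE()
 * compile down to volatile loads and stores, which are instrumented with
 * __tsan_volatile_read{1,2,4,8}() and __tsan_volatile_write{1,2,4,8}() and are
 * therefore checked with KCSAN_ACCESS_ATOMIC set, or skipped entirely with
 * CONFIG_KCSAN_IGNORE_ATOMICS=y.
 */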

/*
 * Function entry and exit are used to determine the validity of reorder_access.
 * Reordering of the access ends at the end of the function scope where the
 * access happened. This is done for two reasons:
 *
 *	1. Artificially limits the scope where missing barriers are detected.
 *	   This minimizes false positives due to uninstrumented functions that
 *	   contain the required barriers but were missed.
 *
 *	2. Simplifies generating the stack trace of the access.
 */
void __tsan_func_entry(void *call_pc);
noinline void __tsan_func_entry(void *call_pc)
{
	if (!IS_ENABLED(CONFIG_KCSAN_WEAK_MEMORY))
		return;

	add_kcsan_stack_depth(1);
}
EXPORT_SYMBOL(__tsan_func_entry);

void __tsan_func_exit(void);
noinline void __tsan_func_exit(void)
{
	struct kcsan_scoped_access *reorder_access;

	if (!IS_ENABLED(CONFIG_KCSAN_WEAK_MEMORY))
		return;

	reorder_access = get_reorder_access(get_ctx());
	if (!reorder_access)
		goto out;

	if (get_kcsan_stack_depth() <= reorder_access->stack_depth) {
		/*
		 * Access check to catch cases where write without a barrier
		 * (supposed release) was last access in function: because
		 * instrumentation is inserted before the real access, a data
		 * race due to the write giving up a c-s would only be caught if
		 * we do the conflicting access after.
		 */
		check_access(reorder_access->ptr, reorder_access->size,
			     reorder_access->type, reorder_access->ip);
		reorder_access->size = 0;
		reorder_access->stack_depth = INT_MIN;
	}
out:
	add_kcsan_stack_depth(-1);
}
EXPORT_SYMBOL(__tsan_func_exit);

void __tsan_init(void);
void __tsan_init(void)
{
}
EXPORT_SYMBOL(__tsan_init);

/*
 * Instrumentation for atomic builtins (__atomic_*, __sync_*).
 *
 * Normal kernel code _should not_ be using them directly, but some
 * architectures may implement some or all atomics using the compilers'
 * builtins.
 *
 * Note: If an architecture decides to fully implement atomics using the
 * builtins, because they are implicitly instrumented by KCSAN (and KASAN,
 * etc.), implementing the ARCH_ATOMIC interface (to get instrumentation via
 * atomic-instrumented) is no longer necessary.
 *
 * TSAN instrumentation replaces atomic accesses with calls to any of the below
 * functions, whose job is to also execute the operation itself.
 */

static __always_inline void kcsan_atomic_builtin_memorder(int memorder)
{
	if (memorder == __ATOMIC_RELEASE ||
	    memorder == __ATOMIC_SEQ_CST ||
	    memorder == __ATOMIC_ACQ_REL)
		__kcsan_release();
}

#define DEFINE_TSAN_ATOMIC_LOAD_STORE(bits)					\
	u##bits __tsan_atomic##bits##_load(const u##bits *ptr, int memorder);	\
	u##bits __tsan_atomic##bits##_load(const u##bits *ptr, int memorder)	\
	{									\
		kcsan_atomic_builtin_memorder(memorder);			\
		if (!IS_ENABLED(CONFIG_KCSAN_IGNORE_ATOMICS)) {			\
			check_access(ptr, bits / BITS_PER_BYTE, KCSAN_ACCESS_ATOMIC, _RET_IP_); \
		}								\
		return __atomic_load_n(ptr, memorder);				\
	}									\
	EXPORT_SYMBOL(__tsan_atomic##bits##_load);				\
	void __tsan_atomic##bits##_store(u##bits *ptr, u##bits v, int memorder); \
	void __tsan_atomic##bits##_store(u##bits *ptr, u##bits v, int memorder) \
	{									\
		kcsan_atomic_builtin_memorder(memorder);			\
		if (!IS_ENABLED(CONFIG_KCSAN_IGNORE_ATOMICS)) {			\
			check_access(ptr, bits / BITS_PER_BYTE,			\
				     KCSAN_ACCESS_WRITE | KCSAN_ACCESS_ATOMIC, _RET_IP_); \
		}								\
		__atomic_store_n(ptr, v, memorder);				\
	}									\
	EXPORT_SYMBOL(__tsan_atomic##bits##_store)

#define DEFINE_TSAN_ATOMIC_RMW(op, bits, suffix)				\
	u##bits __tsan_atomic##bits##_##op(u##bits *ptr, u##bits v, int memorder); \
	u##bits __tsan_atomic##bits##_##op(u##bits *ptr, u##bits v, int memorder) \
	{									\
		kcsan_atomic_builtin_memorder(memorder);			\
		if (!IS_ENABLED(CONFIG_KCSAN_IGNORE_ATOMICS)) {			\
			check_access(ptr, bits / BITS_PER_BYTE,			\
				     KCSAN_ACCESS_COMPOUND | KCSAN_ACCESS_WRITE | \
					     KCSAN_ACCESS_ATOMIC, _RET_IP_);	\
		}								\
		return __atomic_##op##suffix(ptr, v, memorder);			\
	}									\
	EXPORT_SYMBOL(__tsan_atomic##bits##_##op)

/*
 * Note: CAS operations are always classified as write, even in case they
 * fail. We cannot perform check_access() after a write, as it might lead to
 * false positives, in cases such as:
 *
 * T0: __atomic_compare_exchange_n(&p->flag, &old, 1, ...)
 *
 * T1: if (__atomic_load_n(&p->flag, ...)) {
 *		modify *p;
 *		p->flag = 0;
 *	}
 *
 * The only downside is that, if there are 3 threads, with one CAS that
 * succeeds, another CAS that fails, and an unmarked racing operation, we may
 * point at the wrong CAS as the source of the race. However, if we assume that
 * all CAS can succeed in some other execution, the data race is still valid.
 */
#define DEFINE_TSAN_ATOMIC_CMPXCHG(bits, strength, weak)			\
	int __tsan_atomic##bits##_compare_exchange_##strength(u##bits *ptr, u##bits *exp, \
							       u##bits val, int mo, int fail_mo); \
	int __tsan_atomic##bits##_compare_exchange_##strength(u##bits *ptr, u##bits *exp, \
							       u##bits val, int mo, int fail_mo) \
	{									\
		kcsan_atomic_builtin_memorder(mo);				\
		if (!IS_ENABLED(CONFIG_KCSAN_IGNORE_ATOMICS)) {			\
			check_access(ptr, bits / BITS_PER_BYTE,			\
				     KCSAN_ACCESS_COMPOUND | KCSAN_ACCESS_WRITE | \
					     KCSAN_ACCESS_ATOMIC, _RET_IP_);	\
		}								\
		return __atomic_compare_exchange_n(ptr, exp, val, weak, mo, fail_mo); \
	}									\
	EXPORT_SYMBOL(__tsan_atomic##bits##_compare_exchange_##strength)

#define DEFINE_TSAN_ATOMIC_CMPXCHG_VAL(bits)					\
	u##bits __tsan_atomic##bits##_compare_exchange_val(u##bits *ptr, u##bits exp, u##bits val, \
							   int mo, int fail_mo); \
	u##bits __tsan_atomic##bits##_compare_exchange_val(u##bits *ptr, u##bits exp, u##bits val, \
							   int mo, int fail_mo) \
	{									\
		kcsan_atomic_builtin_memorder(mo);				\
		if (!IS_ENABLED(CONFIG_KCSAN_IGNORE_ATOMICS)) {			\
			check_access(ptr, bits / BITS_PER_BYTE,			\
				     KCSAN_ACCESS_COMPOUND | KCSAN_ACCESS_WRITE | \
					     KCSAN_ACCESS_ATOMIC, _RET_IP_);	\
		}								\
		__atomic_compare_exchange_n(ptr, &exp, val, 0, mo, fail_mo);	\
		return exp;							\
	}									\
	EXPORT_SYMBOL(__tsan_atomic##bits##_compare_exchange_val)

#define DEFINE_TSAN_ATOMIC_OPS(bits)						\
	DEFINE_TSAN_ATOMIC_LOAD_STORE(bits);					\
	DEFINE_TSAN_ATOMIC_RMW(exchange, bits, _n);				\
	DEFINE_TSAN_ATOMIC_RMW(fetch_add, bits, );				\
	DEFINE_TSAN_ATOMIC_RMW(fetch_sub, bits, );				\
	DEFINE_TSAN_ATOMIC_RMW(fetch_and, bits, );				\
	DEFINE_TSAN_ATOMIC_RMW(fetch_or, bits, );				\
	DEFINE_TSAN_ATOMIC_RMW(fetch_xor, bits, );				\
	DEFINE_TSAN_ATOMIC_RMW(fetch_nand, bits, );				\
	DEFINE_TSAN_ATOMIC_CMPXCHG(bits, strong, 0);				\
	DEFINE_TSAN_ATOMIC_CMPXCHG(bits, weak, 1);				\
	DEFINE_TSAN_ATOMIC_CMPXCHG_VAL(bits)

DEFINE_TSAN_ATOMIC_OPS(8);
DEFINE_TSAN_ATOMIC_OPS(16);
DEFINE_TSAN_ATOMIC_OPS(32);
DEFINE_TSAN_ATOMIC_OPS(64);
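
/*
 * For example, with an architecture that implements its atomics via the
 * builtins, a call such as __atomic_fetch_add(&counter, 1, __ATOMIC_RELEASE)
 * on a 4-byte counter is rewritten by the compiler into
 * __tsan_atomic32_fetch_add(&counter, 1, __ATOMIC_RELEASE), which records the
 * release ordering via kcsan_atomic_builtin_memorder(), checks the access as
 * an atomic compound write, and then performs the real __atomic_fetch_add()
 * ("counter" here is purely illustrative).
 */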

void __tsan_atomic_thread_fence(int memorder);
void __tsan_atomic_thread_fence(int memorder)
{
	kcsan_atomic_builtin_memorder(memorder);
	__atomic_thread_fence(memorder);
}
EXPORT_SYMBOL(__tsan_atomic_thread_fence);

/*
 * In instrumented files, we emit instrumentation for barriers by mapping the
 * kernel barriers to an __atomic_signal_fence(), which is interpreted specially
 * and otherwise has no relation to a real __atomic_signal_fence(). No known
 * kernel code uses __atomic_signal_fence().
 *
 * Since fsanitize=thread instrumentation handles __atomic_signal_fence(), which
 * are turned into calls to __tsan_atomic_signal_fence(), such instrumentation
 * can be disabled via the __no_kcsan function attribute (vs. an explicit call
 * which could not). When __no_kcsan is requested, __atomic_signal_fence()
 * generates no code.
 *
 * Note: The result of using __atomic_signal_fence() with KCSAN enabled is
 * potentially limiting the compiler's ability to reorder operations; however,
 * if barriers were instrumented with explicit calls (without LTO), the compiler
 * couldn't optimize much anyway. The result of a hypothetical architecture
 * using __atomic_signal_fence() in normal code would be KCSAN false negatives.
 */
void __tsan_atomic_signal_fence(int memorder);
noinline void __tsan_atomic_signal_fence(int memorder)
{
	switch (memorder) {
	case __KCSAN_BARRIER_TO_SIGNAL_FENCE_mb:
		__kcsan_mb();
		break;
	case __KCSAN_BARRIER_TO_SIGNAL_FENCE_wmb:
		__kcsan_wmb();
		break;
	case __KCSAN_BARRIER_TO_SIGNAL_FENCE_rmb:
		__kcsan_rmb();
		break;
	case __KCSAN_BARRIER_TO_SIGNAL_FENCE_release:
		__kcsan_release();
		break;
	default:
		break;
	}
}
EXPORT_SYMBOL(__tsan_atomic_signal_fence);
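
/*
 * Sketch of the overall barrier mapping, assuming CONFIG_KCSAN_WEAK_MEMORY=y:
 * in an instrumented file, a kernel barrier such as smp_wmb() additionally
 * expands (via the KCSAN headers) to
 * __atomic_signal_fence(__KCSAN_BARRIER_TO_SIGNAL_FENCE_wmb), which the
 * compiler turns into a call to __tsan_atomic_signal_fence() above, which in
 * turn invokes __kcsan_wmb() to invalidate a pending reorder_access of write
 * type.
 */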

#ifdef __HAVE_ARCH_MEMSET
void *__tsan_memset(void *s, int c, size_t count);
noinline void *__tsan_memset(void *s, int c, size_t count)
{
	/*
	 * Instead of not setting up watchpoints where accessed size is greater
	 * than MAX_ENCODABLE_SIZE, truncate checked size to MAX_ENCODABLE_SIZE.
	 */
	size_t check_len = min_t(size_t, count, MAX_ENCODABLE_SIZE);

	check_access(s, check_len, KCSAN_ACCESS_WRITE, _RET_IP_);
	return memset(s, c, count);
}
#else
void *__tsan_memset(void *s, int c, size_t count) __alias(memset);
#endif
EXPORT_SYMBOL(__tsan_memset);

#ifdef __HAVE_ARCH_MEMMOVE
void *__tsan_memmove(void *dst, const void *src, size_t len);
noinline void *__tsan_memmove(void *dst, const void *src, size_t len)
{
	size_t check_len = min_t(size_t, len, MAX_ENCODABLE_SIZE);

	check_access(dst, check_len, KCSAN_ACCESS_WRITE, _RET_IP_);
	check_access(src, check_len, 0, _RET_IP_);
	return memmove(dst, src, len);
}
#else
void *__tsan_memmove(void *dst, const void *src, size_t len) __alias(memmove);
#endif
EXPORT_SYMBOL(__tsan_memmove);

#ifdef __HAVE_ARCH_MEMCPY
void *__tsan_memcpy(void *dst, const void *src, size_t len);
noinline void *__tsan_memcpy(void *dst, const void *src, size_t len)
{
	size_t check_len = min_t(size_t, len, MAX_ENCODABLE_SIZE);

	check_access(dst, check_len, KCSAN_ACCESS_WRITE, _RET_IP_);
	check_access(src, check_len, 0, _RET_IP_);
	return memcpy(dst, src, len);
}
#else
void *__tsan_memcpy(void *dst, const void *src, size_t len) __alias(memcpy);
#endif
EXPORT_SYMBOL(__tsan_memcpy);