// SPDX-License-Identifier: GPL-2.0-only
/*
 * Landlock LSM - Filesystem management and hooks
 *
 * Copyright © 2016-2020 Mickaël Salaün <mic@digikod.net>
 * Copyright © 2018-2020 ANSSI
 * Copyright © 2021-2022 Microsoft Corporation
 */

#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/bits.h>
#include <linux/compiler_types.h>
#include <linux/dcache.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/limits.h>
#include <linux/list.h>
#include <linux/lsm_hooks.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/path.h>
#include <linux/rcupdate.h>
#include <linux/spinlock.h>
#include <linux/stat.h>
#include <linux/types.h>
#include <linux/wait_bit.h>
#include <linux/workqueue.h>
#include <uapi/linux/landlock.h>

#include "common.h"
#include "cred.h"
#include "fs.h"
#include "limits.h"
#include "object.h"
#include "ruleset.h"
#include "setup.h"

/* Underlying object management */
static void release_inode(struct landlock_object *const object)
	__releases(object->lock)
{
	struct inode *const inode = object->underobj;
	struct super_block *sb;

	if (!inode) {
		spin_unlock(&object->lock);
		return;
	}

	/*
	 * Protects against concurrent use by hook_sb_delete() of the reference
	 * to the underlying inode.
	 */
	object->underobj = NULL;
	/*
	 * Makes sure that if the filesystem is concurrently unmounted,
	 * hook_sb_delete() will wait for us to finish iput().
	 */
	sb = inode->i_sb;
	atomic_long_inc(&landlock_superblock(sb)->inode_refs);
	spin_unlock(&object->lock);
	/*
	 * Because object->underobj was not NULL, hook_sb_delete() and
	 * get_inode_object() guarantee that it is safe to reset
	 * landlock_inode(inode)->object while it is not NULL. It is therefore
	 * not necessary to lock inode->i_lock.
	 */
	rcu_assign_pointer(landlock_inode(inode)->object, NULL);
	/*
	 * Now, new rules can safely be tied to @inode with get_inode_object().
	 */

	iput(inode);
	if (atomic_long_dec_and_test(&landlock_superblock(sb)->inode_refs))
		wake_up_var(&landlock_superblock(sb)->inode_refs);
}

static const struct landlock_object_underops landlock_fs_underops = {
	.release = release_inode
};

/* Ruleset management */

static struct landlock_object *get_inode_object(struct inode *const inode)
{
	struct landlock_object *object, *new_object;
	struct landlock_inode_security *inode_sec = landlock_inode(inode);

	rcu_read_lock();
retry:
	object = rcu_dereference(inode_sec->object);
	if (object) {
		if (likely(refcount_inc_not_zero(&object->usage))) {
			rcu_read_unlock();
			return object;
		}
		/*
		 * We are racing with release_inode(), the object is going
		 * away. Wait for release_inode(), then retry.
		 */
		spin_lock(&object->lock);
		spin_unlock(&object->lock);
		goto retry;
	}
	rcu_read_unlock();

	/*
	 * If there is no object tied to @inode, then create a new one (without
	 * holding any locks).
	 */
	new_object = landlock_create_object(&landlock_fs_underops, inode);
	if (IS_ERR(new_object))
		return new_object;

	/*
	 * Protects against concurrent calls to get_inode_object() or
	 * hook_sb_delete().
	 */
	spin_lock(&inode->i_lock);
	if (unlikely(rcu_access_pointer(inode_sec->object))) {
		/* Someone else just created the object, bail out and retry. */
		spin_unlock(&inode->i_lock);
		kfree(new_object);

		rcu_read_lock();
		goto retry;
	}

	/*
	 * @inode will be released by hook_sb_delete() on its superblock
	 * shutdown, or by release_inode() when no more ruleset references the
	 * related object.
	 */
	ihold(inode);
	rcu_assign_pointer(inode_sec->object, new_object);
	spin_unlock(&inode->i_lock);
	return new_object;
}

/* All access rights that can be tied to files. */
/* clang-format off */
#define ACCESS_FILE ( \
	LANDLOCK_ACCESS_FS_EXECUTE | \
	LANDLOCK_ACCESS_FS_WRITE_FILE | \
	LANDLOCK_ACCESS_FS_READ_FILE)
/* clang-format on */
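
/*
 * For instance, only the ACCESS_FILE rights are meaningful for a regular
 * file: trying to tie a directory-only right such as
 * LANDLOCK_ACCESS_FS_READ_DIR to a non-directory is rejected with -EINVAL
 * by landlock_append_fs_rule() below.
 */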

/*
 * All access rights that are denied by default whether they are handled or
 * not by a ruleset/layer. This must be ORed with all
 * ruleset->fs_access_masks[] entries when we need to get the absolute
 * handled access masks.
 */
/* clang-format off */
#define ACCESS_INITIALLY_DENIED ( \
	LANDLOCK_ACCESS_FS_REFER)
/* clang-format on */
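
/*
 * For example, a domain made of a single layer that only handles
 * LANDLOCK_ACCESS_FS_READ_FILE still denies LANDLOCK_ACCESS_FS_REFER:
 * ACCESS_INITIALLY_DENIED is always part of the handled accesses (cf.
 * get_handled_accesses() and init_layer_masks() below), so a refer action
 * is only permitted if a rule explicitly allows it.
 */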

/*
 * @path: Should have been checked by get_path_from_fd().
 */
int landlock_append_fs_rule(struct landlock_ruleset *const ruleset,
			    const struct path *const path,
			    access_mask_t access_rights)
{
	int err;
	struct landlock_object *object;

	/* Files only get access rights that make sense. */
	if (!d_is_dir(path->dentry) &&
	    (access_rights | ACCESS_FILE) != ACCESS_FILE)
		return -EINVAL;
	if (WARN_ON_ONCE(ruleset->num_layers != 1))
		return -EINVAL;

	/* Transforms relative access rights to absolute ones. */
	access_rights |=
		LANDLOCK_MASK_ACCESS_FS &
		~(ruleset->fs_access_masks[0] | ACCESS_INITIALLY_DENIED);
	object = get_inode_object(d_backing_inode(path->dentry));
	if (IS_ERR(object))
		return PTR_ERR(object);
	mutex_lock(&ruleset->lock);
	err = landlock_insert_rule(ruleset, object, access_rights);
	mutex_unlock(&ruleset->lock);
	/*
	 * No need to check for an error because landlock_insert_rule()
	 * increments the refcount for the new object if needed.
	 */
	landlock_put_object(object);
	return err;
}
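
/*
 * Example of the relative-to-absolute transformation above: with a
 * hypothetical ruleset that only handles LANDLOCK_ACCESS_FS_READ_FILE and
 * LANDLOCK_ACCESS_FS_WRITE_FILE, a rule allowing READ_FILE is stored with
 * READ_FILE plus every non-handled filesystem right (except
 * LANDLOCK_ACCESS_FS_REFER), which keeps non-handled accesses allowed when
 * rules from multiple layers are later intersected.
 */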

/* Access-control management */

/*
 * The lifetime of the returned rule is tied to @domain.
 *
 * Returns NULL if no rule is found or if @dentry is negative.
 */
static inline const struct landlock_rule *
find_rule(const struct landlock_ruleset *const domain,
	  const struct dentry *const dentry)
{
	const struct landlock_rule *rule;
	const struct inode *inode;

	/* Ignores nonexistent leaves. */
	if (d_is_negative(dentry))
		return NULL;

	inode = d_backing_inode(dentry);
	rcu_read_lock();
	rule = landlock_find_rule(
		domain, rcu_dereference(landlock_inode(inode)->object));
	rcu_read_unlock();
	return rule;
}

/*
 * @layer_masks is read and may be updated according to the access request and
 * the matching rule.
 *
 * Returns true if the request is allowed (i.e. relevant layer masks for the
 * request are empty).
 */
static inline bool
unmask_layers(const struct landlock_rule *const rule,
	      const access_mask_t access_request,
	      layer_mask_t (*const layer_masks)[LANDLOCK_NUM_ACCESS_FS])
{
	size_t layer_level;

	if (!access_request || !layer_masks)
		return true;
	if (!rule)
		return false;

	/*
	 * An access is granted if, for each policy layer, at least one rule
	 * encountered on the pathwalk grants the requested access,
	 * regardless of its position in the layer stack. We must then check
	 * the remaining layers for each inode, from the first added layer to
	 * the last one. When there are multiple requested accesses, for each
	 * policy layer, the full set of requested accesses may not be granted
	 * by only one rule, but by the union (binary OR) of multiple rules.
	 * E.g. /a/b <execute> + /a <read> => /a/b <execute + read>
	 */
	for (layer_level = 0; layer_level < rule->num_layers; layer_level++) {
		const struct landlock_layer *const layer =
			&rule->layers[layer_level];
		const layer_mask_t layer_bit = BIT_ULL(layer->level - 1);
		const unsigned long access_req = access_request;
		unsigned long access_bit;
		bool is_empty;

		/*
		 * Records in @layer_masks which layer grants access to each
		 * requested access.
		 */
		is_empty = true;
		for_each_set_bit(access_bit, &access_req,
				 ARRAY_SIZE(*layer_masks)) {
			if (layer->access & BIT_ULL(access_bit))
				(*layer_masks)[access_bit] &= ~layer_bit;
			is_empty = is_empty && !(*layer_masks)[access_bit];
		}
		if (is_empty)
			return true;
	}
	return false;
}
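
/*
 * Worked example with a hypothetical two-layer domain: if the READ_FILE
 * entry of *layer_masks is 0b11 (both layers still deny it) and the matched
 * rule carries a layer entry with level == 1 and an access field including
 * READ_FILE, then layer_bit == 0b01 is cleared and the mask becomes 0b10: a
 * rule from layer 2, found further up the path walk, is still needed for
 * the access to be granted.
 */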

/*
 * Allows access to pseudo filesystems that will never be mountable (e.g.
 * sockfs, pipefs), but can still be reachable through
 * /proc/<pid>/fd/<file-descriptor>.
 */
static inline bool is_nouser_or_private(const struct dentry *dentry)
{
	return (dentry->d_sb->s_flags & SB_NOUSER) ||
	       (d_is_positive(dentry) &&
		unlikely(IS_PRIVATE(d_backing_inode(dentry))));
}

static inline access_mask_t
get_handled_accesses(const struct landlock_ruleset *const domain)
{
	access_mask_t access_dom = ACCESS_INITIALLY_DENIED;
	size_t layer_level;

	for (layer_level = 0; layer_level < domain->num_layers; layer_level++)
		access_dom |= domain->fs_access_masks[layer_level];
	return access_dom & LANDLOCK_MASK_ACCESS_FS;
}

static inline access_mask_t
init_layer_masks(const struct landlock_ruleset *const domain,
		 const access_mask_t access_request,
		 layer_mask_t (*const layer_masks)[LANDLOCK_NUM_ACCESS_FS])
{
	access_mask_t handled_accesses = 0;
	size_t layer_level;

	memset(layer_masks, 0, sizeof(*layer_masks));
	/* An empty access request can happen because of O_WRONLY | O_RDWR. */
	if (!access_request)
		return 0;

	/* Saves all handled accesses per layer. */
	for (layer_level = 0; layer_level < domain->num_layers; layer_level++) {
		const unsigned long access_req = access_request;
		unsigned long access_bit;

		for_each_set_bit(access_bit, &access_req,
				 ARRAY_SIZE(*layer_masks)) {
			/*
			 * Artificially handles all access rights that are
			 * initially denied by default.
			 */
			if (BIT_ULL(access_bit) &
			    (domain->fs_access_masks[layer_level] |
			     ACCESS_INITIALLY_DENIED)) {
				(*layer_masks)[access_bit] |=
					BIT_ULL(layer_level);
				handled_accesses |= BIT_ULL(access_bit);
			}
		}
	}
	return handled_accesses;
}
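
/*
 * For instance, with a hypothetical two-layer domain where only the second
 * layer handles LANDLOCK_ACCESS_FS_EXECUTE, requesting EXECUTE sets the
 * entry of *layer_masks indexed by the EXECUTE bit to BIT_ULL(1): only the
 * second layer (layer_level 1) still has to grant it, the first layer being
 * treated as already allowing it.
 */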

/*
 * Check that a destination file hierarchy has more restrictions than a source
 * file hierarchy. This is only used for link and rename actions.
 *
 * @layer_masks_child2: Optional child masks.
 */
static inline bool no_more_access(
	const layer_mask_t (*const layer_masks_parent1)[LANDLOCK_NUM_ACCESS_FS],
	const layer_mask_t (*const layer_masks_child1)[LANDLOCK_NUM_ACCESS_FS],
	const bool child1_is_directory,
	const layer_mask_t (*const layer_masks_parent2)[LANDLOCK_NUM_ACCESS_FS],
	const layer_mask_t (*const layer_masks_child2)[LANDLOCK_NUM_ACCESS_FS],
	const bool child2_is_directory)
{
	unsigned long access_bit;

	for (access_bit = 0; access_bit < ARRAY_SIZE(*layer_masks_parent2);
	     access_bit++) {
		/* Ignores accesses that only make sense for directories. */
		const bool is_file_access =
			!!(BIT_ULL(access_bit) & ACCESS_FILE);

		if (child1_is_directory || is_file_access) {
			/*
			 * Checks if the destination restrictions are a
			 * superset of the source ones (i.e. inherited access
			 * rights without child exceptions):
			 * restrictions(parent2) >= restrictions(child1)
			 */
			if ((((*layer_masks_parent1)[access_bit] &
			      (*layer_masks_child1)[access_bit]) |
			     (*layer_masks_parent2)[access_bit]) !=
			    (*layer_masks_parent2)[access_bit])
				return false;
		}

		if (!layer_masks_child2)
			continue;
		if (child2_is_directory || is_file_access) {
			/*
			 * Checks inverted restrictions for RENAME_EXCHANGE:
			 * restrictions(parent1) >= restrictions(child2)
			 */
			if ((((*layer_masks_parent2)[access_bit] &
			      (*layer_masks_child2)[access_bit]) |
			     (*layer_masks_parent1)[access_bit]) !=
			    (*layer_masks_parent1)[access_bit])
				return false;
		}
	}
	return true;
}
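
/*
 * For example, if a hypothetical layer still denies WRITE_FILE under the
 * source parent (bit set in *layer_masks_parent1 and not cleared by a rule
 * on the child in *layer_masks_child1) while the destination parent allows
 * it (bit clear in *layer_masks_parent2), then moving the file would relax
 * its restrictions: no_more_access() returns false, which can ultimately
 * translate to -EXDEV.
 */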

/*
 * Removes @layer_masks accesses that are not requested.
 *
 * Returns true if the request is allowed, false otherwise.
 */
static inline bool
scope_to_request(const access_mask_t access_request,
		 layer_mask_t (*const layer_masks)[LANDLOCK_NUM_ACCESS_FS])
{
	const unsigned long access_req = access_request;
	unsigned long access_bit;

	if (WARN_ON_ONCE(!layer_masks))
		return true;

	for_each_clear_bit(access_bit, &access_req, ARRAY_SIZE(*layer_masks))
		(*layer_masks)[access_bit] = 0;
	return !memchr_inv(layer_masks, 0, sizeof(*layer_masks));
}

/*
 * Returns true if there is at least one access right other than
 * LANDLOCK_ACCESS_FS_REFER.
 */
static inline bool
is_eacces(const layer_mask_t (*const layer_masks)[LANDLOCK_NUM_ACCESS_FS],
	  const access_mask_t access_request)
{
	unsigned long access_bit;
	/* LANDLOCK_ACCESS_FS_REFER alone must return -EXDEV. */
	const unsigned long access_check = access_request &
					   ~LANDLOCK_ACCESS_FS_REFER;

	if (!layer_masks)
		return false;

	for_each_set_bit(access_bit, &access_check, ARRAY_SIZE(*layer_masks)) {
		if ((*layer_masks)[access_bit])
			return true;
	}
	return false;
}

/**
 * check_access_path_dual - Check accesses for requests with a common path
 *
 * @domain: Domain to check against.
 * @path: File hierarchy to walk through.
 * @access_request_parent1: Accesses to check, once @layer_masks_parent1 is
 *     equal to @layer_masks_parent2 (if any). This is tied to the unique
 *     requested path for most actions, or the source in case of a refer action
 *     (i.e. rename or link), or the source and destination in case of
 *     RENAME_EXCHANGE.
 * @layer_masks_parent1: Pointer to a matrix of layer masks per access
 *     masks, identifying the layers that forbid a specific access. Bits from
 *     this matrix can be unset according to the @path walk. An empty matrix
 *     means that @domain allows all possible Landlock accesses (i.e. not only
 *     those identified by @access_request_parent1). This matrix can
 *     initially refer to domain layer masks and, when the accesses for the
 *     destination and source are the same, to requested layer masks.
 * @dentry_child1: Dentry to the initial child of the parent1 path. This
 *     pointer must be NULL for non-refer actions (i.e. not link nor rename).
 * @access_request_parent2: Similar to @access_request_parent1 but for a
 *     request involving a source and a destination. This refers to the
 *     destination, except in case of RENAME_EXCHANGE where it also refers to
 *     the source. Must be set to 0 when using a simple path request.
 * @layer_masks_parent2: Similar to @layer_masks_parent1 but for a refer
 *     action. This must be NULL otherwise.
 * @dentry_child2: Dentry to the initial child of the parent2 path. This
 *     pointer is only set for RENAME_EXCHANGE actions and must be NULL
 *     otherwise.
 *
 * This helper first checks that the destination has a superset of restrictions
 * compared to the source (if any) for a common path. Because of
 * RENAME_EXCHANGE actions, source and destinations may be swapped. It then
 * checks that the collected accesses and the remaining ones are enough to
 * allow the request.
 *
 * Returns:
 * - 0 if the access request is granted;
 * - -EACCES if it is denied because of an access right other than
 *   LANDLOCK_ACCESS_FS_REFER;
 * - -EXDEV if the renaming or linking would be a privilege escalation
 *   (according to each layered policy), or if LANDLOCK_ACCESS_FS_REFER is
 *   not allowed by the source or the destination.
 */
static int check_access_path_dual(
	const struct landlock_ruleset *const domain,
	const struct path *const path,
	const access_mask_t access_request_parent1,
	layer_mask_t (*const layer_masks_parent1)[LANDLOCK_NUM_ACCESS_FS],
	const struct dentry *const dentry_child1,
	const access_mask_t access_request_parent2,
	layer_mask_t (*const layer_masks_parent2)[LANDLOCK_NUM_ACCESS_FS],
	const struct dentry *const dentry_child2)
{
	bool allowed_parent1 = false, allowed_parent2 = false, is_dom_check,
	     child1_is_directory = true, child2_is_directory = true;
	struct path walker_path;
	access_mask_t access_masked_parent1, access_masked_parent2;
	layer_mask_t _layer_masks_child1[LANDLOCK_NUM_ACCESS_FS],
		_layer_masks_child2[LANDLOCK_NUM_ACCESS_FS];
	layer_mask_t(*layer_masks_child1)[LANDLOCK_NUM_ACCESS_FS] = NULL,
		(*layer_masks_child2)[LANDLOCK_NUM_ACCESS_FS] = NULL;

	if (!access_request_parent1 && !access_request_parent2)
		return 0;
	if (WARN_ON_ONCE(!domain || !path))
		return 0;
	if (is_nouser_or_private(path->dentry))
		return 0;
	if (WARN_ON_ONCE(domain->num_layers < 1 || !layer_masks_parent1))
		return -EACCES;

	if (unlikely(layer_masks_parent2)) {
		if (WARN_ON_ONCE(!dentry_child1))
			return -EACCES;
		/*
		 * For a double request, first check for potential privilege
		 * escalation by looking at domain handled accesses (which are
		 * a superset of the meaningful requested accesses).
		 */
		access_masked_parent1 = access_masked_parent2 =
			get_handled_accesses(domain);
		is_dom_check = true;
	} else {
		if (WARN_ON_ONCE(dentry_child1 || dentry_child2))
			return -EACCES;
		/* For a simple request, only check for requested accesses. */
		access_masked_parent1 = access_request_parent1;
		access_masked_parent2 = access_request_parent2;
		is_dom_check = false;
	}

	if (unlikely(dentry_child1)) {
		unmask_layers(find_rule(domain, dentry_child1),
			      init_layer_masks(domain, LANDLOCK_MASK_ACCESS_FS,
					       &_layer_masks_child1),
			      &_layer_masks_child1);
		layer_masks_child1 = &_layer_masks_child1;
		child1_is_directory = d_is_dir(dentry_child1);
	}
	if (unlikely(dentry_child2)) {
		unmask_layers(find_rule(domain, dentry_child2),
			      init_layer_masks(domain, LANDLOCK_MASK_ACCESS_FS,
					       &_layer_masks_child2),
			      &_layer_masks_child2);
		layer_masks_child2 = &_layer_masks_child2;
		child2_is_directory = d_is_dir(dentry_child2);
	}

	walker_path = *path;
	path_get(&walker_path);
	/*
	 * We need to walk through all the hierarchy to not miss any relevant
	 * restriction.
	 */
	while (true) {
		struct dentry *parent_dentry;
		const struct landlock_rule *rule;

		/*
		 * If at least all accesses allowed on the destination are
		 * already allowed on the source, respectively if there are at
		 * least as many restrictions on the destination as on the
		 * source, then we can safely refer files from the source to
		 * the destination without risking a privilege escalation.
		 * This also applies in the case of RENAME_EXCHANGE, which
		 * implies checks in both directions. This is crucial for
		 * standalone multilayered security policies. Furthermore,
		 * this helps prevent policy writers from shooting themselves
		 * in the foot.
		 */
		if (unlikely(is_dom_check &&
			     no_more_access(
				     layer_masks_parent1, layer_masks_child1,
				     child1_is_directory, layer_masks_parent2,
				     layer_masks_child2,
				     child2_is_directory))) {
			allowed_parent1 = scope_to_request(
				access_request_parent1, layer_masks_parent1);
			allowed_parent2 = scope_to_request(
				access_request_parent2, layer_masks_parent2);

			/* Stops when all accesses are granted. */
			if (allowed_parent1 && allowed_parent2)
				break;

			/*
			 * Now, downgrades the remaining checks from domain
			 * handled accesses to requested accesses.
			 */
			is_dom_check = false;
			access_masked_parent1 = access_request_parent1;
			access_masked_parent2 = access_request_parent2;
		}

		rule = find_rule(domain, walker_path.dentry);
		allowed_parent1 = unmask_layers(rule, access_masked_parent1,
						layer_masks_parent1);
		allowed_parent2 = unmask_layers(rule, access_masked_parent2,
						layer_masks_parent2);

		/* Stops when a rule from each layer grants access. */
		if (allowed_parent1 && allowed_parent2)
			break;

jump_up:
		if (walker_path.dentry == walker_path.mnt->mnt_root) {
			if (follow_up(&walker_path)) {
				/* Ignores hidden mount points. */
				goto jump_up;
			} else {
				/*
				 * Stops at the real root. Denies access
				 * because not all layers have granted access.
				 */
				break;
			}
		}
		if (unlikely(IS_ROOT(walker_path.dentry))) {
			/*
			 * Stops at disconnected root directories. Only allows
			 * access to internal filesystems (e.g. nsfs, which is
			 * reachable through /proc/<pid>/ns/<namespace>).
			 */
			allowed_parent1 = allowed_parent2 =
				!!(walker_path.mnt->mnt_flags & MNT_INTERNAL);
			break;
		}
		parent_dentry = dget_parent(walker_path.dentry);
		dput(walker_path.dentry);
		walker_path.dentry = parent_dentry;
	}
	path_put(&walker_path);

	if (allowed_parent1 && allowed_parent2)
		return 0;

	/*
	 * This prioritizes EACCES over EXDEV for all actions, including
	 * renames with RENAME_EXCHANGE.
	 */
	if (likely(is_eacces(layer_masks_parent1, access_request_parent1) ||
		   is_eacces(layer_masks_parent2, access_request_parent2)))
		return -EACCES;

	/*
	 * Gracefully forbids reparenting if the destination directory
	 * hierarchy is not a superset of restrictions of the source directory
	 * hierarchy, or if LANDLOCK_ACCESS_FS_REFER is not allowed by the
	 * source or the destination.
	 */
	return -EXDEV;
}

static inline int check_access_path(const struct landlock_ruleset *const domain,
				    const struct path *const path,
				    access_mask_t access_request)
{
	layer_mask_t layer_masks[LANDLOCK_NUM_ACCESS_FS] = {};

	access_request = init_layer_masks(domain, access_request, &layer_masks);
	return check_access_path_dual(domain, path, access_request,
				      &layer_masks, NULL, 0, NULL, NULL);
}

static inline int current_check_access_path(const struct path *const path,
					    const access_mask_t access_request)
{
	const struct landlock_ruleset *const dom =
		landlock_get_current_domain();

	if (!dom)
		return 0;
	return check_access_path(dom, path, access_request);
}

static inline access_mask_t get_mode_access(const umode_t mode)
{
	switch (mode & S_IFMT) {
	case S_IFLNK:
		return LANDLOCK_ACCESS_FS_MAKE_SYM;
	case 0:
		/* A zero mode translates to S_IFREG. */
	case S_IFREG:
		return LANDLOCK_ACCESS_FS_MAKE_REG;
	case S_IFDIR:
		return LANDLOCK_ACCESS_FS_MAKE_DIR;
	case S_IFCHR:
		return LANDLOCK_ACCESS_FS_MAKE_CHAR;
	case S_IFBLK:
		return LANDLOCK_ACCESS_FS_MAKE_BLOCK;
	case S_IFIFO:
		return LANDLOCK_ACCESS_FS_MAKE_FIFO;
	case S_IFSOCK:
		return LANDLOCK_ACCESS_FS_MAKE_SOCK;
	default:
		WARN_ON_ONCE(1);
		return 0;
	}
}
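
/*
 * For example, mknod(2) with S_IFIFO maps to LANDLOCK_ACCESS_FS_MAKE_FIFO,
 * and a zero file type (which translates to S_IFREG, cf. the comment above)
 * maps to LANDLOCK_ACCESS_FS_MAKE_REG.
 */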

static inline access_mask_t maybe_remove(const struct dentry *const dentry)
{
	if (d_is_negative(dentry))
		return 0;
	return d_is_dir(dentry) ? LANDLOCK_ACCESS_FS_REMOVE_DIR :
				  LANDLOCK_ACCESS_FS_REMOVE_FILE;
}

/**
 * collect_domain_accesses - Walk through a file path and collect accesses
 *
 * @domain: Domain to check against.
 * @mnt_root: Last directory to check.
 * @dir: Directory to start the walk from.
 * @layer_masks_dom: Where to store the collected accesses.
 *
 * This helper is useful to begin a path walk from the @dir directory to a
 * @mnt_root directory used as a mount point. This mount point is the common
 * ancestor between the source and the destination of a renamed or linked
 * file. While walking from @dir to @mnt_root, we record all the domain's
 * allowed accesses in @layer_masks_dom.
 *
 * This is similar to check_access_path_dual() but much simpler because it only
 * handles walking on the same mount point and only checks one set of accesses.
 *
 * Returns:
 * - true if all the domain access rights are allowed for @dir;
 * - false if the walk reached @mnt_root.
 */
static bool collect_domain_accesses(
	const struct landlock_ruleset *const domain,
	const struct dentry *const mnt_root, struct dentry *dir,
	layer_mask_t (*const layer_masks_dom)[LANDLOCK_NUM_ACCESS_FS])
{
	unsigned long access_dom;
	bool ret = false;

	if (WARN_ON_ONCE(!domain || !mnt_root || !dir || !layer_masks_dom))
		return true;
	if (is_nouser_or_private(dir))
		return true;

	access_dom = init_layer_masks(domain, LANDLOCK_MASK_ACCESS_FS,
				      layer_masks_dom);

	dget(dir);
	while (true) {
		struct dentry *parent_dentry;

		/* Gets all layers allowing all domain accesses. */
		if (unmask_layers(find_rule(domain, dir), access_dom,
				  layer_masks_dom)) {
			/*
			 * Stops when all handled accesses are allowed by at
			 * least one rule in each layer.
			 */
			ret = true;
			break;
		}

		/* We should not reach a root other than @mnt_root. */
		if (dir == mnt_root || WARN_ON_ONCE(IS_ROOT(dir)))
			break;

		parent_dentry = dget_parent(dir);
		dput(dir);
		dir = parent_dentry;
	}
	dput(dir);
	return ret;
}

/**
 * current_check_refer_path - Check if a rename or link action is allowed
 *
 * @old_dentry: File or directory requested to be moved or linked.
 * @new_dir: Destination parent directory.
 * @new_dentry: Destination file or directory.
 * @removable: Set to true if it is a rename operation.
 * @exchange: Set to true if it is a rename operation with RENAME_EXCHANGE.
 *
 * Because of its unprivileged constraints, Landlock relies on file hierarchies
 * (and not only inodes) to tie access rights to files. Being able to link or
 * rename a file hierarchy brings some challenges. Indeed, moving or linking a
 * file (i.e. creating a new reference to an inode) can have an impact on the
 * actions allowed for a set of files if it would change its parent directory
 * (i.e. reparenting).
 *
 * To avoid trivial access right bypasses, Landlock first checks if the file or
 * directory requested to be moved would gain new access rights inherited from
 * its new hierarchy. Before returning any error, Landlock then checks that
 * the parent source hierarchy and the destination hierarchy would allow the
 * link or rename action. If it is not the case, an error with EACCES is
 * returned to inform user space that there is no way to remove or create the
 * requested source file type. If it should be allowed but the new inherited
 * access rights would be greater than the source access rights, then the
 * kernel returns an error with EXDEV. Prioritizing EACCES over EXDEV enables
 * user space to abort the whole operation if there is no way to do it, or to
 * manually copy the source to the destination if this remains allowed, e.g.
 * because file creation is allowed on the destination directory but not direct
 * linking.
 *
 * To achieve this goal, the kernel needs to compare two file hierarchies: the
 * one identifying the source file or directory (including itself), and the
 * destination one. This can be seen as a multilayer partial ordering problem.
 * The kernel walks through these paths and collects in a matrix the access
 * rights that are denied per layer. These matrices are then compared to see
 * if the destination one has more (or the same) restrictions as the source
 * one. If this is the case, the requested action will not return EXDEV, which
 * doesn't mean the action is allowed. The parent hierarchy of the source
 * (i.e. parent directory), and the destination hierarchy must also be checked
 * to verify that they explicitly allow such action (i.e. referencing,
 * creation and potentially removal rights). The kernel implementation is then
 * required to rely on potentially four matrices of access rights: one for the
 * source file or directory (i.e. the child), potentially another one for the
 * other source/destination (in case of RENAME_EXCHANGE), one for the source
 * parent hierarchy and a last one for the destination hierarchy. These
 * ephemeral matrices take some space on the stack, which limits the number of
 * layers to a deemed reasonable number: 16.
 *
 * Returns:
 * - 0 if access is allowed;
 * - -EXDEV if @old_dentry would inherit new access rights from @new_dir;
 * - -EACCES if file removal or creation is denied.
 */
static int current_check_refer_path(struct dentry *const old_dentry,
				    const struct path *const new_dir,
				    struct dentry *const new_dentry,
				    const bool removable, const bool exchange)
{
	const struct landlock_ruleset *const dom =
		landlock_get_current_domain();
	bool allow_parent1, allow_parent2;
	access_mask_t access_request_parent1, access_request_parent2;
	struct path mnt_dir;
	layer_mask_t layer_masks_parent1[LANDLOCK_NUM_ACCESS_FS],
		layer_masks_parent2[LANDLOCK_NUM_ACCESS_FS];

	if (!dom)
		return 0;
	if (WARN_ON_ONCE(dom->num_layers < 1))
		return -EACCES;
	if (unlikely(d_is_negative(old_dentry)))
		return -ENOENT;
	if (exchange) {
		if (unlikely(d_is_negative(new_dentry)))
			return -ENOENT;
		access_request_parent1 =
			get_mode_access(d_backing_inode(new_dentry)->i_mode);
	} else {
		access_request_parent1 = 0;
	}
	access_request_parent2 =
		get_mode_access(d_backing_inode(old_dentry)->i_mode);
	if (removable) {
		access_request_parent1 |= maybe_remove(old_dentry);
		access_request_parent2 |= maybe_remove(new_dentry);
	}

	/* The mount points are the same for old and new paths, cf. EXDEV. */
	if (old_dentry->d_parent == new_dir->dentry) {
		/*
		 * The LANDLOCK_ACCESS_FS_REFER access right is not required
		 * for same-directory referer (i.e. no reparenting).
		 */
		access_request_parent1 = init_layer_masks(
			dom, access_request_parent1 | access_request_parent2,
			&layer_masks_parent1);
		return check_access_path_dual(dom, new_dir,
					      access_request_parent1,
					      &layer_masks_parent1, NULL, 0,
					      NULL, NULL);
	}

	access_request_parent1 |= LANDLOCK_ACCESS_FS_REFER;
	access_request_parent2 |= LANDLOCK_ACCESS_FS_REFER;

	/* Saves the common mount point. */
	mnt_dir.mnt = new_dir->mnt;
	mnt_dir.dentry = new_dir->mnt->mnt_root;

	/* new_dir->dentry is equal to new_dentry->d_parent */
	allow_parent1 = collect_domain_accesses(dom, mnt_dir.dentry,
						old_dentry->d_parent,
						&layer_masks_parent1);
	allow_parent2 = collect_domain_accesses(
		dom, mnt_dir.dentry, new_dir->dentry, &layer_masks_parent2);

	if (allow_parent1 && allow_parent2)
		return 0;

	/*
	 * To be able to compare source and destination domain access rights,
	 * take into account the @old_dentry access rights aggregated with its
	 * parent access rights. This will be useful to compare with the
	 * destination parent access rights.
	 */
	return check_access_path_dual(dom, &mnt_dir, access_request_parent1,
				      &layer_masks_parent1, old_dentry,
				      access_request_parent2,
				      &layer_masks_parent2,
				      exchange ? new_dentry : NULL);
}
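
/*
 * Illustration with a hypothetical domain: renaming a regular file /tmp/a
 * to a nonexistent /var/b (same mount point, different parents) requires
 * LANDLOCK_ACCESS_FS_REMOVE_FILE and LANDLOCK_ACCESS_FS_REFER on the source
 * parent /tmp, plus LANDLOCK_ACCESS_FS_MAKE_REG and
 * LANDLOCK_ACCESS_FS_REFER on the destination parent /var, in addition to
 * passing the no_more_access() comparison performed by
 * check_access_path_dual().
 */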

/* Inode hooks */

static void hook_inode_free_security(struct inode *const inode)
{
	/*
	 * All inodes must already have been untied from their object by
	 * release_inode() or hook_sb_delete().
	 */
	WARN_ON_ONCE(landlock_inode(inode)->object);
}

/* Super-block hooks */

/*
 * Release the inodes used in a security policy.
 *
 * Cf. fsnotify_unmount_inodes() and invalidate_inodes()
 */
static void hook_sb_delete(struct super_block *const sb)
{
	struct inode *inode, *prev_inode = NULL;

	if (!landlock_initialized)
		return;

	spin_lock(&sb->s_inode_list_lock);
	list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
		struct landlock_object *object;

		/* Only handles referenced inodes. */
		if (!atomic_read(&inode->i_count))
			continue;

		/*
		 * Protects against concurrent modification of inode (e.g.
		 * from get_inode_object()).
		 */
		spin_lock(&inode->i_lock);
		/*
		 * Checks I_FREEING and I_WILL_FREE to protect against a race
		 * condition when release_inode() just called iput(), which
		 * could lead to a NULL dereference of inode->security or a
		 * second call to iput() for the same Landlock object. Also
		 * checks I_NEW because such an inode cannot be tied to an
		 * object.
		 */
		if (inode->i_state & (I_FREEING | I_WILL_FREE | I_NEW)) {
			spin_unlock(&inode->i_lock);
			continue;
		}

		rcu_read_lock();
		object = rcu_dereference(landlock_inode(inode)->object);
		if (!object) {
			rcu_read_unlock();
			spin_unlock(&inode->i_lock);
			continue;
		}
		/* Keeps a reference to this inode until the next loop walk. */
		__iget(inode);
		spin_unlock(&inode->i_lock);

		/*
		 * If there is no concurrent release_inode() ongoing, then we
		 * are in charge of calling iput() on this inode, otherwise we
		 * will just wait for it to finish.
		 */
		spin_lock(&object->lock);
		if (object->underobj == inode) {
			object->underobj = NULL;
			spin_unlock(&object->lock);
			rcu_read_unlock();

			/*
			 * Because object->underobj was not NULL,
			 * release_inode() and get_inode_object() guarantee
			 * that it is safe to reset
			 * landlock_inode(inode)->object while it is not NULL.
			 * It is therefore not necessary to lock inode->i_lock.
			 */
			rcu_assign_pointer(landlock_inode(inode)->object, NULL);
			/*
			 * At this point, we own the ihold() reference that was
			 * originally set up by get_inode_object() and the
			 * __iget() reference that we just set in this loop
			 * walk. Therefore the following call to iput() will
			 * not sleep nor drop the inode because there are now
			 * at least two references to it.
			 */
			iput(inode);
		} else {
			spin_unlock(&object->lock);
			rcu_read_unlock();
		}

		if (prev_inode) {
			/*
			 * At this point, we still own the __iget() reference
			 * that we just set in this loop walk. Therefore we
			 * can drop the list lock and know that the inode won't
			 * disappear from under us until the next loop walk.
			 */
			spin_unlock(&sb->s_inode_list_lock);
			/*
			 * We can now actually put the inode reference from the
			 * previous loop walk, which is not needed anymore.
			 */
			iput(prev_inode);
			cond_resched();
			spin_lock(&sb->s_inode_list_lock);
		}
		prev_inode = inode;
	}
	spin_unlock(&sb->s_inode_list_lock);

	/* Puts the inode reference from the last loop walk, if any. */
	if (prev_inode)
		iput(prev_inode);
	/* Waits for pending iput() in release_inode(). */
	wait_var_event(&landlock_superblock(sb)->inode_refs,
		       !atomic_long_read(&landlock_superblock(sb)->inode_refs));
}

/*
 * Because a Landlock security policy is defined according to the filesystem
 * topology (i.e. the mount namespace), changing it may grant access to files
 * not previously allowed.
 *
 * To make it simple, deny any filesystem topology modification by landlocked
 * processes. Non-landlocked processes may still change the namespace of a
 * landlocked process, but this kind of threat must be handled by a system-wide
 * access-control security policy.
 *
 * This could be lifted in the future if Landlock can safely handle mount
 * namespace updates requested by a landlocked process. Indeed, we could
 * update the current domain (which is currently read-only) by taking into
 * account the accesses of the source and the destination of a new mount point.
 * However, it would also require making all the child domains dynamically
 * inherit these new constraints. Anyway, for backward compatibility reasons,
 * a dedicated user space option would be required (e.g. as a ruleset flag).
 */
static int hook_sb_mount(const char *const dev_name,
			 const struct path *const path, const char *const type,
			 const unsigned long flags, void *const data)
{
	if (!landlock_get_current_domain())
		return 0;
	return -EPERM;
}

static int hook_move_mount(const struct path *const from_path,
			   const struct path *const to_path)
{
	if (!landlock_get_current_domain())
		return 0;
	return -EPERM;
}

/*
 * Removing a mount point may reveal a previously hidden file hierarchy, which
 * may then grant access to files that were previously forbidden.
 */
static int hook_sb_umount(struct vfsmount *const mnt, const int flags)
{
	if (!landlock_get_current_domain())
		return 0;
	return -EPERM;
}

static int hook_sb_remount(struct super_block *const sb, void *const mnt_opts)
{
	if (!landlock_get_current_domain())
		return 0;
	return -EPERM;
}

/*
 * pivot_root(2), like mount(2), changes the current mount namespace. It must
 * then be forbidden for a landlocked process.
 *
 * However, chroot(2) may be allowed because it only changes the relative root
 * directory of the current process. Moreover, it can be used to restrict the
 * view of the filesystem.
 */
static int hook_sb_pivotroot(const struct path *const old_path,
			     const struct path *const new_path)
{
	if (!landlock_get_current_domain())
		return 0;
	return -EPERM;
}

/* Path hooks */

static int hook_path_link(struct dentry *const old_dentry,
			  const struct path *const new_dir,
			  struct dentry *const new_dentry)
{
	return current_check_refer_path(old_dentry, new_dir, new_dentry, false,
					false);
}

static int hook_path_rename(const struct path *const old_dir,
			    struct dentry *const old_dentry,
			    const struct path *const new_dir,
			    struct dentry *const new_dentry,
			    const unsigned int flags)
{
	/* old_dir refers to old_dentry->d_parent and new_dir->mnt */
	return current_check_refer_path(old_dentry, new_dir, new_dentry, true,
					!!(flags & RENAME_EXCHANGE));
}

static int hook_path_mkdir(const struct path *const dir,
			   struct dentry *const dentry, const umode_t mode)
{
	return current_check_access_path(dir, LANDLOCK_ACCESS_FS_MAKE_DIR);
}

static int hook_path_mknod(const struct path *const dir,
			   struct dentry *const dentry, const umode_t mode,
			   const unsigned int dev)
{
	const struct landlock_ruleset *const dom =
		landlock_get_current_domain();

	if (!dom)
		return 0;
	return check_access_path(dom, dir, get_mode_access(mode));
}

static int hook_path_symlink(const struct path *const dir,
			     struct dentry *const dentry,
			     const char *const old_name)
{
	return current_check_access_path(dir, LANDLOCK_ACCESS_FS_MAKE_SYM);
}

static int hook_path_unlink(const struct path *const dir,
			    struct dentry *const dentry)
{
	return current_check_access_path(dir, LANDLOCK_ACCESS_FS_REMOVE_FILE);
}

static int hook_path_rmdir(const struct path *const dir,
			   struct dentry *const dentry)
{
	return current_check_access_path(dir, LANDLOCK_ACCESS_FS_REMOVE_DIR);
}

/* File hooks */

static inline access_mask_t get_file_access(const struct file *const file)
{
	access_mask_t access = 0;

	if (file->f_mode & FMODE_READ) {
		/* A directory can only be opened in read mode. */
		if (S_ISDIR(file_inode(file)->i_mode))
			return LANDLOCK_ACCESS_FS_READ_DIR;
		access = LANDLOCK_ACCESS_FS_READ_FILE;
	}
	if (file->f_mode & FMODE_WRITE)
		access |= LANDLOCK_ACCESS_FS_WRITE_FILE;
	/* __FMODE_EXEC is indeed part of f_flags, not f_mode. */
	if (file->f_flags & __FMODE_EXEC)
		access |= LANDLOCK_ACCESS_FS_EXECUTE;
	return access;
}
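
/*
 * For example, open(2) with O_RDWR on a regular file translates to
 * LANDLOCK_ACCESS_FS_READ_FILE | LANDLOCK_ACCESS_FS_WRITE_FILE, while an
 * execution attempt (which sets __FMODE_EXEC in f_flags) additionally
 * requires LANDLOCK_ACCESS_FS_EXECUTE.
 */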

static int hook_file_open(struct file *const file)
{
	const struct landlock_ruleset *const dom =
		landlock_get_current_domain();

	if (!dom)
		return 0;
	/*
	 * Because a file may be opened with O_PATH, get_file_access() may
	 * return 0. This case will be handled with a future Landlock
	 * evolution.
	 */
	return check_access_path(dom, &file->f_path, get_file_access(file));
}

static struct security_hook_list landlock_hooks[] __lsm_ro_after_init = {
	LSM_HOOK_INIT(inode_free_security, hook_inode_free_security),

	LSM_HOOK_INIT(sb_delete, hook_sb_delete),
	LSM_HOOK_INIT(sb_mount, hook_sb_mount),
	LSM_HOOK_INIT(move_mount, hook_move_mount),
	LSM_HOOK_INIT(sb_umount, hook_sb_umount),
	LSM_HOOK_INIT(sb_remount, hook_sb_remount),
	LSM_HOOK_INIT(sb_pivotroot, hook_sb_pivotroot),

	LSM_HOOK_INIT(path_link, hook_path_link),
	LSM_HOOK_INIT(path_rename, hook_path_rename),
	LSM_HOOK_INIT(path_mkdir, hook_path_mkdir),
	LSM_HOOK_INIT(path_mknod, hook_path_mknod),
	LSM_HOOK_INIT(path_symlink, hook_path_symlink),
	LSM_HOOK_INIT(path_unlink, hook_path_unlink),
	LSM_HOOK_INIT(path_rmdir, hook_path_rmdir),

	LSM_HOOK_INIT(file_open, hook_file_open),
};

__init void landlock_add_fs_hooks(void)
{
	security_add_hooks(landlock_hooks, ARRAY_SIZE(landlock_hooks),
			   LANDLOCK_NAME);
}