/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * AppArmor security module
 *
 * This file contains AppArmor label definitions
 *
 * Copyright 2017 Canonical Ltd.
 */

#ifndef __AA_LABEL_H
#define __AA_LABEL_H

#include <linux/atomic.h>
#include <linux/audit.h>
#include <linux/rbtree.h>
#include <linux/rcupdate.h>

#include "apparmor.h"
#include "lib.h"

struct aa_ns;

#define LOCAL_VEC_ENTRIES 8
#define DEFINE_VEC(T, V)						\
	struct aa_ ## T *(_ ## V ## _localtmp)[LOCAL_VEC_ENTRIES];	\
	struct aa_ ## T **(V)

#define vec_setup(T, V, N, GFP)						\
({									\
	if ((N) <= LOCAL_VEC_ENTRIES) {					\
		typeof(N) i;						\
		(V) = (_ ## V ## _localtmp);				\
		for (i = 0; i < (N); i++)				\
			(V)[i] = NULL;					\
	} else								\
		(V) = kzalloc(sizeof(struct aa_ ## T *) * (N), (GFP));	\
	(V) ? 0 : -ENOMEM;						\
})

#define vec_cleanup(T, V, N)						\
do {									\
	int i;								\
	for (i = 0; i < (N); i++) {					\
		if (!IS_ERR_OR_NULL((V)[i]))				\
			aa_put_ ## T((V)[i]);				\
	}								\
	if ((V) != _ ## V ## _localtmp)					\
		kfree(V);						\
} while (0)
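
/*
 * Illustrative sketch (editor's note, not part of the upstream header): a
 * caller needing a temporary vector of profile pointers pairs the three
 * macros above, where n, i and profiles are hypothetical locals:
 *
 *	DEFINE_VEC(profile, vec);
 *	int error = vec_setup(profile, vec, n, GFP_KERNEL);
 *
 *	if (error)
 *		return error;
 *	for (i = 0; i < n; i++)
 *		vec[i] = aa_get_profile(profiles[i]);
 *	...
 *	vec_cleanup(profile, vec, n);
 *
 * Small vectors (<= LOCAL_VEC_ENTRIES) use the on-stack array declared by
 * DEFINE_VEC; larger ones fall back to kzalloc(), so vec_cleanup() must
 * always run to drop the profile refs and free any allocation.
 */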

#define vec_last(VEC, SIZE) ((VEC)[(SIZE) - 1])
#define vec_ns(VEC, SIZE) (vec_last((VEC), (SIZE))->ns)
#define vec_labelset(VEC, SIZE) (&vec_ns((VEC), (SIZE))->labels)
#define cleanup_domain_vec(V, L) cleanup_label_vec((V), (L)->size)

struct aa_profile;
#define VEC_FLAG_TERMINATE 1
int aa_vec_unique(struct aa_profile **vec, int n, int flags);
struct aa_label *aa_vec_find_or_create_label(struct aa_profile **vec, int len,
					     gfp_t gfp);
#define aa_sort_and_merge_vec(N, V)					\
	aa_sort_and_merge_profiles((N), (struct aa_profile **)(V))


/* struct aa_labelset - set of labels for a namespace
 *
 * Labels are reference counted; aa_labelset does not contribute to label
 * reference counts. Once a label's last refcount is put it is removed from
 * the set.
 */
struct aa_labelset {
	rwlock_t lock;

	struct rb_root root;
};

#define __labelset_for_each(LS, N)					\
	for ((N) = rb_first(&(LS)->root); (N); (N) = rb_next(N))
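
/*
 * Illustrative sketch (editor's note, not part of the upstream header):
 * walking a labelset requires holding its lock; entries are rb nodes
 * embedded in struct aa_label, so rb_entry() recovers the label. ls and
 * flags below are hypothetical locals:
 *
 *	struct rb_node *node;
 *	struct aa_label *label;
 *	unsigned long flags;
 *
 *	read_lock_irqsave(&ls->lock, flags);
 *	__labelset_for_each(ls, node) {
 *		label = rb_entry(node, struct aa_label, node);
 *		...	(the set holds no counted ref; see __aa_get_label()
 *			 below for safely taking one)
 *	}
 *	read_unlock_irqrestore(&ls->lock, flags);
 */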

enum label_flags {
	FLAG_HAT = 1,			/* profile is a hat */
	FLAG_UNCONFINED = 2,		/* label unconfined only if all */
	FLAG_NULL = 4,			/* profile is null learning profile */
	FLAG_IX_ON_NAME_ERROR = 8,	/* fallback to ix on name lookup fail */
	FLAG_IMMUTIBLE = 0x10,		/* don't allow changes/replacement */
	FLAG_USER_DEFINED = 0x20,	/* user based profile - lower privs */
	FLAG_NO_LIST_REF = 0x40,	/* list doesn't keep profile ref */
	FLAG_NS_COUNT = 0x80,		/* carries NS ref count */
	FLAG_IN_TREE = 0x100,		/* label is in tree */
	FLAG_PROFILE = 0x200,		/* label is a profile */
	FLAG_EXPLICIT = 0x400,		/* explicit static label */
	FLAG_STALE = 0x800,		/* replaced/removed */
	FLAG_RENAMED = 0x1000,		/* label has renaming in it */
	FLAG_REVOKED = 0x2000,		/* label has revocation in it */

	/* These flags must correspond with PATH_flags */
	/* TODO: add new path flags */
};

struct aa_label;
struct aa_proxy {
	struct kref count;
	struct aa_label __rcu *label;
};

struct label_it {
	int i, j;
};

/* struct aa_label - lazy labeling struct
 * @count: ref count of active users
 * @node: rbtree position
 * @rcu: rcu callback struct
 * @proxy: is set to the label that replaced this label
 * @hname: text representation of the label (MAYBE_NULL)
 * @flags: stale and other flags - values may change under label set lock
 * @secid: secid that references this label
 * @size: number of entries in @vec[]
 * @vec: set of profiles for label, actual size determined by @size
 */
struct aa_label {
	struct kref count;
	struct rb_node node;
	struct rcu_head rcu;
	struct aa_proxy *proxy;
	__counted char *hname;
	long flags;
	u32 secid;
	int size;
	struct aa_profile *vec[];
};

#define last_error(E, FN)						\
do {									\
	int __subE = (FN);						\
	if (__subE)							\
		(E) = __subE;						\
} while (0)

#define label_isprofile(X) ((X)->flags & FLAG_PROFILE)
#define label_unconfined(X) ((X)->flags & FLAG_UNCONFINED)
#define unconfined(X) label_unconfined(X)
#define label_is_stale(X) ((X)->flags & FLAG_STALE)
#define __label_make_stale(X) ((X)->flags |= FLAG_STALE)
#define labels_ns(X) (vec_ns(&((X)->vec[0]), (X)->size))
#define labels_set(X) (&labels_ns(X)->labels)
#define labels_view(X) labels_ns(X)
#define labels_profile(X) ((X)->vec[(X)->size - 1])
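
/*
 * Illustrative sketch (editor's note, not part of the upstream header): code
 * acting on a label's namespace uses these accessors rather than touching
 * ->vec directly; labels_ns() resolves to the namespace of the last profile
 * in the label's vector and labels_profile() to that profile, e.g.
 *
 *	struct aa_ns *ns = labels_ns(label);
 *	struct aa_labelset *ls = labels_set(label);
 */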


int aa_label_next_confined(struct aa_label *l, int i);

/* for each profile in a label */
#define label_for_each(I, L, P)						\
	for ((I).i = 0; ((P) = (L)->vec[(I).i]); ++((I).i))

/* assumes break/goto ended label_for_each */
#define label_for_each_cont(I, L, P)					\
	for (++((I).i); ((P) = (L)->vec[(I).i]); ++((I).i))
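
/*
 * Illustrative sketch (editor's note, not part of the upstream header): the
 * label's profile vector is NULL terminated, so iteration stops when a NULL
 * entry is read. A typical walk looks like:
 *
 *	struct aa_profile *profile;
 *	struct label_it it;
 *
 *	label_for_each(it, label, profile) {
 *		...	(profile visits each entry of label->vec in order)
 *	}
 */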

/* advance the (i, j) pair iterator; written as a statement expression so it
 * can be used in the increment clause of label_for_each_comb() below
 */
#define next_comb(I, L1, L2)						\
({									\
	(I).j++;							\
	if ((I).j >= (L2)->size) {					\
		(I).i++;						\
		(I).j = 0;						\
	}								\
})


/* for each combination of P1 in L1, and P2 in L2 */
#define label_for_each_comb(I, L1, L2, P1, P2)				\
	for ((I).i = (I).j = 0;						\
	     ((P1) = (L1)->vec[(I).i]) && ((P2) = (L2)->vec[(I).j]);	\
	     next_comb(I, L1, L2))

#define fn_for_each_comb(L1, L2, P1, P2, FN)				\
({									\
	struct label_it i;						\
	int __E = 0;							\
	label_for_each_comb(i, (L1), (L2), (P1), (P2)) {		\
		last_error(__E, (FN));					\
	}								\
	__E;								\
})

/* for each profile that is enforcing confinement in a label */
#define label_for_each_confined(I, L, P)				\
	for ((I).i = aa_label_next_confined((L), 0);			\
	     ((P) = (L)->vec[(I).i]);					\
	     (I).i = aa_label_next_confined((L), (I).i + 1))

#define label_for_each_in_merge(I, A, B, P)				\
	for ((I).i = (I).j = 0;						\
	     ((P) = aa_label_next_in_merge(&(I), (A), (B)));		\
	     )

#define label_for_each_not_in_set(I, SET, SUB, P)			\
	for ((I).i = (I).j = 0;						\
	     ((P) = __aa_label_next_not_in_set(&(I), (SET), (SUB)));	\
	     )

#define next_in_ns(i, NS, L)						\
({									\
	typeof(i) ___i = (i);						\
	while ((L)->vec[___i] && (L)->vec[___i]->ns != (NS))		\
		(___i)++;						\
	(___i);								\
})

#define label_for_each_in_ns(I, NS, L, P)				\
	for ((I).i = next_in_ns(0, (NS), (L));				\
	     ((P) = (L)->vec[(I).i]);					\
	     (I).i = next_in_ns((I).i + 1, (NS), (L)))

#define fn_for_each_in_ns(L, P, FN)					\
({									\
	struct label_it __i;						\
	struct aa_ns *__ns = labels_ns(L);				\
	int __E = 0;							\
	label_for_each_in_ns(__i, __ns, (L), (P)) {			\
		last_error(__E, (FN));					\
	}								\
	__E;								\
})


#define fn_for_each_XXX(L, P, FN, ...)					\
({									\
	struct label_it i;						\
	int __E = 0;							\
	label_for_each ## __VA_ARGS__(i, (L), (P)) {			\
		last_error(__E, (FN));					\
	}								\
	__E;								\
})

#define fn_for_each(L, P, FN) fn_for_each_XXX(L, P, FN)
#define fn_for_each_confined(L, P, FN) fn_for_each_XXX(L, P, FN, _confined)
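
/*
 * Illustrative sketch (editor's note, not part of the upstream header):
 * fn_for_each() evaluates the expression FN for every profile in the label
 * and, via last_error(), yields the last non-zero result, so a caller can
 * check each profile yet report a single error. match_component(), tp and
 * request below are hypothetical:
 *
 *	struct aa_profile *profile;
 *	int error;
 *
 *	error = fn_for_each(label, profile,
 *			match_component(profile, tp, request));
 *
 * fn_for_each_confined() is the same pattern restricted to profiles that are
 * enforcing confinement.
 */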

#define fn_for_each2_XXX(L1, L2, P, FN, ...)				\
({									\
	struct label_it i;						\
	int __E = 0;							\
	label_for_each ## __VA_ARGS__(i, (L1), (L2), (P)) {		\
		last_error(__E, (FN));					\
	}								\
	__E;								\
})

#define fn_for_each_in_merge(L1, L2, P, FN)				\
	fn_for_each2_XXX((L1), (L2), P, FN, _in_merge)
#define fn_for_each_not_in_set(L1, L2, P, FN)				\
	fn_for_each2_XXX((L1), (L2), P, FN, _not_in_set)

#define LABEL_MEDIATES(L, C)						\
({									\
	struct aa_profile *profile;					\
	struct label_it i;						\
	int ret = 0;							\
	label_for_each(i, (L), profile) {				\
		if (PROFILE_MEDIATES(profile, (C))) {			\
			ret = 1;					\
			break;						\
		}							\
	}								\
	ret;								\
})
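
/*
 * Illustrative sketch (editor's note, not part of the upstream header):
 * LABEL_MEDIATES() is the usual early-exit check before building a query
 * for a mediation class, with AA_CLASS_FILE standing in for whatever class
 * the caller is testing:
 *
 *	if (!LABEL_MEDIATES(label, AA_CLASS_FILE))
 *		return 0;
 */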


void aa_labelset_destroy(struct aa_labelset *ls);
void aa_labelset_init(struct aa_labelset *ls);
void __aa_labelset_update_subtree(struct aa_ns *ns);

void aa_label_destroy(struct aa_label *label);
void aa_label_free(struct aa_label *label);
void aa_label_kref(struct kref *kref);
bool aa_label_init(struct aa_label *label, int size, gfp_t gfp);
struct aa_label *aa_label_alloc(int size, struct aa_proxy *proxy, gfp_t gfp);

bool aa_label_is_subset(struct aa_label *set, struct aa_label *sub);
bool aa_label_is_unconfined_subset(struct aa_label *set, struct aa_label *sub);
struct aa_profile *__aa_label_next_not_in_set(struct label_it *I,
					      struct aa_label *set,
					      struct aa_label *sub);
bool aa_label_remove(struct aa_label *label);
struct aa_label *aa_label_insert(struct aa_labelset *ls, struct aa_label *l);
bool aa_label_replace(struct aa_label *old, struct aa_label *new);
bool aa_label_make_newest(struct aa_labelset *ls, struct aa_label *old,
			  struct aa_label *new);

struct aa_label *aa_label_find(struct aa_label *l);

struct aa_profile *aa_label_next_in_merge(struct label_it *I,
					  struct aa_label *a,
					  struct aa_label *b);
struct aa_label *aa_label_find_merge(struct aa_label *a, struct aa_label *b);
struct aa_label *aa_label_merge(struct aa_label *a, struct aa_label *b,
				gfp_t gfp);


bool aa_update_label_name(struct aa_ns *ns, struct aa_label *label, gfp_t gfp);

#define FLAGS_NONE 0
#define FLAG_SHOW_MODE 1
#define FLAG_VIEW_SUBNS 2
#define FLAG_HIDDEN_UNCONFINED 4
#define FLAG_ABS_ROOT 8
int aa_label_snxprint(char *str, size_t size, struct aa_ns *view,
		      struct aa_label *label, int flags);
int aa_label_asxprint(char **strp, struct aa_ns *ns, struct aa_label *label,
		      int flags, gfp_t gfp);
int aa_label_acntsxprint(char __counted **strp, struct aa_ns *ns,
			 struct aa_label *label, int flags, gfp_t gfp);
void aa_label_xaudit(struct audit_buffer *ab, struct aa_ns *ns,
		     struct aa_label *label, int flags, gfp_t gfp);
void aa_label_seq_xprint(struct seq_file *f, struct aa_ns *ns,
			 struct aa_label *label, int flags, gfp_t gfp);
void aa_label_xprintk(struct aa_ns *ns, struct aa_label *label, int flags,
		      gfp_t gfp);
void aa_label_audit(struct audit_buffer *ab, struct aa_label *label, gfp_t gfp);
void aa_label_seq_print(struct seq_file *f, struct aa_label *label, gfp_t gfp);
void aa_label_printk(struct aa_label *label, gfp_t gfp);

struct aa_label *aa_label_strn_parse(struct aa_label *base, const char *str,
				     size_t n, gfp_t gfp, bool create,
				     bool force_stack);
struct aa_label *aa_label_parse(struct aa_label *base, const char *str,
				gfp_t gfp, bool create, bool force_stack);

static inline const char *aa_label_strn_split(const char *str, int n)
{
	const char *pos;
	unsigned int state;

	state = aa_dfa_matchn_until(stacksplitdfa, DFA_START, str, n, &pos);
	if (!ACCEPT_TABLE(stacksplitdfa)[state])
		return NULL;

	/* back up over the 3 character "//&" stack separator that was just
	 * matched, returning a pointer to its start
	 */
	return pos - 3;
}

static inline const char *aa_label_str_split(const char *str)
{
	const char *pos;
	unsigned int state;

	state = aa_dfa_match_until(stacksplitdfa, DFA_START, str, &pos);
	if (!ACCEPT_TABLE(stacksplitdfa)[state])
		return NULL;

	/* as above, return a pointer to the start of the "//&" separator */
	return pos - 3;
}



struct aa_perms;
int aa_label_match(struct aa_profile *profile, struct aa_label *label,
		   unsigned int state, bool subns, u32 request,
		   struct aa_perms *perms);


/**
 * __aa_get_label - get a reference count to uncounted label reference
 * @l: reference to get a count on
 *
 * Returns: pointer to reference OR NULL if the race is lost and the
 *          reference is already being released.
 * Requires: lock held, and the return code MUST be checked
 */
static inline struct aa_label *__aa_get_label(struct aa_label *l)
{
	if (l && kref_get_unless_zero(&l->count))
		return l;

	return NULL;
}
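
/*
 * Illustrative sketch (editor's note, not part of the upstream header):
 * __aa_get_label() is for pointers that do not themselves hold a count,
 * e.g. an entry found while holding the labelset lock. The result must be
 * checked because the label may already be on its way to being freed.
 * ls, flags and candidate below are hypothetical locals:
 *
 *	read_lock_irqsave(&ls->lock, flags);
 *	label = __aa_get_label(candidate);
 *	read_unlock_irqrestore(&ls->lock, flags);
 *	if (!label)
 *		return NULL;	(lost the race, treat as not found)
 */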

static inline struct aa_label *aa_get_label(struct aa_label *l)
{
	if (l)
		kref_get(&(l->count));

	return l;
}


/**
 * aa_get_label_rcu - increment refcount on a label that can be replaced
 * @l: pointer to label that can be replaced (NOT NULL)
 *
 * Returns: pointer to a refcounted label, or NULL if no label is set.
 */
static inline struct aa_label *aa_get_label_rcu(struct aa_label __rcu **l)
{
	struct aa_label *c;

	rcu_read_lock();
	do {
		c = rcu_dereference(*l);
	} while (c && !kref_get_unless_zero(&c->count));
	rcu_read_unlock();

	return c;
}

/**
 * aa_get_newest_label - find the newest version of @l
 * @l: the label to check for newer versions of
 *
 * Returns: refcounted newest version of @l taking into account
 *          replacement, renames and removals.
 *          If @l is not stale, a new reference to @l itself is returned.
 */
static inline struct aa_label *aa_get_newest_label(struct aa_label *l)
{
	if (!l)
		return NULL;

	if (label_is_stale(l)) {
		struct aa_label *tmp;

		AA_BUG(!l->proxy);
		AA_BUG(!l->proxy->label);
		/* BUG: the only way this can happen is if @l's ref count and
		 * its replacement's ref count have both gone to 0 and they
		 * are on their way to destruction, i.e. we have a
		 * refcounting error
		 */
		tmp = aa_get_label_rcu(&l->proxy->label);
		AA_BUG(!tmp);

		return tmp;
	}

	return aa_get_label(l);
}

static inline void aa_put_label(struct aa_label *l)
{
	if (l)
		kref_put(&l->count, aa_label_kref);
}
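
/*
 * Illustrative sketch (editor's note, not part of the upstream header):
 * references taken with aa_get_label()/aa_get_newest_label() must be
 * balanced with aa_put_label(). A common pattern is refreshing a possibly
 * stale reference:
 *
 *	if (label_is_stale(label)) {
 *		struct aa_label *new = aa_get_newest_label(label);
 *
 *		aa_put_label(label);
 *		label = new;
 *	}
 */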


struct aa_proxy *aa_alloc_proxy(struct aa_label *l, gfp_t gfp);
void aa_proxy_kref(struct kref *kref);

static inline struct aa_proxy *aa_get_proxy(struct aa_proxy *proxy)
{
	if (proxy)
		kref_get(&(proxy->count));

	return proxy;
}

static inline void aa_put_proxy(struct aa_proxy *proxy)
{
	if (proxy)
		kref_put(&proxy->count, aa_proxy_kref);
}

void __aa_proxy_redirect(struct aa_label *orig, struct aa_label *new);

#endif /* __AA_LABEL_H */