// SPDX-License-Identifier: GPL-2.0-only
/*
 * AppArmor security module
 *
 * This file contains AppArmor functions for unpacking policy loaded from
 * userspace.
 *
 * Copyright (C) 1998-2008 Novell/SUSE
 * Copyright 2009-2010 Canonical Ltd.
 *
 * AppArmor uses a serialized binary format for loading policy. To find
 * policy format documentation see Documentation/admin-guide/LSM/apparmor.rst
 * All policy is validated before it is used.
 */

#include <asm/unaligned.h>
#include <linux/ctype.h>
#include <linux/errno.h>
#include <linux/zlib.h>

#include "include/apparmor.h"
#include "include/audit.h"
#include "include/cred.h"
#include "include/crypto.h"
#include "include/match.h"
#include "include/path.h"
#include "include/policy.h"
#include "include/policy_unpack.h"

#define K_ABI_MASK 0x3ff
#define FORCE_COMPLAIN_FLAG 0x800
#define VERSION_LT(X, Y) (((X) & K_ABI_MASK) < ((Y) & K_ABI_MASK))
#define VERSION_GT(X, Y) (((X) & K_ABI_MASK) > ((Y) & K_ABI_MASK))

#define v5 5	/* base version */
#define v6 6	/* per entry policydb mediation check */
#define v7 7
#define v8 8	/* full network masking */

/*
 * The AppArmor interface treats data as a type byte followed by the
 * actual data.  The interface has the notion of a named entry
 * which has a name (AA_NAME typecode followed by name string) followed by
 * the entry's typecode and data.  Named types allow for optional
 * elements and extensions to be added and tested for without breaking
 * backwards compatibility.
 */
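
/*
 * As an illustration only (derived from the unpack helpers below, not a
 * normative spec): a named u32 entry such as "version" is laid out as
 *
 *   AA_NAME, u16 len (LE, includes trailing NUL), "version\0",
 *   AA_U32, u32 value (LE)
 *
 * unpack_nameX() consumes the optional AA_NAME header and the typecode,
 * then unpack_u32() reads the little-endian value that follows.
 */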

enum aa_code {
	AA_U8,
	AA_U16,
	AA_U32,
	AA_U64,
	AA_NAME,	/* same as string except it is the item's name */
	AA_STRING,
	AA_BLOB,
	AA_STRUCT,
	AA_STRUCTEND,
	AA_LIST,
	AA_LISTEND,
	AA_ARRAY,
	AA_ARRAYEND,
};

/*
 * aa_ext tracks the read position in the buffer containing the serialized
 * profile.  The data is copied into a kernel buffer in apparmorfs and then
 * handed off to the unpack routines.
 */
struct aa_ext {
	void *start;
	void *end;
	void *pos;	/* pointer to current position in the buffer */
	u32 version;
};
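
/*
 * A minimal sketch of how the read head is set up over the copied load
 * data (see aa_unpack() at the bottom of this file):
 *
 *	struct aa_ext e = {
 *		.start = udata->data,
 *		.end = udata->data + udata->size,
 *		.pos = udata->data,
 *	};
 */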

/* audit callback for unpack fields */
static void audit_cb(struct audit_buffer *ab, void *va)
{
	struct common_audit_data *sa = va;

	if (aad(sa)->iface.ns) {
		audit_log_format(ab, " ns=");
		audit_log_untrustedstring(ab, aad(sa)->iface.ns);
	}
	if (aad(sa)->name) {
		audit_log_format(ab, " name=");
		audit_log_untrustedstring(ab, aad(sa)->name);
	}
	if (aad(sa)->iface.pos)
		audit_log_format(ab, " offset=%ld", aad(sa)->iface.pos);
}

/**
 * audit_iface - do audit message for policy unpacking/load/replace/remove
 * @new: profile if it has been allocated (MAYBE NULL)
 * @ns_name: name of the ns the profile is to be loaded to (MAY BE NULL)
 * @name: name of the profile being manipulated (MAYBE NULL)
 * @info: any extra info about the failure (MAYBE NULL)
 * @e: buffer position info
 * @error: error code
 *
 * Returns: %0 or error
 */
static int audit_iface(struct aa_profile *new, const char *ns_name,
		       const char *name, const char *info, struct aa_ext *e,
		       int error)
{
	struct aa_profile *profile = labels_profile(aa_current_raw_label());
	DEFINE_AUDIT_DATA(sa, LSM_AUDIT_DATA_NONE, NULL);
	if (e)
		aad(&sa)->iface.pos = e->pos - e->start;
	aad(&sa)->iface.ns = ns_name;
	if (new)
		aad(&sa)->name = new->base.hname;
	else
		aad(&sa)->name = name;
	aad(&sa)->info = info;
	aad(&sa)->error = error;

	return aa_audit(AUDIT_APPARMOR_STATUS, profile, &sa, audit_cb);
}

void __aa_loaddata_update(struct aa_loaddata *data, long revision)
{
	AA_BUG(!data);
	AA_BUG(!data->ns);
	AA_BUG(!data->dents[AAFS_LOADDATA_REVISION]);
	AA_BUG(!mutex_is_locked(&data->ns->lock));
	AA_BUG(data->revision > revision);

	data->revision = revision;
	d_inode(data->dents[AAFS_LOADDATA_DIR])->i_mtime =
		current_time(d_inode(data->dents[AAFS_LOADDATA_DIR]));
	d_inode(data->dents[AAFS_LOADDATA_REVISION])->i_mtime =
		current_time(d_inode(data->dents[AAFS_LOADDATA_REVISION]));
}

bool aa_rawdata_eq(struct aa_loaddata *l, struct aa_loaddata *r)
{
	if (l->size != r->size)
		return false;
	if (l->compressed_size != r->compressed_size)
		return false;
	if (aa_g_hash_policy && memcmp(l->hash, r->hash, aa_hash_size()) != 0)
		return false;
	return memcmp(l->data, r->data, r->compressed_size ?: r->size) == 0;
}

/*
 * Taking the ns mutex lock is NOT safe in most places that put_loaddata
 * is called from, so freeing has to be deferred to a workqueue.
 */
static void do_loaddata_free(struct work_struct *work)
{
	struct aa_loaddata *d = container_of(work, struct aa_loaddata, work);
	struct aa_ns *ns = aa_get_ns(d->ns);

	if (ns) {
		mutex_lock_nested(&ns->lock, ns->level);
		__aa_fs_remove_rawdata(d);
		mutex_unlock(&ns->lock);
		aa_put_ns(ns);
	}

	kfree_sensitive(d->hash);
	kfree_sensitive(d->name);
	kvfree(d->data);
	kfree_sensitive(d);
}

void aa_loaddata_kref(struct kref *kref)
{
	struct aa_loaddata *d = container_of(kref, struct aa_loaddata, count);

	if (d) {
		INIT_WORK(&d->work, do_loaddata_free);
		schedule_work(&d->work);
	}
}

struct aa_loaddata *aa_loaddata_alloc(size_t size)
{
	struct aa_loaddata *d;

	d = kzalloc(sizeof(*d), GFP_KERNEL);
	if (d == NULL)
		return ERR_PTR(-ENOMEM);
	d->data = kvzalloc(size, GFP_KERNEL);
	if (!d->data) {
		kfree(d);
		return ERR_PTR(-ENOMEM);
	}
	kref_init(&d->count);
	INIT_LIST_HEAD(&d->list);

	return d;
}

/* test if read will be in packed data bounds */
static bool inbounds(struct aa_ext *e, size_t size)
{
	return (size <= e->end - e->pos);
}

static void *kvmemdup(const void *src, size_t len)
{
	void *p = kvmalloc(len, GFP_KERNEL);

	if (p)
		memcpy(p, src, len);
	return p;
}

/**
 * unpack_u16_chunk - test and do bounds checking for a u16 size based chunk
 * @e: serialized data read head (NOT NULL)
 * @chunk: start address for chunk of data (NOT NULL)
 *
 * Returns: the size of chunk found with the read head at the end of the chunk.
 */
static size_t unpack_u16_chunk(struct aa_ext *e, char **chunk)
{
	size_t size = 0;
	void *pos = e->pos;

	if (!inbounds(e, sizeof(u16)))
		goto fail;
	size = le16_to_cpu(get_unaligned((__le16 *) e->pos));
	e->pos += sizeof(__le16);
	if (!inbounds(e, size))
		goto fail;
	*chunk = e->pos;
	e->pos += size;
	return size;

fail:
	e->pos = pos;
	return 0;
}

/* unpack control byte */
static bool unpack_X(struct aa_ext *e, enum aa_code code)
{
	if (!inbounds(e, 1))
		return false;
	if (*(u8 *) e->pos != code)
		return false;
	e->pos++;
	return true;
}

/**
 * unpack_nameX - check if the next element is of type X with a name of @name
 * @e: serialized data extent information (NOT NULL)
 * @code: type code
 * @name: name to match to the serialized element. (MAYBE NULL)
 *
 * check that the next serialized data element is of type X and has a tag
 * name @name.  If @name is specified then there must be a matching
 * name element in the stream.  If @name is NULL any name element will be
 * skipped and only the typecode will be tested.
 *
 * Returns true on success (both type code and name tests match) and the read
 * head is advanced past the headers
 *
 * Returns: false if either match fails, the read head does not move
 */
static bool unpack_nameX(struct aa_ext *e, enum aa_code code, const char *name)
{
	/*
	 * May need to reset pos if name or type doesn't match
	 */
	void *pos = e->pos;
	/*
	 * Check for presence of a tag name; if present the AA_NAME tag
	 * is followed by a u16 size for the name.
	 */
	if (unpack_X(e, AA_NAME)) {
		char *tag = NULL;
		size_t size = unpack_u16_chunk(e, &tag);
		/* if a name is specified it must match. otherwise skip tag */
		if (name && (!size || tag[size-1] != '\0' || strcmp(name, tag)))
			goto fail;
	} else if (name) {
		/* if a name is specified and there is no name tag fail */
		goto fail;
	}

	/* now check if type code matches */
	if (unpack_X(e, code))
		return true;

fail:
	e->pos = pos;
	return false;
}

static bool unpack_u8(struct aa_ext *e, u8 *data, const char *name)
{
	void *pos = e->pos;

	if (unpack_nameX(e, AA_U8, name)) {
		if (!inbounds(e, sizeof(u8)))
			goto fail;
		if (data)
			*data = *((u8 *)e->pos);
		e->pos += sizeof(u8);
		return true;
	}

fail:
	e->pos = pos;
	return false;
}

static bool unpack_u32(struct aa_ext *e, u32 *data, const char *name)
{
	void *pos = e->pos;

	if (unpack_nameX(e, AA_U32, name)) {
		if (!inbounds(e, sizeof(u32)))
			goto fail;
		if (data)
			*data = le32_to_cpu(get_unaligned((__le32 *) e->pos));
		e->pos += sizeof(u32);
		return true;
	}

fail:
	e->pos = pos;
	return false;
}

static bool unpack_u64(struct aa_ext *e, u64 *data, const char *name)
{
	void *pos = e->pos;

	if (unpack_nameX(e, AA_U64, name)) {
		if (!inbounds(e, sizeof(u64)))
			goto fail;
		if (data)
			*data = le64_to_cpu(get_unaligned((__le64 *) e->pos));
		e->pos += sizeof(u64);
		return true;
	}

fail:
	e->pos = pos;
	return false;
}

static size_t unpack_array(struct aa_ext *e, const char *name)
{
	void *pos = e->pos;

	if (unpack_nameX(e, AA_ARRAY, name)) {
		int size;
		if (!inbounds(e, sizeof(u16)))
			goto fail;
		size = (int)le16_to_cpu(get_unaligned((__le16 *) e->pos));
		e->pos += sizeof(u16);
		return size;
	}

fail:
	e->pos = pos;
	return 0;
}

static size_t unpack_blob(struct aa_ext *e, char **blob, const char *name)
{
	void *pos = e->pos;

	if (unpack_nameX(e, AA_BLOB, name)) {
		u32 size;
		if (!inbounds(e, sizeof(u32)))
			goto fail;
		size = le32_to_cpu(get_unaligned((__le32 *) e->pos));
		e->pos += sizeof(u32);
		if (inbounds(e, (size_t) size)) {
			*blob = e->pos;
			e->pos += size;
			return size;
		}
	}

fail:
	e->pos = pos;
	return 0;
}

static int unpack_str(struct aa_ext *e, const char **string, const char *name)
{
	char *src_str;
	size_t size = 0;
	void *pos = e->pos;
	*string = NULL;
	if (unpack_nameX(e, AA_STRING, name)) {
		size = unpack_u16_chunk(e, &src_str);
		if (size) {
			/* strings are null terminated, length is size - 1 */
			if (src_str[size - 1] != 0)
				goto fail;
			*string = src_str;

			return size;
		}
	}

fail:
	e->pos = pos;
	return 0;
}

static int unpack_strdup(struct aa_ext *e, char **string, const char *name)
{
	const char *tmp;
	void *pos = e->pos;
	int res = unpack_str(e, &tmp, name);
	*string = NULL;

	if (!res)
		return 0;

	*string = kmemdup(tmp, res, GFP_KERNEL);
	if (!*string) {
		e->pos = pos;
		return 0;
	}

	return res;
}


/**
 * unpack_dfa - unpack a file rule dfa
 * @e: serialized data extent information (NOT NULL)
 *
 * returns dfa or ERR_PTR or NULL if no dfa
 */
static struct aa_dfa *unpack_dfa(struct aa_ext *e)
{
	char *blob = NULL;
	size_t size;
	struct aa_dfa *dfa = NULL;

	size = unpack_blob(e, &blob, "aadfa");
	if (size) {
		/*
		 * The dfa is aligned within the blob to an 8 byte boundary
		 * from the beginning of the stream; compute the alignment
		 * adjustment needed by the dfa unpack code.
		 */
		size_t sz = blob - (char *) e->start -
			((e->pos - e->start) & 7);
		size_t pad = ALIGN(sz, 8) - sz;
		int flags = TO_ACCEPT1_FLAG(YYTD_DATA32) |
			TO_ACCEPT2_FLAG(YYTD_DATA32) | DFA_FLAG_VERIFY_STATES;
		dfa = aa_dfa_unpack(blob + pad, size - pad, flags);

		if (IS_ERR(dfa))
			return dfa;

	}

	return dfa;
}

/**
 * unpack_trans_table - unpack a profile transition table
 * @e: serialized data extent information (NOT NULL)
 * @profile: profile to add the accept table to (NOT NULL)
 *
 * Returns: true if table successfully unpacked
 */
static bool unpack_trans_table(struct aa_ext *e, struct aa_profile *profile)
{
	void *saved_pos = e->pos;

	/* exec table is optional */
	if (unpack_nameX(e, AA_STRUCT, "xtable")) {
		int i, size;

		size = unpack_array(e, NULL);
		/* currently 4 exec bits and entries 0-3 are reserved iupcx */
		if (size > 16 - 4)
			goto fail;
		profile->file.trans.table = kcalloc(size, sizeof(char *),
						    GFP_KERNEL);
		if (!profile->file.trans.table)
			goto fail;

		profile->file.trans.size = size;
		for (i = 0; i < size; i++) {
			char *str;
			int c, j, pos, size2 = unpack_strdup(e, &str, NULL);
			/* unpack_strdup verifies that the last character is
			 * a null termination byte.
			 */
			if (!size2)
				goto fail;
			profile->file.trans.table[i] = str;
			/* verify that name doesn't start with space */
			if (isspace(*str))
				goto fail;

			/* count the number of internal \0 */
			for (c = j = 0; j < size2 - 1; j++) {
				if (!str[j]) {
					pos = j;
					c++;
				}
			}
			if (*str == ':') {
				/* first character after : must be valid */
				if (!str[1])
					goto fail;
				/* beginning with : requires an embedded \0,
				 * verify that exactly 1 internal \0 exists
				 * trailing \0 already verified by unpack_strdup
				 *
				 * convert \0 back to : for label_parse
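				 * e.g. an entry ":ns\0profile" is restored
				 * to ":ns:profile" here (illustrative)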
				 */
				if (c == 1)
					str[pos] = ':';
				else if (c > 1)
					goto fail;
			} else if (c)
				/* fail - all other cases with embedded \0 */
				goto fail;
		}
		if (!unpack_nameX(e, AA_ARRAYEND, NULL))
			goto fail;
		if (!unpack_nameX(e, AA_STRUCTEND, NULL))
			goto fail;
	}
	return true;

fail:
	aa_free_domain_entries(&profile->file.trans);
	e->pos = saved_pos;
	return false;
}

static bool unpack_xattrs(struct aa_ext *e, struct aa_profile *profile)
{
	void *pos = e->pos;

	if (unpack_nameX(e, AA_STRUCT, "xattrs")) {
		int i, size;

		size = unpack_array(e, NULL);
		profile->xattr_count = size;
		profile->xattrs = kcalloc(size, sizeof(char *), GFP_KERNEL);
		if (!profile->xattrs)
			goto fail;
		for (i = 0; i < size; i++) {
			if (!unpack_strdup(e, &profile->xattrs[i], NULL))
				goto fail;
		}
		if (!unpack_nameX(e, AA_ARRAYEND, NULL))
			goto fail;
		if (!unpack_nameX(e, AA_STRUCTEND, NULL))
			goto fail;
	}

	return true;

fail:
	e->pos = pos;
	return false;
}

static bool unpack_secmark(struct aa_ext *e, struct aa_profile *profile)
{
	void *pos = e->pos;
	int i, size;

	if (unpack_nameX(e, AA_STRUCT, "secmark")) {
		size = unpack_array(e, NULL);

		profile->secmark = kcalloc(size, sizeof(struct aa_secmark),
					   GFP_KERNEL);
		if (!profile->secmark)
			goto fail;

		profile->secmark_count = size;

		for (i = 0; i < size; i++) {
			if (!unpack_u8(e, &profile->secmark[i].audit, NULL))
				goto fail;
			if (!unpack_u8(e, &profile->secmark[i].deny, NULL))
				goto fail;
			if (!unpack_strdup(e, &profile->secmark[i].label, NULL))
				goto fail;
		}
		if (!unpack_nameX(e, AA_ARRAYEND, NULL))
			goto fail;
		if (!unpack_nameX(e, AA_STRUCTEND, NULL))
			goto fail;
	}

	return true;

fail:
	if (profile->secmark) {
		for (i = 0; i < size; i++)
			kfree(profile->secmark[i].label);
		kfree(profile->secmark);
		profile->secmark_count = 0;
		profile->secmark = NULL;
	}

	e->pos = pos;
	return false;
}

static bool unpack_rlimits(struct aa_ext *e, struct aa_profile *profile)
{
	void *pos = e->pos;

	/* rlimits are optional */
	if (unpack_nameX(e, AA_STRUCT, "rlimits")) {
		int i, size;
		u32 tmp = 0;
		if (!unpack_u32(e, &tmp, NULL))
			goto fail;
		profile->rlimits.mask = tmp;

		size = unpack_array(e, NULL);
		if (size > RLIM_NLIMITS)
			goto fail;
		for (i = 0; i < size; i++) {
			u64 tmp2 = 0;
			int a = aa_map_resource(i);
			if (!unpack_u64(e, &tmp2, NULL))
				goto fail;
			profile->rlimits.limits[a].rlim_max = tmp2;
		}
		if (!unpack_nameX(e, AA_ARRAYEND, NULL))
			goto fail;
		if (!unpack_nameX(e, AA_STRUCTEND, NULL))
			goto fail;
	}
	return true;

fail:
	e->pos = pos;
	return false;
}

static u32 strhash(const void *data, u32 len, u32 seed)
{
	const char * const *key = data;

	return jhash(*key, strlen(*key), seed);
}

static int datacmp(struct rhashtable_compare_arg *arg, const void *obj)
{
	const struct aa_data *data = obj;
	const char * const *key = arg->key;

	return strcmp(data->key, *key);
}

/**
 * unpack_profile - unpack a serialized profile
 * @e: serialized data extent information (NOT NULL)
 * @ns_name: Returns - namespace name if the profile is fully qualified,
 *	     else NULL (NOT NULL)
 *
 * NOTE: unpack profile sets audit struct if there is a failure
 */
static struct aa_profile *unpack_profile(struct aa_ext *e, char **ns_name)
{
	struct aa_profile *profile = NULL;
	const char *tmpname, *tmpns = NULL, *name = NULL;
	const char *info = "failed to unpack profile";
	size_t ns_len;
	struct rhashtable_params params = { 0 };
	char *key = NULL;
	struct aa_data *data;
	int i, error = -EPROTO;
	kernel_cap_t tmpcap;
	u32 tmp;

	*ns_name = NULL;

	/* check that we have the right struct being passed */
	if (!unpack_nameX(e, AA_STRUCT, "profile"))
		goto fail;
	if (!unpack_str(e, &name, NULL))
		goto fail;
	if (*name == '\0')
		goto fail;

	tmpname = aa_splitn_fqname(name, strlen(name), &tmpns, &ns_len);
	if (tmpns) {
		*ns_name = kstrndup(tmpns, ns_len, GFP_KERNEL);
		if (!*ns_name) {
			info = "out of memory";
			goto fail;
		}
		name = tmpname;
	}

	profile = aa_alloc_profile(name, NULL, GFP_KERNEL);
	if (!profile)
		return ERR_PTR(-ENOMEM);

	/* profile renaming is optional */
	(void) unpack_str(e, &profile->rename, "rename");

	/* attachment string is optional */
	(void) unpack_str(e, &profile->attach, "attach");

	/* xmatch is optional and may be NULL */
	profile->xmatch = unpack_dfa(e);
	if (IS_ERR(profile->xmatch)) {
		error = PTR_ERR(profile->xmatch);
		profile->xmatch = NULL;
		info = "bad xmatch";
		goto fail;
	}
	/* xmatch_len is not optional if xmatch is set */
	if (profile->xmatch) {
		if (!unpack_u32(e, &tmp, NULL)) {
			info = "missing xmatch len";
			goto fail;
		}
		profile->xmatch_len = tmp;
	}

	/* disconnected attachment string is optional */
	(void) unpack_str(e, &profile->disconnected, "disconnected");

	/* per profile debug flags (complain, audit) */
	if (!unpack_nameX(e, AA_STRUCT, "flags")) {
		info = "profile missing flags";
		goto fail;
	}
	info = "failed to unpack profile flags";
	if (!unpack_u32(e, &tmp, NULL))
		goto fail;
	if (tmp & PACKED_FLAG_HAT)
		profile->label.flags |= FLAG_HAT;
	if (!unpack_u32(e, &tmp, NULL))
		goto fail;
	if (tmp == PACKED_MODE_COMPLAIN || (e->version & FORCE_COMPLAIN_FLAG)) {
		profile->mode = APPARMOR_COMPLAIN;
	} else if (tmp == PACKED_MODE_ENFORCE) {
		profile->mode = APPARMOR_ENFORCE;
	} else if (tmp == PACKED_MODE_KILL) {
		profile->mode = APPARMOR_KILL;
	} else if (tmp == PACKED_MODE_UNCONFINED) {
		profile->mode = APPARMOR_UNCONFINED;
		profile->label.flags |= FLAG_UNCONFINED;
	} else {
		goto fail;
	}
	if (!unpack_u32(e, &tmp, NULL))
		goto fail;
	if (tmp)
		profile->audit = AUDIT_ALL;

	if (!unpack_nameX(e, AA_STRUCTEND, NULL))
		goto fail;

	/* path_flags is optional */
	if (unpack_u32(e, &profile->path_flags, "path_flags"))
		profile->path_flags |= profile->label.flags &
			PATH_MEDIATE_DELETED;
	else
		/* set a default value if path_flags field is not present */
		profile->path_flags = PATH_MEDIATE_DELETED;

	info = "failed to unpack profile capabilities";
	if (!unpack_u32(e, &(profile->caps.allow.cap[0]), NULL))
		goto fail;
	if (!unpack_u32(e, &(profile->caps.audit.cap[0]), NULL))
		goto fail;
	if (!unpack_u32(e, &(profile->caps.quiet.cap[0]), NULL))
		goto fail;
	if (!unpack_u32(e, &tmpcap.cap[0], NULL))
		goto fail;

	info = "failed to unpack upper profile capabilities";
	if (unpack_nameX(e, AA_STRUCT, "caps64")) {
		/* optional upper half of 64 bit caps */
		if (!unpack_u32(e, &(profile->caps.allow.cap[1]), NULL))
			goto fail;
		if (!unpack_u32(e, &(profile->caps.audit.cap[1]), NULL))
			goto fail;
		if (!unpack_u32(e, &(profile->caps.quiet.cap[1]), NULL))
			goto fail;
		if (!unpack_u32(e, &(tmpcap.cap[1]), NULL))
			goto fail;
		if (!unpack_nameX(e, AA_STRUCTEND, NULL))
			goto fail;
	}

	info = "failed to unpack extended profile capabilities";
	if (unpack_nameX(e, AA_STRUCT, "capsx")) {
		/* optional extended caps mediation mask */
		if (!unpack_u32(e, &(profile->caps.extended.cap[0]), NULL))
			goto fail;
		if (!unpack_u32(e, &(profile->caps.extended.cap[1]), NULL))
			goto fail;
		if (!unpack_nameX(e, AA_STRUCTEND, NULL))
			goto fail;
	}

	if (!unpack_xattrs(e, profile)) {
		info = "failed to unpack profile xattrs";
		goto fail;
	}

	if (!unpack_rlimits(e, profile)) {
		info = "failed to unpack profile rlimits";
		goto fail;
	}

	if (!unpack_secmark(e, profile)) {
		info = "failed to unpack profile secmark rules";
		goto fail;
	}

	if (unpack_nameX(e, AA_STRUCT, "policydb")) {
		/* generic policy dfa - optional and may be NULL */
		info = "failed to unpack policydb";
		profile->policy.dfa = unpack_dfa(e);
		if (IS_ERR(profile->policy.dfa)) {
			error = PTR_ERR(profile->policy.dfa);
			profile->policy.dfa = NULL;
			goto fail;
		} else if (!profile->policy.dfa) {
			error = -EPROTO;
			goto fail;
		}
		if (!unpack_u32(e, &profile->policy.start[0], "start"))
			/* default start state */
			profile->policy.start[0] = DFA_START;
		/* setup class index */
		for (i = AA_CLASS_FILE; i <= AA_CLASS_LAST; i++) {
			profile->policy.start[i] =
				aa_dfa_next(profile->policy.dfa,
					    profile->policy.start[0],
					    i);
		}
		if (!unpack_nameX(e, AA_STRUCTEND, NULL))
			goto fail;
	} else
		profile->policy.dfa = aa_get_dfa(nulldfa);

	/* get file rules */
	profile->file.dfa = unpack_dfa(e);
	if (IS_ERR(profile->file.dfa)) {
		error = PTR_ERR(profile->file.dfa);
		profile->file.dfa = NULL;
		info = "failed to unpack profile file rules";
		goto fail;
	} else if (profile->file.dfa) {
		if (!unpack_u32(e, &profile->file.start, "dfa_start"))
			/* default start state */
			profile->file.start = DFA_START;
	} else if (profile->policy.dfa &&
		   profile->policy.start[AA_CLASS_FILE]) {
		profile->file.dfa = aa_get_dfa(profile->policy.dfa);
		profile->file.start = profile->policy.start[AA_CLASS_FILE];
	} else
		profile->file.dfa = aa_get_dfa(nulldfa);

	if (!unpack_trans_table(e, profile)) {
		info = "failed to unpack profile transition table";
		goto fail;
	}

	if (unpack_nameX(e, AA_STRUCT, "data")) {
		info = "out of memory";
		profile->data = kzalloc(sizeof(*profile->data), GFP_KERNEL);
		if (!profile->data)
			goto fail;

		params.nelem_hint = 3;
		params.key_len = sizeof(void *);
		params.key_offset = offsetof(struct aa_data, key);
		params.head_offset = offsetof(struct aa_data, head);
		params.hashfn = strhash;
		params.obj_cmpfn = datacmp;

		if (rhashtable_init(profile->data, &params)) {
			info = "failed to init key, value hash table";
			goto fail;
		}

		while (unpack_strdup(e, &key, NULL)) {
			data = kzalloc(sizeof(*data), GFP_KERNEL);
			if (!data) {
				kfree_sensitive(key);
				goto fail;
			}

			data->key = key;
			data->size = unpack_blob(e, &data->data, NULL);
			data->data = kvmemdup(data->data, data->size);
			if (data->size && !data->data) {
				kfree_sensitive(data->key);
				kfree_sensitive(data);
				goto fail;
			}

			rhashtable_insert_fast(profile->data, &data->head,
					       profile->data->p);
		}

		if (!unpack_nameX(e, AA_STRUCTEND, NULL)) {
			info = "failed to unpack end of key, value data table";
			goto fail;
		}
	}

	if (!unpack_nameX(e, AA_STRUCTEND, NULL)) {
		info = "failed to unpack end of profile";
		goto fail;
	}

	return profile;

fail:
	if (profile)
		name = NULL;
	else if (!name)
		name = "unknown";
	audit_iface(profile, NULL, name, info, e, error);
	aa_free_profile(profile);

	return ERR_PTR(error);
}

/**
 * verify_header - unpack serialized stream header
 * @e: serialized data read head (NOT NULL)
 * @required: whether the header is required or optional
 * @ns: Returns - namespace if one is specified else NULL (NOT NULL)
 *
 * Returns: error or 0 if header is good
 */
static int verify_header(struct aa_ext *e, int required, const char **ns)
{
	int error = -EPROTONOSUPPORT;
	const char *name = NULL;
	*ns = NULL;

	/* get the interface version */
	if (!unpack_u32(e, &e->version, "version")) {
		if (required) {
			audit_iface(NULL, NULL, NULL, "invalid profile format",
				    e, error);
			return error;
		}
	}

	/* Check that the interface version is currently supported.
	 * If not specified, use the previous version.
	 * Mask off everything that is not the kernel abi version.
	 */
	if (VERSION_LT(e->version, v5) || VERSION_GT(e->version, v7)) {
		audit_iface(NULL, NULL, NULL, "unsupported interface version",
			    e, error);
		return error;
	}

	/* read the namespace if present */
	if (unpack_str(e, &name, "namespace")) {
		if (*name == '\0') {
			audit_iface(NULL, NULL, NULL, "invalid namespace name",
				    e, error);
			return error;
		}
		if (*ns && strcmp(*ns, name)) {
			audit_iface(NULL, NULL, NULL, "invalid ns change", e,
				    error);
		} else if (!*ns) {
			*ns = kstrdup(name, GFP_KERNEL);
			if (!*ns)
				return -ENOMEM;
		}
	}

	return 0;
}

static bool verify_xindex(int xindex, int table_size)
{
	int index, xtype;
	xtype = xindex & AA_X_TYPE_MASK;
	index = xindex & AA_X_INDEX_MASK;
	if (xtype == AA_X_TABLE && index >= table_size)
		return false;
	return true;
}

/* verify dfa xindexes are in range of transition tables */
static bool verify_dfa_xindex(struct aa_dfa *dfa, int table_size)
{
	int i;
	for (i = 0; i < dfa->tables[YYTD_ID_ACCEPT]->td_lolen; i++) {
		if (!verify_xindex(dfa_user_xindex(dfa, i), table_size))
			return false;
		if (!verify_xindex(dfa_other_xindex(dfa, i), table_size))
			return false;
	}
	return true;
}

/**
 * verify_profile - Do post unpack analysis to verify profile consistency
 * @profile: profile to verify (NOT NULL)
 *
 * Returns: 0 if passes verification else error
 */
static int verify_profile(struct aa_profile *profile)
{
	if (profile->file.dfa &&
	    !verify_dfa_xindex(profile->file.dfa,
			       profile->file.trans.size)) {
		audit_iface(profile, NULL, NULL, "Invalid named transition",
			    NULL, -EPROTO);
		return -EPROTO;
	}

	return 0;
}

void aa_load_ent_free(struct aa_load_ent *ent)
{
	if (ent) {
		aa_put_profile(ent->rename);
		aa_put_profile(ent->old);
		aa_put_profile(ent->new);
		kfree(ent->ns_name);
		kfree_sensitive(ent);
	}
}

struct aa_load_ent *aa_load_ent_alloc(void)
{
	struct aa_load_ent *ent = kzalloc(sizeof(*ent), GFP_KERNEL);
	if (ent)
		INIT_LIST_HEAD(&ent->list);
	return ent;
}

static int deflate_compress(const char *src, size_t slen, char **dst,
			    size_t *dlen)
{
	int error;
	struct z_stream_s strm;
	void *stgbuf, *dstbuf;
	size_t stglen = deflateBound(slen);

	memset(&strm, 0, sizeof(strm));

	if (stglen < slen)
		return -EFBIG;

	strm.workspace = kvzalloc(zlib_deflate_workspacesize(MAX_WBITS,
							     MAX_MEM_LEVEL),
				  GFP_KERNEL);
	if (!strm.workspace)
		return -ENOMEM;

	error = zlib_deflateInit(&strm, aa_g_rawdata_compression_level);
	if (error != Z_OK) {
		error = -ENOMEM;
		goto fail_deflate_init;
	}

	stgbuf = kvzalloc(stglen, GFP_KERNEL);
	if (!stgbuf) {
		error = -ENOMEM;
		goto fail_stg_alloc;
	}

	strm.next_in = src;
	strm.avail_in = slen;
	strm.next_out = stgbuf;
	strm.avail_out = stglen;

	error = zlib_deflate(&strm, Z_FINISH);
	if (error != Z_STREAM_END) {
		error = -EINVAL;
		goto fail_deflate;
	}
	error = 0;

	if (is_vmalloc_addr(stgbuf)) {
		dstbuf = kvzalloc(strm.total_out, GFP_KERNEL);
		if (dstbuf) {
			memcpy(dstbuf, stgbuf, strm.total_out);
			kvfree(stgbuf);
		}
	} else
		/*
		 * If the staging buffer was kmalloc'd, then using krealloc is
		 * probably going to be faster. The destination buffer will
		 * always be smaller, so it's just shrunk, avoiding a memcpy
		 */
		dstbuf = krealloc(stgbuf, strm.total_out, GFP_KERNEL);

	if (!dstbuf) {
		error = -ENOMEM;
		goto fail_deflate;
	}

	*dst = dstbuf;
	*dlen = strm.total_out;

fail_stg_alloc:
	zlib_deflateEnd(&strm);
fail_deflate_init:
	kvfree(strm.workspace);
	return error;

fail_deflate:
	kvfree(stgbuf);
	goto fail_stg_alloc;
}

static int compress_loaddata(struct aa_loaddata *data)
{

	AA_BUG(data->compressed_size > 0);

	/*
	 * Shortcut the no compression case, else we increase the amount of
	 * storage required by a small amount
	 */
	if (aa_g_rawdata_compression_level != 0) {
		void *udata = data->data;
		int error = deflate_compress(udata, data->size, &data->data,
					     &data->compressed_size);
		if (error)
			return error;

		kvfree(udata);
	} else
		data->compressed_size = data->size;

	return 0;
}

/**
 * aa_unpack - unpack packed binary profile(s) data loaded from user space
 * @udata: user data copied to kmem (NOT NULL)
 * @lh: list to place unpacked profiles in an aa_repl_ws
 * @ns: Returns namespace profile is in if specified else NULL (NOT NULL)
 *
 * Unpack user data and return refcounted, allocated profile(s) stored in
 * @lh in order of discovery, with the list chain stored in base.list,
 * or an error.
 *
 * Returns: profile(s) on @lh else error pointer if fails to unpack
 */
int aa_unpack(struct aa_loaddata *udata, struct list_head *lh,
	      const char **ns)
{
	struct aa_load_ent *tmp, *ent;
	struct aa_profile *profile = NULL;
	int error;
	struct aa_ext e = {
		.start = udata->data,
		.end = udata->data + udata->size,
		.pos = udata->data,
	};

	*ns = NULL;
	while (e.pos < e.end) {
		char *ns_name = NULL;
		void *start;
		error = verify_header(&e, e.pos == e.start, ns);
		if (error)
			goto fail;

		start = e.pos;
		profile = unpack_profile(&e, &ns_name);
		if (IS_ERR(profile)) {
			error = PTR_ERR(profile);
			goto fail;
		}

		error = verify_profile(profile);
		if (error)
			goto fail_profile;

		if (aa_g_hash_policy)
			error = aa_calc_profile_hash(profile, e.version, start,
						     e.pos - start);
		if (error)
			goto fail_profile;

		ent = aa_load_ent_alloc();
		if (!ent) {
			error = -ENOMEM;
			goto fail_profile;
		}

		ent->new = profile;
		ent->ns_name = ns_name;
		list_add_tail(&ent->list, lh);
	}
	udata->abi = e.version & K_ABI_MASK;
	if (aa_g_hash_policy) {
		udata->hash = aa_calc_hash(udata->data, udata->size);
		if (IS_ERR(udata->hash)) {
			error = PTR_ERR(udata->hash);
			udata->hash = NULL;
			goto fail;
		}
	}
	error = compress_loaddata(udata);
	if (error)
		goto fail;
	return 0;

fail_profile:
	aa_put_profile(profile);

fail:
	list_for_each_entry_safe(ent, tmp, lh, list) {
		list_del_init(&ent->list);
		aa_load_ent_free(ent);
	}

	return error;
}

#ifdef CONFIG_SECURITY_APPARMOR_KUNIT_TEST
#include "policy_unpack_test.c"
#endif /* CONFIG_SECURITY_APPARMOR_KUNIT_TEST */