2 * AppArmor security module
4 * This file contains AppArmor functions for unpacking policy loaded from
7 * Copyright (C) 1998-2008 Novell/SUSE
8 * Copyright 2009-2010 Canonical Ltd.
10 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public License as
12 * published by the Free Software Foundation, version 2 of the
15 * AppArmor uses a serialized binary format for loading policy. To find
16 * policy format documentation see Documentation/admin-guide/LSM/apparmor.rst
17 * All policy is validated before it is used.
20 #include <asm/unaligned.h>
21 #include <linux/ctype.h>
22 #include <linux/errno.h>
24 #include "include/apparmor.h"
25 #include "include/audit.h"
26 #include "include/cred.h"
27 #include "include/crypto.h"
28 #include "include/match.h"
29 #include "include/path.h"
30 #include "include/policy.h"
31 #include "include/policy_unpack.h"
#define K_ABI_MASK 0x3ff
#define FORCE_COMPLAIN_FLAG 0x800
#define VERSION_LT(X, Y) (((X) & K_ABI_MASK) < ((Y) & K_ABI_MASK))
#define VERSION_GT(X, Y) (((X) & K_ABI_MASK) > ((Y) & K_ABI_MASK))

#define v5	5	/* base version */
#define v6	6	/* per entry policydb mediation check */
#define v7	7	/* v7 interface version - required by VERSION_GT() check
			 * in verify_header(); was missing from this extract */
#define v8	8	/* full network masking */
44 * The AppArmor interface treats data as a type byte followed by the
45 * actual data. The interface has the notion of a a named entry
46 * which has a name (AA_NAME typecode followed by name string) followed by
47 * the entries typecode and data. Named types allow for optional
48 * elements and extensions to be added and tested for without breaking
49 * backwards compatibility.
57 AA_NAME, /* same as string except it is items name */
69 * aa_ext is the read of the buffer containing the serialized profile. The
70 * data is copied into a kernel buffer in apparmorfs and then handed off to
71 * the unpack routines.
76 void *pos; /* pointer to current position in the buffer */
80 /* audit callback for unpack fields */
81 static void audit_cb(struct audit_buffer *ab, void *va)
83 struct common_audit_data *sa = va;
85 if (aad(sa)->iface.ns) {
86 audit_log_format(ab, " ns=");
87 audit_log_untrustedstring(ab, aad(sa)->iface.ns);
90 audit_log_format(ab, " name=");
91 audit_log_untrustedstring(ab, aad(sa)->name);
93 if (aad(sa)->iface.pos)
94 audit_log_format(ab, " offset=%ld", aad(sa)->iface.pos);
98 * audit_iface - do audit message for policy unpacking/load/replace/remove
99 * @new: profile if it has been allocated (MAYBE NULL)
100 * @ns_name: name of the ns the profile is to be loaded to (MAY BE NULL)
101 * @name: name of the profile being manipulated (MAYBE NULL)
102 * @info: any extra info about the failure (MAYBE NULL)
103 * @e: buffer position info
106 * Returns: %0 or error
108 static int audit_iface(struct aa_profile *new, const char *ns_name,
109 const char *name, const char *info, struct aa_ext *e,
112 struct aa_profile *profile = labels_profile(aa_current_raw_label());
113 DEFINE_AUDIT_DATA(sa, LSM_AUDIT_DATA_NONE, NULL);
115 aad(&sa)->iface.pos = e->pos - e->start;
116 aad(&sa)->iface.ns = ns_name;
118 aad(&sa)->name = new->base.hname;
120 aad(&sa)->name = name;
121 aad(&sa)->info = info;
122 aad(&sa)->error = error;
124 return aa_audit(AUDIT_APPARMOR_STATUS, profile, &sa, audit_cb);
127 void __aa_loaddata_update(struct aa_loaddata *data, long revision)
131 AA_BUG(!data->dents[AAFS_LOADDATA_REVISION]);
132 AA_BUG(!mutex_is_locked(&data->ns->lock));
133 AA_BUG(data->revision > revision);
135 data->revision = revision;
136 d_inode(data->dents[AAFS_LOADDATA_DIR])->i_mtime =
137 current_time(d_inode(data->dents[AAFS_LOADDATA_DIR]));
138 d_inode(data->dents[AAFS_LOADDATA_REVISION])->i_mtime =
139 current_time(d_inode(data->dents[AAFS_LOADDATA_REVISION]));
142 bool aa_rawdata_eq(struct aa_loaddata *l, struct aa_loaddata *r)
144 if (l->size != r->size)
146 if (aa_g_hash_policy && memcmp(l->hash, r->hash, aa_hash_size()) != 0)
148 return memcmp(l->data, r->data, r->size) == 0;
152 * need to take the ns mutex lock which is NOT safe most places that
153 * put_loaddata is called, so we have to delay freeing it
155 static void do_loaddata_free(struct work_struct *work)
157 struct aa_loaddata *d = container_of(work, struct aa_loaddata, work);
158 struct aa_ns *ns = aa_get_ns(d->ns);
161 mutex_lock_nested(&ns->lock, ns->level);
162 __aa_fs_remove_rawdata(d);
163 mutex_unlock(&ns->lock);
173 void aa_loaddata_kref(struct kref *kref)
175 struct aa_loaddata *d = container_of(kref, struct aa_loaddata, count);
178 INIT_WORK(&d->work, do_loaddata_free);
179 schedule_work(&d->work);
183 struct aa_loaddata *aa_loaddata_alloc(size_t size)
185 struct aa_loaddata *d;
187 d = kzalloc(sizeof(*d), GFP_KERNEL);
189 return ERR_PTR(-ENOMEM);
190 d->data = kvzalloc(size, GFP_KERNEL);
193 return ERR_PTR(-ENOMEM);
195 kref_init(&d->count);
196 INIT_LIST_HEAD(&d->list);
201 /* test if read will be in packed data bounds */
202 static bool inbounds(struct aa_ext *e, size_t size)
204 return (size <= e->end - e->pos);
207 static void *kvmemdup(const void *src, size_t len)
209 void *p = kvmalloc(len, GFP_KERNEL);
217 * aa_u16_chunck - test and do bounds checking for a u16 size based chunk
218 * @e: serialized data read head (NOT NULL)
219 * @chunk: start address for chunk of data (NOT NULL)
221 * Returns: the size of chunk found with the read head at the end of the chunk.
223 static size_t unpack_u16_chunk(struct aa_ext *e, char **chunk)
227 if (!inbounds(e, sizeof(u16)))
229 size = le16_to_cpu(get_unaligned((__le16 *) e->pos));
230 e->pos += sizeof(__le16);
231 if (!inbounds(e, size))
238 /* unpack control byte */
239 static bool unpack_X(struct aa_ext *e, enum aa_code code)
243 if (*(u8 *) e->pos != code)
250 * unpack_nameX - check is the next element is of type X with a name of @name
251 * @e: serialized data extent information (NOT NULL)
253 * @name: name to match to the serialized element. (MAYBE NULL)
255 * check that the next serialized data element is of type X and has a tag
256 * name @name. If @name is specified then there must be a matching
257 * name element in the stream. If @name is NULL any name element will be
258 * skipped and only the typecode will be tested.
260 * Returns 1 on success (both type code and name tests match) and the read
261 * head is advanced past the headers
263 * Returns: 0 if either match fails, the read head does not move
265 static bool unpack_nameX(struct aa_ext *e, enum aa_code code, const char *name)
268 * May need to reset pos if name or type doesn't match
272 * Check for presence of a tagname, and if present name size
273 * AA_NAME tag value is a u16.
275 if (unpack_X(e, AA_NAME)) {
277 size_t size = unpack_u16_chunk(e, &tag);
278 /* if a name is specified it must match. otherwise skip tag */
279 if (name && (!size || strcmp(name, tag)))
282 /* if a name is specified and there is no name tag fail */
286 /* now check if type code matches */
287 if (unpack_X(e, code))
295 static bool unpack_u32(struct aa_ext *e, u32 *data, const char *name)
297 if (unpack_nameX(e, AA_U32, name)) {
298 if (!inbounds(e, sizeof(u32)))
301 *data = le32_to_cpu(get_unaligned((__le32 *) e->pos));
302 e->pos += sizeof(u32);
308 static bool unpack_u64(struct aa_ext *e, u64 *data, const char *name)
310 if (unpack_nameX(e, AA_U64, name)) {
311 if (!inbounds(e, sizeof(u64)))
314 *data = le64_to_cpu(get_unaligned((__le64 *) e->pos));
315 e->pos += sizeof(u64);
321 static size_t unpack_array(struct aa_ext *e, const char *name)
323 if (unpack_nameX(e, AA_ARRAY, name)) {
325 if (!inbounds(e, sizeof(u16)))
327 size = (int)le16_to_cpu(get_unaligned((__le16 *) e->pos));
328 e->pos += sizeof(u16);
334 static size_t unpack_blob(struct aa_ext *e, char **blob, const char *name)
336 if (unpack_nameX(e, AA_BLOB, name)) {
338 if (!inbounds(e, sizeof(u32)))
340 size = le32_to_cpu(get_unaligned((__le32 *) e->pos));
341 e->pos += sizeof(u32);
342 if (inbounds(e, (size_t) size)) {
351 static int unpack_str(struct aa_ext *e, const char **string, const char *name)
357 if (unpack_nameX(e, AA_STRING, name)) {
358 size = unpack_u16_chunk(e, &src_str);
360 /* strings are null terminated, length is size - 1 */
361 if (src_str[size - 1] != 0)
373 static int unpack_strdup(struct aa_ext *e, char **string, const char *name)
377 int res = unpack_str(e, &tmp, name);
383 *string = kmemdup(tmp, res, GFP_KERNEL);
392 #define DFA_VALID_PERM_MASK 0xffffffff
393 #define DFA_VALID_PERM2_MASK 0xffffffff
396 * verify_accept - verify the accept tables of a dfa
397 * @dfa: dfa to verify accept tables of (NOT NULL)
398 * @flags: flags governing dfa
400 * Returns: 1 if valid accept tables else 0 if error
402 static bool verify_accept(struct aa_dfa *dfa, int flags)
406 /* verify accept permissions */
407 for (i = 0; i < dfa->tables[YYTD_ID_ACCEPT]->td_lolen; i++) {
408 int mode = ACCEPT_TABLE(dfa)[i];
410 if (mode & ~DFA_VALID_PERM_MASK)
413 if (ACCEPT_TABLE2(dfa)[i] & ~DFA_VALID_PERM2_MASK)
420 * unpack_dfa - unpack a file rule dfa
421 * @e: serialized data extent information (NOT NULL)
423 * returns dfa or ERR_PTR or NULL if no dfa
425 static struct aa_dfa *unpack_dfa(struct aa_ext *e)
429 struct aa_dfa *dfa = NULL;
431 size = unpack_blob(e, &blob, "aadfa");
434 * The dfa is aligned with in the blob to 8 bytes
435 * from the beginning of the stream.
436 * alignment adjust needed by dfa unpack
438 size_t sz = blob - (char *) e->start -
439 ((e->pos - e->start) & 7);
440 size_t pad = ALIGN(sz, 8) - sz;
441 int flags = TO_ACCEPT1_FLAG(YYTD_DATA32) |
442 TO_ACCEPT2_FLAG(YYTD_DATA32) | DFA_FLAG_VERIFY_STATES;
443 dfa = aa_dfa_unpack(blob + pad, size - pad, flags);
448 if (!verify_accept(dfa, flags))
456 return ERR_PTR(-EPROTO);
460 * unpack_trans_table - unpack a profile transition table
461 * @e: serialized data extent information (NOT NULL)
462 * @profile: profile to add the accept table to (NOT NULL)
464 * Returns: 1 if table successfully unpacked
466 static bool unpack_trans_table(struct aa_ext *e, struct aa_profile *profile)
468 void *saved_pos = e->pos;
470 /* exec table is optional */
471 if (unpack_nameX(e, AA_STRUCT, "xtable")) {
474 size = unpack_array(e, NULL);
475 /* currently 4 exec bits and entries 0-3 are reserved iupcx */
478 profile->file.trans.table = kzalloc(sizeof(char *) * size,
480 if (!profile->file.trans.table)
483 profile->file.trans.size = size;
484 for (i = 0; i < size; i++) {
486 int c, j, pos, size2 = unpack_strdup(e, &str, NULL);
487 /* unpack_strdup verifies that the last character is
488 * null termination byte.
492 profile->file.trans.table[i] = str;
493 /* verify that name doesn't start with space */
497 /* count internal # of internal \0 */
498 for (c = j = 0; j < size2 - 1; j++) {
505 /* first character after : must be valid */
508 /* beginning with : requires an embedded \0,
509 * verify that exactly 1 internal \0 exists
510 * trailing \0 already verified by unpack_strdup
512 * convert \0 back to : for label_parse
519 /* fail - all other cases with embedded \0 */
522 if (!unpack_nameX(e, AA_ARRAYEND, NULL))
524 if (!unpack_nameX(e, AA_STRUCTEND, NULL))
530 aa_free_domain_entries(&profile->file.trans);
535 static bool unpack_xattrs(struct aa_ext *e, struct aa_profile *profile)
539 if (unpack_nameX(e, AA_STRUCT, "xattrs")) {
542 size = unpack_array(e, NULL);
543 profile->xattr_count = size;
544 profile->xattrs = kcalloc(size, sizeof(char *), GFP_KERNEL);
545 if (!profile->xattrs)
547 for (i = 0; i < size; i++) {
548 if (!unpack_strdup(e, &profile->xattrs[i], NULL))
551 if (!unpack_nameX(e, AA_ARRAYEND, NULL))
553 if (!unpack_nameX(e, AA_STRUCTEND, NULL))
564 static bool unpack_rlimits(struct aa_ext *e, struct aa_profile *profile)
568 /* rlimits are optional */
569 if (unpack_nameX(e, AA_STRUCT, "rlimits")) {
572 if (!unpack_u32(e, &tmp, NULL))
574 profile->rlimits.mask = tmp;
576 size = unpack_array(e, NULL);
577 if (size > RLIM_NLIMITS)
579 for (i = 0; i < size; i++) {
581 int a = aa_map_resource(i);
582 if (!unpack_u64(e, &tmp2, NULL))
584 profile->rlimits.limits[a].rlim_max = tmp2;
586 if (!unpack_nameX(e, AA_ARRAYEND, NULL))
588 if (!unpack_nameX(e, AA_STRUCTEND, NULL))
598 static u32 strhash(const void *data, u32 len, u32 seed)
600 const char * const *key = data;
602 return jhash(*key, strlen(*key), seed);
605 static int datacmp(struct rhashtable_compare_arg *arg, const void *obj)
607 const struct aa_data *data = obj;
608 const char * const *key = arg->key;
610 return strcmp(data->key, *key);
614 * unpack_profile - unpack a serialized profile
615 * @e: serialized data extent information (NOT NULL)
617 * NOTE: unpack profile sets audit struct if there is a failure
619 static struct aa_profile *unpack_profile(struct aa_ext *e, char **ns_name)
621 struct aa_profile *profile = NULL;
622 const char *tmpname, *tmpns = NULL, *name = NULL;
623 const char *info = "failed to unpack profile";
625 struct rhashtable_params params = { 0 };
627 struct aa_data *data;
628 int i, error = -EPROTO;
634 /* check that we have the right struct being passed */
635 if (!unpack_nameX(e, AA_STRUCT, "profile"))
637 if (!unpack_str(e, &name, NULL))
642 tmpname = aa_splitn_fqname(name, strlen(name), &tmpns, &ns_len);
644 *ns_name = kstrndup(tmpns, ns_len, GFP_KERNEL);
646 info = "out of memory";
652 profile = aa_alloc_profile(name, NULL, GFP_KERNEL);
654 return ERR_PTR(-ENOMEM);
656 /* profile renaming is optional */
657 (void) unpack_str(e, &profile->rename, "rename");
659 /* attachment string is optional */
660 (void) unpack_str(e, &profile->attach, "attach");
662 /* xmatch is optional and may be NULL */
663 profile->xmatch = unpack_dfa(e);
664 if (IS_ERR(profile->xmatch)) {
665 error = PTR_ERR(profile->xmatch);
666 profile->xmatch = NULL;
670 /* xmatch_len is not optional if xmatch is set */
671 if (profile->xmatch) {
672 if (!unpack_u32(e, &tmp, NULL)) {
673 info = "missing xmatch len";
676 profile->xmatch_len = tmp;
679 /* disconnected attachment string is optional */
680 (void) unpack_str(e, &profile->disconnected, "disconnected");
682 /* per profile debug flags (complain, audit) */
683 if (!unpack_nameX(e, AA_STRUCT, "flags")) {
684 info = "profile missing flags";
687 info = "failed to unpack profile flags";
688 if (!unpack_u32(e, &tmp, NULL))
690 if (tmp & PACKED_FLAG_HAT)
691 profile->label.flags |= FLAG_HAT;
692 if (!unpack_u32(e, &tmp, NULL))
694 if (tmp == PACKED_MODE_COMPLAIN || (e->version & FORCE_COMPLAIN_FLAG))
695 profile->mode = APPARMOR_COMPLAIN;
696 else if (tmp == PACKED_MODE_KILL)
697 profile->mode = APPARMOR_KILL;
698 else if (tmp == PACKED_MODE_UNCONFINED)
699 profile->mode = APPARMOR_UNCONFINED;
700 if (!unpack_u32(e, &tmp, NULL))
703 profile->audit = AUDIT_ALL;
705 if (!unpack_nameX(e, AA_STRUCTEND, NULL))
708 /* path_flags is optional */
709 if (unpack_u32(e, &profile->path_flags, "path_flags"))
710 profile->path_flags |= profile->label.flags &
711 PATH_MEDIATE_DELETED;
713 /* set a default value if path_flags field is not present */
714 profile->path_flags = PATH_MEDIATE_DELETED;
716 info = "failed to unpack profile capabilities";
717 if (!unpack_u32(e, &(profile->caps.allow.cap[0]), NULL))
719 if (!unpack_u32(e, &(profile->caps.audit.cap[0]), NULL))
721 if (!unpack_u32(e, &(profile->caps.quiet.cap[0]), NULL))
723 if (!unpack_u32(e, &tmpcap.cap[0], NULL))
726 info = "failed to unpack upper profile capabilities";
727 if (unpack_nameX(e, AA_STRUCT, "caps64")) {
728 /* optional upper half of 64 bit caps */
729 if (!unpack_u32(e, &(profile->caps.allow.cap[1]), NULL))
731 if (!unpack_u32(e, &(profile->caps.audit.cap[1]), NULL))
733 if (!unpack_u32(e, &(profile->caps.quiet.cap[1]), NULL))
735 if (!unpack_u32(e, &(tmpcap.cap[1]), NULL))
737 if (!unpack_nameX(e, AA_STRUCTEND, NULL))
741 info = "failed to unpack extended profile capabilities";
742 if (unpack_nameX(e, AA_STRUCT, "capsx")) {
743 /* optional extended caps mediation mask */
744 if (!unpack_u32(e, &(profile->caps.extended.cap[0]), NULL))
746 if (!unpack_u32(e, &(profile->caps.extended.cap[1]), NULL))
748 if (!unpack_nameX(e, AA_STRUCTEND, NULL))
752 if (!unpack_xattrs(e, profile)) {
753 info = "failed to unpack profile xattrs";
757 if (!unpack_rlimits(e, profile)) {
758 info = "failed to unpack profile rlimits";
762 if (unpack_nameX(e, AA_STRUCT, "policydb")) {
763 /* generic policy dfa - optional and may be NULL */
764 info = "failed to unpack policydb";
765 profile->policy.dfa = unpack_dfa(e);
766 if (IS_ERR(profile->policy.dfa)) {
767 error = PTR_ERR(profile->policy.dfa);
768 profile->policy.dfa = NULL;
770 } else if (!profile->policy.dfa) {
774 if (!unpack_u32(e, &profile->policy.start[0], "start"))
775 /* default start state */
776 profile->policy.start[0] = DFA_START;
777 /* setup class index */
778 for (i = AA_CLASS_FILE; i <= AA_CLASS_LAST; i++) {
779 profile->policy.start[i] =
780 aa_dfa_next(profile->policy.dfa,
781 profile->policy.start[0],
784 if (!unpack_nameX(e, AA_STRUCTEND, NULL))
787 profile->policy.dfa = aa_get_dfa(nulldfa);
790 profile->file.dfa = unpack_dfa(e);
791 if (IS_ERR(profile->file.dfa)) {
792 error = PTR_ERR(profile->file.dfa);
793 profile->file.dfa = NULL;
794 info = "failed to unpack profile file rules";
796 } else if (profile->file.dfa) {
797 if (!unpack_u32(e, &profile->file.start, "dfa_start"))
798 /* default start state */
799 profile->file.start = DFA_START;
800 } else if (profile->policy.dfa &&
801 profile->policy.start[AA_CLASS_FILE]) {
802 profile->file.dfa = aa_get_dfa(profile->policy.dfa);
803 profile->file.start = profile->policy.start[AA_CLASS_FILE];
805 profile->file.dfa = aa_get_dfa(nulldfa);
807 if (!unpack_trans_table(e, profile)) {
808 info = "failed to unpack profile transition table";
812 if (unpack_nameX(e, AA_STRUCT, "data")) {
813 info = "out of memory";
814 profile->data = kzalloc(sizeof(*profile->data), GFP_KERNEL);
818 params.nelem_hint = 3;
819 params.key_len = sizeof(void *);
820 params.key_offset = offsetof(struct aa_data, key);
821 params.head_offset = offsetof(struct aa_data, head);
822 params.hashfn = strhash;
823 params.obj_cmpfn = datacmp;
825 if (rhashtable_init(profile->data, ¶ms)) {
826 info = "failed to init key, value hash table";
830 while (unpack_strdup(e, &key, NULL)) {
831 data = kzalloc(sizeof(*data), GFP_KERNEL);
838 data->size = unpack_blob(e, &data->data, NULL);
839 data->data = kvmemdup(data->data, data->size);
840 if (data->size && !data->data) {
846 rhashtable_insert_fast(profile->data, &data->head,
850 if (!unpack_nameX(e, AA_STRUCTEND, NULL)) {
851 info = "failed to unpack end of key, value data table";
856 if (!unpack_nameX(e, AA_STRUCTEND, NULL)) {
857 info = "failed to unpack end of profile";
868 audit_iface(profile, NULL, name, info, e, error);
869 aa_free_profile(profile);
871 return ERR_PTR(error);
875 * verify_head - unpack serialized stream header
876 * @e: serialized data read head (NOT NULL)
877 * @required: whether the header is required or optional
878 * @ns: Returns - namespace if one is specified else NULL (NOT NULL)
880 * Returns: error or 0 if header is good
882 static int verify_header(struct aa_ext *e, int required, const char **ns)
884 int error = -EPROTONOSUPPORT;
885 const char *name = NULL;
888 /* get the interface version */
889 if (!unpack_u32(e, &e->version, "version")) {
891 audit_iface(NULL, NULL, NULL, "invalid profile format",
897 /* Check that the interface version is currently supported.
898 * if not specified use previous version
899 * Mask off everything that is not kernel abi version
901 if (VERSION_LT(e->version, v5) || VERSION_GT(e->version, v7)) {
902 audit_iface(NULL, NULL, NULL, "unsupported interface version",
907 /* read the namespace if present */
908 if (unpack_str(e, &name, "namespace")) {
910 audit_iface(NULL, NULL, NULL, "invalid namespace name",
914 if (*ns && strcmp(*ns, name))
915 audit_iface(NULL, NULL, NULL, "invalid ns change", e,
924 static bool verify_xindex(int xindex, int table_size)
927 xtype = xindex & AA_X_TYPE_MASK;
928 index = xindex & AA_X_INDEX_MASK;
929 if (xtype == AA_X_TABLE && index >= table_size)
934 /* verify dfa xindexes are in range of transition tables */
935 static bool verify_dfa_xindex(struct aa_dfa *dfa, int table_size)
938 for (i = 0; i < dfa->tables[YYTD_ID_ACCEPT]->td_lolen; i++) {
939 if (!verify_xindex(dfa_user_xindex(dfa, i), table_size))
941 if (!verify_xindex(dfa_other_xindex(dfa, i), table_size))
948 * verify_profile - Do post unpack analysis to verify profile consistency
949 * @profile: profile to verify (NOT NULL)
951 * Returns: 0 if passes verification else error
953 static int verify_profile(struct aa_profile *profile)
955 if (profile->file.dfa &&
956 !verify_dfa_xindex(profile->file.dfa,
957 profile->file.trans.size)) {
958 audit_iface(profile, NULL, NULL, "Invalid named transition",
966 void aa_load_ent_free(struct aa_load_ent *ent)
969 aa_put_profile(ent->rename);
970 aa_put_profile(ent->old);
971 aa_put_profile(ent->new);
977 struct aa_load_ent *aa_load_ent_alloc(void)
979 struct aa_load_ent *ent = kzalloc(sizeof(*ent), GFP_KERNEL);
981 INIT_LIST_HEAD(&ent->list);
986 * aa_unpack - unpack packed binary profile(s) data loaded from user space
987 * @udata: user data copied to kmem (NOT NULL)
988 * @lh: list to place unpacked profiles in a aa_repl_ws
989 * @ns: Returns namespace profile is in if specified else NULL (NOT NULL)
991 * Unpack user data and return refcounted allocated profile(s) stored in
992 * @lh in order of discovery, with the list chain stored in base.list
995 * Returns: profile(s) on @lh else error pointer if fails to unpack
997 int aa_unpack(struct aa_loaddata *udata, struct list_head *lh,
1000 struct aa_load_ent *tmp, *ent;
1001 struct aa_profile *profile = NULL;
1004 .start = udata->data,
1005 .end = udata->data + udata->size,
1010 while (e.pos < e.end) {
1011 char *ns_name = NULL;
1013 error = verify_header(&e, e.pos == e.start, ns);
1018 profile = unpack_profile(&e, &ns_name);
1019 if (IS_ERR(profile)) {
1020 error = PTR_ERR(profile);
1024 error = verify_profile(profile);
1028 if (aa_g_hash_policy)
1029 error = aa_calc_profile_hash(profile, e.version, start,
1034 ent = aa_load_ent_alloc();
1041 ent->ns_name = ns_name;
1042 list_add_tail(&ent->list, lh);
1044 udata->abi = e.version & K_ABI_MASK;
1045 if (aa_g_hash_policy) {
1046 udata->hash = aa_calc_hash(udata->data, udata->size);
1047 if (IS_ERR(udata->hash)) {
1048 error = PTR_ERR(udata->hash);
1056 aa_put_profile(profile);
1059 list_for_each_entry_safe(ent, tmp, lh, list) {
1060 list_del_init(&ent->list);
1061 aa_load_ent_free(ent);