summaryrefslogtreecommitdiffstats
path: root/security/apparmor
diff options
context:
space:
mode:
authorJohn Johansen <john.johansen@canonical.com>2017-06-09 06:19:19 -0700
committerJohn Johansen <john.johansen@canonical.com>2017-06-10 17:11:38 -0700
commitf1bd904175e8190ce14aedee37e207ab51fe3b30 (patch)
tree6e96a11a3c1f8d0cd040cfaa0a6edce66fed0268 /security/apparmor
parent192ca6b55a866e838aee98d9cb6a0b5086467c03 (diff)
apparmor: add the base fns() for domain labels
Begin moving apparmor to using broader domain labels, that will allow run time computation of domain type splitting via "stacking" of profiles into a domain label vec. Signed-off-by: John Johansen <john.johansen@canonical.com>
Diffstat (limited to 'security/apparmor')
-rw-r--r--security/apparmor/include/label.h441
-rw-r--r--security/apparmor/label.c2120
2 files changed, 2561 insertions, 0 deletions
diff --git a/security/apparmor/include/label.h b/security/apparmor/include/label.h
new file mode 100644
index 000000000000..9a283b722755
--- /dev/null
+++ b/security/apparmor/include/label.h
@@ -0,0 +1,441 @@
+/*
+ * AppArmor security module
+ *
+ * This file contains AppArmor label definitions
+ *
+ * Copyright 2017 Canonical Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation, version 2 of the
+ * License.
+ */
+
+#ifndef __AA_LABEL_H
+#define __AA_LABEL_H
+
+#include <linux/atomic.h>
+#include <linux/audit.h>
+#include <linux/rbtree.h>
+#include <linux/rcupdate.h>
+
+#include "apparmor.h"
+#include "lib.h"
+
+struct aa_ns;
+
+/* max entries backed by on-stack storage in DEFINE_VEC before kzalloc */
+#define LOCAL_VEC_ENTRIES 8
+/* declare a profile vector @V plus a small on-stack backing array */
+#define DEFINE_VEC(T, V) \
+ struct aa_ ## T *(_ ## V ## _localtmp)[LOCAL_VEC_ENTRIES]; \
+ struct aa_ ## T **(V)
+
+/* init @V with @N NULL slots, using the stack array when @N is small.
+ * Evaluates to 0 on success, -ENOMEM if the heap allocation fails.
+ */
+#define vec_setup(T, V, N, GFP) \
+({ \
+ if ((N) <= LOCAL_VEC_ENTRIES) { \
+ typeof(N) i; \
+ (V) = (_ ## V ## _localtmp); \
+ for (i = 0; i < (N); i++) \
+ (V)[i] = NULL; \
+ } else \
+ (V) = kzalloc(sizeof(struct aa_ ## T *) * (N), (GFP)); \
+ (V) ? 0 : -ENOMEM; \
+})
+
+/* put refs on live entries of @V and free @V if it was heap allocated */
+#define vec_cleanup(T, V, N) \
+do { \
+ int i; \
+ for (i = 0; i < (N); i++) { \
+ if (!IS_ERR_OR_NULL((V)[i])) \
+ aa_put_ ## T((V)[i]); \
+ } \
+ if ((V) != _ ## V ## _localtmp) \
+ kfree(V); \
+} while (0)
+
+/* accessors keyed off the last (deepest ns) entry of a profile vector */
+#define vec_last(VEC, SIZE) ((VEC)[(SIZE) - 1])
+#define vec_ns(VEC, SIZE) (vec_last((VEC), (SIZE))->ns)
+#define vec_labelset(VEC, SIZE) (&vec_ns((VEC), (SIZE))->labels)
+#define cleanup_domain_vec(V, L) cleanup_label_vec((V), (L)->size)
+
+struct aa_profile;
+/* aa_vec_unique flag: NULL terminate the vec at vec[n - dups] */
+#define VEC_FLAG_TERMINATE 1
+int aa_vec_unique(struct aa_profile **vec, int n, int flags);
+struct aa_label *aa_vec_find_or_create_label(struct aa_profile **vec, int len,
+ gfp_t gfp);
+#define aa_sort_and_merge_vec(N, V) \
+ aa_sort_and_merge_profiles((N), (struct aa_profile **)(V))
+
+
+/* struct aa_labelset - set of labels for a namespace
+ *
+ * Labels are reference counted; aa_labelset does not contribute to label
+ * reference counts. Once a label's last refcount is put it is removed from
+ * the set.
+ */
+struct aa_labelset {
+ rwlock_t lock; /* protects @root */
+
+ struct rb_root root; /* rbtree of labels, ordered by label_cmp() */
+};
+
+/* walk every node of labelset @LS; requires @LS->lock held */
+#define __labelset_for_each(LS, N) \
+ for ((N) = rb_first(&(LS)->root); (N); (N) = rb_next(N))
+
+void aa_labelset_destroy(struct aa_labelset *ls);
+void aa_labelset_init(struct aa_labelset *ls);
+
+
+/* flags shared by labels and profiles; may change under the labelset lock */
+enum label_flags {
+ FLAG_HAT = 1, /* profile is a hat */
+ FLAG_UNCONFINED = 2, /* label unconfined only if all */
+ FLAG_NULL = 4, /* profile is null learning profile */
+ FLAG_IX_ON_NAME_ERROR = 8, /* fallback to ix on name lookup fail */
+ FLAG_IMMUTIBLE = 0x10, /* don't allow changes/replacement (sic: misspelled identifier, kept for ABI) */
+ FLAG_USER_DEFINED = 0x20, /* user based profile - lower privs */
+ FLAG_NO_LIST_REF = 0x40, /* list doesn't keep profile ref */
+ FLAG_NS_COUNT = 0x80, /* carries NS ref count */
+ FLAG_IN_TREE = 0x100, /* label is in tree */
+ FLAG_PROFILE = 0x200, /* label is a profile */
+ FLAG_EXPLICIT = 0x400, /* explicit static label */
+ FLAG_STALE = 0x800, /* replaced/removed */
+ FLAG_RENAMED = 0x1000, /* label has renaming in it */
+ FLAG_REVOKED = 0x2000, /* label has revocation in it */
+
+ /* These flags must correspond with PATH_flags */
+ /* TODO: add new path flags */
+};
+
+struct aa_label;
+/* struct aa_proxy - refcounted, RCU-updated forwarding pointer that
+ * tracks the newest replacement of a label (see aa_get_newest_label())
+ */
+struct aa_proxy {
+ struct kref count;
+ struct aa_label __rcu *label; /* current live label, NULL once dead */
+};
+
+/* iterator position for the label_for_each* family of macros */
+struct label_it {
+ int i, j;
+};
+
+/* struct aa_label - lazy labeling struct
+ * @count: ref count of active users
+ * @node: rbtree position
+ * @rcu: rcu callback struct
+ * @proxy: is set to the label that replaced this label
+ * @hname: text representation of the label (MAYBE_NULL)
+ * @flags: stale and other flags - values may change under label set lock
+ * @secid: secid that references this label
+ * @size: number of entries in @ent[]
+ * @ent: set of profiles for label, actual size determined by @size
+ */
+struct aa_label {
+ struct kref count;
+ struct rb_node node;
+ struct rcu_head rcu;
+ struct aa_proxy *proxy;
+ __counted char *hname;
+ long flags;
+ u32 secid;
+ int size;
+ struct aa_profile *vec[]; /* flexible array, NULL terminated at [size] */
+};
+
+/* record the last nonzero error from @FN into @E (0 results are ignored) */
+#define last_error(E, FN) \
+do { \
+ int __subE = (FN); \
+ if (__subE) \
+ (E) = __subE; \
+} while (0)
+
+/* flag test helpers; values may change under the labelset lock */
+#define label_isprofile(X) ((X)->flags & FLAG_PROFILE)
+#define label_unconfined(X) ((X)->flags & FLAG_UNCONFINED)
+#define unconfined(X) label_unconfined(X)
+#define label_is_stale(X) ((X)->flags & FLAG_STALE)
+#define __label_make_stale(X) ((X)->flags |= FLAG_STALE)
+/* ns/labelset of a label, derived from the last profile in its vec */
+#define labels_ns(X) (vec_ns(&((X)->vec[0]), (X)->size))
+#define labels_set(X) (&labels_ns(X)->labels)
+#define labels_profile(X) ((X)->vec[(X)->size - 1])
+
+
+int aa_label_next_confined(struct aa_label *l, int i);
+
+/* for each profile in a label */
+#define label_for_each(I, L, P) \
+ for ((I).i = 0; ((P) = (L)->vec[(I).i]); ++((I).i))
+
+/* assumes break/goto ended label_for_each */
+#define label_for_each_cont(I, L, P) \
+ for (++((I).i); ((P) = (L)->vec[(I).i]); ++((I).i))
+
+/* advance iterator @I over the (L1 x L2) cross product, row-major */
+#define next_comb(I, L1, L2) \
+do { \
+ (I).j++; \
+ if ((I).j >= (L2)->size) { \
+ (I).i++; \
+ (I).j = 0; \
+ } \
+} while (0)
+
+
+/* for each combination of P1 in L1, and P2 in L2
+ * NOTE: next_comb() expands to a do { } while (0) statement, so it must
+ * be invoked directly; it cannot appear on the right of an assignment
+ * (the previous "(I) = next_comb(...)" form failed to compile once the
+ * macro was actually expanded)
+ */
+#define label_for_each_comb(I, L1, L2, P1, P2) \
+for ((I).i = (I).j = 0; \
+ ((P1) = (L1)->vec[(I).i]) && ((P2) = (L2)->vec[(I).j]); \
+ next_comb((I), (L1), (L2)))
+
+/* apply @FN to every (P1, P2) combination, keeping the last error */
+#define fn_for_each_comb(L1, L2, P1, P2, FN) \
+({ \
+ struct label_it i; \
+ int __E = 0; \
+ label_for_each_comb(i, (L1), (L2), (P1), (P2)) { \
+ last_error(__E, (FN)); \
+ } \
+ __E; \
+})
+
+/* for each profile that is enforcing confinement in a label */
+#define label_for_each_confined(I, L, P) \
+ for ((I).i = aa_label_next_confined((L), 0); \
+ ((P) = (L)->vec[(I).i]); \
+ (I).i = aa_label_next_confined((L), (I).i + 1))
+
+/* for each profile in the sorted merge of labels @A and @B */
+#define label_for_each_in_merge(I, A, B, P) \
+ for ((I).i = (I).j = 0; \
+ ((P) = aa_label_next_in_merge(&(I), (A), (B))); \
+ )
+
+/* for each profile of @SUB that is not present in @SET */
+#define label_for_each_not_in_set(I, SET, SUB, P) \
+ for ((I).i = (I).j = 0; \
+ ((P) = __aa_label_next_not_in_set(&(I), (SET), (SUB))); \
+ )
+
+/* skip ahead from index @i to the next vec entry belonging to @NS */
+#define next_in_ns(i, NS, L) \
+({ \
+ typeof(i) ___i = (i); \
+ while ((L)->vec[___i] && (L)->vec[___i]->ns != (NS)) \
+ (___i)++; \
+ (___i); \
+})
+
+/* for each profile of @L that is in namespace @NS */
+#define label_for_each_in_ns(I, NS, L, P) \
+ for ((I).i = next_in_ns(0, (NS), (L)); \
+ ((P) = (L)->vec[(I).i]); \
+ (I).i = next_in_ns((I).i + 1, (NS), (L)))
+
+/* apply @FN to each profile of @L in @L's own namespace; last error wins */
+#define fn_for_each_in_ns(L, P, FN) \
+({ \
+ struct label_it __i; \
+ struct aa_ns *__ns = labels_ns(L); \
+ int __E = 0; \
+ label_for_each_in_ns(__i, __ns, (L), (P)) { \
+ last_error(__E, (FN)); \
+ } \
+ __E; \
+})
+
+
+/* generic single-label iteration: apply @FN to each profile selected by
+ * the label_for_each<suffix> iterator named by the variadic suffix
+ */
+#define fn_for_each_XXX(L, P, FN, ...) \
+({ \
+ struct label_it i; \
+ int __E = 0; \
+ label_for_each ## __VA_ARGS__(i, (L), (P)) { \
+ last_error(__E, (FN)); \
+ } \
+ __E; \
+})
+
+#define fn_for_each(L, P, FN) fn_for_each_XXX(L, P, FN)
+#define fn_for_each_confined(L, P, FN) fn_for_each_XXX(L, P, FN, _confined)
+
+/* two-label variant of fn_for_each_XXX */
+#define fn_for_each2_XXX(L1, L2, P, FN, ...) \
+({ \
+ struct label_it i; \
+ int __E = 0; \
+ label_for_each ## __VA_ARGS__(i, (L1), (L2), (P)) { \
+ last_error(__E, (FN)); \
+ } \
+ __E; \
+})
+
+#define fn_for_each_in_merge(L1, L2, P, FN) \
+ fn_for_each2_XXX((L1), (L2), P, FN, _in_merge)
+#define fn_for_each_not_in_set(L1, L2, P, FN) \
+ fn_for_each2_XXX((L1), (L2), P, FN, _not_in_set)
+
+/* 1 if any profile in @L mediates class @C, else 0 */
+#define LABEL_MEDIATES(L, C) \
+({ \
+ struct aa_profile *profile; \
+ struct label_it i; \
+ int ret = 0; \
+ label_for_each(i, (L), profile) { \
+ if (PROFILE_MEDIATES(profile, (C))) { \
+ ret = 1; \
+ break; \
+ } \
+ } \
+ ret; \
+})
+
+
+/* duplicate re-declarations of aa_labelset_destroy()/aa_labelset_init()
+ * removed; they are already declared above next to struct aa_labelset
+ */
+void __aa_labelset_update_subtree(struct aa_ns *ns);
+
+/* label allocation / destruction */
+void aa_label_free(struct aa_label *label);
+void aa_label_kref(struct kref *kref);
+bool aa_label_init(struct aa_label *label, int size);
+struct aa_label *aa_label_alloc(int size, struct aa_proxy *proxy, gfp_t gfp);
+
+/* label set membership / replacement */
+bool aa_label_is_subset(struct aa_label *set, struct aa_label *sub);
+struct aa_profile *__aa_label_next_not_in_set(struct label_it *I,
+ struct aa_label *set,
+ struct aa_label *sub);
+bool aa_label_remove(struct aa_label *label);
+struct aa_label *aa_label_insert(struct aa_labelset *ls, struct aa_label *l);
+bool aa_label_replace(struct aa_label *old, struct aa_label *new);
+bool aa_label_make_newest(struct aa_labelset *ls, struct aa_label *old,
+ struct aa_label *new);
+
+struct aa_label *aa_label_find(struct aa_label *l);
+
+/* label merging */
+struct aa_profile *aa_label_next_in_merge(struct label_it *I,
+ struct aa_label *a,
+ struct aa_label *b);
+struct aa_label *aa_label_find_merge(struct aa_label *a, struct aa_label *b);
+struct aa_label *aa_label_merge(struct aa_label *a, struct aa_label *b,
+ gfp_t gfp);
+
+
+bool aa_update_label_name(struct aa_ns *ns, struct aa_label *label, gfp_t gfp);
+
+/* flags controlling how labels are printed/audited */
+#define FLAGS_NONE 0
+#define FLAG_SHOW_MODE 1
+#define FLAG_VIEW_SUBNS 2
+#define FLAG_HIDDEN_UNCONFINED 4
+int aa_label_snxprint(char *str, size_t size, struct aa_ns *view,
+ struct aa_label *label, int flags);
+int aa_label_asxprint(char **strp, struct aa_ns *ns, struct aa_label *label,
+ int flags, gfp_t gfp);
+int aa_label_acntsxprint(char __counted **strp, struct aa_ns *ns,
+ struct aa_label *label, int flags, gfp_t gfp);
+void aa_label_xaudit(struct audit_buffer *ab, struct aa_ns *ns,
+ struct aa_label *label, int flags, gfp_t gfp);
+void aa_label_seq_xprint(struct seq_file *f, struct aa_ns *ns,
+ struct aa_label *label, int flags, gfp_t gfp);
+void aa_label_xprintk(struct aa_ns *ns, struct aa_label *label, int flags,
+ gfp_t gfp);
+void aa_label_audit(struct audit_buffer *ab, struct aa_label *label, gfp_t gfp);
+void aa_label_seq_print(struct seq_file *f, struct aa_label *label, gfp_t gfp);
+void aa_label_printk(struct aa_label *label, gfp_t gfp);
+
+struct aa_label *aa_label_parse(struct aa_label *base, const char *str,
+ gfp_t gfp, bool create, bool force_stack);
+
+
+struct aa_perms;
+int aa_label_match(struct aa_profile *profile, struct aa_label *label,
+ unsigned int state, bool subns, u32 request,
+ struct aa_perms *perms);
+
+
+/**
+ * __aa_get_label - get a reference count to uncounted label reference
+ * @l: reference to get a count on
+ *
+ * Returns: pointer to reference OR NULL if race is lost and reference is
+ * being repeated.
+ * Requires: lock held, and the return code MUST be checked
+ */
+static inline struct aa_label *__aa_get_label(struct aa_label *l)
+{
+ /* kref_get_unless_zero fails if the label is already being destroyed */
+ if (l && kref_get_unless_zero(&l->count))
+ return l;
+
+ return NULL;
+}
+
+/* get a ref on @l, which must already hold a valid count; NULL tolerant */
+static inline struct aa_label *aa_get_label(struct aa_label *l)
+{
+ if (l)
+ kref_get(&(l->count));
+
+ return l;
+}
+
+
+/**
+ * aa_get_label_rcu - increment refcount on a label that can be replaced
+ * @l: pointer to label that can be replaced (NOT NULL)
+ *
+ * Returns: pointer to a refcounted label.
+ * else NULL if no label
+ */
+static inline struct aa_label *aa_get_label_rcu(struct aa_label __rcu **l)
+{
+ struct aa_label *c;
+
+ rcu_read_lock();
+ /* retry: the label read may be concurrently destroyed, in which case
+ * kref_get_unless_zero fails and *l is re-read for its replacement
+ */
+ do {
+ c = rcu_dereference(*l);
+ } while (c && !kref_get_unless_zero(&c->count));
+ rcu_read_unlock();
+
+ return c;
+}
+
+/**
+ * aa_get_newest_label - find the newest version of @l
+ * @l: the label to check for newer versions of
+ *
+ * Returns: refcounted newest version of @l taking into account
+ * replacement, renames and removals
+ * if @l is not stale, a new ref on @l itself is returned.
+ */
+static inline struct aa_label *aa_get_newest_label(struct aa_label *l)
+{
+ if (!l)
+ return NULL;
+
+ if (label_is_stale(l)) {
+ struct aa_label *tmp;
+
+ AA_BUG(!l->proxy);
+ AA_BUG(!l->proxy->label);
+ /* BUG: only way this can happen is @l ref count and its
+ * replacement count have gone to 0 and are on their way
+ * to destruction. ie. we have a refcounting error
+ */
+ tmp = aa_get_label_rcu(&l->proxy->label);
+ AA_BUG(!tmp);
+
+ return tmp;
+ }
+
+ return aa_get_label(l);
+}
+
+/* drop a ref on @l; frees via aa_label_kref when the count hits 0 */
+static inline void aa_put_label(struct aa_label *l)
+{
+ if (l)
+ kref_put(&l->count, aa_label_kref);
+}
+
+
+struct aa_proxy *aa_alloc_proxy(struct aa_label *l, gfp_t gfp);
+void aa_proxy_kref(struct kref *kref);
+
+/* get a ref on @proxy; NULL tolerant */
+static inline struct aa_proxy *aa_get_proxy(struct aa_proxy *proxy)
+{
+ if (proxy)
+ kref_get(&(proxy->count));
+
+ return proxy;
+}
+
+/* drop a ref on @proxy; frees via aa_proxy_kref when the count hits 0 */
+static inline void aa_put_proxy(struct aa_proxy *proxy)
+{
+ if (proxy)
+ kref_put(&proxy->count, aa_proxy_kref);
+}
+
+void __aa_proxy_redirect(struct aa_label *orig, struct aa_label *new);
+
+#endif /* __AA_LABEL_H */
diff --git a/security/apparmor/label.c b/security/apparmor/label.c
new file mode 100644
index 000000000000..e052eaba1cf6
--- /dev/null
+++ b/security/apparmor/label.c
@@ -0,0 +1,2120 @@
+/*
+ * AppArmor security module
+ *
+ * This file contains AppArmor label definitions
+ *
+ * Copyright 2017 Canonical Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation, version 2 of the
+ * License.
+ */
+
+#include <linux/audit.h>
+#include <linux/seq_file.h>
+#include <linux/sort.h>
+
+#include "include/apparmor.h"
+#include "include/context.h"
+#include "include/label.h"
+#include "include/policy.h"
+#include "include/secid.h"
+
+
+/*
+ * the aa_label represents the set of profiles confining an object
+ *
+ * Labels maintain a reference count to the set of pointers they reference
+ * Labels are ref counted by
+ * tasks and object via the security field/security context off the field
+ * code - will take a ref count on a label if it needs the label
+ * beyond what is possible with an rcu_read_lock.
+ * profiles - each profile is a label
+ * secids - a pinned secid will keep a refcount of the label it is
+ * referencing
+ * objects - inode, files, sockets, ...
+ *
+ * Labels are not ref counted by the label set, so they may be removed and
+ * freed when no longer in use.
+ *
+ */
+
+/* poison values written into freed proxy/label fields to trip
+ * use-after-free with an obviously bogus pointer
+ */
+#define PROXY_POISON 97
+#define LABEL_POISON 100
+
+/* release the label ref held by @proxy, poison it, and free it */
+static void free_proxy(struct aa_proxy *proxy)
+{
+ if (proxy) {
+ /* proxy->label will not be updated any more as proxy is dead */
+ aa_put_label(rcu_dereference_protected(proxy->label, true));
+ memset(proxy, 0, sizeof(*proxy));
+ proxy->label = (struct aa_label *) PROXY_POISON;
+ kfree(proxy);
+ }
+}
+
+/* kref release callback for struct aa_proxy */
+void aa_proxy_kref(struct kref *kref)
+{
+ struct aa_proxy *proxy = container_of(kref, struct aa_proxy, count);
+
+ free_proxy(proxy);
+}
+
+/* allocate a proxy initially pointing at @label (takes a ref on @label);
+ * returns NULL on allocation failure
+ */
+struct aa_proxy *aa_alloc_proxy(struct aa_label *label, gfp_t gfp)
+{
+ struct aa_proxy *new;
+
+ new = kzalloc(sizeof(struct aa_proxy), gfp);
+ if (new) {
+ kref_init(&new->count);
+ rcu_assign_pointer(new->label, aa_get_label(label));
+ }
+ return new;
+}
+
+/* point @orig's proxy at @new and mark @orig stale; the ref previously
+ * held by the proxy is put.
+ * requires profile list write lock held
+ */
+void __aa_proxy_redirect(struct aa_label *orig, struct aa_label *new)
+{
+ struct aa_label *tmp;
+
+ AA_BUG(!orig);
+ AA_BUG(!new);
+ AA_BUG(!write_is_locked(&labels_set(orig)->lock));
+
+ tmp = rcu_dereference_protected(orig->proxy->label,
+ &labels_ns(orig)->lock);
+ rcu_assign_pointer(orig->proxy->label, aa_get_label(new));
+ orig->flags |= FLAG_STALE;
+ aa_put_label(tmp);
+}
+
+/* make @new share @old's proxy (dropping @new's own) and redirect the
+ * shared proxy to @new; used when @new replaces @old in place
+ */
+static void __proxy_share(struct aa_label *old, struct aa_label *new)
+{
+ struct aa_proxy *proxy = new->proxy;
+
+ new->proxy = aa_get_proxy(old->proxy);
+ __aa_proxy_redirect(old, new);
+ aa_put_proxy(proxy);
+}
+
+
+/**
+ * ns_cmp - compare ns for label set ordering
+ * @a: ns to compare (NOT NULL)
+ * @b: ns to compare (NOT NULL)
+ *
+ * Returns: <0 if a < b
+ * ==0 if a == b
+ * >0 if a > b
+ */
+static int ns_cmp(struct aa_ns *a, struct aa_ns *b)
+{
+ int res;
+
+ AA_BUG(!a);
+ AA_BUG(!b);
+ AA_BUG(!a->base.hname);
+ AA_BUG(!b->base.hname);
+
+ if (a == b)
+ return 0;
+
+ /* order first by ns nesting level, then by hierarchical name */
+ res = a->level - b->level;
+ if (res)
+ return res;
+
+ return strcmp(a->base.hname, b->base.hname);
+}
+
+/**
+ * profile_cmp - profile comparison for set ordering
+ * @a: profile to compare (NOT NULL)
+ * @b: profile to compare (NOT NULL)
+ *
+ * Returns: <0 if a < b
+ * ==0 if a == b
+ * >0 if a > b
+ */
+static int profile_cmp(struct aa_profile *a, struct aa_profile *b)
+{
+ int res;
+
+ AA_BUG(!a);
+ AA_BUG(!b);
+ AA_BUG(!a->ns);
+ AA_BUG(!b->ns);
+ AA_BUG(!a->base.hname);
+ AA_BUG(!b->base.hname);
+
+ /* hname pointer equality implies equal profiles (shared hname str) */
+ if (a == b || a->base.hname == b->base.hname)
+ return 0;
+ /* order by namespace first, then by name within the namespace */
+ res = ns_cmp(a->ns, b->ns);
+ if (res)
+ return res;
+
+ return strcmp(a->base.hname, b->base.hname);
+}
+
+/**
+ * vec_cmp - profile vector comparison for set ordering
+ * @a: first vector of profiles to compare (NOT NULL)
+ * @an: length of @a
+ * @b: second vector of profiles to compare (NOT NULL)
+ * @bn: length of @b
+ *
+ * Returns: <0 if a < b
+ * ==0 if a == b
+ * >0 if a > b
+ */
+static int vec_cmp(struct aa_profile **a, int an, struct aa_profile **b, int bn)
+{
+ int i;
+
+ AA_BUG(!a);
+ AA_BUG(!*a);
+ AA_BUG(!b);
+ AA_BUG(!*b);
+ AA_BUG(an <= 0);
+ AA_BUG(bn <= 0);
+
+ /* lexicographic compare over the common prefix */
+ for (i = 0; i < an && i < bn; i++) {
+ int res = profile_cmp(a[i], b[i]);
+
+ if (res != 0)
+ return res;
+ }
+
+ /* equal prefix: the shorter vector orders first */
+ return an - bn;
+}
+
+/* true if any of the @n profiles in @vec has been marked stale */
+static bool vec_is_stale(struct aa_profile **vec, int n)
+{
+ int i = 0;
+
+ AA_BUG(!vec);
+
+ while (i < n) {
+ if (profile_is_stale(vec[i]))
+ return true;
+ i++;
+ }
+
+ return false;
+}
+
+/* true only if every one of the @n profiles in @vec is unconfined */
+static bool vec_unconfined(struct aa_profile **vec, int n)
+{
+ int idx;
+
+ AA_BUG(!vec);
+
+ for (idx = 0; idx < n; idx++) {
+ bool confined = !profile_unconfined(vec[idx]);
+
+ if (confined)
+ return false;
+ }
+
+ return true;
+}
+
+/* sort() comparator adapter: @a/@b are pointers into the profile vector */
+static int sort_cmp(const void *a, const void *b)
+{
+ return profile_cmp(*(struct aa_profile **)a, *(struct aa_profile **)b);
+}
+
+/*
+ * drop adjacent duplicates from a sorted @vec of @n profiles, putting the
+ * ref of each dropped entry; returns the number of duplicates removed.
+ * assumes vec is sorted
+ * Assumes @vec has null terminator at vec[n], and will null terminate
+ * vec[n - dups]
+ * (NOTE(review): this fn itself does not write the terminator; the caller
+ * aa_vec_unique() does — confirm all other callers do the same)
+ */
+static inline int unique(struct aa_profile **vec, int n)
+{
+ int i, pos, dups = 0;
+
+ AA_BUG(n < 1);
+ AA_BUG(!vec);
+
+ /* pos trails i, pointing at the last unique entry kept */
+ pos = 0;
+ for (i = 1; i < n; i++) {
+ int res = profile_cmp(vec[pos], vec[i]);
+
+ AA_BUG(res > 0, "vec not sorted");
+ if (res == 0) {
+ /* drop duplicate */
+ aa_put_profile(vec[i]);
+ dups++;
+ continue;
+ }
+ pos++;
+ if (dups)
+ vec[pos] = vec[i];
+ }
+
+ AA_BUG(dups < 0);
+
+ return dups;
+}
+
+/**
+ * aa_vec_unique - canonical sort and unique a list of profiles
+ * @vec: list of refcounted profiles to sort and merge (NOT NULL)
+ * @n: number of entries in @vec (@n > 0)
+ * @flags: VEC_FLAG_TERMINATE to null terminate the result
+ *
+ * Returns: the number of duplicates eliminated == references put
+ *
+ * If @flags & VEC_FLAG_TERMINATE @vec has null terminator at vec[n], and will
+ * null terminate vec[n - dups]
+ */
+int aa_vec_unique(struct aa_profile **vec, int n, int flags)
+{
+ int i, dups = 0;
+
+ AA_BUG(n < 1);
+ AA_BUG(!vec);
+
+ /* vecs are usually small and inorder, have a fallback for larger */
+ if (n > 8) {
+ sort(vec, n, sizeof(struct aa_profile *), sort_cmp, NULL);
+ dups = unique(vec, n);
+ goto out;
+ }
+
+ /* insertion sort + unique in one */
+ for (i = 1; i < n; i++) {
+ struct aa_profile *tmp = vec[i];
+ int pos, j;
+
+ /* scan back through the already-sorted prefix */
+ for (pos = i - 1 - dups; pos >= 0; pos--) {
+ int res = profile_cmp(vec[pos], tmp);
+
+ if (res == 0) {
+ /* drop duplicate entry */
+ aa_put_profile(tmp);
+ dups++;
+ goto continue_outer;
+ } else if (res < 0)
+ break;
+ }
+ /* pos is at entry < tmp, or index -1. Set to insert pos */
+ pos++;
+
+ /* shift the tail right and insert tmp in order */
+ for (j = i - dups; j > pos; j--)
+ vec[j] = vec[j - 1];
+ vec[pos] = tmp;
+continue_outer:
+ ;
+ }
+
+ AA_BUG(dups < 0);
+
+out:
+ if (flags & VEC_FLAG_TERMINATE)
+ vec[n - dups] = NULL;
+
+ return dups;
+}
+
+
+/**
+ * label_destroy - release the resources held by @label
+ * @label: label to clean up (NOT NULL)
+ *
+ * Puts the refs held on the profile vec (unless the label is a profile,
+ * which owns its vec entry), breaks the circular proxy reference, frees
+ * the secid and poisons freed fields to catch use after free.
+ */
+static void label_destroy(struct aa_label *label)
+{
+ AA_BUG(!label);
+
+ if (!label_isprofile(label)) {
+ struct aa_profile *profile;
+ struct label_it i;
+
+ aa_put_str(label->hname);
+
+ label_for_each(i, label, profile) {
+ aa_put_profile(profile);
+ label->vec[i.i] = (struct aa_profile *)
+ (LABEL_POISON + (long) i.i);
+ }
+ }
+
+ /* break the circular ref if the proxy still points back at @label.
+ * (a second, identical re-check of proxy->label that followed
+ * aa_free_secid() was dead code — once cleared here it can never
+ * again equal @label — and has been removed along with its temp)
+ */
+ if (rcu_dereference_protected(label->proxy->label, true) == label)
+ rcu_assign_pointer(label->proxy->label, NULL);
+
+ aa_free_secid(label->secid);
+
+ aa_put_proxy(label->proxy);
+ label->proxy = (struct aa_proxy *) PROXY_POISON + 1;
+}
+
+/* destroy and free @label; NULL tolerant */
+void aa_label_free(struct aa_label *label)
+{
+ if (!label)
+ return;
+
+ label_destroy(label);
+ kfree(label);
+}
+
+/* dispatch to the correct free routine for the object carrying @label:
+ * a namespace, a profile, or a plain compound label
+ */
+static void label_free_switch(struct aa_label *label)
+{
+ if (label->flags & FLAG_NS_COUNT)
+ aa_free_ns(labels_ns(label));
+ else if (label_isprofile(label))
+ aa_free_profile(labels_profile(label));
+ else
+ aa_label_free(label);
+}
+
+/* rcu callback: remove @label from its tree (if still linked) and free it */
+static void label_free_rcu(struct rcu_head *head)
+{
+ struct aa_label *label = container_of(head, struct aa_label, rcu);
+
+ if (label->flags & FLAG_IN_TREE)
+ (void) aa_label_remove(label);
+ label_free_switch(label);
+}
+
+/* kref release callback for struct aa_label; defers freeing to rcu when
+ * the label was ever live (visible through a namespace)
+ */
+void aa_label_kref(struct kref *kref)
+{
+ struct aa_label *label = container_of(kref, struct aa_label, count);
+ struct aa_ns *ns = labels_ns(label);
+
+ if (!ns) {
+ /* never live, no rcu callback needed, just using the fn */
+ label_free_switch(label);
+ return;
+ }
+ /* TODO: update labels_profile macro so it works here */
+ AA_BUG(label_isprofile(label) &&
+ on_list_rcu(&label->vec[0]->base.profiles));
+ AA_BUG(label_isprofile(label) &&
+ on_list_rcu(&label->vec[0]->base.list));
+
+ /* TODO: if compound label and not stale add to reclaim cache */
+ call_rcu(&label->rcu, label_free_rcu);
+}
+
+/* dispose of @new after an insert attempt: if it was not the label
+ * actually inserted (@label != @new) free it directly, else just put
+ * the extra ref
+ */
+static void label_free_or_put_new(struct aa_label *label, struct aa_label *new)
+{
+ if (label != new)
+ /* need to free directly to break circular ref with proxy */
+ aa_label_free(new);
+ else
+ aa_put_label(new);
+}
+
+/* initialize an allocated @label for a vec of @size profiles; allocates
+ * the label's secid. Returns false if secid allocation fails.
+ * Requires: @label storage sized for @size + 1 vec entries
+ */
+bool aa_label_init(struct aa_label *label, int size)
+{
+ AA_BUG(!label);
+ AA_BUG(size < 1);
+
+ label->secid = aa_alloc_secid();
+ if (label->secid == AA_SECID_INVALID)
+ return false;
+
+ label->size = size; /* doesn't include null */
+ label->vec[size] = NULL; /* null terminate */
+ kref_init(&label->count);
+ RB_CLEAR_NODE(&label->node);
+
+ return true;
+}
+
+/**
+ * aa_label_alloc - allocate a label with a profile vector of @size length
+ * @size: size of profile vector in the label
+ * @proxy: proxy to use OR null if to allocate a new one
+ * @gfp: memory allocation type
+ *
+ * Returns: new label
+ * else NULL if failed
+ */
+struct aa_label *aa_label_alloc(int size, struct aa_proxy *proxy, gfp_t gfp)
+{
+ struct aa_label *new;
+
+ AA_BUG(size < 1);
+
+ /* + 1 for null terminator entry on vec */
+ new = kzalloc(sizeof(*new) + sizeof(struct aa_profile *) * (size + 1),
+ gfp);
+ AA_DEBUG("%s (%p)\n", __func__, new);
+ if (!new)
+ goto fail;
+
+ if (!aa_label_init(new, size))
+ goto fail;
+
+ if (!proxy) {
+ proxy = aa_alloc_proxy(new, gfp);
+ if (!proxy)
+ /* init succeeded, so the secid must be released
+ * too or it is leaked
+ */
+ goto fail_secid;
+ } else
+ aa_get_proxy(proxy);
+ /* just set new's proxy, don't redirect proxy here if it was passed in*/
+ new->proxy = proxy;
+
+ return new;
+
+fail_secid:
+ aa_free_secid(new->secid);
+fail:
+ kfree(new);
+
+ return NULL;
+}
+
+
+/**
+ * label_cmp - label comparison for set ordering
+ * @a: label to compare (NOT NULL)
+ * @b: label to compare (NOT NULL)
+ *
+ * Returns: <0 if a < b
+ * ==0 if a == b
+ * >0 if a > b
+ */
+static int label_cmp(struct aa_label *a, struct aa_label *b)
+{
+ /* NOTE(review): only @b is AA_BUG-checked but @a is dereferenced
+ * below — confirm callers never pass a NULL @a
+ */
+ AA_BUG(!b);
+
+ if (a == b)
+ return 0;
+
+ return vec_cmp(a->vec, a->size, b->vec, b->size);
+}
+
+/* helper fn for label_for_each_confined: return the index of the first
+ * confined (not unconfined) profile at or after @i, else label->size
+ */
+int aa_label_next_confined(struct aa_label *label, int i)
+{
+ AA_BUG(!label);
+ AA_BUG(i < 0);
+
+ while (i < label->size) {
+ if (!profile_unconfined(label->vec[i]))
+ break;
+ i++;
+ }
+
+ return i;
+}
+
+/**
+ * __aa_label_next_not_in_set - return the next profile of @sub not in @set
+ * @I: label iterator
+ * @set: label to test against
+ * @sub: label to if is subset of @set
+ *
+ * Returns: profile in @sub that is not in @set, with iterator set pos after
+ * else NULL if @sub is a subset of @set
+ */
+struct aa_profile *__aa_label_next_not_in_set(struct label_it *I,
+ struct aa_label *set,
+ struct aa_label *sub)
+{
+ AA_BUG(!set);
+ AA_BUG(!I);
+ AA_BUG(I->i < 0);
+ AA_BUG(I->i > set->size);
+ AA_BUG(!sub);
+ AA_BUG(I->j < 0);
+ AA_BUG(I->j > sub->size);
+
+ /* both vecs are sorted: advance the two cursors like a merge */
+ while (I->j < sub->size && I->i < set->size) {
+ int res = profile_cmp(sub->vec[I->j], set->vec[I->i]);
+
+ if (res == 0) {
+ (I->j)++;
+ (I->i)++;
+ } else if (res > 0)
+ (I->i)++;
+ else
+ return sub->vec[(I->j)++];
+ }
+
+ /* @set exhausted: any remaining @sub entries are not in @set */
+ if (I->j < sub->size)
+ return sub->vec[(I->j)++];
+
+ return NULL;
+}
+
+/**
+ * aa_label_is_subset - test if @sub is a subset of @set
+ * @set: label to test against
+ * @sub: label to test if is subset of @set
+ *
+ * Returns: true if @sub is subset of @set
+ * else false
+ */
+bool aa_label_is_subset(struct aa_label *set, struct aa_label *sub)
+{
+ struct label_it i = { };
+
+ AA_BUG(!set);
+ AA_BUG(!sub);
+
+ if (sub == set)
+ return true;
+
+ /* subset iff no profile of @sub is missing from @set */
+ return __aa_label_next_not_in_set(&i, set, sub) == NULL;
+}
+
+
+
+/**
+ * __label_remove - remove @label from the label set
+ * @label: label to remove
+ * @new: label to redirect to (MAYBE NULL)
+ *
+ * Requires: labels_set(@label)->lock write_lock
+ * Returns: true if the label was in the tree and removed
+ */
+static bool __label_remove(struct aa_label *label, struct aa_label *new)
+{
+ struct aa_labelset *ls = labels_set(label);
+
+ AA_BUG(!ls);
+ AA_BUG(!label);
+ AA_BUG(!write_is_locked(&ls->lock));
+
+ if (new)
+ __aa_proxy_redirect(label, new);
+
+ if (!label_is_stale(label))
+ __label_make_stale(label);
+
+ if (label->flags & FLAG_IN_TREE) {
+ rb_erase(&label->node, &ls->root);
+ label->flags &= ~FLAG_IN_TREE;
+ return true;
+ }
+
+ return false;
+}
+
+/**
+ * __label_replace - replace @old with @new in label set
+ * @old: label to remove from label set
+ * @new: label to replace @old with
+ *
+ * Requires: labels_set(@old)->lock write_lock
+ * valid ref count be held on @new
+ * Returns: true if @old was in set and replaced by @new
+ *
+ * Note: current implementation requires label set be order in such a way
+ * that @new directly replaces @old position in the set (ie.
+ * using pointer comparison of the label address would not work)
+ */
+static bool __label_replace(struct aa_label *old, struct aa_label *new)
+{
+ struct aa_labelset *ls = labels_set(old);
+
+ AA_BUG(!ls);
+ AA_BUG(!old);
+ AA_BUG(!new);
+ AA_BUG(!write_is_locked(&ls->lock));
+ AA_BUG(new->flags & FLAG_IN_TREE);
+
+ if (!label_is_stale(old))
+ __label_make_stale(old);
+
+ if (old->flags & FLAG_IN_TREE) {
+ /* direct node swap is valid only because @new compares equal
+ * to @old in the tree ordering (see Note above)
+ */
+ rb_replace_node(&old->node, &new->node, &ls->root);
+ old->flags &= ~FLAG_IN_TREE;
+ new->flags |= FLAG_IN_TREE;
+ return true;
+ }
+
+ return false;
+}
+
+/**
+ * __label_insert - attempt to insert @label into a label set
+ * @ls: set of labels to insert @label into (NOT NULL)
+ * @label: new label to insert (NOT NULL)
+ * @replace: whether insertion should replace existing entry that is not stale
+ *
+ * Requires: @ls->lock
+ * caller to hold a valid ref on @label
+ * if @replace is true @label has a preallocated proxy associated
+ * Returns: @label if successful in inserting @label - with additional refcount
+ * else ref counted equivalent label that is already in the set,
+ * the else condition only happens if @replace is false
+ */
+static struct aa_label *__label_insert(struct aa_labelset *ls,
+ struct aa_label *label, bool replace)
+{
+ struct rb_node **new, *parent = NULL;
+
+ AA_BUG(!ls);
+ AA_BUG(!label);
+ AA_BUG(labels_set(label) != ls);
+ AA_BUG(!write_is_locked(&ls->lock));
+ AA_BUG(label->flags & FLAG_IN_TREE);
+
+ /* Figure out where to put new node */
+ new = &ls->root.rb_node;
+ while (*new) {
+ struct aa_label *this = rb_entry(*new, struct aa_label, node);
+ int result = label_cmp(label, this);
+
+ parent = *new;
+ if (result == 0) {
+ /* !__aa_get_label means queued for destruction,
+ * so replace in place, however the label has
+ * died before the replacement so do not share
+ * the proxy
+ */
+ if (!replace && !label_is_stale(this)) {
+ if (__aa_get_label(this))
+ return this;
+ } else
+ __proxy_share(this, label);
+ AA_BUG(!__label_replace(this, label));
+ return aa_get_label(label);
+ } else if (result < 0)
+ new = &((*new)->rb_left);
+ else /* (result > 0) */
+ new = &((*new)->rb_right);
+ }
+
+ /* Add new node and rebalance tree. */
+ rb_link_node(&label->node, parent, new);
+ rb_insert_color(&label->node, &ls->root);
+ label->flags |= FLAG_IN_TREE;
+
+ return aa_get_label(label);
+}
+
+/**
+ * __vec_find - find label that matches @vec in label set
+ * @vec: vec of profiles to find matching label for (NOT NULL)
+ * @n: length of @vec
+ *
+ * Requires: @vec_labelset(vec) lock held
+ *
+ * Returns: ref counted label if matching label is in tree
+ * ref counted label that is equiv to @vec in tree
+ * else NULL if @vec equiv is not in tree
+ */
+static struct aa_label *__vec_find(struct aa_profile **vec, int n)
+{
+ struct rb_node *node;
+
+ AA_BUG(!vec);
+ AA_BUG(!*vec);
+ AA_BUG(n <= 0);
+
+ /* standard rbtree binary search keyed by vec_cmp() ordering */
+ node = vec_labelset(vec, n)->root.rb_node;
+ while (node) {
+ struct aa_label *this = rb_entry(node, struct aa_label, node);
+ int result = vec_cmp(this->vec, this->size, vec, n);
+
+ if (result > 0)
+ node = node->rb_left;
+ else if (result < 0)
+ node = node->rb_right;
+ else
+ /* may return NULL if the match lost a destruction race */
+ return __aa_get_label(this);
+ }
+
+ return NULL;
+}
+
+/**
+ * __label_find - find label @label in label set
+ * @label: label to find (NOT NULL)
+ *
+ * Requires: labels_set(@label)->lock held
+ * caller to hold a valid ref on @label
+ *
+ * Returns: ref counted @label if @label is in tree OR
+ * ref counted label that is equiv to @label in tree
+ * else NULL if @label or equiv is not in tree
+ */
+static struct aa_label *__label_find(struct aa_label *label)
+{
+ AA_BUG(!label);
+
+ return __vec_find(label->vec, label->size);
+}
+
+
+/**
+ * aa_label_remove - remove a label from the labelset
+ * @label: label to remove
+ *
+ * Returns: true if @label was removed from the tree
+ * else @label was not in tree so it could not be removed
+ */
+bool aa_label_remove(struct aa_label *label)
+{
+ struct aa_labelset *ls = labels_set(label);
+ unsigned long flags;
+ bool res;