c7c556f1e8
Refactor the logic for changing SELinux policy booleans in a similar
manner to the refactoring of policy load, thereby reducing the size of
the critical section when the policy write-lock is held and making it
easier to convert the policy rwlock to RCU in the future.

Instead of directly modifying the policydb in place, modify a copy and
then swap it into place through a single pointer update. Only fully
copy the portions of the policydb that are affected by boolean changes
to avoid the full cost of a deep policydb copy. Introduce another level
of indirection for the sidtab since changing booleans does not require
updating the sidtab, unlike policy load. While we are here, create a
common helper for notifying other kernel components and userspace of a
policy change and call it from both security_set_bools() and
selinux_policy_commit().

Based on an old (2004) patch by Kaigai Kohei [1] to convert the policy
rwlock to RCU that was deferred at the time since it did not
significantly improve performance and introduced complexity. Peter
Enderborg later submitted a patch series to convert to RCU [2] that
would have made changing booleans a much more expensive operation by
requiring a full policydb_write(); policydb_read(); sequence to deep
copy the entire policydb, and it also raised concerns regarding atomic
allocations. This change is now simplified by the earlier work to
encapsulate policy state in the selinux_policy struct and to refactor
policy load. After this change, the last major obstacle to converting
the policy rwlock to RCU is likely the sidtab live convert support.

[1] https://lore.kernel.org/selinux/6e2f9128-e191-ebb3-0e87-74bfccb0767f@tycho.nsa.gov/
[2] https://lore.kernel.org/selinux/20180530141104.28569-1-peter.enderborg@sony.com/

Signed-off-by: Stephen Smalley <stephen.smalley.work@gmail.com>
Signed-off-by: Paul Moore <paul@paul-moore.com>
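
The core of the change is a copy-and-swap update: build a modified copy of the
boolean-affected state outside the critical section, then publish it with a
single pointer update while the policy write-lock is held. Below is a minimal
userspace analogy of that pattern, not the kernel code; the names (bool_table,
table_dup, set_bools) are invented for illustration, and writers are assumed to
be serialized elsewhere, much as the kernel serializes policy updates
separately.

/* Minimal userspace analogy of the copy-and-swap pattern described above;
 * illustration only, not the kernel implementation. */
#include <pthread.h>
#include <stdlib.h>
#include <string.h>

struct bool_table {
        unsigned int nbools;
        int *values;
};

static struct bool_table *active;       /* current, reader-visible table */
static pthread_rwlock_t table_lock = PTHREAD_RWLOCK_INITIALIZER;

/* Deep-copy only the state that a boolean update actually touches. */
static struct bool_table *table_dup(const struct bool_table *orig)
{
        struct bool_table *new = malloc(sizeof(*new));

        if (!new)
                return NULL;
        new->nbools = orig->nbools;
        new->values = malloc(orig->nbools * sizeof(*new->values));
        if (!new->values) {
                free(new);
                return NULL;
        }
        memcpy(new->values, orig->values,
               orig->nbools * sizeof(*new->values));
        return new;
}

/* Writers are assumed to be serialized elsewhere, so reading 'active'
 * here without the lock is safe in this sketch. */
int set_bools(const int *values, unsigned int len)
{
        struct bool_table *old, *new;
        unsigned int i;

        if (!active)
                return -1;

        /* Copy and modify outside the write-side critical section. */
        new = table_dup(active);
        if (!new)
                return -1;
        for (i = 0; i < len && i < new->nbools; i++)
                new->values[i] = !!values[i];

        /* The critical section shrinks to a single pointer update. */
        pthread_rwlock_wrlock(&table_lock);
        old = active;
        active = new;
        pthread_rwlock_unlock(&table_lock);

        /* No reader can still hold 'old' once the write lock was taken. */
        free(old->values);
        free(old);
        return 0;
}

Readers take the read lock, dereference the active pointer, and use the table
entirely under the lock, which is why a single pointer swap is enough; it is
also the shape that makes a later conversion of the lock to RCU
straightforward.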
// SPDX-License-Identifier: GPL-2.0
/*
 * Implementation of the hash table type.
 *
 * Author : Stephen Smalley, <sds@tycho.nsa.gov>
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include "hashtab.h"

static struct kmem_cache *hashtab_node_cachep;

/*
 * Here we simply round the number of elements up to the nearest power of two.
 * I tried also other options like rounding down or rounding to the closest
 * power of two (up or down based on which is closer), but I was unable to
 * find any significant difference in lookup/insert performance that would
 * justify switching to a different (less intuitive) formula. It could be that
 * a different formula is actually more optimal, but any future changes here
 * should be supported with performance/memory usage data.
 *
 * The total memory used by the htable arrays (only) with Fedora policy loaded
 * is approximately 163 KB at the time of writing.
 */
static u32 hashtab_compute_size(u32 nel)
{
        return nel == 0 ? 0 : roundup_pow_of_two(nel);
}

int hashtab_init(struct hashtab *h, u32 nel_hint)
{
        h->size = hashtab_compute_size(nel_hint);
        h->nel = 0;

        if (!h->size)
                return 0;

        h->htable = kcalloc(h->size, sizeof(*h->htable), GFP_KERNEL);
        return h->htable ? 0 : -ENOMEM;
}

int __hashtab_insert(struct hashtab *h, struct hashtab_node **dst,
                     void *key, void *datum)
{
        struct hashtab_node *newnode;

        newnode = kmem_cache_zalloc(hashtab_node_cachep, GFP_KERNEL);
        if (!newnode)
                return -ENOMEM;
        newnode->key = key;
        newnode->datum = datum;
        newnode->next = *dst;
        *dst = newnode;

        h->nel++;
        return 0;
}
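
/*
 * Example (not part of the original file): __hashtab_insert() only links a
 * node into a bucket chain; callers are expected to hash the key and locate
 * the bucket first, and real callers typically also check for an existing
 * key before inserting. A hypothetical caller, with an assumed hash
 * callback, might look like:
 */
static int example_insert(struct hashtab *h, void *key, void *datum,
                          u32 (*hash)(const void *key))
{
        u32 idx;

        if (!h->size)
                return -EINVAL;
        /* h->size is always a power of two, so masking selects a bucket. */
        idx = hash(key) & (h->size - 1);
        return __hashtab_insert(h, &h->htable[idx], key, datum);
}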

void hashtab_destroy(struct hashtab *h)
{
        u32 i;
        struct hashtab_node *cur, *temp;

        for (i = 0; i < h->size; i++) {
                cur = h->htable[i];
                while (cur) {
                        temp = cur;
                        cur = cur->next;
                        kmem_cache_free(hashtab_node_cachep, temp);
                }
                h->htable[i] = NULL;
        }

        kfree(h->htable);
        h->htable = NULL;
}

int hashtab_map(struct hashtab *h,
                int (*apply)(void *k, void *d, void *args),
                void *args)
{
        u32 i;
        int ret;
        struct hashtab_node *cur;

        for (i = 0; i < h->size; i++) {
                cur = h->htable[i];
                while (cur) {
                        ret = apply(cur->key, cur->datum, args);
                        if (ret)
                                return ret;
                        cur = cur->next;
                }
        }
        return 0;
}
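
/*
 * Example (not part of the original file): hashtab_map() walks every
 * (key, datum) pair and stops at the first nonzero return from the
 * callback. A trivial, hypothetical caller that counts entries:
 */
static int example_count_one(void *key, void *datum, void *args)
{
        u32 *count = args;

        (*count)++;
        return 0;       /* a nonzero return would abort the walk */
}

static u32 example_count_entries(struct hashtab *h)
{
        u32 count = 0;

        hashtab_map(h, example_count_one, &count);
        return count;
}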

void hashtab_stat(struct hashtab *h, struct hashtab_info *info)
{
        u32 i, chain_len, slots_used, max_chain_len;
        struct hashtab_node *cur;

        slots_used = 0;
        max_chain_len = 0;
        for (i = 0; i < h->size; i++) {
                cur = h->htable[i];
                if (cur) {
                        slots_used++;
                        chain_len = 0;
                        while (cur) {
                                chain_len++;
                                cur = cur->next;
                        }

                        if (chain_len > max_chain_len)
                                max_chain_len = chain_len;
                }
        }

        info->slots_used = slots_used;
        info->max_chain_len = max_chain_len;
}
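
/*
 * Example (not part of the original file): hashtab_stat() is a diagnostic
 * helper for judging how evenly keys spread across buckets. A hypothetical
 * caller might log the fill ratio and longest chain like this:
 */
static void example_hash_eval(struct hashtab *h, const char *name)
{
        struct hashtab_info info;

        hashtab_stat(h, &info);
        pr_debug("%s: %u entries, %u/%u buckets used, longest chain %u\n",
                 name, h->nel, info.slots_used, h->size, info.max_chain_len);
}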

int hashtab_duplicate(struct hashtab *new, struct hashtab *orig,
                      int (*copy)(struct hashtab_node *new,
                                  struct hashtab_node *orig, void *args),
                      int (*destroy)(void *k, void *d, void *args),
                      void *args)
{
        struct hashtab_node *cur, *tmp, *tail;
        int i, rc;

        memset(new, 0, sizeof(*new));

        new->htable = kcalloc(orig->size, sizeof(*new->htable), GFP_KERNEL);
        if (!new->htable)
                return -ENOMEM;

        new->size = orig->size;

        for (i = 0; i < orig->size; i++) {
                tail = NULL;
                for (cur = orig->htable[i]; cur; cur = cur->next) {
                        tmp = kmem_cache_zalloc(hashtab_node_cachep,
                                                GFP_KERNEL);
                        if (!tmp)
                                goto error;
                        rc = copy(tmp, cur, args);
                        if (rc) {
                                kmem_cache_free(hashtab_node_cachep, tmp);
                                goto error;
                        }
                        tmp->next = NULL;
                        if (!tail)
                                new->htable[i] = tmp;
                        else
                                tail->next = tmp;
                        tail = tmp;
                        new->nel++;
                }
        }

        return 0;

 error:
        for (i = 0; i < new->size; i++) {
                for (cur = new->htable[i]; cur; cur = tmp) {
                        tmp = cur->next;
                        destroy(cur->key, cur->datum, args);
                        kmem_cache_free(hashtab_node_cachep, cur);
                }
        }
        /*
         * The hashtab itself is caller-owned and was not allocated from the
         * node cache; free only the bucket array allocated above and leave
         * the struct in a safely destroyable state.
         */
        kfree(new->htable);
        memset(new, 0, sizeof(*new));
        return -ENOMEM;
}
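
/*
 * Example (not part of the original file): hashtab_duplicate() deep-copies a
 * table using per-entry copy() and destroy() callbacks; destroy() is only
 * invoked in the error path to unwind entries that were already copied. This
 * is what lets the boolean-update path described in the commit message copy
 * just the affected tables rather than the whole policydb. Hypothetical
 * callbacks for a table whose datum is a single kmalloc'd u32:
 */
static int example_copy_node(struct hashtab_node *new,
                             struct hashtab_node *orig, void *args)
{
        u32 *val = kmalloc(sizeof(*val), GFP_KERNEL);

        if (!val)
                return -ENOMEM;
        *val = *(u32 *)orig->datum;
        new->key = orig->key;   /* keys are shared with the original table */
        new->datum = val;
        return 0;
}

static int example_destroy_node(void *key, void *datum, void *args)
{
        kfree(datum);
        return 0;
}

/*
 * Usage sketch:
 *      rc = hashtab_duplicate(&new_table, &old_table,
 *                             example_copy_node, example_destroy_node, NULL);
 */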

void __init hashtab_cache_init(void)
{
        hashtab_node_cachep = kmem_cache_create("hashtab_node",
                                                sizeof(struct hashtab_node),
                                                0, SLAB_PANIC, NULL);
}