ipc: merge ipc_rcu and kern_ipc_perm

ipc has two management structures that exist for every id:
 - struct kern_ipc_perm, which contains e.g. the permissions.
 - struct ipc_rcu, which contains the rcu head for rcu handling and the
   refcount.

The patch merges both structures: the rcu head and the refcount move into
struct kern_ipc_perm, and struct ipc_rcu is removed.

As a bonus, we may save one cacheline, because both structures are
cacheline aligned.  In addition, it reduces the number of casts: most
codepaths can now use container_of() instead.

To simplify the code, ipc_rcu_alloc() now initializes the allocation to 0.

[manfred@colorfullife.com: really include the memset() into ipc_rcu_alloc()]
  Link: http://lkml.kernel.org/r/564f8612-0601-b267-514f-a9f650ec9b32@colorfullife.com
Link: http://lkml.kernel.org/r/20170525185107.12869-3-manfred@colorfullife.com
Signed-off-by: Manfred Spraul <manfred@colorfullife.com>
Cc: Davidlohr Bueso <dave@stgolabs.net>
Cc: Kees Cook <keescook@chromium.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

Author: Manfred Spraul
Date: 2017-07-12 14:34:41 -07:00 (committed by Linus Torvalds)
parent 1a23395672
commit dba4cdd39e
6 changed files with 66 additions and 61 deletions

include/linux/ipc.h

@@ -20,6 +20,9 @@ struct kern_ipc_perm {
         umode_t mode;
         unsigned long seq;
         void *security;
+        struct rcu_head rcu;
+        atomic_t refcount;
 } ____cacheline_aligned_in_smp;
 #endif /* _LINUX_IPC_H */

ipc/msg.c

@@ -97,8 +97,8 @@ static inline void msg_rmid(struct ipc_namespace *ns, struct msg_queue *s)
 static void msg_rcu_free(struct rcu_head *head)
 {
-        struct ipc_rcu *p = container_of(head, struct ipc_rcu, rcu);
-        struct msg_queue *msq = ipc_rcu_to_struct(p);
+        struct kern_ipc_perm *p = container_of(head, struct kern_ipc_perm, rcu);
+        struct msg_queue *msq = container_of(p, struct msg_queue, q_perm);
         security_msg_queue_free(msq);
         ipc_rcu_free(head);
@@ -118,7 +118,10 @@ static int newque(struct ipc_namespace *ns, struct ipc_params *params)
         key_t key = params->key;
         int msgflg = params->flg;
-        msq = ipc_rcu_alloc(sizeof(*msq));
+        BUILD_BUG_ON(offsetof(struct msg_queue, q_perm) != 0);
+        msq = container_of(ipc_rcu_alloc(sizeof(*msq)), struct msg_queue,
+                           q_perm);
         if (!msq)
                 return -ENOMEM;
@@ -128,7 +131,7 @@ static int newque(struct ipc_namespace *ns, struct ipc_params *params)
         msq->q_perm.security = NULL;
         retval = security_msg_queue_alloc(msq);
         if (retval) {
-                ipc_rcu_putref(msq, ipc_rcu_free);
+                ipc_rcu_putref(&msq->q_perm, ipc_rcu_free);
                 return retval;
         }
@@ -144,7 +147,7 @@ static int newque(struct ipc_namespace *ns, struct ipc_params *params)
         /* ipc_addid() locks msq upon success. */
         id = ipc_addid(&msg_ids(ns), &msq->q_perm, ns->msg_ctlmni);
         if (id < 0) {
-                ipc_rcu_putref(msq, msg_rcu_free);
+                ipc_rcu_putref(&msq->q_perm, msg_rcu_free);
                 return id;
         }
@@ -249,7 +252,7 @@ static void freeque(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
                 free_msg(msg);
         }
         atomic_sub(msq->q_cbytes, &ns->msg_bytes);
-        ipc_rcu_putref(msq, msg_rcu_free);
+        ipc_rcu_putref(&msq->q_perm, msg_rcu_free);
 }
 /*
@@ -688,7 +691,7 @@ long do_msgsnd(int msqid, long mtype, void __user *mtext,
                 /* enqueue the sender and prepare to block */
                 ss_add(msq, &s, msgsz);
-                if (!ipc_rcu_getref(msq)) {
+                if (!ipc_rcu_getref(&msq->q_perm)) {
                         err = -EIDRM;
                         goto out_unlock0;
                 }
@@ -700,7 +703,7 @@ long do_msgsnd(int msqid, long mtype, void __user *mtext,
                 rcu_read_lock();
                 ipc_lock_object(&msq->q_perm);
-                ipc_rcu_putref(msq, msg_rcu_free);
+                ipc_rcu_putref(&msq->q_perm, msg_rcu_free);
                 /* raced with RMID? */
                 if (!ipc_valid_object(&msq->q_perm)) {
                         err = -EIDRM;

ipc/sem.c

@@ -260,8 +260,8 @@ static void merge_queues(struct sem_array *sma)
 static void sem_rcu_free(struct rcu_head *head)
 {
-        struct ipc_rcu *p = container_of(head, struct ipc_rcu, rcu);
-        struct sem_array *sma = ipc_rcu_to_struct(p);
+        struct kern_ipc_perm *p = container_of(head, struct kern_ipc_perm, rcu);
+        struct sem_array *sma = container_of(p, struct sem_array, sem_perm);
         security_sem_free(sma);
         ipc_rcu_free(head);
@@ -438,7 +438,7 @@ static inline struct sem_array *sem_obtain_object_check(struct ipc_namespace *ns
 static inline void sem_lock_and_putref(struct sem_array *sma)
 {
         sem_lock(sma, NULL, -1);
-        ipc_rcu_putref(sma, sem_rcu_free);
+        ipc_rcu_putref(&sma->sem_perm, sem_rcu_free);
 }
 static inline void sem_rmid(struct ipc_namespace *ns, struct sem_array *s)
@@ -469,20 +469,20 @@ static int newary(struct ipc_namespace *ns, struct ipc_params *params)
         if (ns->used_sems + nsems > ns->sc_semmns)
                 return -ENOSPC;
+        BUILD_BUG_ON(offsetof(struct sem_array, sem_perm) != 0);
         size = sizeof(*sma) + nsems * sizeof(sma->sems[0]);
-        sma = ipc_rcu_alloc(size);
+        sma = container_of(ipc_rcu_alloc(size), struct sem_array, sem_perm);
         if (!sma)
                 return -ENOMEM;
-        memset(sma, 0, size);
         sma->sem_perm.mode = (semflg & S_IRWXUGO);
         sma->sem_perm.key = key;
         sma->sem_perm.security = NULL;
         retval = security_sem_alloc(sma);
         if (retval) {
-                ipc_rcu_putref(sma, ipc_rcu_free);
+                ipc_rcu_putref(&sma->sem_perm, ipc_rcu_free);
                 return retval;
         }
@@ -502,7 +502,7 @@ static int newary(struct ipc_namespace *ns, struct ipc_params *params)
         id = ipc_addid(&sem_ids(ns), &sma->sem_perm, ns->sc_semmni);
         if (id < 0) {
-                ipc_rcu_putref(sma, sem_rcu_free);
+                ipc_rcu_putref(&sma->sem_perm, sem_rcu_free);
                 return id;
         }
         ns->used_sems += nsems;
@@ -1122,7 +1122,7 @@ static void freeary(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
         wake_up_q(&wake_q);
         ns->used_sems -= sma->sem_nsems;
-        ipc_rcu_putref(sma, sem_rcu_free);
+        ipc_rcu_putref(&sma->sem_perm, sem_rcu_free);
 }
 static unsigned long copy_semid_to_user(void __user *buf, struct semid64_ds *in, int version)
@@ -1362,7 +1362,7 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
                         goto out_unlock;
                 }
                 if (nsems > SEMMSL_FAST) {
-                        if (!ipc_rcu_getref(sma)) {
+                        if (!ipc_rcu_getref(&sma->sem_perm)) {
                                 err = -EIDRM;
                                 goto out_unlock;
                         }
@@ -1370,7 +1370,7 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
                         rcu_read_unlock();
                        sem_io = ipc_alloc(sizeof(ushort)*nsems);
                         if (sem_io == NULL) {
-                                ipc_rcu_putref(sma, sem_rcu_free);
+                                ipc_rcu_putref(&sma->sem_perm, sem_rcu_free);
                                 return -ENOMEM;
                         }
@@ -1395,7 +1395,7 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
                 int i;
                 struct sem_undo *un;
-                if (!ipc_rcu_getref(sma)) {
+                if (!ipc_rcu_getref(&sma->sem_perm)) {
                         err = -EIDRM;
                         goto out_rcu_wakeup;
                 }
@@ -1404,20 +1404,20 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
                 if (nsems > SEMMSL_FAST) {
                         sem_io = ipc_alloc(sizeof(ushort)*nsems);
                         if (sem_io == NULL) {
-                                ipc_rcu_putref(sma, sem_rcu_free);
+                                ipc_rcu_putref(&sma->sem_perm, sem_rcu_free);
                                 return -ENOMEM;
                         }
                 }
                 if (copy_from_user(sem_io, p, nsems*sizeof(ushort))) {
-                        ipc_rcu_putref(sma, sem_rcu_free);
+                        ipc_rcu_putref(&sma->sem_perm, sem_rcu_free);
                         err = -EFAULT;
                         goto out_free;
                 }
                 for (i = 0; i < nsems; i++) {
                         if (sem_io[i] > SEMVMX) {
-                                ipc_rcu_putref(sma, sem_rcu_free);
+                                ipc_rcu_putref(&sma->sem_perm, sem_rcu_free);
                                 err = -ERANGE;
                                 goto out_free;
                         }
@@ -1699,7 +1699,7 @@ static struct sem_undo *find_alloc_undo(struct ipc_namespace *ns, int semid)
         }
         nsems = sma->sem_nsems;
-        if (!ipc_rcu_getref(sma)) {
+        if (!ipc_rcu_getref(&sma->sem_perm)) {
                 rcu_read_unlock();
                 un = ERR_PTR(-EIDRM);
                 goto out;
@@ -1709,7 +1709,7 @@ static struct sem_undo *find_alloc_undo(struct ipc_namespace *ns, int semid)
         /* step 2: allocate new undo structure */
         new = kzalloc(sizeof(struct sem_undo) + sizeof(short)*nsems, GFP_KERNEL);
         if (!new) {
-                ipc_rcu_putref(sma, sem_rcu_free);
+                ipc_rcu_putref(&sma->sem_perm, sem_rcu_free);
                 return ERR_PTR(-ENOMEM);
         }

ipc/shm.c

@@ -174,9 +174,10 @@ static inline void shm_lock_by_ptr(struct shmid_kernel *ipcp)
 static void shm_rcu_free(struct rcu_head *head)
 {
-        struct ipc_rcu *p = container_of(head, struct ipc_rcu, rcu);
-        struct shmid_kernel *shp = ipc_rcu_to_struct(p);
+        struct kern_ipc_perm *ptr = container_of(head, struct kern_ipc_perm,
+                                                 rcu);
+        struct shmid_kernel *shp = container_of(ptr, struct shmid_kernel,
+                                                shm_perm);
         security_shm_free(shp);
         ipc_rcu_free(head);
 }
@@ -241,7 +242,7 @@ static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp)
                 user_shm_unlock(i_size_read(file_inode(shm_file)),
                                 shp->mlock_user);
         fput(shm_file);
-        ipc_rcu_putref(shp, shm_rcu_free);
+        ipc_rcu_putref(&shp->shm_perm, shm_rcu_free);
 }
 /*
@@ -542,7 +543,10 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
             ns->shm_tot + numpages > ns->shm_ctlall)
                 return -ENOSPC;
-        shp = ipc_rcu_alloc(sizeof(*shp));
+        BUILD_BUG_ON(offsetof(struct shmid_kernel, shm_perm) != 0);
+        shp = container_of(ipc_rcu_alloc(sizeof(*shp)), struct shmid_kernel,
+                           shm_perm);
         if (!shp)
                 return -ENOMEM;
@@ -553,7 +557,7 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
         shp->shm_perm.security = NULL;
         error = security_shm_alloc(shp);
         if (error) {
-                ipc_rcu_putref(shp, ipc_rcu_free);
+                ipc_rcu_putref(&shp->shm_perm, ipc_rcu_free);
                 return error;
         }
@@ -624,7 +628,7 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
                 user_shm_unlock(size, shp->mlock_user);
         fput(file);
 no_file:
-        ipc_rcu_putref(shp, shm_rcu_free);
+        ipc_rcu_putref(&shp->shm_perm, shm_rcu_free);
         return error;
 }

ipc/util.c

@@ -418,46 +418,45 @@ void ipc_free(void *ptr)
 }
 /**
- * ipc_rcu_alloc - allocate ipc and rcu space
+ * ipc_rcu_alloc - allocate ipc space
  * @size: size desired
  *
- * Allocate memory for the rcu header structure + the object.
- * Returns the pointer to the object or NULL upon failure.
+ * Allocate memory for an ipc object.
+ * The first member must be struct kern_ipc_perm.
  */
-void *ipc_rcu_alloc(int size)
+struct kern_ipc_perm *ipc_rcu_alloc(int size)
 {
-        /*
-         * We prepend the allocation with the rcu struct
-         */
-        struct ipc_rcu *out = ipc_alloc(sizeof(struct ipc_rcu) + size);
+        struct kern_ipc_perm *out = ipc_alloc(size);
         if (unlikely(!out))
                 return NULL;
+        memset(out, 0, size);
         atomic_set(&out->refcount, 1);
-        return out + 1;
+        return out;
 }
-int ipc_rcu_getref(void *ptr)
+int ipc_rcu_getref(struct kern_ipc_perm *ptr)
 {
-        struct ipc_rcu *p = ((struct ipc_rcu *)ptr) - 1;
-        return atomic_inc_not_zero(&p->refcount);
+        return atomic_inc_not_zero(&ptr->refcount);
 }
-void ipc_rcu_putref(void *ptr, void (*func)(struct rcu_head *head))
+void ipc_rcu_putref(struct kern_ipc_perm *ptr,
+                        void (*func)(struct rcu_head *head))
 {
-        struct ipc_rcu *p = ((struct ipc_rcu *)ptr) - 1;
-        if (!atomic_dec_and_test(&p->refcount))
+        if (!atomic_dec_and_test(&ptr->refcount))
                 return;
-        call_rcu(&p->rcu, func);
+        call_rcu(&ptr->rcu, func);
 }
-void ipc_rcu_free(struct rcu_head *head)
+void ipc_rcu_free(struct rcu_head *h)
 {
-        struct ipc_rcu *p = container_of(head, struct ipc_rcu, rcu);
+        struct kern_ipc_perm *ptr = container_of(h, struct kern_ipc_perm, rcu);
-        kvfree(p);
+        kvfree(ptr);
 }
 /**

ipc/util.h

@@ -47,13 +47,6 @@ static inline void msg_exit_ns(struct ipc_namespace *ns) { }
 static inline void shm_exit_ns(struct ipc_namespace *ns) { }
 #endif
-struct ipc_rcu {
-        struct rcu_head rcu;
-        atomic_t refcount;
-} ____cacheline_aligned_in_smp;
-#define ipc_rcu_to_struct(p) ((void *)(p+1))
 /*
  * Structure that holds the parameters needed by the ipc operations
  * (see after)
@@ -125,11 +118,14 @@ void ipc_free(void *ptr);
  * Objects are reference counted, they start with reference count 1.
  * getref increases the refcount, the putref call that reduces the recount
  * to 0 schedules the rcu destruction. Caller must guarantee locking.
+ *
+ * struct kern_ipc_perm must be the first member in the allocated structure.
  */
-void *ipc_rcu_alloc(int size);
-int ipc_rcu_getref(void *ptr);
-void ipc_rcu_putref(void *ptr, void (*func)(struct rcu_head *head));
-void ipc_rcu_free(struct rcu_head *head);
+struct kern_ipc_perm *ipc_rcu_alloc(int size);
+int ipc_rcu_getref(struct kern_ipc_perm *ptr);
+void ipc_rcu_putref(struct kern_ipc_perm *ptr,
+                        void (*func)(struct rcu_head *head));
+void ipc_rcu_free(struct rcu_head *h);
 struct kern_ipc_perm *ipc_lock(struct ipc_ids *, int);
 struct kern_ipc_perm *ipc_obtain_object_idr(struct ipc_ids *ids, int id);