kernel_optimize_test/include/linux/cpu_rmap.h
Gustavo A. R. Silva 3123227228 lib: cpu_rmap: Replace zero-length array with flexible-array member
The current codebase makes use of the zero-length array language
extension to the C90 standard, but the preferred mechanism to declare
variable-length types such as these is a flexible array member[1][2],
introduced in C99:

struct foo {
        int stuff;
        struct boo array[];
};

By making use of the mechanism above, we will get a compiler warning
in case the flexible array does not occur last in the structure, which
will help us prevent this class of undefined behavior bugs from being
inadvertently introduced[3] into the codebase from now on.
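
For instance, if the flexible array member is not the last member, the
compiler complains, whereas the equivalent zero-length array would have
compiled silently (a minimal sketch; struct bar and its members are
made up for illustration):

struct bar {
        struct boo array[];     /* must be last: the compiler rejects this */
        int stuff;
};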

Also, notice that dynamic memory allocations won't be affected by
this change:

"Flexible array members have incomplete type, and so the sizeof operator
may not be applied. As a quirk of the original implementation of
zero-length arrays, sizeof evaluates to zero."[1]
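
For example, an allocation for the struct foo above is computed the same
way before and after the conversion (a sketch; struct_size() is the
kernel's overflow-checked size helper from <linux/overflow.h>, and
"count" is an illustrative element count):

        struct foo *p;

        /* sizeof(*p) excludes the trailing array whether it is declared
         * as array[0] or array[], so the allocation size is unchanged. */
        p = kzalloc(struct_size(p, array, count), GFP_KERNEL);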

This issue was found with the help of Coccinelle.

[1] https://gcc.gnu.org/onlinedocs/gcc/Zero-Length.html
[2] https://github.com/KSPP/linux/issues/21
[3] commit 7649773293 ("cxgb3/l2t: Fix undefined behaviour")

Signed-off-by: Gustavo A. R. Silva <gustavo@embeddedor.com>
2020-04-18 15:44:55 -05:00

/* SPDX-License-Identifier: GPL-2.0-only */
#ifndef __LINUX_CPU_RMAP_H
#define __LINUX_CPU_RMAP_H

/*
 * cpu_rmap.c: CPU affinity reverse-map support
 * Copyright 2011 Solarflare Communications Inc.
 */

#include <linux/cpumask.h>
#include <linux/gfp.h>
#include <linux/slab.h>
#include <linux/kref.h>

/**
 * struct cpu_rmap - CPU affinity reverse-map
 * @refcount: kref for object
 * @size: Number of objects to be reverse-mapped
 * @used: Number of objects added
 * @obj: Pointer to array of object pointers
 * @near: For each CPU, the index and distance to the nearest object,
 *	based on affinity masks
 */
struct cpu_rmap {
	struct kref	refcount;
	u16		size, used;
	void		**obj;
	struct {
		u16	index;
		u16	dist;
	}		near[];
};
#define CPU_RMAP_DIST_INF 0xffff

extern struct cpu_rmap *alloc_cpu_rmap(unsigned int size, gfp_t flags);
extern int cpu_rmap_put(struct cpu_rmap *rmap);

extern int cpu_rmap_add(struct cpu_rmap *rmap, void *obj);
extern int cpu_rmap_update(struct cpu_rmap *rmap, u16 index,
			   const struct cpumask *affinity);

static inline u16 cpu_rmap_lookup_index(struct cpu_rmap *rmap, unsigned int cpu)
{
	return rmap->near[cpu].index;
}

static inline void *cpu_rmap_lookup_obj(struct cpu_rmap *rmap, unsigned int cpu)
{
	return rmap->obj[rmap->near[cpu].index];
}

/**
 * alloc_irq_cpu_rmap - allocate CPU affinity reverse-map for IRQs
 * @size: Number of objects to be mapped
 *
 * Must be called in process context.
 */
static inline struct cpu_rmap *alloc_irq_cpu_rmap(unsigned int size)
{
	return alloc_cpu_rmap(size, GFP_KERNEL);
}

extern void free_irq_cpu_rmap(struct cpu_rmap *rmap);
extern int irq_cpu_rmap_add(struct cpu_rmap *rmap, int irq);

#endif /* __LINUX_CPU_RMAP_H */
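
For context, a typical driver-side use of the IRQ helpers declared above
looks roughly like this (a hedged sketch based only on the declarations
in this header; error handling is trimmed and names such as
my_setup_rmap and my_irqs are illustrative):

#include <linux/cpu_rmap.h>

/* Build a reverse-map covering nvec IRQ vectors; my_irqs[] is a
 * hypothetical array of IRQ numbers owned by the driver. */
static struct cpu_rmap *my_setup_rmap(const int *my_irqs, unsigned int nvec)
{
	struct cpu_rmap *rmap;
	unsigned int i;

	rmap = alloc_irq_cpu_rmap(nvec);	/* GFP_KERNEL, process context */
	if (!rmap)
		return NULL;

	for (i = 0; i < nvec; i++) {
		if (irq_cpu_rmap_add(rmap, my_irqs[i]) < 0) {
			free_irq_cpu_rmap(rmap);
			return NULL;
		}
	}

	/* Later, cpu_rmap_lookup_index(rmap, cpu) returns the add-order
	 * index of the entry whose affinity is nearest to that CPU. */
	return rmap;
}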