latent_entropy: Mark functions with __latent_entropy
The __latent_entropy gcc attribute can be used only on functions and variables. If it is on a function, the plugin instruments it for gathering control-flow entropy. If the attribute is on a variable, the plugin initializes it with random contents. The variable must be an integer, an integer array type, or a structure with integer fields.

These specific functions have been selected because they are init functions (to help gather boot-time entropy), are called at unpredictable times, or have variable loops, each of which provides some level of latent entropy.

Signed-off-by: Emese Revfy <re.emese@gmail.com>
[kees: expanded commit message]
Signed-off-by: Kees Cook <keescook@chromium.org>
parent 38addce8b6
commit 0766f788eb
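As a rough illustration of how the attribute is meant to be used (the names below are hypothetical and not part of this patch): marking a function instruments its control flow for entropy gathering when the plugin is enabled, while marking an integer array gets it filled with random contents at build time; without the plugin, __latent_entropy expands to nothing.

/*
 * Hypothetical sketch, not taken from this patch: example_pool would be
 * seeded with random data at build time, and example_softirq() would have
 * its branches instrumented to accumulate control-flow entropy when the
 * latent_entropy plugin is enabled. With the plugin disabled,
 * __latent_entropy is defined as a no-op.
 */
#include <linux/compiler.h>     /* __latent_entropy (real attribute or no-op) */
#include <linux/interrupt.h>    /* struct softirq_action */
#include <linux/types.h>        /* u32 */

static u32 example_pool[8] __latent_entropy;

static __latent_entropy void example_softirq(struct softirq_action *h)
{
        /* handler body would go here */
}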
@@ -18,7 +18,7 @@ static DEFINE_PER_CPU(struct list_head, blk_cpu_done);
  * Softirq action handler - move entries to local list and loop over them
  * while passing them to the queue registered handler.
  */
-static void blk_done_softirq(struct softirq_action *h)
+static __latent_entropy void blk_done_softirq(struct softirq_action *h)
 {
 	struct list_head *cpu_list, local_list;
 
@@ -479,8 +479,8 @@ static ssize_t _extract_entropy(struct entropy_store *r, void *buf,
 
 static void crng_reseed(struct crng_state *crng, struct entropy_store *r);
 static void push_to_pool(struct work_struct *work);
-static __u32 input_pool_data[INPUT_POOL_WORDS];
-static __u32 blocking_pool_data[OUTPUT_POOL_WORDS];
+static __u32 input_pool_data[INPUT_POOL_WORDS] __latent_entropy;
+static __u32 blocking_pool_data[OUTPUT_POOL_WORDS] __latent_entropy;
 
 static struct entropy_store input_pool = {
 	.poolinfo = &poolinfo_table[0],
@@ -2759,6 +2759,7 @@ static struct mnt_namespace *alloc_mnt_ns(struct user_namespace *user_ns)
 	return new_ns;
 }
 
+__latent_entropy
 struct mnt_namespace *copy_mnt_ns(unsigned long flags, struct mnt_namespace *ns,
 		struct user_namespace *user_ns, struct fs_struct *new_fs)
 {
@@ -188,6 +188,13 @@
 #endif /* GCC_VERSION >= 40300 */
 
 #if GCC_VERSION >= 40500
+
+#ifndef __CHECKER__
+#ifdef LATENT_ENTROPY_PLUGIN
+#define __latent_entropy __attribute__((latent_entropy))
+#endif
+#endif
+
 /*
  * Mark a position in code as unreachable. This can be used to
  * suppress control flow warnings after asm blocks that transfer
@@ -406,6 +406,10 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s
 # define __attribute_const__	/* unimplemented */
 #endif
 
+#ifndef __latent_entropy
+# define __latent_entropy
+#endif
+
 /*
  * Tell gcc if a function is cold. The compiler will assume any path
  * directly leading to the call is unlikely.
@@ -105,7 +105,7 @@ struct files_struct *get_files_struct(struct task_struct *);
 void put_files_struct(struct files_struct *fs);
 void reset_files_struct(struct files_struct *);
 int unshare_files(struct files_struct **);
-struct files_struct *dup_fd(struct files_struct *, int *);
+struct files_struct *dup_fd(struct files_struct *, int *) __latent_entropy;
 void do_close_on_exec(struct files_struct *);
 int iterate_fd(struct files_struct *, unsigned,
 		int (*)(const void *, struct file *, unsigned),
@@ -437,7 +437,7 @@ extern void disk_flush_events(struct gendisk *disk, unsigned int mask);
 extern unsigned int disk_clear_events(struct gendisk *disk, unsigned int mask);
 
 /* drivers/char/random.c */
-extern void add_disk_randomness(struct gendisk *disk);
+extern void add_disk_randomness(struct gendisk *disk) __latent_entropy;
 extern void rand_initialize_disk(struct gendisk *disk);
 
 static inline sector_t get_start_sect(struct block_device *bdev)
@@ -39,7 +39,7 @@
 
 /* These are for everybody (although not all archs will actually
    discard it in modules) */
-#define __init		__section(.init.text) __cold notrace
+#define __init		__section(.init.text) __cold notrace __latent_entropy
 #define __initdata	__section(.init.data)
 #define __initconst	__constsection(.init.rodata)
 #define __exitdata	__section(.exit.data)
@@ -86,7 +86,8 @@
 #define __exit		__section(.exit.text) __exitused __cold notrace
 
 /* Used for MEMORY_HOTPLUG */
-#define __meminit	__section(.meminit.text) __cold notrace
+#define __meminit	__section(.meminit.text) __cold notrace \
+						  __latent_entropy
 #define __meminitdata	__section(.meminit.data)
 #define __meminitconst	__constsection(.meminit.rodata)
 #define __memexit	__section(.memexit.text) __exitused __cold notrace
@@ -30,8 +30,8 @@ static inline void add_latent_entropy(void) {}
 #endif
 
 extern void add_input_randomness(unsigned int type, unsigned int code,
-				unsigned int value);
-extern void add_interrupt_randomness(int irq, int irq_flags);
+				unsigned int value) __latent_entropy;
+extern void add_interrupt_randomness(int irq, int irq_flags) __latent_entropy;
 
 extern void get_random_bytes(void *buf, int nbytes);
 extern int add_random_ready_callback(struct random_ready_callback *rdy);
@@ -404,7 +404,8 @@ static struct task_struct *dup_task_struct(struct task_struct *orig, int node)
 }
 
 #ifdef CONFIG_MMU
-static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
+static __latent_entropy int dup_mmap(struct mm_struct *mm,
+					struct mm_struct *oldmm)
 {
 	struct vm_area_struct *mpnt, *tmp, *prev, **pprev;
 	struct rb_node **rb_link, *rb_parent;
@@ -1296,7 +1297,8 @@ init_task_pid(struct task_struct *task, enum pid_type type, struct pid *pid)
  * parts of the process environment (as per the clone
  * flags). The actual kick-off is left to the caller.
  */
-static struct task_struct *copy_process(unsigned long clone_flags,
+static __latent_entropy struct task_struct *copy_process(
+					unsigned long clone_flags,
 					unsigned long stack_start,
 					unsigned long stack_size,
 					int __user *child_tidptr,
@@ -170,7 +170,7 @@ static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp)
 			      false));
 }
 
-static void rcu_process_callbacks(struct softirq_action *unused)
+static __latent_entropy void rcu_process_callbacks(struct softirq_action *unused)
 {
 	__rcu_process_callbacks(&rcu_sched_ctrlblk);
 	__rcu_process_callbacks(&rcu_bh_ctrlblk);
@@ -3013,7 +3013,7 @@ __rcu_process_callbacks(struct rcu_state *rsp)
 /*
  * Do RCU core processing for the current CPU.
  */
-static void rcu_process_callbacks(struct softirq_action *unused)
+static __latent_entropy void rcu_process_callbacks(struct softirq_action *unused)
 {
 	struct rcu_state *rsp;
 
@@ -8283,7 +8283,7 @@ static void nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle) { }
 * run_rebalance_domains is triggered when needed from the scheduler tick.
 * Also triggered for nohz idle balancing (with nohz_balancing_kick set).
 */
-static void run_rebalance_domains(struct softirq_action *h)
+static __latent_entropy void run_rebalance_domains(struct softirq_action *h)
 {
 	struct rq *this_rq = this_rq();
 	enum cpu_idle_type idle = this_rq->idle_balance ?
@@ -482,7 +482,7 @@ void __tasklet_hi_schedule_first(struct tasklet_struct *t)
 }
 EXPORT_SYMBOL(__tasklet_hi_schedule_first);
 
-static void tasklet_action(struct softirq_action *a)
+static __latent_entropy void tasklet_action(struct softirq_action *a)
 {
 	struct tasklet_struct *list;
 
@@ -518,7 +518,7 @@ static void tasklet_action(struct softirq_action *a)
 	}
 }
 
-static void tasklet_hi_action(struct softirq_action *a)
+static __latent_entropy void tasklet_hi_action(struct softirq_action *a)
 {
 	struct tasklet_struct *list;
 
@@ -1633,7 +1633,7 @@ static inline void __run_timers(struct timer_base *base)
 /*
  * This function runs timers and the timer-tq in bottom half context.
  */
-static void run_timer_softirq(struct softirq_action *h)
+static __latent_entropy void run_timer_softirq(struct softirq_action *h)
 {
 	struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]);
 
@@ -74,7 +74,7 @@ void irq_poll_complete(struct irq_poll *iop)
 }
 EXPORT_SYMBOL(irq_poll_complete);
 
-static void irq_poll_softirq(struct softirq_action *h)
+static void __latent_entropy irq_poll_softirq(struct softirq_action *h)
 {
 	struct list_head *list = this_cpu_ptr(&blk_cpu_iopoll);
 	int rearm = 0, budget = irq_poll_budget;
@@ -47,7 +47,7 @@ static inline void prandom_state_selftest(void)
 }
 #endif
 
-static DEFINE_PER_CPU(struct rnd_state, net_rand_state);
+static DEFINE_PER_CPU(struct rnd_state, net_rand_state) __latent_entropy;
 
 /**
  * prandom_u32_state - seeded pseudo-random number generator.
@@ -92,7 +92,7 @@ int _node_numa_mem_[MAX_NUMNODES];
 #endif
 
 #ifdef CONFIG_GCC_PLUGIN_LATENT_ENTROPY
-volatile u64 latent_entropy;
+volatile u64 latent_entropy __latent_entropy;
 EXPORT_SYMBOL(latent_entropy);
 #endif
 
@@ -3855,7 +3855,7 @@ int netif_rx_ni(struct sk_buff *skb)
 }
 EXPORT_SYMBOL(netif_rx_ni);
 
-static void net_tx_action(struct softirq_action *h)
+static __latent_entropy void net_tx_action(struct softirq_action *h)
 {
 	struct softnet_data *sd = this_cpu_ptr(&softnet_data);
 
@@ -5187,7 +5187,7 @@ static int napi_poll(struct napi_struct *n, struct list_head *repoll)
 	return work;
 }
 
-static void net_rx_action(struct softirq_action *h)
+static __latent_entropy void net_rx_action(struct softirq_action *h)
 {
 	struct softnet_data *sd = this_cpu_ptr(&softnet_data);
 	unsigned long time_limit = jiffies + 2;