sched/core: Remove the tsk_cpus_allowed() wrapper

So the original intention of tsk_cpus_allowed() was to 'future-proof'
the field - but it's pretty ineffectual at that, because half of
the code uses ->cpus_allowed directly ...

Also, the wrapper makes the code longer than the original expression!

So just get rid of it. This also shrinks <linux/sched.h> a bit.

Acked-by: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Commit: 0c98d344fe (parent 59ddbcb2f4)
Author: Ingo Molnar <mingo@kernel.org>
Date: 2017-02-05 15:38:10 +01:00
16 changed files with 42 additions and 48 deletions
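The conversion at every call site follows one pattern: tsk_cpus_allowed(p) becomes &p->cpus_allowed, and the macro itself is deleted from <linux/sched.h>. A minimal standalone sketch of that equivalence (mock struct definitions for illustration only, not the kernel's real types):

#include <stdio.h>

/* Mock types standing in for the kernel's (illustration only): */
struct cpumask { unsigned long bits; };
struct task_struct { struct cpumask cpus_allowed; };

/* The wrapper this commit deletes from <linux/sched.h>: */
#define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)

int main(void)
{
	struct task_struct p = { .cpus_allowed = { 0x3 } };

	const struct cpumask *via_wrapper = tsk_cpus_allowed(&p); /* old style */
	const struct cpumask *direct = &p.cpus_allowed;           /* new style */

	/* Both expressions name the same field, so this prints "same". */
	printf("%s\n", via_wrapper == direct ? "same" : "different");
	return 0;
}

Because the two forms are identical by definition, the patch can be applied mechanically across all 16 files below.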

@@ -795,7 +795,7 @@ void __init smp_cpus_done(unsigned int max_cpus)
 	 * se we pin us down to CPU 0 for a short while
 	 */
 	alloc_cpumask_var(&old_mask, GFP_NOWAIT);
-	cpumask_copy(old_mask, tsk_cpus_allowed(current));
+	cpumask_copy(old_mask, &current->cpus_allowed);
 	set_cpus_allowed_ptr(current, cpumask_of(boot_cpuid));
 	if (smp_ops && smp_ops->setup_cpu)

@@ -140,7 +140,7 @@ void __spu_update_sched_info(struct spu_context *ctx)
 	 * runqueue. The context will be rescheduled on the proper node
 	 * if it is timesliced or preempted.
 	 */
-	cpumask_copy(&ctx->cpus_allowed, tsk_cpus_allowed(current));
+	cpumask_copy(&ctx->cpus_allowed, &current->cpus_allowed);
 	/* Save the current cpu id for spu interrupt routing. */
 	ctx->last_ran = raw_smp_processor_id();

@@ -106,7 +106,7 @@ static unsigned long run_on_cpu(unsigned long cpu,
 	cpumask_t old_affinity;
 	unsigned long ret;
-	cpumask_copy(&old_affinity, tsk_cpus_allowed(current));
+	cpumask_copy(&old_affinity, &current->cpus_allowed);
 	/* should return -EINVAL to userspace */
 	if (set_cpus_allowed_ptr(current, cpumask_of(cpu)))
 		return 0;

@@ -234,7 +234,7 @@ static unsigned int us2e_freq_get(unsigned int cpu)
 	cpumask_t cpus_allowed;
 	unsigned long clock_tick, estar;
-	cpumask_copy(&cpus_allowed, tsk_cpus_allowed(current));
+	cpumask_copy(&cpus_allowed, &current->cpus_allowed);
 	set_cpus_allowed_ptr(current, cpumask_of(cpu));
 	clock_tick = sparc64_get_clock_tick(cpu) / 1000;

@@ -252,7 +252,7 @@ static int us2e_freq_target(struct cpufreq_policy *policy, unsigned int index)
 	unsigned long clock_tick, divisor, old_divisor, estar;
 	cpumask_t cpus_allowed;
-	cpumask_copy(&cpus_allowed, tsk_cpus_allowed(current));
+	cpumask_copy(&cpus_allowed, &current->cpus_allowed);
 	set_cpus_allowed_ptr(current, cpumask_of(cpu));
 	new_freq = clock_tick = sparc64_get_clock_tick(cpu) / 1000;

@@ -82,7 +82,7 @@ static unsigned int us3_freq_get(unsigned int cpu)
 	unsigned long reg;
 	unsigned int ret;
-	cpumask_copy(&cpus_allowed, tsk_cpus_allowed(current));
+	cpumask_copy(&cpus_allowed, &current->cpus_allowed);
 	set_cpus_allowed_ptr(current, cpumask_of(cpu));
 	reg = read_safari_cfg();

@@ -99,7 +99,7 @@ static int us3_freq_target(struct cpufreq_policy *policy, unsigned int index)
 	unsigned long new_bits, new_freq, reg;
 	cpumask_t cpus_allowed;
-	cpumask_copy(&cpus_allowed, tsk_cpus_allowed(current));
+	cpumask_copy(&cpus_allowed, &current->cpus_allowed);
 	set_cpus_allowed_ptr(current, cpumask_of(cpu));
 	new_freq = sparc64_get_clock_tick(cpu) / 1000;

@@ -576,7 +576,7 @@ int hfi1_get_proc_affinity(int node)
 	struct hfi1_affinity_node *entry;
 	cpumask_var_t diff, hw_thread_mask, available_mask, intrs_mask;
 	const struct cpumask *node_mask,
-		*proc_mask = tsk_cpus_allowed(current);
+		*proc_mask = &current->cpus_allowed;
 	struct hfi1_affinity_node_list *affinity = &node_affinity;
 	struct cpu_mask_set *set = &affinity->proc;

@@ -856,7 +856,7 @@ struct sdma_engine *sdma_select_user_engine(struct hfi1_devdata *dd,
 {
 	struct sdma_rht_node *rht_node;
 	struct sdma_engine *sde = NULL;
-	const struct cpumask *current_mask = tsk_cpus_allowed(current);
+	const struct cpumask *current_mask = &current->cpus_allowed;
 	unsigned long cpu_id;
 	/*

@@ -1995,9 +1995,6 @@ static inline struct vm_struct *task_stack_vm_area(const struct task_struct *t)
 }
 #endif
-/* Future-safe accessor for struct task_struct's cpus_allowed. */
-#define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)
 static inline int tsk_nr_cpus_allowed(struct task_struct *p)
 {
 	return p->nr_cpus_allowed;

@@ -981,7 +981,7 @@ static struct rq *__migrate_task(struct rq *rq, struct task_struct *p, int dest_
 		return rq;
 	/* Affinity changed (again). */
-	if (!cpumask_test_cpu(dest_cpu, tsk_cpus_allowed(p)))
+	if (!cpumask_test_cpu(dest_cpu, &p->cpus_allowed))
 		return rq;
 	rq = move_queued_task(rq, p, dest_cpu);

@@ -1259,10 +1259,10 @@ static int migrate_swap_stop(void *data)
 	if (task_cpu(arg->src_task) != arg->src_cpu)
 		goto unlock;
-	if (!cpumask_test_cpu(arg->dst_cpu, tsk_cpus_allowed(arg->src_task)))
+	if (!cpumask_test_cpu(arg->dst_cpu, &arg->src_task->cpus_allowed))
 		goto unlock;
-	if (!cpumask_test_cpu(arg->src_cpu, tsk_cpus_allowed(arg->dst_task)))
+	if (!cpumask_test_cpu(arg->src_cpu, &arg->dst_task->cpus_allowed))
 		goto unlock;
 	__migrate_swap_task(arg->src_task, arg->dst_cpu);

@@ -1303,10 +1303,10 @@ int migrate_swap(struct task_struct *cur, struct task_struct *p)
 	if (!cpu_active(arg.src_cpu) || !cpu_active(arg.dst_cpu))
 		goto out;
-	if (!cpumask_test_cpu(arg.dst_cpu, tsk_cpus_allowed(arg.src_task)))
+	if (!cpumask_test_cpu(arg.dst_cpu, &arg.src_task->cpus_allowed))
 		goto out;
-	if (!cpumask_test_cpu(arg.src_cpu, tsk_cpus_allowed(arg.dst_task)))
+	if (!cpumask_test_cpu(arg.src_cpu, &arg.dst_task->cpus_allowed))
 		goto out;
 	trace_sched_swap_numa(cur, arg.src_cpu, p, arg.dst_cpu);

@@ -1490,14 +1490,14 @@ static int select_fallback_rq(int cpu, struct task_struct *p)
 		for_each_cpu(dest_cpu, nodemask) {
 			if (!cpu_active(dest_cpu))
 				continue;
-			if (cpumask_test_cpu(dest_cpu, tsk_cpus_allowed(p)))
+			if (cpumask_test_cpu(dest_cpu, &p->cpus_allowed))
 				return dest_cpu;
 		}
 	}
 	for (;;) {
 		/* Any allowed, online CPU? */
-		for_each_cpu(dest_cpu, tsk_cpus_allowed(p)) {
+		for_each_cpu(dest_cpu, &p->cpus_allowed) {
 			if (!(p->flags & PF_KTHREAD) && !cpu_active(dest_cpu))
 				continue;
 			if (!cpu_online(dest_cpu))

@@ -1552,7 +1552,7 @@ int select_task_rq(struct task_struct *p, int cpu, int sd_flags, int wake_flags)
 	if (tsk_nr_cpus_allowed(p) > 1)
 		cpu = p->sched_class->select_task_rq(p, cpu, sd_flags, wake_flags);
 	else
-		cpu = cpumask_any(tsk_cpus_allowed(p));
+		cpu = cpumask_any(&p->cpus_allowed);
 	/*
 	 * In order not to call set_task_cpu() on a blocking task we need

@@ -1564,7 +1564,7 @@ int select_task_rq(struct task_struct *p, int cpu, int sd_flags, int wake_flags)
 	 * [ this allows ->select_task() to simply return task_cpu(p) and
 	 *   not worry about this generic constraint ]
 	 */
-	if (unlikely(!cpumask_test_cpu(cpu, tsk_cpus_allowed(p)) ||
+	if (unlikely(!cpumask_test_cpu(cpu, &p->cpus_allowed) ||
		     !cpu_online(cpu)))
 		cpu = select_fallback_rq(task_cpu(p), p);

@@ -5473,7 +5473,7 @@ int migrate_task_to(struct task_struct *p, int target_cpu)
 	if (curr_cpu == target_cpu)
 		return 0;
-	if (!cpumask_test_cpu(target_cpu, tsk_cpus_allowed(p)))
+	if (!cpumask_test_cpu(target_cpu, &p->cpus_allowed))
 		return -EINVAL;
 	/* TODO: This is not properly updating schedstats */

@@ -128,10 +128,10 @@ int cpudl_find(struct cpudl *cp, struct task_struct *p,
 	const struct sched_dl_entity *dl_se = &p->dl;
 	if (later_mask &&
-	    cpumask_and(later_mask, cp->free_cpus, tsk_cpus_allowed(p))) {
+	    cpumask_and(later_mask, cp->free_cpus, &p->cpus_allowed)) {
 		best_cpu = cpumask_any(later_mask);
 		goto out;
-	} else if (cpumask_test_cpu(cpudl_maximum(cp), tsk_cpus_allowed(p)) &&
+	} else if (cpumask_test_cpu(cpudl_maximum(cp), &p->cpus_allowed) &&
		    dl_time_before(dl_se->deadline, cp->elements[0].dl)) {
 		best_cpu = cpudl_maximum(cp);
 		if (later_mask)

@@ -103,11 +103,11 @@ int cpupri_find(struct cpupri *cp, struct task_struct *p,
 		if (skip)
 			continue;
-		if (cpumask_any_and(tsk_cpus_allowed(p), vec->mask) >= nr_cpu_ids)
+		if (cpumask_any_and(&p->cpus_allowed, vec->mask) >= nr_cpu_ids)
 			continue;
 		if (lowest_mask) {
-			cpumask_and(lowest_mask, tsk_cpus_allowed(p), vec->mask);
+			cpumask_and(lowest_mask, &p->cpus_allowed, vec->mask);
 			/*
 			 * We have to ensure that we have at least one bit

@@ -252,7 +252,7 @@ static struct rq *dl_task_offline_migration(struct rq *rq, struct task_struct *p
 	 * If we cannot preempt any rq, fall back to pick any
 	 * online cpu.
 	 */
-	cpu = cpumask_any_and(cpu_active_mask, tsk_cpus_allowed(p));
+	cpu = cpumask_any_and(cpu_active_mask, &p->cpus_allowed);
 	if (cpu >= nr_cpu_ids) {
 		/*
 		 * Fail to find any suitable cpu.

@@ -1235,7 +1235,7 @@ static void set_curr_task_dl(struct rq *rq)
 static int pick_dl_task(struct rq *rq, struct task_struct *p, int cpu)
 {
 	if (!task_running(rq, p) &&
-	    cpumask_test_cpu(cpu, tsk_cpus_allowed(p)))
+	    cpumask_test_cpu(cpu, &p->cpus_allowed))
 		return 1;
 	return 0;
 }

@@ -1384,8 +1384,7 @@ static struct rq *find_lock_later_rq(struct task_struct *task, struct rq *rq)
 		/* Retry if something changed. */
 		if (double_lock_balance(rq, later_rq)) {
 			if (unlikely(task_rq(task) != rq ||
-				     !cpumask_test_cpu(later_rq->cpu,
-						       tsk_cpus_allowed(task)) ||
+				     !cpumask_test_cpu(later_rq->cpu, &task->cpus_allowed) ||
				     task_running(rq, task) ||
				     !dl_task(task) ||
				     !task_on_rq_queued(task))) {

@@ -1551,7 +1551,7 @@ static void task_numa_compare(struct task_numa_env *env,
 	 */
 	if (cur) {
 		/* Skip this swap candidate if cannot move to the source cpu */
-		if (!cpumask_test_cpu(env->src_cpu, tsk_cpus_allowed(cur)))
+		if (!cpumask_test_cpu(env->src_cpu, &cur->cpus_allowed))
 			goto unlock;
 	/*

@@ -1661,7 +1661,7 @@ static void task_numa_find_cpu(struct task_numa_env *env,
 	for_each_cpu(cpu, cpumask_of_node(env->dst_nid)) {
 		/* Skip this CPU if the source task cannot migrate */
-		if (!cpumask_test_cpu(cpu, tsk_cpus_allowed(env->p)))
+		if (!cpumask_test_cpu(cpu, &env->p->cpus_allowed))
 			continue;
 		env->dst_cpu = cpu;

@@ -5458,7 +5458,7 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p,
 		/* Skip over this group if it has no CPUs allowed */
 		if (!cpumask_intersects(sched_group_cpus(group),
-					tsk_cpus_allowed(p)))
+					&p->cpus_allowed))
 			continue;
 		local_group = cpumask_test_cpu(this_cpu,

@@ -5578,7 +5578,7 @@ find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu)
 		return cpumask_first(sched_group_cpus(group));
 	/* Traverse only the allowed CPUs */
-	for_each_cpu_and(i, sched_group_cpus(group), tsk_cpus_allowed(p)) {
+	for_each_cpu_and(i, sched_group_cpus(group), &p->cpus_allowed) {
 		if (idle_cpu(i)) {
 			struct rq *rq = cpu_rq(i);
 			struct cpuidle_state *idle = idle_get_state(rq);

@@ -5717,7 +5717,7 @@ static int select_idle_core(struct task_struct *p, struct sched_domain *sd, int
 	if (!test_idle_cores(target, false))
 		return -1;
-	cpumask_and(cpus, sched_domain_span(sd), tsk_cpus_allowed(p));
+	cpumask_and(cpus, sched_domain_span(sd), &p->cpus_allowed);
 	for_each_cpu_wrap(core, cpus, target, wrap) {
 		bool idle = true;

@@ -5751,7 +5751,7 @@ static int select_idle_smt(struct task_struct *p, struct sched_domain *sd, int t
 		return -1;
 	for_each_cpu(cpu, cpu_smt_mask(target)) {
-		if (!cpumask_test_cpu(cpu, tsk_cpus_allowed(p)))
+		if (!cpumask_test_cpu(cpu, &p->cpus_allowed))
 			continue;
 		if (idle_cpu(cpu))
 			return cpu;

@@ -5803,7 +5803,7 @@ static int select_idle_cpu(struct task_struct *p, struct sched_domain *sd, int t
 	time = local_clock();
 	for_each_cpu_wrap(cpu, sched_domain_span(sd), target, wrap) {
-		if (!cpumask_test_cpu(cpu, tsk_cpus_allowed(p)))
+		if (!cpumask_test_cpu(cpu, &p->cpus_allowed))
 			continue;
 		if (idle_cpu(cpu))
 			break;

@@ -5958,7 +5958,7 @@ select_task_rq_fair(struct task_struct *p, int prev_cpu, int sd_flag, int wake_f
 	if (sd_flag & SD_BALANCE_WAKE) {
 		record_wakee(p);
 		want_affine = !wake_wide(p) && !wake_cap(p, cpu, prev_cpu)
-			      && cpumask_test_cpu(cpu, tsk_cpus_allowed(p));
+			      && cpumask_test_cpu(cpu, &p->cpus_allowed);
 	}
 	rcu_read_lock();

@@ -6698,7 +6698,7 @@ int can_migrate_task(struct task_struct *p, struct lb_env *env)
 	if (throttled_lb_pair(task_group(p), env->src_cpu, env->dst_cpu))
 		return 0;
-	if (!cpumask_test_cpu(env->dst_cpu, tsk_cpus_allowed(p))) {
+	if (!cpumask_test_cpu(env->dst_cpu, &p->cpus_allowed)) {
 		int cpu;
 		schedstat_inc(p->se.statistics.nr_failed_migrations_affine);

@@ -6718,7 +6718,7 @@ int can_migrate_task(struct task_struct *p, struct lb_env *env)
 		/* Prevent to re-select dst_cpu via env's cpus */
 		for_each_cpu_and(cpu, env->dst_grpmask, env->cpus) {
-			if (cpumask_test_cpu(cpu, tsk_cpus_allowed(p))) {
+			if (cpumask_test_cpu(cpu, &p->cpus_allowed)) {
 				env->flags |= LBF_DST_PINNED;
 				env->new_dst_cpu = cpu;
 				break;

@@ -7252,7 +7252,7 @@ check_cpu_capacity(struct rq *rq, struct sched_domain *sd)
 /*
  * Group imbalance indicates (and tries to solve) the problem where balancing
- * groups is inadequate due to tsk_cpus_allowed() constraints.
+ * groups is inadequate due to ->cpus_allowed constraints.
  *
  * Imagine a situation of two groups of 4 cpus each and 4 tasks each with a
  * cpumask covering 1 cpu of the first group and 3 cpus of the second group.

@@ -8211,8 +8211,7 @@ static int load_balance(int this_cpu, struct rq *this_rq,
 			 * if the curr task on busiest cpu can't be
 			 * moved to this_cpu
 			 */
-			if (!cpumask_test_cpu(this_cpu,
-					      tsk_cpus_allowed(busiest->curr))) {
+			if (!cpumask_test_cpu(this_cpu, &busiest->curr->cpus_allowed)) {
 				raw_spin_unlock_irqrestore(&busiest->lock,
							   flags);
 				env.flags |= LBF_ALL_PINNED;

@@ -1591,7 +1591,7 @@ static void put_prev_task_rt(struct rq *rq, struct task_struct *p)
 static int pick_rt_task(struct rq *rq, struct task_struct *p, int cpu)
 {
 	if (!task_running(rq, p) &&
-	    cpumask_test_cpu(cpu, tsk_cpus_allowed(p)))
+	    cpumask_test_cpu(cpu, &p->cpus_allowed))
 		return 1;
 	return 0;
 }

@@ -1726,8 +1726,7 @@ static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
 		 * Also make sure that it wasn't scheduled on its rq.
 		 */
 		if (unlikely(task_rq(task) != rq ||
-			     !cpumask_test_cpu(lowest_rq->cpu,
-					       tsk_cpus_allowed(task)) ||
+			     !cpumask_test_cpu(lowest_rq->cpu, &task->cpus_allowed) ||
			     task_running(rq, task) ||
			     !rt_task(task) ||
			     !task_on_rq_queued(task))) {

@@ -22,7 +22,7 @@ notrace static unsigned int check_preemption_disabled(const char *what1,
 	 * Kernel threads bound to a single CPU can safely use
 	 * smp_processor_id():
 	 */
-	if (cpumask_equal(tsk_cpus_allowed(current), cpumask_of(this_cpu)))
+	if (cpumask_equal(&current->cpus_allowed, cpumask_of(this_cpu)))
 		goto out;
 	/*

@@ -33,7 +33,7 @@ static void simple_thread_func(int cnt)
 	/* Silly tracepoints */
 	trace_foo_bar("hello", cnt, array, random_strings[len],
-		      tsk_cpus_allowed(current));
+		      &current->cpus_allowed);
 	trace_foo_with_template_simple("HELLO", cnt);