drivers: fix up obsolete cpu function usage.

Thanks to spatch, plus manual removal of "&*".  Then a sweep for
for_each_cpu_mask => for_each_cpu.
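
For illustration, rules like the following could drive such a conversion with
Coccinelle (a hedged sketch only, not the exact spatch used here). The new
helpers take a struct cpumask pointer, so each rule prepends "&"; where the
old argument was already a dereference such as *evt->cpumask, that produced
the "&*" that was then removed by hand:

// Hypothetical semantic patch, shown for illustration only.
@@
expression cpu, mask;
@@
- cpu_isset(cpu, mask)
+ cpumask_test_cpu(cpu, &mask)

@@
expression cpu, mask;
@@
- cpu_set(cpu, mask)
+ cpumask_set_cpu(cpu, &mask)

@@
expression mask;
@@
- cpus_clear(mask)
+ cpumask_clear(&mask)

@@
expression mask;
@@
- cpus_empty(mask)
+ cpumask_empty(&mask)

@@
expression mask;
@@
- first_cpu(mask)
+ cpumask_first(&mask)

@@
expression mask;
@@
- cpus_weight(mask)
+ cpumask_weight(&mask)

A rules file along these lines could be applied with something like
"spatch --sp-file cpumask.cocci --in-place drivers/" (cpumask.cocci being a
hypothetical file name).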

Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
Acked-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Herbert Xu <herbert@gondor.apana.org.au>
Cc: Jason Cooper <jason@lakedaemon.net>
Cc: Chris Metcalf <cmetcalf@ezchip.com>
Cc: netdev@vger.kernel.org
Author: Rusty Russell <rusty@rustcorp.com.au>
Date:   2015-03-05 10:49:16 +10:30
Parent: 51f7bd8590
Commit: f9b531fe14
6 changed files with 13 additions and 12 deletions
Changed paths, all under drivers/: clocksource, cpuidle, crypto, irqchip, net/ethernet/tile

In drivers/clocksource:

@@ -117,7 +117,8 @@ static void apbt_set_mode(enum clock_event_mode mode,
 	unsigned long period;
 	struct dw_apb_clock_event_device *dw_ced = ced_to_dw_apb_ced(evt);
 
-	pr_debug("%s CPU %d mode=%d\n", __func__, first_cpu(*evt->cpumask),
+	pr_debug("%s CPU %d mode=%d\n", __func__,
+		 cpumask_first(evt->cpumask),
 		 mode);
 
 	switch (mode) {

In drivers/cpuidle:

@@ -292,7 +292,7 @@ static inline int cpuidle_coupled_get_state(struct cpuidle_device *dev,
 	 */
 	smp_rmb();
 
-	for_each_cpu_mask(i, coupled->coupled_cpus)
+	for_each_cpu(i, &coupled->coupled_cpus)
 		if (cpu_online(i) && coupled->requested_state[i] < state)
 			state = coupled->requested_state[i];
 
@@ -338,7 +338,7 @@ static void cpuidle_coupled_poke_others(int this_cpu,
 {
 	int cpu;
 
-	for_each_cpu_mask(cpu, coupled->coupled_cpus)
+	for_each_cpu(cpu, &coupled->coupled_cpus)
 		if (cpu != this_cpu && cpu_online(cpu))
 			cpuidle_coupled_poke(cpu);
 }
@@ -638,7 +638,7 @@ int cpuidle_coupled_register_device(struct cpuidle_device *dev)
 	if (cpumask_empty(&dev->coupled_cpus))
 		return 0;
 
-	for_each_cpu_mask(cpu, dev->coupled_cpus) {
+	for_each_cpu(cpu, &dev->coupled_cpus) {
 		other_dev = per_cpu(cpuidle_devices, cpu);
 		if (other_dev && other_dev->coupled) {
 			coupled = other_dev->coupled;

In drivers/crypto:

@@ -1754,7 +1754,7 @@ static int spu_mdesc_walk_arcs(struct mdesc_handle *mdesc,
 				dev->dev.of_node->full_name);
 			return -EINVAL;
 		}
-		cpu_set(*id, p->sharing);
+		cpumask_set_cpu(*id, &p->sharing);
 		table[*id] = p;
 	}
 	return 0;
@@ -1776,7 +1776,7 @@ static int handle_exec_unit(struct spu_mdesc_info *ip, struct list_head *list,
 		return -ENOMEM;
 	}
 
-	cpus_clear(p->sharing);
+	cpumask_clear(&p->sharing);
 	spin_lock_init(&p->lock);
 	p->q_type = q_type;
 	INIT_LIST_HEAD(&p->jobs);

In drivers/irqchip:

@@ -512,7 +512,7 @@ static void gic_raise_softirq(const struct cpumask *mask, unsigned int irq)
 	 */
 	smp_wmb();
 
-	for_each_cpu_mask(cpu, *mask) {
+	for_each_cpu(cpu, mask) {
 		u64 cluster_id = cpu_logical_map(cpu) & ~0xffUL;
 		u16 tlist;
 

In drivers/irqchip:

@@ -345,19 +345,19 @@ static int gic_set_affinity(struct irq_data *d, const struct cpumask *cpumask,
 	int i;
 
 	cpumask_and(&tmp, cpumask, cpu_online_mask);
-	if (cpus_empty(tmp))
+	if (cpumask_empty(&tmp))
 		return -EINVAL;
 
 	/* Assumption : cpumask refers to a single CPU */
 	spin_lock_irqsave(&gic_lock, flags);
 
 	/* Re-route this IRQ */
-	gic_map_to_vpe(irq, first_cpu(tmp));
+	gic_map_to_vpe(irq, cpumask_first(&tmp));
 
 	/* Update the pcpu_masks */
 	for (i = 0; i < NR_CPUS; i++)
 		clear_bit(irq, pcpu_masks[i].pcpu_mask);
-	set_bit(irq, pcpu_masks[first_cpu(tmp)].pcpu_mask);
+	set_bit(irq, pcpu_masks[cpumask_first(&tmp)].pcpu_mask);
 
 	cpumask_copy(d->affinity, cpumask);
 	spin_unlock_irqrestore(&gic_lock, flags);

In drivers/net/ethernet/tile:

@@ -1122,7 +1122,7 @@ static int alloc_percpu_mpipe_resources(struct net_device *dev,
 			addr + i * sizeof(struct tile_net_comps);
 
 	/* If this is a network cpu, create an iqueue. */
-	if (cpu_isset(cpu, network_cpus_map)) {
+	if (cpumask_test_cpu(cpu, &network_cpus_map)) {
 		order = get_order(NOTIF_RING_SIZE);
 		page = homecache_alloc_pages(GFP_KERNEL, order, cpu);
 		if (page == NULL) {
@@ -1298,7 +1298,7 @@ static int tile_net_init_mpipe(struct net_device *dev)
 	int first_ring, ring;
 	int instance = mpipe_instance(dev);
 	struct mpipe_data *md = &mpipe_data[instance];
-	int network_cpus_count = cpus_weight(network_cpus_map);
+	int network_cpus_count = cpumask_weight(&network_cpus_map);
 
 	if (!hash_default) {
 		netdev_err(dev, "Networking requires hash_default!\n");