ACPI: change processors from array to per_cpu variable
Change processors from an array sized by NR_CPUS to a per_cpu variable.

Signed-off-by: Mike Travis <travis@sgi.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Len Brown <len.brown@intel.com>
Signed-off-by: Andi Kleen <ak@linux.intel.com>
parent 5411552c70
commit 706546d023
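For context, the conversion pattern the patch applies throughout is roughly the sketch below (the my_type/my_data names and helper functions are placeholders, not identifiers from this patch): DEFINE_PER_CPU replaces the NR_CPUS-sized array definition, DECLARE_PER_CPU replaces the extern declaration in the header, per_cpu() indexes the variable by an explicit CPU number, and __get_cpu_var() reads the slot of the CPU the code is currently running on.

#include <linux/percpu.h>

struct my_type;

/* Before: one pointer slot per possible CPU, statically sized by NR_CPUS. */
/* struct my_type *my_data[NR_CPUS]; */

/* After: one pointer in each CPU's per-cpu area. */
static DEFINE_PER_CPU(struct my_type *, my_data);

static void set_for_cpu(int cpu, struct my_type *p)
{
	per_cpu(my_data, cpu) = p;	/* explicit CPU index, as with pr->id above */
}

static struct my_type *get_for_this_cpu(void)
{
	return __get_cpu_var(my_data);	/* current CPU, as in acpi_processor_idle() */
}

Besides dropping the static NR_CPUS sizing, per-cpu storage keeps each CPU's pointer in that CPU's own per-cpu area rather than in one shared array.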
@@ -118,7 +118,7 @@ static const struct file_operations acpi_processor_info_fops = {
 	.release = single_release,
 };
 
-struct acpi_processor *processors[NR_CPUS];
+DEFINE_PER_CPU(struct acpi_processor *, processors);
 struct acpi_processor_errata errata __read_mostly;
 
 /* --------------------------------------------------------------------------
@@ -614,7 +614,7 @@ static int acpi_processor_get_info(struct acpi_processor *pr, unsigned has_uid)
 	return 0;
 }
 
-static void *processor_device_array[NR_CPUS];
+static DEFINE_PER_CPU(void *, processor_device_array);
 
 static int __cpuinit acpi_processor_start(struct acpi_device *device)
 {
@@ -638,15 +638,15 @@ static int __cpuinit acpi_processor_start(struct acpi_device *device)
 	 * ACPI id of processors can be reported wrongly by the BIOS.
 	 * Don't trust it blindly
 	 */
-	if (processor_device_array[pr->id] != NULL &&
-	    processor_device_array[pr->id] != device) {
+	if (per_cpu(processor_device_array, pr->id) != NULL &&
+	    per_cpu(processor_device_array, pr->id) != device) {
 		printk(KERN_WARNING "BIOS reported wrong ACPI id "
 			"for the processor\n");
 		return -ENODEV;
 	}
-	processor_device_array[pr->id] = device;
+	per_cpu(processor_device_array, pr->id) = device;
 
-	processors[pr->id] = pr;
+	per_cpu(processors, pr->id) = pr;
 
 	result = acpi_processor_add_fs(device);
 	if (result)
@@ -753,7 +753,7 @@ static int acpi_cpu_soft_notify(struct notifier_block *nfb,
 			unsigned long action, void *hcpu)
 {
 	unsigned int cpu = (unsigned long)hcpu;
-	struct acpi_processor *pr = processors[cpu];
+	struct acpi_processor *pr = per_cpu(processors, cpu);
 
 	if (action == CPU_ONLINE && pr) {
 		acpi_processor_ppc_has_changed(pr);
@@ -825,8 +825,8 @@ static int acpi_processor_remove(struct acpi_device *device, int type)
 		pr->cdev = NULL;
 	}
 
-	processors[pr->id] = NULL;
-	processor_device_array[pr->id] = NULL;
+	per_cpu(processors, pr->id) = NULL;
+	per_cpu(processor_device_array, pr->id) = NULL;
 	kfree(pr);
 
 	return 0;
@@ -1074,8 +1074,6 @@ static int __init acpi_processor_init(void)
 {
 	int result = 0;
 
 
-	memset(&processors, 0, sizeof(processors));
-	memset(&errata, 0, sizeof(errata));
 
 #ifdef CONFIG_SMP
@@ -401,7 +401,7 @@ static void acpi_processor_idle(void)
 	 */
 	local_irq_disable();
 
-	pr = processors[smp_processor_id()];
+	pr = __get_cpu_var(processors);
 	if (!pr) {
 		local_irq_enable();
 		return;
@@ -1431,7 +1431,7 @@ static int acpi_idle_enter_c1(struct cpuidle_device *dev,
 	struct acpi_processor *pr;
 	struct acpi_processor_cx *cx = cpuidle_get_statedata(state);
 
-	pr = processors[smp_processor_id()];
+	pr = __get_cpu_var(processors);
 
 	if (unlikely(!pr))
 		return 0;
@@ -1471,7 +1471,7 @@ static int acpi_idle_enter_simple(struct cpuidle_device *dev,
 	u32 t1, t2;
 	int sleep_ticks = 0;
 
-	pr = processors[smp_processor_id()];
+	pr = __get_cpu_var(processors);
 
 	if (unlikely(!pr))
 		return 0;
@@ -1549,7 +1549,7 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev,
 	u32 t1, t2;
 	int sleep_ticks = 0;
 
-	pr = processors[smp_processor_id()];
+	pr = __get_cpu_var(processors);
 
 	if (unlikely(!pr))
 		return 0;
@@ -89,7 +89,7 @@ static int acpi_processor_ppc_notifier(struct notifier_block *nb,
 	if (event != CPUFREQ_INCOMPATIBLE)
 		goto out;
 
-	pr = processors[policy->cpu];
+	pr = per_cpu(processors, policy->cpu);
 	if (!pr || !pr->performance)
 		goto out;
 
@@ -572,7 +572,7 @@ int acpi_processor_preregister_performance(
 
 	/* Call _PSD for all CPUs */
 	for_each_possible_cpu(i) {
-		pr = processors[i];
+		pr = per_cpu(processors, i);
 		if (!pr) {
 			/* Look only at processors in ACPI namespace */
 			continue;
@@ -603,7 +603,7 @@ int acpi_processor_preregister_performance(
 	 * domain info.
 	 */
 	for_each_possible_cpu(i) {
-		pr = processors[i];
+		pr = per_cpu(processors, i);
 		if (!pr)
 			continue;
 
@@ -624,7 +624,7 @@ int acpi_processor_preregister_performance(
 
 	cpus_clear(covered_cpus);
 	for_each_possible_cpu(i) {
-		pr = processors[i];
+		pr = per_cpu(processors, i);
 		if (!pr)
 			continue;
 
@@ -651,7 +651,7 @@ int acpi_processor_preregister_performance(
 			if (i == j)
 				continue;
 
-			match_pr = processors[j];
+			match_pr = per_cpu(processors, j);
 			if (!match_pr)
 				continue;
 
@@ -680,7 +680,7 @@ int acpi_processor_preregister_performance(
 			if (i == j)
 				continue;
 
-			match_pr = processors[j];
+			match_pr = per_cpu(processors, j);
 			if (!match_pr)
 				continue;
 
@@ -697,7 +697,7 @@ int acpi_processor_preregister_performance(
 
 err_ret:
 	for_each_possible_cpu(i) {
-		pr = processors[i];
+		pr = per_cpu(processors, i);
 		if (!pr || !pr->performance)
 			continue;
 
@@ -728,7 +728,7 @@ acpi_processor_register_performance(struct acpi_processor_performance
 
 	mutex_lock(&performance_mutex);
 
-	pr = processors[cpu];
+	pr = per_cpu(processors, cpu);
 	if (!pr) {
 		mutex_unlock(&performance_mutex);
 		return -ENODEV;
@@ -766,7 +766,7 @@ acpi_processor_unregister_performance(struct acpi_processor_performance
 
 	mutex_lock(&performance_mutex);
 
-	pr = processors[cpu];
+	pr = per_cpu(processors, cpu);
 	if (!pr) {
 		mutex_unlock(&performance_mutex);
 		return;
@@ -71,7 +71,7 @@ static int acpi_processor_update_tsd_coord(void)
 	 * coordination between all CPUs.
 	 */
 	for_each_possible_cpu(i) {
-		pr = processors[i];
+		pr = per_cpu(processors, i);
 		if (!pr)
 			continue;
 
@@ -93,7 +93,7 @@ static int acpi_processor_update_tsd_coord(void)
 
 	cpus_clear(covered_cpus);
 	for_each_possible_cpu(i) {
-		pr = processors[i];
+		pr = per_cpu(processors, i);
 		if (!pr)
 			continue;
 
@@ -119,7 +119,7 @@ static int acpi_processor_update_tsd_coord(void)
 			if (i == j)
 				continue;
 
-			match_pr = processors[j];
+			match_pr = per_cpu(processors, j);
 			if (!match_pr)
 				continue;
 
@@ -152,7 +152,7 @@ static int acpi_processor_update_tsd_coord(void)
 			if (i == j)
 				continue;
 
-			match_pr = processors[j];
+			match_pr = per_cpu(processors, j);
 			if (!match_pr)
 				continue;
 
@@ -172,7 +172,7 @@ static int acpi_processor_update_tsd_coord(void)
 
 err_ret:
 	for_each_possible_cpu(i) {
-		pr = processors[i];
+		pr = per_cpu(processors, i);
 		if (!pr)
 			continue;
 
@@ -214,7 +214,7 @@ static int acpi_processor_throttling_notifier(unsigned long event, void *data)
 	struct acpi_processor_throttling *p_throttling;
 
 	cpu = p_tstate->cpu;
-	pr = processors[cpu];
+	pr = per_cpu(processors, cpu);
 	if (!pr) {
 		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Invalid pr pointer\n"));
 		return 0;
@@ -1035,7 +1035,7 @@ int acpi_processor_set_throttling(struct acpi_processor *pr, int state)
 	 * cpus.
 	 */
 	for_each_cpu_mask(i, online_throttling_cpus) {
-		match_pr = processors[i];
+		match_pr = per_cpu(processors, i);
 		/*
 		 * If the pointer is invalid, we will report the
 		 * error message and continue.
@@ -255,7 +255,7 @@ extern void acpi_processor_unregister_performance(struct
 int acpi_processor_notify_smm(struct module *calling_module);
 
 /* for communication between multiple parts of the processor kernel module */
-extern struct acpi_processor *processors[NR_CPUS];
+DECLARE_PER_CPU(struct acpi_processor *, processors);
 extern struct acpi_processor_errata errata;
 
 void arch_acpi_processor_init_pdc(struct acpi_processor *pr);