diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index eb730a80952c..2be895895943 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -876,14 +876,11 @@ static int acpi_processor_power_verify(struct acpi_processor *pr)
 {
 	unsigned int i;
 	unsigned int working = 0;
-
-#ifdef ARCH_APICTIMER_STOPS_ON_C3
-	struct cpuinfo_x86 *c = cpu_data + pr->id;
+	int timer_broadcast = 0;
 	cpumask_t mask = cpumask_of_cpu(pr->id);
 
-	if (c->x86_vendor == X86_VENDOR_INTEL) {
-		on_each_cpu(switch_ipi_to_APIC_timer, &mask, 1, 1);
-	}
+#ifdef ARCH_APICTIMER_STOPS_ON_C3
+	on_each_cpu(switch_ipi_to_APIC_timer, &mask, 1, 1);
 #endif
 
 	for (i = 1; i < ACPI_PROCESSOR_MAX_POWER; i++) {
@@ -896,15 +893,20 @@ static int acpi_processor_power_verify(struct acpi_processor *pr)
 
 		case ACPI_STATE_C2:
 			acpi_processor_power_verify_c2(cx);
+#ifdef ARCH_APICTIMER_STOPS_ON_C3
+			/* Some AMD systems fake C3 as C2, but still
+			   have timer troubles */
+			if (cx->valid &&
+				boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
+				timer_broadcast++;
+#endif
 			break;
 
 		case ACPI_STATE_C3:
 			acpi_processor_power_verify_c3(pr, cx);
 #ifdef ARCH_APICTIMER_STOPS_ON_C3
-			if (cx->valid && c->x86_vendor == X86_VENDOR_INTEL) {
-				on_each_cpu(switch_APIC_timer_to_ipi,
-					    &mask, 1, 1);
-			}
+			if (cx->valid)
+				timer_broadcast++;
 #endif
 			break;
 		}
@@ -913,6 +915,9 @@ static int acpi_processor_power_verify(struct acpi_processor *pr)
 
 			working++;
 	}
 
+	if (timer_broadcast)
+		on_each_cpu(switch_APIC_timer_to_ipi, &mask, 1, 1);
+
 	return (working);
 }
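
For readers who want to see the resulting control flow outside of diff form, the sketch below models the patched logic as a small stand-alone user-space C program, assuming nothing beyond what the hunks above show: the per-state Intel checks are gone, a timer_broadcast counter is bumped for valid C3 states (and, on AMD, for valid C2 states that fake C3), and the switch to broadcast mode happens once after the scan. The cstate struct, the vendor_is_amd flag, and the switch_to_broadcast() helper are illustrative stand-ins, not the kernel's pr->power.states[], boot_cpu_data vendor check, or switch_APIC_timer_to_ipi().

/* Simplified, user-space model of the patched verify loop: instead of
 * switching the timer mode inside each case, a flag is accumulated and
 * acted on once after all C-states have been examined. */
#include <stdio.h>
#include <stdbool.h>

enum cstate_type { STATE_C1 = 1, STATE_C2, STATE_C3 };

struct cstate {
	enum cstate_type type;
	bool valid;
};

/* Illustrative stand-in for the broadcast-timer switch done by
 * switch_APIC_timer_to_ipi() in the real driver. */
static void switch_to_broadcast(void)
{
	puts("switching local timer to broadcast mode");
}

static unsigned int power_verify(const struct cstate *states, int n,
				 bool vendor_is_amd)
{
	unsigned int working = 0;
	int timer_broadcast = 0;

	for (int i = 0; i < n; i++) {
		switch (states[i].type) {
		case STATE_C2:
			/* AMD systems that fake C3 as C2 still need the
			 * broadcast timer, per the comment in the patch. */
			if (states[i].valid && vendor_is_amd)
				timer_broadcast++;
			break;
		case STATE_C3:
			if (states[i].valid)
				timer_broadcast++;
			break;
		default:
			break;
		}
		if (states[i].valid)
			working++;
	}

	/* Act on the accumulated flag exactly once, after the scan. */
	if (timer_broadcast)
		switch_to_broadcast();

	return working;
}

int main(void)
{
	struct cstate states[] = {
		{ STATE_C1, true },
		{ STATE_C2, true },
		{ STATE_C3, false },
	};

	printf("%u working C-states\n",
	       power_verify(states, 3, /* vendor_is_amd */ true));
	return 0;
}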