Merge tag 'pm-extra-4.8-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm

Pull more power management updates from Rafael Wysocki:
 "A few more fixes and cleanups in the x86-64 low-level hibernation
  code, PM core, cpufreq (Kconfig and intel_pstate), and the operating
  points framework.

  Specifics:

   - Prevent the low-level assembly hibernate code on x86-64 from
     referring to __PAGE_OFFSET directly as a symbol, which doesn't work
     when the kernel identity mapping base is randomized, in which case
     __PAGE_OFFSET is a variable (Rafael Wysocki).

   - Avoid selecting CPU_FREQ_STAT by default as the statistics are not
     required for proper cpufreq operation (Borislav Petkov).

   - Add Skylake-X and Broadwell-X IDs to intel_pstate's list of
     processors where out-of-band (OOB) control of P-states is possible;
     if OOB control is in use, intel_pstate should not attempt to manage
     P-states (Srinivas Pandruvada).

   - Drop some unnecessary checks from the wakeup IRQ handling code in
     the PM core (Markus Elfring).

   - Reduce the number of operating performance point (OPP) lookups in
     one of the OPP framework's helper functions (Jisheng Zhang)"

* tag 'pm-extra-4.8-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm:
  x86/power/64: Do not refer to __PAGE_OFFSET from assembly code
  cpufreq: Do not default-yes CPU_FREQ_STAT
  cpufreq: intel_pstate: Add more out-of-band IDs
  PM / OPP: optimize dev_pm_opp_set_rate() performance a bit
  PM-wakeup: Delete unnecessary checks before three function calls
Commit 11d8ec408d by Linus Torvalds, 2016-08-05 23:26:16 -04:00
6 changed files with 37 additions and 35 deletions

arch/x86/power/hibernate_64.c

@@ -37,11 +37,11 @@ unsigned long jump_address_phys;
  */
 unsigned long restore_cr3 __visible;
 
-pgd_t *temp_level4_pgt __visible;
+unsigned long temp_level4_pgt __visible;
 
 unsigned long relocated_restore_code __visible;
 
-static int set_up_temporary_text_mapping(void)
+static int set_up_temporary_text_mapping(pgd_t *pgd)
 {
 	pmd_t *pmd;
 	pud_t *pud;
@@ -71,7 +71,7 @@ static int set_up_temporary_text_mapping(void)
 		__pmd((jump_address_phys & PMD_MASK) | __PAGE_KERNEL_LARGE_EXEC));
 	set_pud(pud + pud_index(restore_jump_address),
 		__pud(__pa(pmd) | _KERNPG_TABLE));
-	set_pgd(temp_level4_pgt + pgd_index(restore_jump_address),
+	set_pgd(pgd + pgd_index(restore_jump_address),
 		__pgd(__pa(pud) | _KERNPG_TABLE));
 
 	return 0;
@@ -90,15 +90,16 @@ static int set_up_temporary_mappings(void)
 		.kernel_mapping = true,
 	};
 	unsigned long mstart, mend;
+	pgd_t *pgd;
 	int result;
 	int i;
 
-	temp_level4_pgt = (pgd_t *)get_safe_page(GFP_ATOMIC);
-	if (!temp_level4_pgt)
+	pgd = (pgd_t *)get_safe_page(GFP_ATOMIC);
+	if (!pgd)
 		return -ENOMEM;
 
 	/* Prepare a temporary mapping for the kernel text */
-	result = set_up_temporary_text_mapping();
+	result = set_up_temporary_text_mapping(pgd);
 	if (result)
 		return result;
@@ -107,13 +108,12 @@ static int set_up_temporary_mappings(void)
 		mstart = pfn_mapped[i].start << PAGE_SHIFT;
 		mend = pfn_mapped[i].end << PAGE_SHIFT;
 
-		result = kernel_ident_mapping_init(&info, temp_level4_pgt,
-						   mstart, mend);
+		result = kernel_ident_mapping_init(&info, pgd, mstart, mend);
 		if (result)
 			return result;
 	}
 
+	temp_level4_pgt = (unsigned long)pgd - __PAGE_OFFSET;
 	return 0;
 }

arch/x86/power/hibernate_asm_64.S

@@ -72,8 +72,6 @@ ENTRY(restore_image)
 	/* code below has been relocated to a safe page */
 ENTRY(core_restore_code)
 	/* switch to temporary page tables */
-	movq	$__PAGE_OFFSET, %rcx
-	subq	%rcx, %rax
 	movq	%rax, %cr3
 	/* flush TLB */
 	movq	%rbx, %rcx
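
The point of the two hibernation hunks above is that the virtual-to-physical conversion now happens in C, where __PAGE_OFFSET may be evaluated at run time, and the assembly receives a ready-made physical address in temp_level4_pgt. A minimal sketch of that conversion, using a hypothetical helper name (the real patch open-codes the subtraction, as shown in the C hunk above):

/*
 * Illustrative sketch only, not the patch's exact code: compute the
 * physical address of the temporary page tables in C so the assembly
 * can load it straight into %cr3 without knowing __PAGE_OFFSET.
 */
static unsigned long temp_pgt_phys(pgd_t *pgd)
{
	/* __pa() copes with __PAGE_OFFSET being a run-time value */
	return __pa(pgd);
}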

drivers/base/power/opp/core.c

@@ -402,6 +402,22 @@ struct dev_pm_opp *dev_pm_opp_find_freq_exact(struct device *dev,
 }
 EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_exact);
 
+static noinline struct dev_pm_opp *_find_freq_ceil(struct opp_table *opp_table,
+						   unsigned long *freq)
+{
+	struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);
+
+	list_for_each_entry_rcu(temp_opp, &opp_table->opp_list, node) {
+		if (temp_opp->available && temp_opp->rate >= *freq) {
+			opp = temp_opp;
+			*freq = opp->rate;
+			break;
+		}
+	}
+
+	return opp;
+}
+
 /**
  * dev_pm_opp_find_freq_ceil() - Search for an rounded ceil freq
  * @dev: device for which we do this operation
@@ -427,7 +443,6 @@ struct dev_pm_opp *dev_pm_opp_find_freq_ceil(struct device *dev,
 					     unsigned long *freq)
 {
 	struct opp_table *opp_table;
-	struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);
 
 	opp_rcu_lockdep_assert();
@@ -440,15 +455,7 @@ struct dev_pm_opp *dev_pm_opp_find_freq_ceil(struct device *dev,
 	if (IS_ERR(opp_table))
 		return ERR_CAST(opp_table);
 
-	list_for_each_entry_rcu(temp_opp, &opp_table->opp_list, node) {
-		if (temp_opp->available && temp_opp->rate >= *freq) {
-			opp = temp_opp;
-			*freq = opp->rate;
-			break;
-		}
-	}
-
-	return opp;
+	return _find_freq_ceil(opp_table, freq);
 }
 EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_ceil);
@@ -612,7 +619,7 @@ int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq)
 		return PTR_ERR(opp_table);
 	}
 
-	old_opp = dev_pm_opp_find_freq_ceil(dev, &old_freq);
+	old_opp = _find_freq_ceil(opp_table, &old_freq);
 	if (!IS_ERR(old_opp)) {
 		ou_volt = old_opp->u_volt;
 		ou_volt_min = old_opp->u_volt_min;
@@ -622,7 +629,7 @@ int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq)
 			__func__, old_freq, PTR_ERR(old_opp));
 	}
 
-	opp = dev_pm_opp_find_freq_ceil(dev, &freq);
+	opp = _find_freq_ceil(opp_table, &freq);
 	if (IS_ERR(opp)) {
 		ret = PTR_ERR(opp);
 		dev_err(dev, "%s: failed to find OPP for freq %lu (%d)\n",
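
As a usage-level illustration of the helper whose internal lookups the refactoring above trims, a consumer of the OPP framework simply passes a device and a target rate in Hz; the ceiling-OPP selection (now shared through _find_freq_ceil()) happens internally. A rough sketch with a placeholder frequency:

/*
 * Hypothetical caller of dev_pm_opp_set_rate(): ask for 800 MHz and let
 * the OPP core round the request up to the nearest available OPP.
 */
static int example_scale_up(struct device *dev)
{
	return dev_pm_opp_set_rate(dev, 800 * 1000 * 1000);	/* Hz */
}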

drivers/base/power/wakeup.c

@@ -334,10 +334,9 @@ void device_wakeup_arm_wake_irqs(void)
 	struct wakeup_source *ws;
 
 	rcu_read_lock();
-	list_for_each_entry_rcu(ws, &wakeup_sources, entry) {
-		if (ws->wakeirq)
-			dev_pm_arm_wake_irq(ws->wakeirq);
-	}
+	list_for_each_entry_rcu(ws, &wakeup_sources, entry)
+		dev_pm_arm_wake_irq(ws->wakeirq);
 	rcu_read_unlock();
 }
@@ -351,10 +350,9 @@ void device_wakeup_disarm_wake_irqs(void)
 	struct wakeup_source *ws;
 
 	rcu_read_lock();
-	list_for_each_entry_rcu(ws, &wakeup_sources, entry) {
-		if (ws->wakeirq)
-			dev_pm_disarm_wake_irq(ws->wakeirq);
-	}
+	list_for_each_entry_rcu(ws, &wakeup_sources, entry)
+		dev_pm_disarm_wake_irq(ws->wakeirq);
 	rcu_read_unlock();
 }
@@ -390,9 +388,7 @@ int device_wakeup_disable(struct device *dev)
 		return -EINVAL;
 
 	ws = device_wakeup_detach(dev);
-	if (ws)
-		wakeup_source_unregister(ws);
+	wakeup_source_unregister(ws);
 	return 0;
 }
 EXPORT_SYMBOL_GPL(device_wakeup_disable);

drivers/cpufreq/Kconfig

@@ -32,7 +32,6 @@ config CPU_FREQ_BOOST_SW
 
 config CPU_FREQ_STAT
 	bool "CPU frequency transition statistics"
-	default y
 	help
 	  Export CPU frequency statistics information through sysfs.
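
Since CPU_FREQ_STAT no longer defaults to y, configurations that relied on the old default rather than setting the option explicitly will lose the statistics; kernels configured from scratch need to opt back in, for example with an illustrative .config fragment like this:

# Re-enable the cpufreq transition statistics in sysfs explicitly.
CONFIG_CPU_FREQ_STAT=y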

drivers/cpufreq/intel_pstate.c

@@ -1374,6 +1374,8 @@ MODULE_DEVICE_TABLE(x86cpu, intel_pstate_cpu_ids);
 
 static const struct x86_cpu_id intel_pstate_cpu_oob_ids[] __initconst = {
 	ICPU(INTEL_FAM6_BROADWELL_XEON_D, core_params),
+	ICPU(INTEL_FAM6_BROADWELL_X, core_params),
+	ICPU(INTEL_FAM6_SKYLAKE_X, core_params),
 	{}
 };
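
For context, intel_pstate_cpu_oob_ids is consulted during driver initialization so that intel_pstate can step aside when P-states are managed out of band. A simplified sketch of that kind of check; the helper name below is hypothetical, and the real driver also consults firmware/ACPI state before giving up:

/*
 * Simplified sketch, not the driver's exact init path: if this CPU model
 * is in the OOB table and the platform really is managing P-states out
 * of band, refuse to load so the firmware keeps control.
 */
static int __init oob_check_sketch(void)
{
	if (x86_match_cpu(intel_pstate_cpu_oob_ids) &&
	    platform_owns_pstates())	/* hypothetical helper */
		return -ENODEV;

	return 0;
}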