More power management updates for 5.2-rc1

Merge tag 'pm-5.2-rc1-2' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm

Pull more power management updates from Rafael Wysocki:
 "These fix a recent regression causing kernels built with CONFIG_PM
  unset to crash on systems that support the Performance and Energy Bias
  Hint (EPB), clean up the cpufreq core and some users of transition
  notifiers, and introduce a new power domain flag into the generic
  power domains framework (genpd).

  Specifics:

   - Fix recent regression causing kernels built with CONFIG_PM unset to
     crash on systems that support the Performance and Energy Bias Hint
     (EPB) by compiling the EPB-related code only when CONFIG_PM is set
     (Rafael Wysocki).

   - Clean up the transition notifier invocation code in the cpufreq
     core and change some users of cpufreq transition notifiers
     accordingly (Viresh Kumar).

   - Change MAINTAINERS to cover the schedutil governor as part of
     cpufreq (Viresh Kumar).

   - Simplify cpufreq_init_policy() to avoid redundant computations
     (Yue Hu).

   - Add an explanatory comment to the cpufreq core (Rafael Wysocki).

   - Introduce a new flag, GENPD_FLAG_RPM_ALWAYS_ON, to the generic
     power domains (genpd) framework, along with its first user
     (Leonard Crestez)"

* tag 'pm-5.2-rc1-2' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm:
  soc: imx: gpc: Use GENPD_FLAG_RPM_ALWAYS_ON for ERR009619
  PM / Domains: Add GENPD_FLAG_RPM_ALWAYS_ON flag
  cpufreq: Update MAINTAINERS to include schedutil governor
  cpufreq: Don't find governor for setpolicy drivers in cpufreq_init_policy()
  cpufreq: Explain the kobject_put() in cpufreq_policy_alloc()
  cpufreq: Call transition notifier only once for each policy
  x86: intel_epb: Take CONFIG_PM into account
commit bfbfbf7368
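Context for the cpufreq changes below: struct cpufreq_freqs no longer carries a cpu field; it carries a policy pointer instead, and the transition notifier chain now fires once per policy rather than once per affected CPU. A minimal sketch of a transition notifier written against the new contract (the notifier and all "demo" names are hypothetical, not part of this merge):

#include <linux/cpufreq.h>
#include <linux/cpumask.h>
#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/printk.h>

/* Called once per policy; walk freq->policy->cpus for per-CPU work. */
static int demo_transition_notifier(struct notifier_block *nb,
                                    unsigned long val, void *data)
{
        struct cpufreq_freqs *freq = data;
        int cpu;

        if (val != CPUFREQ_POSTCHANGE)
                return NOTIFY_OK;

        for_each_cpu(cpu, freq->policy->cpus)
                pr_debug("cpu%d: %u kHz -> %u kHz\n",
                         cpu, freq->old, freq->new);

        return NOTIFY_OK;
}

static struct notifier_block demo_nb = {
        .notifier_call = demo_transition_notifier,
};

static int __init demo_init(void)
{
        return cpufreq_register_notifier(&demo_nb,
                                         CPUFREQ_TRANSITION_NOTIFIER);
}

This mirrors the pattern the arch notifiers in the diffs below were converted to.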
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -4133,7 +4133,9 @@ F: Documentation/admin-guide/pm/intel_pstate.rst
 F: Documentation/cpu-freq/
 F: Documentation/devicetree/bindings/cpufreq/
 F: drivers/cpufreq/
+F: kernel/sched/cpufreq*.c
 F: include/linux/cpufreq.h
+F: include/linux/sched/cpufreq.h
 F: tools/testing/selftests/cpufreq/
 
 CPU FREQUENCY DRIVERS - ARM BIG LITTLE
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -758,15 +758,20 @@ static int cpufreq_callback(struct notifier_block *nb,
                                         unsigned long val, void *data)
 {
         struct cpufreq_freqs *freq = data;
-        int cpu = freq->cpu;
+        struct cpumask *cpus = freq->policy->cpus;
+        int cpu, first = cpumask_first(cpus);
+        unsigned int lpj;
 
         if (freq->flags & CPUFREQ_CONST_LOOPS)
                 return NOTIFY_OK;
 
-        if (!per_cpu(l_p_j_ref, cpu)) {
-                per_cpu(l_p_j_ref, cpu) =
-                        per_cpu(cpu_data, cpu).loops_per_jiffy;
-                per_cpu(l_p_j_ref_freq, cpu) = freq->old;
+        if (!per_cpu(l_p_j_ref, first)) {
+                for_each_cpu(cpu, cpus) {
+                        per_cpu(l_p_j_ref, cpu) =
+                                per_cpu(cpu_data, cpu).loops_per_jiffy;
+                        per_cpu(l_p_j_ref_freq, cpu) = freq->old;
+                }
 
                 if (!global_l_p_j_ref) {
                         global_l_p_j_ref = loops_per_jiffy;
                         global_l_p_j_ref_freq = freq->old;
@@ -778,10 +783,11 @@ static int cpufreq_callback(struct notifier_block *nb,
                 loops_per_jiffy = cpufreq_scale(global_l_p_j_ref,
                                                 global_l_p_j_ref_freq,
                                                 freq->new);
-                per_cpu(cpu_data, cpu).loops_per_jiffy =
-                        cpufreq_scale(per_cpu(l_p_j_ref, cpu),
-                                        per_cpu(l_p_j_ref_freq, cpu),
-                                        freq->new);
+
+                lpj = cpufreq_scale(per_cpu(l_p_j_ref, first),
+                                    per_cpu(l_p_j_ref_freq, first), freq->new);
+                for_each_cpu(cpu, cpus)
+                        per_cpu(cpu_data, cpu).loops_per_jiffy = lpj;
         }
         return NOTIFY_OK;
 }
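For the arithmetic above: cpufreq_scale(old, div, mult) computes old * mult / div, so the calibrated loops_per_jiffy reference scales linearly with frequency. A standalone model of that update, with invented numbers (assumes 64-bit unsigned long so the intermediate product does not overflow):

#include <stdio.h>

/* Linear rescale, mirroring the kernel's cpufreq_scale(old, div, mult). */
static unsigned long scale(unsigned long old, unsigned int div,
                           unsigned int mult)
{
        return old * mult / div;
}

int main(void)
{
        unsigned long ref_lpj = 4980736;   /* lpj calibrated at ref_freq */
        unsigned int ref_freq = 996000;    /* kHz, frequency at calibration */
        unsigned int new_freq = 1992000;   /* kHz, frequency after transition */

        /* Doubling the clock doubles loops_per_jiffy. */
        printf("lpj at %u kHz: %lu\n", new_freq,
               scale(ref_lpj, ref_freq, new_freq));
        return 0;
}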
--- a/arch/sparc/kernel/time_64.c
+++ b/arch/sparc/kernel/time_64.c
@@ -653,19 +653,23 @@ static int sparc64_cpufreq_notifier(struct notifier_block *nb, unsigned long val
                                             void *data)
 {
         struct cpufreq_freqs *freq = data;
-        unsigned int cpu = freq->cpu;
-        struct freq_table *ft = &per_cpu(sparc64_freq_table, cpu);
+        unsigned int cpu;
+        struct freq_table *ft;
 
-        if (!ft->ref_freq) {
-                ft->ref_freq = freq->old;
-                ft->clock_tick_ref = cpu_data(cpu).clock_tick;
-        }
-        if ((val == CPUFREQ_PRECHANGE && freq->old < freq->new) ||
-            (val == CPUFREQ_POSTCHANGE && freq->old > freq->new)) {
-                cpu_data(cpu).clock_tick =
-                        cpufreq_scale(ft->clock_tick_ref,
-                                      ft->ref_freq,
-                                      freq->new);
+        for_each_cpu(cpu, freq->policy->cpus) {
+                ft = &per_cpu(sparc64_freq_table, cpu);
+
+                if (!ft->ref_freq) {
+                        ft->ref_freq = freq->old;
+                        ft->clock_tick_ref = cpu_data(cpu).clock_tick;
+                }
+
+                if ((val == CPUFREQ_PRECHANGE && freq->old < freq->new) ||
+                    (val == CPUFREQ_POSTCHANGE && freq->old > freq->new)) {
+                        cpu_data(cpu).clock_tick =
+                                cpufreq_scale(ft->clock_tick_ref, ft->ref_freq,
+                                              freq->new);
+                }
         }
 
         return 0;
--- a/arch/x86/kernel/cpu/intel_epb.c
+++ b/arch/x86/kernel/cpu/intel_epb.c
@@ -97,6 +97,7 @@ static void intel_epb_restore(void)
         wrmsrl(MSR_IA32_ENERGY_PERF_BIAS, (epb & ~EPB_MASK) | val);
 }
 
+#ifdef CONFIG_PM
 static struct syscore_ops intel_epb_syscore_ops = {
         .suspend = intel_epb_save,
         .resume = intel_epb_restore,
@@ -193,6 +194,25 @@ static int intel_epb_offline(unsigned int cpu)
         return 0;
 }
 
+static inline void register_intel_ebp_syscore_ops(void)
+{
+        register_syscore_ops(&intel_epb_syscore_ops);
+}
+#else /* !CONFIG_PM */
+static int intel_epb_online(unsigned int cpu)
+{
+        intel_epb_restore();
+        return 0;
+}
+
+static int intel_epb_offline(unsigned int cpu)
+{
+        return intel_epb_save();
+}
+
+static inline void register_intel_ebp_syscore_ops(void) {}
+#endif
+
 static __init int intel_epb_init(void)
 {
         int ret;
@@ -206,7 +226,7 @@ static __init int intel_epb_init(void)
         if (ret < 0)
                 goto err_out_online;
 
-        register_syscore_ops(&intel_epb_syscore_ops);
+        register_intel_ebp_syscore_ops();
         return 0;
 
 err_out_online:
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -979,7 +979,7 @@ static int time_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
                 if (!(freq->flags & CPUFREQ_CONST_LOOPS))
                         mark_tsc_unstable("cpufreq changes");
 
-                set_cyc2ns_scale(tsc_khz, freq->cpu, rdtsc());
+                set_cyc2ns_scale(tsc_khz, freq->policy->cpu, rdtsc());
         }
 
         return 0;
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -6698,10 +6698,8 @@ static void kvm_hyperv_tsc_notifier(void)
 }
 #endif
 
-static int kvmclock_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
-                                     void *data)
+static void __kvmclock_cpufreq_notifier(struct cpufreq_freqs *freq, int cpu)
 {
-        struct cpufreq_freqs *freq = data;
         struct kvm *kvm;
         struct kvm_vcpu *vcpu;
         int i, send_ipi = 0;
@@ -6745,17 +6743,12 @@ static int kvmclock_cpufreq_notifier(struct notifier_block *nb, unsigned long va
          *
          */
 
-        if (val == CPUFREQ_PRECHANGE && freq->old > freq->new)
-                return 0;
-        if (val == CPUFREQ_POSTCHANGE && freq->old < freq->new)
-                return 0;
-
-        smp_call_function_single(freq->cpu, tsc_khz_changed, freq, 1);
+        smp_call_function_single(cpu, tsc_khz_changed, freq, 1);
 
         spin_lock(&kvm_lock);
         list_for_each_entry(kvm, &vm_list, vm_list) {
                 kvm_for_each_vcpu(i, vcpu, kvm) {
-                        if (vcpu->cpu != freq->cpu)
+                        if (vcpu->cpu != cpu)
                                 continue;
                         kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
                         if (vcpu->cpu != smp_processor_id())
@@ -6777,8 +6770,24 @@ static int kvmclock_cpufreq_notifier(struct notifier_block *nb, unsigned long va
                  * guest context is entered kvmclock will be updated,
                  * so the guest will not see stale values.
                  */
-                smp_call_function_single(freq->cpu, tsc_khz_changed, freq, 1);
+                smp_call_function_single(cpu, tsc_khz_changed, freq, 1);
         }
 }
 
+static int kvmclock_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
+                                     void *data)
+{
+        struct cpufreq_freqs *freq = data;
+        int cpu;
+
+        if (val == CPUFREQ_PRECHANGE && freq->old > freq->new)
+                return 0;
+        if (val == CPUFREQ_POSTCHANGE && freq->old < freq->new)
+                return 0;
+
+        for_each_cpu(cpu, freq->policy->cpus)
+                __kvmclock_cpufreq_notifier(freq, cpu);
+
+        return 0;
+}
+
--- a/drivers/base/power/domain.c
+++ b/drivers/base/power/domain.c
@@ -128,6 +128,7 @@ static const struct genpd_lock_ops genpd_spin_ops = {
 #define genpd_is_always_on(genpd)       (genpd->flags & GENPD_FLAG_ALWAYS_ON)
 #define genpd_is_active_wakeup(genpd)   (genpd->flags & GENPD_FLAG_ACTIVE_WAKEUP)
 #define genpd_is_cpu_domain(genpd)      (genpd->flags & GENPD_FLAG_CPU_DOMAIN)
+#define genpd_is_rpm_always_on(genpd)   (genpd->flags & GENPD_FLAG_RPM_ALWAYS_ON)
 
 static inline bool irq_safe_dev_in_no_sleep_domain(struct device *dev,
                 const struct generic_pm_domain *genpd)
@@ -515,7 +516,9 @@ static int genpd_power_off(struct generic_pm_domain *genpd, bool one_dev_on,
          * (1) The domain is configured as always on.
          * (2) When the domain has a subdomain being powered on.
          */
-        if (genpd_is_always_on(genpd) || atomic_read(&genpd->sd_count) > 0)
+        if (genpd_is_always_on(genpd) ||
+            genpd_is_rpm_always_on(genpd) ||
+            atomic_read(&genpd->sd_count) > 0)
                 return -EBUSY;
 
         list_for_each_entry(pdd, &genpd->dev_list, list_node) {
@@ -1812,7 +1815,8 @@ int pm_genpd_init(struct generic_pm_domain *genpd,
         }
 
         /* Always-on domains must be powered on at initialization. */
-        if (genpd_is_always_on(genpd) && !genpd_status_on(genpd))
+        if ((genpd_is_always_on(genpd) || genpd_is_rpm_always_on(genpd)) &&
+            !genpd_status_on(genpd))
                 return -EINVAL;
 
         if (genpd_is_cpu_domain(genpd) &&
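Net effect of the three genpd hunks above: a domain flagged GENPD_FLAG_RPM_ALWAYS_ON refuses runtime power-off with -EBUSY but can still go down during system suspend, and pm_genpd_init() now rejects such a domain unless it starts out powered on. A hypothetical provider might use the flag like this (the domain name and setup function are invented for illustration):

#include <linux/pm_domain.h>

static struct generic_pm_domain demo_pd = {
        .name = "demo-pd",
        /* .power_on / .power_off callbacks would go here */
};

static int demo_pd_setup(void)
{
        /* Keep the domain powered while the system is running;
         * system-wide suspend may still power it down. */
        demo_pd.flags |= GENPD_FLAG_RPM_ALWAYS_ON;

        /* is_off must be false: pm_genpd_init() returns -EINVAL if an
         * (RPM_)ALWAYS_ON domain is registered powered off. */
        return pm_genpd_init(&demo_pd, NULL, false);
}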
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -340,11 +340,14 @@ static void cpufreq_notify_transition(struct cpufreq_policy *policy,
                                       struct cpufreq_freqs *freqs,
                                       unsigned int state)
 {
+        int cpu;
+
         BUG_ON(irqs_disabled());
 
         if (cpufreq_disabled())
                 return;
 
+        freqs->policy = policy;
         freqs->flags = cpufreq_driver->flags;
         pr_debug("notification %u of frequency transition to %u kHz\n",
                  state, freqs->new);
@@ -364,10 +367,8 @@ static void cpufreq_notify_transition(struct cpufreq_policy *policy,
                         }
                 }
 
-                for_each_cpu(freqs->cpu, policy->cpus) {
-                        srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
-                                                 CPUFREQ_PRECHANGE, freqs);
-                }
+                srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
+                                         CPUFREQ_PRECHANGE, freqs);
 
                 adjust_jiffies(CPUFREQ_PRECHANGE, freqs);
                 break;
@@ -377,11 +378,11 @@ static void cpufreq_notify_transition(struct cpufreq_policy *policy,
                 pr_debug("FREQ: %u - CPUs: %*pbl\n", freqs->new,
                          cpumask_pr_args(policy->cpus));
 
-                for_each_cpu(freqs->cpu, policy->cpus) {
-                        trace_cpu_frequency(freqs->new, freqs->cpu);
-                        srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
-                                                 CPUFREQ_POSTCHANGE, freqs);
-                }
+                for_each_cpu(cpu, policy->cpus)
+                        trace_cpu_frequency(freqs->new, cpu);
+
+                srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
+                                         CPUFREQ_POSTCHANGE, freqs);
 
                 cpufreq_stats_record_transition(policy, freqs->new);
                 policy->cur = freqs->new;
@@ -618,50 +619,52 @@ static struct cpufreq_governor *find_governor(const char *str_governor)
         return NULL;
 }
 
+static int cpufreq_parse_policy(char *str_governor,
+                                struct cpufreq_policy *policy)
+{
+        if (!strncasecmp(str_governor, "performance", CPUFREQ_NAME_LEN)) {
+                policy->policy = CPUFREQ_POLICY_PERFORMANCE;
+                return 0;
+        }
+        if (!strncasecmp(str_governor, "powersave", CPUFREQ_NAME_LEN)) {
+                policy->policy = CPUFREQ_POLICY_POWERSAVE;
+                return 0;
+        }
+        return -EINVAL;
+}
+
 /**
- * cpufreq_parse_governor - parse a governor string
+ * cpufreq_parse_governor - parse a governor string only for !setpolicy
  */
 static int cpufreq_parse_governor(char *str_governor,
                                   struct cpufreq_policy *policy)
 {
-        if (cpufreq_driver->setpolicy) {
-                if (!strncasecmp(str_governor, "performance", CPUFREQ_NAME_LEN)) {
-                        policy->policy = CPUFREQ_POLICY_PERFORMANCE;
-                        return 0;
-                }
+        struct cpufreq_governor *t;
 
-                if (!strncasecmp(str_governor, "powersave", CPUFREQ_NAME_LEN)) {
-                        policy->policy = CPUFREQ_POLICY_POWERSAVE;
-                        return 0;
-                }
-        } else {
-                struct cpufreq_governor *t;
+        mutex_lock(&cpufreq_governor_mutex);
 
-                mutex_lock(&cpufreq_governor_mutex);
+        t = find_governor(str_governor);
+        if (!t) {
+                int ret;
 
-                t = find_governor(str_governor);
-                if (!t) {
-                        int ret;
+                mutex_unlock(&cpufreq_governor_mutex);
 
-                        mutex_unlock(&cpufreq_governor_mutex);
+                ret = request_module("cpufreq_%s", str_governor);
+                if (ret)
+                        return -EINVAL;
 
-                        ret = request_module("cpufreq_%s", str_governor);
-                        if (ret)
-                                return -EINVAL;
+                mutex_lock(&cpufreq_governor_mutex);
 
-                        mutex_lock(&cpufreq_governor_mutex);
+                t = find_governor(str_governor);
+        }
+        if (t && !try_module_get(t->owner))
+                t = NULL;
 
-                        t = find_governor(str_governor);
-                }
-                if (t && !try_module_get(t->owner))
-                        t = NULL;
+        mutex_unlock(&cpufreq_governor_mutex);
 
-                mutex_unlock(&cpufreq_governor_mutex);
-
-                if (t) {
-                        policy->governor = t;
-                        return 0;
-                }
+        if (t) {
+                policy->governor = t;
+                return 0;
         }
 
         return -EINVAL;
@@ -783,8 +786,13 @@ static ssize_t store_scaling_governor(struct cpufreq_policy *policy,
         if (ret != 1)
                 return -EINVAL;
 
-        if (cpufreq_parse_governor(str_governor, &new_policy))
-                return -EINVAL;
+        if (cpufreq_driver->setpolicy) {
+                if (cpufreq_parse_policy(str_governor, &new_policy))
+                        return -EINVAL;
+        } else {
+                if (cpufreq_parse_governor(str_governor, &new_policy))
+                        return -EINVAL;
+        }
 
         ret = cpufreq_set_policy(policy, &new_policy);
 
@@ -1050,32 +1058,39 @@ __weak struct cpufreq_governor *cpufreq_default_governor(void)
 
 static int cpufreq_init_policy(struct cpufreq_policy *policy)
 {
-        struct cpufreq_governor *gov = NULL;
+        struct cpufreq_governor *gov = NULL, *def_gov = NULL;
         struct cpufreq_policy new_policy;
 
         memcpy(&new_policy, policy, sizeof(*policy));
 
-        /* Update governor of new_policy to the governor used before hotplug */
-        gov = find_governor(policy->last_governor);
-        if (gov) {
-                pr_debug("Restoring governor %s for cpu %d\n",
+        def_gov = cpufreq_default_governor();
+
+        if (has_target()) {
+                /*
+                 * Update governor of new_policy to the governor used before
+                 * hotplug
+                 */
+                gov = find_governor(policy->last_governor);
+                if (gov) {
+                        pr_debug("Restoring governor %s for cpu %d\n",
                                 policy->governor->name, policy->cpu);
-        } else {
-                gov = cpufreq_default_governor();
-                if (!gov)
-                        return -ENODATA;
-        }
-
-        new_policy.governor = gov;
-
-        /* Use the default policy if there is no last_policy. */
-        if (cpufreq_driver->setpolicy) {
-                if (policy->last_policy)
+                } else {
+                        if (!def_gov)
+                                return -ENODATA;
+                        gov = def_gov;
+                }
+                new_policy.governor = gov;
+        }
+
+        if (cpufreq_driver->setpolicy) {
+                /* Use the default policy if there is no last_policy. */
+                if (policy->last_policy) {
                         new_policy.policy = policy->last_policy;
-                else
-                        cpufreq_parse_governor(gov->name, &new_policy);
-        }
-        /* set default policy */
+                } else {
+                        if (!def_gov)
+                                return -ENODATA;
+                        cpufreq_parse_policy(def_gov->name, &new_policy);
+                }
+        }
 
         return cpufreq_set_policy(policy, &new_policy);
 }
@@ -1133,6 +1148,11 @@ static struct cpufreq_policy *cpufreq_policy_alloc(unsigned int cpu)
                                    cpufreq_global_kobject, "policy%u", cpu);
         if (ret) {
                 pr_err("%s: failed to init policy->kobj: %d\n", __func__, ret);
+                /*
+                 * The entire policy object will be freed below, but the extra
+                 * memory allocated for the kobject name needs to be freed by
+                 * releasing the kobject.
+                 */
                 kobject_put(&policy->kobj);
                 goto err_free_real_cpus;
         }
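The comment added above documents a general kobject rule: once kobject_init_and_add() has been called, even when it fails, the kobject owns the memory allocated for its name and must be released with kobject_put() rather than a plain kfree(). A sketch of the pattern on a hypothetical object (the "demo" names are invented; here the release callback also frees the container, unlike the cpufreq case where the policy is freed separately):

#include <linux/kobject.h>
#include <linux/slab.h>

struct demo_obj {
        struct kobject kobj;
};

static void demo_release(struct kobject *kobj)
{
        kfree(container_of(kobj, struct demo_obj, kobj));
}

static struct kobj_type demo_ktype = {
        .release = demo_release,
};

static struct demo_obj *demo_create(struct kobject *parent, int id)
{
        struct demo_obj *obj = kzalloc(sizeof(*obj), GFP_KERNEL);
        int ret;

        if (!obj)
                return NULL;

        ret = kobject_init_and_add(&obj->kobj, &demo_ktype, parent,
                                   "demo%d", id);
        if (ret) {
                /* Releases the kobject name and, via demo_release(), the
                 * object itself; kfree(obj) here would leak the name. */
                kobject_put(&obj->kobj);
                return NULL;
        }
        return obj;
}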
--- a/drivers/soc/imx/gpc.c
+++ b/drivers/soc/imx/gpc.c
@@ -431,10 +431,19 @@ static int imx_gpc_probe(struct platform_device *pdev)
                         return ret;
         }
 
-        /* Disable PU power down in normal operation if ERR009619 is present */
+        /*
+         * Disable PU power down by runtime PM if ERR009619 is present.
+         *
+         * The PRE clock will be paused for several cycles when turning on the
+         * PU domain LDO from power down state. If PRE is in use at that time,
+         * the IPU/PRG cannot get the correct display data from the PRE.
+         *
+         * This is not a concern when the whole system enters suspend state, so
+         * it's safe to power down PU in this case.
+         */
         if (of_id_data->err009619_present)
                 imx_gpc_domains[GPC_PGC_DOMAIN_PU].base.flags |=
-                        GENPD_FLAG_ALWAYS_ON;
+                        GENPD_FLAG_RPM_ALWAYS_ON;
 
         /* Keep DISP always on if ERR006287 is present */
         if (of_id_data->err006287_present)
--- a/include/linux/cpufreq.h
+++ b/include/linux/cpufreq.h
@@ -42,13 +42,6 @@ enum cpufreq_table_sorting {
         CPUFREQ_TABLE_SORTED_DESCENDING
 };
 
-struct cpufreq_freqs {
-        unsigned int cpu;       /* cpu nr */
-        unsigned int old;
-        unsigned int new;
-        u8 flags;               /* flags of cpufreq_driver, see below. */
-};
-
 struct cpufreq_cpuinfo {
         unsigned int max_freq;
         unsigned int min_freq;
@@ -156,6 +149,13 @@ struct cpufreq_policy {
         struct thermal_cooling_device *cdev;
 };
 
+struct cpufreq_freqs {
+        struct cpufreq_policy *policy;
+        unsigned int old;
+        unsigned int new;
+        u8 flags;               /* flags of cpufreq_driver, see below. */
+};
+
 /* Only for ACPI */
 #define CPUFREQ_SHARED_TYPE_NONE (0) /* None */
 #define CPUFREQ_SHARED_TYPE_HW   (1) /* HW does needed coordination */
--- a/include/linux/pm_domain.h
+++ b/include/linux/pm_domain.h
@@ -53,12 +53,16 @@
  *                              driver must then comply with the so called,
  *                              last-man-standing algorithm, for the CPUs in the
  *                              PM domain.
+ *
+ * GENPD_FLAG_RPM_ALWAYS_ON:    Instructs genpd to always keep the PM domain
+ *                              powered on except for system suspend.
  */
 #define GENPD_FLAG_PM_CLK        (1U << 0)
 #define GENPD_FLAG_IRQ_SAFE      (1U << 1)
 #define GENPD_FLAG_ALWAYS_ON     (1U << 2)
 #define GENPD_FLAG_ACTIVE_WAKEUP (1U << 3)
 #define GENPD_FLAG_CPU_DOMAIN    (1U << 4)
+#define GENPD_FLAG_RPM_ALWAYS_ON (1U << 5)
 
 enum gpd_status {
         GPD_STATE_ACTIVE = 0,   /* PM domain is active */