Merge branches 'pm-cpuidle', 'pm-core', 'pm-domains', 'pm-avs' and 'pm-devfreq'
* pm-cpuidle:
  cpuidle: powernv: Avoid a branch in the core snooze_loop() loop
  cpuidle: powernv: Don't continually set thread priority in snooze_loop()
  cpuidle: powernv: Don't bounce between low and very low thread priority
  cpuidle: cpuidle-cps: remove unused variable
  powernv-cpuidle: Validate DT property array size

* pm-core:
  PM / runtime: Document autosuspend-helper side effects
  PM / runtime: Fix autosuspend documentation

* pm-domains:
  PM / Domains: Ignore domain-idle-states that are not compatible
  PM / Domains: Don't warn about IRQ safe device for an always on PM domain
  PM / Domains: Respect errors from genpd's ->power_off() callback
  PM / Domains: Enable users of genpd to specify always on PM domains
  PM / Domains: Clean up code validating genpd's status
  PM / Domain: remove conditional from error case

* pm-avs:
  PM / AVS: rockchip-io: add io selectors and supplies for rk3328

* pm-devfreq:
  PM / devfreq: Move struct devfreq_governor to devfreq directory
commit 060d0fbb43
@@ -31,7 +31,9 @@ Optional properties:
 
 - domain-idle-states : A phandle of an idle-state that shall be soaked into a
                 generic domain power state. The idle state definitions are
-                compatible with domain-idle-state specified in [1].
+                compatible with domain-idle-state specified in [1]. phandles
+                that are not compatible with domain-idle-state will be
+                ignored.
   The domain-idle-state property reflects the idle state of this PM domain and
   not the idle states of the devices or sub-domains in the PM domain. Devices
   and sub-domains have their own idle-states independent of the parent

@@ -33,6 +33,7 @@ Required properties:
 - compatible: should be one of:
   - "rockchip,rk3188-io-voltage-domain" for rk3188
   - "rockchip,rk3288-io-voltage-domain" for rk3288
+  - "rockchip,rk3328-io-voltage-domain" for rk3328
   - "rockchip,rk3368-io-voltage-domain" for rk3368
   - "rockchip,rk3368-pmu-io-voltage-domain" for rk3368 pmu-domains
   - "rockchip,rk3399-io-voltage-domain" for rk3399

@@ -478,15 +478,23 @@ drivers/base/power/runtime.c and include/linux/pm_runtime.h:
     - set the power.last_busy field to the current time
 
 void pm_runtime_use_autosuspend(struct device *dev);
-    - set the power.use_autosuspend flag, enabling autosuspend delays
+    - set the power.use_autosuspend flag, enabling autosuspend delays; call
+      pm_runtime_get_sync if the flag was previously cleared and
+      power.autosuspend_delay is negative
 
 void pm_runtime_dont_use_autosuspend(struct device *dev);
-    - clear the power.use_autosuspend flag, disabling autosuspend delays
+    - clear the power.use_autosuspend flag, disabling autosuspend delays;
+      decrement the device's usage counter if the flag was previously set and
+      power.autosuspend_delay is negative; call pm_runtime_idle
 
 void pm_runtime_set_autosuspend_delay(struct device *dev, int delay);
     - set the power.autosuspend_delay value to 'delay' (expressed in
       milliseconds); if 'delay' is negative then runtime suspends are
-      prevented
+      prevented; if power.use_autosuspend is set, pm_runtime_get_sync may be
+      called or the device's usage counter may be decremented and
+      pm_runtime_idle called depending on if power.autosuspend_delay is
+      changed to or from a negative value; if power.use_autosuspend is clear,
+      pm_runtime_idle is called
 
 unsigned long pm_runtime_autosuspend_expiration(struct device *dev);
     - calculate the time when the current autosuspend delay period will expire,

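The autosuspend helpers documented above are typically combined in a driver roughly as in the sketch below. This is an illustrative example only, not part of the patch: the foo_* functions and the 2000 ms delay are made up, while the pm_runtime_* calls are the real helpers described in the documentation.

    #include <linux/pm_runtime.h>

    static int foo_probe(struct device *dev)
    {
            /* Let the device autosuspend 2 seconds after its last use. */
            pm_runtime_set_autosuspend_delay(dev, 2000);
            pm_runtime_use_autosuspend(dev);
            pm_runtime_enable(dev);
            return 0;
    }

    static void foo_io_done(struct device *dev)
    {
            /* Record activity, then drop the reference without suspending at once. */
            pm_runtime_mark_last_busy(dev);
            pm_runtime_put_autosuspend(dev);
    }
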
@@ -836,9 +844,8 @@ of the non-autosuspend counterparts:
 Instead of: pm_runtime_put_sync         use: pm_runtime_put_sync_autosuspend.
 
 Drivers may also continue to use the non-autosuspend helper functions; they
-will behave normally, not taking the autosuspend delay into account.
-Similarly, if the power.use_autosuspend field isn't set then the autosuspend
-helper functions will behave just like the non-autosuspend counterparts.
+will behave normally, which means sometimes taking the autosuspend delay into
+account (see pm_runtime_idle).
 
 Under some circumstances a driver or subsystem may want to prevent a device
 from autosuspending immediately, even though the usage counter is zero and the

@@ -121,7 +121,9 @@ static const struct genpd_lock_ops genpd_spin_ops = {
 #define genpd_lock_interruptible(p)     p->lock_ops->lock_interruptible(p)
 #define genpd_unlock(p)                 p->lock_ops->unlock(p)
 
+#define genpd_status_on(genpd)          (genpd->status == GPD_STATE_ACTIVE)
 #define genpd_is_irq_safe(genpd)        (genpd->flags & GENPD_FLAG_IRQ_SAFE)
+#define genpd_is_always_on(genpd)       (genpd->flags & GENPD_FLAG_ALWAYS_ON)
 
 static inline bool irq_safe_dev_in_no_sleep_domain(struct device *dev,
                 struct generic_pm_domain *genpd)

@@ -130,8 +132,12 @@ static inline bool irq_safe_dev_in_no_sleep_domain(struct device *dev,
 
         ret = pm_runtime_is_irq_safe(dev) && !genpd_is_irq_safe(genpd);
 
-        /* Warn once if IRQ safe dev in no sleep domain */
-        if (ret)
+        /*
+         * Warn once if an IRQ safe device is attached to a no sleep domain, as
+         * to indicate a suboptimal configuration for PM. For an always on
+         * domain this isn't case, thus don't warn.
+         */
+        if (ret && !genpd_is_always_on(genpd))
                 dev_warn_once(dev, "PM domain %s will not be powered off\n",
                                 genpd->name);
 
@@ -296,11 +302,15 @@ static int genpd_power_off(struct generic_pm_domain *genpd, bool one_dev_on,
          * (1) The domain is already in the "power off" state.
          * (2) System suspend is in progress.
          */
-        if (genpd->status == GPD_STATE_POWER_OFF
-            || genpd->prepared_count > 0)
+        if (!genpd_status_on(genpd) || genpd->prepared_count > 0)
                 return 0;
 
-        if (atomic_read(&genpd->sd_count) > 0)
+        /*
+         * Abort power off for the PM domain in the following situations:
+         * (1) The domain is configured as always on.
+         * (2) When the domain has a subdomain being powered on.
+         */
+        if (genpd_is_always_on(genpd) || atomic_read(&genpd->sd_count) > 0)
                 return -EBUSY;
 
         list_for_each_entry(pdd, &genpd->dev_list, list_node) {

@@ -373,7 +383,7 @@ static int genpd_power_on(struct generic_pm_domain *genpd, unsigned int depth)
         struct gpd_link *link;
         int ret = 0;
 
-        if (genpd->status == GPD_STATE_ACTIVE)
+        if (genpd_status_on(genpd))
                 return 0;
 
         /*

@@ -752,7 +762,7 @@ static void genpd_sync_power_off(struct generic_pm_domain *genpd, bool use_lock,
 {
         struct gpd_link *link;
 
-        if (genpd->status == GPD_STATE_POWER_OFF)
+        if (!genpd_status_on(genpd) || genpd_is_always_on(genpd))
                 return;
 
         if (genpd->suspended_count != genpd->device_count

@@ -761,7 +771,8 @@ static void genpd_sync_power_off(struct generic_pm_domain *genpd, bool use_lock,
 
         /* Choose the deepest state when suspending */
         genpd->state_idx = genpd->state_count - 1;
-        _genpd_power_off(genpd, false);
+        if (_genpd_power_off(genpd, false))
+                return;
 
         genpd->status = GPD_STATE_POWER_OFF;
 
@@ -793,7 +804,7 @@ static void genpd_sync_power_on(struct generic_pm_domain *genpd, bool use_lock,
 {
         struct gpd_link *link;
 
-        if (genpd->status == GPD_STATE_ACTIVE)
+        if (genpd_status_on(genpd))
                 return;
 
         list_for_each_entry(link, &genpd->slave_links, slave_node) {

@@ -1329,8 +1340,7 @@ static int genpd_add_subdomain(struct generic_pm_domain *genpd,
         genpd_lock(subdomain);
         genpd_lock_nested(genpd, SINGLE_DEPTH_NESTING);
 
-        if (genpd->status == GPD_STATE_POWER_OFF
-            && subdomain->status != GPD_STATE_POWER_OFF) {
+        if (!genpd_status_on(genpd) && genpd_status_on(subdomain)) {
                 ret = -EINVAL;
                 goto out;
         }

@@ -1346,7 +1356,7 @@ static int genpd_add_subdomain(struct generic_pm_domain *genpd,
         list_add_tail(&link->master_node, &genpd->master_links);
         link->slave = subdomain;
         list_add_tail(&link->slave_node, &subdomain->slave_links);
-        if (subdomain->status != GPD_STATE_POWER_OFF)
+        if (genpd_status_on(subdomain))
                 genpd_sd_counter_inc(genpd);
 
  out:

@@ -1406,7 +1416,7 @@ int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
                 list_del(&link->master_node);
                 list_del(&link->slave_node);
                 kfree(link);
-                if (subdomain->status != GPD_STATE_POWER_OFF)
+                if (genpd_status_on(subdomain))
                         genpd_sd_counter_dec(genpd);
 
                 ret = 0;

@@ -1492,6 +1502,10 @@ int pm_genpd_init(struct generic_pm_domain *genpd,
                 genpd->dev_ops.start = pm_clk_resume;
         }
 
+        /* Always-on domains must be powered on at initialization. */
+        if (genpd_is_always_on(genpd) && !genpd_status_on(genpd))
+                return -EINVAL;
+
         /* Use only one "off" state if there were no states declared */
         if (genpd->state_count == 0) {
                 ret = genpd_set_default_power_state(genpd);

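For reference, a platform that wants an always-on domain would set the new flag and register the domain in the powered-on state, roughly as below. This is a hedged usage sketch, not part of the diff: the domain name and setup function are hypothetical, while GENPD_FLAG_ALWAYS_ON and pm_genpd_init() are the interfaces touched by this change.

    static struct generic_pm_domain foo_always_on_pd = {
            .name = "foo-always-on",
            .flags = GENPD_FLAG_ALWAYS_ON,
    };

    static int foo_pd_setup(void)
    {
            /* Register as powered on; pm_genpd_init() now returns -EINVAL otherwise. */
            return pm_genpd_init(&foo_always_on_pd, NULL, false);
    }
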
@@ -1700,12 +1714,12 @@ int of_genpd_add_provider_simple(struct device_node *np,
 
         mutex_lock(&gpd_list_lock);
 
-        if (pm_genpd_present(genpd))
+        if (pm_genpd_present(genpd)) {
                 ret = genpd_add_provider(np, genpd_xlate_simple, genpd);
 
-        if (!ret) {
-                genpd->provider = &np->fwnode;
-                genpd->has_provider = true;
+                if (!ret) {
+                        genpd->provider = &np->fwnode;
+                        genpd->has_provider = true;
+                }
         }
 
         mutex_unlock(&gpd_list_lock);

@@ -2079,11 +2093,6 @@ static int genpd_parse_state(struct genpd_power_state *genpd_state,
         int err;
         u32 residency;
         u32 entry_latency, exit_latency;
-        const struct of_device_id *match_id;
-
-        match_id = of_match_node(idle_state_match, state_node);
-        if (!match_id)
-                return -EINVAL;
 
         err = of_property_read_u32(state_node, "entry-latency-us",
                                 &entry_latency);

@@ -2132,6 +2141,7 @@ int of_genpd_parse_idle_states(struct device_node *dn,
         int err, ret;
         int count;
         struct of_phandle_iterator it;
+        const struct of_device_id *match_id;
 
         count = of_count_phandle_with_args(dn, "domain-idle-states", NULL);
         if (count <= 0)

@@ -2144,6 +2154,9 @@ int of_genpd_parse_idle_states(struct device_node *dn,
         /* Loop over the phandles until all the requested entry is found */
         of_for_each_phandle(&it, err, dn, "domain-idle-states", NULL, 0) {
                 np = it.node;
+                match_id = of_match_node(idle_state_match, np);
+                if (!match_id)
+                        continue;
                 ret = genpd_parse_state(&st[i++], np);
                 if (ret) {
                         pr_err

@@ -2155,8 +2168,11 @@ int of_genpd_parse_idle_states(struct device_node *dn,
                 }
         }
 
-        *n = count;
-        *states = st;
+        *n = i;
+        if (!i)
+                kfree(st);
+        else
+                *states = st;
 
         return 0;
 }

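A caller of of_genpd_parse_idle_states() keeps working unchanged, but should now expect a zero state count when none of the referenced nodes are compatible. Roughly, as an assumed provider sketch (the function name is hypothetical):

    static int foo_pd_init_states(struct generic_pm_domain *genpd,
                                  struct device_node *np)
    {
            struct genpd_power_state *states = NULL;
            int nr_states = 0;
            int ret;

            ret = of_genpd_parse_idle_states(np, &states, &nr_states);
            if (ret)
                    return ret;

            /* nr_states may be 0 if every phandle was ignored as incompatible. */
            if (nr_states) {
                    genpd->states = states;
                    genpd->state_count = nr_states;
            }
            return 0;
    }
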
@@ -2221,7 +2237,7 @@ static int pm_genpd_summary_one(struct seq_file *s,
 
         if (WARN_ON(genpd->status >= ARRAY_SIZE(status_lookup)))
                 goto exit;
-        if (genpd->status == GPD_STATE_POWER_OFF)
+        if (!genpd_status_on(genpd))
                 snprintf(state, sizeof(state), "%s-%u",
                          status_lookup[genpd->status], genpd->state_idx);
         else

@@ -118,7 +118,7 @@ static void __init cps_cpuidle_unregister(void)
 
 static int __init cps_cpuidle_init(void)
 {
-        int err, cpu, core, i;
+        int err, cpu, i;
         struct cpuidle_device *device;
 
         /* Detect supported states */

@@ -160,7 +160,6 @@ static int __init cps_cpuidle_init(void)
         }
 
         for_each_possible_cpu(cpu) {
-                core = cpu_data[cpu].core;
                 device = &per_cpu(cpuidle_dev, cpu);
                 device->cpu = cpu;
 #ifdef CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED

@@ -56,10 +56,9 @@ static int snooze_loop(struct cpuidle_device *dev,
 
         snooze_exit_time = get_tb() + snooze_timeout;
         ppc64_runlatch_off();
+        HMT_very_low();
         while (!need_resched()) {
-                HMT_low();
-                HMT_very_low();
-                if (snooze_timeout_en && get_tb() > snooze_exit_time)
+                if (likely(snooze_timeout_en) && get_tb() > snooze_exit_time)
                         break;
         }
 
@@ -215,11 +214,25 @@ static inline void add_powernv_state(int index, const char *name,
         stop_psscr_table[index].mask = psscr_mask;
 }
 
+/*
+ * Returns 0 if prop1_len == prop2_len. Else returns -1
+ */
+static inline int validate_dt_prop_sizes(const char *prop1, int prop1_len,
+                                         const char *prop2, int prop2_len)
+{
+        if (prop1_len == prop2_len)
+                return 0;
+
+        pr_warn("cpuidle-powernv: array sizes don't match for %s and %s\n",
+                prop1, prop2);
+        return -1;
+}
+
 static int powernv_add_idle_states(void)
 {
         struct device_node *power_mgt;
         int nr_idle_states = 1; /* Snooze */
-        int dt_idle_states;
+        int dt_idle_states, count;
         u32 latency_ns[CPUIDLE_STATE_MAX];
         u32 residency_ns[CPUIDLE_STATE_MAX];
         u32 flags[CPUIDLE_STATE_MAX];

@@ -244,6 +257,21 @@ static int powernv_add_idle_states(void)
                 goto out;
         }
 
+        count = of_property_count_u32_elems(power_mgt,
+                                            "ibm,cpu-idle-state-latencies-ns");
+
+        if (validate_dt_prop_sizes("ibm,cpu-idle-state-flags", dt_idle_states,
+                                   "ibm,cpu-idle-state-latencies-ns",
+                                   count) != 0)
+                goto out;
+
+        count = of_property_count_strings(power_mgt,
+                                          "ibm,cpu-idle-state-names");
+        if (validate_dt_prop_sizes("ibm,cpu-idle-state-flags", dt_idle_states,
+                                   "ibm,cpu-idle-state-names",
+                                   count) != 0)
+                goto out;
+
         /*
          * Since snooze is used as first idle state, max idle states allowed is
          * CPUIDLE_STATE_MAX -1

@@ -278,6 +306,22 @@ static int powernv_add_idle_states(void)
         has_stop_states = (flags[0] &
                            (OPAL_PM_STOP_INST_FAST | OPAL_PM_STOP_INST_DEEP));
         if (has_stop_states) {
+                count = of_property_count_u64_elems(power_mgt,
+                                                    "ibm,cpu-idle-state-psscr");
+                if (validate_dt_prop_sizes("ibm,cpu-idle-state-flags",
+                                           dt_idle_states,
+                                           "ibm,cpu-idle-state-psscr",
+                                           count) != 0)
+                        goto out;
+
+                count = of_property_count_u64_elems(power_mgt,
+                                        "ibm,cpu-idle-state-psscr-mask");
+                if (validate_dt_prop_sizes("ibm,cpu-idle-state-flags",
+                                           dt_idle_states,
+                                           "ibm,cpu-idle-state-psscr-mask",
+                                           count) != 0)
+                        goto out;
+
                 if (of_property_read_u64_array(power_mgt,
                         "ibm,cpu-idle-state-psscr", psscr_val, dt_idle_states)) {
                         pr_warn("cpuidle-powernv: missing ibm,cpu-idle-state-psscr in DT\n");

@@ -292,8 +336,21 @@ static int powernv_add_idle_states(void)
                 }
         }
 
-        rc = of_property_read_u32_array(power_mgt,
-                "ibm,cpu-idle-state-residency-ns", residency_ns, dt_idle_states);
+        count = of_property_count_u32_elems(power_mgt,
+                                            "ibm,cpu-idle-state-residency-ns");
+
+        if (count < 0) {
+                rc = count;
+        } else if (validate_dt_prop_sizes("ibm,cpu-idle-state-flags",
+                                        dt_idle_states,
+                                        "ibm,cpu-idle-state-residency-ns",
+                                        count) != 0) {
+                goto out;
+        } else {
+                rc = of_property_read_u32_array(power_mgt,
+                                                "ibm,cpu-idle-state-residency-ns",
+                                                residency_ns, dt_idle_states);
+        }
 
         for (i = 0; i < dt_idle_states; i++) {
                 unsigned int exit_latency, target_residency;

@@ -25,6 +25,35 @@
 #define DEVFREQ_GOV_SUSPEND                     0x4
 #define DEVFREQ_GOV_RESUME                      0x5
 
+/**
+ * struct devfreq_governor - Devfreq policy governor
+ * @node:               list node - contains registered devfreq governors
+ * @name:               Governor's name
+ * @immutable:          Immutable flag for governor. If the value is 1,
+ *                      this govenror is never changeable to other governor.
+ * @get_target_freq:    Returns desired operating frequency for the device.
+ *                      Basically, get_target_freq will run
+ *                      devfreq_dev_profile.get_dev_status() to get the
+ *                      status of the device (load = busy_time / total_time).
+ *                      If no_central_polling is set, this callback is called
+ *                      only with update_devfreq() notified by OPP.
+ * @event_handler:      Callback for devfreq core framework to notify events
+ *                      to governors. Events include per device governor
+ *                      init and exit, opp changes out of devfreq, suspend
+ *                      and resume of per device devfreq during device idle.
+ *
+ * Note that the callbacks are called with devfreq->lock locked by devfreq.
+ */
+struct devfreq_governor {
+        struct list_head node;
+
+        const char name[DEVFREQ_NAME_LEN];
+        const unsigned int immutable;
+        int (*get_target_freq)(struct devfreq *this, unsigned long *freq);
+        int (*event_handler)(struct devfreq *devfreq,
+                                unsigned int event, void *data);
+};
+
 /* Caution: devfreq->lock must be locked before calling update_devfreq */
 extern int update_devfreq(struct devfreq *devfreq);
 
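With the structure now private to the devfreq directory, only in-tree governors can be built against it. A minimal governor modeled on the existing simple governors looks roughly like the sketch below; the "demo" name and the constant frequency request are illustrative, and registration goes through devfreq_add_governor()/devfreq_remove_governor() from the same header.

    #include "governor.h"       /* now the only place struct devfreq_governor is defined */

    static int demo_get_target_freq(struct devfreq *df, unsigned long *freq)
    {
            *freq = ULONG_MAX;  /* devfreq clamps this to the highest usable frequency */
            return 0;
    }

    static int demo_event_handler(struct devfreq *devfreq,
                                  unsigned int event, void *data)
    {
            return 0;           /* nothing to do on init/exit/suspend/resume */
    }

    static struct devfreq_governor demo_governor = {
            .name = "demo",
            .get_target_freq = demo_get_target_freq,
            .event_handler = demo_event_handler,
    };
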
@@ -43,6 +43,10 @@
 #define RK3288_SOC_CON2_FLASH0          BIT(7)
 #define RK3288_SOC_FLASH_SUPPLY_NUM     2
 
+#define RK3328_SOC_CON4                 0x410
+#define RK3328_SOC_CON4_VCCIO2          BIT(7)
+#define RK3328_SOC_VCCIO2_SUPPLY_NUM    1
+
 #define RK3368_SOC_CON15                0x43c
 #define RK3368_SOC_CON15_FLASH0         BIT(14)
 #define RK3368_SOC_FLASH_SUPPLY_NUM     2

@@ -166,6 +170,25 @@ static void rk3288_iodomain_init(struct rockchip_iodomain *iod)
                 dev_warn(iod->dev, "couldn't update flash0 ctrl\n");
 }
 
+static void rk3328_iodomain_init(struct rockchip_iodomain *iod)
+{
+        int ret;
+        u32 val;
+
+        /* if no vccio2 supply we should leave things alone */
+        if (!iod->supplies[RK3328_SOC_VCCIO2_SUPPLY_NUM].reg)
+                return;
+
+        /*
+         * set vccio2 iodomain to also use this framework
+         * instead of a special gpio.
+         */
+        val = RK3328_SOC_CON4_VCCIO2 | (RK3328_SOC_CON4_VCCIO2 << 16);
+        ret = regmap_write(iod->grf, RK3328_SOC_CON4, val);
+        if (ret < 0)
+                dev_warn(iod->dev, "couldn't update vccio2 vsel ctrl\n");
+}
+
 static void rk3368_iodomain_init(struct rockchip_iodomain *iod)
 {
         int ret;

@@ -247,6 +270,20 @@ static const struct rockchip_iodomain_soc_data soc_data_rk3288 = {
         .init = rk3288_iodomain_init,
 };
 
+static const struct rockchip_iodomain_soc_data soc_data_rk3328 = {
+        .grf_offset = 0x410,
+        .supply_names = {
+                "vccio1",
+                "vccio2",
+                "vccio3",
+                "vccio4",
+                "vccio5",
+                "vccio6",
+                "pmuio",
+        },
+        .init = rk3328_iodomain_init,
+};
+
 static const struct rockchip_iodomain_soc_data soc_data_rk3368 = {
         .grf_offset = 0x900,
         .supply_names = {

@@ -311,6 +348,10 @@ static const struct of_device_id rockchip_iodomain_match[] = {
                 .compatible = "rockchip,rk3288-io-voltage-domain",
                 .data = (void *)&soc_data_rk3288
         },
+        {
+                .compatible = "rockchip,rk3328-io-voltage-domain",
+                .data = (void *)&soc_data_rk3328
+        },
         {
                 .compatible = "rockchip,rk3368-io-voltage-domain",
                 .data = (void *)&soc_data_rk3368

@@ -27,6 +27,7 @@
 #define DEVFREQ_POSTCHANGE              (1)
 
 struct devfreq;
+struct devfreq_governor;
 
 /**
  * struct devfreq_dev_status - Data given from devfreq user device to

@@ -100,35 +101,6 @@ struct devfreq_dev_profile {
         unsigned int max_state;
 };
 
-/**
- * struct devfreq_governor - Devfreq policy governor
- * @node:               list node - contains registered devfreq governors
- * @name:               Governor's name
- * @immutable:          Immutable flag for governor. If the value is 1,
- *                      this govenror is never changeable to other governor.
- * @get_target_freq:    Returns desired operating frequency for the device.
- *                      Basically, get_target_freq will run
- *                      devfreq_dev_profile.get_dev_status() to get the
- *                      status of the device (load = busy_time / total_time).
- *                      If no_central_polling is set, this callback is called
- *                      only with update_devfreq() notified by OPP.
- * @event_handler:      Callback for devfreq core framework to notify events
- *                      to governors. Events include per device governor
- *                      init and exit, opp changes out of devfreq, suspend
- *                      and resume of per device devfreq during device idle.
- *
- * Note that the callbacks are called with devfreq->lock locked by devfreq.
- */
-struct devfreq_governor {
-        struct list_head node;
-
-        const char name[DEVFREQ_NAME_LEN];
-        const unsigned int immutable;
-        int (*get_target_freq)(struct devfreq *this, unsigned long *freq);
-        int (*event_handler)(struct devfreq *devfreq,
-                                unsigned int event, void *data);
-};
-
 /**
  * struct devfreq - Device devfreq structure
  * @node:       list node - contains the devices with devfreq that have been

@@ -20,6 +20,7 @@
 /* Defines used for the flags field in the struct generic_pm_domain */
 #define GENPD_FLAG_PM_CLK        (1U << 0) /* PM domain uses PM clk */
 #define GENPD_FLAG_IRQ_SAFE      (1U << 1) /* PM domain operates in atomic */
+#define GENPD_FLAG_ALWAYS_ON     (1U << 2) /* PM domain is always powered on */
 
 enum gpd_status {
         GPD_STATE_ACTIVE = 0,    /* PM domain is active */