Merge branch 'cpufreq/arm/linux-next' of git://git.kernel.org/pub/scm/linux/kernel/git/vireshk/pm
Pull ARM cpufreq updates for 5.10-rc1 from Viresh Kumar:

"- STI cpufreq driver updates to allow new hardware (Alain Volmat).
 - Minor tegra driver fixes around initial frequency mismatch warnings (Jon Hunter).
 - dev_err simplification for s5pv210 driver (Krzysztof Kozlowski).
 - Qcom driver updates to allow new hardware and minor cleanup (Manivannan Sadhasivam and Matthias Kaehlcke).
 - Add missing MODULE_DEVICE_TABLE for armada driver (Pali Rohár).
 - Improved defer-probe handling in cpufreq-dt driver (Stephan Gerhold).
 - Call dev_pm_opp_of_remove_table() unconditionally for imx driver (Viresh Kumar)."

* 'cpufreq/arm/linux-next' of git://git.kernel.org/pub/scm/linux/kernel/git/vireshk/pm:
  cpufreq: qcom: Don't add frequencies without an OPP
  cpufreq: qcom-hw: Add cpufreq support for SM8250 SoC
  cpufreq: qcom-hw: Use of_device_get_match_data for offsets and row size
  cpufreq: qcom-hw: Use devm_platform_ioremap_resource() to simplify code
  dt-bindings: cpufreq: cpufreq-qcom-hw: Document Qcom EPSS compatible
  cpufreq: qcom-hw: Make use of cpufreq driver_data for passing pdev
  cpufreq: armada-37xx: Add missing MODULE_DEVICE_TABLE
  cpufreq: arm: Kconfig: add CPUFREQ_DT depend for STI CPUFREQ
  cpufreq: dt-platdev: Blacklist st,stih418 SoC
  cpufreq: sti-cpufreq: add stih418 support
  cpufreq: s5pv210: Use dev_err instead of pr_err in probe
  cpufreq: s5pv210: Simplify with dev_err_probe()
  cpufreq: tegra186: Fix initial frequency
  cpufreq: dt: Refactor initialization to handle probe deferral properly
  opp: Handle multiple calls for same OPP table in _of_add_opp_table_v1()
  cpufreq: imx6q: Unconditionally call dev_pm_opp_of_remove_table()
  opp: Allow dev_pm_opp_get_opp_table() to return -EPROBE_DEFER
commit a17a733e37
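The common thread in the OPP and cpufreq-dt changes below is that dev_pm_opp_get_opp_table() and its _indexed variant no longer return NULL on failure; they return an ERR_PTR(), including ERR_PTR(-EPROBE_DEFER) while clocks or regulators are still missing, so callers switch from NULL checks to IS_ERR() and propagate the deferral. A minimal caller sketch follows; the probe function and the my_* names are hypothetical, only the OPP calls and the error-handling shape are taken from the diffs below.

    #include <linux/err.h>
    #include <linux/platform_device.h>
    #include <linux/pm_opp.h>

    /* Hypothetical consumer probe, illustrating the new error handling only. */
    static int my_consumer_probe(struct platform_device *pdev)
    {
        struct opp_table *opp_table;
        int ret;

        /* After this series: never NULL, failures come back as ERR_PTR() */
        opp_table = dev_pm_opp_get_opp_table(&pdev->dev);
        if (IS_ERR(opp_table)) {
            ret = PTR_ERR(opp_table);
            /* -EPROBE_DEFER is expected while resources register; stay quiet */
            if (ret != -EPROBE_DEFER)
                dev_err(&pdev->dev, "failed to get OPP table: %d\n", ret);
            return ret;
        }

        /* ... use the table ... */

        dev_pm_opp_put_opp_table(opp_table);
        return 0;
    }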
diff --git a/Documentation/devicetree/bindings/cpufreq/cpufreq-qcom-hw.txt b/Documentation/devicetree/bindings/cpufreq/cpufreq-qcom-hw.txt
@@ -8,7 +8,7 @@ Properties:
 - compatible
     Usage:      required
     Value type: <string>
-    Definition: must be "qcom,cpufreq-hw".
+    Definition: must be "qcom,cpufreq-hw" or "qcom,cpufreq-epss".

 - clocks
     Usage:      required
diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
@@ -2044,8 +2044,9 @@ int of_genpd_add_provider_simple(struct device_node *np,
     if (genpd->set_performance_state) {
         ret = dev_pm_opp_of_add_table(&genpd->dev);
         if (ret) {
-            dev_err(&genpd->dev, "Failed to add OPP table: %d\n",
-                ret);
+            if (ret != -EPROBE_DEFER)
+                dev_err(&genpd->dev, "Failed to add OPP table: %d\n",
+                    ret);
             goto unlock;
         }

@@ -2054,7 +2055,7 @@ int of_genpd_add_provider_simple(struct device_node *np,
          * state.
          */
         genpd->opp_table = dev_pm_opp_get_opp_table(&genpd->dev);
-        WARN_ON(!genpd->opp_table);
+        WARN_ON(IS_ERR(genpd->opp_table));
     }

     ret = genpd_add_provider(np, genpd_xlate_simple, genpd);

@@ -2111,8 +2112,9 @@ int of_genpd_add_provider_onecell(struct device_node *np,
     if (genpd->set_performance_state) {
         ret = dev_pm_opp_of_add_table_indexed(&genpd->dev, i);
         if (ret) {
-            dev_err(&genpd->dev, "Failed to add OPP table for index %d: %d\n",
-                i, ret);
+            if (ret != -EPROBE_DEFER)
+                dev_err(&genpd->dev, "Failed to add OPP table for index %d: %d\n",
+                    i, ret);
             goto error;
         }

@@ -2121,7 +2123,7 @@ int of_genpd_add_provider_onecell(struct device_node *np,
          * performance state.
          */
         genpd->opp_table = dev_pm_opp_get_opp_table_indexed(&genpd->dev, i);
-        WARN_ON(!genpd->opp_table);
+        WARN_ON(IS_ERR(genpd->opp_table));
     }

     genpd->provider = &np->fwnode;
diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm
@@ -283,7 +283,7 @@ config ARM_SPEAR_CPUFREQ

 config ARM_STI_CPUFREQ
     tristate "STi CPUFreq support"
-    depends on SOC_STIH407
+    depends on CPUFREQ_DT && SOC_STIH407
     help
       This driver uses the generic OPP framework to match the running
       platform with a predefined set of suitable values. If not provided
diff --git a/drivers/cpufreq/armada-37xx-cpufreq.c b/drivers/cpufreq/armada-37xx-cpufreq.c
@@ -484,6 +484,12 @@ static int __init armada37xx_cpufreq_driver_init(void)
 /* late_initcall, to guarantee the driver is loaded after A37xx clock driver */
 late_initcall(armada37xx_cpufreq_driver_init);

+static const struct of_device_id __maybe_unused armada37xx_cpufreq_of_match[] = {
+    { .compatible = "marvell,armada-3700-nb-pm" },
+    { },
+};
+MODULE_DEVICE_TABLE(of, armada37xx_cpufreq_of_match);
+
 MODULE_AUTHOR("Gregory CLEMENT <gregory.clement@free-electrons.com>");
 MODULE_DESCRIPTION("Armada 37xx cpufreq driver");
 MODULE_LICENSE("GPL");
diff --git a/drivers/cpufreq/cpufreq-dt-platdev.c b/drivers/cpufreq/cpufreq-dt-platdev.c
@@ -137,6 +137,7 @@ static const struct of_device_id blacklist[] __initconst = {

     { .compatible = "st,stih407", },
     { .compatible = "st,stih410", },
+    { .compatible = "st,stih418", },

     { .compatible = "sigma,tango4", },
diff --git a/drivers/cpufreq/cpufreq-dt.c b/drivers/cpufreq/cpufreq-dt.c
@@ -13,6 +13,7 @@
 #include <linux/cpufreq.h>
 #include <linux/cpumask.h>
 #include <linux/err.h>
+#include <linux/list.h>
 #include <linux/module.h>
 #include <linux/of.h>
 #include <linux/pm_opp.h>

@@ -24,18 +25,35 @@
 #include "cpufreq-dt.h"

 struct private_data {
-    struct opp_table *opp_table;
+    struct list_head node;
+
+    cpumask_var_t cpus;
     struct device *cpu_dev;
-    const char *reg_name;
+    struct opp_table *opp_table;
+    struct opp_table *reg_opp_table;
     bool have_static_opps;
 };

+static LIST_HEAD(priv_list);
+
 static struct freq_attr *cpufreq_dt_attr[] = {
     &cpufreq_freq_attr_scaling_available_freqs,
     NULL,   /* Extra space for boost-attr if required */
     NULL,
 };

+static struct private_data *cpufreq_dt_find_data(int cpu)
+{
+    struct private_data *priv;
+
+    list_for_each_entry(priv, &priv_list, node) {
+        if (cpumask_test_cpu(cpu, priv->cpus))
+            return priv;
+    }
+
+    return NULL;
+}
+
 static int set_target(struct cpufreq_policy *policy, unsigned int index)
 {
     struct private_data *priv = policy->driver_data;

@@ -82,83 +100,24 @@ static const char *find_supply_name(struct device *dev)
     return name;
 }

-static int resources_available(void)
-{
-    struct device *cpu_dev;
-    struct regulator *cpu_reg;
-    struct clk *cpu_clk;
-    int ret = 0;
-    const char *name;
-
-    cpu_dev = get_cpu_device(0);
-    if (!cpu_dev) {
-        pr_err("failed to get cpu0 device\n");
-        return -ENODEV;
-    }
-
-    cpu_clk = clk_get(cpu_dev, NULL);
-    ret = PTR_ERR_OR_ZERO(cpu_clk);
-    if (ret) {
-        /*
-         * If cpu's clk node is present, but clock is not yet
-         * registered, we should try defering probe.
-         */
-        if (ret == -EPROBE_DEFER)
-            dev_dbg(cpu_dev, "clock not ready, retry\n");
-        else
-            dev_err(cpu_dev, "failed to get clock: %d\n", ret);
-
-        return ret;
-    }
-
-    clk_put(cpu_clk);
-
-    ret = dev_pm_opp_of_find_icc_paths(cpu_dev, NULL);
-    if (ret)
-        return ret;
-
-    name = find_supply_name(cpu_dev);
-    /* Platform doesn't require regulator */
-    if (!name)
-        return 0;
-
-    cpu_reg = regulator_get_optional(cpu_dev, name);
-    ret = PTR_ERR_OR_ZERO(cpu_reg);
-    if (ret) {
-        /*
-         * If cpu's regulator supply node is present, but regulator is
-         * not yet registered, we should try defering probe.
-         */
-        if (ret == -EPROBE_DEFER)
-            dev_dbg(cpu_dev, "cpu0 regulator not ready, retry\n");
-        else
-            dev_dbg(cpu_dev, "no regulator for cpu0: %d\n", ret);
-
-        return ret;
-    }
-
-    regulator_put(cpu_reg);
-    return 0;
-}
-
 static int cpufreq_init(struct cpufreq_policy *policy)
 {
     struct cpufreq_frequency_table *freq_table;
-    struct opp_table *opp_table = NULL;
     struct private_data *priv;
     struct device *cpu_dev;
     struct clk *cpu_clk;
     unsigned int transition_latency;
-    bool fallback = false;
-    const char *name;
     int ret;

-    cpu_dev = get_cpu_device(policy->cpu);
-    if (!cpu_dev) {
-        pr_err("failed to get cpu%d device\n", policy->cpu);
+    priv = cpufreq_dt_find_data(policy->cpu);
+    if (!priv) {
+        pr_err("failed to find data for cpu%d\n", policy->cpu);
         return -ENODEV;
     }

+    cpu_dev = priv->cpu_dev;
+    cpumask_copy(policy->cpus, priv->cpus);
+
     cpu_clk = clk_get(cpu_dev, NULL);
     if (IS_ERR(cpu_clk)) {
         ret = PTR_ERR(cpu_clk);

@@ -166,45 +125,6 @@ static int cpufreq_init(struct cpufreq_policy *policy)
         return ret;
     }

-    /* Get OPP-sharing information from "operating-points-v2" bindings */
-    ret = dev_pm_opp_of_get_sharing_cpus(cpu_dev, policy->cpus);
-    if (ret) {
-        if (ret != -ENOENT)
-            goto out_put_clk;
-
-        /*
-         * operating-points-v2 not supported, fallback to old method of
-         * finding shared-OPPs for backward compatibility if the
-         * platform hasn't set sharing CPUs.
-         */
-        if (dev_pm_opp_get_sharing_cpus(cpu_dev, policy->cpus))
-            fallback = true;
-    }
-
-    /*
-     * OPP layer will be taking care of regulators now, but it needs to know
-     * the name of the regulator first.
-     */
-    name = find_supply_name(cpu_dev);
-    if (name) {
-        opp_table = dev_pm_opp_set_regulators(cpu_dev, &name, 1);
-        if (IS_ERR(opp_table)) {
-            ret = PTR_ERR(opp_table);
-            dev_err(cpu_dev, "Failed to set regulator for cpu%d: %d\n",
-                policy->cpu, ret);
-            goto out_put_clk;
-        }
-    }
-
-    priv = kzalloc(sizeof(*priv), GFP_KERNEL);
-    if (!priv) {
-        ret = -ENOMEM;
-        goto out_put_regulator;
-    }
-
-    priv->reg_name = name;
-    priv->opp_table = opp_table;
-
     /*
      * Initialize OPP tables for all policy->cpus. They will be shared by
      * all CPUs which have marked their CPUs shared with OPP bindings.

@@ -224,31 +144,17 @@ static int cpufreq_init(struct cpufreq_policy *policy)
      */
     ret = dev_pm_opp_get_opp_count(cpu_dev);
     if (ret <= 0) {
-        dev_dbg(cpu_dev, "OPP table is not ready, deferring probe\n");
-        ret = -EPROBE_DEFER;
+        dev_err(cpu_dev, "OPP table can't be empty\n");
+        ret = -ENODEV;
         goto out_free_opp;
     }

-    if (fallback) {
-        cpumask_setall(policy->cpus);
-
-        /*
-         * OPP tables are initialized only for policy->cpu, do it for
-         * others as well.
-         */
-        ret = dev_pm_opp_set_sharing_cpus(cpu_dev, policy->cpus);
-        if (ret)
-            dev_err(cpu_dev, "%s: failed to mark OPPs as shared: %d\n",
-                __func__, ret);
-    }
-
     ret = dev_pm_opp_init_cpufreq_table(cpu_dev, &freq_table);
     if (ret) {
         dev_err(cpu_dev, "failed to init cpufreq table: %d\n", ret);
         goto out_free_opp;
     }

-    priv->cpu_dev = cpu_dev;
     policy->driver_data = priv;
     policy->clk = cpu_clk;
     policy->freq_table = freq_table;

@@ -280,11 +186,6 @@ static int cpufreq_init(struct cpufreq_policy *policy)
 out_free_opp:
     if (priv->have_static_opps)
         dev_pm_opp_of_cpumask_remove_table(policy->cpus);
-    kfree(priv);
-out_put_regulator:
-    if (name)
-        dev_pm_opp_put_regulators(opp_table);
-out_put_clk:
     clk_put(cpu_clk);

     return ret;

@@ -312,12 +213,7 @@ static int cpufreq_exit(struct cpufreq_policy *policy)
     dev_pm_opp_free_cpufreq_table(priv->cpu_dev, &policy->freq_table);
     if (priv->have_static_opps)
         dev_pm_opp_of_cpumask_remove_table(policy->related_cpus);
-    if (priv->reg_name)
-        dev_pm_opp_put_regulators(priv->opp_table);
-
     clk_put(policy->clk);
-    kfree(priv);

     return 0;
 }

@@ -336,21 +232,119 @@ static struct cpufreq_driver dt_cpufreq_driver = {
     .suspend = cpufreq_generic_suspend,
 };

+static int dt_cpufreq_early_init(struct device *dev, int cpu)
+{
+    struct private_data *priv;
+    struct device *cpu_dev;
+    const char *reg_name;
+    int ret;
+
+    /* Check if this CPU is already covered by some other policy */
+    if (cpufreq_dt_find_data(cpu))
+        return 0;
+
+    cpu_dev = get_cpu_device(cpu);
+    if (!cpu_dev)
+        return -EPROBE_DEFER;
+
+    priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+    if (!priv)
+        return -ENOMEM;
+
+    if (!alloc_cpumask_var(&priv->cpus, GFP_KERNEL))
+        return -ENOMEM;
+
+    priv->cpu_dev = cpu_dev;
+
+    /* Try to get OPP table early to ensure resources are available */
+    priv->opp_table = dev_pm_opp_get_opp_table(cpu_dev);
+    if (IS_ERR(priv->opp_table)) {
+        ret = PTR_ERR(priv->opp_table);
+        if (ret != -EPROBE_DEFER)
+            dev_err(cpu_dev, "failed to get OPP table: %d\n", ret);
+        goto free_cpumask;
+    }
+
+    /*
+     * OPP layer will be taking care of regulators now, but it needs to know
+     * the name of the regulator first.
+     */
+    reg_name = find_supply_name(cpu_dev);
+    if (reg_name) {
+        priv->reg_opp_table = dev_pm_opp_set_regulators(cpu_dev,
+                                &reg_name, 1);
+        if (IS_ERR(priv->reg_opp_table)) {
+            ret = PTR_ERR(priv->reg_opp_table);
+            if (ret != -EPROBE_DEFER)
+                dev_err(cpu_dev, "failed to set regulators: %d\n",
+                    ret);
+            goto put_table;
+        }
+    }
+
+    /* Find OPP sharing information so we can fill pri->cpus here */
+    /* Get OPP-sharing information from "operating-points-v2" bindings */
+    ret = dev_pm_opp_of_get_sharing_cpus(cpu_dev, priv->cpus);
+    if (ret) {
+        if (ret != -ENOENT)
+            goto put_reg;
+
+        /*
+         * operating-points-v2 not supported, fallback to all CPUs share
+         * OPP for backward compatibility if the platform hasn't set
+         * sharing CPUs.
+         */
+        if (dev_pm_opp_get_sharing_cpus(cpu_dev, priv->cpus)) {
+            cpumask_setall(priv->cpus);
+
+            /*
+             * OPP tables are initialized only for cpu, do it for
+             * others as well.
+             */
+            ret = dev_pm_opp_set_sharing_cpus(cpu_dev, priv->cpus);
+            if (ret)
+                dev_err(cpu_dev, "%s: failed to mark OPPs as shared: %d\n",
+                    __func__, ret);
+        }
+    }
+
+    list_add(&priv->node, &priv_list);
+    return 0;
+
+put_reg:
+    if (priv->reg_opp_table)
+        dev_pm_opp_put_regulators(priv->reg_opp_table);
+put_table:
+    dev_pm_opp_put_opp_table(priv->opp_table);
+free_cpumask:
+    free_cpumask_var(priv->cpus);
+    return ret;
+}
+
+static void dt_cpufreq_release(void)
+{
+    struct private_data *priv, *tmp;
+
+    list_for_each_entry_safe(priv, tmp, &priv_list, node) {
+        if (priv->reg_opp_table)
+            dev_pm_opp_put_regulators(priv->reg_opp_table);
+        dev_pm_opp_put_opp_table(priv->opp_table);
+        free_cpumask_var(priv->cpus);
+        list_del(&priv->node);
+    }
+}
+
 static int dt_cpufreq_probe(struct platform_device *pdev)
 {
     struct cpufreq_dt_platform_data *data = dev_get_platdata(&pdev->dev);
-    int ret;
+    int ret, cpu;

-    /*
-     * All per-cluster (CPUs sharing clock/voltages) initialization is done
-     * from ->init(). In probe(), we just need to make sure that clk and
-     * regulators are available. Else defer probe and retry.
-     *
-     * FIXME: Is checking this only for CPU0 sufficient ?
-     */
-    ret = resources_available();
-    if (ret)
-        return ret;
+    /* Request resources early so we can return in case of -EPROBE_DEFER */
+    for_each_possible_cpu(cpu) {
+        ret = dt_cpufreq_early_init(&pdev->dev, cpu);
+        if (ret)
+            goto err;
+    }

     if (data) {
         if (data->have_governor_per_policy)

@@ -366,15 +360,21 @@ static int dt_cpufreq_probe(struct platform_device *pdev)
     }

     ret = cpufreq_register_driver(&dt_cpufreq_driver);
-    if (ret)
+    if (ret) {
         dev_err(&pdev->dev, "failed register driver: %d\n", ret);
+        goto err;
+    }

+    return 0;
+err:
+    dt_cpufreq_release();
     return ret;
 }

 static int dt_cpufreq_remove(struct platform_device *pdev)
 {
     cpufreq_unregister_driver(&dt_cpufreq_driver);
+    dt_cpufreq_release();
     return 0;
 }
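The cpufreq-dt refactor above moves all resource acquisition into a per-CPU early-init step that runs from probe(), so any -EPROBE_DEFER reaches the driver core before cpufreq_register_driver() is called and ->init() never has to defer. A stripped-down sketch of that ordering, under assumed my_* names (not the driver's real symbols):

    #include <linux/cpu.h>
    #include <linux/cpumask.h>
    #include <linux/platform_device.h>

    /* Hypothetical early init: grab per-CPU resources or ask to be retried. */
    static int my_early_init(struct device *dev, int cpu)
    {
        struct device *cpu_dev = get_cpu_device(cpu);

        if (!cpu_dev)
            return -EPROBE_DEFER;   /* CPU device not registered yet */

        /* Clocks, regulators and the OPP table would be requested here,
         * returning -EPROBE_DEFER if any of them is not available yet. */
        return 0;
    }

    static int my_probe(struct platform_device *pdev)
    {
        int cpu, ret;

        /* Everything is requested up front: a deferral escapes from probe()
         * itself, before any cpufreq driver registration takes place. */
        for_each_possible_cpu(cpu) {
            ret = my_early_init(&pdev->dev, cpu);
            if (ret)
                return ret;
        }

        /* cpufreq_register_driver() would follow here, as in the diff above. */
        return 0;
    }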
diff --git a/drivers/cpufreq/imx6q-cpufreq.c b/drivers/cpufreq/imx6q-cpufreq.c
@@ -48,7 +48,6 @@ static struct clk_bulk_data clks[] = {
 };

 static struct device *cpu_dev;
-static bool free_opp;
 static struct cpufreq_frequency_table *freq_table;
 static unsigned int max_freq;
 static unsigned int transition_latency;

@@ -390,9 +389,6 @@ static int imx6q_cpufreq_probe(struct platform_device *pdev)
         goto put_reg;
     }

-    /* Because we have added the OPPs here, we must free them */
-    free_opp = true;
-
     if (of_machine_is_compatible("fsl,imx6ul") ||
         of_machine_is_compatible("fsl,imx6ull")) {
         ret = imx6ul_opp_check_speed_grading(cpu_dev);

@@ -507,8 +503,7 @@ static int imx6q_cpufreq_probe(struct platform_device *pdev)
 free_freq_table:
     dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table);
 out_free_opp:
-    if (free_opp)
-        dev_pm_opp_of_remove_table(cpu_dev);
+    dev_pm_opp_of_remove_table(cpu_dev);
 put_reg:
     if (!IS_ERR(arm_reg))
         regulator_put(arm_reg);

@@ -528,8 +523,7 @@ static int imx6q_cpufreq_remove(struct platform_device *pdev)
 {
     cpufreq_unregister_driver(&imx6q_cpufreq_driver);
     dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table);
-    if (free_opp)
-        dev_pm_opp_of_remove_table(cpu_dev);
+    dev_pm_opp_of_remove_table(cpu_dev);
     regulator_put(arm_reg);
     if (!IS_ERR(pu_reg))
         regulator_put(pu_reg);
diff --git a/drivers/cpufreq/qcom-cpufreq-hw.c b/drivers/cpufreq/qcom-cpufreq-hw.c
@@ -19,18 +19,23 @@
 #define LUT_L_VAL           GENMASK(7, 0)
 #define LUT_CORE_COUNT      GENMASK(18, 16)
 #define LUT_VOLT            GENMASK(11, 0)
-#define LUT_ROW_SIZE        32
 #define CLK_HW_DIV          2
 #define LUT_TURBO_IND       1

-/* Register offsets */
-#define REG_ENABLE          0x0
-#define REG_FREQ_LUT        0x110
-#define REG_VOLT_LUT        0x114
-#define REG_PERF_STATE      0x920
+struct qcom_cpufreq_soc_data {
+    u32 reg_enable;
+    u32 reg_freq_lut;
+    u32 reg_volt_lut;
+    u32 reg_perf_state;
+    u8 lut_row_size;
+};
+
+struct qcom_cpufreq_data {
+    void __iomem *base;
+    const struct qcom_cpufreq_soc_data *soc_data;
+};

 static unsigned long cpu_hw_rate, xo_rate;
-static struct platform_device *global_pdev;
 static bool icc_scaling_enabled;

 static int qcom_cpufreq_set_bw(struct cpufreq_policy *policy,

@@ -77,10 +82,11 @@ static int qcom_cpufreq_update_opp(struct device *cpu_dev,
 static int qcom_cpufreq_hw_target_index(struct cpufreq_policy *policy,
                     unsigned int index)
 {
-    void __iomem *perf_state_reg = policy->driver_data;
+    struct qcom_cpufreq_data *data = policy->driver_data;
+    const struct qcom_cpufreq_soc_data *soc_data = data->soc_data;
     unsigned long freq = policy->freq_table[index].frequency;

-    writel_relaxed(index, perf_state_reg);
+    writel_relaxed(index, data->base + soc_data->reg_perf_state);

     if (icc_scaling_enabled)
         qcom_cpufreq_set_bw(policy, freq);

@@ -90,7 +96,8 @@ static int qcom_cpufreq_hw_target_index(struct cpufreq_policy *policy,

 static unsigned int qcom_cpufreq_hw_get(unsigned int cpu)
 {
-    void __iomem *perf_state_reg;
+    struct qcom_cpufreq_data *data;
+    const struct qcom_cpufreq_soc_data *soc_data;
     struct cpufreq_policy *policy;
     unsigned int index;

@@ -98,9 +105,10 @@ static unsigned int qcom_cpufreq_hw_get(unsigned int cpu)
     if (!policy)
         return 0;

-    perf_state_reg = policy->driver_data;
+    data = policy->driver_data;
+    soc_data = data->soc_data;

-    index = readl_relaxed(perf_state_reg);
+    index = readl_relaxed(data->base + soc_data->reg_perf_state);
     index = min(index, LUT_MAX_ENTRIES - 1);

     return policy->freq_table[index].frequency;

@@ -109,18 +117,18 @@ static unsigned int qcom_cpufreq_hw_get(unsigned int cpu)
 static unsigned int qcom_cpufreq_hw_fast_switch(struct cpufreq_policy *policy,
                     unsigned int target_freq)
 {
-    void __iomem *perf_state_reg = policy->driver_data;
+    struct qcom_cpufreq_data *data = policy->driver_data;
+    const struct qcom_cpufreq_soc_data *soc_data = data->soc_data;
     unsigned int index;

     index = policy->cached_resolved_idx;
-    writel_relaxed(index, perf_state_reg);
+    writel_relaxed(index, data->base + soc_data->reg_perf_state);

     return policy->freq_table[index].frequency;
 }

 static int qcom_cpufreq_hw_read_lut(struct device *cpu_dev,
-                    struct cpufreq_policy *policy,
-                    void __iomem *base)
+                    struct cpufreq_policy *policy)
 {
     u32 data, src, lval, i, core_count, prev_freq = 0, freq;
     u32 volt;

@@ -128,6 +136,8 @@ static int qcom_cpufreq_hw_read_lut(struct device *cpu_dev,
     struct dev_pm_opp *opp;
     unsigned long rate;
     int ret;
+    struct qcom_cpufreq_data *drv_data = policy->driver_data;
+    const struct qcom_cpufreq_soc_data *soc_data = drv_data->soc_data;

     table = kcalloc(LUT_MAX_ENTRIES + 1, sizeof(*table), GFP_KERNEL);
     if (!table)

@@ -154,14 +164,14 @@ static int qcom_cpufreq_hw_read_lut(struct device *cpu_dev,
     }

     for (i = 0; i < LUT_MAX_ENTRIES; i++) {
-        data = readl_relaxed(base + REG_FREQ_LUT +
-                     i * LUT_ROW_SIZE);
+        data = readl_relaxed(drv_data->base + soc_data->reg_freq_lut +
+                     i * soc_data->lut_row_size);
         src = FIELD_GET(LUT_SRC, data);
         lval = FIELD_GET(LUT_L_VAL, data);
         core_count = FIELD_GET(LUT_CORE_COUNT, data);

-        data = readl_relaxed(base + REG_VOLT_LUT +
-                     i * LUT_ROW_SIZE);
+        data = readl_relaxed(drv_data->base + soc_data->reg_volt_lut +
+                     i * soc_data->lut_row_size);
         volt = FIELD_GET(LUT_VOLT, data) * 1000;

         if (src)

@@ -170,10 +180,15 @@ static int qcom_cpufreq_hw_read_lut(struct device *cpu_dev,
             freq = cpu_hw_rate / 1000;

         if (freq != prev_freq && core_count != LUT_TURBO_IND) {
-            table[i].frequency = freq;
-            qcom_cpufreq_update_opp(cpu_dev, freq, volt);
-            dev_dbg(cpu_dev, "index=%d freq=%d, core_count %d\n", i,
+            if (!qcom_cpufreq_update_opp(cpu_dev, freq, volt)) {
+                table[i].frequency = freq;
+                dev_dbg(cpu_dev, "index=%d freq=%d, core_count %d\n", i,
                 freq, core_count);
+            } else {
+                dev_warn(cpu_dev, "failed to update OPP for freq=%d\n", freq);
+                table[i].frequency = CPUFREQ_ENTRY_INVALID;
+            }

         } else if (core_count == LUT_TURBO_IND) {
             table[i].frequency = CPUFREQ_ENTRY_INVALID;
         }

@@ -190,9 +205,13 @@ static int qcom_cpufreq_hw_read_lut(struct device *cpu_dev,
              * as the boost frequency
              */
             if (prev->frequency == CPUFREQ_ENTRY_INVALID) {
-                prev->frequency = prev_freq;
-                prev->flags = CPUFREQ_BOOST_FREQ;
-                qcom_cpufreq_update_opp(cpu_dev, prev_freq, volt);
+                if (!qcom_cpufreq_update_opp(cpu_dev, prev_freq, volt)) {
+                    prev->frequency = prev_freq;
+                    prev->flags = CPUFREQ_BOOST_FREQ;
+                } else {
+                    dev_warn(cpu_dev, "failed to update OPP for freq=%d\n",
+                        freq);
+                }
             }

             break;

@@ -231,14 +250,38 @@ static void qcom_get_related_cpus(int index, struct cpumask *m)
     }
 }

+static const struct qcom_cpufreq_soc_data qcom_soc_data = {
+    .reg_enable = 0x0,
+    .reg_freq_lut = 0x110,
+    .reg_volt_lut = 0x114,
+    .reg_perf_state = 0x920,
+    .lut_row_size = 32,
+};
+
+static const struct qcom_cpufreq_soc_data epss_soc_data = {
+    .reg_enable = 0x0,
+    .reg_freq_lut = 0x100,
+    .reg_volt_lut = 0x200,
+    .reg_perf_state = 0x320,
+    .lut_row_size = 4,
+};
+
+static const struct of_device_id qcom_cpufreq_hw_match[] = {
+    { .compatible = "qcom,cpufreq-hw", .data = &qcom_soc_data },
+    { .compatible = "qcom,cpufreq-epss", .data = &epss_soc_data },
+    {}
+};
+MODULE_DEVICE_TABLE(of, qcom_cpufreq_hw_match);
+
 static int qcom_cpufreq_hw_cpu_init(struct cpufreq_policy *policy)
 {
-    struct device *dev = &global_pdev->dev;
+    struct platform_device *pdev = cpufreq_get_driver_data();
+    struct device *dev = &pdev->dev;
     struct of_phandle_args args;
     struct device_node *cpu_np;
     struct device *cpu_dev;
-    struct resource *res;
     void __iomem *base;
+    struct qcom_cpufreq_data *data;
     int ret, index;

     cpu_dev = get_cpu_device(policy->cpu);

@@ -260,16 +303,21 @@ static int qcom_cpufreq_hw_cpu_init(struct cpufreq_policy *policy)

     index = args.args[0];

-    res = platform_get_resource(global_pdev, IORESOURCE_MEM, index);
-    if (!res)
-        return -ENODEV;
+    base = devm_platform_ioremap_resource(pdev, index);
+    if (IS_ERR(base))
+        return PTR_ERR(base);

-    base = devm_ioremap(dev, res->start, resource_size(res));
-    if (!base)
-        return -ENOMEM;
+    data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
+    if (!data) {
+        ret = -ENOMEM;
+        goto error;
+    }
+
+    data->soc_data = of_device_get_match_data(&pdev->dev);
+    data->base = base;

     /* HW should be in enabled state to proceed */
-    if (!(readl_relaxed(base + REG_ENABLE) & 0x1)) {
+    if (!(readl_relaxed(base + data->soc_data->reg_enable) & 0x1)) {
         dev_err(dev, "Domain-%d cpufreq hardware not enabled\n", index);
         ret = -ENODEV;
         goto error;

@@ -282,9 +330,9 @@ static int qcom_cpufreq_hw_cpu_init(struct cpufreq_policy *policy)
         goto error;
     }

-    policy->driver_data = base + REG_PERF_STATE;
+    policy->driver_data = data;

-    ret = qcom_cpufreq_hw_read_lut(cpu_dev, policy, base);
+    ret = qcom_cpufreq_hw_read_lut(cpu_dev, policy);
     if (ret) {
         dev_err(dev, "Domain-%d failed to read LUT\n", index);
         goto error;

@@ -308,12 +356,13 @@ static int qcom_cpufreq_hw_cpu_init(struct cpufreq_policy *policy)
 static int qcom_cpufreq_hw_cpu_exit(struct cpufreq_policy *policy)
 {
     struct device *cpu_dev = get_cpu_device(policy->cpu);
-    void __iomem *base = policy->driver_data - REG_PERF_STATE;
+    struct qcom_cpufreq_data *data = policy->driver_data;
+    struct platform_device *pdev = cpufreq_get_driver_data();

     dev_pm_opp_remove_all_dynamic(cpu_dev);
     dev_pm_opp_of_cpumask_remove_table(policy->related_cpus);
     kfree(policy->freq_table);
-    devm_iounmap(&global_pdev->dev, base);
+    devm_iounmap(&pdev->dev, data->base);

     return 0;
 }

@@ -358,7 +407,7 @@ static int qcom_cpufreq_hw_driver_probe(struct platform_device *pdev)
     cpu_hw_rate = clk_get_rate(clk) / CLK_HW_DIV;
     clk_put(clk);

-    global_pdev = pdev;
+    cpufreq_qcom_hw_driver.driver_data = pdev;

     /* Check for optional interconnect paths on CPU0 */
     cpu_dev = get_cpu_device(0);

@@ -383,12 +432,6 @@ static int qcom_cpufreq_hw_driver_remove(struct platform_device *pdev)
     return cpufreq_unregister_driver(&cpufreq_qcom_hw_driver);
 }

-static const struct of_device_id qcom_cpufreq_hw_match[] = {
-    { .compatible = "qcom,cpufreq-hw" },
-    {}
-};
-MODULE_DEVICE_TABLE(of, qcom_cpufreq_hw_match);
-
 static struct platform_driver qcom_cpufreq_hw_driver = {
     .probe = qcom_cpufreq_hw_driver_probe,
     .remove = qcom_cpufreq_hw_driver_remove,
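The SM8250/EPSS support above hinges on the standard of_device_get_match_data() pattern: each compatible string carries a pointer to a per-SoC description, so probe code never hard-codes register offsets. The sketch below is a generic illustration of that pattern under assumed names (my_soc_data, "vendor,soc-a", etc.); it is not the qcom-cpufreq-hw code itself.

    #include <linux/module.h>
    #include <linux/of.h>
    #include <linux/of_device.h>
    #include <linux/platform_device.h>
    #include <linux/types.h>

    /* Hypothetical per-SoC description, mirroring the qcom_cpufreq_soc_data idea. */
    struct my_soc_data {
        u32 reg_perf_state;
        u8 lut_row_size;
    };

    static const struct my_soc_data soc_a_data = { .reg_perf_state = 0x920, .lut_row_size = 32 };
    static const struct my_soc_data soc_b_data = { .reg_perf_state = 0x320, .lut_row_size = 4 };

    static const struct of_device_id my_match[] = {
        { .compatible = "vendor,soc-a", .data = &soc_a_data },
        { .compatible = "vendor,soc-b", .data = &soc_b_data },
        { }
    };
    MODULE_DEVICE_TABLE(of, my_match);

    static int my_probe(struct platform_device *pdev)
    {
        /* Returns the .data pointer of the matching compatible entry. */
        const struct my_soc_data *sd = of_device_get_match_data(&pdev->dev);

        if (!sd)
            return -EINVAL;

        dev_info(&pdev->dev, "perf-state register at %#x, LUT row %u bytes\n",
             sd->reg_perf_state, sd->lut_row_size);
        return 0;
    }

    static struct platform_driver my_driver = {
        .probe = my_probe,
        .driver = {
            .name = "my-soc-data-example",
            .of_match_table = my_match,
        },
    };
    module_platform_driver(my_driver);
    MODULE_LICENSE("GPL");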
diff --git a/drivers/cpufreq/s5pv210-cpufreq.c b/drivers/cpufreq/s5pv210-cpufreq.c
@@ -590,6 +590,7 @@ static struct notifier_block s5pv210_cpufreq_reboot_notifier = {

 static int s5pv210_cpufreq_probe(struct platform_device *pdev)
 {
+    struct device *dev = &pdev->dev;
     struct device_node *np;
     int id, result = 0;

@@ -602,28 +603,20 @@ static int s5pv210_cpufreq_probe(struct platform_device *pdev)
      * cpufreq-dt driver.
      */
     arm_regulator = regulator_get(NULL, "vddarm");
-    if (IS_ERR(arm_regulator)) {
-        if (PTR_ERR(arm_regulator) == -EPROBE_DEFER)
-            pr_debug("vddarm regulator not ready, defer\n");
-        else
-            pr_err("failed to get regulator vddarm\n");
-        return PTR_ERR(arm_regulator);
-    }
+    if (IS_ERR(arm_regulator))
+        return dev_err_probe(dev, PTR_ERR(arm_regulator),
+                     "failed to get regulator vddarm\n");

     int_regulator = regulator_get(NULL, "vddint");
     if (IS_ERR(int_regulator)) {
-        if (PTR_ERR(int_regulator) == -EPROBE_DEFER)
-            pr_debug("vddint regulator not ready, defer\n");
-        else
-            pr_err("failed to get regulator vddint\n");
-        result = PTR_ERR(int_regulator);
+        result = dev_err_probe(dev, PTR_ERR(int_regulator),
+                       "failed to get regulator vddint\n");
         goto err_int_regulator;
     }

     np = of_find_compatible_node(NULL, NULL, "samsung,s5pv210-clock");
     if (!np) {
-        pr_err("%s: failed to find clock controller DT node\n",
-            __func__);
+        dev_err(dev, "failed to find clock controller DT node\n");
         result = -ENODEV;
         goto err_clock;
     }

@@ -631,7 +624,7 @@ static int s5pv210_cpufreq_probe(struct platform_device *pdev)
     clk_base = of_iomap(np, 0);
     of_node_put(np);
     if (!clk_base) {
-        pr_err("%s: failed to map clock registers\n", __func__);
+        dev_err(dev, "failed to map clock registers\n");
         result = -EFAULT;
         goto err_clock;
     }

@@ -639,8 +632,7 @@ static int s5pv210_cpufreq_probe(struct platform_device *pdev)
     for_each_compatible_node(np, NULL, "samsung,s5pv210-dmc") {
         id = of_alias_get_id(np, "dmc");
         if (id < 0 || id >= ARRAY_SIZE(dmc_base)) {
-            pr_err("%s: failed to get alias of dmc node '%pOFn'\n",
-                __func__, np);
+            dev_err(dev, "failed to get alias of dmc node '%pOFn'\n", np);
             of_node_put(np);
             result = id;
             goto err_clk_base;

@@ -648,8 +640,7 @@ static int s5pv210_cpufreq_probe(struct platform_device *pdev)

         dmc_base[id] = of_iomap(np, 0);
         if (!dmc_base[id]) {
-            pr_err("%s: failed to map dmc%d registers\n",
-                __func__, id);
+            dev_err(dev, "failed to map dmc%d registers\n", id);
             of_node_put(np);
             result = -EFAULT;
             goto err_dmc;

@@ -658,7 +649,7 @@ static int s5pv210_cpufreq_probe(struct platform_device *pdev)

     for (id = 0; id < ARRAY_SIZE(dmc_base); ++id) {
         if (!dmc_base[id]) {
-            pr_err("%s: failed to find dmc%d node\n", __func__, id);
+            dev_err(dev, "failed to find dmc%d node\n", id);
             result = -ENODEV;
             goto err_dmc;
         }
diff --git a/drivers/cpufreq/sti-cpufreq.c b/drivers/cpufreq/sti-cpufreq.c
@@ -141,7 +141,8 @@ static const struct reg_field sti_stih407_dvfs_regfields[DVFS_MAX_REGFIELDS] = {
 static const struct reg_field *sti_cpufreq_match(void)
 {
     if (of_machine_is_compatible("st,stih407") ||
-        of_machine_is_compatible("st,stih410"))
+        of_machine_is_compatible("st,stih410") ||
+        of_machine_is_compatible("st,stih418"))
         return sti_stih407_dvfs_regfields;

     return NULL;

@@ -258,7 +259,8 @@ static int sti_cpufreq_init(void)
     int ret;

     if ((!of_machine_is_compatible("st,stih407")) &&
-        (!of_machine_is_compatible("st,stih410")))
+        (!of_machine_is_compatible("st,stih410")) &&
+        (!of_machine_is_compatible("st,stih418")))
         return -ENODEV;

     ddata.cpu = get_cpu_device(0);
diff --git a/drivers/cpufreq/tegra186-cpufreq.c b/drivers/cpufreq/tegra186-cpufreq.c
@@ -14,6 +14,7 @@

 #define EDVD_CORE_VOLT_FREQ(core)   (0x20 + (core) * 0x4)
 #define EDVD_CORE_VOLT_FREQ_F_SHIFT 0
+#define EDVD_CORE_VOLT_FREQ_F_MASK  0xffff
 #define EDVD_CORE_VOLT_FREQ_V_SHIFT 16

 struct tegra186_cpufreq_cluster_info {

@@ -91,10 +92,39 @@ static int tegra186_cpufreq_set_target(struct cpufreq_policy *policy,
     return 0;
 }

+static unsigned int tegra186_cpufreq_get(unsigned int cpu)
+{
+    struct cpufreq_frequency_table *tbl;
+    struct cpufreq_policy *policy;
+    void __iomem *edvd_reg;
+    unsigned int i, freq = 0;
+    u32 ndiv;
+
+    policy = cpufreq_cpu_get(cpu);
+    if (!policy)
+        return 0;
+
+    tbl = policy->freq_table;
+    edvd_reg = policy->driver_data;
+    ndiv = readl(edvd_reg) & EDVD_CORE_VOLT_FREQ_F_MASK;
+
+    for (i = 0; tbl[i].frequency != CPUFREQ_TABLE_END; i++) {
+        if ((tbl[i].driver_data & EDVD_CORE_VOLT_FREQ_F_MASK) == ndiv) {
+            freq = tbl[i].frequency;
+            break;
+        }
+    }
+
+    cpufreq_cpu_put(policy);
+
+    return freq;
+}
+
 static struct cpufreq_driver tegra186_cpufreq_driver = {
     .name = "tegra186",
     .flags = CPUFREQ_STICKY | CPUFREQ_HAVE_GOVERNOR_PER_POLICY |
          CPUFREQ_NEED_INITIAL_FREQ_CHECK,
+    .get = tegra186_cpufreq_get,
     .verify = cpufreq_generic_frequency_table_verify,
     .target_index = tegra186_cpufreq_set_target,
     .init = tegra186_cpufreq_init,
diff --git a/drivers/opp/core.c b/drivers/opp/core.c
@@ -1068,7 +1068,7 @@ static struct opp_table *_allocate_opp_table(struct device *dev, int index)
      */
     opp_table = kzalloc(sizeof(*opp_table), GFP_KERNEL);
     if (!opp_table)
-        return NULL;
+        return ERR_PTR(-ENOMEM);

     mutex_init(&opp_table->lock);
     mutex_init(&opp_table->genpd_virt_dev_lock);

@@ -1079,8 +1079,8 @@ static struct opp_table *_allocate_opp_table(struct device *dev, int index)

     opp_dev = _add_opp_dev(dev, opp_table);
     if (!opp_dev) {
-        kfree(opp_table);
-        return NULL;
+        ret = -ENOMEM;
+        goto err;
     }

     _of_init_opp_table(opp_table, dev, index);

@@ -1089,16 +1089,21 @@ static struct opp_table *_allocate_opp_table(struct device *dev, int index)
     opp_table->clk = clk_get(dev, NULL);
     if (IS_ERR(opp_table->clk)) {
         ret = PTR_ERR(opp_table->clk);
-        if (ret != -EPROBE_DEFER)
-            dev_dbg(dev, "%s: Couldn't find clock: %d\n", __func__,
-                ret);
+        if (ret == -EPROBE_DEFER)
+            goto err;
+
+        dev_dbg(dev, "%s: Couldn't find clock: %d\n", __func__, ret);
     }

     /* Find interconnect path(s) for the device */
     ret = dev_pm_opp_of_find_icc_paths(dev, opp_table);
-    if (ret)
+    if (ret) {
+        if (ret == -EPROBE_DEFER)
+            goto err;
+
         dev_warn(dev, "%s: Error finding interconnect paths: %d\n",
              __func__, ret);
+    }

     BLOCKING_INIT_NOTIFIER_HEAD(&opp_table->head);
     INIT_LIST_HEAD(&opp_table->opp_list);

@@ -1107,6 +1112,10 @@ static struct opp_table *_allocate_opp_table(struct device *dev, int index)
     /* Secure the device table modification */
     list_add(&opp_table->node, &opp_tables);
     return opp_table;
+
+err:
+    kfree(opp_table);
+    return ERR_PTR(ret);
 }

 void _get_opp_table_kref(struct opp_table *opp_table)

@@ -1129,7 +1138,7 @@ static struct opp_table *_opp_get_opp_table(struct device *dev, int index)
     if (opp_table) {
         if (!_add_opp_dev_unlocked(dev, opp_table)) {
             dev_pm_opp_put_opp_table(opp_table);
-            opp_table = NULL;
+            opp_table = ERR_PTR(-ENOMEM);
         }
         goto unlock;
     }

@@ -1581,8 +1590,8 @@ struct opp_table *dev_pm_opp_set_supported_hw(struct device *dev,
     struct opp_table *opp_table;

     opp_table = dev_pm_opp_get_opp_table(dev);
-    if (!opp_table)
-        return ERR_PTR(-ENOMEM);
+    if (IS_ERR(opp_table))
+        return opp_table;

     /* Make sure there are no concurrent readers while updating opp_table */
     WARN_ON(!list_empty(&opp_table->opp_list));

@@ -1640,8 +1649,8 @@ struct opp_table *dev_pm_opp_set_prop_name(struct device *dev, const char *name)
     struct opp_table *opp_table;

     opp_table = dev_pm_opp_get_opp_table(dev);
-    if (!opp_table)
-        return ERR_PTR(-ENOMEM);
+    if (IS_ERR(opp_table))
+        return opp_table;

     /* Make sure there are no concurrent readers while updating opp_table */
     WARN_ON(!list_empty(&opp_table->opp_list));

@@ -1733,8 +1742,8 @@ struct opp_table *dev_pm_opp_set_regulators(struct device *dev,
     int ret, i;

     opp_table = dev_pm_opp_get_opp_table(dev);
-    if (!opp_table)
-        return ERR_PTR(-ENOMEM);
+    if (IS_ERR(opp_table))
+        return opp_table;

     /* This should be called before OPPs are initialized */
     if (WARN_ON(!list_empty(&opp_table->opp_list))) {

@@ -1843,8 +1852,8 @@ struct opp_table *dev_pm_opp_set_clkname(struct device *dev, const char *name)
     int ret;

     opp_table = dev_pm_opp_get_opp_table(dev);
-    if (!opp_table)
-        return ERR_PTR(-ENOMEM);
+    if (IS_ERR(opp_table))
+        return opp_table;

     /* This should be called before OPPs are initialized */
     if (WARN_ON(!list_empty(&opp_table->opp_list))) {

@@ -1911,8 +1920,8 @@ struct opp_table *dev_pm_opp_register_set_opp_helper(struct device *dev,
         return ERR_PTR(-EINVAL);

     opp_table = dev_pm_opp_get_opp_table(dev);
-    if (!opp_table)
-        return ERR_PTR(-ENOMEM);
+    if (!IS_ERR(opp_table))
+        return opp_table;

     /* This should be called before OPPs are initialized */
     if (WARN_ON(!list_empty(&opp_table->opp_list))) {

@@ -1992,8 +2001,8 @@ struct opp_table *dev_pm_opp_attach_genpd(struct device *dev,
     const char **name = names;

     opp_table = dev_pm_opp_get_opp_table(dev);
-    if (!opp_table)
-        return ERR_PTR(-ENOMEM);
+    if (IS_ERR(opp_table))
+        return opp_table;

     /*
      * If the genpd's OPP table isn't already initialized, parsing of the

@@ -2163,8 +2172,8 @@ int dev_pm_opp_add(struct device *dev, unsigned long freq, unsigned long u_volt)
     int ret;

     opp_table = dev_pm_opp_get_opp_table(dev);
-    if (!opp_table)
-        return -ENOMEM;
+    if (IS_ERR(opp_table))
+        return PTR_ERR(opp_table);

     /* Fix regulator count for dynamic OPPs */
     opp_table->regulator_count = 1;
diff --git a/drivers/opp/of.c b/drivers/opp/of.c
@@ -886,11 +886,25 @@ static int _of_add_opp_table_v1(struct device *dev, struct opp_table *opp_table)
     const __be32 *val;
     int nr, ret = 0;

+    mutex_lock(&opp_table->lock);
+    if (opp_table->parsed_static_opps) {
+        opp_table->parsed_static_opps++;
+        mutex_unlock(&opp_table->lock);
+        return 0;
+    }
+
+    opp_table->parsed_static_opps = 1;
+    mutex_unlock(&opp_table->lock);
+
     prop = of_find_property(dev->of_node, "operating-points", NULL);
-    if (!prop)
-        return -ENODEV;
-    if (!prop->value)
-        return -ENODATA;
+    if (!prop) {
+        ret = -ENODEV;
+        goto remove_static_opp;
+    }
+    if (!prop->value) {
+        ret = -ENODATA;
+        goto remove_static_opp;
+    }

     /*
      * Each OPP is a set of tuples consisting of frequency and

@@ -899,13 +913,10 @@ static int _of_add_opp_table_v1(struct device *dev, struct opp_table *opp_table)
     nr = prop->length / sizeof(u32);
     if (nr % 2) {
         dev_err(dev, "%s: Invalid OPP table\n", __func__);
-        return -EINVAL;
+        ret = -EINVAL;
+        goto remove_static_opp;
     }

-    mutex_lock(&opp_table->lock);
-    opp_table->parsed_static_opps = 1;
-    mutex_unlock(&opp_table->lock);
-
     val = prop->value;
     while (nr) {
         unsigned long freq = be32_to_cpup(val++) * 1000;

@@ -915,12 +926,14 @@ static int _of_add_opp_table_v1(struct device *dev, struct opp_table *opp_table)
         if (ret) {
             dev_err(dev, "%s: Failed to add OPP %ld (%d)\n",
                 __func__, freq, ret);
-            _opp_remove_all_static(opp_table);
-            return ret;
+            goto remove_static_opp;
         }
         nr -= 2;
     }

+remove_static_opp:
+    _opp_remove_all_static(opp_table);
+
     return ret;
 }

@@ -947,8 +960,8 @@ int dev_pm_opp_of_add_table(struct device *dev)
     int ret;

     opp_table = dev_pm_opp_get_opp_table_indexed(dev, 0);
-    if (!opp_table)
-        return -ENOMEM;
+    if (IS_ERR(opp_table))
+        return PTR_ERR(opp_table);

     /*
      * OPPs have two version of bindings now. Also try the old (v1)

@@ -1002,8 +1015,8 @@ int dev_pm_opp_of_add_table_indexed(struct device *dev, int index)
     }

     opp_table = dev_pm_opp_get_opp_table_indexed(dev, index);
-    if (!opp_table)
-        return -ENOMEM;
+    if (IS_ERR(opp_table))
+        return PTR_ERR(opp_table);

     ret = _of_add_opp_table_v2(dev, opp_table);
     if (ret)
diff --git a/drivers/soc/samsung/exynos-asv.c b/drivers/soc/samsung/exynos-asv.c
@@ -93,7 +93,7 @@ static int exynos_asv_update_opps(struct exynos_asv *asv)
             continue;

         opp_table = dev_pm_opp_get_opp_table(cpu);
-        if (IS_ERR_OR_NULL(opp_table))
+        if (IS_ERR(opp_table))
             continue;

         if (!last_opp_table || opp_table != last_opp_table) {