ACPI and power management fixes for 3.9-rc2

Merge tag 'pm+acpi-3.9-rc2' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm

Pull ACPI and power management fixes from Rafael J. Wysocki:

 - Two fixes for the new intel_pstate driver from Dirk Brandewie.

 - Fix for incorrect usage of the .find_bridge() callback from struct
   acpi_bus_type in the USB core, and subsequent removal of that callback,
   from Rafael J. Wysocki.

 - ACPI processor driver cleanups from Chen Gang and Syam Sidhardhan.

 - ACPI initialization and error messages fix from Joe Perches.

 - Operating Performance Points documentation improvement from Nishanth Menon.

 - Fixes for memory leaks, potential concurrency issues, and sysfs attribute
   leaks during device removal in the core device PM QoS code, from
   Rafael J. Wysocki.

 - Calxeda Highbank cpufreq driver simplification from Emilio López.

 - cpufreq comment cleanup from Namhyung Kim.

 - Fix for a section mismatch in Calxeda Highbank interprocessor
   communication code from Mark Langsdorf (not strictly a PM fix, but the
   code in question went in through the PM tree).

* tag 'pm+acpi-3.9-rc2' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm:
  cpufreq / intel_pstate: Do not load on VM that does not report max P state.
  cpufreq / intel_pstate: Fix intel_pstate_init() error path
  ACPI / glue: Drop .find_bridge() callback from struct acpi_bus_type
  ACPI / glue: Add .match() callback to struct acpi_bus_type
  ACPI / porocessor: Beautify code, pr->id is u32 which is never < 0
  ACPI / processor: Remove redundant NULL check before kfree
  ACPI / Sleep: Avoid interleaved message on errors
  PM / QoS: Remove device PM QoS sysfs attributes at the right place
  PM / QoS: Fix concurrency issues and memory leaks in device PM QoS
  cpufreq: highbank: do not initialize array with a loop
  PM / OPP: improve introductory documentation
  cpufreq: Fix a typo in comment
  mailbox, pl320-ipc: remove __init from probe function
commit c89b148fd3
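Several of the hunks below replace the old bus-pointer comparison in struct acpi_bus_type with a per-handler .match() callback (see the glue.c, PCI, PNP, SCSI, USB and acpi_bus.h changes). The following stand-alone C sketch illustrates that registry-plus-callback lookup pattern in user space; the type and function names are invented for illustration and are not the kernel API.

#include <stdio.h>
#include <string.h>
#include <stdbool.h>

/* Toy stand-in for struct device; it only knows which bus it sits on. */
struct device {
	const char *bus_name;
};

/* Analogue of struct acpi_bus_type after this series: no bus pointer, just
 * a name and a match() callback that decides whether the handler is
 * responsible for a given device. */
struct bus_handler {
	const char *name;
	bool (*match)(struct device *dev);
};

static bool pci_match(struct device *dev)
{
	return strcmp(dev->bus_name, "pci") == 0;
}

static bool usb_match(struct device *dev)
{
	return strcmp(dev->bus_name, "usb") == 0;
}

static struct bus_handler handlers[] = {
	{ .name = "PCI", .match = pci_match },
	{ .name = "USB", .match = usb_match },
};

/* Rough counterpart of acpi_get_bus_type(): walk the registered handlers
 * and return the first one whose match() callback accepts the device. */
static struct bus_handler *get_handler(struct device *dev)
{
	for (size_t i = 0; i < sizeof(handlers) / sizeof(handlers[0]); i++)
		if (handlers[i].match(dev))
			return &handlers[i];
	return NULL;
}

int main(void)
{
	struct device d = { .bus_name = "usb" };
	struct bus_handler *h = get_handler(&d);

	printf("device matched by: %s\n", h ? h->name : "nobody");
	return 0;
}

The point of delegating the decision to a callback is that a handler no longer has to correspond to exactly one struct bus_type, which is what made the separate .find_bridge() path unnecessary (the USB handler, for example, matches both USB devices and USB ports in the diff below).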
Documentation/power/opp.txt

@@ -1,6 +1,5 @@
-*=============*
-* OPP Library *
-*=============*
+Operating Performance Points (OPP) Library
+==========================================
 
 (C) 2009-2010 Nishanth Menon <nm@ti.com>, Texas Instruments Incorporated
 
@@ -16,15 +15,31 @@ Contents
 1. Introduction
 ===============
+1.1 What is an Operating Performance Point (OPP)?
+
 Complex SoCs of today consists of a multiple sub-modules working in conjunction.
 In an operational system executing varied use cases, not all modules in the SoC
 need to function at their highest performing frequency all the time. To
 facilitate this, sub-modules in a SoC are grouped into domains, allowing some
-domains to run at lower voltage and frequency while other domains are loaded
-more. The set of discrete tuples consisting of frequency and voltage pairs that
+domains to run at lower voltage and frequency while other domains run at
+voltage/frequency pairs that are higher.
+
+The set of discrete tuples consisting of frequency and voltage pairs that
 the device will support per domain are called Operating Performance Points or
 OPPs.
 
+As an example:
+Let us consider an MPU device which supports the following:
+{300MHz at minimum voltage of 1V}, {800MHz at minimum voltage of 1.2V},
+{1GHz at minimum voltage of 1.3V}
+
+We can represent these as three OPPs as the following {Hz, uV} tuples:
+{300000000, 1000000}
+{800000000, 1200000}
+{1000000000, 1300000}
+
+1.2 Operating Performance Points Library
+
 OPP library provides a set of helper functions to organize and query the OPP
 information. The library is located in drivers/base/power/opp.c and the header
 is located in include/linux/opp.h. OPP library can be enabled by enabling
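The {Hz, uV} tuples in the documentation above map naturally onto a simple table lookup. Below is a minimal, stand-alone C sketch that models the three example MPU OPPs from the text and performs a "ceiling" lookup; it only mirrors the documentation's example data and is not the in-kernel drivers/base/power/opp.c implementation.

#include <stdio.h>

/* One Operating Performance Point: a {frequency in Hz, voltage in uV} pair,
 * exactly as in the documentation's MPU example above. */
struct opp {
	unsigned long freq_hz;
	unsigned long volt_uv;
};

static const struct opp mpu_opps[] = {
	{  300000000, 1000000 },	/* 300 MHz at 1.0 V */
	{  800000000, 1200000 },	/* 800 MHz at 1.2 V */
	{ 1000000000, 1300000 },	/* 1 GHz at 1.3 V   */
};

/* Return the lowest OPP running at or above the requested frequency,
 * or NULL if the request exceeds the fastest OPP (a "ceil" lookup). */
static const struct opp *opp_ceil(unsigned long req_hz)
{
	for (size_t i = 0; i < sizeof(mpu_opps) / sizeof(mpu_opps[0]); i++)
		if (mpu_opps[i].freq_hz >= req_hz)
			return &mpu_opps[i];
	return NULL;
}

int main(void)
{
	const struct opp *opp = opp_ceil(600000000);

	if (opp)
		printf("600 MHz request -> %lu Hz at %lu uV\n",
		       opp->freq_hz, opp->volt_uv);
	return 0;
}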
drivers/acpi/glue.c

@@ -36,12 +36,11 @@ int register_acpi_bus_type(struct acpi_bus_type *type)
 {
 	if (acpi_disabled)
 		return -ENODEV;
-	if (type && type->bus && type->find_device) {
+	if (type && type->match && type->find_device) {
 		down_write(&bus_type_sem);
 		list_add_tail(&type->list, &bus_type_list);
 		up_write(&bus_type_sem);
-		printk(KERN_INFO PREFIX "bus type %s registered\n",
-		       type->bus->name);
+		printk(KERN_INFO PREFIX "bus type %s registered\n", type->name);
 		return 0;
 	}
 	return -ENODEV;
@@ -56,24 +55,21 @@ int unregister_acpi_bus_type(struct acpi_bus_type *type)
 		down_write(&bus_type_sem);
 		list_del_init(&type->list);
 		up_write(&bus_type_sem);
-		printk(KERN_INFO PREFIX "ACPI bus type %s unregistered\n",
-		       type->bus->name);
+		printk(KERN_INFO PREFIX "bus type %s unregistered\n",
+		       type->name);
 		return 0;
 	}
 	return -ENODEV;
 }
 EXPORT_SYMBOL_GPL(unregister_acpi_bus_type);
 
-static struct acpi_bus_type *acpi_get_bus_type(struct bus_type *type)
+static struct acpi_bus_type *acpi_get_bus_type(struct device *dev)
 {
 	struct acpi_bus_type *tmp, *ret = NULL;
 
-	if (!type)
-		return NULL;
-
 	down_read(&bus_type_sem);
 	list_for_each_entry(tmp, &bus_type_list, list) {
-		if (tmp->bus == type) {
+		if (tmp->match(dev)) {
 			ret = tmp;
 			break;
 		}
@@ -82,22 +78,6 @@ static struct acpi_bus_type *acpi_get_bus_type(struct bus_type *type)
 	return ret;
 }
 
-static int acpi_find_bridge_device(struct device *dev, acpi_handle * handle)
-{
-	struct acpi_bus_type *tmp;
-	int ret = -ENODEV;
-
-	down_read(&bus_type_sem);
-	list_for_each_entry(tmp, &bus_type_list, list) {
-		if (tmp->find_bridge && !tmp->find_bridge(dev, handle)) {
-			ret = 0;
-			break;
-		}
-	}
-	up_read(&bus_type_sem);
-	return ret;
-}
-
 static acpi_status do_acpi_find_child(acpi_handle handle, u32 lvl_not_used,
 				      void *addr_p, void **ret_p)
 {
@@ -261,29 +241,12 @@ static int acpi_unbind_one(struct device *dev)
 
 static int acpi_platform_notify(struct device *dev)
 {
-	struct acpi_bus_type *type;
+	struct acpi_bus_type *type = acpi_get_bus_type(dev);
 	acpi_handle handle;
 	int ret;
 
 	ret = acpi_bind_one(dev, NULL);
-	if (ret && (!dev->bus || !dev->parent)) {
-		/* bridge devices genernally haven't bus or parent */
-		ret = acpi_find_bridge_device(dev, &handle);
-		if (!ret) {
-			ret = acpi_bind_one(dev, handle);
-			if (ret)
-				goto out;
-		}
-	}
-
-	type = acpi_get_bus_type(dev->bus);
-	if (ret) {
-		if (!type || !type->find_device) {
-			DBG("No ACPI bus support for %s\n", dev_name(dev));
-			ret = -EINVAL;
-			goto out;
-		}
-
+	if (ret && type) {
 		ret = type->find_device(dev, &handle);
 		if (ret) {
 			DBG("Unable to get handle for %s\n", dev_name(dev));
@@ -316,7 +279,7 @@ static int acpi_platform_notify_remove(struct device *dev)
 {
 	struct acpi_bus_type *type;
 
-	type = acpi_get_bus_type(dev->bus);
+	type = acpi_get_bus_type(dev);
 	if (type && type->cleanup)
 		type->cleanup(dev);
 
drivers/acpi/processor_core.c

@@ -158,8 +158,7 @@ static int map_mat_entry(acpi_handle handle, int type, u32 acpi_id)
 	}
 
 exit:
-	if (buffer.pointer)
-		kfree(buffer.pointer);
+	kfree(buffer.pointer);
 	return apic_id;
 }
 
drivers/acpi/processor_driver.c

@@ -559,7 +559,7 @@ static int __cpuinit acpi_processor_add(struct acpi_device *device)
 		return 0;
 #endif
 
-	BUG_ON((pr->id >= nr_cpu_ids) || (pr->id < 0));
+	BUG_ON(pr->id >= nr_cpu_ids);
 
 	/*
 	 * Buggy BIOS check
drivers/acpi/sleep.c

@@ -599,7 +599,6 @@ static void acpi_sleep_suspend_setup(void)
 		status = acpi_get_sleep_type_data(i, &type_a, &type_b);
 		if (ACPI_SUCCESS(status)) {
 			sleep_states[i] = 1;
-			pr_cont(" S%d", i);
 		}
 	}
 
@@ -742,7 +741,6 @@ static void acpi_sleep_hibernate_setup(void)
 		hibernation_set_ops(old_suspend_ordering ?
 			&acpi_hibernation_ops_old : &acpi_hibernation_ops);
 		sleep_states[ACPI_STATE_S4] = 1;
-		pr_cont(KERN_CONT " S4");
 		if (nosigcheck)
 			return;
 
@@ -788,6 +786,9 @@ int __init acpi_sleep_init(void)
 {
 	acpi_status status;
 	u8 type_a, type_b;
+	char supported[ACPI_S_STATE_COUNT * 3 + 1];
+	char *pos = supported;
+	int i;
 
 	if (acpi_disabled)
 		return 0;
@@ -795,7 +796,6 @@ int __init acpi_sleep_init(void)
 	acpi_sleep_dmi_check();
 
 	sleep_states[ACPI_STATE_S0] = 1;
-	pr_info(PREFIX "(supports S0");
 
 	acpi_sleep_suspend_setup();
 	acpi_sleep_hibernate_setup();
@@ -803,11 +803,17 @@ int __init acpi_sleep_init(void)
 	status = acpi_get_sleep_type_data(ACPI_STATE_S5, &type_a, &type_b);
 	if (ACPI_SUCCESS(status)) {
 		sleep_states[ACPI_STATE_S5] = 1;
-		pr_cont(" S5");
 		pm_power_off_prepare = acpi_power_off_prepare;
 		pm_power_off = acpi_power_off;
 	}
-	pr_cont(")\n");
+
+	supported[0] = 0;
+	for (i = 0; i < ACPI_S_STATE_COUNT; i++) {
+		if (sleep_states[i])
+			pos += sprintf(pos, " S%d", i);
+	}
+	pr_info(PREFIX "(supports%s)\n", supported);
 
 	/*
 	 * Register the tts_notifier to reboot notifier list so that the _TTS
 	 * object can also be evaluated when the system enters S5.
drivers/ata/libata-acpi.c

@@ -1144,13 +1144,8 @@ static int ata_acpi_find_device(struct device *dev, acpi_handle *handle)
 	return -ENODEV;
 }
 
-static int ata_acpi_find_dummy(struct device *dev, acpi_handle *handle)
-{
-	return -ENODEV;
-}
-
 static struct acpi_bus_type ata_acpi_bus = {
-	.find_bridge = ata_acpi_find_dummy,
+	.name = "ATA",
 	.find_device = ata_acpi_find_device,
 };
 
drivers/base/power/main.c

@@ -99,7 +99,6 @@ void device_pm_add(struct device *dev)
 		dev_warn(dev, "parent %s should not be sleeping\n",
 			dev_name(dev->parent));
 	list_add_tail(&dev->power.entry, &dpm_list);
-	dev_pm_qos_constraints_init(dev);
 	mutex_unlock(&dpm_list_mtx);
 }
 
@@ -113,7 +112,6 @@ void device_pm_remove(struct device *dev)
 		dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
 	complete_all(&dev->power.completion);
 	mutex_lock(&dpm_list_mtx);
-	dev_pm_qos_constraints_destroy(dev);
 	list_del_init(&dev->power.entry);
 	mutex_unlock(&dpm_list_mtx);
 	device_wakeup_disable(dev);
drivers/base/power/power.h

@@ -4,7 +4,7 @@ static inline void device_pm_init_common(struct device *dev)
 {
 	if (!dev->power.early_init) {
 		spin_lock_init(&dev->power.lock);
-		dev->power.power_state = PMSG_INVALID;
+		dev->power.qos = NULL;
 		dev->power.early_init = true;
 	}
 }
@@ -56,14 +56,10 @@ extern void device_pm_move_last(struct device *);
 
 static inline void device_pm_sleep_init(struct device *dev) {}
 
-static inline void device_pm_add(struct device *dev)
-{
-	dev_pm_qos_constraints_init(dev);
-}
+static inline void device_pm_add(struct device *dev) {}
 
 static inline void device_pm_remove(struct device *dev)
 {
-	dev_pm_qos_constraints_destroy(dev);
 	pm_runtime_remove(dev);
 }
 
drivers/base/power/qos.c

@@ -41,6 +41,7 @@
 #include <linux/mutex.h>
 #include <linux/export.h>
 #include <linux/pm_runtime.h>
+#include <linux/err.h>
 
 #include "power.h"
 
@@ -61,7 +62,7 @@ enum pm_qos_flags_status __dev_pm_qos_flags(struct device *dev, s32 mask)
 	struct pm_qos_flags *pqf;
 	s32 val;
 
-	if (!qos)
+	if (IS_ERR_OR_NULL(qos))
 		return PM_QOS_FLAGS_UNDEFINED;
 
 	pqf = &qos->flags;
@@ -101,7 +102,8 @@ EXPORT_SYMBOL_GPL(dev_pm_qos_flags);
  */
 s32 __dev_pm_qos_read_value(struct device *dev)
 {
-	return dev->power.qos ? pm_qos_read_value(&dev->power.qos->latency) : 0;
+	return IS_ERR_OR_NULL(dev->power.qos) ?
+		0 : pm_qos_read_value(&dev->power.qos->latency);
 }
 
 /**
@@ -198,20 +200,8 @@ static int dev_pm_qos_constraints_allocate(struct device *dev)
 	return 0;
 }
 
-/**
- * dev_pm_qos_constraints_init - Initalize device's PM QoS constraints pointer.
- * @dev: target device
- *
- * Called from the device PM subsystem during device insertion under
- * device_pm_lock().
- */
-void dev_pm_qos_constraints_init(struct device *dev)
-{
-	mutex_lock(&dev_pm_qos_mtx);
-	dev->power.qos = NULL;
-	dev->power.power_state = PMSG_ON;
-	mutex_unlock(&dev_pm_qos_mtx);
-}
+static void __dev_pm_qos_hide_latency_limit(struct device *dev);
+static void __dev_pm_qos_hide_flags(struct device *dev);
 
 /**
  * dev_pm_qos_constraints_destroy
@@ -226,16 +216,15 @@ void dev_pm_qos_constraints_destroy(struct device *dev)
 	struct pm_qos_constraints *c;
 	struct pm_qos_flags *f;
 
-	mutex_lock(&dev_pm_qos_mtx);
-
 	/*
 	 * If the device's PM QoS resume latency limit or PM QoS flags have been
 	 * exposed to user space, they have to be hidden at this point.
 	 */
-	dev_pm_qos_hide_latency_limit(dev);
-	dev_pm_qos_hide_flags(dev);
+	__dev_pm_qos_hide_latency_limit(dev);
+	__dev_pm_qos_hide_flags(dev);
+
+	mutex_lock(&dev_pm_qos_mtx);
 
+	dev->power.power_state = PMSG_INVALID;
 	qos = dev->power.qos;
 	if (!qos)
 		goto out;
@@ -257,7 +246,7 @@ void dev_pm_qos_constraints_destroy(struct device *dev)
 	}
 
 	spin_lock_irq(&dev->power.lock);
-	dev->power.qos = NULL;
+	dev->power.qos = ERR_PTR(-ENODEV);
 	spin_unlock_irq(&dev->power.lock);
 
 	kfree(c->notifiers);
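The qos.c hunks above stop resetting dev->power.qos to NULL on removal and store ERR_PTR(-ENODEV) instead, so later callers can distinguish "constraints not allocated yet" (NULL, allocate on demand) from "device already going away" (error pointer) with a single IS_ERR_OR_NULL() test. Below is a stand-alone sketch of that tombstone idiom; the ERR_PTR()/IS_ERR() helpers are re-implemented here only to keep the example compilable in user space (the real ones live in include/linux/err.h), and the qos_data type is invented.

#include <stdio.h>
#include <errno.h>

/* Minimal user-space stand-ins for the kernel's ERR_PTR()/IS_ERR() helpers. */
#define MAX_ERRNO 4095

static void *ERR_PTR(long error)
{
	return (void *)error;
}

static int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

static int IS_ERR_OR_NULL(const void *ptr)
{
	return !ptr || IS_ERR(ptr);
}

/* Invented stand-ins for the per-device QoS data and struct device. */
struct qos_data { int latency; };

struct device {
	struct qos_data *qos;	/* NULL: not allocated; ERR_PTR: device gone */
};

static int add_request(struct device *dev)
{
	static struct qos_data storage;		/* stands in for kzalloc() */

	if (IS_ERR(dev->qos))
		return -ENODEV;			/* removal started, refuse */
	if (!dev->qos)
		dev->qos = &storage;		/* allocate on first use */
	return 0;
}

int main(void)
{
	struct device dev = { .qos = NULL };

	printf("before removal: %d\n", add_request(&dev));	/* 0 */
	dev.qos = ERR_PTR(-ENODEV);				/* tombstone */
	printf("after removal:  %d\n", add_request(&dev));	/* -ENODEV */
	printf("IS_ERR_OR_NULL: %d\n", IS_ERR_OR_NULL(dev.qos));
	return 0;
}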
@@ -301,32 +290,19 @@ int dev_pm_qos_add_request(struct device *dev, struct dev_pm_qos_request *req,
 		 "%s() called for already added request\n", __func__))
 		return -EINVAL;
 
-	req->dev = dev;
-
 	mutex_lock(&dev_pm_qos_mtx);
 
-	if (!dev->power.qos) {
-		if (dev->power.power_state.event == PM_EVENT_INVALID) {
-			/* The device has been removed from the system. */
-			req->dev = NULL;
-			ret = -ENODEV;
-			goto out;
-		} else {
-			/*
-			 * Allocate the constraints data on the first call to
-			 * add_request, i.e. only if the data is not already
-			 * allocated and if the device has not been removed.
-			 */
-			ret = dev_pm_qos_constraints_allocate(dev);
-		}
-	}
+	if (IS_ERR(dev->power.qos))
+		ret = -ENODEV;
+	else if (!dev->power.qos)
+		ret = dev_pm_qos_constraints_allocate(dev);
 
 	if (!ret) {
+		req->dev = dev;
 		req->type = type;
 		ret = apply_constraint(req, PM_QOS_ADD_REQ, value);
 	}
 
- out:
 	mutex_unlock(&dev_pm_qos_mtx);
 
 	return ret;
@@ -344,7 +320,14 @@ static int __dev_pm_qos_update_request(struct dev_pm_qos_request *req,
 	s32 curr_value;
 	int ret = 0;
 
-	if (!req->dev->power.qos)
+	if (!req) /*guard against callers passing in null */
+		return -EINVAL;
+
+	if (WARN(!dev_pm_qos_request_active(req),
+		 "%s() called for unknown object\n", __func__))
+		return -EINVAL;
+
+	if (IS_ERR_OR_NULL(req->dev->power.qos))
 		return -ENODEV;
 
 	switch(req->type) {
@@ -386,6 +369,17 @@ int dev_pm_qos_update_request(struct dev_pm_qos_request *req, s32 new_value)
 {
 	int ret;
 
+	mutex_lock(&dev_pm_qos_mtx);
+	ret = __dev_pm_qos_update_request(req, new_value);
+	mutex_unlock(&dev_pm_qos_mtx);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(dev_pm_qos_update_request);
+
+static int __dev_pm_qos_remove_request(struct dev_pm_qos_request *req)
+{
+	int ret;
+
 	if (!req) /*guard against callers passing in null */
 		return -EINVAL;
 
@@ -393,13 +387,13 @@ int dev_pm_qos_update_request(struct dev_pm_qos_request *req, s32 new_value)
 		 "%s() called for unknown object\n", __func__))
 		return -EINVAL;
 
-	mutex_lock(&dev_pm_qos_mtx);
-	ret = __dev_pm_qos_update_request(req, new_value);
-	mutex_unlock(&dev_pm_qos_mtx);
+	if (IS_ERR_OR_NULL(req->dev->power.qos))
+		return -ENODEV;
 
+	ret = apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE);
+	memset(req, 0, sizeof(*req));
 	return ret;
 }
-EXPORT_SYMBOL_GPL(dev_pm_qos_update_request);
 
 /**
  * dev_pm_qos_remove_request - modifies an existing qos request
@@ -418,26 +412,10 @@ EXPORT_SYMBOL_GPL(dev_pm_qos_update_request);
  */
 int dev_pm_qos_remove_request(struct dev_pm_qos_request *req)
 {
-	int ret = 0;
-
-	if (!req) /*guard against callers passing in null */
-		return -EINVAL;
-
-	if (WARN(!dev_pm_qos_request_active(req),
-		 "%s() called for unknown object\n", __func__))
-		return -EINVAL;
+	int ret;
 
 	mutex_lock(&dev_pm_qos_mtx);
-
-	if (req->dev->power.qos) {
-		ret = apply_constraint(req, PM_QOS_REMOVE_REQ,
-				       PM_QOS_DEFAULT_VALUE);
-		memset(req, 0, sizeof(*req));
-	} else {
-		/* Return if the device has been removed */
-		ret = -ENODEV;
-	}
-
+	ret = __dev_pm_qos_remove_request(req);
 	mutex_unlock(&dev_pm_qos_mtx);
 	return ret;
 }
@@ -462,9 +440,10 @@ int dev_pm_qos_add_notifier(struct device *dev, struct notifier_block *notifier)
 
 	mutex_lock(&dev_pm_qos_mtx);
 
-	if (!dev->power.qos)
-		ret = dev->power.power_state.event != PM_EVENT_INVALID ?
-			dev_pm_qos_constraints_allocate(dev) : -ENODEV;
+	if (IS_ERR(dev->power.qos))
+		ret = -ENODEV;
+	else if (!dev->power.qos)
+		ret = dev_pm_qos_constraints_allocate(dev);
 
 	if (!ret)
 		ret = blocking_notifier_chain_register(
@@ -493,7 +472,7 @@ int dev_pm_qos_remove_notifier(struct device *dev,
 	mutex_lock(&dev_pm_qos_mtx);
 
 	/* Silently return if the constraints object is not present. */
-	if (dev->power.qos)
+	if (!IS_ERR_OR_NULL(dev->power.qos))
 		retval = blocking_notifier_chain_unregister(
 				dev->power.qos->latency.notifiers,
 				notifier);
@@ -563,16 +542,20 @@ EXPORT_SYMBOL_GPL(dev_pm_qos_add_ancestor_request);
 static void __dev_pm_qos_drop_user_request(struct device *dev,
 					   enum dev_pm_qos_req_type type)
 {
+	struct dev_pm_qos_request *req = NULL;
+
 	switch(type) {
 	case DEV_PM_QOS_LATENCY:
-		dev_pm_qos_remove_request(dev->power.qos->latency_req);
+		req = dev->power.qos->latency_req;
 		dev->power.qos->latency_req = NULL;
 		break;
 	case DEV_PM_QOS_FLAGS:
-		dev_pm_qos_remove_request(dev->power.qos->flags_req);
+		req = dev->power.qos->flags_req;
 		dev->power.qos->flags_req = NULL;
 		break;
 	}
+	__dev_pm_qos_remove_request(req);
+	kfree(req);
 }
 
 /**
@@ -588,36 +571,57 @@ int dev_pm_qos_expose_latency_limit(struct device *dev, s32 value)
 	if (!device_is_registered(dev) || value < 0)
 		return -EINVAL;
 
-	if (dev->power.qos && dev->power.qos->latency_req)
-		return -EEXIST;
-
 	req = kzalloc(sizeof(*req), GFP_KERNEL);
 	if (!req)
 		return -ENOMEM;
 
 	ret = dev_pm_qos_add_request(dev, req, DEV_PM_QOS_LATENCY, value);
-	if (ret < 0)
+	if (ret < 0) {
+		kfree(req);
 		return ret;
+	}
+
+	mutex_lock(&dev_pm_qos_mtx);
+
+	if (IS_ERR_OR_NULL(dev->power.qos))
+		ret = -ENODEV;
+	else if (dev->power.qos->latency_req)
+		ret = -EEXIST;
+
+	if (ret < 0) {
+		__dev_pm_qos_remove_request(req);
+		kfree(req);
+		goto out;
+	}
 
 	dev->power.qos->latency_req = req;
 	ret = pm_qos_sysfs_add_latency(dev);
 	if (ret)
 		__dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_LATENCY);
 
+ out:
+	mutex_unlock(&dev_pm_qos_mtx);
 	return ret;
 }
 EXPORT_SYMBOL_GPL(dev_pm_qos_expose_latency_limit);
 
+static void __dev_pm_qos_hide_latency_limit(struct device *dev)
+{
+	if (!IS_ERR_OR_NULL(dev->power.qos) && dev->power.qos->latency_req) {
+		pm_qos_sysfs_remove_latency(dev);
+		__dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_LATENCY);
+	}
+}
+
 /**
  * dev_pm_qos_hide_latency_limit - Hide PM QoS latency limit from user space.
  * @dev: Device whose PM QoS latency limit is to be hidden from user space.
  */
 void dev_pm_qos_hide_latency_limit(struct device *dev)
 {
-	if (dev->power.qos && dev->power.qos->latency_req) {
-		pm_qos_sysfs_remove_latency(dev);
-		__dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_LATENCY);
-	}
+	mutex_lock(&dev_pm_qos_mtx);
+	__dev_pm_qos_hide_latency_limit(dev);
+	mutex_unlock(&dev_pm_qos_mtx);
 }
 EXPORT_SYMBOL_GPL(dev_pm_qos_hide_latency_limit);
 
@@ -634,41 +638,61 @@ int dev_pm_qos_expose_flags(struct device *dev, s32 val)
 	if (!device_is_registered(dev))
 		return -EINVAL;
 
-	if (dev->power.qos && dev->power.qos->flags_req)
-		return -EEXIST;
-
 	req = kzalloc(sizeof(*req), GFP_KERNEL);
 	if (!req)
 		return -ENOMEM;
 
-	pm_runtime_get_sync(dev);
 	ret = dev_pm_qos_add_request(dev, req, DEV_PM_QOS_FLAGS, val);
-	if (ret < 0)
-		goto fail;
+	if (ret < 0) {
+		kfree(req);
+		return ret;
+	}
 
+	pm_runtime_get_sync(dev);
+	mutex_lock(&dev_pm_qos_mtx);
+
+	if (IS_ERR_OR_NULL(dev->power.qos))
+		ret = -ENODEV;
+	else if (dev->power.qos->flags_req)
+		ret = -EEXIST;
+
+	if (ret < 0) {
+		__dev_pm_qos_remove_request(req);
+		kfree(req);
+		goto out;
+	}
 
 	dev->power.qos->flags_req = req;
 	ret = pm_qos_sysfs_add_flags(dev);
 	if (ret)
 		__dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_FLAGS);
 
-fail:
+ out:
+	mutex_unlock(&dev_pm_qos_mtx);
 	pm_runtime_put(dev);
 	return ret;
 }
 EXPORT_SYMBOL_GPL(dev_pm_qos_expose_flags);
 
+static void __dev_pm_qos_hide_flags(struct device *dev)
+{
+	if (!IS_ERR_OR_NULL(dev->power.qos) && dev->power.qos->flags_req) {
+		pm_qos_sysfs_remove_flags(dev);
+		__dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_FLAGS);
+	}
+}
+
 /**
  * dev_pm_qos_hide_flags - Hide PM QoS flags of a device from user space.
  * @dev: Device whose PM QoS flags are to be hidden from user space.
 */
 void dev_pm_qos_hide_flags(struct device *dev)
 {
-	if (dev->power.qos && dev->power.qos->flags_req) {
-		pm_qos_sysfs_remove_flags(dev);
-		pm_runtime_get_sync(dev);
-		__dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_FLAGS);
-		pm_runtime_put(dev);
-	}
+	pm_runtime_get_sync(dev);
+	mutex_lock(&dev_pm_qos_mtx);
+	__dev_pm_qos_hide_flags(dev);
+	mutex_unlock(&dev_pm_qos_mtx);
+	pm_runtime_put(dev);
 }
 EXPORT_SYMBOL_GPL(dev_pm_qos_hide_flags);
 
@@ -683,12 +707,14 @@ int dev_pm_qos_update_flags(struct device *dev, s32 mask, bool set)
 	s32 value;
 	int ret;
 
-	if (!dev->power.qos || !dev->power.qos->flags_req)
-		return -EINVAL;
-
 	pm_runtime_get_sync(dev);
 	mutex_lock(&dev_pm_qos_mtx);
 
+	if (IS_ERR_OR_NULL(dev->power.qos) || !dev->power.qos->flags_req) {
+		ret = -EINVAL;
+		goto out;
+	}
+
 	value = dev_pm_qos_requested_flags(dev);
 	if (set)
 		value |= mask;
@@ -697,9 +723,12 @@ int dev_pm_qos_update_flags(struct device *dev, s32 mask, bool set)
 
 	ret = __dev_pm_qos_update_request(dev->power.qos->flags_req, value);
 
+ out:
 	mutex_unlock(&dev_pm_qos_mtx);
 	pm_runtime_put(dev);
 	return ret;
 }
 #else /* !CONFIG_PM_RUNTIME */
+static void __dev_pm_qos_hide_latency_limit(struct device *dev) {}
+static void __dev_pm_qos_hide_flags(struct device *dev) {}
 #endif /* CONFIG_PM_RUNTIME */
drivers/base/power/sysfs.c

@@ -708,6 +708,7 @@ void rpm_sysfs_remove(struct device *dev)
 
 void dpm_sysfs_remove(struct device *dev)
 {
+	dev_pm_qos_constraints_destroy(dev);
 	rpm_sysfs_remove(dev);
 	sysfs_unmerge_group(&dev->kobj, &pm_wakeup_attr_group);
 	sysfs_remove_group(&dev->kobj, &pm_attr_group);
drivers/cpufreq/cpufreq_governor.h

@@ -64,7 +64,7 @@ static void *get_cpu_dbs_info_s(int cpu)				\
  * dbs: used as a shortform for demand based switching It helps to keep variable
  *	names smaller, simpler
  * cdbs: common dbs
- * on_*: On-demand governor
+ * od_*: On-demand governor
  * cs_*: Conservative governor
  */
drivers/cpufreq/highbank-cpufreq.c

@@ -28,13 +28,7 @@
 
 static int hb_voltage_change(unsigned int freq)
 {
-	int i;
-	u32 msg[HB_CPUFREQ_IPC_LEN];
-
-	msg[0] = HB_CPUFREQ_CHANGE_NOTE;
-	msg[1] = freq / 1000000;
-	for (i = 2; i < HB_CPUFREQ_IPC_LEN; i++)
-		msg[i] = 0;
+	u32 msg[HB_CPUFREQ_IPC_LEN] = {HB_CPUFREQ_CHANGE_NOTE, freq / 1000000};
 
 	return pl320_ipc_transmit(msg);
 }
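The Highbank change above relies on a C guarantee: when an initializer list supplies fewer elements than the array holds, the remaining elements are zero-initialized, so the explicit loop that wrote zeros was redundant. A minimal stand-alone demonstration (array size and values here are arbitrary, not the driver's constants):

#include <stdio.h>

int main(void)
{
	/* Only the first two elements are given; the compiler zero-fills
	 * the rest, which is what the partial initializer in the hunk
	 * above depends on. */
	unsigned int msg[7] = { 42, 1400 };

	for (int i = 0; i < 7; i++)
		printf("msg[%d] = %u\n", i, msg[i]);
	return 0;
}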
drivers/cpufreq/intel_pstate.c

@@ -662,6 +662,9 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy)
 
 	cpu = all_cpu_data[policy->cpu];
 
+	if (!policy->cpuinfo.max_freq)
+		return -ENODEV;
+
 	intel_pstate_get_min_max(cpu, &min, &max);
 
 	limits.min_perf_pct = (policy->min * 100) / policy->cpuinfo.max_freq;
@@ -747,37 +750,11 @@ static struct cpufreq_driver intel_pstate_driver = {
 	.owner		= THIS_MODULE,
 };
 
-static void intel_pstate_exit(void)
-{
-	int cpu;
-
-	sysfs_remove_group(intel_pstate_kobject,
-				&intel_pstate_attr_group);
-	debugfs_remove_recursive(debugfs_parent);
-
-	cpufreq_unregister_driver(&intel_pstate_driver);
-
-	if (!all_cpu_data)
-		return;
-
-	get_online_cpus();
-	for_each_online_cpu(cpu) {
-		if (all_cpu_data[cpu]) {
-			del_timer_sync(&all_cpu_data[cpu]->timer);
-			kfree(all_cpu_data[cpu]);
-		}
-	}
-
-	put_online_cpus();
-	vfree(all_cpu_data);
-}
-module_exit(intel_pstate_exit);
-
 static int __initdata no_load;
 
 static int __init intel_pstate_init(void)
 {
-	int rc = 0;
+	int cpu, rc = 0;
 	const struct x86_cpu_id *id;
 
 	if (no_load)
@@ -802,7 +779,16 @@ static int __init intel_pstate_init(void)
 	intel_pstate_sysfs_expose_params();
 	return rc;
 out:
-	intel_pstate_exit();
+	get_online_cpus();
+	for_each_online_cpu(cpu) {
+		if (all_cpu_data[cpu]) {
+			del_timer_sync(&all_cpu_data[cpu]->timer);
+			kfree(all_cpu_data[cpu]);
+		}
+	}
+
+	put_online_cpus();
+	vfree(all_cpu_data);
 	return -ENODEV;
 }
 device_initcall(intel_pstate_init);
drivers/mailbox/pl320-ipc.c

@@ -138,8 +138,7 @@ int pl320_ipc_unregister_notifier(struct notifier_block *nb)
 }
 EXPORT_SYMBOL_GPL(pl320_ipc_unregister_notifier);
 
-static int __init pl320_probe(struct amba_device *adev,
-		const struct amba_id *id)
+static int pl320_probe(struct amba_device *adev, const struct amba_id *id)
 {
 	int ret;
 
drivers/pci/pci-acpi.c

@@ -331,8 +331,14 @@ static void pci_acpi_cleanup(struct device *dev)
 	}
 }
 
+static bool pci_acpi_bus_match(struct device *dev)
+{
+	return dev->bus == &pci_bus_type;
+}
+
 static struct acpi_bus_type acpi_pci_bus = {
-	.bus = &pci_bus_type,
+	.name = "PCI",
+	.match = pci_acpi_bus_match,
 	.find_device = acpi_pci_find_device,
 	.setup = pci_acpi_setup,
 	.cleanup = pci_acpi_cleanup,
drivers/pnp/pnpacpi/core.c

@@ -353,8 +353,14 @@ static int __init acpi_pnp_find_device(struct device *dev, acpi_handle * handle)
 /* complete initialization of a PNPACPI device includes having
  * pnpdev->dev.archdata.acpi_handle point to its ACPI sibling.
  */
+static bool acpi_pnp_bus_match(struct device *dev)
+{
+	return dev->bus == &pnp_bus_type;
+}
+
 static struct acpi_bus_type __initdata acpi_pnp_bus = {
-	.bus = &pnp_bus_type,
+	.name = "PNP",
+	.match = acpi_pnp_bus_match,
 	.find_device = acpi_pnp_find_device,
 };
 
drivers/scsi/scsi_lib.c

@@ -71,9 +71,14 @@ struct kmem_cache *scsi_sdb_cache;
 #ifdef CONFIG_ACPI
 #include <acpi/acpi_bus.h>
 
+static bool acpi_scsi_bus_match(struct device *dev)
+{
+	return dev->bus == &scsi_bus_type;
+}
+
 int scsi_register_acpi_bus_type(struct acpi_bus_type *bus)
 {
-	bus->bus = &scsi_bus_type;
+	bus->match = acpi_scsi_bus_match;
 	return register_acpi_bus_type(bus);
 }
 EXPORT_SYMBOL_GPL(scsi_register_acpi_bus_type);
drivers/usb/core/usb-acpi.c

@@ -210,9 +210,14 @@ static int usb_acpi_find_device(struct device *dev, acpi_handle *handle)
 	return 0;
 }
 
+static bool usb_acpi_bus_match(struct device *dev)
+{
+	return is_usb_device(dev) || is_usb_port(dev);
+}
+
 static struct acpi_bus_type usb_acpi_bus = {
-	.bus = &usb_bus_type,
-	.find_bridge = usb_acpi_find_device,
+	.name = "USB",
+	.match = usb_acpi_bus_match,
 	.find_device = usb_acpi_find_device,
 };
 
include/acpi/acpi_bus.h

@@ -437,11 +437,9 @@ void acpi_remove_dir(struct acpi_device *);
  */
 struct acpi_bus_type {
 	struct list_head list;
-	struct bus_type *bus;
-	/* For general devices under the bus */
+	const char *name;
+	bool (*match)(struct device *dev);
 	int (*find_device) (struct device *, acpi_handle *);
-	/* For bridges, such as PCI root bridge, IDE controller */
-	int (*find_bridge) (struct device *, acpi_handle *);
 	void (*setup)(struct device *);
 	void (*cleanup)(struct device *);
 };