cpumask: convert shared_cpu_map in acpi_processor* structs to cpumask_var_t
Impact: Reduce memory usage, use new API.

This is part of an effort to reduce structure sizes for machines
configured with large NR_CPUS. cpumask_t gets replaced by
cpumask_var_t, which is either struct cpumask[1] (small NR_CPUS) or
struct cpumask * (large NR_CPUS).

(Changes to powernow-k* by <travis>.)

Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
Signed-off-by: Mike Travis <travis@sgi.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
commit 2fdf66b491
parent ee943a82b6
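For context, the conversion pattern applied throughout the patch looks roughly like the sketch below. This is illustrative only: `my_domain` and its functions are made-up names, not part of the patch. With CONFIG_CPUMASK_OFFSTACK unset, cpumask_var_t is struct cpumask[1], so the mask stays embedded in the structure and alloc_cpumask_var()/free_cpumask_var() cost nothing; with it set, cpumask_var_t is struct cpumask * and the bitmap is allocated separately, which is what shrinks structures on large-NR_CPUS builds.

    #include <linux/cpumask.h>
    #include <linux/slab.h>

    /* Hypothetical struct standing in for acpi_processor_performance etc. */
    struct my_domain {
            cpumask_var_t shared_cpu_map;   /* was: cpumask_t shared_cpu_map; */
    };

    static int my_domain_init(struct my_domain *d, int cpu)
    {
            /* Off-stack builds kmalloc the bitmap; inline builds return true. */
            if (!alloc_cpumask_var(&d->shared_cpu_map, GFP_KERNEL))
                    return -ENOMEM;
            cpumask_clear(d->shared_cpu_map);        /* was: cpus_clear() */
            cpumask_set_cpu(cpu, d->shared_cpu_map); /* was: cpu_set() */
            return 0;
    }

    static void my_domain_free(struct my_domain *d)
    {
            /* Safe on a never-allocated (NULL) mask, as the patch relies on. */
            free_cpumask_var(d->shared_cpu_map);
    }

Note that plain assignment of masks no longer works once the member may be a pointer: it either fails to compile (array case) or aliases the same bitmap (pointer case). That is why every `a = b` on shared_cpu_map below becomes cpumask_copy(a, b).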
--- a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
+++ b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
@@ -517,6 +517,17 @@ acpi_cpufreq_guess_freq(struct acpi_cpufreq_data *data, unsigned int cpu)
 	}
 }
 
+static void free_acpi_perf_data(void)
+{
+	unsigned int i;
+
+	/* Freeing a NULL pointer is OK, and alloc_percpu zeroes. */
+	for_each_possible_cpu(i)
+		free_cpumask_var(per_cpu_ptr(acpi_perf_data, i)
+				 ->shared_cpu_map);
+	free_percpu(acpi_perf_data);
+}
+
 /*
  * acpi_cpufreq_early_init - initialize ACPI P-States library
  *
@@ -527,6 +538,7 @@ acpi_cpufreq_guess_freq(struct acpi_cpufreq_data *data, unsigned int cpu)
  */
 static int __init acpi_cpufreq_early_init(void)
 {
+	unsigned int i;
 	dprintk("acpi_cpufreq_early_init\n");
 
 	acpi_perf_data = alloc_percpu(struct acpi_processor_performance);
@@ -534,6 +546,15 @@ static int __init acpi_cpufreq_early_init(void)
 		dprintk("Memory allocation error for acpi_perf_data.\n");
 		return -ENOMEM;
 	}
+	for_each_possible_cpu(i) {
+		if (!alloc_cpumask_var(&per_cpu_ptr(acpi_perf_data, i)
+				       ->shared_cpu_map, GFP_KERNEL)) {
+
+			/* Freeing a NULL pointer is OK: alloc_percpu zeroes. */
+			free_acpi_perf_data();
+			return -ENOMEM;
+		}
+	}
 
 	/* Do initialization in ACPI core */
 	acpi_processor_preregister_performance(acpi_perf_data);
@@ -604,9 +625,9 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
 	 */
 	if (policy->shared_type == CPUFREQ_SHARED_TYPE_ALL ||
 	    policy->shared_type == CPUFREQ_SHARED_TYPE_ANY) {
-		policy->cpus = perf->shared_cpu_map;
+		cpumask_copy(&policy->cpus, perf->shared_cpu_map);
 	}
-	policy->related_cpus = perf->shared_cpu_map;
+	cpumask_copy(&policy->related_cpus, perf->shared_cpu_map);
 
 #ifdef CONFIG_SMP
 	dmi_check_system(sw_any_bug_dmi_table);
@@ -795,7 +816,7 @@ static int __init acpi_cpufreq_init(void)
 
 	ret = cpufreq_register_driver(&acpi_cpufreq_driver);
 	if (ret)
-		free_percpu(acpi_perf_data);
+		free_acpi_perf_data();
 
 	return ret;
 }
--- a/arch/x86/kernel/cpu/cpufreq/powernow-k7.c
+++ b/arch/x86/kernel/cpu/cpufreq/powernow-k7.c
@@ -310,6 +310,12 @@ static int powernow_acpi_init(void)
 		goto err0;
 	}
 
+	if (!alloc_cpumask_var(&acpi_processor_perf->shared_cpu_map,
+								GFP_KERNEL)) {
+		retval = -ENOMEM;
+		goto err05;
+	}
+
 	if (acpi_processor_register_performance(acpi_processor_perf, 0)) {
 		retval = -EIO;
 		goto err1;
@@ -412,6 +418,8 @@ static int powernow_acpi_init(void)
 err2:
 	acpi_processor_unregister_performance(acpi_processor_perf, 0);
 err1:
+	free_cpumask_var(acpi_processor_perf->shared_cpu_map);
+err05:
 	kfree(acpi_processor_perf);
 err0:
 	printk(KERN_WARNING PFX "ACPI perflib can not be used in this platform\n");
@@ -652,6 +660,7 @@ static int powernow_cpu_exit (struct cpufreq_policy *policy) {
 #ifdef CONFIG_X86_POWERNOW_K7_ACPI
 	if (acpi_processor_perf) {
 		acpi_processor_unregister_performance(acpi_processor_perf, 0);
+		free_cpumask_var(acpi_processor_perf->shared_cpu_map);
 		kfree(acpi_processor_perf);
 	}
 #endif
--- a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
+++ b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
@@ -766,7 +766,7 @@ static void powernow_k8_acpi_pst_values(struct powernow_k8_data *data, unsigned
 static int powernow_k8_cpu_init_acpi(struct powernow_k8_data *data)
 {
 	struct cpufreq_frequency_table *powernow_table;
-	int ret_val;
+	int ret_val = -ENODEV;
 
 	if (acpi_processor_register_performance(&data->acpi_data, data->cpu)) {
 		dprintk("register performance failed: bad ACPI data\n");
@@ -815,6 +815,13 @@ static int powernow_k8_cpu_init_acpi(struct powernow_k8_data *data)
 	/* notify BIOS that we exist */
 	acpi_processor_notify_smm(THIS_MODULE);
 
+	if (!alloc_cpumask_var(&data->acpi_data.shared_cpu_map, GFP_KERNEL)) {
+		printk(KERN_ERR PFX
+				"unable to alloc powernow_k8_data cpumask\n");
+		ret_val = -ENOMEM;
+		goto err_out_mem;
+	}
+
 	return 0;
 
 err_out_mem:
@@ -826,7 +833,7 @@ static int powernow_k8_cpu_init_acpi(struct powernow_k8_data *data)
 	/* data->acpi_data.state_count informs us at ->exit() whether ACPI was used */
 	data->acpi_data.state_count = 0;
 
-	return -ENODEV;
+	return ret_val;
 }
 
 static int fill_powernow_table_pstate(struct powernow_k8_data *data, struct cpufreq_frequency_table *powernow_table)
@@ -929,6 +936,7 @@ static void powernow_k8_cpu_exit_acpi(struct powernow_k8_data *data)
 {
 	if (data->acpi_data.state_count)
 		acpi_processor_unregister_performance(&data->acpi_data, data->cpu);
+	free_cpumask_var(data->acpi_data.shared_cpu_map);
 }
 
 #else
@@ -1134,7 +1142,8 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol)
 	data->cpu = pol->cpu;
 	data->currpstate = HW_PSTATE_INVALID;
 
-	if (powernow_k8_cpu_init_acpi(data)) {
+	rc = powernow_k8_cpu_init_acpi(data);
+	if (rc) {
 		/*
 		 * Use the PSB BIOS structure. This is only availabe on
 		 * an UP version, and is deprecated by AMD.
@@ -1152,20 +1161,17 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol)
 			       "ACPI maintainers and complain to your BIOS "
 			       "vendor.\n");
 #endif
-			kfree(data);
-			return -ENODEV;
+			goto err_out;
 		}
 		if (pol->cpu != 0) {
 			printk(KERN_ERR FW_BUG PFX "No ACPI _PSS objects for "
 			       "CPU other than CPU0. Complain to your BIOS "
 			       "vendor.\n");
-			kfree(data);
-			return -ENODEV;
+			goto err_out;
 		}
 		rc = find_psb_table(data);
 		if (rc) {
-			kfree(data);
-			return -ENODEV;
+			goto err_out;
 		}
 	}
 
--- a/drivers/acpi/processor_core.c
+++ b/drivers/acpi/processor_core.c
@@ -826,6 +826,11 @@ static int acpi_processor_add(struct acpi_device *device)
 	if (!pr)
 		return -ENOMEM;
 
+	if (!alloc_cpumask_var(&pr->throttling.shared_cpu_map, GFP_KERNEL)) {
+		kfree(pr);
+		return -ENOMEM;
+	}
+
 	pr->handle = device->handle;
 	strcpy(acpi_device_name(device), ACPI_PROCESSOR_DEVICE_NAME);
 	strcpy(acpi_device_class(device), ACPI_PROCESSOR_CLASS);
@@ -845,10 +850,8 @@ static int acpi_processor_remove(struct acpi_device *device, int type)
 
 	pr = acpi_driver_data(device);
 
-	if (pr->id >= nr_cpu_ids) {
-		kfree(pr);
-		return 0;
-	}
+	if (pr->id >= nr_cpu_ids)
+		goto free;
 
 	if (type == ACPI_BUS_REMOVAL_EJECT) {
 		if (acpi_processor_handle_eject(pr))
@@ -873,6 +876,9 @@ static int acpi_processor_remove(struct acpi_device *device, int type)
 
 	per_cpu(processors, pr->id) = NULL;
 	per_cpu(processor_device_array, pr->id) = NULL;
+
+free:
+	free_cpumask_var(pr->throttling.shared_cpu_map);
 	kfree(pr);
 
 	return 0;
--- a/drivers/acpi/processor_perflib.c
+++ b/drivers/acpi/processor_perflib.c
@@ -588,12 +588,15 @@ int acpi_processor_preregister_performance(
 	int count, count_target;
 	int retval = 0;
 	unsigned int i, j;
-	cpumask_t covered_cpus;
+	cpumask_var_t covered_cpus;
 	struct acpi_processor *pr;
 	struct acpi_psd_package *pdomain;
 	struct acpi_processor *match_pr;
 	struct acpi_psd_package *match_pdomain;
 
+	if (!alloc_cpumask_var(&covered_cpus, GFP_KERNEL))
+		return -ENOMEM;
+
 	mutex_lock(&performance_mutex);
 
 	retval = 0;
@@ -617,7 +620,7 @@ int acpi_processor_preregister_performance(
 		}
 
 		pr->performance = percpu_ptr(performance, i);
-		cpu_set(i, pr->performance->shared_cpu_map);
+		cpumask_set_cpu(i, pr->performance->shared_cpu_map);
 		if (acpi_processor_get_psd(pr)) {
 			retval = -EINVAL;
 			continue;
@@ -650,18 +653,18 @@ int acpi_processor_preregister_performance(
 		}
 	}
 
-	cpus_clear(covered_cpus);
+	cpumask_clear(covered_cpus);
 	for_each_possible_cpu(i) {
 		pr = per_cpu(processors, i);
 		if (!pr)
 			continue;
 
-		if (cpu_isset(i, covered_cpus))
+		if (cpumask_test_cpu(i, covered_cpus))
 			continue;
 
 		pdomain = &(pr->performance->domain_info);
-		cpu_set(i, pr->performance->shared_cpu_map);
-		cpu_set(i, covered_cpus);
+		cpumask_set_cpu(i, pr->performance->shared_cpu_map);
+		cpumask_set_cpu(i, covered_cpus);
 		if (pdomain->num_processors <= 1)
 			continue;
 
@@ -699,8 +702,8 @@ int acpi_processor_preregister_performance(
 				goto err_ret;
 			}
 
-			cpu_set(j, covered_cpus);
-			cpu_set(j, pr->performance->shared_cpu_map);
+			cpumask_set_cpu(j, covered_cpus);
+			cpumask_set_cpu(j, pr->performance->shared_cpu_map);
 			count++;
 		}
 
@@ -718,8 +721,8 @@ int acpi_processor_preregister_performance(
 
 			match_pr->performance->shared_type =
 					pr->performance->shared_type;
-			match_pr->performance->shared_cpu_map =
-				pr->performance->shared_cpu_map;
+			cpumask_copy(match_pr->performance->shared_cpu_map,
+				     pr->performance->shared_cpu_map);
 		}
 	}
 
@@ -731,14 +734,15 @@ int acpi_processor_preregister_performance(
 
 		/* Assume no coordination on any error parsing domain info */
 		if (retval) {
-			cpus_clear(pr->performance->shared_cpu_map);
-			cpu_set(i, pr->performance->shared_cpu_map);
+			cpumask_clear(pr->performance->shared_cpu_map);
+			cpumask_set_cpu(i, pr->performance->shared_cpu_map);
 			pr->performance->shared_type = CPUFREQ_SHARED_TYPE_ALL;
 		}
 		pr->performance = NULL; /* Will be set for real in register */
 	}
 
 	mutex_unlock(&performance_mutex);
+	free_cpumask_var(covered_cpus);
 	return retval;
 }
 EXPORT_SYMBOL(acpi_processor_preregister_performance);
--- a/drivers/acpi/processor_throttling.c
+++ b/drivers/acpi/processor_throttling.c
@@ -61,11 +61,14 @@ static int acpi_processor_update_tsd_coord(void)
 	int count, count_target;
 	int retval = 0;
 	unsigned int i, j;
-	cpumask_t covered_cpus;
+	cpumask_var_t covered_cpus;
 	struct acpi_processor *pr, *match_pr;
 	struct acpi_tsd_package *pdomain, *match_pdomain;
 	struct acpi_processor_throttling *pthrottling, *match_pthrottling;
 
+	if (!alloc_cpumask_var(&covered_cpus, GFP_KERNEL))
+		return -ENOMEM;
+
 	/*
 	 * Now that we have _TSD data from all CPUs, lets setup T-state
 	 * coordination between all CPUs.
@@ -91,19 +94,19 @@ static int acpi_processor_update_tsd_coord(void)
 	if (retval)
 		goto err_ret;
 
-	cpus_clear(covered_cpus);
+	cpumask_clear(covered_cpus);
 	for_each_possible_cpu(i) {
 		pr = per_cpu(processors, i);
 		if (!pr)
 			continue;
 
-		if (cpu_isset(i, covered_cpus))
+		if (cpumask_test_cpu(i, covered_cpus))
 			continue;
 		pthrottling = &pr->throttling;
 
 		pdomain = &(pthrottling->domain_info);
-		cpu_set(i, pthrottling->shared_cpu_map);
-		cpu_set(i, covered_cpus);
+		cpumask_set_cpu(i, pthrottling->shared_cpu_map);
+		cpumask_set_cpu(i, covered_cpus);
 		/*
 		 * If the number of processor in the TSD domain is 1, it is
 		 * unnecessary to parse the coordination for this CPU.
@@ -144,8 +147,8 @@ static int acpi_processor_update_tsd_coord(void)
 				goto err_ret;
 			}
 
-			cpu_set(j, covered_cpus);
-			cpu_set(j, pthrottling->shared_cpu_map);
+			cpumask_set_cpu(j, covered_cpus);
+			cpumask_set_cpu(j, pthrottling->shared_cpu_map);
 			count++;
 		}
 		for_each_possible_cpu(j) {
@@ -165,12 +168,14 @@ static int acpi_processor_update_tsd_coord(void)
 			 * If some CPUS have the same domain, they
 			 * will have the same shared_cpu_map.
 			 */
-			match_pthrottling->shared_cpu_map =
-				pthrottling->shared_cpu_map;
+			cpumask_copy(match_pthrottling->shared_cpu_map,
+				     pthrottling->shared_cpu_map);
 		}
 	}
 
 err_ret:
+	free_cpumask_var(covered_cpus);
+
 	for_each_possible_cpu(i) {
 		pr = per_cpu(processors, i);
 		if (!pr)
@@ -182,8 +187,8 @@ static int acpi_processor_update_tsd_coord(void)
 		 */
 		if (retval) {
 			pthrottling = &(pr->throttling);
-			cpus_clear(pthrottling->shared_cpu_map);
-			cpu_set(i, pthrottling->shared_cpu_map);
+			cpumask_clear(pthrottling->shared_cpu_map);
+			cpumask_set_cpu(i, pthrottling->shared_cpu_map);
 			pthrottling->shared_type = DOMAIN_COORD_TYPE_SW_ALL;
 		}
 	}
@@ -567,7 +572,7 @@ static int acpi_processor_get_tsd(struct acpi_processor *pr)
 	pthrottling = &pr->throttling;
 	pthrottling->tsd_valid_flag = 1;
 	pthrottling->shared_type = pdomain->coord_type;
-	cpu_set(pr->id, pthrottling->shared_cpu_map);
+	cpumask_set_cpu(pr->id, pthrottling->shared_cpu_map);
 	/*
 	 * If the coordination type is not defined in ACPI spec,
 	 * the tsd_valid_flag will be clear and coordination type
@@ -826,7 +831,7 @@ static int acpi_processor_get_throttling_ptc(struct acpi_processor *pr)
 
 static int acpi_processor_get_throttling(struct acpi_processor *pr)
 {
-	cpumask_t saved_mask;
+	cpumask_var_t saved_mask;
 	int ret;
 
 	if (!pr)
@@ -834,14 +839,20 @@ static int acpi_processor_get_throttling(struct acpi_processor *pr)
 
 	if (!pr->flags.throttling)
 		return -ENODEV;
+
+	if (!alloc_cpumask_var(&saved_mask, GFP_KERNEL))
+		return -ENOMEM;
+
 	/*
 	 * Migrate task to the cpu pointed by pr.
 	 */
-	saved_mask = current->cpus_allowed;
-	set_cpus_allowed_ptr(current, &cpumask_of_cpu(pr->id));
+	cpumask_copy(saved_mask, &current->cpus_allowed);
+	/* FIXME: use work_on_cpu() */
+	set_cpus_allowed_ptr(current, cpumask_of(pr->id));
 	ret = pr->throttling.acpi_processor_get_throttling(pr);
 	/* restore the previous state */
-	set_cpus_allowed_ptr(current, &saved_mask);
+	set_cpus_allowed_ptr(current, saved_mask);
+	free_cpumask_var(saved_mask);
 
 	return ret;
 }
@@ -986,13 +997,13 @@ static int acpi_processor_set_throttling_ptc(struct acpi_processor *pr,
 
 int acpi_processor_set_throttling(struct acpi_processor *pr, int state)
 {
-	cpumask_t saved_mask;
+	cpumask_var_t saved_mask;
 	int ret = 0;
 	unsigned int i;
 	struct acpi_processor *match_pr;
 	struct acpi_processor_throttling *p_throttling;
 	struct throttling_tstate t_state;
-	cpumask_t online_throttling_cpus;
+	cpumask_var_t online_throttling_cpus;
 
 	if (!pr)
 		return -EINVAL;
@@ -1003,17 +1014,25 @@ int acpi_processor_set_throttling(struct acpi_processor *pr, int state)
 	if ((state < 0) || (state > (pr->throttling.state_count - 1)))
 		return -EINVAL;
 
-	saved_mask = current->cpus_allowed;
+	if (!alloc_cpumask_var(&saved_mask, GFP_KERNEL))
+		return -ENOMEM;
+
+	if (!alloc_cpumask_var(&online_throttling_cpus, GFP_KERNEL)) {
+		free_cpumask_var(saved_mask);
+		return -ENOMEM;
+	}
+
+	cpumask_copy(saved_mask, &current->cpus_allowed);
 	t_state.target_state = state;
 	p_throttling = &(pr->throttling);
-	cpus_and(online_throttling_cpus, cpu_online_map,
-			p_throttling->shared_cpu_map);
+	cpumask_and(online_throttling_cpus, cpu_online_mask,
+		    p_throttling->shared_cpu_map);
 	/*
 	 * The throttling notifier will be called for every
 	 * affected cpu in order to get one proper T-state.
 	 * The notifier event is THROTTLING_PRECHANGE.
 	 */
-	for_each_cpu_mask_nr(i, online_throttling_cpus) {
+	for_each_cpu(i, online_throttling_cpus) {
 		t_state.cpu = i;
 		acpi_processor_throttling_notifier(THROTTLING_PRECHANGE,
 							&t_state);
@@ -1025,7 +1044,8 @@ int acpi_processor_set_throttling(struct acpi_processor *pr, int state)
 	 * it can be called only for the cpu pointed by pr.
 	 */
 	if (p_throttling->shared_type == DOMAIN_COORD_TYPE_SW_ANY) {
-		set_cpus_allowed_ptr(current, &cpumask_of_cpu(pr->id));
+		/* FIXME: use work_on_cpu() */
+		set_cpus_allowed_ptr(current, cpumask_of(pr->id));
 		ret = p_throttling->acpi_processor_set_throttling(pr,
 						t_state.target_state);
 	} else {
@@ -1034,7 +1054,7 @@ int acpi_processor_set_throttling(struct acpi_processor *pr, int state)
 		 * it is necessary to set T-state for every affected
 		 * cpus.
 		 */
-		for_each_cpu_mask_nr(i, online_throttling_cpus) {
+		for_each_cpu(i, online_throttling_cpus) {
 			match_pr = per_cpu(processors, i);
 			/*
 			 * If the pointer is invalid, we will report the
@@ -1056,7 +1076,8 @@ int acpi_processor_set_throttling(struct acpi_processor *pr, int state)
 				continue;
 			}
 			t_state.cpu = i;
-			set_cpus_allowed_ptr(current, &cpumask_of_cpu(i));
+			/* FIXME: use work_on_cpu() */
+			set_cpus_allowed_ptr(current, cpumask_of(i));
 			ret = match_pr->throttling.
 				acpi_processor_set_throttling(
 				match_pr, t_state.target_state);
@@ -1068,13 +1089,16 @@ int acpi_processor_set_throttling(struct acpi_processor *pr, int state)
 	 * affected cpu to update the T-states.
 	 * The notifier event is THROTTLING_POSTCHANGE
 	 */
-	for_each_cpu_mask_nr(i, online_throttling_cpus) {
+	for_each_cpu(i, online_throttling_cpus) {
 		t_state.cpu = i;
 		acpi_processor_throttling_notifier(THROTTLING_POSTCHANGE,
 							&t_state);
 	}
 	/* restore the previous state */
-	set_cpus_allowed_ptr(current, &saved_mask);
+	/* FIXME: use work_on_cpu() */
+	set_cpus_allowed_ptr(current, saved_mask);
+	free_cpumask_var(online_throttling_cpus);
+	free_cpumask_var(saved_mask);
 	return ret;
 }
 
@@ -1120,7 +1144,7 @@ int acpi_processor_get_throttling_info(struct acpi_processor *pr)
 	if (acpi_processor_get_tsd(pr)) {
 		pthrottling = &pr->throttling;
 		pthrottling->tsd_valid_flag = 0;
-		cpu_set(pr->id, pthrottling->shared_cpu_map);
+		cpumask_set_cpu(pr->id, pthrottling->shared_cpu_map);
 		pthrottling->shared_type = DOMAIN_COORD_TYPE_SW_ALL;
 	}
 
--- a/include/acpi/processor.h
+++ b/include/acpi/processor.h
@@ -127,7 +127,7 @@ struct acpi_processor_performance {
 	unsigned int state_count;
 	struct acpi_processor_px *states;
 	struct acpi_psd_package domain_info;
-	cpumask_t shared_cpu_map;
+	cpumask_var_t shared_cpu_map;
 	unsigned int shared_type;
 };
 
@@ -172,7 +172,7 @@ struct acpi_processor_throttling {
 	unsigned int state_count;
 	struct acpi_processor_tx_tss *states_tss;
 	struct acpi_tsd_package domain_info;
-	cpumask_t shared_cpu_map;
+	cpumask_var_t shared_cpu_map;
 	int (*acpi_processor_get_throttling) (struct acpi_processor * pr);
 	int (*acpi_processor_set_throttling) (struct acpi_processor * pr,
 					      int state);