Merge tag 'smp-core-2020-01-28' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull core SMP updates from Thomas Gleixner:
 "A small set of SMP core code changes:

   - Rework the smp function call core code to avoid the allocation of
     an additional cpumask

   - Remove the no longer required GFP argument from on_each_cpu_cond()
     and on_each_cpu_cond_mask() and fix up the callers"

* tag 'smp-core-2020-01-28' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  smp: Remove allocation mask from on_each_cpu_cond.*()
  smp: Add a smp_cond_func_t argument to smp_call_function_many()
  smp: Use smp_cond_func_t as type for the conditional function
commit ab67f60025
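For callers, the visible change is simply the dropped GFP argument. A minimal before/after fragment (my_cond() and my_func() are placeholder names, not from this commit; the real callers are updated in the hunks below):

	/* Before: callers passed a GFP mask for the cpumask the core allocated. */
	on_each_cpu_cond(my_cond, my_func, NULL, 1, GFP_KERNEL);

	/* After: the core no longer allocates at call time, so the argument is gone. */
	on_each_cpu_cond(my_cond, my_func, NULL, 1);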
diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
@@ -708,7 +708,7 @@ void native_flush_tlb_others(const struct cpumask *cpumask,
 			       (void *)info, 1);
 	else
 		on_each_cpu_cond_mask(tlb_is_not_lazy, flush_tlb_func_remote,
-				(void *)info, 1, GFP_ATOMIC, cpumask);
+				(void *)info, 1, cpumask);
 }
 
 /*
diff --git a/fs/buffer.c b/fs/buffer.c
@@ -1433,7 +1433,7 @@ static bool has_bh_in_lru(int cpu, void *dummy)
 
 void invalidate_bh_lrus(void)
 {
-	on_each_cpu_cond(has_bh_in_lru, invalidate_bh_lru, NULL, 1, GFP_KERNEL);
+	on_each_cpu_cond(has_bh_in_lru, invalidate_bh_lru, NULL, 1);
 }
 EXPORT_SYMBOL_GPL(invalidate_bh_lrus);
 
diff --git a/include/linux/smp.h b/include/linux/smp.h
@@ -15,6 +15,7 @@
 #include <linux/llist.h>
 
 typedef void (*smp_call_func_t)(void *info);
+typedef bool (*smp_cond_func_t)(int cpu, void *info);
 struct __call_single_data {
 	struct llist_node llist;
 	smp_call_func_t func;
@@ -49,13 +50,11 @@ void on_each_cpu_mask(const struct cpumask *mask, smp_call_func_t func,
  * cond_func returns a positive value. This may include the local
  * processor.
  */
-void on_each_cpu_cond(bool (*cond_func)(int cpu, void *info),
-		smp_call_func_t func, void *info, bool wait,
-		gfp_t gfp_flags);
+void on_each_cpu_cond(smp_cond_func_t cond_func, smp_call_func_t func,
+		      void *info, bool wait);
 
-void on_each_cpu_cond_mask(bool (*cond_func)(int cpu, void *info),
-		smp_call_func_t func, void *info, bool wait,
-		gfp_t gfp_flags, const struct cpumask *mask);
+void on_each_cpu_cond_mask(smp_cond_func_t cond_func, smp_call_func_t func,
+			   void *info, bool wait, const struct cpumask *mask);
 
 int smp_call_function_single_async(int cpu, call_single_data_t *csd);
 
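To illustrate the new typedef-based prototypes, here is a minimal, hypothetical usage sketch. The per-CPU flag and every function name below are invented for illustration; they are not part of this commit:

	#include <linux/percpu.h>
	#include <linux/smp.h>

	/* Hypothetical per-CPU flag, used only for this sketch. */
	static DEFINE_PER_CPU(bool, pending_work);

	/*
	 * smp_cond_func_t: runs on the calling CPU once per candidate CPU
	 * and decides whether that CPU should be sent the IPI at all.
	 */
	static bool cpu_has_pending_work(int cpu, void *info)
	{
		return per_cpu(pending_work, cpu);
	}

	/* smp_call_func_t: the cross-call itself, run on each selected CPU. */
	static void drain_pending_work(void *info)
	{
		this_cpu_write(pending_work, false);
	}

	static void drain_all_cpus(void)
	{
		/* wait=1: return only after every selected CPU ran the function. */
		on_each_cpu_cond(cpu_has_pending_work, drain_pending_work, NULL, 1);
	}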
diff --git a/kernel/smp.c b/kernel/smp.c
@@ -395,22 +395,9 @@ int smp_call_function_any(const struct cpumask *mask,
 }
 EXPORT_SYMBOL_GPL(smp_call_function_any);
 
-/**
- * smp_call_function_many(): Run a function on a set of other CPUs.
- * @mask: The set of cpus to run on (only runs on online subset).
- * @func: The function to run. This must be fast and non-blocking.
- * @info: An arbitrary pointer to pass to the function.
- * @wait: If true, wait (atomically) until function has completed
- *        on other CPUs.
- *
- * If @wait is true, then returns once @func has returned.
- *
- * You must not call this function with disabled interrupts or from a
- * hardware interrupt handler or from a bottom half handler. Preemption
- * must be disabled when calling this function.
- */
-void smp_call_function_many(const struct cpumask *mask,
-			    smp_call_func_t func, void *info, bool wait)
+static void smp_call_function_many_cond(const struct cpumask *mask,
+					smp_call_func_t func, void *info,
+					bool wait, smp_cond_func_t cond_func)
 {
 	struct call_function_data *cfd;
 	int cpu, next_cpu, this_cpu = smp_processor_id();
@@ -448,7 +435,8 @@ void smp_call_function_many(const struct cpumask *mask,
 
 	/* Fastpath: do that cpu by itself. */
 	if (next_cpu >= nr_cpu_ids) {
-		smp_call_function_single(cpu, func, info, wait);
+		if (!cond_func || (cond_func && cond_func(cpu, info)))
+			smp_call_function_single(cpu, func, info, wait);
 		return;
 	}
 
@@ -465,6 +453,9 @@ void smp_call_function_many(const struct cpumask *mask,
 	for_each_cpu(cpu, cfd->cpumask) {
 		call_single_data_t *csd = per_cpu_ptr(cfd->csd, cpu);
 
+		if (cond_func && !cond_func(cpu, info))
+			continue;
+
 		csd_lock(csd);
 		if (wait)
 			csd->flags |= CSD_FLAG_SYNCHRONOUS;
@@ -486,6 +477,26 @@ void smp_call_function_many(const struct cpumask *mask,
 		}
 	}
 }
+
+/**
+ * smp_call_function_many(): Run a function on a set of other CPUs.
+ * @mask: The set of cpus to run on (only runs on online subset).
+ * @func: The function to run. This must be fast and non-blocking.
+ * @info: An arbitrary pointer to pass to the function.
+ * @wait: If true, wait (atomically) until function has completed
+ *        on other CPUs.
+ *
+ * If @wait is true, then returns once @func has returned.
+ *
+ * You must not call this function with disabled interrupts or from a
+ * hardware interrupt handler or from a bottom half handler. Preemption
+ * must be disabled when calling this function.
+ */
+void smp_call_function_many(const struct cpumask *mask,
+			    smp_call_func_t func, void *info, bool wait)
+{
+	smp_call_function_many_cond(mask, func, info, wait, NULL);
+}
 EXPORT_SYMBOL(smp_call_function_many);
 
 /**
@@ -668,11 +679,6 @@ EXPORT_SYMBOL(on_each_cpu_mask);
  * @info:	An arbitrary pointer to pass to both functions.
  * @wait:	If true, wait (atomically) until function has
  *		completed on other CPUs.
- * @gfp_flags:	GFP flags to use when allocating the cpumask
- *		used internally by the function.
- *
- * The function might sleep if the GFP flags indicates a non
- * atomic allocation is allowed.
  *
  * Preemption is disabled to protect against CPUs going offline but not online.
  * CPUs going online during the call will not be seen or sent an IPI.
@@ -680,46 +686,27 @@ EXPORT_SYMBOL(on_each_cpu_mask);
  * You must not call this function with disabled interrupts or
  * from a hardware interrupt handler or from a bottom half handler.
  */
-void on_each_cpu_cond_mask(bool (*cond_func)(int cpu, void *info),
-			smp_call_func_t func, void *info, bool wait,
-			gfp_t gfp_flags, const struct cpumask *mask)
+void on_each_cpu_cond_mask(smp_cond_func_t cond_func, smp_call_func_t func,
+			   void *info, bool wait, const struct cpumask *mask)
 {
-	cpumask_var_t cpus;
-	int cpu, ret;
+	int cpu = get_cpu();
 
-	might_sleep_if(gfpflags_allow_blocking(gfp_flags));
+	smp_call_function_many_cond(mask, func, info, wait, cond_func);
+	if (cpumask_test_cpu(cpu, mask) && cond_func(cpu, info)) {
+		unsigned long flags;
 
-	if (likely(zalloc_cpumask_var(&cpus, (gfp_flags|__GFP_NOWARN)))) {
-		preempt_disable();
-		for_each_cpu(cpu, mask)
-			if (cond_func(cpu, info))
-				__cpumask_set_cpu(cpu, cpus);
-		on_each_cpu_mask(cpus, func, info, wait);
-		preempt_enable();
-		free_cpumask_var(cpus);
-	} else {
-		/*
-		 * No free cpumask, bother. No matter, we'll
-		 * just have to IPI them one by one.
-		 */
-		preempt_disable();
-		for_each_cpu(cpu, mask)
-			if (cond_func(cpu, info)) {
-				ret = smp_call_function_single(cpu, func,
-							       info, wait);
-				WARN_ON_ONCE(ret);
-			}
-		preempt_enable();
+		local_irq_save(flags);
+		func(info);
+		local_irq_restore(flags);
 	}
+	put_cpu();
 }
 EXPORT_SYMBOL(on_each_cpu_cond_mask);
 
-void on_each_cpu_cond(bool (*cond_func)(int cpu, void *info),
-			smp_call_func_t func, void *info, bool wait,
-			gfp_t gfp_flags)
+void on_each_cpu_cond(smp_cond_func_t cond_func, smp_call_func_t func,
+		      void *info, bool wait)
 {
-	on_each_cpu_cond_mask(cond_func, func, info, wait, gfp_flags,
-			      cpu_online_mask);
+	on_each_cpu_cond_mask(cond_func, func, info, wait, cpu_online_mask);
 }
 EXPORT_SYMBOL(on_each_cpu_cond);
 
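Taken together, this is where the allocation went: cond_func is now evaluated inside smp_call_function_many_cond() while it walks the per-CPU cfd->cpumask that the SMP core already preallocates at CPU bring-up, so the temporary cpumask, its GFP flags, and the allocation-failure fallback (IPI one CPU at a time) all disappear. The local CPU, which the many-call path never targets, is handled inline by the caller. A condensed reading of the new on_each_cpu_cond_mask() flow above:

	int cpu = get_cpu();	/* pins the CPU; also disables preemption */

	/* Remote CPUs: filtered by cond_func inside the many-call core. */
	smp_call_function_many_cond(mask, func, info, wait, cond_func);

	/* Local CPU: run func() directly, with IRQs off as an IPI would be. */
	if (cpumask_test_cpu(cpu, mask) && cond_func(cpu, info)) {
		unsigned long flags;

		local_irq_save(flags);
		func(info);
		local_irq_restore(flags);
	}
	put_cpu();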
diff --git a/kernel/up.c b/kernel/up.c
@@ -68,9 +68,8 @@ EXPORT_SYMBOL(on_each_cpu_mask);
  * Preemption is disabled here to make sure the cond_func is called under the
  * same condtions in UP and SMP.
  */
-void on_each_cpu_cond_mask(bool (*cond_func)(int cpu, void *info),
-			   smp_call_func_t func, void *info, bool wait,
-			   gfp_t gfp_flags, const struct cpumask *mask)
+void on_each_cpu_cond_mask(smp_cond_func_t cond_func, smp_call_func_t func,
+			   void *info, bool wait, const struct cpumask *mask)
 {
 	unsigned long flags;
 
@@ -84,11 +83,10 @@ void on_each_cpu_cond_mask(bool (*cond_func)(int cpu, void *info),
 }
 EXPORT_SYMBOL(on_each_cpu_cond_mask);
 
-void on_each_cpu_cond(bool (*cond_func)(int cpu, void *info),
-		      smp_call_func_t func, void *info, bool wait,
-		      gfp_t gfp_flags)
+void on_each_cpu_cond(smp_cond_func_t cond_func, smp_call_func_t func,
+		      void *info, bool wait)
 {
-	on_each_cpu_cond_mask(cond_func, func, info, wait, gfp_flags, NULL);
+	on_each_cpu_cond_mask(cond_func, func, info, wait, NULL);
 }
 EXPORT_SYMBOL(on_each_cpu_cond);
 