sched/topology: Rename topology_thread_cpumask() to topology_sibling_cpumask()
Rename topology_thread_cpumask() to topology_sibling_cpumask() for more consistency with scheduler code.

Signed-off-by: Bartosz Golaszewski <bgolaszewski@baylibre.com>
Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: Russell King <rmk+kernel@arm.linux.org.uk>
Acked-by: Catalin Marinas <catalin.marinas@arm.com>
Cc: Benoit Cousson <bcousson@baylibre.com>
Cc: Fenghua Yu <fenghua.yu@intel.com>
Cc: Guenter Roeck <linux@roeck-us.net>
Cc: Jean Delvare <jdelvare@suse.de>
Cc: Jonathan Corbet <corbet@lwn.net>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Oleg Drokin <oleg.drokin@intel.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Rafael J. Wysocki <rjw@rjwysocki.net>
Cc: Russell King <linux@arm.linux.org.uk>
Cc: Viresh Kumar <viresh.kumar@linaro.org>
Link: http://lkml.kernel.org/r/1432645896-12588-2-git-send-email-bgolaszewski@baylibre.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
commit 06931e6224
parent 0fb0328d34
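Every use site changes mechanically: callers use the renamed macro exactly as they used the old one. For reference, a minimal sketch of the typical pattern, assuming a post-rename kernel; the helper name count_smt_siblings() is illustrative and not part of this patch:

#include <linux/cpumask.h>
#include <linux/topology.h>

/*
 * Illustrative helper (not part of this patch): count the hardware
 * threads that share a physical core with @cpu, using the renamed
 * accessor. Equivalent to cpumask_weight(topology_sibling_cpumask(cpu)).
 */
static unsigned int count_smt_siblings(unsigned int cpu)
{
	unsigned int sibling, n = 0;

	for_each_cpu(sibling, topology_sibling_cpumask(cpu))
		n++;

	return n;
}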
@@ -44,7 +44,7 @@ these macros in include/asm-XXX/topology.h:
 #define topology_physical_package_id(cpu)
 #define topology_core_id(cpu)
 #define topology_book_id(cpu)
-#define topology_thread_cpumask(cpu)
+#define topology_sibling_cpumask(cpu)
 #define topology_core_cpumask(cpu)
 #define topology_book_cpumask(cpu)
 
@@ -18,7 +18,7 @@ extern struct cputopo_arm cpu_topology[NR_CPUS];
 #define topology_physical_package_id(cpu) (cpu_topology[cpu].socket_id)
 #define topology_core_id(cpu) (cpu_topology[cpu].core_id)
 #define topology_core_cpumask(cpu) (&cpu_topology[cpu].core_sibling)
-#define topology_thread_cpumask(cpu) (&cpu_topology[cpu].thread_sibling)
+#define topology_sibling_cpumask(cpu) (&cpu_topology[cpu].thread_sibling)
 
 void init_cpu_topology(void);
 void store_cpu_topology(unsigned int cpuid);
@@ -18,7 +18,7 @@ extern struct cpu_topology cpu_topology[NR_CPUS];
 #define topology_physical_package_id(cpu) (cpu_topology[cpu].cluster_id)
 #define topology_core_id(cpu) (cpu_topology[cpu].core_id)
 #define topology_core_cpumask(cpu) (&cpu_topology[cpu].core_sibling)
-#define topology_thread_cpumask(cpu) (&cpu_topology[cpu].thread_sibling)
+#define topology_sibling_cpumask(cpu) (&cpu_topology[cpu].thread_sibling)
 
 void init_cpu_topology(void);
 void store_cpu_topology(unsigned int cpuid);
@@ -53,7 +53,7 @@ void build_cpu_to_node_map(void);
 #define topology_physical_package_id(cpu) (cpu_data(cpu)->socket_id)
 #define topology_core_id(cpu) (cpu_data(cpu)->core_id)
 #define topology_core_cpumask(cpu) (&cpu_core_map[cpu])
-#define topology_thread_cpumask(cpu) (&per_cpu(cpu_sibling_map, cpu))
+#define topology_sibling_cpumask(cpu) (&per_cpu(cpu_sibling_map, cpu))
 #endif
 
 extern void arch_fix_phys_package_id(int num, u32 slot);
@@ -15,7 +15,7 @@
 #define topology_physical_package_id(cpu) (cpu_data[cpu].package)
 #define topology_core_id(cpu) (cpu_data[cpu].core)
 #define topology_core_cpumask(cpu) (&cpu_core_map[cpu])
-#define topology_thread_cpumask(cpu) (&cpu_sibling_map[cpu])
+#define topology_sibling_cpumask(cpu) (&cpu_sibling_map[cpu])
 #endif
 
 #endif /* __ASM_TOPOLOGY_H */
@@ -87,7 +87,7 @@ static inline int prrn_is_enabled(void)
 #include <asm/smp.h>
 
 #define topology_physical_package_id(cpu) (cpu_to_chip_id(cpu))
-#define topology_thread_cpumask(cpu) (per_cpu(cpu_sibling_map, cpu))
+#define topology_sibling_cpumask(cpu) (per_cpu(cpu_sibling_map, cpu))
 #define topology_core_cpumask(cpu) (per_cpu(cpu_core_map, cpu))
 #define topology_core_id(cpu) (cpu_to_core_id(cpu))
 #endif
@@ -217,7 +217,7 @@ static DEFINE_RAW_SPINLOCK(tlbivax_lock);
 static int mm_is_core_local(struct mm_struct *mm)
 {
         return cpumask_subset(mm_cpumask(mm),
-                              topology_thread_cpumask(smp_processor_id()));
+                              topology_sibling_cpumask(smp_processor_id()));
 }
 
 struct tlb_flush_param {
@@ -22,7 +22,8 @@ DECLARE_PER_CPU(struct cpu_topology_s390, cpu_topology);
 
 #define topology_physical_package_id(cpu) (per_cpu(cpu_topology, cpu).socket_id)
 #define topology_thread_id(cpu) (per_cpu(cpu_topology, cpu).thread_id)
-#define topology_thread_cpumask(cpu) (&per_cpu(cpu_topology, cpu).thread_mask)
+#define topology_sibling_cpumask(cpu) \
+        (&per_cpu(cpu_topology, cpu).thread_mask)
 #define topology_core_id(cpu) (per_cpu(cpu_topology, cpu).core_id)
 #define topology_core_cpumask(cpu) (&per_cpu(cpu_topology, cpu).core_mask)
 #define topology_book_id(cpu) (per_cpu(cpu_topology, cpu).book_id)
@@ -41,7 +41,7 @@ static inline int pcibus_to_node(struct pci_bus *pbus)
 #define topology_physical_package_id(cpu) (cpu_data(cpu).proc_id)
 #define topology_core_id(cpu) (cpu_data(cpu).core_id)
 #define topology_core_cpumask(cpu) (&cpu_core_map[cpu])
-#define topology_thread_cpumask(cpu) (&per_cpu(cpu_sibling_map, cpu))
+#define topology_sibling_cpumask(cpu) (&per_cpu(cpu_sibling_map, cpu))
 #endif /* CONFIG_SMP */
 
 extern cpumask_t cpu_core_map[NR_CPUS];
@@ -55,7 +55,7 @@ static inline const struct cpumask *cpumask_of_node(int node)
 #define topology_physical_package_id(cpu) ((void)(cpu), 0)
 #define topology_core_id(cpu) (cpu)
 #define topology_core_cpumask(cpu) ((void)(cpu), cpu_online_mask)
-#define topology_thread_cpumask(cpu) cpumask_of(cpu)
+#define topology_sibling_cpumask(cpu) cpumask_of(cpu)
 #endif
 
 #endif /* _ASM_TILE_TOPOLOGY_H */
@@ -124,7 +124,7 @@ extern const struct cpumask *cpu_coregroup_mask(int cpu);
 
 #ifdef ENABLE_TOPO_DEFINES
 #define topology_core_cpumask(cpu) (per_cpu(cpu_core_map, cpu))
-#define topology_thread_cpumask(cpu) (per_cpu(cpu_sibling_map, cpu))
+#define topology_sibling_cpumask(cpu) (per_cpu(cpu_sibling_map, cpu))
 #endif
 
 static inline void arch_fix_phys_package_id(int num, u32 slot)
@@ -2621,7 +2621,7 @@ static void intel_pmu_cpu_starting(int cpu)
         if (!(x86_pmu.flags & PMU_FL_NO_HT_SHARING)) {
                 void **onln = &cpuc->kfree_on_online[X86_PERF_KFREE_SHARED];
 
-                for_each_cpu(i, topology_thread_cpumask(cpu)) {
+                for_each_cpu(i, topology_sibling_cpumask(cpu)) {
                         struct intel_shared_regs *pc;
 
                         pc = per_cpu(cpu_hw_events, i).shared_regs;
@@ -2641,7 +2641,7 @@ static void intel_pmu_cpu_starting(int cpu)
         if (x86_pmu.flags & PMU_FL_EXCL_CNTRS) {
                 int h = x86_pmu.num_counters >> 1;
 
-                for_each_cpu(i, topology_thread_cpumask(cpu)) {
+                for_each_cpu(i, topology_sibling_cpumask(cpu)) {
                         struct intel_excl_cntrs *c;
 
                         c = per_cpu(cpu_hw_events, i).excl_cntrs;
@@ -3403,7 +3403,7 @@ static __init int fixup_ht_bug(void)
         if (!(x86_pmu.flags & PMU_FL_EXCL_ENABLED))
                 return 0;
 
-        w = cpumask_weight(topology_thread_cpumask(cpu));
+        w = cpumask_weight(topology_sibling_cpumask(cpu));
         if (w > 1) {
                 pr_info("PMU erratum BJ122, BV98, HSD29 worked around, HT is on\n");
                 return 0;
@@ -24,7 +24,7 @@ static int get_first_sibling(unsigned int cpu)
 {
         unsigned int ret;
 
-        ret = cpumask_first(topology_thread_cpumask(cpu));
+        ret = cpumask_first(topology_sibling_cpumask(cpu));
         if (ret < nr_cpu_ids)
                 return ret;
 
@@ -105,7 +105,7 @@ static void round_robin_cpu(unsigned int tsk_index)
         mutex_lock(&round_robin_lock);
         cpumask_clear(tmp);
         for_each_cpu(cpu, pad_busy_cpus)
-                cpumask_or(tmp, tmp, topology_thread_cpumask(cpu));
+                cpumask_or(tmp, tmp, topology_sibling_cpumask(cpu));
         cpumask_andnot(tmp, cpu_online_mask, tmp);
         /* avoid HT sibilings if possible */
         if (cpumask_empty(tmp))
@@ -61,7 +61,7 @@ static DEVICE_ATTR_RO(physical_package_id);
 define_id_show_func(core_id);
 static DEVICE_ATTR_RO(core_id);
 
-define_siblings_show_func(thread_siblings, thread_cpumask);
+define_siblings_show_func(thread_siblings, sibling_cpumask);
 static DEVICE_ATTR_RO(thread_siblings);
 static DEVICE_ATTR_RO(thread_siblings_list);
 
@@ -1304,7 +1304,7 @@ static unsigned int efx_wanted_parallelism(struct efx_nic *efx)
                 if (!cpumask_test_cpu(cpu, thread_mask)) {
                         ++count;
                         cpumask_or(thread_mask, thread_mask,
-                                   topology_thread_cpumask(cpu));
+                                   topology_sibling_cpumask(cpu));
                 }
         }
 
@@ -87,7 +87,7 @@ static void cfs_cpu_core_siblings(int cpu, cpumask_t *mask)
 /* return cpumask of HTs in the same core */
 static void cfs_cpu_ht_siblings(int cpu, cpumask_t *mask)
 {
-        cpumask_copy(mask, topology_thread_cpumask(cpu));
+        cpumask_copy(mask, topology_sibling_cpumask(cpu));
 }
 
 static void cfs_node_to_cpumask(int node, cpumask_t *mask)
@@ -557,7 +557,7 @@ ptlrpc_server_nthreads_check(struct ptlrpc_service *svc,
                  * there are.
                  */
                 /* weight is # of HTs */
-                if (cpumask_weight(topology_thread_cpumask(0)) > 1) {
+                if (cpumask_weight(topology_sibling_cpumask(0)) > 1) {
                         /* depress thread factor for hyper-thread */
                         factor = factor - (factor >> 1) + (factor >> 3);
                 }
@@ -2768,7 +2768,7 @@ int ptlrpc_hr_init(void)
 
         init_waitqueue_head(&ptlrpc_hr.hr_waitq);
 
-        weight = cpumask_weight(topology_thread_cpumask(0));
+        weight = cpumask_weight(topology_sibling_cpumask(0));
 
         cfs_percpt_for_each(hrp, i, ptlrpc_hr.hr_partitions) {
                 hrp->hrp_cpt = i;
@@ -191,8 +191,8 @@ static inline int cpu_to_mem(int cpu)
 #ifndef topology_core_id
 #define topology_core_id(cpu) ((void)(cpu), 0)
 #endif
-#ifndef topology_thread_cpumask
-#define topology_thread_cpumask(cpu) cpumask_of(cpu)
+#ifndef topology_sibling_cpumask
+#define topology_sibling_cpumask(cpu) cpumask_of(cpu)
 #endif
 #ifndef topology_core_cpumask
 #define topology_core_cpumask(cpu) cpumask_of(cpu)
@@ -201,7 +201,7 @@ static inline int cpu_to_mem(int cpu)
 #ifdef CONFIG_SCHED_SMT
 static inline const struct cpumask *cpu_smt_mask(int cpu)
 {
-        return topology_thread_cpumask(cpu);
+        return topology_sibling_cpumask(cpu);
 }
 #endif
 
@@ -191,7 +191,7 @@ int cpu_rmap_update(struct cpu_rmap *rmap, u16 index,
         /* Update distances based on topology */
         for_each_cpu(cpu, update_mask) {
                 if (cpu_rmap_copy_neigh(rmap, cpu,
-                                        topology_thread_cpumask(cpu), 1))
+                                        topology_sibling_cpumask(cpu), 1))
                         continue;
                 if (cpu_rmap_copy_neigh(rmap, cpu,
                                         topology_core_cpumask(cpu), 2))