[IA64] fix generic/up builds
Jesse Barnes provided the original version of this patch months ago, but other changes kept conflicting with it, so it got deferred. Greg Edwards dug it out of obscurity just over a week ago, and almost immediately another conflicting patch appeared (Bob Picco's memory-less nodes). I've resolved the conflicts and got it running again. CONFIG_SGI_TIOCX is set to "y" in defconfig, which causes a Tiger to not boot (oops in tiocx_init). But that can be resolved later ... get this in now before it gets stale again. Signed-off-by: Tony Luck <tony.luck@intel.com>
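The underlying breakage is straightforward: cpu_to_node_map and its companions lived in smpboot.c, which is compiled only when CONFIG_SMP=y, so a generic or UP kernel with CONFIG_NUMA=y referenced symbols that were never built. A minimal two-file reduction of the failure mode (toy names and sizes, not the kernel sources):

/* smp_only.c -- compiled only when CONFIG_SMP=y, like smpboot.c */
#ifdef CONFIG_SMP
unsigned char cpu_to_node_map[64];
#endif

/* numa_user.c -- built whenever CONFIG_NUMA=y, SMP or not */
extern unsigned char cpu_to_node_map[64];

int cpu_to_node(int cpu)
{
	/* links fine on SMP; on a UP build the definition above was
	 * never compiled: "undefined reference to `cpu_to_node_map'" */
	return cpu_to_node_map[cpu];
}

Moving the definitions into a new numa.c that is built under CONFIG_NUMA (first hunk below) makes them available to every configuration that needs them.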
This commit is contained in:
parent 564601a5d1
commit 8d7e35174d
arch/ia64/kernel/Makefile
@@ -17,6 +17,7 @@ obj-$(CONFIG_IA64_PALINFO) += palinfo.o
 obj-$(CONFIG_IOSAPIC) += iosapic.o
 obj-$(CONFIG_MODULES) += module.o
 obj-$(CONFIG_SMP) += smp.o smpboot.o domain.o
+obj-$(CONFIG_NUMA) += numa.o
 obj-$(CONFIG_PERFMON) += perfmon_default_smpl.o
 obj-$(CONFIG_IA64_CYCLONE) += cyclone.o
 obj-$(CONFIG_IA64_MCA_RECOVERY) += mca_recovery.o
arch/ia64/kernel/acpi.c
@@ -640,8 +640,10 @@ acpi_boot_init (void)
 		if (smp_boot_data.cpu_phys_id[cpu] != hard_smp_processor_id())
 			node_cpuid[i++].phys_id = smp_boot_data.cpu_phys_id[cpu];
 	}
-	build_cpu_to_node_map();
 # endif
 #endif
+#ifdef CONFIG_ACPI_NUMA
+	build_cpu_to_node_map();
+#endif
 	/* Make boot-up look pretty */
 	printk(KERN_INFO "%d CPUs available, %d CPUs total\n", available_cpus, total_cpus);
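Before this hunk, the only call to build_cpu_to_node_map() sat inside the inner "# ifdef CONFIG_SMP" block, so a UP kernel with CONFIG_ACPI_NUMA never populated its map; the call now reappears under a plain CONFIG_ACPI_NUMA guard. Reduced to preprocessor logic (the CONFIG_* macros here are toy stand-ins, not Kconfig output):

#include <stdio.h>

#define CONFIG_ACPI_NUMA 1
/* #define CONFIG_SMP 1 */	/* leave undefined: a UP build */

int main(void)
{
#ifdef CONFIG_ACPI_NUMA
# ifdef CONFIG_SMP
	puts("old placement: map built");	/* never printed on UP */
# endif
	puts("new placement: map built");	/* printed on SMP and UP */
#endif
	return 0;
}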
arch/ia64/kernel/numa.c (new file, 57 lines)
@@ -0,0 +1,57 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ * ia64 kernel NUMA specific stuff
+ *
+ * Copyright (C) 2002 Erich Focht <efocht@ess.nec.de>
+ * Copyright (C) 2004 Silicon Graphics, Inc.
+ *	     Jesse Barnes <jbarnes@sgi.com>
+ */
+#include <linux/config.h>
+#include <linux/topology.h>
+#include <linux/module.h>
+#include <asm/processor.h>
+#include <asm/smp.h>
+
+u8 cpu_to_node_map[NR_CPUS] __cacheline_aligned;
+EXPORT_SYMBOL(cpu_to_node_map);
+
+cpumask_t node_to_cpu_mask[MAX_NUMNODES] __cacheline_aligned;
+
+/**
+ * build_cpu_to_node_map - setup cpu to node and node to cpumask arrays
+ *
+ * Build cpu to node mapping and initialize the per node cpu masks using
+ * info from the node_cpuid array handed to us by ACPI.
+ */
+void __init build_cpu_to_node_map(void)
+{
+	int cpu, i, node;
+
+	for(node=0; node < MAX_NUMNODES; node++)
+		cpus_clear(node_to_cpu_mask[node]);
+
+	for(cpu = 0; cpu < NR_CPUS; ++cpu) {
+		node = -1;
+		for (i = 0; i < NR_CPUS; ++i)
+			if (cpu_physical_id(cpu) == node_cpuid[i].phys_id) {
+				node = node_cpuid[i].nid;
+				break;
+			}
+		cpu_to_node_map[cpu] = (node >= 0) ? node : 0;
+		if (node >= 0)
+			cpu_set(cpu, node_to_cpu_mask[node]);
+	}
+}
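To see what build_cpu_to_node_map() computes, here is a hedged, userspace-runnable model of the same linear scan. NR_CPUS, MAX_NUMNODES, the node_cpuid table, and the physical ids are toy stand-ins, and a plain unsigned long plays the role of cpumask_t/cpu_set():

#include <stdio.h>

#define NR_CPUS      8	/* toy stand-ins for the kernel constants */
#define MAX_NUMNODES 4

struct node_cpuid_entry { int phys_id; int nid; };

/* Pretend ACPI handed us this table: two nodes, two CPUs each, the
 * remaining slots empty (phys_id -1). */
static const struct node_cpuid_entry node_cpuid[NR_CPUS] = {
	{ 0x10, 0 }, { 0x11, 0 }, { 0x20, 1 }, { 0x21, 1 },
	{ -1, -1 }, { -1, -1 }, { -1, -1 }, { -1, -1 },
};

/* Stand-in for cpu_physical_id(): a fixed logical-to-physical table. */
static const int phys_id_of[NR_CPUS] = {
	0x10, 0x11, 0x20, 0x21, 0x30, 0x31, 0x40, 0x41
};

int main(void)
{
	unsigned char cpu_to_node_map[NR_CPUS];
	unsigned long node_to_cpu_mask[MAX_NUMNODES] = { 0 };
	int cpu, i, node;

	/* Same scan as the kernel function: for each logical CPU, look up
	 * its physical id in node_cpuid[] to learn its node; CPUs with no
	 * entry default to node 0 and join no node mask. */
	for (cpu = 0; cpu < NR_CPUS; ++cpu) {
		node = -1;
		for (i = 0; i < NR_CPUS; ++i)
			if (phys_id_of[cpu] == node_cpuid[i].phys_id) {
				node = node_cpuid[i].nid;
				break;
			}
		cpu_to_node_map[cpu] = (node >= 0) ? node : 0;
		if (node >= 0)
			node_to_cpu_mask[node] |= 1UL << cpu;	/* cpu_set() */
	}

	for (cpu = 0; cpu < NR_CPUS; cpu++)
		printf("cpu %d -> node %d\n", cpu, cpu_to_node_map[cpu]);
	for (node = 0; node < MAX_NUMNODES; node++)
		printf("node %d: cpu mask 0x%lx\n", node, node_to_cpu_mask[node]);
	return 0;
}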
arch/ia64/kernel/smpboot.c
@@ -525,47 +525,6 @@ smp_build_cpu_map (void)
 	}
 }
 
-#ifdef CONFIG_NUMA
-
-/* on which node is each logical CPU (one cacheline even for 64 CPUs) */
-u8 cpu_to_node_map[NR_CPUS] __cacheline_aligned;
-EXPORT_SYMBOL(cpu_to_node_map);
-/* which logical CPUs are on which nodes */
-cpumask_t node_to_cpu_mask[MAX_NUMNODES] __cacheline_aligned;
-
-/*
- * Build cpu to node mapping and initialize the per node cpu masks.
- */
-void __init
-build_cpu_to_node_map (void)
-{
-	int cpu, i, node;
-
-	for(node=0; node<MAX_NUMNODES; node++)
-		cpus_clear(node_to_cpu_mask[node]);
-	for(cpu = 0; cpu < NR_CPUS; ++cpu) {
-		/*
-		 * All Itanium NUMA platforms I know use ACPI, so maybe we
-		 * can drop this ifdef completely. [EF]
-		 */
-#ifdef CONFIG_ACPI_NUMA
-		node = -1;
-		for (i = 0; i < NR_CPUS; ++i)
-			if (cpu_physical_id(cpu) == node_cpuid[i].phys_id) {
-				node = node_cpuid[i].nid;
-				break;
-			}
-#else
-# error Fixme: Dunno how to build CPU-to-node map.
-#endif
-		cpu_to_node_map[cpu] = (node >= 0) ? node : 0;
-		if (node >= 0)
-			cpu_set(cpu, node_to_cpu_mask[node]);
-	}
-}
-
-#endif /* CONFIG_NUMA */
-
 /*
  * Cycle through the APs sending Wakeup IPIs to boot each.
  */
arch/ia64/mm/discontig.c
@@ -125,6 +125,33 @@ static unsigned long __init compute_pernodesize(int node)
 	return pernodesize;
 }
 
+/**
+ * per_cpu_node_setup - setup per-cpu areas on each node
+ * @cpu_data: per-cpu area on this node
+ * @node: node to setup
+ *
+ * Copy the static per-cpu data into the region we just set aside and then
+ * setup __per_cpu_offset for each CPU on this node.  Return a pointer to
+ * the end of the area.
+ */
+static void *per_cpu_node_setup(void *cpu_data, int node)
+{
+#ifdef CONFIG_SMP
+	int cpu;
+
+	for (cpu = 0; cpu < NR_CPUS; cpu++) {
+		if (node == node_cpuid[cpu].nid) {
+			memcpy(__va(cpu_data), __phys_per_cpu_start,
+			       __per_cpu_end - __per_cpu_start);
+			__per_cpu_offset[cpu] = (char*)__va(cpu_data) -
+				__per_cpu_start;
+			cpu_data += PERCPU_PAGE_SIZE;
+		}
+	}
+#endif
+	return cpu_data;
+}
+
 /**
  * fill_pernode - initialize pernode data.
  * @node: the node id.
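per_cpu_node_setup() is the loop fill_pernode() used to carry inline, and its offset trick is worth spelling out: __per_cpu_offset[cpu] records how far that CPU's private copy sits from the original static section, so generic per-cpu arithmetic lands in the right copy. A hedged userspace model follows; the sizes, names, and the pointer subtraction across objects are toy stand-ins, and the kernel's __va()/physical-address handling is elided:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define NR_CPUS          4
#define PERCPU_PAGE_SIZE 64	/* toy size; the real ia64 value is far larger */

/* Stands in for the __per_cpu_start..__per_cpu_end linker section,
 * holding one demo variable at a known offset. */
static char per_cpu_template[PERCPU_PAGE_SIZE];
#define VAR_OFFSET 16

static long __per_cpu_offset[NR_CPUS];

/* Model of per_cpu_node_setup(): give each CPU a private copy of the
 * template and record where it landed, relative to the template. */
static char *per_cpu_setup(char *cpu_data, int ncpus)
{
	int cpu;

	for (cpu = 0; cpu < ncpus; cpu++) {
		memcpy(cpu_data, per_cpu_template, PERCPU_PAGE_SIZE);
		__per_cpu_offset[cpu] = cpu_data - per_cpu_template;
		cpu_data += PERCPU_PAGE_SIZE;
	}
	return cpu_data;	/* end of the area, as in the kernel */
}

/* per_cpu(var, cpu) then reduces to "template address + that offset". */
static int *per_cpu_var(int cpu)
{
	return (int *)(per_cpu_template + VAR_OFFSET + __per_cpu_offset[cpu]);
}

int main(void)
{
	char *region = malloc((size_t)NR_CPUS * PERCPU_PAGE_SIZE);
	int cpu;

	*(int *)(per_cpu_template + VAR_OFFSET) = 42;	/* initial value */
	per_cpu_setup(region, NR_CPUS);

	for (cpu = 0; cpu < NR_CPUS; cpu++)
		*per_cpu_var(cpu) += cpu;	/* each CPU has its own copy */
	for (cpu = 0; cpu < NR_CPUS; cpu++)
		printf("cpu%d: %d\n", cpu, *per_cpu_var(cpu));
	free(region);
	return 0;
}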
@@ -135,7 +162,7 @@ static void __init fill_pernode(int node, unsigned long pernode,
 	unsigned long pernodesize)
 {
 	void *cpu_data;
-	int cpus = early_nr_cpus_node(node), cpu;
+	int cpus = early_nr_cpus_node(node);
 	struct bootmem_data *bdp = &mem_data[node].bootmem_data;
 
 	mem_data[node].pernode_addr = pernode;
@@ -155,23 +182,11 @@
 	mem_data[node].pgdat->bdata = bdp;
 	pernode += L1_CACHE_ALIGN(sizeof(pg_data_t));
 
-	/*
-	 * Copy the static per-cpu data into the region we
-	 * just set aside and then setup __per_cpu_offset
-	 * for each CPU on this node.
-	 */
-	for (cpu = 0; cpu < NR_CPUS; cpu++) {
-		if (node == node_cpuid[cpu].nid) {
-			memcpy(__va(cpu_data), __phys_per_cpu_start,
-			       __per_cpu_end - __per_cpu_start);
-			__per_cpu_offset[cpu] = (char*)__va(cpu_data) -
-				__per_cpu_start;
-			cpu_data += PERCPU_PAGE_SIZE;
-		}
-	}
+	cpu_data = per_cpu_node_setup(cpu_data, node);
+
 	return;
 }
 
 /**
  * find_pernode_space - allocate memory for memory map and per-node structures
  * @start: physical start of range
@@ -300,8 +315,8 @@ static void __init reserve_pernode_space(void)
  */
 static void __init initialize_pernode_data(void)
 {
-	int cpu, node;
 	pg_data_t *pgdat_list[MAX_NUMNODES];
+	int cpu, node;
 
 	for_each_online_node(node)
 		pgdat_list[node] = mem_data[node].pgdat;
@@ -311,12 +326,22 @@ static void __init initialize_pernode_data(void)
 		memcpy(mem_data[node].node_data->pg_data_ptrs, pgdat_list,
 		       sizeof(pgdat_list));
 	}
-
+#ifdef CONFIG_SMP
 	/* Set the node_data pointer for each per-cpu struct */
 	for (cpu = 0; cpu < NR_CPUS; cpu++) {
 		node = node_cpuid[cpu].nid;
 		per_cpu(cpu_info, cpu).node_data = mem_data[node].node_data;
 	}
+#else
+	{
+		struct cpuinfo_ia64 *cpu0_cpu_info;
+		cpu = 0;
+		node = node_cpuid[cpu].nid;
+		cpu0_cpu_info = (struct cpuinfo_ia64 *)(__phys_per_cpu_start +
+			((char *)&per_cpu__cpu_info - __per_cpu_start));
+		cpu0_cpu_info->node_data = mem_data[node].node_data;
+	}
+#endif /* CONFIG_SMP */
 }
 
 /**
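The #else branch is the subtle part: a UP kernel has no __per_cpu_offset[] to consult, so cpu0's cpuinfo is located by pure section arithmetic — the variable's offset within the original per-cpu section, rebased onto the section's boot-time copy. A toy model of just that arithmetic (section names and sizes are stand-ins):

#include <stdio.h>

/* "section" stands in for __per_cpu_start..__per_cpu_end; "copy" for
 * the area at __phys_per_cpu_start that boot code populated. */
static char section[32];
static char copy[32];

int main(void)
{
	char *var = &section[12];	/* pretend &per_cpu__cpu_info */
	char *var_in_copy = copy + (var - section);

	printf("offset in section:   %td\n", var - section);
	printf("same offset in copy: %td\n", var_in_copy - copy);
	return 0;
}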
@@ -461,6 +486,7 @@ void __init find_memory(void)
 	find_initrd();
 }
 
+#ifdef CONFIG_SMP
 /**
  * per_cpu_init - setup per-cpu variables
  *
@@ -471,15 +497,15 @@ void *per_cpu_init(void)
 {
 	int cpu;
 
-	if (smp_processor_id() == 0) {
-		for (cpu = 0; cpu < NR_CPUS; cpu++) {
-			per_cpu(local_per_cpu_offset, cpu) =
-				__per_cpu_offset[cpu];
-		}
-	}
+	if (smp_processor_id() != 0)
+		return __per_cpu_start + __per_cpu_offset[smp_processor_id()];
+
+	for (cpu = 0; cpu < NR_CPUS; cpu++)
+		per_cpu(local_per_cpu_offset, cpu) = __per_cpu_offset[cpu];
 
 	return __per_cpu_start + __per_cpu_offset[smp_processor_id()];
 }
+#endif /* CONFIG_SMP */
 
 /**
  * show_mem - give short summary of memory stats
include/asm-ia64/sn/arch.h
@@ -11,6 +11,7 @@
 #ifndef _ASM_IA64_SN_ARCH_H
 #define _ASM_IA64_SN_ARCH_H
 
+#include <linux/numa.h>
 #include <asm/types.h>
 #include <asm/percpu.h>
 #include <asm/sn/types.h>
include/asm-ia64/sn/sn_cpuid.h
@@ -81,11 +81,6 @@
  *
  */
 
-#ifndef CONFIG_SMP
-#define cpu_physical_id(cpuid)	((ia64_getreg(_IA64_REG_CR_LID) >> 16) & 0xffff)
-#endif
-
-
 #define get_node_number(addr)	NASID_GET(addr)
 
 /*
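With cpu_physical_id() now needed by numa.c in every configuration, this SN-specific UP-only copy becomes redundant and is dropped; the definition presumably comes from the generic ia64 headers that numa.c pulls in via <asm/smp.h>. What the macro computed, modeled with a stand-in register value (0x1234 is arbitrary; a real kernel would read cr.lid via ia64_getreg):

#include <stdio.h>
#include <stdint.h>

/* The physical id/eid pair sits in bits 16..31 of the ia64 LID
 * register, so the macro just shifts and masks. */
#define CPU_PHYSICAL_ID(lid)	(((lid) >> 16) & 0xffff)

int main(void)
{
	uint64_t fake_cr_lid = (uint64_t)0x1234 << 16;	/* toy value */

	printf("cpu_physical_id = 0x%x\n",
	       (unsigned)CPU_PHYSICAL_ID(fake_cr_lid));
	return 0;
}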