Merge branch 'upstream' of git://ftp.linux-mips.org/pub/scm/upstream-linus

* 'upstream' of git://ftp.linux-mips.org/pub/scm/upstream-linus:
  Fix divide by zero error in build_clear_page() and build_copy_page()
  [MIPS] Fix typo in header guard
  [MIPS] Fix build error - Delete debugging crap that crept in with CMP
  [MIPS] Add accessors for random register.
  [MIPS] IP27: misc fixes
  [MIPS] IP27: Fix clockevent setup
  [MIPS] IP27: Fix bootmem memory setup
  [MIPS] remove CONFIG_CPU_R4000 line from Makefile
  [MIPS] Fix check for valid stack pointer during backtrace
  [MIPS] Add missing braces to pte_mkyoung
  [MIPS] R4700: Fix build_tlb_probe_entry
  [MIPS] Alchemy: dbdma: add API to delete custom DDMA device ids.
  [MIPS] Alchemy: export get_au1x00_speed for modules
Linus Torvalds 2008-06-05 14:29:53 -07:00
commit 5965087dc9
17 changed files with 101 additions and 168 deletions

View File

@@ -45,6 +45,7 @@ unsigned int get_au1x00_speed(void)
{
return au1x00_clock;
}
EXPORT_SYMBOL(get_au1x00_speed);
/*
* The UART baud base is not known at compile time ... if
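For illustration only (not part of this commit): with the symbol exported, an out-of-tree module can now use it. A minimal sketch, assuming get_au1x00_speed() is declared by <asm/mach-au1x00/au1000.h> and returns the core clock in Hz:

/* Sketch only: a module consuming the symbol exported above. */
#include <linux/module.h>
#include <linux/kernel.h>
#include <asm/mach-au1x00/au1000.h>	/* assumed to declare get_au1x00_speed() */

static int __init au1x_speed_demo_init(void)
{
	printk(KERN_INFO "Au1x00 core clock: %u Hz\n", get_au1x00_speed());
	return 0;
}

static void __exit au1x_speed_demo_exit(void)
{
}

module_init(au1x_speed_demo_init);
module_exit(au1x_speed_demo_exit);
MODULE_LICENSE("GPL");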

View File

@@ -216,6 +216,17 @@ u32 au1xxx_ddma_add_device(dbdev_tab_t *dev)
}
EXPORT_SYMBOL(au1xxx_ddma_add_device);
void au1xxx_ddma_del_device(u32 devid)
{
dbdev_tab_t *p = find_dbdev_id(devid);
if (p != NULL) {
memset(p, 0, sizeof(dbdev_tab_t));
p->dev_id = ~0;
}
}
EXPORT_SYMBOL(au1xxx_ddma_del_device);
/* Allocate a channel and return a non-zero descriptor if successful. */
u32 au1xxx_dbdma_chan_alloc(u32 srcid, u32 destid,
void (*callback)(int, void *), void *callparam)
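A rough, hypothetical usage sketch (driver names, error handling, and the header path are assumptions, not from this commit): the new call is the counterpart to au1xxx_ddma_add_device(), letting a driver release the table slot it claimed.

/* Sketch only: pair the add/del calls across a driver's lifetime. */
#include <linux/errno.h>
#include <asm/mach-au1x00/au1xxx_dbdma.h>	/* assumed to carry both prototypes */

static u32 my_custom_devid;	/* id handed back by au1xxx_ddma_add_device() */

static int my_driver_attach(dbdev_tab_t *desc)
{
	my_custom_devid = au1xxx_ddma_add_device(desc);
	return my_custom_devid ? 0 : -ENODEV;
}

static void my_driver_detach(void)
{
	/* New in this commit: clears the slot so the custom id can be reused. */
	au1xxx_ddma_del_device(my_custom_devid);
}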

View File

@@ -30,7 +30,6 @@ obj-$(CONFIG_CPU_LOONGSON2) += r4k_fpu.o r4k_switch.o
obj-$(CONFIG_CPU_MIPS32) += r4k_fpu.o r4k_switch.o
obj-$(CONFIG_CPU_MIPS64) += r4k_fpu.o r4k_switch.o
obj-$(CONFIG_CPU_R3000) += r2300_fpu.o r2300_switch.o
obj-$(CONFIG_CPU_R4000) += r4k_fpu.o r4k_switch.o
obj-$(CONFIG_CPU_R4300) += r4k_fpu.o r4k_switch.o
obj-$(CONFIG_CPU_R4X00) += r4k_fpu.o r4k_switch.o
obj-$(CONFIG_CPU_R5000) += r4k_fpu.o r4k_switch.o

View File

@@ -88,15 +88,17 @@ static void show_raw_backtrace(unsigned long reg29)
#ifdef CONFIG_KALLSYMS
printk("\n");
#endif
#define IS_KVA01(a) ((((unsigned int)a) & 0xc0000000) == 0x80000000)
if (IS_KVA01(sp)) {
while (!kstack_end(sp)) {
addr = *sp++;
if (__kernel_text_address(addr))
print_ip_sym(addr);
while (!kstack_end(sp)) {
unsigned long __user *p =
(unsigned long __user *)(unsigned long)sp++;
if (__get_user(addr, p)) {
printk(" (Bad stack address)");
break;
}
printk("\n");
if (__kernel_text_address(addr))
print_ip_sym(addr);
}
printk("\n");
}
#ifdef CONFIG_KALLSYMS
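To spell out the pattern the fix relies on (sketch only; the helper name is made up): every stack word is read through __get_user(), so a bogus stack pointer produces the "(Bad stack address)" message instead of a nested fault.

/* Sketch only: probe one word at sp without risking a second fault. */
static int probe_stack_word(unsigned long *sp, unsigned long *val)
{
	unsigned long __user *p = (unsigned long __user *)(unsigned long)sp;

	return __get_user(*val, p);	/* 0 on success, non-zero on a bad address */
}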

View File

@@ -58,27 +58,8 @@ static int mips_cpu_timer_irq;
static int mips_cpu_perf_irq;
extern int cp0_perfcount_irq;
DEFINE_PER_CPU(unsigned int, tickcount);
#define tickcount_this_cpu __get_cpu_var(tickcount)
static unsigned long ledbitmask;
static void mips_timer_dispatch(void)
{
#if defined(CONFIG_MIPS_MALTA) || defined(CONFIG_MIPS_ATLAS)
/*
* Yes, this is very tacky, won't work as expected with SMTC and
* dyntick will break it,
* but it gives me a nice warm feeling during debug
*/
#define LEDBAR 0xbf000408
if (tickcount_this_cpu++ >= HZ) {
tickcount_this_cpu = 0;
change_bit(smp_processor_id(), &ledbitmask);
smp_wmb(); /* Make sure every one else sees the change */
/* This will pick up any recent changes made by other CPU's */
*(unsigned int *)LEDBAR = ledbitmask;
}
#endif
do_IRQ(mips_cpu_timer_irq);
}

View File

@@ -310,8 +310,8 @@ void __cpuinit build_clear_page(void)
if (R4600_V2_HIT_CACHEOP_WAR && cpu_is_r4600_v2_x())
uasm_i_lui(&buf, AT, 0xa000);
off = min(8, pref_bias_clear_store / cache_line_size) *
cache_line_size;
off = cache_line_size ? min(8, pref_bias_clear_store / cache_line_size)
* cache_line_size : 0;
while (off) {
build_clear_pref(&buf, -off);
off -= cache_line_size;
@@ -454,12 +454,14 @@ void __cpuinit build_copy_page(void)
if (R4600_V2_HIT_CACHEOP_WAR && cpu_is_r4600_v2_x())
uasm_i_lui(&buf, AT, 0xa000);
off = min(8, pref_bias_copy_load / cache_line_size) * cache_line_size;
off = cache_line_size ? min(8, pref_bias_copy_load / cache_line_size) *
cache_line_size : 0;
while (off) {
build_copy_load_pref(&buf, -off);
off -= cache_line_size;
}
off = min(8, pref_bias_copy_store / cache_line_size) * cache_line_size;
off = cache_line_size ? min(8, pref_bias_copy_load / cache_line_size) *
cache_line_size : 0;
while (off) {
build_copy_store_pref(&buf, -off);
off -= cache_line_size;
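The same guard in isolation, as a sketch (the helper name is made up; the logic mirrors the lines above): when cache_line_size is 0, the prefetch offset collapses to 0 and the while loop is skipped instead of dividing by zero.

/* Sketch only: compute a prefetch offset without dividing by zero. */
#include <linux/kernel.h>	/* min() */

static inline unsigned int pref_off(unsigned int bias, unsigned int line_size)
{
	return line_size ? min(8U, bias / line_size) * line_size : 0;
}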

View File

@@ -224,8 +224,9 @@ static u32 final_handler[64] __cpuinitdata;
static void __cpuinit __maybe_unused build_tlb_probe_entry(u32 **p)
{
switch (current_cpu_type()) {
/* Found by experiment: R4600 v2.0 needs this, too. */
/* Found by experiment: R4600 v2.0/R4700 needs this, too. */
case CPU_R4600:
case CPU_R4700:
case CPU_R5000:
case CPU_R5000A:
case CPU_NEVADA:

View File

@@ -13,6 +13,22 @@
#include <asm/sn/intr.h>
#include <asm/sn/sn0/hub.h>
/*
* Most of the IOC3 PCI config register aren't present
* we emulate what is needed for a normal PCI enumeration
*/
static u32 emulate_ioc3_cfg(int where, int size)
{
if (size == 1 && where == 0x3d)
return 0x01;
else if (size == 2 && where == 0x3c)
return 0x0100;
else if (size == 4 && where == 0x3c)
return 0x00000100;
return 0;
}
/*
* The Bridge ASIC supports both type 0 and type 1 access. Type 1 is
* not really documented, so right now I can't write code which uses it.
@@ -64,7 +80,7 @@ static int pci_conf0_read_config(struct pci_bus *bus, unsigned int devfn,
* generic PCI code a chance to look at the wrong register.
*/
if ((where >= 0x14 && where < 0x40) || (where >= 0x48)) {
*value = 0;
*value = emulate_ioc3_cfg(where, size);
return PCIBIOS_SUCCESSFUL;
}
@@ -127,7 +143,7 @@ static int pci_conf1_read_config(struct pci_bus *bus, unsigned int devfn,
* generic PCI code a chance to look at the wrong register.
*/
if ((where >= 0x14 && where < 0x40) || (where >= 0x48)) {
*value = 0;
*value = emulate_ioc3_cfg(where, size);
return PCIBIOS_SUCCESSFUL;
}
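A reading of the hard-coded values above (an interpretation, not text from the commit): they model the standard PCI Interrupt Line/Pin registers at config offsets 0x3c/0x3d, so generic PCI enumeration sees a sane Interrupt Pin instead of zeros from registers the IOC3 does not implement.

/* Assumed decoding of emulate_ioc3_cfg() return values:
 *   where == 0x3d, size 1  ->  0x01        Interrupt Pin  = 1 (INTA)
 *   where == 0x3c, size 2  ->  0x0100      Interrupt Line = 0, Pin = 1
 *   where == 0x3c, size 4  ->  0x00000100  the same pair read as one 32-bit word
 */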

View File

@@ -47,6 +47,9 @@ int __cpuinit bridge_probe(nasid_t nasid, int widget_id, int masterwid)
static int num_bridges = 0;
bridge_t *bridge;
int slot;
extern int pci_probe_only;
pci_probe_only = 1;
printk("a bridge\n");
@@ -100,6 +103,11 @@ int __cpuinit bridge_probe(nasid_t nasid, int widget_id, int masterwid)
*/
bridge->b_wid_control |= BRIDGE_CTRL_IO_SWAP |
BRIDGE_CTRL_MEM_SWAP;
#ifdef CONFIG_PAGE_SIZE_4KB
bridge->b_wid_control &= ~BRIDGE_CTRL_PAGE_SIZE;
#else /* 16kB or larger */
bridge->b_wid_control |= BRIDGE_CTRL_PAGE_SIZE;
#endif
/*
* Hmm... IRIX sets additional bits in the address which

View File

@@ -161,27 +161,6 @@ cnodeid_t get_compact_nodeid(void)
return NASID_TO_COMPACT_NODEID(get_nasid());
}
/* Extracted from the IOC3 meta driver. FIXME. */
static inline void ioc3_sio_init(void)
{
struct ioc3 *ioc3;
nasid_t nid;
long loops;
nid = get_nasid();
ioc3 = (struct ioc3 *) KL_CONFIG_CH_CONS_INFO(nid)->memory_base;
ioc3->sscr_a = 0; /* PIO mode for uarta. */
ioc3->sscr_b = 0; /* PIO mode for uartb. */
ioc3->sio_iec = ~0;
ioc3->sio_ies = (SIO_IR_SA_INT | SIO_IR_SB_INT);
loops=1000000; while(loops--);
ioc3->sregs.uarta.iu_fcr = 0;
ioc3->sregs.uartb.iu_fcr = 0;
loops=1000000; while(loops--);
}
static inline void ioc3_eth_init(void)
{
struct ioc3 *ioc3;
@@ -234,7 +213,6 @@ void __init plat_mem_setup(void)
panic("Kernel compiled for N mode.");
#endif
ioc3_sio_init();
ioc3_eth_init();
per_cpu_init();

View File

@@ -33,10 +33,6 @@
#define SLOT_PFNSHIFT (SLOT_SHIFT - PAGE_SHIFT)
#define PFN_NASIDSHFT (NASID_SHFT - PAGE_SHIFT)
#define SLOT_IGNORED 0xffff
static short __initdata slot_lastfilled_cache[MAX_COMPACT_NODES];
static unsigned short __initdata slot_psize_cache[MAX_COMPACT_NODES][MAX_MEM_SLOTS];
static struct bootmem_data __initdata plat_node_bdata[MAX_COMPACT_NODES];
struct node_data *__node_data[MAX_COMPACT_NODES];
@@ -267,51 +263,6 @@ static pfn_t __init slot_getbasepfn(cnodeid_t cnode, int slot)
return ((pfn_t)nasid << PFN_NASIDSHFT) | (slot << SLOT_PFNSHIFT);
}
/*
* Return the number of pages of memory provided by the given slot
* on the specified node.
*/
static pfn_t __init slot_getsize(cnodeid_t node, int slot)
{
return (pfn_t) slot_psize_cache[node][slot];
}
/*
* Return highest slot filled
*/
static int __init node_getlastslot(cnodeid_t node)
{
return (int) slot_lastfilled_cache[node];
}
/*
* Return the pfn of the last free page of memory on a node.
*/
static pfn_t __init node_getmaxclick(cnodeid_t node)
{
pfn_t slot_psize;
int slot;
/*
* Start at the top slot. When we find a slot with memory in it,
* that's the winner.
*/
for (slot = (MAX_MEM_SLOTS - 1); slot >= 0; slot--) {
if ((slot_psize = slot_getsize(node, slot))) {
if (slot_psize == SLOT_IGNORED)
continue;
/* Return the basepfn + the slot size, minus 1. */
return slot_getbasepfn(node, slot) + slot_psize - 1;
}
}
/*
* If there's no memory on the node, return 0. This is likely
* to cause problems.
*/
return 0;
}
static pfn_t __init slot_psize_compute(cnodeid_t node, int slot)
{
nasid_t nasid;
@@ -404,13 +355,13 @@ static void __init mlreset(void)
static void __init szmem(void)
{
pfn_t slot_psize, slot0sz = 0, nodebytes; /* Hack to detect problem configs */
int slot, ignore;
int slot;
cnodeid_t node;
num_physpages = 0;
for_each_online_node(node) {
ignore = nodebytes = 0;
nodebytes = 0;
for (slot = 0; slot < MAX_MEM_SLOTS; slot++) {
slot_psize = slot_psize_compute(node, slot);
if (slot == 0)
@@ -420,21 +371,20 @@ static void __init szmem(void)
* kernel text.
*/
nodebytes += (1LL << SLOT_SHIFT);
if (!slot_psize)
continue;
if ((nodebytes >> PAGE_SHIFT) * (sizeof(struct page)) >
(slot0sz << PAGE_SHIFT))
ignore = 1;
if (ignore && slot_psize) {
(slot0sz << PAGE_SHIFT)) {
printk("Ignoring slot %d onwards on node %d\n",
slot, node);
slot_psize_cache[node][slot] = SLOT_IGNORED;
slot = MAX_MEM_SLOTS;
continue;
}
num_physpages += slot_psize;
slot_psize_cache[node][slot] =
(unsigned short) slot_psize;
if (slot_psize)
slot_lastfilled_cache[node] = slot;
add_active_range(node, slot_getbasepfn(node, slot),
slot_getbasepfn(node, slot) + slot_psize);
}
}
}
@@ -442,18 +392,20 @@ static void __init szmem(void)
static void __init node_mem_init(cnodeid_t node)
{
pfn_t slot_firstpfn = slot_getbasepfn(node, 0);
pfn_t slot_lastpfn = slot_firstpfn + slot_getsize(node, 0);
pfn_t slot_freepfn = node_getfirstfree(node);
struct pglist_data *pd;
unsigned long bootmap_size;
pfn_t start_pfn, end_pfn;
get_pfn_range_for_nid(node, &start_pfn, &end_pfn);
/*
* Allocate the node data structures on the node first.
*/
__node_data[node] = __va(slot_freepfn << PAGE_SHIFT);
pd = NODE_DATA(node);
pd->bdata = &plat_node_bdata[node];
NODE_DATA(node)->bdata = &plat_node_bdata[node];
NODE_DATA(node)->node_start_pfn = start_pfn;
NODE_DATA(node)->node_spanned_pages = end_pfn - start_pfn;
cpus_clear(hub_data(node)->h_cpus);
@@ -461,12 +413,12 @@ static void __init node_mem_init(cnodeid_t node)
sizeof(struct hub_data));
bootmap_size = init_bootmem_node(NODE_DATA(node), slot_freepfn,
slot_firstpfn, slot_lastpfn);
free_bootmem_node(NODE_DATA(node), slot_firstpfn << PAGE_SHIFT,
(slot_lastpfn - slot_firstpfn) << PAGE_SHIFT);
start_pfn, end_pfn);
free_bootmem_with_active_regions(node, end_pfn);
reserve_bootmem_node(NODE_DATA(node), slot_firstpfn << PAGE_SHIFT,
((slot_freepfn - slot_firstpfn) << PAGE_SHIFT) + bootmap_size,
BOOTMEM_DEFAULT);
sparse_memory_present_with_active_regions(node);
}
/*
@@ -515,16 +467,15 @@ void __init paging_init(void)
pagetable_init();
for_each_online_node(node) {
pfn_t start_pfn = slot_getbasepfn(node, 0);
pfn_t end_pfn = node_getmaxclick(node) + 1;
pfn_t start_pfn, end_pfn;
zones_size[ZONE_NORMAL] = end_pfn - start_pfn;
free_area_init_node(node, NODE_DATA(node),
zones_size, start_pfn, NULL);
get_pfn_range_for_nid(node, &start_pfn, &end_pfn);
if (end_pfn > max_low_pfn)
max_low_pfn = end_pfn;
}
zones_size[ZONE_NORMAL] = max_low_pfn;
free_area_init_nodes(zones_size);
}
void __init mem_init(void)
@@ -535,34 +486,10 @@ void __init mem_init(void)
high_memory = (void *) __va(num_physpages << PAGE_SHIFT);
for_each_online_node(node) {
unsigned slot, numslots;
struct page *end, *p;
/*
* This will free up the bootmem, ie, slot 0 memory.
*/
totalram_pages += free_all_bootmem_node(NODE_DATA(node));
/*
* We need to manually do the other slots.
*/
numslots = node_getlastslot(node);
for (slot = 1; slot <= numslots; slot++) {
p = nid_page_nr(node, slot_getbasepfn(node, slot) -
slot_getbasepfn(node, 0));
/*
* Free valid memory in current slot.
*/
for (end = p + slot_getsize(node, slot); p < end; p++) {
/* if (!page_is_ram(pgnr)) continue; */
/* commented out until page_is_ram works */
ClearPageReserved(p);
init_page_count(p);
__free_page(p);
totalram_pages++;
}
}
}
totalram_pages -= setup_zero_pages(); /* This comes from node 0 */

View File

@@ -176,11 +176,14 @@ static void ip27_send_ipi_mask(cpumask_t mask, unsigned int action)
static void __cpuinit ip27_init_secondary(void)
{
per_cpu_init();
local_irq_enable();
}
static void __cpuinit ip27_smp_finish(void)
{
extern void hub_rt_clock_event_init(void);
hub_rt_clock_event_init();
local_irq_enable();
}
static void __init ip27_cpus_done(void)

View File

@@ -160,10 +160,13 @@ static void rt_set_mode(enum clock_event_mode mode,
int rt_timer_irq;
static DEFINE_PER_CPU(struct clock_event_device, hub_rt_clockevent);
static DEFINE_PER_CPU(char [11], hub_rt_name);
static irqreturn_t hub_rt_counter_handler(int irq, void *dev_id)
{
struct clock_event_device *cd = dev_id;
unsigned int cpu = smp_processor_id();
struct clock_event_device *cd = &per_cpu(hub_rt_clockevent, cpu);
int slice = cputoslice(cpu);
/*
@@ -192,10 +195,7 @@ struct irqaction hub_rt_irqaction = {
#define NSEC_PER_CYCLE 800
#define CYCLES_PER_SEC (NSEC_PER_SEC / NSEC_PER_CYCLE)
static DEFINE_PER_CPU(struct clock_event_device, hub_rt_clockevent);
static DEFINE_PER_CPU(char [11], hub_rt_name);
static void __cpuinit hub_rt_clock_event_init(void)
void __cpuinit hub_rt_clock_event_init(void)
{
unsigned int cpu = smp_processor_id();
struct clock_event_device *cd = &per_cpu(hub_rt_clockevent, cpu);
@@ -203,17 +203,16 @@ static void __cpuinit hub_rt_clock_event_init(void)
int irq = rt_timer_irq;
sprintf(name, "hub-rt %d", cpu);
cd->name = "HUB-RT",
cd->features = CLOCK_EVT_FEAT_ONESHOT,
cd->name = name;
cd->features = CLOCK_EVT_FEAT_ONESHOT;
clockevent_set_clock(cd, CYCLES_PER_SEC);
cd->max_delta_ns = clockevent_delta2ns(0xfffffffffffff, cd);
cd->min_delta_ns = clockevent_delta2ns(0x300, cd);
cd->rating = 200,
cd->irq = irq,
cd->cpumask = cpumask_of_cpu(cpu),
cd->rating = 300,
cd->set_next_event = rt_next_event,
cd->set_mode = rt_set_mode,
cd->rating = 200;
cd->irq = irq;
cd->cpumask = cpumask_of_cpu(cpu);
cd->set_next_event = rt_next_event;
cd->set_mode = rt_set_mode;
clockevents_register_device(cd);
}
@@ -261,6 +260,7 @@ void __init plat_time_init(void)
{
hub_rt_clocksource_init();
hub_rt_clock_event_global_init();
hub_rt_clock_event_init();
}
void __cpuinit cpu_time_init(void)
@@ -281,7 +281,6 @@ void __cpuinit cpu_time_init(void)
printk("CPU %d clock is %dMHz.\n", smp_processor_id(), cpu->cpu_speed);
hub_rt_clock_event_init();
set_c0_status(SRB_TIMOCLK);
}

View File

@@ -355,6 +355,7 @@ void au1xxx_dbdma_dump(u32 chanid);
u32 au1xxx_dbdma_put_dscr(u32 chanid, au1x_ddma_desc_t *dscr);
u32 au1xxx_ddma_add_device(dbdev_tab_t *dev);
extern void au1xxx_ddma_del_device(u32 devid);
void *au1xxx_ddma_get_nextptr_virt(au1x_ddma_desc_t *dp);
/*

View File

@@ -765,6 +765,9 @@ do { \
#define read_c0_index() __read_32bit_c0_register($0, 0)
#define write_c0_index(val) __write_32bit_c0_register($0, 0, val)
#define read_c0_random() __read_32bit_c0_register($1, 0)
#define write_c0_random(val) __write_32bit_c0_register($1, 0, val)
#define read_c0_entrylo0() __read_ulong_c0_register($2, 0)
#define write_c0_entrylo0(val) __write_ulong_c0_register($2, 0, val)
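A trivial, hypothetical use of the new accessor pair (e.g. from TLB debug code; not part of this commit). On MIPS, Random normally decrements between the Wired value and the last TLB index, so back-to-back reads usually differ.

/* Sketch only: sample the CP0 Random register via the new accessor. */
static void show_c0_random(void)
{
	printk(KERN_DEBUG "c0_random = %u\n", read_c0_random());
}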

View File

@@ -239,9 +239,10 @@ static inline pte_t pte_mkdirty(pte_t pte)
static inline pte_t pte_mkyoung(pte_t pte)
{
pte.pte_low |= _PAGE_ACCESSED;
if (pte.pte_low & _PAGE_READ)
if (pte.pte_low & _PAGE_READ) {
pte.pte_low |= _PAGE_SILENT_READ;
pte.pte_high |= _PAGE_SILENT_READ;
}
return pte;
}
#else

View File

@@ -3,7 +3,7 @@
*
*/
#ifndef __ASM_RTLX_H
#ifndef __ASM_RTLX_H_
#define __ASM_RTLX_H_
#include <irq.h>
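For reference, the canonical guard shape the typo fix restores (a sketch, not the full header): the #ifndef token now matches the #define that follows it.

/* Sketch only: intended include-guard structure after the fix. */
#ifndef __ASM_RTLX_H_
#define __ASM_RTLX_H_

/* ... declarations ... */

#endif /* __ASM_RTLX_H_ */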