forked from luck/tmp_suning_uos_patched
544cdabe64
The 8xx can only support a max of 8M during early boot (it seems a lot of 8xx boards only have 8M so the bug was never triggered), but the early allocator isn't aware of this. The following change makes it able to run with larger memory. Signed-off-by: John Traill <john.traill@freescale.com> Signed-off-by: Vitaly Bordug <vitb@kernel.crashing.org> Signed-off-by: Scott Wood <scottwood@freescale.com> Signed-off-by: Kumar Gala <galak@kernel.crashing.org>
298 lines
7.0 KiB
C
298 lines
7.0 KiB
C
/*
|
|
* PowerPC version
|
|
* Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
|
|
*
|
|
* Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
|
|
* and Cort Dougan (PReP) (cort@cs.nmt.edu)
|
|
* Copyright (C) 1996 Paul Mackerras
|
|
* PPC44x/36-bit changes by Matt Porter (mporter@mvista.com)
|
|
*
|
|
* Derived from "arch/i386/mm/init.c"
|
|
* Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
|
|
*
|
|
* This program is free software; you can redistribute it and/or
|
|
* modify it under the terms of the GNU General Public License
|
|
* as published by the Free Software Foundation; either version
|
|
* 2 of the License, or (at your option) any later version.
|
|
*
|
|
*/
|
|
|
|
#include <linux/module.h>
|
|
#include <linux/sched.h>
|
|
#include <linux/kernel.h>
|
|
#include <linux/errno.h>
|
|
#include <linux/string.h>
|
|
#include <linux/types.h>
|
|
#include <linux/mm.h>
|
|
#include <linux/stddef.h>
|
|
#include <linux/init.h>
|
|
#include <linux/bootmem.h>
|
|
#include <linux/highmem.h>
|
|
#include <linux/initrd.h>
|
|
#include <linux/pagemap.h>
|
|
|
|
#include <asm/pgalloc.h>
|
|
#include <asm/prom.h>
|
|
#include <asm/io.h>
|
|
#include <asm/mmu_context.h>
|
|
#include <asm/pgtable.h>
|
|
#include <asm/mmu.h>
|
|
#include <asm/smp.h>
|
|
#include <asm/machdep.h>
|
|
#include <asm/btext.h>
|
|
#include <asm/tlb.h>
|
|
#include <asm/lmb.h>
|
|
#include <asm/sections.h>
|
|
|
|
#include "mmu_decl.h"
|
|
|
|
#if defined(CONFIG_KERNEL_START_BOOL) || defined(CONFIG_LOWMEM_SIZE_BOOL)
/* The amount of lowmem must be within 0xF0000000 - KERNELBASE. */
#if (CONFIG_LOWMEM_SIZE > (0xF0000000 - KERNELBASE))
#error "You must adjust CONFIG_LOWMEM_SIZE or CONFIG_START_KERNEL"
#endif
#endif
#define MAX_LOW_MEM CONFIG_LOWMEM_SIZE

DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);

/* Totals derived from the LMB memory map in MMU_init(). */
unsigned long total_memory;
unsigned long total_lowmem;

/* Physical start of RAM and the virtual->physical offset for lowmem. */
unsigned long ppc_memstart;
unsigned long ppc_memoffset = PAGE_OFFSET;

int boot_mapsize;
#ifdef CONFIG_PPC_PMAC
/* Physical address of the page the PowerMac AGP bridge needs reserved. */
unsigned long agp_special_page;
EXPORT_SYMBOL(agp_special_page);
#endif

#ifdef CONFIG_HIGHMEM
/* PTE slot and protection used by kmap_atomic() for highmem mappings. */
pte_t *kmap_pte;
pgprot_t kmap_prot;

EXPORT_SYMBOL(kmap_prot);
EXPORT_SYMBOL(kmap_pte);
#endif

void MMU_init(void);

/* XXX should be in current.h -- paulus */
extern struct task_struct *current_set[NR_CPUS];

extern int init_bootmem_done;

/*
 * this tells the system to map all of ram with the segregs
 * (i.e. page tables) instead of the bats.
 * -- Cort
 */
int __map_without_bats;
int __map_without_ltlbs;

/* max amount of low RAM to map in */
unsigned long __max_low_memory = MAX_LOW_MEM;

/*
 * limit of what is accessible with initial MMU setup -
 * 256MB usually, but only 16MB on 601 (and 8MB on 8xx,
 * see MMU_init() where this is lowered per-CPU).
 */
unsigned long __initial_memory_limit = 0x10000000;
|
|
|
|
/*
|
|
* Check for command-line options that affect what MMU_init will do.
|
|
*/
|
|
void MMU_setup(void)
|
|
{
|
|
/* Check for nobats option (used in mapin_ram). */
|
|
if (strstr(cmd_line, "nobats")) {
|
|
__map_without_bats = 1;
|
|
}
|
|
|
|
if (strstr(cmd_line, "noltlbs")) {
|
|
__map_without_ltlbs = 1;
|
|
}
|
|
#ifdef CONFIG_DEBUG_PAGEALLOC
|
|
__map_without_bats = 1;
|
|
__map_without_ltlbs = 1;
|
|
#endif
|
|
}
|
|
|
|
/*
 * MMU_init sets up the basic memory mappings for the kernel,
 * including both RAM and possibly some I/O regions,
 * and sets up the page tables and the MMU hardware ready to go.
 */
void __init MMU_init(void)
{
	if (ppc_md.progress)
		ppc_md.progress("MMU:enter", 0x111);

	/* 601 can only access 16MB at the moment */
	if (PVR_VER(mfspr(SPRN_PVR)) == 1)
		__initial_memory_limit = 0x01000000;
	/* 8xx can only access 8MB at the moment */
	if (PVR_VER(mfspr(SPRN_PVR)) == 0x50)
		__initial_memory_limit = 0x00800000;

	/* parse args from command line */
	MMU_setup();

	/* Only one contiguous memory region is supported this early;
	 * truncate the LMB map to its first region and recompute totals. */
	if (lmb.memory.cnt > 1) {
		lmb.memory.cnt = 1;
		lmb_analyze();
		printk(KERN_WARNING "Only using first contiguous memory region");
	}

	/* Everything below lowmem is directly mapped; start with all of RAM. */
	total_memory = lmb_end_of_DRAM();
	total_lowmem = total_memory;

#ifdef CONFIG_FSL_BOOKE
	/* Freescale Book-E parts expect lowmem to be mapped by fixed TLB
	 * entries, so we need to adjust lowmem to match the amount we can map
	 * in the fixed entries */
	adjust_total_lowmem();
#endif /* CONFIG_FSL_BOOKE */

	/* Clamp lowmem; without HIGHMEM, memory above the clamp is unusable
	 * and must be removed from the LMB map as well. */
	if (total_lowmem > __max_low_memory) {
		total_lowmem = __max_low_memory;
#ifndef CONFIG_HIGHMEM
		total_memory = total_lowmem;
		lmb_enforce_memory_limit(total_lowmem);
		lmb_analyze();
#endif /* CONFIG_HIGHMEM */
	}

	/* Initialize the MMU hardware */
	if (ppc_md.progress)
		ppc_md.progress("MMU:hw init", 0x300);
	MMU_init_hw();

	/* Map in all of RAM starting at KERNELBASE */
	if (ppc_md.progress)
		ppc_md.progress("MMU:mapin", 0x301);
	mapin_ram();

	/* Place the ioremap region below PKMAP when highmem steals the top. */
#ifdef CONFIG_HIGHMEM
	ioremap_base = PKMAP_BASE;
#else
	ioremap_base = 0xfe000000UL;	/* for now, could be 0xfffff000 */
#endif /* CONFIG_HIGHMEM */
	ioremap_bot = ioremap_base;

	/* Map in I/O resources */
	if (ppc_md.progress)
		ppc_md.progress("MMU:setio", 0x302);
	if (ppc_md.setup_io_mappings)
		ppc_md.setup_io_mappings();

	/* Initialize the context management stuff */
	mmu_context_init();

	if (ppc_md.progress)
		ppc_md.progress("MMU:exit", 0x211);

	/* From now on, btext is no longer BAT mapped if it was at all */
#ifdef CONFIG_BOOTX_TEXT
	btext_unmap();
#endif
}
|
|
|
|
/* This is only called until mem_init is done. */
|
|
void __init *early_get_page(void)
|
|
{
|
|
void *p;
|
|
|
|
if (init_bootmem_done) {
|
|
p = alloc_bootmem_pages(PAGE_SIZE);
|
|
} else {
|
|
p = __va(lmb_alloc_base(PAGE_SIZE, PAGE_SIZE,
|
|
__initial_memory_limit));
|
|
}
|
|
return p;
|
|
}
|
|
|
|
/* Free up now-unused memory */
|
|
static void free_sec(unsigned long start, unsigned long end, const char *name)
|
|
{
|
|
unsigned long cnt = 0;
|
|
|
|
while (start < end) {
|
|
ClearPageReserved(virt_to_page(start));
|
|
init_page_count(virt_to_page(start));
|
|
free_page(start);
|
|
cnt++;
|
|
start += PAGE_SIZE;
|
|
}
|
|
if (cnt) {
|
|
printk(" %ldk %s", cnt << (PAGE_SHIFT - 10), name);
|
|
totalram_pages += cnt;
|
|
}
|
|
}
|
|
|
|
void free_initmem(void)
|
|
{
|
|
#define FREESEC(TYPE) \
|
|
free_sec((unsigned long)(&__ ## TYPE ## _begin), \
|
|
(unsigned long)(&__ ## TYPE ## _end), \
|
|
#TYPE);
|
|
|
|
printk ("Freeing unused kernel memory:");
|
|
FREESEC(init);
|
|
printk("\n");
|
|
ppc_md.progress = NULL;
|
|
#undef FREESEC
|
|
}
|
|
|
|
#ifdef CONFIG_BLK_DEV_INITRD
|
|
void free_initrd_mem(unsigned long start, unsigned long end)
|
|
{
|
|
if (start < end)
|
|
printk ("Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
|
|
for (; start < end; start += PAGE_SIZE) {
|
|
ClearPageReserved(virt_to_page(start));
|
|
init_page_count(virt_to_page(start));
|
|
free_page(start);
|
|
totalram_pages++;
|
|
}
|
|
}
|
|
#endif
|
|
|
|
#ifdef CONFIG_PROC_KCORE
|
|
static struct kcore_list kcore_vmem;
|
|
|
|
static int __init setup_kcore(void)
|
|
{
|
|
int i;
|
|
|
|
for (i = 0; i < lmb.memory.cnt; i++) {
|
|
unsigned long base;
|
|
unsigned long size;
|
|
struct kcore_list *kcore_mem;
|
|
|
|
base = lmb.memory.region[i].base;
|
|
size = lmb.memory.region[i].size;
|
|
|
|
kcore_mem = kmalloc(sizeof(struct kcore_list), GFP_ATOMIC);
|
|
if (!kcore_mem)
|
|
panic("%s: kmalloc failed\n", __FUNCTION__);
|
|
|
|
/* must stay under 32 bits */
|
|
if ( 0xfffffffful - (unsigned long)__va(base) < size) {
|
|
size = 0xfffffffful - (unsigned long)(__va(base));
|
|
printk(KERN_DEBUG "setup_kcore: restrict size=%lx\n",
|
|
size);
|
|
}
|
|
|
|
kclist_add(kcore_mem, __va(base), size);
|
|
}
|
|
|
|
kclist_add(&kcore_vmem, (void *)VMALLOC_START,
|
|
VMALLOC_END-VMALLOC_START);
|
|
|
|
return 0;
|
|
}
|
|
module_init(setup_kcore);
|
|
#endif
|