tmp_suning_uos_patched/include/asm-powerpc/page.h
Andrea Righi 27ac792ca0 PAGE_ALIGN(): correctly handle 64-bit values on 32-bit architectures
On 32-bit architectures PAGE_ALIGN() truncates 64-bit values to the 32-bit
boundary. For example:

	u64 val = PAGE_ALIGN(size);

always returns a value < 4GB even if size is greater than 4GB.

The problem resides in the PAGE_MASK definition (from include/asm-x86/page.h,
for example):

#define PAGE_SHIFT      12
#define PAGE_SIZE       (_AC(1,UL) << PAGE_SHIFT)
#define PAGE_MASK       (~(PAGE_SIZE-1))
...
#define PAGE_ALIGN(addr)       (((addr)+PAGE_SIZE-1)&PAGE_MASK)

The "~" is performed on a 32-bit value, so any value above 4GB that is
ANDed with PAGE_MASK is truncated to the 32-bit boundary. Using the
ALIGN() macro is the right fix, because it builds the mask with
typeof(addr).
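
A minimal user-space sketch of the problem and the fix (assuming a
32-bit unsigned long, as on a 32-bit build; ALIGN() is paraphrased from
include/linux/kernel.h and relies on GNU C typeof, as the kernel does):

	#include <stdint.h>
	#include <stdio.h>

	#define PAGE_SHIFT	12
	#define PAGE_SIZE	(1UL << PAGE_SHIFT)
	#define PAGE_MASK	(~(PAGE_SIZE-1))	/* 32-bit: 0xfffff000 */
	#define OLD_PAGE_ALIGN(addr)	(((addr)+PAGE_SIZE-1)&PAGE_MASK)

	/* ALIGN() builds the mask in typeof(x), so a u64 gets a 64-bit mask */
	#define ALIGN(x, a)	(((x) + ((typeof(x))(a) - 1)) & ~((typeof(x))(a) - 1))
	#define NEW_PAGE_ALIGN(addr)	ALIGN(addr, PAGE_SIZE)

	int main(void)
	{
		uint64_t size = 0x100001000ULL;	/* > 4GB */

		/* the 32-bit mask zero-extends to u64, clearing the high word */
		printf("%#llx\n", (unsigned long long)OLD_PAGE_ALIGN(size)); /* 0x1000 */
		/* the typeof-based mask stays 64-bit */
		printf("%#llx\n", (unsigned long long)NEW_PAGE_ALIGN(size)); /* 0x100001000 */
		return 0;
	}

Compiled with -m32, the first result is truncated below 4GB while the
second preserves the full 64-bit value.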

Also move the PAGE_ALIGN() definitions out of include/asm-*/page.h and
into include/linux/mm.h.

See also lkml discussion: http://lkml.org/lkml/2008/6/11/237

[akpm@linux-foundation.org: fix drivers/media/video/uvc/uvc_queue.c]
[akpm@linux-foundation.org: fix v850]
[akpm@linux-foundation.org: fix powerpc]
[akpm@linux-foundation.org: fix arm]
[akpm@linux-foundation.org: fix mips]
[akpm@linux-foundation.org: fix drivers/media/video/pvrusb2/pvrusb2-dvb.c]
[akpm@linux-foundation.org: fix drivers/mtd/maps/uclinux.c]
[akpm@linux-foundation.org: fix powerpc]
Signed-off-by: Andrea Righi <righi.andrea@gmail.com>
Cc: <linux-arch@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2008-07-24 10:47:21 -07:00

#ifndef _ASM_POWERPC_PAGE_H
#define _ASM_POWERPC_PAGE_H
/*
* Copyright (C) 2001,2005 IBM Corporation.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <asm/asm-compat.h>
#include <asm/kdump.h>
#include <asm/types.h>
/*
* On PPC32 page size is 4K. For PPC64 we support either 4K or 64K software
* page size. When using 64K pages however, whether we are really supporting
* 64K pages in HW or not is irrelevant to those definitions.
*/
#ifdef CONFIG_PPC_64K_PAGES
#define PAGE_SHIFT 16
#else
#define PAGE_SHIFT 12
#endif
#define PAGE_SIZE (ASM_CONST(1) << PAGE_SHIFT)
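/*
* ASM_CONST() comes from asm-compat.h; roughly (a sketch, not verbatim),
* it appends the UL suffix in C while expanding to the bare constant in
* assembly, so PAGE_SIZE is usable in both contexts:
*
*	#ifdef __ASSEMBLY__
*	#define ASM_CONST(x)	x
*	#else
*	#define ASM_CONST(x)	x##UL	(via an expansion helper)
*	#endif
*/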
/* We do define AT_SYSINFO_EHDR but don't use the gate mechanism */
#define __HAVE_ARCH_GATE_AREA 1
/*
* Subtle: (1 << PAGE_SHIFT) is an int, not an unsigned long. So if we
* assign PAGE_MASK to a larger type it gets extended the way we want
* (i.e. with 1s in the high bits)
*/
#define PAGE_MASK (~((1 << PAGE_SHIFT) - 1))
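/*
* A small illustration (sketch, hypothetical variables): widening the
* int-typed mask sign-extends, while an unsigned 32-bit mask would
* zero-extend:
*
*	u64 good = PAGE_MASK;		// 0xfffffffffffff000
*	u64 bad  = (u32)PAGE_MASK;	// 0x00000000fffff000
*/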
/*
* KERNELBASE is the virtual address of the start of the kernel; it's often
* the same as PAGE_OFFSET, but _might not be_.
*
* The kdump dump kernel is one example where KERNELBASE != PAGE_OFFSET.
*
* PAGE_OFFSET is the virtual address of the start of lowmem.
*
* PHYSICAL_START is the physical address of the start of the kernel.
*
* MEMORY_START is the physical address of the start of lowmem.
*
* KERNELBASE, PAGE_OFFSET, and PHYSICAL_START are all configurable on
* ppc32, and based on how they are set we determine MEMORY_START.
*
* For the linear mapping the following equation should be true:
* KERNELBASE - PAGE_OFFSET = PHYSICAL_START - MEMORY_START
*
* Also, KERNELBASE >= PAGE_OFFSET and PHYSICAL_START >= MEMORY_START
*
* There are two ways to determine a physical address from a virtual one:
* va = pa + PAGE_OFFSET - MEMORY_START
* va = pa + KERNELBASE - PHYSICAL_START
*
* If you want to know something's offset from the start of the kernel you
* should subtract KERNELBASE.
*
* If you want to test if something's a kernel address, use is_kernel_addr().
*/
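/*
* Worked example (hypothetical kdump-style layout, not defaults from
* this tree): KERNELBASE = 0xc2000000, PHYSICAL_START = 0x02000000,
* PAGE_OFFSET = 0xc0000000, MEMORY_START = 0x00000000.
*
* The linear-mapping identity holds:
*	KERNELBASE - PAGE_OFFSET = 0x02000000 = PHYSICAL_START - MEMORY_START
*
* and both translations agree for pa = 0x02001000:
*	va = pa + PAGE_OFFSET - MEMORY_START  = 0xc2001000
*	va = pa + KERNELBASE - PHYSICAL_START = 0xc2001000
*/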
#define KERNELBASE ASM_CONST(CONFIG_KERNEL_START)
#define PAGE_OFFSET ASM_CONST(CONFIG_PAGE_OFFSET)
#define LOAD_OFFSET ASM_CONST((CONFIG_KERNEL_START-CONFIG_PHYSICAL_START))
#if defined(CONFIG_RELOCATABLE) && defined(CONFIG_FLATMEM)
#ifndef __ASSEMBLY__
extern phys_addr_t memstart_addr;
extern phys_addr_t kernstart_addr;
#endif
#define PHYSICAL_START kernstart_addr
#define MEMORY_START memstart_addr
#else
#define PHYSICAL_START ASM_CONST(CONFIG_PHYSICAL_START)
#define MEMORY_START (PHYSICAL_START + PAGE_OFFSET - KERNELBASE)
#endif
#ifdef CONFIG_FLATMEM
#define ARCH_PFN_OFFSET (MEMORY_START >> PAGE_SHIFT)
#define pfn_valid(pfn) ((pfn) >= ARCH_PFN_OFFSET && (pfn) < (ARCH_PFN_OFFSET + max_mapnr))
#endif
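/*
* Example (hypothetical numbers): with 4K pages and
* MEMORY_START = 0x20000000, ARCH_PFN_OFFSET = 0x20000, so pfn_valid()
* accepts pfns in [0x20000, 0x20000 + max_mapnr).
*/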
#define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
#define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT)
#define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
#define __va(x) ((void *)((unsigned long)(x) - PHYSICAL_START + KERNELBASE))
#define __pa(x) ((unsigned long)(x) + PHYSICAL_START - KERNELBASE)
/*
* Unfortunately the PLT is in the BSS in the PPC32 ELF ABI,
* and needs to be executable. This means the whole heap ends
* up being executable.
*/
#define VM_DATA_DEFAULT_FLAGS32	(VM_READ | VM_WRITE | VM_EXEC | \
					 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
#define VM_DATA_DEFAULT_FLAGS64	(VM_READ | VM_WRITE | \
					 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
#ifdef __powerpc64__
#include <asm/page_64.h>
#else
#include <asm/page_32.h>
#endif
/* align addr on a size boundary - adjust address up/down if needed */
#define _ALIGN_UP(addr,size) (((addr)+((size)-1))&(~((size)-1)))
#define _ALIGN_DOWN(addr,size) ((addr)&(~((size)-1)))
/* align addr on a size boundary - adjust address up if needed */
#define _ALIGN(addr,size) _ALIGN_UP(addr,size)
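/*
* For example (size must be a power of two):
*	_ALIGN_UP(0x1234, 0x1000)   == 0x2000
*	_ALIGN_DOWN(0x1234, 0x1000) == 0x1000
*	_ALIGN_UP(0x1000, 0x1000)   == 0x1000	(already aligned)
*/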
/*
* Don't compare things with KERNELBASE or PAGE_OFFSET to test for
* "kernelness"; use is_kernel_addr() - it should do what you want.
*/
#define is_kernel_addr(x) ((x) >= PAGE_OFFSET)
#ifndef __ASSEMBLY__
#undef STRICT_MM_TYPECHECKS
#ifdef STRICT_MM_TYPECHECKS
/* These are used to make use of C type-checking. */
/* PTE level */
typedef struct { pte_basic_t pte; } pte_t;
#define pte_val(x) ((x).pte)
#define __pte(x) ((pte_t) { (x) })
/* 64k pages additionally define a bigger "real PTE" type that gathers
* the "second half" part of the PTE for pseudo 64k pages
*/
#ifdef CONFIG_PPC_64K_PAGES
typedef struct { pte_t pte; unsigned long hidx; } real_pte_t;
#else
typedef struct { pte_t pte; } real_pte_t;
#endif
/* PMD level */
#ifdef CONFIG_PPC64
typedef struct { unsigned long pmd; } pmd_t;
#define pmd_val(x) ((x).pmd)
#define __pmd(x) ((pmd_t) { (x) })
/* PUD level exists only on 4k pages */
#ifndef CONFIG_PPC_64K_PAGES
typedef struct { unsigned long pud; } pud_t;
#define pud_val(x) ((x).pud)
#define __pud(x) ((pud_t) { (x) })
#endif /* !CONFIG_PPC_64K_PAGES */
#endif /* CONFIG_PPC64 */
/* PGD level */
typedef struct { unsigned long pgd; } pgd_t;
#define pgd_val(x) ((x).pgd)
#define __pgd(x) ((pgd_t) { (x) })
/* Page protection bits */
typedef struct { unsigned long pgprot; } pgprot_t;
#define pgprot_val(x) ((x).pgprot)
#define __pgprot(x) ((pgprot_t) { (x) })
#else
/*
* .. while these make it easier on the compiler
*/
typedef pte_basic_t pte_t;
#define pte_val(x) (x)
#define __pte(x) (x)
#ifdef CONFIG_PPC_64K_PAGES
typedef struct { pte_t pte; unsigned long hidx; } real_pte_t;
#else
typedef unsigned long real_pte_t;
#endif
#ifdef CONFIG_PPC64
typedef unsigned long pmd_t;
#define pmd_val(x) (x)
#define __pmd(x) (x)
#ifndef CONFIG_PPC_64K_PAGES
typedef unsigned long pud_t;
#define pud_val(x) (x)
#define __pud(x) (x)
#endif /* !CONFIG_PPC_64K_PAGES */
#endif /* CONFIG_PPC64 */
typedef unsigned long pgd_t;
#define pgd_val(x) (x)
#define pgprot_val(x) (x)
typedef unsigned long pgprot_t;
#define __pgd(x) (x)
#define __pgprot(x) (x)
#endif
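/*
* Usage sketch (hypothetical values): the accessors read the same in
* either mode, but only STRICT_MM_TYPECHECKS turns type mixups into
* build errors:
*
*	pte_t pte = __pte(0x1000);
*	unsigned long raw = pte_val(pte);
*	pgd_t pgd = pte;	// compile error with STRICT_MM_TYPECHECKS,
*				// silently accepted with the plain typedefs
*/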
struct page;
extern void clear_user_page(void *page, unsigned long vaddr, struct page *pg);
extern void copy_user_page(void *to, void *from, unsigned long vaddr,
			   struct page *p);
extern int page_is_ram(unsigned long pfn);
struct vm_area_struct;
typedef struct page *pgtable_t;
#include <asm-generic/memory_model.h>
#endif /* __ASSEMBLY__ */
#endif /* _ASM_POWERPC_PAGE_H */