x86: add _PAGE_IOMAP pte flag for IO mappings
Use one of the software-defined PTE bits to indicate that a mapping is intended for an IO address. On native hardware this is irrelevant, since a physical address is a physical address. But in a virtual environment, physical addresses are also virtualized, so there needs to be some way to distinguish pseudo-physical addresses from actual hardware addresses; _PAGE_IOMAP indicates this intent.

By default, __supported_pte_mask masks out _PAGE_IOMAP, so it doesn't even appear in the final pagetable.

Signed-off-by: Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
commit be43d72835
parent 07bb2f6236
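To make the masking behaviour concrete, here is a minimal user-space C sketch (not kernel code; the constants and the make_prot() helper are simplified stand-ins for the real pgtable machinery). It shows how a software-defined flag such as _PAGE_IOMAP is simply stripped by __supported_pte_mask before a protection value reaches the page table, so on native hardware the bit never appears in the final PTE:

#include <stdio.h>
#include <stdint.h>

typedef uint64_t pteval_t;

/* simplified stand-ins for the kernel's pgtable constants */
#define _PAGE_PRESENT ((pteval_t)1 << 0)
#define _PAGE_RW      ((pteval_t)1 << 1)
#define _PAGE_IOMAP   ((pteval_t)1 << 10)

/* native default after this patch: everything but _PAGE_IOMAP passes */
static pteval_t __supported_pte_mask = ~_PAGE_IOMAP;

/* hypothetical helper: filter requested flags through the supported mask,
 * the way the kernel filters pgprot bits when it builds a pte */
static pteval_t make_prot(pteval_t requested)
{
        return requested & __supported_pte_mask;
}

int main(void)
{
        pteval_t io_prot = _PAGE_PRESENT | _PAGE_RW | _PAGE_IOMAP;

        printf("requested: %#llx\n", (unsigned long long)io_prot);
        printf("installed: %#llx\n", (unsigned long long)make_prot(io_prot));
        return 0;
}

The requested value still carries bit 10; the installed value does not, which is exactly the "doesn't even appear in the final pagetable" behaviour described above.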
@@ -558,7 +558,7 @@ void zap_low_mappings(void)
 
 int nx_enabled;
 
-pteval_t __supported_pte_mask __read_mostly = ~(_PAGE_NX | _PAGE_GLOBAL);
+pteval_t __supported_pte_mask __read_mostly = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
 EXPORT_SYMBOL_GPL(__supported_pte_mask);
 
 #ifdef CONFIG_X86_PAE
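For reference, a tiny sketch of what the new 32-bit/PAE default works out to numerically, assuming the usual bit positions (_PAGE_BIT_GLOBAL = 8 as in the header hunk further down, _PAGE_BIT_IOMAP = 10, and NX in bit 63 of a 64-bit pteval_t). This is just arithmetic, not kernel code:

#include <stdio.h>
#include <stdint.h>

typedef uint64_t pteval_t;

#define _PAGE_GLOBAL ((pteval_t)1 << 8)
#define _PAGE_IOMAP  ((pteval_t)1 << 10)
#define _PAGE_NX     ((pteval_t)1 << 63)

int main(void)
{
        /* the default installed by this hunk */
        pteval_t mask = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);

        /* bits 8, 10 and 63 stay withheld until something enables them */
        printf("__supported_pte_mask = %#018llx\n", (unsigned long long)mask);
        return 0;
}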
@@ -89,7 +89,7 @@ early_param("gbpages", parse_direct_gbpages_on);
 
 int after_bootmem;
 
-unsigned long __supported_pte_mask __read_mostly = ~0UL;
+pteval_t __supported_pte_mask __read_mostly = ~_PAGE_IOMAP;
 EXPORT_SYMBOL_GPL(__supported_pte_mask);
 
 static int do_not_nx __cpuinitdata;
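On 64-bit the default used to be ~0UL (allow every flag); it now withholds _PAGE_IOMAP just like the 32-bit side. A purely illustrative sketch of how a virtualized environment, which does care about the pseudo-physical vs. machine-address distinction the commit message describes, might opt back in by widening the mask at boot (the xen_like_init() hook and the translation test below are hypothetical, not real kernel interfaces):

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

typedef uint64_t pteval_t;

#define _PAGE_IOMAP ((pteval_t)1 << 10)

/* 64-bit default after this patch */
static pteval_t __supported_pte_mask = ~_PAGE_IOMAP;

/* hypothetical paravirt boot hook: keep _PAGE_IOMAP so IO mappings stay
 * distinguishable from ordinary (pseudo-physical) mappings */
static void xen_like_init(void)
{
        __supported_pte_mask |= _PAGE_IOMAP;
}

/* hypothetical check a paravirt backend might make when installing a pte:
 * IO mappings use the frame number as a real hardware address, everything
 * else is treated as pseudo-physical and translated first */
static bool needs_frame_translation(pteval_t pte_flags)
{
        return !(pte_flags & _PAGE_IOMAP);
}

int main(void)
{
        xen_like_init();

        pteval_t flags = _PAGE_IOMAP & __supported_pte_mask;
        printf("translate frame? %s\n",
               needs_frame_translation(flags) ? "yes" : "no");
        return 0;
}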
@@ -242,16 +242,16 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr,
 	switch (prot_val) {
 	case _PAGE_CACHE_UC:
 	default:
-		prot = PAGE_KERNEL_NOCACHE;
+		prot = PAGE_KERNEL_IO_NOCACHE;
 		break;
 	case _PAGE_CACHE_UC_MINUS:
-		prot = PAGE_KERNEL_UC_MINUS;
+		prot = PAGE_KERNEL_IO_UC_MINUS;
 		break;
 	case _PAGE_CACHE_WC:
-		prot = PAGE_KERNEL_WC;
+		prot = PAGE_KERNEL_IO_WC;
 		break;
 	case _PAGE_CACHE_WB:
-		prot = PAGE_KERNEL;
+		prot = PAGE_KERNEL_IO;
 		break;
 	}
 
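The visible change to __ioremap_caller() is just this switch: each requested cache attribute now selects the _IO variant of the same protection, so every ioremap-style mapping carries _PAGE_IOMAP while its cacheability is unchanged. A standalone sketch of the same selection logic, using simplified stand-in values (the enum and the 0x... constants below are illustrative, not the kernel's real composites):

#include <stdio.h>
#include <stdint.h>

typedef uint64_t pteval_t;

/* illustrative stand-ins; real values come from pgtable.h / PAT setup */
enum cache_type { CACHE_UC, CACHE_UC_MINUS, CACHE_WC, CACHE_WB };

#define _PAGE_IOMAP           ((pteval_t)1 << 10)
#define PAGE_KERNEL           ((pteval_t)0x063)      /* stand-in: present|rw|accessed|dirty */
#define PAGE_KERNEL_NOCACHE   (PAGE_KERNEL | 0x18)   /* stand-in: + PCD|PWT */
#define PAGE_KERNEL_UC_MINUS  (PAGE_KERNEL | 0x10)   /* stand-in: + PCD */
#define PAGE_KERNEL_WC        (PAGE_KERNEL | 0x08)   /* stand-in: + PWT */

/* mirrors the updated switch: pick the IO flavour of the requested type */
static pteval_t io_prot_for(enum cache_type type)
{
        switch (type) {
        case CACHE_UC:
        default:
                return PAGE_KERNEL_NOCACHE | _PAGE_IOMAP;
        case CACHE_UC_MINUS:
                return PAGE_KERNEL_UC_MINUS | _PAGE_IOMAP;
        case CACHE_WC:
                return PAGE_KERNEL_WC | _PAGE_IOMAP;
        case CACHE_WB:
                return PAGE_KERNEL | _PAGE_IOMAP;
        }
}

int main(void)
{
        printf("WC ioremap prot = %#llx\n",
               (unsigned long long)io_prot_for(CACHE_WC));
        return 0;
}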
@@ -15,7 +15,7 @@
 #define _PAGE_BIT_PAT        7    /* on 4KB pages */
 #define _PAGE_BIT_GLOBAL     8    /* Global TLB entry PPro+ */
 #define _PAGE_BIT_UNUSED1    9    /* available for programmer */
-#define _PAGE_BIT_UNUSED2    10
+#define _PAGE_BIT_IOMAP      10   /* flag used to indicate IO mapping */
 #define _PAGE_BIT_UNUSED3    11
 #define _PAGE_BIT_PAT_LARGE  12   /* On 2MB or 1GB pages */
 #define _PAGE_BIT_SPECIAL    _PAGE_BIT_UNUSED1
@@ -32,7 +32,7 @@
 #define _PAGE_PSE       (_AT(pteval_t, 1) << _PAGE_BIT_PSE)
 #define _PAGE_GLOBAL    (_AT(pteval_t, 1) << _PAGE_BIT_GLOBAL)
 #define _PAGE_UNUSED1   (_AT(pteval_t, 1) << _PAGE_BIT_UNUSED1)
-#define _PAGE_UNUSED2   (_AT(pteval_t, 1) << _PAGE_BIT_UNUSED2)
+#define _PAGE_IOMAP     (_AT(pteval_t, 1) << _PAGE_BIT_IOMAP)
 #define _PAGE_UNUSED3   (_AT(pteval_t, 1) << _PAGE_BIT_UNUSED3)
 #define _PAGE_PAT       (_AT(pteval_t, 1) << _PAGE_BIT_PAT)
 #define _PAGE_PAT_LARGE (_AT(pteval_t, 1) << _PAGE_BIT_PAT_LARGE)
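_AT(pteval_t, 1) is the kernel's way of writing a typed constant that also works in assembler source: in C it expands to a cast, ((pteval_t)1), while under __ASSEMBLY__ it degrades to a bare 1. With _PAGE_BIT_IOMAP = 10 the new flag is therefore bit 10, value 0x400, shifted in the full pteval_t width. A small sketch of the same idiom (a simplified copy; the real macro lives in linux/const.h):

#include <stdio.h>
#include <stdint.h>
#include <assert.h>

typedef uint64_t pteval_t;

/* simplified version of the _AT() idiom:
 * in C it applies the cast, under __ASSEMBLY__ it would drop it */
#ifdef __ASSEMBLY__
#define _AT(T, X) X
#else
#define _AT(T, X) ((T)(X))
#endif

#define _PAGE_BIT_IOMAP 10
#define _PAGE_IOMAP     (_AT(pteval_t, 1) << _PAGE_BIT_IOMAP)

int main(void)
{
        /* bit 10 => 0x400, computed in the 64-bit pteval_t type */
        assert(_PAGE_IOMAP == 0x400);
        assert(sizeof(_PAGE_IOMAP) == sizeof(pteval_t));
        printf("_PAGE_IOMAP = %#llx\n", (unsigned long long)_PAGE_IOMAP);
        return 0;
}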
@@ -99,6 +99,11 @@
 #define __PAGE_KERNEL_LARGE_NOCACHE  (__PAGE_KERNEL | _PAGE_CACHE_UC | _PAGE_PSE)
 #define __PAGE_KERNEL_LARGE_EXEC     (__PAGE_KERNEL_EXEC | _PAGE_PSE)
 
+#define __PAGE_KERNEL_IO             (__PAGE_KERNEL | _PAGE_IOMAP)
+#define __PAGE_KERNEL_IO_NOCACHE     (__PAGE_KERNEL_NOCACHE | _PAGE_IOMAP)
+#define __PAGE_KERNEL_IO_UC_MINUS    (__PAGE_KERNEL_UC_MINUS | _PAGE_IOMAP)
+#define __PAGE_KERNEL_IO_WC          (__PAGE_KERNEL_WC | _PAGE_IOMAP)
+
 #define PAGE_KERNEL                  __pgprot(__PAGE_KERNEL)
 #define PAGE_KERNEL_RO               __pgprot(__PAGE_KERNEL_RO)
 #define PAGE_KERNEL_EXEC             __pgprot(__PAGE_KERNEL_EXEC)
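Each new __PAGE_KERNEL_IO* composite is simply the corresponding non-IO protection with _PAGE_IOMAP OR-ed in, so the IO variant differs from its base only in bit 10. A quick check using stand-in base values (the 0x... numbers below are illustrative, not the header's real composites):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t pteval_t;

#define _PAGE_IOMAP               ((pteval_t)1 << 10)

/* illustrative stand-ins for the non-IO composites */
#define __PAGE_KERNEL             ((pteval_t)0x063)
#define __PAGE_KERNEL_NOCACHE     ((pteval_t)0x07b)

/* the pattern added by this hunk */
#define __PAGE_KERNEL_IO          (__PAGE_KERNEL | _PAGE_IOMAP)
#define __PAGE_KERNEL_IO_NOCACHE  (__PAGE_KERNEL_NOCACHE | _PAGE_IOMAP)

int main(void)
{
        /* the IO variants differ from their base only in bit 10 */
        assert((__PAGE_KERNEL_IO ^ __PAGE_KERNEL) == _PAGE_IOMAP);
        assert((__PAGE_KERNEL_IO_NOCACHE ^ __PAGE_KERNEL_NOCACHE) == _PAGE_IOMAP);
        printf("ok\n");
        return 0;
}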
@@ -113,6 +118,11 @@
 #define PAGE_KERNEL_VSYSCALL          __pgprot(__PAGE_KERNEL_VSYSCALL)
 #define PAGE_KERNEL_VSYSCALL_NOCACHE  __pgprot(__PAGE_KERNEL_VSYSCALL_NOCACHE)
 
+#define PAGE_KERNEL_IO                __pgprot(__PAGE_KERNEL_IO)
+#define PAGE_KERNEL_IO_NOCACHE        __pgprot(__PAGE_KERNEL_IO_NOCACHE)
+#define PAGE_KERNEL_IO_UC_MINUS       __pgprot(__PAGE_KERNEL_IO_UC_MINUS)
+#define PAGE_KERNEL_IO_WC             __pgprot(__PAGE_KERNEL_IO_WC)
+
 /* xwr */
 #define __P000  PAGE_NONE
 #define __P001  PAGE_READONLY
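PAGE_KERNEL_IO* are the pgprot_t-typed versions of the raw composites above, wrapped with __pgprot() like their existing neighbours so they can be passed wherever a pgprot_t is expected (as __ioremap_caller() now does). A sketch of the wrapping convention and of how code could later ask whether a given protection describes an IO mapping (the struct and macros are simplified copies of the kernel's pgprot_t convention; is_io_prot() is a hypothetical helper, not something this patch adds):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t pteval_t;

/* simplified copy of the kernel's pgprot_t wrapping convention */
typedef struct { pteval_t pgprot; } pgprot_t;
#define __pgprot(x)   ((pgprot_t){ .pgprot = (x) })
#define pgprot_val(x) ((x).pgprot)

#define _PAGE_IOMAP        ((pteval_t)1 << 10)
#define __PAGE_KERNEL      ((pteval_t)0x063)              /* illustrative */
#define __PAGE_KERNEL_IO   (__PAGE_KERNEL | _PAGE_IOMAP)

#define PAGE_KERNEL        __pgprot(__PAGE_KERNEL)
#define PAGE_KERNEL_IO     __pgprot(__PAGE_KERNEL_IO)

/* hypothetical helper: does this protection describe an IO mapping? */
static bool is_io_prot(pgprot_t prot)
{
        return pgprot_val(prot) & _PAGE_IOMAP;
}

int main(void)
{
        printf("PAGE_KERNEL    -> IO? %d\n", is_io_prot(PAGE_KERNEL));
        printf("PAGE_KERNEL_IO -> IO? %d\n", is_io_prot(PAGE_KERNEL_IO));
        return 0;
}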