[PATCH] i386: PARAVIRT: revert map_pt_hook.
Back out the map_pt_hook to clear the way for kmap_atomic_pte.

Signed-off-by: Jeremy Fitzhardinge <jeremy@xensource.com>
Signed-off-by: Andi Kleen <ak@suse.de>
Cc: Zachary Amsden <zach@vmware.com>
commit a27fe809b8 (parent d4c104771a)
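For context only, not part of the patch: the message above names kmap_atomic_pte as the interface this revert clears the way for. The sketch below shows roughly that direction -- the backend supplies the atomic mapping of the pte page itself instead of being notified after kmap_atomic(). The exact signature and macro body are assumptions based on the description here, not text quoted from a follow-up change.

	/*
	 * Sketch only: a per-backend hook that performs the atomic
	 * mapping of a pagetable page.  Signature is an assumption.
	 */
	void *(*kmap_atomic_pte)(struct page *page, enum km_type type);

	#define pte_offset_map(dir, address)				\
		((pte_t *)kmap_atomic_pte(pmd_page(*(dir)), KM_PTE0) +	\
		 pte_index(address))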
@@ -302,8 +302,6 @@ struct paravirt_ops paravirt_ops = {
 	.flush_tlb_single = native_flush_tlb_single,
 	.flush_tlb_others = native_flush_tlb_others,
 
-	.map_pt_hook = paravirt_nop,
-
 	.alloc_pt = paravirt_nop,
 	.alloc_pd = paravirt_nop,
 	.alloc_pd_clone = paravirt_nop,
@@ -851,8 +851,10 @@ static inline int __init activate_vmi(void)
 		paravirt_ops.release_pt = vmi_release_pt;
 		paravirt_ops.release_pd = vmi_release_pd;
 	}
+#if 0
 	para_wrap(map_pt_hook, vmi_map_pt_hook, set_linear_mapping,
 		  SetLinearMapping);
+#endif
 
 	/*
 	 * These MUST always be patched. Don't support indirect jumps
@@ -169,8 +169,6 @@ struct paravirt_ops
 	void (*flush_tlb_others)(const cpumask_t *cpus, struct mm_struct *mm,
 				 unsigned long va);
 
-	void (*map_pt_hook)(int type, pte_t *va, u32 pfn);
-
 	/* Hooks for allocating/releasing pagetable pages */
 	void (*alloc_pt)(u32 pfn);
 	void (*alloc_pd)(u32 pfn);
@@ -862,11 +860,6 @@ static inline void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
 	PVOP_VCALL3(flush_tlb_others, &cpumask, mm, va);
 }
 
-static inline void paravirt_map_pt_hook(int type, pte_t *va, u32 pfn)
-{
-	PVOP_VCALL3(map_pt_hook, type, va, pfn);
-}
-
 static inline void paravirt_alloc_pt(unsigned pfn)
 {
 	PVOP_VCALL1(alloc_pt, pfn);
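The hunk above drops the PVOP_VCALL3-based wrapper. As a conceptual model only (the real PVOP_VCALL3() is patchable inline assembly, not a plain C macro), the removed wrapper amounted to an indirect call through the ops table, roughly:

	/* Illustration, not the kernel's actual macro expansion. */
	#define EXAMPLE_PVOP_VCALL3(op, a, b, c)  paravirt_ops.op((a), (b), (c))

	static inline void example_map_pt_hook(int type, pte_t *va, u32 pfn)
	{
		/* dispatches to .map_pt_hook: paravirt_nop natively, or a
		 * hypervisor backend's hook (e.g. VMI's vmi_map_pt_hook) */
		EXAMPLE_PVOP_VCALL3(map_pt_hook, type, va, pfn);
	}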
@@ -267,7 +267,6 @@ extern void vmalloc_sync_all(void);
  */
 #define pte_update(mm, addr, ptep)		do { } while (0)
 #define pte_update_defer(mm, addr, ptep)	do { } while (0)
-#define paravirt_map_pt_hook(slot, va, pfn)	do { } while (0)
 
 #define raw_ptep_get_and_clear(xp)	native_ptep_get_and_clear(xp)
 #endif
@@ -477,23 +476,9 @@ extern pte_t *lookup_address(unsigned long address);
 
 #if defined(CONFIG_HIGHPTE)
 #define pte_offset_map(dir, address) \
-({									\
-	pte_t *__ptep;							\
-	unsigned pfn = pmd_val(*(dir)) >> PAGE_SHIFT;			\
-	__ptep = (pte_t *)kmap_atomic(pfn_to_page(pfn),KM_PTE0);	\
-	paravirt_map_pt_hook(KM_PTE0,__ptep, pfn);			\
-	__ptep = __ptep + pte_index(address);				\
-	__ptep;								\
-})
+	((pte_t *)kmap_atomic(pmd_page(*(dir)),KM_PTE0) + pte_index(address))
 #define pte_offset_map_nested(dir, address) \
-({									\
-	pte_t *__ptep;							\
-	unsigned pfn = pmd_val(*(dir)) >> PAGE_SHIFT;			\
-	__ptep = (pte_t *)kmap_atomic(pfn_to_page(pfn),KM_PTE1);	\
-	paravirt_map_pt_hook(KM_PTE1,__ptep, pfn);			\
-	__ptep = __ptep + pte_index(address);				\
-	__ptep;								\
-})
+	((pte_t *)kmap_atomic(pmd_page(*(dir)),KM_PTE1) + pte_index(address))
 #define pte_unmap(pte) kunmap_atomic(pte, KM_PTE0)
 #define pte_unmap_nested(pte) kunmap_atomic(pte, KM_PTE1)
 #else
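For readers unfamiliar with the macros touched above, a hypothetical caller is sketched below: pte_offset_map() kmap_atomic()s the pte page into slot KM_PTE0, the _nested variant uses KM_PTE1 so two pte pages can be mapped at once, and every map must be paired with the matching pte_unmap*(). The function name is illustrative, not from the patch.

	/* Hypothetical usage under CONFIG_HIGHPTE; not part of this patch. */
	static int example_pte_is_present(pmd_t *pmd, unsigned long addr)
	{
		pte_t *pte = pte_offset_map(pmd, addr);	/* kmap_atomic, KM_PTE0 */
		int ret = pte_present(*pte);

		pte_unmap(pte);				/* kunmap_atomic, KM_PTE0 */
		return ret;
	}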