Merge branch 'x86-mm-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 mm updates from Ingo Molnar:

 - Make cpumask_of_node() more robust against invalid node IDs

 - Simplify and speed up load_mm_cr4()

 - Unexport and remove various unused set_memory_*() APIs

 - Misc cleanups

* 'x86-mm-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/mm: Fix cpumask_of_node() error condition
  x86/mm: Remove the unused set_memory_wt() function
  x86/mm: Remove set_pages_x() and set_pages_nx()
  x86/mm: Remove the unused set_memory_array_*() functions
  x86/mm: Unexport set_memory_x() and set_memory_nx()
  x86/fixmap: Cleanup outdated comments
  x86/kconfig: Remove X86_DIRECT_GBPAGES dependency on !DEBUG_PAGEALLOC
  x86/mm: Avoid redundant interrupt disable in load_mm_cr4()

commit ac51667b5b
arch/x86/Kconfig
@@ -1503,7 +1503,7 @@ config X86_5LEVEL

 config X86_DIRECT_GBPAGES
         def_bool y
-        depends on X86_64 && !DEBUG_PAGEALLOC
+        depends on X86_64
         ---help---
           Certain kernel features effectively disable kernel
           linear 1 GB mappings (even if the CPU otherwise
arch/x86/events/core.c
@@ -2108,7 +2108,7 @@ static int x86_pmu_event_init(struct perf_event *event)

 static void refresh_pce(void *ignored)
 {
-        load_mm_cr4(this_cpu_read(cpu_tlbstate.loaded_mm));
+        load_mm_cr4_irqsoff(this_cpu_read(cpu_tlbstate.loaded_mm));
 }

 static void x86_pmu_event_mapped(struct perf_event *event, struct mm_struct *mm)
arch/x86/include/asm/fixmap.h
@@ -42,8 +42,7 @@
  * Because of this, FIXADDR_TOP x86 integration was left as later work.
  */
 #ifdef CONFIG_X86_32
-/* used by vmalloc.c, vsyscall.lds.S.
- *
+/*
  * Leave one empty page between vmalloc'ed areas and
  * the start of the fixmap.
  */
@@ -120,7 +119,7 @@ enum fixed_addresses {
          * before ioremap() is functional.
          *
          * If necessary we round it up to the next 512 pages boundary so
-         * that we can have a single pgd entry and a single pte table:
+         * that we can have a single pmd entry and a single pte table:
          */
 #define NR_FIX_BTMAPS           64
 #define FIX_BTMAPS_SLOTS        8
arch/x86/include/asm/mmu_context.h
@@ -28,16 +28,16 @@ static inline void paravirt_activate_mm(struct mm_struct *prev,

 DECLARE_STATIC_KEY_FALSE(rdpmc_always_available_key);

-static inline void load_mm_cr4(struct mm_struct *mm)
+static inline void load_mm_cr4_irqsoff(struct mm_struct *mm)
 {
         if (static_branch_unlikely(&rdpmc_always_available_key) ||
             atomic_read(&mm->context.perf_rdpmc_allowed))
-                cr4_set_bits(X86_CR4_PCE);
+                cr4_set_bits_irqsoff(X86_CR4_PCE);
         else
-                cr4_clear_bits(X86_CR4_PCE);
+                cr4_clear_bits_irqsoff(X86_CR4_PCE);
 }
 #else
-static inline void load_mm_cr4(struct mm_struct *mm) {}
+static inline void load_mm_cr4_irqsoff(struct mm_struct *mm) {}
 #endif

 #ifdef CONFIG_MODIFY_LDT_SYSCALL
arch/x86/include/asm/set_memory.h
@@ -40,7 +40,6 @@ int _set_memory_wt(unsigned long addr, int numpages);
 int _set_memory_wb(unsigned long addr, int numpages);
 int set_memory_uc(unsigned long addr, int numpages);
 int set_memory_wc(unsigned long addr, int numpages);
-int set_memory_wt(unsigned long addr, int numpages);
 int set_memory_wb(unsigned long addr, int numpages);
 int set_memory_np(unsigned long addr, int numpages);
 int set_memory_4k(unsigned long addr, int numpages);
@@ -48,11 +47,6 @@ int set_memory_encrypted(unsigned long addr, int numpages);
 int set_memory_decrypted(unsigned long addr, int numpages);
 int set_memory_np_noalias(unsigned long addr, int numpages);

-int set_memory_array_uc(unsigned long *addr, int addrinarray);
-int set_memory_array_wc(unsigned long *addr, int addrinarray);
-int set_memory_array_wt(unsigned long *addr, int addrinarray);
-int set_memory_array_wb(unsigned long *addr, int addrinarray);
-
 int set_pages_array_uc(struct page **pages, int addrinarray);
 int set_pages_array_wc(struct page **pages, int addrinarray);
 int set_pages_array_wt(struct page **pages, int addrinarray);
@@ -80,8 +74,6 @@ int set_pages_array_wb(struct page **pages, int addrinarray);

 int set_pages_uc(struct page *page, int numpages);
 int set_pages_wb(struct page *page, int numpages);
-int set_pages_x(struct page *page, int numpages);
-int set_pages_nx(struct page *page, int numpages);
 int set_pages_ro(struct page *page, int numpages);
 int set_pages_rw(struct page *page, int numpages);

arch/x86/include/asm/tlbflush.h
@@ -290,26 +290,42 @@ static inline void __cr4_set(unsigned long cr4)
 }

 /* Set in this cpu's CR4. */
-static inline void cr4_set_bits(unsigned long mask)
+static inline void cr4_set_bits_irqsoff(unsigned long mask)
 {
-        unsigned long cr4, flags;
+        unsigned long cr4;

-        local_irq_save(flags);
         cr4 = this_cpu_read(cpu_tlbstate.cr4);
         if ((cr4 | mask) != cr4)
                 __cr4_set(cr4 | mask);
+}
+
+/* Clear in this cpu's CR4. */
+static inline void cr4_clear_bits_irqsoff(unsigned long mask)
+{
+        unsigned long cr4;
+
+        cr4 = this_cpu_read(cpu_tlbstate.cr4);
+        if ((cr4 & ~mask) != cr4)
+                __cr4_set(cr4 & ~mask);
+}
+
+/* Set in this cpu's CR4. */
+static inline void cr4_set_bits(unsigned long mask)
+{
+        unsigned long flags;
+
+        local_irq_save(flags);
+        cr4_set_bits_irqsoff(mask);
         local_irq_restore(flags);
 }

 /* Clear in this cpu's CR4. */
 static inline void cr4_clear_bits(unsigned long mask)
 {
-        unsigned long cr4, flags;
+        unsigned long flags;

         local_irq_save(flags);
-        cr4 = this_cpu_read(cpu_tlbstate.cr4);
-        if ((cr4 & ~mask) != cr4)
-                __cr4_set(cr4 & ~mask);
+        cr4_clear_bits_irqsoff(mask);
         local_irq_restore(flags);
 }

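A note on the CR4 hunk above: after this change the plain cr4_set_bits()/cr4_clear_bits() helpers simply wrap the new *_irqsoff() workers in local_irq_save()/local_irq_restore(). Callers that already run with interrupts disabled, such as switch_mm_irqs_off() and refresh_pce() (an SMP function call), can use the workers directly and skip the redundant save/restore. Below is a minimal userspace sketch of that wrapper/worker split; the CR4 register and the IRQ flag are simulated with plain variables, so every name here is a stand-in rather than the kernel API.

/* Toy model of the irqsoff split -- not kernel code. */
#include <assert.h>
#include <stdio.h>

static unsigned long fake_cr4;           /* stands in for the per-cpu CR4 copy */
static int irqs_enabled = 1;             /* stands in for the IF flag */

static void sim_irq_save(int *flags)   { *flags = irqs_enabled; irqs_enabled = 0; }
static void sim_irq_restore(int flags) { irqs_enabled = flags; }

/* Worker: the caller guarantees interrupts are already off. */
static void cr4_set_bits_irqsoff(unsigned long mask)
{
        assert(!irqs_enabled);
        if ((fake_cr4 | mask) != fake_cr4)
                fake_cr4 |= mask;
}

/* Wrapper: callable from any context, pays for the save/restore. */
static void cr4_set_bits(unsigned long mask)
{
        int flags;

        sim_irq_save(&flags);
        cr4_set_bits_irqsoff(mask);
        sim_irq_restore(flags);
}

int main(void)
{
        int flags;

        cr4_set_bits(0x8);                 /* interrupts on: use the wrapper */

        sim_irq_save(&flags);              /* e.g. inside an IPI handler */
        cr4_set_bits_irqsoff(0x4);         /* already off: call the worker */
        sim_irq_restore(flags);

        printf("cr4 = %#lx\n", fake_cr4);  /* prints: cr4 = 0xc */
        return 0;
}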
arch/x86/kernel/machine_kexec_32.c
@@ -148,7 +148,7 @@ int machine_kexec_prepare(struct kimage *image)
 {
         int error;

-        set_pages_x(image->control_code_page, 1);
+        set_memory_x((unsigned long)page_address(image->control_code_page), 1);
         error = machine_kexec_alloc_page_tables(image);
         if (error)
                 return error;
@@ -162,7 +162,7 @@ int machine_kexec_prepare(struct kimage *image)
  */
 void machine_kexec_cleanup(struct kimage *image)
 {
-        set_pages_nx(image->control_code_page, 1);
+        set_memory_nx((unsigned long)page_address(image->control_code_page), 1);
         machine_kexec_free_page_tables(image);
 }

arch/x86/mm/init_32.c
@@ -916,7 +916,7 @@ static void mark_nxdata_nx(void)

         if (__supported_pte_mask & _PAGE_NX)
                 printk(KERN_INFO "NX-protecting the kernel data: %luk\n", size >> 10);
-        set_pages_nx(virt_to_page(start), size >> PAGE_SHIFT);
+        set_memory_nx(start, size >> PAGE_SHIFT);
 }

 void mark_rodata_ro(void)
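The two conversions above are mechanical: set_pages_x() and set_pages_nx() were thin wrappers that resolved a struct page to its virtual address and forwarded to set_memory_x()/set_memory_nx() (the removed wrapper bodies appear in the pageattr.c hunk further down), so callers that already hold, or can trivially obtain, the virtual address can call the set_memory_*() variants directly. A toy userspace model of that wrapper relationship follows; the types and function bodies are stand-ins for illustration, not the kernel implementation.

/* Hypothetical model of the removed wrapper -- not the kernel API. */
#include <stdio.h>

struct page { void *virt; };                    /* toy stand-in for struct page */

static void *page_address(struct page *page) { return page->virt; }

static int set_memory_x(unsigned long addr, int numpages)
{
        printf("make %d page(s) at %#lx executable\n", numpages, addr);
        return 0;
}

/* What the removed set_pages_x() boiled down to: */
static int set_pages_x(struct page *page, int numpages)
{
        return set_memory_x((unsigned long)page_address(page), numpages);
}

int main(void)
{
        static char control_code[4096];
        struct page pg = { control_code };

        return set_pages_x(&pg, 1);
}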
arch/x86/mm/numa.c
@@ -861,9 +861,9 @@ void numa_remove_cpu(int cpu)
  */
 const struct cpumask *cpumask_of_node(int node)
 {
-        if (node >= nr_node_ids) {
+        if ((unsigned)node >= nr_node_ids) {
                 printk(KERN_WARNING
-                        "cpumask_of_node(%d): node > nr_node_ids(%u)\n",
+                        "cpumask_of_node(%d): (unsigned)node >= nr_node_ids(%u)\n",
                         node, nr_node_ids);
                 dump_stack();
                 return cpu_none_mask;
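The cpumask_of_node() fix above relies on a standard C idiom: casting a possibly negative int to unsigned makes one comparison reject both negative node IDs and IDs at or beyond nr_node_ids, because negative values wrap around to very large unsigned numbers. A tiny standalone illustration with made-up values:

/* Standalone demo of the unsigned-compare range check -- hypothetical values. */
#include <stdio.h>

#define NR_NODE_IDS 4u          /* stand-in for the real nr_node_ids */

static int node_ok(int node)
{
        /*
         * One compare covers both "node < 0" and "node >= NR_NODE_IDS":
         * a negative int wraps to a huge unsigned value.
         */
        return (unsigned)node < NR_NODE_IDS;
}

int main(void)
{
        printf("%d %d %d\n", node_ok(2), node_ok(7), node_ok(-1));   /* 1 0 0 */
        return 0;
}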
arch/x86/mm/pageattr.c
@@ -1819,63 +1819,6 @@ int set_memory_uc(unsigned long addr, int numpages)
 }
 EXPORT_SYMBOL(set_memory_uc);

-static int _set_memory_array(unsigned long *addr, int numpages,
-                enum page_cache_mode new_type)
-{
-        enum page_cache_mode set_type;
-        int i, j;
-        int ret;
-
-        for (i = 0; i < numpages; i++) {
-                ret = reserve_memtype(__pa(addr[i]), __pa(addr[i]) + PAGE_SIZE,
-                                        new_type, NULL);
-                if (ret)
-                        goto out_free;
-        }
-
-        /* If WC, set to UC- first and then WC */
-        set_type = (new_type == _PAGE_CACHE_MODE_WC) ?
-                                _PAGE_CACHE_MODE_UC_MINUS : new_type;
-
-        ret = change_page_attr_set(addr, numpages,
-                                   cachemode2pgprot(set_type), 1);
-
-        if (!ret && new_type == _PAGE_CACHE_MODE_WC)
-                ret = change_page_attr_set_clr(addr, numpages,
-                                               cachemode2pgprot(
-                                                _PAGE_CACHE_MODE_WC),
-                                               __pgprot(_PAGE_CACHE_MASK),
-                                               0, CPA_ARRAY, NULL);
-        if (ret)
-                goto out_free;
-
-        return 0;
-
-out_free:
-        for (j = 0; j < i; j++)
-                free_memtype(__pa(addr[j]), __pa(addr[j]) + PAGE_SIZE);
-
-        return ret;
-}
-
-int set_memory_array_uc(unsigned long *addr, int numpages)
-{
-        return _set_memory_array(addr, numpages, _PAGE_CACHE_MODE_UC_MINUS);
-}
-EXPORT_SYMBOL(set_memory_array_uc);
-
-int set_memory_array_wc(unsigned long *addr, int numpages)
-{
-        return _set_memory_array(addr, numpages, _PAGE_CACHE_MODE_WC);
-}
-EXPORT_SYMBOL(set_memory_array_wc);
-
-int set_memory_array_wt(unsigned long *addr, int numpages)
-{
-        return _set_memory_array(addr, numpages, _PAGE_CACHE_MODE_WT);
-}
-EXPORT_SYMBOL_GPL(set_memory_array_wt);
-
 int _set_memory_wc(unsigned long addr, int numpages)
 {
         int ret;
@@ -1915,23 +1858,6 @@ int _set_memory_wt(unsigned long addr, int numpages)
                                     cachemode2pgprot(_PAGE_CACHE_MODE_WT), 0);
 }

-int set_memory_wt(unsigned long addr, int numpages)
-{
-        int ret;
-
-        ret = reserve_memtype(__pa(addr), __pa(addr) + numpages * PAGE_SIZE,
-                              _PAGE_CACHE_MODE_WT, NULL);
-        if (ret)
-                return ret;
-
-        ret = _set_memory_wt(addr, numpages);
-        if (ret)
-                free_memtype(__pa(addr), __pa(addr) + numpages * PAGE_SIZE);
-
-        return ret;
-}
-EXPORT_SYMBOL_GPL(set_memory_wt);
-
 int _set_memory_wb(unsigned long addr, int numpages)
 {
         /* WB cache mode is hard wired to all cache attribute bits being 0 */
@@ -1952,24 +1878,6 @@ int set_memory_wb(unsigned long addr, int numpages)
 }
 EXPORT_SYMBOL(set_memory_wb);

-int set_memory_array_wb(unsigned long *addr, int numpages)
-{
-        int i;
-        int ret;
-
-        /* WB cache mode is hard wired to all cache attribute bits being 0 */
-        ret = change_page_attr_clear(addr, numpages,
-                                      __pgprot(_PAGE_CACHE_MASK), 1);
-        if (ret)
-                return ret;
-
-        for (i = 0; i < numpages; i++)
-                free_memtype(__pa(addr[i]), __pa(addr[i]) + PAGE_SIZE);
-
-        return 0;
-}
-EXPORT_SYMBOL(set_memory_array_wb);
-
 int set_memory_x(unsigned long addr, int numpages)
 {
         if (!(__supported_pte_mask & _PAGE_NX))
@@ -1977,7 +1885,6 @@ int set_memory_x(unsigned long addr, int numpages)

         return change_page_attr_clear(&addr, numpages, __pgprot(_PAGE_NX), 0);
 }
-EXPORT_SYMBOL(set_memory_x);

 int set_memory_nx(unsigned long addr, int numpages)
 {
@@ -1986,7 +1893,6 @@ int set_memory_nx(unsigned long addr, int numpages)

         return change_page_attr_set(&addr, numpages, __pgprot(_PAGE_NX), 0);
 }
-EXPORT_SYMBOL(set_memory_nx);

 int set_memory_ro(unsigned long addr, int numpages)
 {
@@ -2190,22 +2096,6 @@ int set_pages_array_wb(struct page **pages, int numpages)
 }
 EXPORT_SYMBOL(set_pages_array_wb);

-int set_pages_x(struct page *page, int numpages)
-{
-        unsigned long addr = (unsigned long)page_address(page);
-
-        return set_memory_x(addr, numpages);
-}
-EXPORT_SYMBOL(set_pages_x);
-
-int set_pages_nx(struct page *page, int numpages)
-{
-        unsigned long addr = (unsigned long)page_address(page);
-
-        return set_memory_nx(addr, numpages);
-}
-EXPORT_SYMBOL(set_pages_nx);
-
 int set_pages_ro(struct page *page, int numpages)
 {
         unsigned long addr = (unsigned long)page_address(page);
arch/x86/mm/tlb.c
@@ -440,7 +440,7 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
         this_cpu_write(cpu_tlbstate.loaded_mm_asid, new_asid);

         if (next != real_prev) {
-                load_mm_cr4(next);
+                load_mm_cr4_irqsoff(next);
                 switch_ldt(real_prev, next);
         }
 }