forked from luck/tmp_suning_uos_patched
x86/mm/cpa: Simplify the code after making cpa->vaddr invariant
Since cpa->vaddr is invariant, this means we can remove all workarounds that deal with it changing. Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org> Cc: Andy Lutomirski <luto@kernel.org> Cc: Borislav Petkov <bp@alien8.de> Cc: Dave Hansen <dave.hansen@linux.intel.com> Cc: H. Peter Anvin <hpa@zytor.com> Cc: Linus Torvalds <torvalds@linux-foundation.org> Cc: Peter Zijlstra <peterz@infradead.org> Cc: Rik van Riel <riel@surriel.com> Cc: Thomas Gleixner <tglx@linutronix.de> Cc: Tom.StDenis@amd.com Cc: dave.hansen@intel.com Link: http://lkml.kernel.org/r/20181203171043.366619025@infradead.org Signed-off-by: Ingo Molnar <mingo@kernel.org>
This commit is contained in:
parent
98bfc9b038
commit
5fe26b7a8f
|
@@ -124,7 +124,6 @@ static int pageattr_test(void)
|
|||
unsigned int level;
|
||||
int i, k;
|
||||
int err;
|
||||
unsigned long test_addr;
|
||||
|
||||
if (print)
|
||||
printk(KERN_INFO "CPA self-test:\n");
|
||||
|
@@ -181,8 +180,7 @@ static int pageattr_test(void)
|
|||
|
||||
switch (i % 3) {
|
||||
case 0:
|
||||
test_addr = addr[i];
|
||||
err = change_page_attr_set(&test_addr, len[i], PAGE_CPA_TEST, 0);
|
||||
err = change_page_attr_set(&addr[i], len[i], PAGE_CPA_TEST, 0);
|
||||
break;
|
||||
|
||||
case 1:
|
||||
|
@@ -226,8 +224,7 @@ static int pageattr_test(void)
|
|||
failed++;
|
||||
continue;
|
||||
}
|
||||
test_addr = addr[i];
|
||||
err = change_page_attr_clear(&test_addr, len[i], PAGE_CPA_TEST, 0);
|
||||
err = change_page_attr_clear(&addr[i], len[i], PAGE_CPA_TEST, 0);
|
||||
if (err < 0) {
|
||||
printk(KERN_ERR "CPA reverting failed: %d\n", err);
|
||||
failed++;
|
||||
|
|
|
@@ -1908,15 +1908,13 @@ EXPORT_SYMBOL_GPL(set_memory_array_wt);
|
|||
int _set_memory_wc(unsigned long addr, int numpages)
|
||||
{
|
||||
int ret;
|
||||
unsigned long addr_copy = addr;
|
||||
|
||||
ret = change_page_attr_set(&addr, numpages,
|
||||
cachemode2pgprot(_PAGE_CACHE_MODE_UC_MINUS),
|
||||
0);
|
||||
if (!ret) {
|
||||
ret = change_page_attr_set_clr(&addr_copy, numpages,
|
||||
cachemode2pgprot(
|
||||
_PAGE_CACHE_MODE_WC),
|
||||
ret = change_page_attr_set_clr(&addr, numpages,
|
||||
cachemode2pgprot(_PAGE_CACHE_MODE_WC),
|
||||
__pgprot(_PAGE_CACHE_MASK),
|
||||
0, 0, NULL);
|
||||
}
|
||||
|
@@ -2064,7 +2062,6 @@ int set_memory_global(unsigned long addr, int numpages)
|
|||
static int __set_memory_enc_dec(unsigned long addr, int numpages, bool enc)
|
||||
{
|
||||
struct cpa_data cpa;
|
||||
unsigned long start;
|
||||
int ret;
|
||||
|
||||
/* Nothing to do if memory encryption is not active */
|
||||
|
@@ -2075,8 +2072,6 @@ static int __set_memory_enc_dec(unsigned long addr, int numpages, bool enc)
|
|||
if (WARN_ONCE(addr & ~PAGE_MASK, "misaligned address: %#lx\n", addr))
|
||||
addr &= PAGE_MASK;
|
||||
|
||||
start = addr;
|
||||
|
||||
memset(&cpa, 0, sizeof(cpa));
|
||||
cpa.vaddr = &addr;
|
||||
cpa.numpages = numpages;
|
||||
|
@@ -2091,7 +2086,7 @@ static int __set_memory_enc_dec(unsigned long addr, int numpages, bool enc)
|
|||
/*
|
||||
* Before changing the encryption attribute, we need to flush caches.
|
||||
*/
|
||||
cpa_flush_range(start, numpages, 1);
|
||||
cpa_flush_range(addr, numpages, 1);
|
||||
|
||||
ret = __change_page_attr_set_clr(&cpa, 1);
|
||||
|
||||
|
@@ -2102,7 +2097,7 @@ static int __set_memory_enc_dec(unsigned long addr, int numpages, bool enc)
|
|||
* in case TLB flushing gets optimized in the cpa_flush_range()
|
||||
* path use the same logic as above.
|
||||
*/
|
||||
cpa_flush_range(start, numpages, 0);
|
||||
cpa_flush_range(addr, numpages, 0);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
|
Loading…
Reference in New Issue
Block a user