forked from luck/tmp_suning_uos_patched
2efbc58f15
SWIOTLB checks the range of incoming CPU addresses to be bounced and sees if the device can access it through its DMA window without requiring bouncing. In such cases it just chooses to skip bouncing. But for cases like secure guests on the powerpc platform, all addresses need to be bounced into the shared pool of memory because the host cannot access them otherwise. Hence the need to bounce is not related to the device's DMA window, and the use of bounce buffers is forced by setting swiotlb_force.

Also, connect the shared memory conversion functions into the ARCH_HAS_MEM_ENCRYPT hooks and call swiotlb_update_mem_attributes() to convert SWIOTLB's memory pool to shared memory.

Signed-off-by: Anshuman Khandual <khandual@linux.vnet.ibm.com>
[ bauerman: Use ARCH_HAS_MEM_ENCRYPT hooks to share swiotlb memory pool. ]
Signed-off-by: Thiago Jung Bauermann <bauerman@linux.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/20190820021326.6884-15-bauerman@linux.ibm.com
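To make the first paragraph concrete: in kernels of this era, the dma-direct mapping path decides whether to bounce roughly as below. This is a hedged sketch (assumption: simplified from kernel/dma/direct.c around v5.3; the helper name dma_direct_possible() and exact guards may differ), showing why swiotlb_force == SWIOTLB_FORCE bounces every address regardless of the device's DMA window:

/* Sketch: bouncing is skipped only when forcing is off AND the
 * device can reach the address through its DMA window. With
 * swiotlb_force == SWIOTLB_FORCE this is false for every address,
 * so all DMA goes through the (shared) SWIOTLB pool.
 */
static inline bool dma_direct_possible(struct device *dev,
				       dma_addr_t dma_addr, size_t size)
{
	return swiotlb_force != SWIOTLB_FORCE &&
		dma_capable(dev, dma_addr, size);
}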
86 lines
1.7 KiB
C
// SPDX-License-Identifier: GPL-2.0+
/*
 * Secure VM platform
 *
 * Copyright 2018 IBM Corporation
 * Author: Anshuman Khandual <khandual@linux.vnet.ibm.com>
 */

#include <linux/mm.h>
#include <asm/machdep.h>
#include <asm/svm.h>
#include <asm/swiotlb.h>
#include <asm/ultravisor.h>

static int __init init_svm(void)
{
	if (!is_secure_guest())
		return 0;

	/* Don't release the SWIOTLB buffer. */
	ppc_swiotlb_enable = 1;

	/*
	 * Since the guest memory is inaccessible to the host, devices always
	 * need to use the SWIOTLB buffer for DMA even if dma_capable() says
	 * otherwise.
	 */
	swiotlb_force = SWIOTLB_FORCE;

	/* Share the SWIOTLB buffer with the host. */
	swiotlb_update_mem_attributes();

	return 0;
}
machine_early_initcall(pseries, init_svm);

int set_memory_encrypted(unsigned long addr, int numpages)
{
	if (!PAGE_ALIGNED(addr))
		return -EINVAL;

	uv_unshare_page(PHYS_PFN(__pa(addr)), numpages);

	return 0;
}

int set_memory_decrypted(unsigned long addr, int numpages)
{
	if (!PAGE_ALIGNED(addr))
		return -EINVAL;

	uv_share_page(PHYS_PFN(__pa(addr)), numpages);

	return 0;
}
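/*
 * Aside, not part of this file: a hedged sketch of the caller that
 * exercises the hook above. swiotlb_update_mem_attributes() (in
 * kernel/dma/swiotlb.c of this era; the io_tlb_* globals are an
 * assumption from memory and may differ in detail) marks the whole
 * bounce pool "decrypted", which on a secure pseries guest lands in
 * set_memory_decrypted() above and thus in uv_share_page():
 *
 *	void __init swiotlb_update_mem_attributes(void)
 *	{
 *		void *vaddr = phys_to_virt(io_tlb_start);
 *		unsigned long bytes =
 *			PAGE_ALIGN(io_tlb_nslabs << IO_TLB_SHIFT);
 *
 *		set_memory_decrypted((unsigned long)vaddr,
 *				     bytes >> PAGE_SHIFT);
 *		memset(vaddr, 0, bytes);
 *	}
 */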
/* There's one dispatch log per CPU. */
#define NR_DTL_PAGE	(DISPATCH_LOG_BYTES * CONFIG_NR_CPUS / PAGE_SIZE)

static struct page *dtl_page_store[NR_DTL_PAGE];
static long dtl_nr_pages;

static bool is_dtl_page_shared(struct page *page)
{
	long i;

	for (i = 0; i < dtl_nr_pages; i++)
		if (dtl_page_store[i] == page)
			return true;

	return false;
}

void dtl_cache_ctor(void *addr)
{
	unsigned long pfn = PHYS_PFN(__pa(addr));
	struct page *page = pfn_to_page(pfn);

	if (!is_dtl_page_shared(page)) {
		dtl_page_store[dtl_nr_pages] = page;
		dtl_nr_pages++;
		WARN_ON(dtl_nr_pages >= NR_DTL_PAGE);
		uv_share_page(pfn, 1);
	}
}
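For completeness: dtl_cache_ctor() above only does the sharing; a companion patch in the same series installs it as the kmem_cache constructor for the dispatch trace log, so each newly allocated DTL page is shared with the ultravisor exactly once. A hedged sketch of that wiring (assumption: based on the related pseries/setup.c and asm/svm.h changes from this series; get_dtl_cache_ctor() returns dtl_cache_ctor on secure guests and NULL otherwise):

/* Sketch of the companion wiring (not in this file):
 * in asm/svm.h:
 *	#define get_dtl_cache_ctor() \
 *		(is_secure_guest() ? dtl_cache_ctor : NULL)
 * in pseries/setup.c, the DTL cache is created with that ctor:
 */
dtl_cache = kmem_cache_create("dtl", DISPATCH_LOG_BYTES,
			      DISPATCH_LOG_BYTES, 0, get_dtl_cache_ctor());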