forked from luck/tmp_suning_uos_patched
mm: remove the prot argument from vm_map_ram
This is always PAGE_KERNEL - for long term mappings with other properties vmap should be used. Signed-off-by: Christoph Hellwig <hch@lst.de> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org> Cc: Christian Borntraeger <borntraeger@de.ibm.com> Cc: Christophe Leroy <christophe.leroy@c-s.fr> Cc: Daniel Vetter <daniel.vetter@ffwll.ch> Cc: David Airlie <airlied@linux.ie> Cc: Gao Xiang <xiang@kernel.org> Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org> Cc: Haiyang Zhang <haiyangz@microsoft.com> Cc: Johannes Weiner <hannes@cmpxchg.org> Cc: "K. Y. Srinivasan" <kys@microsoft.com> Cc: Laura Abbott <labbott@redhat.com> Cc: Mark Rutland <mark.rutland@arm.com> Cc: Michael Kelley <mikelley@microsoft.com> Cc: Minchan Kim <minchan@kernel.org> Cc: Nitin Gupta <ngupta@vflare.org> Cc: Robin Murphy <robin.murphy@arm.com> Cc: Sakari Ailus <sakari.ailus@linux.intel.com> Cc: Stephen Hemminger <sthemmin@microsoft.com> Cc: Sumit Semwal <sumit.semwal@linaro.org> Cc: Wei Liu <wei.liu@kernel.org> Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org> Cc: Catalin Marinas <catalin.marinas@arm.com> Cc: Heiko Carstens <heiko.carstens@de.ibm.com> Cc: Paul Mackerras <paulus@ozlabs.org> Cc: Vasily Gorbik <gor@linux.ibm.com> Cc: Will Deacon <will@kernel.org> Link: http://lkml.kernel.org/r/20200414131348.444715-19-hch@lst.de Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
This commit is contained in:
parent
855e57a119
commit
d4efd79a81
|
@@ -66,7 +66,7 @@ static void *mock_dmabuf_vmap(struct dma_buf *dma_buf)
|
|||
{
|
||||
struct mock_dmabuf *mock = to_mock(dma_buf);
|
||||
|
||||
return vm_map_ram(mock->pages, mock->npages, 0, PAGE_KERNEL);
|
||||
return vm_map_ram(mock->pages, mock->npages, 0);
|
||||
}
|
||||
|
||||
static void mock_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
|
||||
|
|
|
@@ -309,8 +309,7 @@ static void *vb2_dma_sg_vaddr(void *buf_priv)
|
|||
if (buf->db_attach)
|
||||
buf->vaddr = dma_buf_vmap(buf->db_attach->dmabuf);
|
||||
else
|
||||
buf->vaddr = vm_map_ram(buf->pages,
|
||||
buf->num_pages, -1, PAGE_KERNEL);
|
||||
buf->vaddr = vm_map_ram(buf->pages, buf->num_pages, -1);
|
||||
}
|
||||
|
||||
/* add offset in case userptr is not page-aligned */
|
||||
|
|
|
@@ -107,8 +107,7 @@ static void *vb2_vmalloc_get_userptr(struct device *dev, unsigned long vaddr,
|
|||
buf->vaddr = (__force void *)
|
||||
ioremap(__pfn_to_phys(nums[0]), size + offset);
|
||||
} else {
|
||||
buf->vaddr = vm_map_ram(frame_vector_pages(vec), n_pages, -1,
|
||||
PAGE_KERNEL);
|
||||
buf->vaddr = vm_map_ram(frame_vector_pages(vec), n_pages, -1);
|
||||
}
|
||||
|
||||
if (!buf->vaddr)
|
||||
|
|
|
@@ -274,7 +274,7 @@ static int z_erofs_decompress_generic(struct z_erofs_decompress_req *rq,
|
|||
|
||||
i = 0;
|
||||
while (1) {
|
||||
dst = vm_map_ram(rq->out, nrpages_out, -1, PAGE_KERNEL);
|
||||
dst = vm_map_ram(rq->out, nrpages_out, -1);
|
||||
|
||||
/* retry two more times (totally 3 times) */
|
||||
if (dst || ++i >= 3)
|
||||
|
|
|
@@ -477,7 +477,7 @@ _xfs_buf_map_pages(
|
|||
nofs_flag = memalloc_nofs_save();
|
||||
do {
|
||||
bp->b_addr = vm_map_ram(bp->b_pages, bp->b_page_count,
|
||||
-1, PAGE_KERNEL);
|
||||
-1);
|
||||
if (bp->b_addr)
|
||||
break;
|
||||
vm_unmap_aliases();
|
||||
|
|
|
@@ -88,8 +88,7 @@ struct vmap_area {
|
|||
* Highlevel APIs for driver use
|
||||
*/
|
||||
extern void vm_unmap_ram(const void *mem, unsigned int count);
|
||||
extern void *vm_map_ram(struct page **pages, unsigned int count,
|
||||
int node, pgprot_t prot);
|
||||
extern void *vm_map_ram(struct page **pages, unsigned int count, int node);
|
||||
extern void vm_unmap_aliases(void);
|
||||
|
||||
#ifdef CONFIG_MMU
|
||||
|
|
|
@@ -351,7 +351,7 @@ void vunmap(const void *addr)
|
|||
}
|
||||
EXPORT_SYMBOL(vunmap);
|
||||
|
||||
void *vm_map_ram(struct page **pages, unsigned int count, int node, pgprot_t prot)
|
||||
void *vm_map_ram(struct page **pages, unsigned int count, int node)
|
||||
{
|
||||
BUG();
|
||||
return NULL;
|
||||
|
|
|
@@ -1835,7 +1835,7 @@ EXPORT_SYMBOL(vm_unmap_ram);
|
|||
*
|
||||
* Returns: a pointer to the address that has been mapped, or %NULL on failure
|
||||
*/
|
||||
void *vm_map_ram(struct page **pages, unsigned int count, int node, pgprot_t prot)
|
||||
void *vm_map_ram(struct page **pages, unsigned int count, int node)
|
||||
{
|
||||
unsigned long size = (unsigned long)count << PAGE_SHIFT;
|
||||
unsigned long addr;
|
||||
|
@@ -1859,7 +1859,7 @@ void *vm_map_ram(struct page **pages, unsigned int count, int node, pgprot_t prot)
|
|||
|
||||
kasan_unpoison_vmalloc(mem, size);
|
||||
|
||||
if (map_kernel_range(addr, size, prot, pages) < 0) {
|
||||
if (map_kernel_range(addr, size, PAGE_KERNEL, pages) < 0) {
|
||||
vm_unmap_ram(mem, count);
|
||||
return NULL;
|
||||
}
|
||||
|
|
Loading…
Reference in New Issue
Block a user