forked from luck/tmp_suning_uos_patched
drm/xen/xen_drm_front_gem.c: convert to use vm_map_pages()
Convert to use vm_map_pages() to map a range of kernel memory to a user vma.

Link: http://lkml.kernel.org/r/ff8e10ba778d79419c66ee8215bccf01560540fd.1552921225.git.jrdr.linux@gmail.com
Signed-off-by: Souptick Joarder <jrdr.linux@gmail.com>
Reviewed-by: Oleksandr Andrushchenko <oleksandr_andrushchenko@epam.com>
Cc: Boris Ostrovsky <boris.ostrovsky@oracle.com>
Cc: David Airlie <airlied@linux.ie>
Cc: Heiko Stuebner <heiko@sntech.de>
Cc: Joerg Roedel <joro@8bytes.org>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Juergen Gross <jgross@suse.com>
Cc: Kees Cook <keescook@chromium.org>
Cc: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
Cc: Kyungmin Park <kyungmin.park@samsung.com>
Cc: Marek Szyprowski <m.szyprowski@samsung.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Mauro Carvalho Chehab <mchehab@infradead.org>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Mike Rapoport <rppt@linux.ibm.com>
Cc: Pawel Osciak <pawel@osciak.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Rik van Riel <riel@surriel.com>
Cc: Robin Murphy <robin.murphy@arm.com>
Cc: Russell King <linux@armlinux.org.uk>
Cc: Sandy Huang <hjc@rock-chips.com>
Cc: Stefan Richter <stefanr@s5r6.in-berlin.de>
Cc: Stephen Rothwell <sfr@canb.auug.org.au>
Cc: Thierry Reding <treding@nvidia.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
This commit is contained in:
parent
2f69b3c8ba
commit
e60b72b1a9
|
@@ -224,8 +224,7 @@ xen_drm_front_gem_import_sg_table(struct drm_device *dev,
 static int gem_mmap_obj(struct xen_gem_object *xen_obj,
 			struct vm_area_struct *vma)
 {
-	unsigned long addr = vma->vm_start;
-	int i;
+	int ret;

 	/*
	 * clear the VM_PFNMAP flag that was set by drm_gem_mmap(), and set the
@@ -252,18 +251,11 @@ static int gem_mmap_obj(struct xen_gem_object *xen_obj,
	 * FIXME: as we insert all the pages now then no .fault handler must
	 * be called, so don't provide one
	 */
-	for (i = 0; i < xen_obj->num_pages; i++) {
-		int ret;
-
-		ret = vm_insert_page(vma, addr, xen_obj->pages[i]);
-		if (ret < 0) {
-			DRM_ERROR("Failed to insert pages into vma: %d\n", ret);
-			return ret;
-		}
-
-		addr += PAGE_SIZE;
-	}
-	return 0;
+	ret = vm_map_pages(vma, xen_obj->pages, xen_obj->num_pages);
+	if (ret < 0)
+		DRM_ERROR("Failed to map pages into vma: %d\n", ret);
+
+	return ret;
 }

 int xen_drm_front_gem_mmap(struct file *filp, struct vm_area_struct *vma)
|
Loading…
Reference in New Issue
Block a user