kernel_optimize_test/include/drm/drm_gem_shmem_helper.h
Rob Herring edbd7f318c drm/shmem: Use mutex_trylock in drm_gem_shmem_purge
Lockdep reports a circular locking dependency with pages_lock taken in
the shrinker callback. With current users the deadlock can't actually
happen, as a BO will never be purgeable while pages_lock is held.
To be safe, let's use mutex_trylock() instead and bail if a BO is
already locked.
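
A minimal sketch of the resulting trylock-based helper (the actual body
lives in drivers/gpu/drm/drm_gem_shmem_helper.c and may differ in detail):

  bool drm_gem_shmem_purge(struct drm_gem_object *obj)
  {
          struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);

          /* Bail out rather than sleep if the BO is already locked */
          if (!mutex_trylock(&shmem->pages_lock))
                  return false;
          drm_gem_shmem_purge_locked(obj);
          mutex_unlock(&shmem->pages_lock);

          return true;
  }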

WARNING: possible circular locking dependency detected
5.3.0-rc1+ #100 Tainted: G             L
------------------------------------------------------
kswapd0/171 is trying to acquire lock:
000000009b9823fd (&shmem->pages_lock){+.+.}, at: drm_gem_shmem_purge+0x20/0x40

but task is already holding lock:
00000000f82369b6 (fs_reclaim){+.+.}, at: __fs_reclaim_acquire+0x0/0x40

which lock already depends on the new lock.

the existing dependency chain (in reverse order) is:

-> #1 (fs_reclaim){+.+.}:
       fs_reclaim_acquire.part.18+0x34/0x40
       fs_reclaim_acquire+0x20/0x28
       __kmalloc_node+0x6c/0x4c0
       kvmalloc_node+0x38/0xa8
       drm_gem_get_pages+0x80/0x1d0
       drm_gem_shmem_get_pages+0x58/0xa0
       drm_gem_shmem_get_pages_sgt+0x48/0xd0
       panfrost_mmu_map+0x38/0xf8 [panfrost]
       panfrost_gem_open+0xc0/0xe8 [panfrost]
       drm_gem_handle_create_tail+0xe8/0x198
       drm_gem_handle_create+0x3c/0x50
       panfrost_gem_create_with_handle+0x70/0xa0 [panfrost]
       panfrost_ioctl_create_bo+0x48/0x80 [panfrost]
       drm_ioctl_kernel+0xb8/0x110
       drm_ioctl+0x244/0x3f0
       do_vfs_ioctl+0xbc/0x910
       ksys_ioctl+0x78/0xa8
       __arm64_sys_ioctl+0x1c/0x28
       el0_svc_common.constprop.0+0x90/0x168
       el0_svc_handler+0x28/0x78
       el0_svc+0x8/0xc

-> #0 (&shmem->pages_lock){+.+.}:
       __lock_acquire+0xa2c/0x1d70
       lock_acquire+0xdc/0x228
       __mutex_lock+0x8c/0x800
       mutex_lock_nested+0x1c/0x28
       drm_gem_shmem_purge+0x20/0x40
       panfrost_gem_shrinker_scan+0xc0/0x180 [panfrost]
       do_shrink_slab+0x208/0x500
       shrink_slab+0x10c/0x2c0
       shrink_node+0x28c/0x4d8
       balance_pgdat+0x2c8/0x570
       kswapd+0x22c/0x638
       kthread+0x128/0x130
       ret_from_fork+0x10/0x18

other info that might help us debug this:

 Possible unsafe locking scenario:

       CPU0                    CPU1
       ----                    ----
  lock(fs_reclaim);
                               lock(&shmem->pages_lock);
                               lock(fs_reclaim);
  lock(&shmem->pages_lock);

 *** DEADLOCK ***

3 locks held by kswapd0/171:
 #0: 00000000f82369b6 (fs_reclaim){+.+.}, at: __fs_reclaim_acquire+0x0/0x40
 #1: 00000000ceb37808 (shrinker_rwsem){++++}, at: shrink_slab+0xbc/0x2c0
 #2: 00000000f31efa81 (&pfdev->shrinker_lock){+.+.}, at: panfrost_gem_shrinker_scan+0x34/0x180 [panfrost]

Fixes: 17acb9f35e ("drm/shmem: Add madvise state and purge helpers")
Cc: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Cc: Maxime Ripard <maxime.ripard@bootlin.com>
Cc: Sean Paul <sean@poorly.run>
Cc: David Airlie <airlied@linux.ie>
Cc: Daniel Vetter <daniel@ffwll.ch>
Signed-off-by: Rob Herring <robh@kernel.org>
Reviewed-by: Steven Price <steven.price@arm.com>
Acked-by: Alyssa Rosenzweig <alyssa.rosenzweig@collabora.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190823021216.5862-6-robh@kernel.org
2019-08-28 10:02:39 -05:00

/* SPDX-License-Identifier: GPL-2.0 */

#ifndef __DRM_GEM_SHMEM_HELPER_H__
#define __DRM_GEM_SHMEM_HELPER_H__

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/mutex.h>

#include <drm/drm_file.h>
#include <drm/drm_gem.h>
#include <drm/drm_ioctl.h>
#include <drm/drm_prime.h>

struct dma_buf_attachment;
struct drm_mode_create_dumb;
struct drm_printer;
struct sg_table;

/**
 * struct drm_gem_shmem_object - GEM object backed by shmem
 */
struct drm_gem_shmem_object {
	/**
	 * @base: Base GEM object
	 */
	struct drm_gem_object base;

	/**
	 * @pages_lock: Protects the page table and use count
	 */
	struct mutex pages_lock;

	/**
	 * @pages: Page table
	 */
	struct page **pages;

	/**
	 * @pages_use_count:
	 *
	 * Reference count on the pages table.
	 * The pages are put when the count reaches zero.
	 */
	unsigned int pages_use_count;

	/**
	 * @madv: State for madvise
	 *
	 * 0 means the object is in active use.
	 * A negative value means the object has been purged.
	 * Positive values are driver specific and not used by the helpers.
	 */
	int madv;

	/**
	 * @madv_list: List entry for madvise tracking
	 *
	 * Typically used by drivers to track purgeable objects.
	 */
	struct list_head madv_list;

	/**
	 * @pages_mark_dirty_on_put:
	 *
	 * Mark pages as dirty when they are put.
	 */
	unsigned int pages_mark_dirty_on_put : 1;

	/**
	 * @pages_mark_accessed_on_put:
	 *
	 * Mark pages as accessed when they are put.
	 */
	unsigned int pages_mark_accessed_on_put : 1;

	/**
	 * @sgt: Scatter/gather table for imported PRIME buffers
	 */
	struct sg_table *sgt;

	/**
	 * @vmap_lock: Protects the vmap address and use count
	 */
	struct mutex vmap_lock;

	/**
	 * @vaddr: Kernel virtual address of the backing memory
	 */
	void *vaddr;

	/**
	 * @vmap_use_count:
	 *
	 * Reference count on the virtual address.
	 * The address is unmapped when the count reaches zero.
	 */
	unsigned int vmap_use_count;
};

#define to_drm_gem_shmem_obj(obj) \
	container_of(obj, struct drm_gem_shmem_object, base)

/**
 * DEFINE_DRM_GEM_SHMEM_FOPS() - Macro to generate file operations for shmem drivers
 * @name: name for the generated structure
 *
 * This macro autogenerates a suitable &struct file_operations for shmem based
 * drivers, which can be assigned to &drm_driver.fops. Note that this structure
 * cannot be shared between drivers, because it contains a reference to the
 * current module using THIS_MODULE.
 *
 * Note that the declaration is already marked as static - if you need a
 * non-static version of this you're probably doing it wrong and will break the
 * THIS_MODULE reference by accident.
 */
#define DEFINE_DRM_GEM_SHMEM_FOPS(name) \
	static const struct file_operations name = {\
		.owner		= THIS_MODULE,\
		.open		= drm_open,\
		.release	= drm_release,\
		.unlocked_ioctl	= drm_ioctl,\
		.compat_ioctl	= drm_compat_ioctl,\
		.poll		= drm_poll,\
		.read		= drm_read,\
		.llseek		= noop_llseek,\
		.mmap		= drm_gem_shmem_mmap, \
	}

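/*
 * Example use of the macro above (hypothetical driver "foo"; the name is
 * illustrative only):
 *
 *	DEFINE_DRM_GEM_SHMEM_FOPS(foo_fops);
 *
 * The generated foo_fops can then be assigned to &drm_driver.fops.
 */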

struct drm_gem_shmem_object *drm_gem_shmem_create(struct drm_device *dev, size_t size);
void drm_gem_shmem_free_object(struct drm_gem_object *obj);

int drm_gem_shmem_get_pages(struct drm_gem_shmem_object *shmem);
void drm_gem_shmem_put_pages(struct drm_gem_shmem_object *shmem);
int drm_gem_shmem_pin(struct drm_gem_object *obj);
void drm_gem_shmem_unpin(struct drm_gem_object *obj);
void *drm_gem_shmem_vmap(struct drm_gem_object *obj);
void drm_gem_shmem_vunmap(struct drm_gem_object *obj, void *vaddr);

int drm_gem_shmem_madvise(struct drm_gem_object *obj, int madv);

/*
 * A shmem BO can be purged when userspace has marked it purgeable via the
 * driver's madvise ioctl (madv > 0), it has backing pages mapped through an
 * sgt, it is not currently vmap'ed, and it is neither exported nor imported
 * as a dma-buf.
 */
static inline bool drm_gem_shmem_is_purgeable(struct drm_gem_shmem_object *shmem)
{
	return (shmem->madv > 0) &&
		!shmem->vmap_use_count && shmem->sgt &&
		!shmem->base.dma_buf && !shmem->base.import_attach;
}

void drm_gem_shmem_purge_locked(struct drm_gem_object *obj);
bool drm_gem_shmem_purge(struct drm_gem_object *obj);

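/*
 * Sketch of how a driver shrinker might use the helpers above; the names
 * "foo" and "shrinker_list" are hypothetical (panfrost_gem_shrinker.c is
 * the in-tree reference):
 *
 *	list_for_each_entry(shmem, &foo->shrinker_list, madv_list) {
 *		if (freed >= sc->nr_to_scan)
 *			break;
 *		if (drm_gem_shmem_is_purgeable(shmem) &&
 *		    drm_gem_shmem_purge(&shmem->base))
 *			freed += shmem->base.size >> PAGE_SHIFT;
 *	}
 */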

struct drm_gem_shmem_object *
drm_gem_shmem_create_with_handle(struct drm_file *file_priv,
				 struct drm_device *dev, size_t size,
				 uint32_t *handle);
int drm_gem_shmem_dumb_create(struct drm_file *file, struct drm_device *dev,
			      struct drm_mode_create_dumb *args);

int drm_gem_shmem_mmap(struct file *filp, struct vm_area_struct *vma);

extern const struct vm_operations_struct drm_gem_shmem_vm_ops;

void drm_gem_shmem_print_info(struct drm_printer *p, unsigned int indent,
			      const struct drm_gem_object *obj);

struct sg_table *drm_gem_shmem_get_sg_table(struct drm_gem_object *obj);

struct drm_gem_object *
drm_gem_shmem_prime_import_sg_table(struct drm_device *dev,
				    struct dma_buf_attachment *attach,
				    struct sg_table *sgt);

struct sg_table *drm_gem_shmem_get_pages_sgt(struct drm_gem_object *obj);

/**
 * DRM_GEM_SHMEM_DRIVER_OPS - Default shmem GEM operations
 *
 * This macro provides a shortcut for setting the shmem GEM operations in
 * the &drm_driver structure.
 */
#define DRM_GEM_SHMEM_DRIVER_OPS \
	.prime_handle_to_fd	= drm_gem_prime_handle_to_fd, \
	.prime_fd_to_handle	= drm_gem_prime_fd_to_handle, \
	.gem_prime_import_sg_table = drm_gem_shmem_prime_import_sg_table, \
	.gem_prime_mmap		= drm_gem_prime_mmap, \
	.dumb_create		= drm_gem_shmem_dumb_create
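
/*
 * Example (hypothetical driver "foo"): pull in the default shmem GEM
 * operations when declaring the driver, alongside the fops generated by
 * DEFINE_DRM_GEM_SHMEM_FOPS() above.
 *
 *	static struct drm_driver foo_driver = {
 *		.driver_features = DRIVER_GEM,
 *		.fops = &foo_fops,
 *		DRM_GEM_SHMEM_DRIVER_OPS,
 *		...
 *	};
 */
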
#endif /* __DRM_GEM_SHMEM_HELPER_H__ */