uacce: Remove mm_exit() op
The mm_exit() op will be removed from the SVA API. When a process dies
and its mm goes away, the IOMMU driver won't notify device drivers
anymore. Drivers should expect to handle a lot more aborted DMA. On the
upside, it does greatly simplify the queue management.

The uacce_mm struct, which tracks all queues bound to an mm, was only
used by the mm_exit() callback. Remove it.

Signed-off-by: Jean-Philippe Brucker <jean-philippe@linaro.org>
Acked-by: Jacob Pan <jacob.jun.pan@linux.intel.com>
Acked-by: Lu Baolu <baolu.lu@linux.intel.com>
Acked-by: Zhangfei Gao <zhangfei.gao@linaro.org>
Link: https://lore.kernel.org/r/20200423125329.782066-2-jean-philippe@linaro.org
Signed-off-by: Joerg Roedel <jroedel@suse.de>
commit fb01562e5a (parent da656a0425)
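With mm_exit() gone, a process can die while its queues are still bound: the IOMMU stops the PASID and in-flight transactions abort, with no callback to warn the driver. Drivers built on uacce therefore need to treat aborted DMA as an ordinary completion status rather than a fatal error. A minimal sketch of the idea, with all names hypothetical (my_queue, my_desc, MY_STAT_ABORTED are invented for illustration, not uacce API):

	/*
	 * Hypothetical completion handler: once the owning process can exit
	 * without notice, an aborted descriptor is an expected event.
	 */
	static void my_complete_desc(struct my_queue *mq, struct my_desc *d)
	{
		if (d->status == MY_STAT_ABORTED) {
			/* The mm is likely gone; discard the work but keep
			 * the queue usable for the next open(). */
			my_free_desc(mq, d);
			return;
		}
		my_finish_desc(mq, d);
	}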
--- a/drivers/misc/uacce/uacce.c
+++ b/drivers/misc/uacce/uacce.c
@@ -90,109 +90,39 @@ static long uacce_fops_compat_ioctl(struct file *filep,
 }
 #endif
 
-static int uacce_sva_exit(struct device *dev, struct iommu_sva *handle,
-			  void *data)
+static int uacce_bind_queue(struct uacce_device *uacce, struct uacce_queue *q)
 {
-	struct uacce_mm *uacce_mm = data;
-	struct uacce_queue *q;
+	int pasid;
+	struct iommu_sva *handle;
 
-	/*
-	 * No new queue can be added concurrently because no caller can have a
-	 * reference to this mm. But there may be concurrent calls to
-	 * uacce_mm_put(), so we need the lock.
-	 */
-	mutex_lock(&uacce_mm->lock);
-	list_for_each_entry(q, &uacce_mm->queues, list)
-		uacce_put_queue(q);
-	uacce_mm->mm = NULL;
-	mutex_unlock(&uacce_mm->lock);
+	if (!(uacce->flags & UACCE_DEV_SVA))
+		return 0;
 
-	return 0;
-}
+	handle = iommu_sva_bind_device(uacce->parent, current->mm, NULL);
+	if (IS_ERR(handle))
+		return PTR_ERR(handle);
 
-static struct iommu_sva_ops uacce_sva_ops = {
-	.mm_exit = uacce_sva_exit,
-};
+	pasid = iommu_sva_get_pasid(handle);
+	if (pasid == IOMMU_PASID_INVALID) {
+		iommu_sva_unbind_device(handle);
+		return -ENODEV;
+	}
 
-static struct uacce_mm *uacce_mm_get(struct uacce_device *uacce,
-				     struct uacce_queue *q,
-				     struct mm_struct *mm)
-{
-	struct uacce_mm *uacce_mm = NULL;
-	struct iommu_sva *handle = NULL;
-	int ret;
-
-	lockdep_assert_held(&uacce->mm_lock);
-
-	list_for_each_entry(uacce_mm, &uacce->mm_list, list) {
-		if (uacce_mm->mm == mm) {
-			mutex_lock(&uacce_mm->lock);
-			list_add(&q->list, &uacce_mm->queues);
-			mutex_unlock(&uacce_mm->lock);
-			return uacce_mm;
-		}
-	}
-
-	uacce_mm = kzalloc(sizeof(*uacce_mm), GFP_KERNEL);
-	if (!uacce_mm)
-		return NULL;
-
-	if (uacce->flags & UACCE_DEV_SVA) {
-		/*
-		 * Safe to pass an incomplete uacce_mm, since mm_exit cannot
-		 * fire while we hold a reference to the mm.
-		 */
-		handle = iommu_sva_bind_device(uacce->parent, mm, uacce_mm);
-		if (IS_ERR(handle))
-			goto err_free;
-
-		ret = iommu_sva_set_ops(handle, &uacce_sva_ops);
-		if (ret)
-			goto err_unbind;
-
-		uacce_mm->pasid = iommu_sva_get_pasid(handle);
-		if (uacce_mm->pasid == IOMMU_PASID_INVALID)
-			goto err_unbind;
-	}
-
-	uacce_mm->mm = mm;
-	uacce_mm->handle = handle;
-	INIT_LIST_HEAD(&uacce_mm->queues);
-	mutex_init(&uacce_mm->lock);
-	list_add(&q->list, &uacce_mm->queues);
-	list_add(&uacce_mm->list, &uacce->mm_list);
-
-	return uacce_mm;
-
-err_unbind:
-	if (handle)
-		iommu_sva_unbind_device(handle);
-err_free:
-	kfree(uacce_mm);
-	return NULL;
-}
-
-static void uacce_mm_put(struct uacce_queue *q)
-{
-	struct uacce_mm *uacce_mm = q->uacce_mm;
-
-	lockdep_assert_held(&q->uacce->mm_lock);
-
-	mutex_lock(&uacce_mm->lock);
-	list_del(&q->list);
-	mutex_unlock(&uacce_mm->lock);
-
-	if (list_empty(&uacce_mm->queues)) {
-		if (uacce_mm->handle)
-			iommu_sva_unbind_device(uacce_mm->handle);
-		list_del(&uacce_mm->list);
-		kfree(uacce_mm);
-	}
+	q->handle = handle;
+	q->pasid = pasid;
+	return 0;
+}
+
+static void uacce_unbind_queue(struct uacce_queue *q)
+{
+	if (!q->handle)
+		return;
+	iommu_sva_unbind_device(q->handle);
+	q->handle = NULL;
 }
 
 static int uacce_fops_open(struct inode *inode, struct file *filep)
 {
-	struct uacce_mm *uacce_mm = NULL;
 	struct uacce_device *uacce;
 	struct uacce_queue *q;
 	int ret = 0;
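The hunk above replaces the shared, refcounted uacce_mm bookkeeping with a per-queue pair, uacce_bind_queue()/uacce_unbind_queue(). This relies on the SVA API allocating a single PASID per mm, so binding each queue of the same process individually is expected to return the same PASID rather than consume a new one, and each queue can simply carry its own handle and pasid.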
@@ -205,21 +135,16 @@ static int uacce_fops_open(struct inode *inode, struct file *filep)
 	if (!q)
 		return -ENOMEM;
 
-	mutex_lock(&uacce->mm_lock);
-	uacce_mm = uacce_mm_get(uacce, q, current->mm);
-	mutex_unlock(&uacce->mm_lock);
-	if (!uacce_mm) {
-		ret = -ENOMEM;
+	ret = uacce_bind_queue(uacce, q);
+	if (ret)
 		goto out_with_mem;
-	}
 
 	q->uacce = uacce;
-	q->uacce_mm = uacce_mm;
 
 	if (uacce->ops->get_queue) {
-		ret = uacce->ops->get_queue(uacce, uacce_mm->pasid, q);
+		ret = uacce->ops->get_queue(uacce, q->pasid, q);
 		if (ret < 0)
-			goto out_with_mm;
+			goto out_with_bond;
 	}
 
 	init_waitqueue_head(&q->wait);
@@ -227,12 +152,14 @@ static int uacce_fops_open(struct inode *inode, struct file *filep)
 	uacce->inode = inode;
 	q->state = UACCE_Q_INIT;
 
+	mutex_lock(&uacce->queues_lock);
+	list_add(&q->list, &uacce->queues);
+	mutex_unlock(&uacce->queues_lock);
+
 	return 0;
 
-out_with_mm:
-	mutex_lock(&uacce->mm_lock);
-	uacce_mm_put(q);
-	mutex_unlock(&uacce->mm_lock);
+out_with_bond:
+	uacce_unbind_queue(q);
 out_with_mem:
 	kfree(q);
 	return ret;
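Two behavioural improvements fall out of the new open() path: a failed bind now propagates the error code from iommu_sva_bind_device() instead of collapsing every failure into -ENOMEM, and the queue is only linked into the device's queues list at the very end, once setup can no longer fail.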
@@ -241,14 +168,12 @@ static int uacce_fops_open(struct inode *inode, struct file *filep)
 static int uacce_fops_release(struct inode *inode, struct file *filep)
 {
 	struct uacce_queue *q = filep->private_data;
-	struct uacce_device *uacce = q->uacce;
 
+	mutex_lock(&q->uacce->queues_lock);
+	list_del(&q->list);
+	mutex_unlock(&q->uacce->queues_lock);
 	uacce_put_queue(q);
-
-	mutex_lock(&uacce->mm_lock);
-	uacce_mm_put(q);
-	mutex_unlock(&uacce->mm_lock);
-
+	uacce_unbind_queue(q);
 	kfree(q);
 
 	return 0;
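In release(), the queue is unlinked under queues_lock before teardown begins, so uacce_remove() walking the list can no longer see it; uacce_put_queue() then stops the hardware queue before uacce_unbind_queue() drops the SVA bond and its PASID.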
@@ -513,8 +438,8 @@ struct uacce_device *uacce_alloc(struct device *parent,
 	if (ret < 0)
 		goto err_with_uacce;
 
-	INIT_LIST_HEAD(&uacce->mm_list);
-	mutex_init(&uacce->mm_lock);
+	INIT_LIST_HEAD(&uacce->queues);
+	mutex_init(&uacce->queues_lock);
 	device_initialize(&uacce->dev);
 	uacce->dev.devt = MKDEV(MAJOR(uacce_devt), uacce->dev_id);
 	uacce->dev.class = uacce_class;
@@ -561,8 +486,7 @@ EXPORT_SYMBOL_GPL(uacce_register);
  */
 void uacce_remove(struct uacce_device *uacce)
 {
-	struct uacce_mm *uacce_mm;
-	struct uacce_queue *q;
+	struct uacce_queue *q, *next_q;
 
 	if (!uacce)
 		return;
@@ -574,24 +498,12 @@ void uacce_remove(struct uacce_device *uacce)
 		unmap_mapping_range(uacce->inode->i_mapping, 0, 0, 1);
 
 	/* ensure no open queue remains */
-	mutex_lock(&uacce->mm_lock);
-	list_for_each_entry(uacce_mm, &uacce->mm_list, list) {
-		/*
-		 * We don't take the uacce_mm->lock here. Since we hold the
-		 * device's mm_lock, no queue can be added to or removed from
-		 * this uacce_mm. We may run concurrently with mm_exit, but
-		 * uacce_put_queue() is serialized and iommu_sva_unbind_device()
-		 * waits for the lock that mm_exit is holding.
-		 */
-		list_for_each_entry(q, &uacce_mm->queues, list)
-			uacce_put_queue(q);
-
-		if (uacce->flags & UACCE_DEV_SVA) {
-			iommu_sva_unbind_device(uacce_mm->handle);
-			uacce_mm->handle = NULL;
-		}
+	mutex_lock(&uacce->queues_lock);
+	list_for_each_entry_safe(q, next_q, &uacce->queues, list) {
+		uacce_put_queue(q);
+		uacce_unbind_queue(q);
 	}
-	mutex_unlock(&uacce->mm_lock);
+	mutex_unlock(&uacce->queues_lock);
 
 	/* disable sva now since no opened queues */
 	if (uacce->flags & UACCE_DEV_SVA)
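uacce_remove() switches to list_for_each_entry_safe(), which saves the next node before running the loop body, so the walk survives the current entry being unlinked. A generic sketch of the idiom (illustration only, not the uacce_remove() body above, which keeps the queues linked and merely quiesces them):

	/* 'next' is fetched before the body runs, so 'q' may be unlinked
	 * and even freed inside the loop without breaking the traversal. */
	struct uacce_queue *q, *next;

	list_for_each_entry_safe(q, next, &uacce->queues, list) {
		list_del(&q->list);
		kfree(q);
	}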
--- a/include/linux/uacce.h
+++ b/include/linux/uacce.h
@@ -68,19 +68,21 @@ enum uacce_q_state {
  * @uacce: pointer to uacce
  * @priv: private pointer
  * @wait: wait queue head
- * @list: index into uacce_mm
- * @uacce_mm: the corresponding mm
+ * @list: index into uacce queues list
  * @qfrs: pointer of qfr regions
  * @state: queue state machine
+ * @pasid: pasid associated to the mm
+ * @handle: iommu_sva handle returned by iommu_sva_bind_device()
  */
 struct uacce_queue {
 	struct uacce_device *uacce;
 	void *priv;
 	wait_queue_head_t wait;
 	struct list_head list;
-	struct uacce_mm *uacce_mm;
 	struct uacce_qfile_region *qfrs[UACCE_MAX_REGION];
 	enum uacce_q_state state;
+	int pasid;
+	struct iommu_sva *handle;
 };
 
 /**
@@ -96,8 +98,8 @@ struct uacce_queue {
 * @cdev: cdev of the uacce
 * @dev: dev of the uacce
 * @priv: private pointer of the uacce
- * @mm_list: list head of uacce_mm->list
- * @mm_lock: lock for mm_list
+ * @queues: list of queues
+ * @queues_lock: lock for queues list
 * @inode: core vfs
 */
 struct uacce_device {
@@ -112,27 +114,9 @@ struct uacce_device {
 	struct cdev *cdev;
 	struct device dev;
 	void *priv;
-	struct list_head mm_list;
-	struct mutex mm_lock;
-	struct inode *inode;
-};
-
-/**
- * struct uacce_mm - keep track of queues bound to a process
- * @list: index into uacce_device
- * @queues: list of queues
- * @mm: the mm struct
- * @lock: protects the list of queues
- * @pasid: pasid of the uacce_mm
- * @handle: iommu_sva handle return from iommu_sva_bind_device
- */
-struct uacce_mm {
-	struct list_head list;
-	struct list_head queues;
-	struct mm_struct *mm;
-	struct mutex lock;
-	int pasid;
-	struct iommu_sva *handle;
-};
+	struct list_head queues;
+	struct mutex queues_lock;
+	struct inode *inode;
+};
 
 #if IS_ENABLED(CONFIG_UACCE)
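For reference, the two structures as they stand after this patch, assembled from the hunks above:

	struct uacce_queue {
		struct uacce_device *uacce;
		void *priv;
		wait_queue_head_t wait;
		struct list_head list;
		struct uacce_qfile_region *qfrs[UACCE_MAX_REGION];
		enum uacce_q_state state;
		int pasid;
		struct iommu_sva *handle;
	};

and, in struct uacce_device, the per-mm bookkeeping is reduced to a plain queue list:

	struct list_head queues;
	struct mutex queues_lock;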