Merge branch 'work.vmci' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs

Pull vmci iov_iter updates from Al Viro:
 "Get rid of 'is it an iovec or an entire array?' flags in vmci - just
  use iov_iter. Simplifies the living hell out of that code..."

* 'work.vmci' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs:
  vmci: the same on the send side...
  vmci: simplify qp_dequeue_locked()
  vmci: get rid of qp_memcpy_from_queue()
  vmci: fix buf_size in case of iovec-based accesses
commit d76e0a050e
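The whole series hangs on one idea: instead of threading a raw pointer, a separate length, a per-call memcpy callback, and an "is this an iovec?" flag through the queue-pair code, callers hand down a single struct iov_iter, which bundles the memory description with a cursor that advances as data is copied. The sketch below is a minimal userspace model of that shape; the struct and function names are invented for illustration and are not the kernel's iov_iter API:

    /* Toy iterator: remembers how far it has advanced, like iov_iter. */
    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>
    #include <string.h>

    struct byte_iter {
            const char *base;   /* data being consumed */
            size_t pos, len;    /* cursor and total length */
    };

    /* All-or-nothing copy out of the iterator (cf. copy_from_iter_full). */
    static bool iter_copy_out(void *dst, size_t n, struct byte_iter *it)
    {
            if (n > it->len - it->pos)
                    return false;            /* short copy: refuse */
            memcpy(dst, it->base + it->pos, n);
            it->pos += n;                    /* cursor advances */
            return true;
    }

    int main(void)
    {
            char ring[8] = { 0 };
            struct byte_iter it = { .base = "abcdef", .pos = 0, .len = 6 };

            /* A "wrapped" write: two calls, no manual offset arithmetic;
             * the iterator resumes where the first call stopped. */
            iter_copy_out(ring + 4, 4, &it);
            iter_copy_out(ring + 0, 2, &it);
            printf("%.4s%.2s\n", ring + 4, ring);   /* prints: abcdef */
            return 0;
    }

The diff below (drivers/misc/vmw_vmci/vmci_queue_pair.c) applies exactly this change.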
@@ -129,23 +129,6 @@
  * *_MEM state, and vice versa.
  */

-/*
- * VMCIMemcpy{To,From}QueueFunc() prototypes.  Functions of these
- * types are passed around to enqueue and dequeue routines.  Note that
- * often the functions passed are simply wrappers around memcpy
- * itself.
- *
- * Note: In order for the memcpy typedefs to be compatible with the VMKernel,
- * there's an unused last parameter for the hosted side.  In
- * ESX, that parameter holds a buffer type.
- */
-typedef int vmci_memcpy_to_queue_func(struct vmci_queue *queue,
-                                      u64 queue_offset, const void *src,
-                                      size_t src_offset, size_t size);
-typedef int vmci_memcpy_from_queue_func(void *dest, size_t dest_offset,
-                                        const struct vmci_queue *queue,
-                                        u64 queue_offset, size_t size);
-
 /* The Kernel specific component of the struct vmci_queue structure. */
 struct vmci_queue_kern_if {
         struct mutex __mutex;   /* Protects the queue. */
@@ -351,11 +334,10 @@ static void *qp_alloc_queue(u64 size, u32 flags)
  * by traversing the offset -> page translation structure for the queue.
  * Assumes that offset + size does not wrap around in the queue.
  */
-static int __qp_memcpy_to_queue(struct vmci_queue *queue,
-                                u64 queue_offset,
-                                const void *src,
-                                size_t size,
-                                bool is_iovec)
+static int qp_memcpy_to_queue_iter(struct vmci_queue *queue,
+                                   u64 queue_offset,
+                                   struct iov_iter *from,
+                                   size_t size)
 {
         struct vmci_queue_kern_if *kernel_if = queue->kernel_if;
         size_t bytes_copied = 0;
@@ -380,23 +362,12 @@ static int __qp_memcpy_to_queue(struct vmci_queue *queue,
                 else
                         to_copy = size - bytes_copied;

-                if (is_iovec) {
-                        struct msghdr *msg = (struct msghdr *)src;
-                        int err;
-
-                        /* The iovec will track bytes_copied internally. */
-                        err = memcpy_from_msg((u8 *)va + page_offset,
-                                              msg, to_copy);
-                        if (err != 0) {
-                                if (kernel_if->host)
-                                        kunmap(kernel_if->u.h.page[page_index]);
-                                return VMCI_ERROR_INVALID_ARGS;
-                        }
-                } else {
-                        memcpy((u8 *)va + page_offset,
-                               (u8 *)src + bytes_copied, to_copy);
+                if (!copy_from_iter_full((u8 *)va + page_offset, to_copy,
+                                         from)) {
+                        if (kernel_if->host)
+                                kunmap(kernel_if->u.h.page[page_index]);
+                        return VMCI_ERROR_INVALID_ARGS;
                 }
-
                 bytes_copied += to_copy;
                 if (kernel_if->host)
                         kunmap(kernel_if->u.h.page[page_index]);
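The new send path leans on copy_from_iter_full() from linux/uio.h: it either copies all requested bytes and advances the iterator, or copies none, reverts the iterator, and returns false. That is why a failure can be mapped straight to VMCI_ERROR_INVALID_ARGS with no partial-copy cleanup. Schematically (the -EFAULT here is illustrative; the queue code returns a VMCI error instead):

        /* All-or-nothing: on failure the iterator is left un-advanced. */
        if (!copy_from_iter_full(dst, nbytes, from))
                return -EFAULT;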
@@ -411,11 +382,9 @@ static int __qp_memcpy_to_queue(struct vmci_queue *queue,
  * by traversing the offset -> page translation structure for the queue.
  * Assumes that offset + size does not wrap around in the queue.
  */
-static int __qp_memcpy_from_queue(void *dest,
-                                  const struct vmci_queue *queue,
-                                  u64 queue_offset,
-                                  size_t size,
-                                  bool is_iovec)
+static int qp_memcpy_from_queue_iter(struct iov_iter *to,
+                                     const struct vmci_queue *queue,
+                                     u64 queue_offset, size_t size)
 {
         struct vmci_queue_kern_if *kernel_if = queue->kernel_if;
         size_t bytes_copied = 0;
@@ -427,6 +396,7 @@ static int __qp_memcpy_from_queue(void *dest,
                         (queue_offset + bytes_copied) & (PAGE_SIZE - 1);
                 void *va;
                 size_t to_copy;
+                int err;

                 if (kernel_if->host)
                         va = kmap(kernel_if->u.h.page[page_index]);
@@ -440,23 +410,12 @@ static int __qp_memcpy_from_queue(void *dest,
                 else
                         to_copy = size - bytes_copied;

-                if (is_iovec) {
-                        struct msghdr *msg = dest;
-                        int err;
-
-                        /* The iovec will track bytes_copied internally. */
-                        err = memcpy_to_msg(msg, (u8 *)va + page_offset,
-                                            to_copy);
-                        if (err != 0) {
-                                if (kernel_if->host)
-                                        kunmap(kernel_if->u.h.page[page_index]);
-                                return VMCI_ERROR_INVALID_ARGS;
-                        }
-                } else {
-                        memcpy((u8 *)dest + bytes_copied,
-                               (u8 *)va + page_offset, to_copy);
+                err = copy_to_iter((u8 *)va + page_offset, to_copy, to);
+                if (err != to_copy) {
+                        if (kernel_if->host)
+                                kunmap(kernel_if->u.h.page[page_index]);
+                        return VMCI_ERROR_INVALID_ARGS;
                 }
-
                 bytes_copied += to_copy;
                 if (kernel_if->host)
                         kunmap(kernel_if->u.h.page[page_index]);
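The receive side is asymmetric: lacking a _full variant for this direction, the code calls copy_to_iter(), which returns the number of bytes actually copied, and compares that against to_copy by hand - hence the `int err;` added in the earlier hunk. The pattern, schematically (error value again illustrative):

        /* copy_to_iter() reports a byte count, not success/failure. */
        size_t copied = copy_to_iter(src, nbytes, to);
        if (copied != nbytes)
                return -EFAULT;         /* short copy into the iterator */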
@@ -569,54 +528,6 @@ static int qp_populate_ppn_set(u8 *call_buf, const struct ppn_set *ppn_set)
         return VMCI_SUCCESS;
 }

-static int qp_memcpy_to_queue(struct vmci_queue *queue,
-                              u64 queue_offset,
-                              const void *src, size_t src_offset, size_t size)
-{
-        return __qp_memcpy_to_queue(queue, queue_offset,
-                                    (u8 *)src + src_offset, size, false);
-}
-
-static int qp_memcpy_from_queue(void *dest,
-                                size_t dest_offset,
-                                const struct vmci_queue *queue,
-                                u64 queue_offset, size_t size)
-{
-        return __qp_memcpy_from_queue((u8 *)dest + dest_offset,
-                                      queue, queue_offset, size, false);
-}
-
-/*
- * Copies from a given iovec from a VMCI Queue.
- */
-static int qp_memcpy_to_queue_iov(struct vmci_queue *queue,
-                                  u64 queue_offset,
-                                  const void *msg,
-                                  size_t src_offset, size_t size)
-{
-
-        /*
-         * We ignore src_offset because src is really a struct iovec * and will
-         * maintain offset internally.
-         */
-        return __qp_memcpy_to_queue(queue, queue_offset, msg, size, true);
-}
-
-/*
- * Copies to a given iovec from a VMCI Queue.
- */
-static int qp_memcpy_from_queue_iov(void *dest,
-                                    size_t dest_offset,
-                                    const struct vmci_queue *queue,
-                                    u64 queue_offset, size_t size)
-{
-        /*
-         * We ignore dest_offset because dest is really a struct iovec * and
-         * will maintain offset internally.
-         */
-        return __qp_memcpy_from_queue(dest, queue, queue_offset, size, true);
-}
-
 /*
  * Allocates kernel VA space of specified size plus space for the queue
  * and kernel interface.  This is different from the guest queue allocator,
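All four deleted wrappers existed only to adapt the two copiers to the vmci_memcpy_*_queue_func typedefs: the plain pair folded src_offset/dest_offset into the pointer, and the _iov pair ignored that offset because the msghdr tracked position internally. With an iov_iter the position lives in the iterator itself, so the whole adapter layer collapses to direct calls. Schematically, using the identifiers from this diff:

        /* Before: kind chosen by flag, offset threaded by hand. */
        __qp_memcpy_to_queue(queue, queue_offset, (u8 *)src + src_offset,
                             size, false);        /* linear buffer */
        __qp_memcpy_to_queue(queue, queue_offset, msg, size, true);
                                                  /* msghdr/iovec  */

        /* After: one entry point; the iterator knows kind and position. */
        qp_memcpy_to_queue_iter(queue, queue_offset, from, size);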
@@ -2629,12 +2540,11 @@ static bool qp_wait_for_ready_queue(struct vmci_qp *qpair)
 static ssize_t qp_enqueue_locked(struct vmci_queue *produce_q,
                                  struct vmci_queue *consume_q,
                                  const u64 produce_q_size,
-                                 const void *buf,
-                                 size_t buf_size,
-                                 vmci_memcpy_to_queue_func memcpy_to_queue)
+                                 struct iov_iter *from)
 {
         s64 free_space;
         u64 tail;
+        size_t buf_size = iov_iter_count(from);
         size_t written;
         ssize_t result;
@@ -2654,15 +2564,15 @@ static ssize_t qp_enqueue_locked(struct vmci_queue *produce_q,
         written = (size_t) (free_space > buf_size ? buf_size : free_space);
         tail = vmci_q_header_producer_tail(produce_q->q_header);
         if (likely(tail + written < produce_q_size)) {
-                result = memcpy_to_queue(produce_q, tail, buf, 0, written);
+                result = qp_memcpy_to_queue_iter(produce_q, tail, from, written);
         } else {
                 /* Tail pointer wraps around. */

                 const size_t tmp = (size_t) (produce_q_size - tail);

-                result = memcpy_to_queue(produce_q, tail, buf, 0, tmp);
+                result = qp_memcpy_to_queue_iter(produce_q, tail, from, tmp);
                 if (result >= VMCI_SUCCESS)
-                        result = memcpy_to_queue(produce_q, 0, buf, tmp,
-                                                 written - tmp);
+                        result = qp_memcpy_to_queue_iter(produce_q, 0, from,
+                                                         written - tmp);
         }
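The wraparound branch shows the payoff most clearly. The old callback needed a source offset argument (the `tmp` in `buf, tmp`) to resume after the split at the end of the ring; the iterator resumes on its own, so the second call just asks for the remaining bytes:

        /* Before: resume point computed by the caller. */
        result = memcpy_to_queue(produce_q, 0, buf, tmp, written - tmp);

        /* After: the iov_iter already sits at byte `tmp` of the payload. */
        result = qp_memcpy_to_queue_iter(produce_q, 0, from, written - tmp);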
@@ -2690,11 +2600,10 @@ static ssize_t qp_enqueue_locked(struct vmci_queue *produce_q,
 static ssize_t qp_dequeue_locked(struct vmci_queue *produce_q,
                                  struct vmci_queue *consume_q,
                                  const u64 consume_q_size,
-                                 void *buf,
-                                 size_t buf_size,
-                                 vmci_memcpy_from_queue_func memcpy_from_queue,
+                                 struct iov_iter *to,
                                  bool update_consumer)
 {
+        size_t buf_size = iov_iter_count(to);
         s64 buf_ready;
         u64 head;
         size_t read;
@@ -2716,15 +2625,15 @@ static ssize_t qp_dequeue_locked(struct vmci_queue *produce_q,
         read = (size_t) (buf_ready > buf_size ? buf_size : buf_ready);
         head = vmci_q_header_consumer_head(produce_q->q_header);
         if (likely(head + read < consume_q_size)) {
-                result = memcpy_from_queue(buf, 0, consume_q, head, read);
+                result = qp_memcpy_from_queue_iter(to, consume_q, head, read);
         } else {
                 /* Head pointer wraps around. */

                 const size_t tmp = (size_t) (consume_q_size - head);

-                result = memcpy_from_queue(buf, 0, consume_q, head, tmp);
+                result = qp_memcpy_from_queue_iter(to, consume_q, head, tmp);
                 if (result >= VMCI_SUCCESS)
-                        result = memcpy_from_queue(buf, tmp, consume_q, 0,
-                                                   read - tmp);
+                        result = qp_memcpy_from_queue_iter(to, consume_q, 0,
+                                                           read - tmp);

         }
@@ -3118,18 +3027,21 @@ ssize_t vmci_qpair_enqueue(struct vmci_qp *qpair,
                            int buf_type)
 {
         ssize_t result;
+        struct iov_iter from;
+        struct kvec v = {.iov_base = (void *)buf, .iov_len = buf_size};

         if (!qpair || !buf)
                 return VMCI_ERROR_INVALID_ARGS;

+        iov_iter_kvec(&from, WRITE | ITER_KVEC, &v, 1, buf_size);
+
         qp_lock(qpair);

         do {
                 result = qp_enqueue_locked(qpair->produce_q,
                                            qpair->consume_q,
                                            qpair->produce_q_size,
-                                           buf, buf_size,
-                                           qp_memcpy_to_queue);
+                                           &from);

                 if (result == VMCI_ERROR_QUEUEPAIR_NOT_READY &&
                     !qp_wait_for_ready_queue(qpair))
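For the flat-buffer entry points the conversion cost is two locals and one call: wrap the caller's buffer in a single-segment kvec and point an iov_iter at it. This is the 4.15-era iov_iter_kvec() signature, where the ITER_KVEC type is OR'ed into the direction argument:

        struct kvec v = { .iov_base = (void *)buf, .iov_len = buf_size };
        struct iov_iter from;

        iov_iter_kvec(&from, WRITE | ITER_KVEC, &v, 1, buf_size);
        /* ... &from is then handed down to qp_enqueue_locked() ... */

The dequeue and peek paths below do the same with READ and a non-const buffer.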
@@ -3159,18 +3071,21 @@ ssize_t vmci_qpair_dequeue(struct vmci_qp *qpair,
                            int buf_type)
 {
         ssize_t result;
+        struct iov_iter to;
+        struct kvec v = {.iov_base = buf, .iov_len = buf_size};

         if (!qpair || !buf)
                 return VMCI_ERROR_INVALID_ARGS;

+        iov_iter_kvec(&to, READ | ITER_KVEC, &v, 1, buf_size);
+
         qp_lock(qpair);

         do {
                 result = qp_dequeue_locked(qpair->produce_q,
                                            qpair->consume_q,
                                            qpair->consume_q_size,
-                                           buf, buf_size,
-                                           qp_memcpy_from_queue, true);
+                                           &to, true);

                 if (result == VMCI_ERROR_QUEUEPAIR_NOT_READY &&
                     !qp_wait_for_ready_queue(qpair))
@@ -3200,19 +3115,22 @@ ssize_t vmci_qpair_peek(struct vmci_qp *qpair,
                         size_t buf_size,
                         int buf_type)
 {
+        struct iov_iter to;
+        struct kvec v = {.iov_base = buf, .iov_len = buf_size};
         ssize_t result;

         if (!qpair || !buf)
                 return VMCI_ERROR_INVALID_ARGS;

+        iov_iter_kvec(&to, READ | ITER_KVEC, &v, 1, buf_size);
+
         qp_lock(qpair);

         do {
                 result = qp_dequeue_locked(qpair->produce_q,
                                            qpair->consume_q,
                                            qpair->consume_q_size,
-                                           buf, buf_size,
-                                           qp_memcpy_from_queue, false);
+                                           &to, false);

                 if (result == VMCI_ERROR_QUEUEPAIR_NOT_READY &&
                     !qp_wait_for_ready_queue(qpair))
@@ -3253,8 +3171,7 @@ ssize_t vmci_qpair_enquev(struct vmci_qp *qpair,
                 result = qp_enqueue_locked(qpair->produce_q,
                                            qpair->consume_q,
                                            qpair->produce_q_size,
-                                           msg, iov_size,
-                                           qp_memcpy_to_queue_iov);
+                                           &msg->msg_iter);

                 if (result == VMCI_ERROR_QUEUEPAIR_NOT_READY &&
                     !qp_wait_for_ready_queue(qpair))
@@ -3295,9 +3212,7 @@ ssize_t vmci_qpair_dequev(struct vmci_qp *qpair,
                 result = qp_dequeue_locked(qpair->produce_q,
                                            qpair->consume_q,
                                            qpair->consume_q_size,
-                                           msg, iov_size,
-                                           qp_memcpy_from_queue_iov,
-                                           true);
+                                           &msg->msg_iter, true);

                 if (result == VMCI_ERROR_QUEUEPAIR_NOT_READY &&
                     !qp_wait_for_ready_queue(qpair))
@@ -3339,9 +3254,7 @@ ssize_t vmci_qpair_peekv(struct vmci_qp *qpair,
                 result = qp_dequeue_locked(qpair->produce_q,
                                            qpair->consume_q,
                                            qpair->consume_q_size,
-                                           msg, iov_size,
-                                           qp_memcpy_from_queue_iov,
-                                           false);
+                                           &msg->msg_iter, false);

                 if (result == VMCI_ERROR_QUEUEPAIR_NOT_READY &&
                     !qp_wait_for_ready_queue(qpair))