Merge branch 'gup_flag-cleanups'
Merge the gup_flags cleanups from Lorenzo Stoakes:
"This patch series adjusts functions in the get_user_pages* family such
that desired FOLL_* flags are passed as an argument rather than
implied by flags.
The purpose of this change is to make the use of FOLL_FORCE explicit
so it is easier to grep for and clearer to callers that this flag is
being used. The use of FOLL_FORCE is an issue as it overrides missing
VM_READ/VM_WRITE flags for the VMA whose pages we are reading
from/writing to, which can result in surprising behaviour.
The patch series came out of the discussion around commit 38e0885465
("mm: check VMA flags to avoid invalid PROT_NONE NUMA balancing"),
which addressed a BUG_ON() being triggered when a page was faulted in
with PROT_NONE set but having been overridden by FOLL_FORCE.
do_numa_page() was run on the assumption the page _must_ be one marked
for NUMA node migration as an actual PROT_NONE page would have been
dealt with prior to this code path, however FOLL_FORCE introduced a
situation where this assumption did not hold.
See
https://marc.info/?l=linux-mm&m=147585445805166
for the patch proposal"
Additionally, there's a fix for an ancient bug related to FOLL_FORCE and
FOLL_WRITE by me.
[ This branch was rebased recently to add a few more acked-by's and
reviewed-by's ]
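In practice the conversion looks like this for a typical pinning call (a
minimal before/after sketch mirroring the hunks below; start, nr_pages and
pages are placeholder variables):

	/* Before: intent was encoded in two bare int parameters. */
	ret = get_user_pages(start, nr_pages, 1 /* write */, 1 /* force */,
			     pages, NULL);

	/* After: the same request with explicit, greppable FOLL_* flags. */
	ret = get_user_pages(start, nr_pages, FOLL_WRITE | FOLL_FORCE,
			     pages, NULL);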
* gup_flag-cleanups:
mm: replace access_process_vm() write parameter with gup_flags
mm: replace access_remote_vm() write parameter with gup_flags
mm: replace __access_remote_vm() write parameter with gup_flags
mm: replace get_user_pages_remote() write/force parameters with gup_flags
mm: replace get_user_pages() write/force parameters with gup_flags
mm: replace get_vaddr_frames() write/force parameters with gup_flags
mm: replace get_user_pages_locked() write/force parameters with gup_flags
mm: replace get_user_pages_unlocked() write/force parameters with gup_flags
mm: remove write/force parameters from __get_user_pages_unlocked()
mm: remove write/force parameters from __get_user_pages_locked()
mm: remove gup_flags FOLL_WRITE games from __get_user_pages()
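The access_process_vm() family follows the same pattern; a representative
read-side conversion (taken from the arch ptrace hunks below):

	/* Before: the trailing 0 meant "read"; FOLL_FORCE was implied inside. */
	copied = access_process_vm(child, addr, &tmp, sizeof(tmp), 0);

	/* After: the forced access is spelled out at the call site. */
	copied = access_process_vm(child, addr, &tmp, sizeof(tmp), FOLL_FORCE);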
commit 63ae602cea
@@ -157,14 +157,16 @@ put_reg(struct task_struct *task, unsigned long regno, unsigned long data)
 static inline int
 read_int(struct task_struct *task, unsigned long addr, int * data)
 {
-	int copied = access_process_vm(task, addr, data, sizeof(int), 0);
+	int copied = access_process_vm(task, addr, data, sizeof(int),
+			FOLL_FORCE);
 	return (copied == sizeof(int)) ? 0 : -EIO;
 }
 
 static inline int
 write_int(struct task_struct *task, unsigned long addr, int data)
 {
-	int copied = access_process_vm(task, addr, &data, sizeof(int), 1);
+	int copied = access_process_vm(task, addr, &data, sizeof(int),
+			FOLL_FORCE | FOLL_WRITE);
 	return (copied == sizeof(int)) ? 0 : -EIO;
 }
 

@@ -281,7 +283,8 @@ long arch_ptrace(struct task_struct *child, long request,
 	/* When I and D space are separate, these will need to be fixed. */
 	case PTRACE_PEEKTEXT: /* read word at location addr. */
 	case PTRACE_PEEKDATA:
-		copied = access_process_vm(child, addr, &tmp, sizeof(tmp), 0);
+		copied = access_process_vm(child, addr, &tmp, sizeof(tmp),
+				FOLL_FORCE);
 		ret = -EIO;
 		if (copied != sizeof(tmp))
 			break;
@@ -271,7 +271,7 @@ long arch_ptrace(struct task_struct *child, long request,
 	case BFIN_MEM_ACCESS_CORE:
 	case BFIN_MEM_ACCESS_CORE_ONLY:
 		copied = access_process_vm(child, addr, &tmp,
-					   to_copy, 0);
+					   to_copy, FOLL_FORCE);
 		if (copied)
 			break;
 

@@ -324,7 +324,8 @@ long arch_ptrace(struct task_struct *child, long request,
 	case BFIN_MEM_ACCESS_CORE:
 	case BFIN_MEM_ACCESS_CORE_ONLY:
 		copied = access_process_vm(child, addr, &data,
-					   to_copy, 1);
+					   to_copy,
+					   FOLL_FORCE | FOLL_WRITE);
 		break;
 	case BFIN_MEM_ACCESS_DMA:
 		if (safe_dma_memcpy(paddr, &data, to_copy))
@@ -2722,7 +2722,6 @@ static int cryptocop_ioctl_process(struct inode *inode, struct file *filp, unsig
 	err = get_user_pages((unsigned long int)(oper.indata + prev_ix),
 			     noinpages,
 			     0,  /* read access only for in data */
-			     0, /* no force */
 			     inpages,
 			     NULL);
 

@@ -2736,8 +2735,7 @@ static int cryptocop_ioctl_process(struct inode *inode, struct file *filp, unsig
 	if (oper.do_cipher){
 		err = get_user_pages((unsigned long int)oper.cipher_outdata,
 				     nooutpages,
-				     1, /* write access for out data */
-				     0, /* no force */
+				     FOLL_WRITE, /* write access for out data */
 				     outpages,
 				     NULL);
 	up_read(&current->mm->mmap_sem);
@@ -147,7 +147,7 @@ long arch_ptrace(struct task_struct *child, long request,
 			/* The trampoline page is globally mapped, no page table to traverse.*/
 			tmp = *(unsigned long*)addr;
 		} else {
-			copied = access_process_vm(child, addr, &tmp, sizeof(tmp), 0);
+			copied = access_process_vm(child, addr, &tmp, sizeof(tmp), FOLL_FORCE);
 
 			if (copied != sizeof(tmp))
 				break;

@@ -279,7 +279,7 @@ static int insn_size(struct task_struct *child, unsigned long pc)
 	int opsize = 0;
 
 	/* Read the opcode at pc (do what PTRACE_PEEKTEXT would do). */
-	copied = access_process_vm(child, pc, &opcode, sizeof(opcode), 0);
+	copied = access_process_vm(child, pc, &opcode, sizeof(opcode), FOLL_FORCE);
 	if (copied != sizeof(opcode))
 		return 0;
 
@@ -142,7 +142,7 @@ store_virtual_to_phys(struct device *dev, struct device_attribute *attr,
 	u64 virt_addr=simple_strtoull(buf, NULL, 16);
 	int ret;
 
-	ret = get_user_pages(virt_addr, 1, VM_READ, 0, NULL, NULL);
+	ret = get_user_pages(virt_addr, 1, FOLL_WRITE, NULL, NULL);
 	if (ret<=0) {
 #ifdef ERR_INJ_DEBUG
 		printk("Virtual address %lx is not existing.\n",virt_addr);
@@ -453,7 +453,7 @@ ia64_peek (struct task_struct *child, struct switch_stack *child_stack,
 			return 0;
 		}
 	}
-	copied = access_process_vm(child, addr, &ret, sizeof(ret), 0);
+	copied = access_process_vm(child, addr, &ret, sizeof(ret), FOLL_FORCE);
 	if (copied != sizeof(ret))
 		return -EIO;
 	*val = ret;

@@ -489,7 +489,8 @@ ia64_poke (struct task_struct *child, struct switch_stack *child_stack,
 			*ia64_rse_skip_regs(krbs, regnum) = val;
 		}
 	}
-	} else if (access_process_vm(child, addr, &val, sizeof(val), 1)
+	} else if (access_process_vm(child, addr, &val, sizeof(val),
+			FOLL_FORCE | FOLL_WRITE)
 		   != sizeof(val))
 		return -EIO;
 	return 0;

@@ -543,7 +544,8 @@ ia64_sync_user_rbs (struct task_struct *child, struct switch_stack *sw,
 		ret = ia64_peek(child, sw, user_rbs_end, addr, &val);
 		if (ret < 0)
 			return ret;
-		if (access_process_vm(child, addr, &val, sizeof(val), 1)
+		if (access_process_vm(child, addr, &val, sizeof(val),
+				FOLL_FORCE | FOLL_WRITE)
 		    != sizeof(val))
 			return -EIO;
 	}

@@ -559,7 +561,8 @@ ia64_sync_kernel_rbs (struct task_struct *child, struct switch_stack *sw,
 
 	/* now copy word for word from user rbs to kernel rbs: */
 	for (addr = user_rbs_start; addr < user_rbs_end; addr += 8) {
-		if (access_process_vm(child, addr, &val, sizeof(val), 0)
+		if (access_process_vm(child, addr, &val, sizeof(val),
+				FOLL_FORCE)
 		    != sizeof(val))
 			return -EIO;
 

@@ -1156,7 +1159,8 @@ arch_ptrace (struct task_struct *child, long request,
 	case PTRACE_PEEKTEXT:
 	case PTRACE_PEEKDATA:
 		/* read word at location addr */
-		if (access_process_vm(child, addr, &data, sizeof(data), 0)
+		if (access_process_vm(child, addr, &data, sizeof(data),
+				FOLL_FORCE)
 		    != sizeof(data))
 			return -EIO;
 		/* ensure return value is not mistaken for error code */
@@ -493,7 +493,8 @@ unregister_all_debug_traps(struct task_struct *child)
 	int i;
 
 	for (i = 0; i < p->nr_trap; i++)
-		access_process_vm(child, p->addr[i], &p->insn[i], sizeof(p->insn[i]), 1);
+		access_process_vm(child, p->addr[i], &p->insn[i], sizeof(p->insn[i]),
+				FOLL_FORCE | FOLL_WRITE);
 	p->nr_trap = 0;
 }
 

@@ -537,7 +538,8 @@ embed_debug_trap(struct task_struct *child, unsigned long next_pc)
 	unsigned long next_insn, code;
 	unsigned long addr = next_pc & ~3;
 
-	if (access_process_vm(child, addr, &next_insn, sizeof(next_insn), 0)
+	if (access_process_vm(child, addr, &next_insn, sizeof(next_insn),
+			FOLL_FORCE)
 	    != sizeof(next_insn)) {
 		return -1; /* error */
 	}

@@ -546,7 +548,8 @@ embed_debug_trap(struct task_struct *child, unsigned long next_pc)
 	if (register_debug_trap(child, next_pc, next_insn, &code)) {
 		return -1; /* error */
 	}
-	if (access_process_vm(child, addr, &code, sizeof(code), 1)
+	if (access_process_vm(child, addr, &code, sizeof(code),
+			FOLL_FORCE | FOLL_WRITE)
 	    != sizeof(code)) {
 		return -1; /* error */
 	}

@@ -562,7 +565,8 @@ withdraw_debug_trap(struct pt_regs *regs)
 	addr = (regs->bpc - 2) & ~3;
 	regs->bpc -= 2;
 	if (unregister_debug_trap(current, addr, &code)) {
-		access_process_vm(current, addr, &code, sizeof(code), 1);
+		access_process_vm(current, addr, &code, sizeof(code),
+				FOLL_FORCE | FOLL_WRITE);
 		invalidate_cache();
 	}
 }

@@ -589,7 +593,8 @@ void user_enable_single_step(struct task_struct *child)
 	/* Compute next pc. */
 	pc = get_stack_long(child, PT_BPC);
 
-	if (access_process_vm(child, pc&~3, &insn, sizeof(insn), 0)
+	if (access_process_vm(child, pc&~3, &insn, sizeof(insn),
+			FOLL_FORCE)
 	    != sizeof(insn))
 		return;
 
|
@ -70,7 +70,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
|
|||
break;
|
||||
|
||||
copied = access_process_vm(child, (u64)addrOthers, &tmp,
|
||||
sizeof(tmp), 0);
|
||||
sizeof(tmp), FOLL_FORCE);
|
||||
if (copied != sizeof(tmp))
|
||||
break;
|
||||
ret = put_user(tmp, (u32 __user *) (unsigned long) data);
|
||||
|
@ -179,7 +179,8 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
|
|||
break;
|
||||
ret = 0;
|
||||
if (access_process_vm(child, (u64)addrOthers, &data,
|
||||
sizeof(data), 1) == sizeof(data))
|
||||
sizeof(data),
|
||||
FOLL_FORCE | FOLL_WRITE) == sizeof(data))
|
||||
break;
|
||||
ret = -EIO;
|
||||
break;
|
||||
|
|
|
@@ -287,7 +287,7 @@ int get_user_pages_fast(unsigned long start, int nr_pages, int write,
 	pages += nr;
 
 	ret = get_user_pages_unlocked(start, (end - start) >> PAGE_SHIFT,
-				      write, 0, pages);
+				      pages, write ? FOLL_WRITE : 0);
 
 	/* Have to be a bit careful with return values */
 	if (nr > 0) {
@@ -74,7 +74,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
 			break;
 
 		copied = access_process_vm(child, (u64)addrOthers, &tmp,
-				sizeof(tmp), 0);
+				sizeof(tmp), FOLL_FORCE);
 		if (copied != sizeof(tmp))
 			break;
 		ret = put_user(tmp, (u32 __user *)data);

@@ -179,7 +179,8 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
 			break;
 		ret = 0;
 		if (access_process_vm(child, (u64)addrOthers, &tmp,
-				      sizeof(tmp), 1) == sizeof(tmp))
+				      sizeof(tmp),
+				      FOLL_FORCE | FOLL_WRITE) == sizeof(tmp))
 			break;
 		ret = -EIO;
 		break;
@@ -266,7 +266,8 @@ int get_user_pages_fast(unsigned long start, int nr_pages, int write,
 	/* Try to get the remaining pages with get_user_pages */
 	start += nr << PAGE_SHIFT;
 	pages += nr;
-	ret = get_user_pages_unlocked(start, nr_pages - nr, write, 0, pages);
+	ret = get_user_pages_unlocked(start, nr_pages - nr, pages,
+				      write ? FOLL_WRITE : 0);
 	/* Have to be a bit careful with return values */
 	if (nr > 0)
 		ret = (ret < 0) ? nr : ret + nr;
@@ -131,7 +131,7 @@ read_tsk_long(struct task_struct *child,
 {
 	int copied;
 
-	copied = access_process_vm(child, addr, res, sizeof(*res), 0);
+	copied = access_process_vm(child, addr, res, sizeof(*res), FOLL_FORCE);
 
 	return copied != sizeof(*res) ? -EIO : 0;
 }

@@ -142,7 +142,7 @@ read_tsk_short(struct task_struct *child,
 {
 	int copied;
 
-	copied = access_process_vm(child, addr, res, sizeof(*res), 0);
+	copied = access_process_vm(child, addr, res, sizeof(*res), FOLL_FORCE);
 
 	return copied != sizeof(*res) ? -EIO : 0;
 }

@@ -153,7 +153,8 @@ write_tsk_short(struct task_struct *child,
 {
 	int copied;
 
-	copied = access_process_vm(child, addr, &val, sizeof(val), 1);
+	copied = access_process_vm(child, addr, &val, sizeof(val),
+			FOLL_FORCE | FOLL_WRITE);
 
 	return copied != sizeof(val) ? -EIO : 0;
 }

@@ -164,7 +165,8 @@ write_tsk_long(struct task_struct *child,
 {
 	int copied;
 
-	copied = access_process_vm(child, addr, &val, sizeof(val), 1);
+	copied = access_process_vm(child, addr, &val, sizeof(val),
+			FOLL_FORCE | FOLL_WRITE);
 
 	return copied != sizeof(val) ? -EIO : 0;
 }
@@ -258,7 +258,8 @@ int get_user_pages_fast(unsigned long start, int nr_pages, int write,
 	pages += nr;
 
 	ret = get_user_pages_unlocked(start,
-			(end - start) >> PAGE_SHIFT, write, 0, pages);
+			(end - start) >> PAGE_SHIFT, pages,
+			write ? FOLL_WRITE : 0);
 
 	/* Have to be a bit careful with return values */
 	if (nr > 0) {
@@ -127,7 +127,8 @@ static int get_from_target(struct task_struct *target, unsigned long uaddr,
 		if (copy_from_user(kbuf, (void __user *) uaddr, len))
 			return -EFAULT;
 	} else {
-		int len2 = access_process_vm(target, uaddr, kbuf, len, 0);
+		int len2 = access_process_vm(target, uaddr, kbuf, len,
+				FOLL_FORCE);
 		if (len2 != len)
 			return -EFAULT;
 	}

@@ -141,7 +142,8 @@ static int set_to_target(struct task_struct *target, unsigned long uaddr,
 		if (copy_to_user((void __user *) uaddr, kbuf, len))
 			return -EFAULT;
 	} else {
-		int len2 = access_process_vm(target, uaddr, kbuf, len, 1);
+		int len2 = access_process_vm(target, uaddr, kbuf, len,
+				FOLL_FORCE | FOLL_WRITE);
 		if (len2 != len)
 			return -EFAULT;
 	}

@@ -505,7 +507,8 @@ static int genregs32_get(struct task_struct *target,
 			if (access_process_vm(target,
 					      (unsigned long)
 					      &reg_window[pos],
-					      k, sizeof(*k), 0)
+					      k, sizeof(*k),
+					      FOLL_FORCE)
 			    != sizeof(*k))
 				return -EFAULT;
 			k++;

@@ -531,12 +534,14 @@ static int genregs32_get(struct task_struct *target,
 			if (access_process_vm(target,
 					      (unsigned long)
 					      &reg_window[pos],
-					      &reg, sizeof(reg), 0)
+					      &reg, sizeof(reg),
+					      FOLL_FORCE)
 			    != sizeof(reg))
 				return -EFAULT;
 			if (access_process_vm(target,
 					      (unsigned long) u,
-					      &reg, sizeof(reg), 1)
+					      &reg, sizeof(reg),
+					      FOLL_FORCE | FOLL_WRITE)
 			    != sizeof(reg))
 				return -EFAULT;
 			pos++;

@@ -615,7 +620,8 @@ static int genregs32_set(struct task_struct *target,
 					      (unsigned long)
 					      &reg_window[pos],
 					      (void *) k,
-					      sizeof(*k), 1)
+					      sizeof(*k),
+					      FOLL_FORCE | FOLL_WRITE)
 			    != sizeof(*k))
 				return -EFAULT;
 			k++;

@@ -642,13 +648,15 @@ static int genregs32_set(struct task_struct *target,
 			if (access_process_vm(target,
 					      (unsigned long)
 					      u,
-					      &reg, sizeof(reg), 0)
+					      &reg, sizeof(reg),
+					      FOLL_FORCE)
 			    != sizeof(reg))
 				return -EFAULT;
 			if (access_process_vm(target,
 					      (unsigned long)
 					      &reg_window[pos],
-					      &reg, sizeof(reg), 1)
+					      &reg, sizeof(reg),
+					      FOLL_FORCE | FOLL_WRITE)
 			    != sizeof(reg))
 				return -EFAULT;
 			pos++;
@@ -238,7 +238,8 @@ int get_user_pages_fast(unsigned long start, int nr_pages, int write,
 	pages += nr;
 
 	ret = get_user_pages_unlocked(start,
-			(end - start) >> PAGE_SHIFT, write, 0, pages);
+			(end - start) >> PAGE_SHIFT, pages,
+			write ? FOLL_WRITE : 0);
 
 	/* Have to be a bit careful with return values */
 	if (nr > 0) {
@@ -57,7 +57,8 @@ static int is_setting_trap_flag(struct task_struct *child, struct pt_regs *regs)
 	unsigned char opcode[15];
 	unsigned long addr = convert_ip_to_linear(child, regs);
 
-	copied = access_process_vm(child, addr, opcode, sizeof(opcode), 0);
+	copied = access_process_vm(child, addr, opcode, sizeof(opcode),
+			FOLL_FORCE);
 	for (i = 0; i < copied; i++) {
 		switch (opcode[i]) {
 		/* popf and iret */
@@ -435,7 +435,7 @@ int get_user_pages_fast(unsigned long start, int nr_pages, int write,
 
 	ret = get_user_pages_unlocked(start,
 				      (end - start) >> PAGE_SHIFT,
-				      write, 0, pages);
+				      pages, write ? FOLL_WRITE : 0);
 
 	/* Have to be a bit careful with return values */
 	if (nr > 0) {
@@ -544,10 +544,9 @@ static int mpx_resolve_fault(long __user *addr, int write)
 {
 	long gup_ret;
 	int nr_pages = 1;
-	int force = 0;
 
-	gup_ret = get_user_pages((unsigned long)addr, nr_pages, write,
-				 force, NULL, NULL);
+	gup_ret = get_user_pages((unsigned long)addr, nr_pages,
+				 write ? FOLL_WRITE : 0, NULL, NULL);
 	/*
 	 * get_user_pages() returns number of pages gotten.
 	 * 0 means we failed to fault in and get anything,
@@ -36,7 +36,8 @@ int is_syscall(unsigned long addr)
 	 * slow, but that doesn't matter, since it will be called only
 	 * in case of singlestepping, if copy_from_user failed.
 	 */
-	n = access_process_vm(current, addr, &instr, sizeof(instr), 0);
+	n = access_process_vm(current, addr, &instr, sizeof(instr),
+			FOLL_FORCE);
 	if (n != sizeof(instr)) {
 		printk(KERN_ERR "is_syscall : failed to read "
 		       "instruction from 0x%lx\n", addr);
@@ -212,7 +212,8 @@ int is_syscall(unsigned long addr)
 	 * slow, but that doesn't matter, since it will be called only
 	 * in case of singlestepping, if copy_from_user failed.
 	 */
-	n = access_process_vm(current, addr, &instr, sizeof(instr), 0);
+	n = access_process_vm(current, addr, &instr, sizeof(instr),
+			FOLL_FORCE);
 	if (n != sizeof(instr)) {
 		printk("is_syscall : failed to read instruction from "
 		       "0x%lx\n", addr);
@@ -555,10 +555,13 @@ struct amdgpu_ttm_tt {
 int amdgpu_ttm_tt_get_user_pages(struct ttm_tt *ttm, struct page **pages)
 {
 	struct amdgpu_ttm_tt *gtt = (void *)ttm;
-	int write = !(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY);
+	unsigned int flags = 0;
 	unsigned pinned = 0;
 	int r;
 
+	if (!(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY))
+		flags |= FOLL_WRITE;
+
 	if (gtt->userflags & AMDGPU_GEM_USERPTR_ANONONLY) {
 		/* check that we only use anonymous memory
 		   to prevent problems with writeback */

@@ -581,7 +584,7 @@ int amdgpu_ttm_tt_get_user_pages(struct ttm_tt *ttm, struct page **pages)
 		list_add(&guptask.list, &gtt->guptasks);
 		spin_unlock(&gtt->guptasklock);
 
-		r = get_user_pages(userptr, num_pages, write, 0, p, NULL);
+		r = get_user_pages(userptr, num_pages, flags, p, NULL);
 
 		spin_lock(&gtt->guptasklock);
 		list_del(&guptask.list);
@@ -748,19 +748,22 @@ static struct page **etnaviv_gem_userptr_do_get_pages(
 	int ret = 0, pinned, npages = etnaviv_obj->base.size >> PAGE_SHIFT;
 	struct page **pvec;
 	uintptr_t ptr;
+	unsigned int flags = 0;
 
 	pvec = drm_malloc_ab(npages, sizeof(struct page *));
 	if (!pvec)
 		return ERR_PTR(-ENOMEM);
 
+	if (!etnaviv_obj->userptr.ro)
+		flags |= FOLL_WRITE;
+
 	pinned = 0;
 	ptr = etnaviv_obj->userptr.ptr;
 
 	down_read(&mm->mmap_sem);
 	while (pinned < npages) {
 		ret = get_user_pages_remote(task, mm, ptr, npages - pinned,
-					    !etnaviv_obj->userptr.ro, 0,
-					    pvec + pinned, NULL);
+					    flags, pvec + pinned, NULL);
 		if (ret < 0)
 			break;
 
@@ -488,7 +488,8 @@ static dma_addr_t *g2d_userptr_get_dma_addr(struct drm_device *drm_dev,
 		goto err_free;
 	}
 
-	ret = get_vaddr_frames(start, npages, true, true, g2d_userptr->vec);
+	ret = get_vaddr_frames(start, npages, FOLL_FORCE | FOLL_WRITE,
+			       g2d_userptr->vec);
 	if (ret != npages) {
 		DRM_ERROR("failed to get user pages from userptr.\n");
 		if (ret < 0)
@@ -508,6 +508,10 @@ __i915_gem_userptr_get_pages_worker(struct work_struct *_work)
 	pvec = drm_malloc_gfp(npages, sizeof(struct page *), GFP_TEMPORARY);
 	if (pvec != NULL) {
 		struct mm_struct *mm = obj->userptr.mm->mm;
+		unsigned int flags = 0;
+
+		if (!obj->userptr.read_only)
+			flags |= FOLL_WRITE;
 
 		ret = -EFAULT;
 		if (atomic_inc_not_zero(&mm->mm_users)) {

@@ -517,7 +521,7 @@ __i915_gem_userptr_get_pages_worker(struct work_struct *_work)
 					(work->task, mm,
 					 obj->userptr.ptr + pinned * PAGE_SIZE,
 					 npages - pinned,
-					 !obj->userptr.read_only, 0,
+					 flags,
 					 pvec + pinned, NULL);
 				if (ret < 0)
 					break;
@@ -566,7 +566,8 @@ static int radeon_ttm_tt_pin_userptr(struct ttm_tt *ttm)
 		uint64_t userptr = gtt->userptr + pinned * PAGE_SIZE;
 		struct page **pages = ttm->pages + pinned;
 
-		r = get_user_pages(userptr, num_pages, write, 0, pages, NULL);
+		r = get_user_pages(userptr, num_pages, write ? FOLL_WRITE : 0,
+				   pages, NULL);
 		if (r < 0)
 			goto release_pages;
 
@@ -241,8 +241,8 @@ via_lock_all_dma_pages(drm_via_sg_info_t *vsg, drm_via_dmablit_t *xfer)
 	down_read(&current->mm->mmap_sem);
 	ret = get_user_pages((unsigned long)xfer->mem_addr,
 			     vsg->num_pages,
-			     (vsg->direction == DMA_FROM_DEVICE),
-			     0, vsg->pages, NULL);
+			     (vsg->direction == DMA_FROM_DEVICE) ? FOLL_WRITE : 0,
+			     vsg->pages, NULL);
 
 	up_read(&current->mm->mmap_sem);
 	if (ret != vsg->num_pages) {
@@ -94,6 +94,7 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
 	unsigned long dma_attrs = 0;
 	struct scatterlist *sg, *sg_list_start;
 	int need_release = 0;
+	unsigned int gup_flags = FOLL_WRITE;
 
 	if (dmasync)
 		dma_attrs |= DMA_ATTR_WRITE_BARRIER;

@@ -183,6 +184,9 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
 	if (ret)
 		goto out;
 
+	if (!umem->writable)
+		gup_flags |= FOLL_FORCE;
+
 	need_release = 1;
 	sg_list_start = umem->sg_head.sgl;
 

@@ -190,7 +194,7 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
 		ret = get_user_pages(cur_base,
 				     min_t(unsigned long, npages,
 					   PAGE_SIZE / sizeof (struct page *)),
-				     1, !umem->writable, page_list, vma_list);
+				     gup_flags, page_list, vma_list);
 
 		if (ret < 0)
 			goto out;
@@ -527,6 +527,7 @@ int ib_umem_odp_map_dma_pages(struct ib_umem *umem, u64 user_virt, u64 bcnt,
 	u64 off;
 	int j, k, ret = 0, start_idx, npages = 0;
 	u64 base_virt_addr;
+	unsigned int flags = 0;
 
 	if (access_mask == 0)
 		return -EINVAL;

@@ -556,6 +557,9 @@ int ib_umem_odp_map_dma_pages(struct ib_umem *umem, u64 user_virt, u64 bcnt,
 		goto out_put_task;
 	}
 
+	if (access_mask & ODP_WRITE_ALLOWED_BIT)
+		flags |= FOLL_WRITE;
+
 	start_idx = (user_virt - ib_umem_start(umem)) >> PAGE_SHIFT;
 	k = start_idx;
 

@@ -574,8 +578,7 @@ int ib_umem_odp_map_dma_pages(struct ib_umem *umem, u64 user_virt, u64 bcnt,
 		 */
 		npages = get_user_pages_remote(owning_process, owning_mm,
 				user_virt, gup_num_pages,
-				access_mask & ODP_WRITE_ALLOWED_BIT,
-				0, local_page_list, NULL);
+				flags, local_page_list, NULL);
 		up_read(&owning_mm->mmap_sem);
 
 		if (npages < 0)
@@ -472,7 +472,7 @@ int mthca_map_user_db(struct mthca_dev *dev, struct mthca_uar *uar,
 		goto out;
 	}
 
-	ret = get_user_pages(uaddr & PAGE_MASK, 1, 1, 0, pages, NULL);
+	ret = get_user_pages(uaddr & PAGE_MASK, 1, FOLL_WRITE, pages, NULL);
 	if (ret < 0)
 		goto out;
 
@@ -67,7 +67,8 @@ static int __qib_get_user_pages(unsigned long start_page, size_t num_pages,
 
 	for (got = 0; got < num_pages; got += ret) {
 		ret = get_user_pages(start_page + got * PAGE_SIZE,
-				     num_pages - got, 1, 1,
+				     num_pages - got,
+				     FOLL_WRITE | FOLL_FORCE,
 				     p + got, NULL);
 		if (ret < 0)
 			goto bail_release;
@@ -111,6 +111,7 @@ static int usnic_uiom_get_pages(unsigned long addr, size_t size, int writable,
 	int i;
 	int flags;
 	dma_addr_t pa;
+	unsigned int gup_flags;
 
 	if (!can_do_mlock())
 		return -EPERM;

@@ -135,6 +136,8 @@ static int usnic_uiom_get_pages(unsigned long addr, size_t size, int writable,
 
 	flags = IOMMU_READ | IOMMU_CACHE;
 	flags |= (writable) ? IOMMU_WRITE : 0;
+	gup_flags = FOLL_WRITE;
+	gup_flags |= (writable) ? 0 : FOLL_FORCE;
 	cur_base = addr & PAGE_MASK;
 	ret = 0;
 

@@ -142,7 +145,7 @@ static int usnic_uiom_get_pages(unsigned long addr, size_t size, int writable,
 		ret = get_user_pages(cur_base,
 				     min_t(unsigned long, npages,
 					   PAGE_SIZE / sizeof(struct page *)),
-				     1, !writable, page_list, NULL);
+				     gup_flags, page_list, NULL);
 
 		if (ret < 0)
 			goto out;
@@ -124,8 +124,8 @@ int ivtv_udma_setup(struct ivtv *itv, unsigned long ivtv_dest_addr,
 	}
 
 	/* Get user pages for DMA Xfer */
-	err = get_user_pages_unlocked(user_dma.uaddr, user_dma.page_count, 0,
-			1, dma->map);
+	err = get_user_pages_unlocked(user_dma.uaddr, user_dma.page_count,
+			dma->map, FOLL_FORCE);
 
 	if (user_dma.page_count != err) {
 		IVTV_DEBUG_WARN("failed to map user pages, returned %d instead of %d\n",
@@ -76,11 +76,12 @@ static int ivtv_yuv_prep_user_dma(struct ivtv *itv, struct ivtv_user_dma *dma,
 
 	/* Get user pages for DMA Xfer */
 	y_pages = get_user_pages_unlocked(y_dma.uaddr,
-			y_dma.page_count, 0, 1, &dma->map[0]);
+			y_dma.page_count, &dma->map[0], FOLL_FORCE);
 	uv_pages = 0; /* silence gcc. value is set and consumed only if: */
 	if (y_pages == y_dma.page_count) {
 		uv_pages = get_user_pages_unlocked(uv_dma.uaddr,
-				uv_dma.page_count, 0, 1, &dma->map[y_pages]);
+				uv_dma.page_count, &dma->map[y_pages],
+				FOLL_FORCE);
 	}
 
 	if (y_pages != y_dma.page_count || uv_pages != uv_dma.page_count) {
@@ -214,7 +214,7 @@ static int omap_vout_get_userptr(struct videobuf_buffer *vb, u32 virtp,
 	if (!vec)
 		return -ENOMEM;
 
-	ret = get_vaddr_frames(virtp, 1, true, false, vec);
+	ret = get_vaddr_frames(virtp, 1, FOLL_WRITE, vec);
 	if (ret != 1) {
 		frame_vector_destroy(vec);
 		return -EINVAL;
@@ -156,6 +156,7 @@ static int videobuf_dma_init_user_locked(struct videobuf_dmabuf *dma,
 {
 	unsigned long first, last;
 	int err, rw = 0;
+	unsigned int flags = FOLL_FORCE;
 
 	dma->direction = direction;
 	switch (dma->direction) {

@@ -178,12 +179,14 @@ static int videobuf_dma_init_user_locked(struct videobuf_dmabuf *dma,
 	if (NULL == dma->pages)
 		return -ENOMEM;
 
+	if (rw == READ)
+		flags |= FOLL_WRITE;
+
 	dprintk(1, "init user [0x%lx+0x%lx => %d pages]\n",
 		data, size, dma->nr_pages);
 
 	err = get_user_pages(data & PAGE_MASK, dma->nr_pages,
-			     rw == READ, 1, /* force */
-			     dma->pages, NULL);
+			     flags, dma->pages, NULL);
 
 	if (err != dma->nr_pages) {
 		dma->nr_pages = (err >= 0) ? err : 0;
@@ -42,6 +42,10 @@ struct frame_vector *vb2_create_framevec(unsigned long start,
 	unsigned long first, last;
 	unsigned long nr;
 	struct frame_vector *vec;
+	unsigned int flags = FOLL_FORCE;
+
+	if (write)
+		flags |= FOLL_WRITE;
 
 	first = start >> PAGE_SHIFT;
 	last = (start + length - 1) >> PAGE_SHIFT;

@@ -49,7 +53,7 @@ struct frame_vector *vb2_create_framevec(unsigned long start,
 	vec = frame_vector_create(nr);
 	if (!vec)
 		return ERR_PTR(-ENOMEM);
-	ret = get_vaddr_frames(start & PAGE_MASK, nr, write, true, vec);
+	ret = get_vaddr_frames(start & PAGE_MASK, nr, flags, vec);
 	if (ret < 0)
 		goto out_destroy;
 	/* We accept only complete set of PFNs */
@@ -1396,8 +1396,7 @@ int __scif_pin_pages(void *addr, size_t len, int *out_prot,
 		pinned_pages->nr_pages = get_user_pages(
 				(u64)addr,
 				nr_pages,
-				!!(prot & SCIF_PROT_WRITE),
-				0,
+				(prot & SCIF_PROT_WRITE) ? FOLL_WRITE : 0,
 				pinned_pages->pages,
 				NULL);
 		up_write(&mm->mmap_sem);
@@ -198,7 +198,7 @@ static int non_atomic_pte_lookup(struct vm_area_struct *vma,
 #else
 	*pageshift = PAGE_SHIFT;
 #endif
-	if (get_user_pages(vaddr, 1, write, 0, &page, NULL) <= 0)
+	if (get_user_pages(vaddr, 1, write ? FOLL_WRITE : 0, &page, NULL) <= 0)
 		return -EFAULT;
 	*paddr = page_to_phys(page);
 	put_page(page);
@@ -309,7 +309,8 @@ static ssize_t goldfish_pipe_read_write(struct file *filp, char __user *buffer,
 	 * much memory to the process.
 	 */
 	down_read(&current->mm->mmap_sem);
-	ret = get_user_pages(address, 1, !is_write, 0, &page, NULL);
+	ret = get_user_pages(address, 1, is_write ? 0 : FOLL_WRITE,
+			&page, NULL);
 	up_read(&current->mm->mmap_sem);
 	if (ret < 0)
 		break;
@@ -892,7 +892,8 @@ rio_dma_transfer(struct file *filp, u32 transfer_mode,
 	down_read(&current->mm->mmap_sem);
 	pinned = get_user_pages(
 			(unsigned long)xfer->loc_addr & PAGE_MASK,
-			nr_pages, dir == DMA_FROM_DEVICE, 0,
+			nr_pages,
+			dir == DMA_FROM_DEVICE ? FOLL_WRITE : 0,
 			page_list, NULL);
 	up_read(&current->mm->mmap_sem);
 
@@ -4922,9 +4922,8 @@ static int sgl_map_user_pages(struct st_buffer *STbp,
 	res = get_user_pages_unlocked(
 		uaddr,
 		nr_pages,
-		rw == READ,
-		0, /* don't force */
-		pages);
+		pages,
+		rw == READ ? FOLL_WRITE : 0); /* don't force */
 
 	/* Errors and no page mapped should return here */
 	if (res < nr_pages)
@@ -423,8 +423,7 @@ create_pagelist(char __user *buf, size_t count, unsigned short type,
 		actual_pages = get_user_pages(task, task->mm,
 					  (unsigned long)buf & ~(PAGE_SIZE - 1),
 					  num_pages,
-					  (type == PAGELIST_READ) /*Write */ ,
-					  0 /*Force */ ,
+					  (type == PAGELIST_READ) ? FOLL_WRITE : 0,
 					  pages,
 					  NULL /*vmas */);
 		up_read(&task->mm->mmap_sem);
@@ -1477,8 +1477,7 @@ dump_phys_mem(void *virt_addr, uint32_t num_bytes)
 			current->mm,              /* mm */
 			(unsigned long)virt_addr, /* start */
 			num_pages,                /* len */
-			0,                        /* write */
-			0,                        /* force */
+			0,                        /* gup_flags */
 			pages,                    /* pages (array of page pointers) */
 			NULL);                    /* vmas */
 	up_read(&current->mm->mmap_sem);
@@ -686,8 +686,8 @@ static ssize_t pvr2fb_write(struct fb_info *info, const char *buf,
 	if (!pages)
 		return -ENOMEM;
 
-	ret = get_user_pages_unlocked((unsigned long)buf, nr_pages, WRITE,
-				      0, pages);
+	ret = get_user_pages_unlocked((unsigned long)buf, nr_pages, pages,
+				      FOLL_WRITE);
 
 	if (ret < nr_pages) {
 		nr_pages = ret;
@@ -245,8 +245,8 @@ static long ioctl_memcpy(struct fsl_hv_ioctl_memcpy __user *p)
 	/* Get the physical addresses of the source buffer */
 	down_read(&current->mm->mmap_sem);
 	num_pinned = get_user_pages(param.local_vaddr - lb_offset,
-		num_pages, (param.source == -1) ? READ : WRITE,
-		0, pages, NULL);
+		num_pages, (param.source == -1) ? 0 : FOLL_WRITE,
+		pages, NULL);
 	up_read(&current->mm->mmap_sem);
 
 	if (num_pinned != num_pages) {
@@ -191,6 +191,7 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
 {
 	struct page *page;
 	int ret;
+	unsigned int gup_flags = FOLL_FORCE;
 
 #ifdef CONFIG_STACK_GROWSUP
 	if (write) {

@@ -199,12 +200,16 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
 			return NULL;
 	}
 #endif
+
+	if (write)
+		gup_flags |= FOLL_WRITE;
+
 	/*
 	 * We are doing an exec().  'current' is the process
 	 * doing the exec and bprm->mm is the new process's mm.
 	 */
-	ret = get_user_pages_remote(current, bprm->mm, pos, 1, write,
-			1, &page, NULL);
+	ret = get_user_pages_remote(current, bprm->mm, pos, 1, gup_flags,
+			&page, NULL);
 	if (ret <= 0)
 		return NULL;
 
@@ -252,7 +252,7 @@ static ssize_t proc_pid_cmdline_read(struct file *file, char __user *buf,
 	 * Inherently racy -- command line shares address space
 	 * with code and data.
 	 */
-	rv = access_remote_vm(mm, arg_end - 1, &c, 1, 0);
+	rv = access_remote_vm(mm, arg_end - 1, &c, 1, FOLL_FORCE);
 	if (rv <= 0)
 		goto out_free_page;
 

@@ -270,7 +270,8 @@ static ssize_t proc_pid_cmdline_read(struct file *file, char __user *buf,
 		int nr_read;
 
 		_count = min3(count, len, PAGE_SIZE);
-		nr_read = access_remote_vm(mm, p, page, _count, 0);
+		nr_read = access_remote_vm(mm, p, page, _count,
+				FOLL_FORCE);
 		if (nr_read < 0)
 			rv = nr_read;
 		if (nr_read <= 0)

@@ -305,7 +306,8 @@ static ssize_t proc_pid_cmdline_read(struct file *file, char __user *buf,
 			bool final;
 
 			_count = min3(count, len, PAGE_SIZE);
-			nr_read = access_remote_vm(mm, p, page, _count, 0);
+			nr_read = access_remote_vm(mm, p, page, _count,
+					FOLL_FORCE);
 			if (nr_read < 0)
 				rv = nr_read;
 			if (nr_read <= 0)

@@ -354,7 +356,8 @@ static ssize_t proc_pid_cmdline_read(struct file *file, char __user *buf,
 			bool final;
 
 			_count = min3(count, len, PAGE_SIZE);
-			nr_read = access_remote_vm(mm, p, page, _count, 0);
+			nr_read = access_remote_vm(mm, p, page, _count,
+					FOLL_FORCE);
 			if (nr_read < 0)
 				rv = nr_read;
 			if (nr_read <= 0)

@@ -832,6 +835,7 @@ static ssize_t mem_rw(struct file *file, char __user *buf,
 	unsigned long addr = *ppos;
 	ssize_t copied;
 	char *page;
+	unsigned int flags = FOLL_FORCE;
 
 	if (!mm)
 		return 0;

@@ -844,6 +848,9 @@ static ssize_t mem_rw(struct file *file, char __user *buf,
 	if (!atomic_inc_not_zero(&mm->mm_users))
 		goto free;
 
+	if (write)
+		flags |= FOLL_WRITE;
+
 	while (count > 0) {
 		int this_len = min_t(int, count, PAGE_SIZE);
 

@@ -852,7 +859,7 @@ static ssize_t mem_rw(struct file *file, char __user *buf,
 			break;
 		}
 
-		this_len = access_remote_vm(mm, addr, page, this_len, write);
+		this_len = access_remote_vm(mm, addr, page, this_len, flags);
 		if (!this_len) {
 			if (!copied)
 				copied = -EIO;

@@ -965,7 +972,7 @@ static ssize_t environ_read(struct file *file, char __user *buf,
 		this_len = min(max_len, this_len);
 
 		retval = access_remote_vm(mm, (env_start + src),
-			page, this_len, 0);
+			page, this_len, FOLL_FORCE);
 
 		if (retval <= 0) {
 			ret = retval;
@@ -1266,9 +1266,10 @@ static inline int fixup_user_fault(struct task_struct *tsk,
 }
 #endif
 
-extern int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write);
+extern int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len,
+		unsigned int gup_flags);
 extern int access_remote_vm(struct mm_struct *mm, unsigned long addr,
-		void *buf, int len, int write);
+		void *buf, int len, unsigned int gup_flags);
 
 long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
 		      unsigned long start, unsigned long nr_pages,

@@ -1276,19 +1277,18 @@ long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
 		      struct vm_area_struct **vmas, int *nonblocking);
 long get_user_pages_remote(struct task_struct *tsk, struct mm_struct *mm,
 			    unsigned long start, unsigned long nr_pages,
-			    int write, int force, struct page **pages,
+			    unsigned int gup_flags, struct page **pages,
 			    struct vm_area_struct **vmas);
 long get_user_pages(unsigned long start, unsigned long nr_pages,
-			    int write, int force, struct page **pages,
+			    unsigned int gup_flags, struct page **pages,
 			    struct vm_area_struct **vmas);
 long get_user_pages_locked(unsigned long start, unsigned long nr_pages,
-		    int write, int force, struct page **pages, int *locked);
+		    unsigned int gup_flags, struct page **pages, int *locked);
 long __get_user_pages_unlocked(struct task_struct *tsk, struct mm_struct *mm,
 			       unsigned long start, unsigned long nr_pages,
-			       int write, int force, struct page **pages,
-			       unsigned int gup_flags);
+			       struct page **pages, unsigned int gup_flags);
 long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
-			     int write, int force, struct page **pages);
+			     struct page **pages, unsigned int gup_flags);
 int get_user_pages_fast(unsigned long start, int nr_pages, int write,
 			struct page **pages);
 
@@ -1306,7 +1306,7 @@ struct frame_vector {
 struct frame_vector *frame_vector_create(unsigned int nr_frames);
 void frame_vector_destroy(struct frame_vector *vec);
 int get_vaddr_frames(unsigned long start, unsigned int nr_pfns,
-		     bool write, bool force, struct frame_vector *vec);
+		     unsigned int gup_flags, struct frame_vector *vec);
 void put_vaddr_frames(struct frame_vector *vec);
 int frame_vector_to_pages(struct frame_vector *vec);
 void frame_vector_to_pfns(struct frame_vector *vec);
@@ -2232,6 +2232,7 @@ static inline struct page *follow_page(struct vm_area_struct *vma,
 #define FOLL_TRIED	0x800	/* a retry, previous pass started an IO */
 #define FOLL_MLOCK	0x1000	/* lock present pages */
 #define FOLL_REMOTE	0x2000	/* we are working on non-current tsk/mm */
+#define FOLL_COW	0x4000	/* internal GUP flag */
 
 typedef int (*pte_fn_t)(pte_t *pte, pgtable_t token, unsigned long addr,
 			void *data);
@@ -300,7 +300,8 @@ int uprobe_write_opcode(struct mm_struct *mm, unsigned long vaddr,
 
 retry:
 	/* Read the page with vaddr into memory */
-	ret = get_user_pages_remote(NULL, mm, vaddr, 1, 0, 1, &old_page, &vma);
+	ret = get_user_pages_remote(NULL, mm, vaddr, 1, FOLL_FORCE, &old_page,
+			&vma);
 	if (ret <= 0)
 		return ret;
 

@@ -1710,7 +1711,8 @@ static int is_trap_at_addr(struct mm_struct *mm, unsigned long vaddr)
 	 * but we treat this as a 'remote' access since it is
 	 * essentially a kernel access to the memory.
 	 */
-	result = get_user_pages_remote(NULL, mm, vaddr, 1, 0, 1, &page, NULL);
+	result = get_user_pages_remote(NULL, mm, vaddr, 1, FOLL_FORCE, &page,
+			NULL);
 	if (result < 0)
 		return result;
 
@@ -537,7 +537,7 @@ int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst
 		int this_len, retval;
 
 		this_len = (len > sizeof(buf)) ? sizeof(buf) : len;
-		retval = access_process_vm(tsk, src, buf, this_len, 0);
+		retval = access_process_vm(tsk, src, buf, this_len, FOLL_FORCE);
 		if (!retval) {
 			if (copied)
 				break;

@@ -564,7 +564,8 @@ int ptrace_writedata(struct task_struct *tsk, char __user *src, unsigned long ds
 		this_len = (len > sizeof(buf)) ? sizeof(buf) : len;
 		if (copy_from_user(buf, src, this_len))
 			return -EFAULT;
-		retval = access_process_vm(tsk, dst, buf, this_len, 1);
+		retval = access_process_vm(tsk, dst, buf, this_len,
+				FOLL_FORCE | FOLL_WRITE);
 		if (!retval) {
 			if (copied)
 				break;

@@ -1127,7 +1128,7 @@ int generic_ptrace_peekdata(struct task_struct *tsk, unsigned long addr,
 	unsigned long tmp;
 	int copied;
 
-	copied = access_process_vm(tsk, addr, &tmp, sizeof(tmp), 0);
+	copied = access_process_vm(tsk, addr, &tmp, sizeof(tmp), FOLL_FORCE);
 	if (copied != sizeof(tmp))
 		return -EIO;
 	return put_user(tmp, (unsigned long __user *)data);

@@ -1138,7 +1139,8 @@ int generic_ptrace_pokedata(struct task_struct *tsk, unsigned long addr,
 {
 	int copied;
 
-	copied = access_process_vm(tsk, addr, &data, sizeof(data), 1);
+	copied = access_process_vm(tsk, addr, &data, sizeof(data),
+			FOLL_FORCE | FOLL_WRITE);
 	return (copied == sizeof(data)) ? 0 : -EIO;
 }
 

@@ -1155,7 +1157,8 @@ int compat_ptrace_request(struct task_struct *child, compat_long_t request,
 	switch (request) {
 	case PTRACE_PEEKTEXT:
 	case PTRACE_PEEKDATA:
-		ret = access_process_vm(child, addr, &word, sizeof(word), 0);
+		ret = access_process_vm(child, addr, &word, sizeof(word),
+				FOLL_FORCE);
 		if (ret != sizeof(word))
 			ret = -EIO;
 		else

@@ -1164,7 +1167,8 @@ int compat_ptrace_request(struct task_struct *child, compat_long_t request,
 
 	case PTRACE_POKETEXT:
 	case PTRACE_POKEDATA:
-		ret = access_process_vm(child, addr, &data, sizeof(data), 1);
+		ret = access_process_vm(child, addr, &data, sizeof(data),
+				FOLL_FORCE | FOLL_WRITE);
 		ret = (ret != sizeof(data) ? -EIO : 0);
 		break;
 
@@ -11,10 +11,7 @@
  * get_vaddr_frames() - map virtual addresses to pfns
  * @start:	starting user address
  * @nr_frames:	number of pages / pfns from start to map
- * @write:	whether pages will be written to by the caller
- * @force:	whether to force write access even if user mapping is
- *		readonly. See description of the same argument of
- *		get_user_pages().
+ * @gup_flags:	flags modifying lookup behaviour
  * @vec:	structure which receives pages / pfns of the addresses mapped.
  *		It should have space for at least nr_frames entries.
  *

@@ -34,7 +31,7 @@
  * This function takes care of grabbing mmap_sem as necessary.
  */
 int get_vaddr_frames(unsigned long start, unsigned int nr_frames,
-		     bool write, bool force, struct frame_vector *vec)
+		     unsigned int gup_flags, struct frame_vector *vec)
 {
 	struct mm_struct *mm = current->mm;
 	struct vm_area_struct *vma;

@@ -59,7 +56,7 @@ int get_vaddr_frames(unsigned long start, unsigned int nr_frames,
 		vec->got_ref = true;
 		vec->is_pfns = false;
 		ret = get_user_pages_locked(start, nr_frames,
-			write, force, (struct page **)(vec->ptrs), &locked);
+			gup_flags, (struct page **)(vec->ptrs), &locked);
 		goto out;
 	}
 
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -60,6 +60,16 @@ static int follow_pfn_pte(struct vm_area_struct *vma, unsigned long address,
 	return -EEXIST;
 }
 
+/*
+ * FOLL_FORCE can write to even unwritable pte's, but only
+ * after we've gone through a COW cycle and they are dirty.
+ */
+static inline bool can_follow_write_pte(pte_t pte, unsigned int flags)
+{
+	return pte_write(pte) ||
+		((flags & FOLL_FORCE) && (flags & FOLL_COW) && pte_dirty(pte));
+}
+
 static struct page *follow_page_pte(struct vm_area_struct *vma,
 		unsigned long address, pmd_t *pmd, unsigned int flags)
 {

@@ -95,7 +105,7 @@ static struct page *follow_page_pte(struct vm_area_struct *vma,
 	}
 	if ((flags & FOLL_NUMA) && pte_protnone(pte))
 		goto no_page;
-	if ((flags & FOLL_WRITE) && !pte_write(pte)) {
+	if ((flags & FOLL_WRITE) && !can_follow_write_pte(pte, flags)) {
 		pte_unmap_unlock(ptep, ptl);
 		return NULL;
 	}

@@ -412,7 +422,7 @@ static int faultin_page(struct task_struct *tsk, struct vm_area_struct *vma,
 	 * reCOWed by userspace write).
 	 */
 	if ((ret & VM_FAULT_WRITE) && !(vma->vm_flags & VM_WRITE))
-		*flags &= ~FOLL_WRITE;
+		*flags |= FOLL_COW;
 	return 0;
 }
 

@@ -729,7 +739,6 @@ static __always_inline long __get_user_pages_locked(struct task_struct *tsk,
 						struct mm_struct *mm,
 						unsigned long start,
 						unsigned long nr_pages,
-						int write, int force,
 						struct page **pages,
 						struct vm_area_struct **vmas,
 						int *locked, bool notify_drop,

@@ -747,10 +756,6 @@ static __always_inline long __get_user_pages_locked(struct task_struct *tsk,
 
 	if (pages)
 		flags |= FOLL_GET;
-	if (write)
-		flags |= FOLL_WRITE;
-	if (force)
-		flags |= FOLL_FORCE;
 
 	pages_done = 0;
 	lock_dropped = false;

@@ -843,12 +848,12 @@ static __always_inline long __get_user_pages_locked(struct task_struct *tsk,
  *      up_read(&mm->mmap_sem);
  */
 long get_user_pages_locked(unsigned long start, unsigned long nr_pages,
-			   int write, int force, struct page **pages,
+			   unsigned int gup_flags, struct page **pages,
 			   int *locked)
 {
 	return __get_user_pages_locked(current, current->mm, start, nr_pages,
-				       write, force, pages, NULL, locked, true,
-				       FOLL_TOUCH);
+				       pages, NULL, locked, true,
+				       gup_flags | FOLL_TOUCH);
 }
 EXPORT_SYMBOL(get_user_pages_locked);
 

@@ -864,14 +869,14 @@ EXPORT_SYMBOL(get_user_pages_locked);
  */
 __always_inline long __get_user_pages_unlocked(struct task_struct *tsk, struct mm_struct *mm,
 					       unsigned long start, unsigned long nr_pages,
-					       int write, int force, struct page **pages,
-					       unsigned int gup_flags)
+					       struct page **pages, unsigned int gup_flags)
 {
 	long ret;
 	int locked = 1;
 
 	down_read(&mm->mmap_sem);
-	ret = __get_user_pages_locked(tsk, mm, start, nr_pages, write, force,
-				      pages, NULL, &locked, false, gup_flags);
+	ret = __get_user_pages_locked(tsk, mm, start, nr_pages, pages, NULL,
+				      &locked, false, gup_flags);
 	if (locked)
 		up_read(&mm->mmap_sem);
 	return ret;

@@ -896,10 +901,10 @@ EXPORT_SYMBOL(__get_user_pages_unlocked);
  *      "force" parameter).
  */
 long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
-			     int write, int force, struct page **pages)
+			     struct page **pages, unsigned int gup_flags)
 {
 	return __get_user_pages_unlocked(current, current->mm, start, nr_pages,
-					 write, force, pages, FOLL_TOUCH);
+					 pages, gup_flags | FOLL_TOUCH);
 }
 EXPORT_SYMBOL(get_user_pages_unlocked);
 

@@ -910,9 +915,7 @@ EXPORT_SYMBOL(get_user_pages_unlocked);
  * @mm:		mm_struct of target mm
 * @start:	starting user address
 * @nr_pages:	number of pages from start to pin
- * @write:	whether pages will be written to by the caller
- * @force:	whether to force access even when user mapping is currently
- *		protected (but never forces write access to shared mapping).
+ * @gup_flags:	flags modifying lookup behaviour
 * @pages:	array that receives pointers to the pages pinned.
 *		Should be at least nr_pages long. Or NULL, if caller
 *		only intends to ensure the pages are faulted in.

@@ -941,9 +944,9 @@ EXPORT_SYMBOL(get_user_pages_unlocked);
  * or similar operation cannot guarantee anything stronger anyway because
 * locks can't be held over the syscall boundary.
 *
- * If write=0, the page must not be written to. If the page is written to,
- * set_page_dirty (or set_page_dirty_lock, as appropriate) must be called
- * after the page is finished with, and before put_page is called.
+ * If gup_flags & FOLL_WRITE == 0, the page must not be written to. If the page
+ * is written to, set_page_dirty (or set_page_dirty_lock, as appropriate) must
+ * be called after the page is finished with, and before put_page is called.
 *
 * get_user_pages is typically used for fewer-copy IO operations, to get a
 * handle on the memory by some means other than accesses via the user virtual

@@ -960,12 +963,12 @@ EXPORT_SYMBOL(get_user_pages_unlocked);
  */
 long get_user_pages_remote(struct task_struct *tsk, struct mm_struct *mm,
 		unsigned long start, unsigned long nr_pages,
-		int write, int force, struct page **pages,
+		unsigned int gup_flags, struct page **pages,
 		struct vm_area_struct **vmas)
 {
-	return __get_user_pages_locked(tsk, mm, start, nr_pages, write, force,
-				       pages, vmas, NULL, false,
-				       FOLL_TOUCH | FOLL_REMOTE);
+	return __get_user_pages_locked(tsk, mm, start, nr_pages, pages, vmas,
+				       NULL, false,
+				       gup_flags | FOLL_TOUCH | FOLL_REMOTE);
 }
 EXPORT_SYMBOL(get_user_pages_remote);
 

@@ -976,12 +979,12 @@ EXPORT_SYMBOL(get_user_pages_remote);
  * obviously don't pass FOLL_REMOTE in here.
 */
 long get_user_pages(unsigned long start, unsigned long nr_pages,
-		int write, int force, struct page **pages,
+		unsigned int gup_flags, struct page **pages,
 		struct vm_area_struct **vmas)
 {
 	return __get_user_pages_locked(current, current->mm, start, nr_pages,
-				       write, force, pages, vmas, NULL, false,
-				       FOLL_TOUCH);
+				       pages, vmas, NULL, false,
+				       gup_flags | FOLL_TOUCH);
 }
 EXPORT_SYMBOL(get_user_pages);
 

@@ -1505,7 +1508,8 @@ int get_user_pages_fast(unsigned long start, int nr_pages, int write,
 		start += nr << PAGE_SHIFT;
 		pages += nr;
 
-		ret = get_user_pages_unlocked(start, nr_pages - nr, write, 0, pages);
+		ret = get_user_pages_unlocked(start, nr_pages - nr, pages,
+					      write ? FOLL_WRITE : 0);
 
 		/* Have to be a bit careful with return values */
 		if (nr > 0) {
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -3869,10 +3869,11 @@ EXPORT_SYMBOL_GPL(generic_access_phys);
  * given task for page fault accounting.
 */
 static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
-		unsigned long addr, void *buf, int len, int write)
+		unsigned long addr, void *buf, int len, unsigned int gup_flags)
 {
 	struct vm_area_struct *vma;
 	void *old_buf = buf;
+	int write = gup_flags & FOLL_WRITE;
 
 	down_read(&mm->mmap_sem);
 	/* ignore errors, just check how much was successfully transferred */

@@ -3882,7 +3883,7 @@ static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
 		struct page *page = NULL;
 
 		ret = get_user_pages_remote(tsk, mm, addr, 1,
-				write, 1, &page, &vma);
+				gup_flags, &page, &vma);
 		if (ret <= 0) {
 #ifndef CONFIG_HAVE_IOREMAP_PROT
 			break;

@@ -3934,14 +3935,14 @@ static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
  * @addr:	start address to access
 * @buf:	source or destination buffer
 * @len:	number of bytes to transfer
- * @write:	whether the access is a write
+ * @gup_flags:	flags modifying lookup behaviour
 *
 * The caller must hold a reference on @mm.
 */
 int access_remote_vm(struct mm_struct *mm, unsigned long addr,
-		void *buf, int len, int write)
+		void *buf, int len, unsigned int gup_flags)
 {
-	return __access_remote_vm(NULL, mm, addr, buf, len, write);
+	return __access_remote_vm(NULL, mm, addr, buf, len, gup_flags);
 }
 
 /*

@@ -3950,7 +3951,7 @@ int access_remote_vm(struct mm_struct *mm, unsigned long addr,
  * Do not walk the page table directly, use get_user_pages
 */
 int access_process_vm(struct task_struct *tsk, unsigned long addr,
-		void *buf, int len, int write)
+		void *buf, int len, unsigned int gup_flags)
 {
 	struct mm_struct *mm;
 	int ret;

@@ -3959,7 +3960,8 @@ int access_process_vm(struct task_struct *tsk, unsigned long addr,
 	if (!mm)
 		return 0;
 
-	ret = __access_remote_vm(tsk, mm, addr, buf, len, write);
+	ret = __access_remote_vm(tsk, mm, addr, buf, len, gup_flags);
+
 	mmput(mm);
 
 	return ret;
@@ -850,7 +850,7 @@ static int lookup_node(unsigned long addr)
 	struct page *p;
 	int err;
 
-	err = get_user_pages(addr & PAGE_MASK, 1, 0, 0, &p, NULL);
+	err = get_user_pages(addr & PAGE_MASK, 1, 0, &p, NULL);
 	if (err >= 0) {
 		err = page_to_nid(p);
 		put_page(p);
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -160,33 +160,25 @@ long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
  * - don't permit access to VMAs that don't support it, such as I/O mappings
 */
 long get_user_pages(unsigned long start, unsigned long nr_pages,
-		    int write, int force, struct page **pages,
+		    unsigned int gup_flags, struct page **pages,
 		    struct vm_area_struct **vmas)
 {
-	int flags = 0;
-
-	if (write)
-		flags |= FOLL_WRITE;
-	if (force)
-		flags |= FOLL_FORCE;
-
-	return __get_user_pages(current, current->mm, start, nr_pages, flags,
-				pages, vmas, NULL);
+	return __get_user_pages(current, current->mm, start, nr_pages,
+				gup_flags, pages, vmas, NULL);
 }
 EXPORT_SYMBOL(get_user_pages);
 
 long get_user_pages_locked(unsigned long start, unsigned long nr_pages,
-			   int write, int force, struct page **pages,
+			   unsigned int gup_flags, struct page **pages,
 			   int *locked)
 {
-	return get_user_pages(start, nr_pages, write, force, pages, NULL);
+	return get_user_pages(start, nr_pages, gup_flags, pages, NULL);
 }
 EXPORT_SYMBOL(get_user_pages_locked);
 
 long __get_user_pages_unlocked(struct task_struct *tsk, struct mm_struct *mm,
 			       unsigned long start, unsigned long nr_pages,
-			       int write, int force, struct page **pages,
-			       unsigned int gup_flags)
+			       struct page **pages, unsigned int gup_flags)
 {
 	long ret;
 	down_read(&mm->mmap_sem);

@@ -198,10 +190,10 @@ long __get_user_pages_unlocked(struct task_struct *tsk, struct mm_struct *mm,
 EXPORT_SYMBOL(__get_user_pages_unlocked);
 
 long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
-			     int write, int force, struct page **pages)
+			     struct page **pages, unsigned int gup_flags)
 {
 	return __get_user_pages_unlocked(current, current->mm, start, nr_pages,
-					 write, force, pages, 0);
+					 pages, gup_flags);
 }
 EXPORT_SYMBOL(get_user_pages_unlocked);
 

@@ -1817,9 +1809,10 @@ void filemap_map_pages(struct fault_env *fe,
 EXPORT_SYMBOL(filemap_map_pages);
 
 static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
-		unsigned long addr, void *buf, int len, int write)
+		unsigned long addr, void *buf, int len, unsigned int gup_flags)
 {
 	struct vm_area_struct *vma;
+	int write = gup_flags & FOLL_WRITE;
 
 	down_read(&mm->mmap_sem);
 

@@ -1854,21 +1847,22 @@ static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
  * @addr:	start address to access
 * @buf:	source or destination buffer
 * @len:	number of bytes to transfer
- * @write:	whether the access is a write
+ * @gup_flags:	flags modifying lookup behaviour
 *
 * The caller must hold a reference on @mm.
 */
 int access_remote_vm(struct mm_struct *mm, unsigned long addr,
-		void *buf, int len, int write)
+		void *buf, int len, unsigned int gup_flags)
 {
-	return __access_remote_vm(NULL, mm, addr, buf, len, write);
+	return __access_remote_vm(NULL, mm, addr, buf, len, gup_flags);
 }
 
 /*
 * Access another process' address space.
 * - source/target buffer must be kernel space
 */
-int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write)
+int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len,
+		unsigned int gup_flags)
 {
 	struct mm_struct *mm;
 

@@ -1879,7 +1873,7 @@ int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write)
 	if (!mm)
 		return 0;
 
-	len = __access_remote_vm(tsk, mm, addr, buf, len, write);
+	len = __access_remote_vm(tsk, mm, addr, buf, len, gup_flags);
 
 	mmput(mm);
 	return len;
@@ -88,12 +88,16 @@ static int process_vm_rw_single_vec(unsigned long addr,
 	ssize_t rc = 0;
 	unsigned long max_pages_per_loop = PVM_MAX_KMALLOC_PAGES
 		/ sizeof(struct pages *);
+	unsigned int flags = FOLL_REMOTE;
 
 	/* Work out address and page range required */
 	if (len == 0)
 		return 0;
 	nr_pages = (addr + len - 1) / PAGE_SIZE - addr / PAGE_SIZE + 1;
 
+	if (vm_write)
+		flags |= FOLL_WRITE;
+
 	while (!rc && nr_pages && iov_iter_count(iter)) {
 		int pages = min(nr_pages, max_pages_per_loop);
 		size_t bytes;

@@ -104,8 +108,7 @@ static int process_vm_rw_single_vec(unsigned long addr,
 		 * current/current->mm
 		 */
 		pages = __get_user_pages_unlocked(task, mm, pa, pages,
-						  vm_write, 0, process_pages,
-						  FOLL_REMOTE);
+						  process_pages, flags);
 		if (pages <= 0)
 			return -EFAULT;
 
@@ -283,7 +283,8 @@ EXPORT_SYMBOL_GPL(__get_user_pages_fast);
 int __weak get_user_pages_fast(unsigned long start,
 				int nr_pages, int write, struct page **pages)
 {
-	return get_user_pages_unlocked(start, nr_pages, write, 0, pages);
+	return get_user_pages_unlocked(start, nr_pages, pages,
+				       write ? FOLL_WRITE : 0);
 }
 EXPORT_SYMBOL_GPL(get_user_pages_fast);
 

@@ -623,7 +624,7 @@ int get_cmdline(struct task_struct *task, char *buffer, int buflen)
 	if (len > buflen)
 		len = buflen;
 
-	res = access_process_vm(task, arg_start, buffer, len, 0);
+	res = access_process_vm(task, arg_start, buffer, len, FOLL_FORCE);
 
 	/*
 	 * If the nul at the end of args has been overwritten, then

@@ -638,7 +639,8 @@ int get_cmdline(struct task_struct *task, char *buffer, int buflen)
 			if (len > buflen - res)
 				len = buflen - res;
 			res += access_process_vm(task, env_start,
-						 buffer+res, len, 0);
+						 buffer+res, len,
+						 FOLL_FORCE);
 			res = strnlen(buffer, res);
 		}
 	}
@@ -26,7 +26,7 @@ struct page **ceph_get_direct_page_vector(const void __user *data,
 	while (got < num_pages) {
 		rc = get_user_pages_unlocked(
 		    (unsigned long)data + ((unsigned long)got * PAGE_SIZE),
-		    num_pages - got, write_page, 0, pages + got);
+		    num_pages - got, pages + got, write_page ? FOLL_WRITE : 0);
 		if (rc < 0)
 			break;
 		BUG_ON(rc == 0);
@@ -881,7 +881,7 @@ bool tomoyo_dump_page(struct linux_binprm *bprm, unsigned long pos,
 	 * the execve().
 	 */
 	if (get_user_pages_remote(current, bprm->mm, pos, 1,
-				0, 1, &page, NULL) <= 0)
+				FOLL_FORCE, &page, NULL) <= 0)
 		return false;
 #else
 	page = bprm->page[pos / PAGE_SIZE];
@@ -84,7 +84,8 @@ static void async_pf_execute(struct work_struct *work)
 	 * mm and might be done in another context, so we must
 	 * use FOLL_REMOTE.
 	 */
-	__get_user_pages_unlocked(NULL, mm, addr, 1, 1, 0, NULL, FOLL_REMOTE);
+	__get_user_pages_unlocked(NULL, mm, addr, 1, NULL,
+				  FOLL_WRITE | FOLL_REMOTE);
 
 	kvm_async_page_present_sync(vcpu, apf);
 
@@ -1416,10 +1416,15 @@ static int hva_to_pfn_slow(unsigned long addr, bool *async, bool write_fault,
 		down_read(&current->mm->mmap_sem);
 		npages = get_user_page_nowait(addr, write_fault, page);
 		up_read(&current->mm->mmap_sem);
-	} else
+	} else {
+		unsigned int flags = FOLL_TOUCH | FOLL_HWPOISON;
+
+		if (write_fault)
+			flags |= FOLL_WRITE;
+
 		npages = __get_user_pages_unlocked(current, current->mm, addr, 1,
-						   write_fault, 0, page,
-						   FOLL_TOUCH|FOLL_HWPOISON);
+						   page, flags);
+	}
 	if (npages != 1)
 		return npages;
 