commit 295cf156231ca3f9e3a66bde7fab5e09c41835e0 upstream.

Al reminds us that the usercopy API must only return complete failure
if absolutely nothing could be copied. Currently, if userspace does
something silly like giving us an unaligned pointer to Device memory,
or a size which overruns MTE tag bounds, we may fail to honour that
requirement when faulting on a multi-byte access even though a smaller
access could have succeeded.

Add a mitigation to the fixup routines to fall back to a single-byte
copy if we faulted on a larger access before anything has been written
to the destination, to guarantee making *some* forward progress. We
needn't be too concerned about the overall performance since this
should only occur when callers are doing something a bit dodgy in the
first place. Particularly broken userspace might still be able to
trick generic_perform_write() into an infinite loop by targeting
write() at an mmap() of some read-only device register where the
fault-in load succeeds but any store synchronously aborts such that
copy_to_user() is genuinely unable to make progress, but, well, don't
do that...

Cc: stable@vger.kernel.org
Reported-by: Chen Huang <chenhuang5@huawei.com>
Suggested-by: Al Viro <viro@zeniv.linux.org.uk>
Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Robin Murphy <robin.murphy@arm.com>
Link: https://lore.kernel.org/r/dc03d5c675731a1f24a62417dba5429ad744234e.1626098433.git.robin.murphy@arm.com
Signed-off-by: Will Deacon <will@kernel.org>
Signed-off-by: Chen Huang <chenhuang5@huawei.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
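To see why the forward-progress guarantee matters, consider the shape of a
progress-driven retry loop such as generic_perform_write() in mm/filemap.c:
success is judged purely by the number of bytes the copy reports as done. A
minimal C sketch of that pattern (the helper name write_all is hypothetical,
and fault_in_pages_writeable() stands in for the fault-in step):

#include <linux/uaccess.h>
#include <linux/pagemap.h>
#include <linux/errno.h>

/*
 * Hypothetical sketch of a progress-driven copy loop. If copy_to_user()
 * could move at least one byte but reported total failure instead, the
 * fault-in would succeed, copied would stay zero, and the loop would
 * retry forever.
 */
static ssize_t write_all(void __user *dst, const void *src, size_t bytes)
{
	size_t done = 0;

	while (done < bytes) {
		size_t left = copy_to_user(dst + done, src + done,
					   bytes - done);
		size_t copied = (bytes - done) - left;

		if (copied)
			done += copied;
		else if (fault_in_pages_writeable((char __user *)dst + done, 1))
			return done ? done : -EFAULT;
		/* else: fault-in succeeded, retry the copy */
	}
	return done;
}

As the commit message notes, if the fault-in succeeds but every store
synchronously aborts, even the single-byte fallback cannot make such a
loop terminate.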
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012 ARM Ltd.
 */

#include <linux/linkage.h>

#include <asm/asm-uaccess.h>
#include <asm/assembler.h>
#include <asm/cache.h>
/*
 * Copy to user space from a kernel buffer (alignment handled by the hardware)
 *
 * Parameters:
 *	x0 - to
 *	x1 - from
 *	x2 - n
 * Returns:
 *	x0 - bytes not copied
 */
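/*
 * These macros are consumed by copy_template.S below. Loads read the
 * kernel buffer, so the plain instructions are used; stores target user
 * space, so they go through uao_user_alternative, which patches between
 * the privileged (e.g. strb) and unprivileged (e.g. sttrb) forms and
 * registers the given 9997f/9998f label as the exception fixup. The
 * byte store faults straight to 9998f since no smaller retry exists.
 */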
	.macro ldrb1 reg, ptr, val
	ldrb  \reg, [\ptr], \val
	.endm

	.macro strb1 reg, ptr, val
	uao_user_alternative 9998f, strb, sttrb, \reg, \ptr, \val
	.endm

	.macro ldrh1 reg, ptr, val
	ldrh  \reg, [\ptr], \val
	.endm

	.macro strh1 reg, ptr, val
	uao_user_alternative 9997f, strh, sttrh, \reg, \ptr, \val
	.endm

	.macro ldr1 reg, ptr, val
	ldr \reg, [\ptr], \val
	.endm

	.macro str1 reg, ptr, val
	uao_user_alternative 9997f, str, sttr, \reg, \ptr, \val
	.endm

	.macro ldp1 reg1, reg2, ptr, val
	ldp \reg1, \reg2, [\ptr], \val
	.endm

	.macro stp1 reg1, reg2, ptr, val
	uao_stp 9997f, \reg1, \reg2, \ptr, \val
	.endm

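/*
 * end marks one past the last destination byte, and srcin preserves the
 * original source pointer: x1 is consumed by the copy loop, but the
 * fixup path below still needs the first source byte for its
 * single-byte retry.
 */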
end	.req	x5
srcin	.req	x15
SYM_FUNC_START(__arch_copy_to_user)
	add	end, x0, x2
	mov	srcin, x1
#include "copy_template.S"
	mov	x0, #0
	ret
SYM_FUNC_END(__arch_copy_to_user)
EXPORT_SYMBOL(__arch_copy_to_user)

	.section .fixup,"ax"
	.align	2
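/*
 * 9997 is reached when a multi-byte user store faults. If nothing has
 * been written yet (dst == dstin), fall back to a single unprivileged
 * byte store so that *some* forward progress is made whenever any
 * access at all could succeed. 9998 is the common exit, reporting how
 * many bytes remain uncopied.
 */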
9997:	cmp	dst, dstin
	b.ne	9998f
	// Before being absolutely sure we couldn't copy anything, try harder
	ldrb	tmp1w, [srcin]
USER(9998f, sttrb tmp1w, [dst])
	add	dst, dst, #1
9998:	sub	x0, end, dst			// bytes not copied
	ret
	.previous