e83c40f7bb
commit 61e3acd8c693a14fc69b824cb5b08d02cb90a6e7 upstream.
The KUAP implementation adds calls in clear_user() to enable and
disable access to userspace memory. However, it doesn't add these to
__clear_user(), which is used in the ptrace regset code.
As there's only one direct user of __clear_user() (the regset code),
and the time taken to set the AMR for KUAP purposes is going to
dominate the cost of a quick access_ok(), there's not much point
having a separate path.
Rename __clear_user() to __arch_clear_user(), and make __clear_user()
just call clear_user().
Reported-by: syzbot+f25ecf4b2982d8c7a640@syzkaller-ppc64.appspotmail.com
Reported-by: Daniel Axtens <dja@axtens.net>
Suggested-by: Michael Ellerman <mpe@ellerman.id.au>
Fixes: de78a9c42a ("powerpc: Add a framework for Kernel Userspace Access Protection")
Signed-off-by: Andrew Donnellan <ajd@linux.ibm.com>
[mpe: Use __arch_clear_user() for the asm version like arm64 & nds32]
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/20191209132221.15328-1-ajd@linux.ibm.com
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
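
The header-side change is not reproduced here, but the commit message pins down its shape: clear_user() keeps the access_ok() check and the KUAP enable/disable around the renamed assembly routine, and __clear_user() just forwards to clear_user(). A minimal sketch of that arrangement follows; the KUAP helper names (allow_write_to_user()/prevent_write_to_user()) and the might_fault() call are assumptions about the surrounding powerpc uaccess code, not text quoted from the patch.

extern unsigned long __arch_clear_user(void __user *addr, unsigned long size);

static inline unsigned long clear_user(void __user *addr, unsigned long size)
{
        unsigned long ret = size;

        might_fault();
        if (likely(access_ok(addr, size))) {
                allow_write_to_user(addr, size);        /* open the KUAP window (AMR update) */
                ret = __arch_clear_user(addr, size);
                prevent_write_to_user(addr, size);      /* close it again */
        }
        return ret;     /* number of bytes NOT cleared */
}

/* Only direct caller is the ptrace regset code; the access_ok() above is
 * cheap next to the AMR update, so no separate fast path is kept. */
static inline unsigned long __clear_user(void __user *addr, unsigned long size)
{
        return clear_user(addr, size);
}

The 32-bit assembly implementation being renamed follows.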
/* SPDX-License-Identifier: GPL-2.0 */

/*
 * String handling functions for PowerPC32
 *
 * Copyright (C) 1996 Paul Mackerras.
 *
 */

#include <asm/ppc_asm.h>
#include <asm/export.h>
#include <asm/cache.h>

        .text

CACHELINE_BYTES = L1_CACHE_BYTES
LG_CACHELINE_BYTES = L1_CACHE_SHIFT
CACHELINE_MASK = (L1_CACHE_BYTES-1)

_GLOBAL(__arch_clear_user)
/*
 * Use dcbz on the complete cache lines in the destination
 * to set them to zero. This requires that the destination
 * area is cacheable.
 */
        cmplwi  cr0, r4, 4
        mr      r10, r3
        li      r3, 0
        blt     7f

11:     stw     r3, 0(r10)
        beqlr
        andi.   r0, r10, 3
        add     r11, r0, r4
        subf    r6, r0, r10

        clrlwi  r7, r6, 32 - LG_CACHELINE_BYTES
        add     r8, r7, r11
        srwi    r9, r8, LG_CACHELINE_BYTES
        addic.  r9, r9, -1      /* total number of complete cachelines */
        ble     2f
        xori    r0, r7, CACHELINE_MASK & ~3
        srwi.   r0, r0, 2
        beq     3f
        mtctr   r0
4:      stwu    r3, 4(r6)
        bdnz    4b
3:      mtctr   r9
        li      r7, 4
10:     dcbz    r7, r6
        addi    r6, r6, CACHELINE_BYTES
        bdnz    10b
        clrlwi  r11, r8, 32 - LG_CACHELINE_BYTES
        addi    r11, r11, 4

2:      srwi    r0, r11, 2
        mtctr   r0
        bdz     6f
1:      stwu    r3, 4(r6)
        bdnz    1b
6:      andi.   r11, r11, 3
        beqlr
        mtctr   r11
        addi    r6, r6, 3
8:      stbu    r3, 1(r6)
        bdnz    8b
        blr

7:      cmpwi   cr0, r4, 0
        beqlr
        mtctr   r4
        addi    r6, r10, -1
9:      stbu    r3, 1(r6)
        bdnz    9b
        blr

90:     mr      r3, r4
        blr
91:     add     r3, r10, r4
        subf    r3, r6, r3
        blr

        EX_TABLE(11b, 90b)
        EX_TABLE(4b, 91b)
        EX_TABLE(10b, 91b)
        EX_TABLE(1b, 91b)
        EX_TABLE(8b, 91b)
        EX_TABLE(9b, 91b)

EXPORT_SYMBOL(__arch_clear_user)
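
As the comment at the top of __arch_clear_user() notes, the fast path leans on dcbz to zero whole cache lines, which is only safe when the destination is cacheable. Ignoring the fault handling, the control flow is a conventional head/body/tail split: the initial stw plus the word loop at label 4 zero up to the next cache-line boundary, the dcbz loop at label 10 clears whole lines, and labels 1 and 8 mop up the trailing words and bytes. A rough structural rendering in C, purely illustrative and not a line-for-line translation (clear_range() and the fixed 32-byte line size are made up for the sketch):

#include <stddef.h>
#include <stdint.h>
#include <string.h>

#define CACHELINE_BYTES 32u     /* stands in for L1_CACHE_BYTES; assumption for the sketch */

static void clear_range(uint8_t *dst, size_t len)
{
        if (len < 4) {                  /* tiny case: the byte loop at label 7 */
                memset(dst, 0, len);
                return;
        }

        /* head: zero up to the next cache-line boundary */
        size_t head = -(uintptr_t)dst & (CACHELINE_BYTES - 1);
        if (head > len)
                head = len;
        memset(dst, 0, head);
        dst += head;
        len -= head;

        /* body: whole cache lines, one per iteration (dcbz in the asm) */
        while (len >= CACHELINE_BYTES) {
                memset(dst, 0, CACHELINE_BYTES);
                dst += CACHELINE_BYTES;
                len -= CACHELINE_BYTES;
        }

        /* tail: remaining words and bytes */
        memset(dst, 0, len);
}

What the sketch drops is the point of the EX_TABLE entries: each store that can fault is paired with the fixup code at labels 90/91, which works out how many bytes were left uncleared and returns that count to the caller.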