FROMLIST: scs: add accounting

This change adds accounting for the memory allocated for shadow stacks.

Bug: 145210207
Change-Id: Iee94c22abefcabb63a3bcd4db8ba952130f30a82
(am from https://lore.kernel.org/patchwork/patch/1149055/)
Reviewed-by: Kees Cook <keescook@chromium.org>
Signed-off-by: Sami Tolvanen <samitolvanen@google.com>
Author: Sami Tolvanen, 2018-05-23 13:27:08 -07:00
parent ff9de73a0a
commit 7f498a4b7b
6 changed files with 42 additions and 0 deletions

drivers/base/node.c

@@ -415,6 +415,9 @@ static ssize_t node_read_meminfo(struct device *dev,
                        "Node %d AnonPages: %8lu kB\n"
                        "Node %d Shmem: %8lu kB\n"
                        "Node %d KernelStack: %8lu kB\n"
+#ifdef CONFIG_SHADOW_CALL_STACK
+                       "Node %d ShadowCallStack:%8lu kB\n"
+#endif
                        "Node %d PageTables: %8lu kB\n"
                        "Node %d NFS_Unstable: %8lu kB\n"
                        "Node %d Bounce: %8lu kB\n"
@@ -438,6 +441,9 @@ static ssize_t node_read_meminfo(struct device *dev,
                        nid, K(node_page_state(pgdat, NR_ANON_MAPPED)),
                        nid, K(i.sharedram),
                        nid, sum_zone_node_page_state(nid, NR_KERNEL_STACK_KB),
+#ifdef CONFIG_SHADOW_CALL_STACK
+                       nid, sum_zone_node_page_state(nid, NR_KERNEL_SCS_BYTES) / 1024,
+#endif
                        nid, K(sum_zone_node_page_state(nid, NR_PAGETABLE)),
                        nid, K(node_page_state(pgdat, NR_UNSTABLE_NFS)),
                        nid, K(sum_zone_node_page_state(nid, NR_BOUNCE)),

fs/proc/meminfo.c

@@ -103,6 +103,10 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
        show_val_kb(m, "SUnreclaim: ", sunreclaim);
        seq_printf(m, "KernelStack: %8lu kB\n",
                   global_zone_page_state(NR_KERNEL_STACK_KB));
+#ifdef CONFIG_SHADOW_CALL_STACK
+       seq_printf(m, "ShadowCallStack:%8lu kB\n",
+                  global_zone_page_state(NR_KERNEL_SCS_BYTES) / 1024);
+#endif
        show_val_kb(m, "PageTables: ",
                    global_zone_page_state(NR_PAGETABLE));

include/linux/mmzone.h

@@ -200,6 +200,9 @@ enum zone_stat_item {
        NR_MLOCK,               /* mlock()ed pages found and moved off LRU */
        NR_PAGETABLE,           /* used for pagetables */
        NR_KERNEL_STACK_KB,     /* measured in KiB */
+#if IS_ENABLED(CONFIG_SHADOW_CALL_STACK)
+       NR_KERNEL_SCS_BYTES,    /* measured in bytes */
+#endif
        /* Second 128 byte cacheline */
        NR_BOUNCE,
 #if IS_ENABLED(CONFIG_ZSMALLOC)

kernel/scs.c

@@ -12,6 +12,7 @@
 #include <linux/scs.h>
 #include <linux/slab.h>
 #include <linux/vmalloc.h>
+#include <linux/vmstat.h>
 #include <asm/scs.h>

 static inline void *__scs_base(struct task_struct *tsk)
@@ -89,6 +90,11 @@ static void scs_free(void *s)
        vfree_atomic(s);
 }

+static struct page *__scs_page(struct task_struct *tsk)
+{
+       return vmalloc_to_page(__scs_base(tsk));
+}
+
 static int scs_cleanup(unsigned int cpu)
 {
        int i;
@@ -135,6 +141,11 @@ static inline void scs_free(void *s)
        kmem_cache_free(scs_cache, s);
 }

+static struct page *__scs_page(struct task_struct *tsk)
+{
+       return virt_to_page(__scs_base(tsk));
+}
+
 void __init scs_init(void)
 {
        scs_cache = kmem_cache_create("scs_cache", SCS_SIZE, SCS_SIZE,
@@ -153,6 +164,12 @@ void scs_task_reset(struct task_struct *tsk)
        task_set_scs(tsk, __scs_base(tsk));
 }

+static void scs_account(struct task_struct *tsk, int account)
+{
+       mod_zone_page_state(page_zone(__scs_page(tsk)), NR_KERNEL_SCS_BYTES,
+                           account * SCS_SIZE);
+}
+
 int scs_prepare(struct task_struct *tsk, int node)
 {
        void *s;
@@ -162,6 +179,8 @@ int scs_prepare(struct task_struct *tsk, int node)
                return -ENOMEM;

        task_set_scs(tsk, s);
+       scs_account(tsk, 1);
+
        return 0;
 }
@@ -182,6 +201,7 @@ void scs_release(struct task_struct *tsk)
        WARN_ON(scs_corrupted(tsk));

+       scs_account(tsk, -1);
        task_set_scs(tsk, NULL);
        scs_free(s);
 }

mm/page_alloc.c

@@ -5376,6 +5376,9 @@ void show_free_areas(unsigned int filter, nodemask_t *nodemask)
                        " managed:%lukB"
                        " mlocked:%lukB"
                        " kernel_stack:%lukB"
+#ifdef CONFIG_SHADOW_CALL_STACK
+                       " shadow_call_stack:%lukB"
+#endif
                        " pagetables:%lukB"
                        " bounce:%lukB"
                        " free_pcp:%lukB"
@@ -5397,6 +5400,9 @@ void show_free_areas(unsigned int filter, nodemask_t *nodemask)
                        K(zone_managed_pages(zone)),
                        K(zone_page_state(zone, NR_MLOCK)),
                        zone_page_state(zone, NR_KERNEL_STACK_KB),
+#ifdef CONFIG_SHADOW_CALL_STACK
+                       zone_page_state(zone, NR_KERNEL_SCS_BYTES) / 1024,
+#endif
                        K(zone_page_state(zone, NR_PAGETABLE)),
                        K(zone_page_state(zone, NR_BOUNCE)),
                        K(free_pcp),

mm/vmstat.c

@@ -1118,6 +1118,9 @@ const char * const vmstat_text[] = {
        "nr_mlock",
        "nr_page_table_pages",
        "nr_kernel_stack",
+#if IS_ENABLED(CONFIG_SHADOW_CALL_STACK)
+       "nr_shadow_call_stack_bytes",
+#endif
        "nr_bounce",
 #if IS_ENABLED(CONFIG_ZSMALLOC)
        "nr_zspages",