soc: qcom: Enable slabinfo support in minidump

Register a slabinfo entry in the minidump table and dump
slabinfo into it on kernel panic.

Change-Id: I8e2eabf63233bdb0039a7c7c962b980fba2e21ef
Signed-off-by: Vijayanand Jitta <vjitta@codeaurora.org>
Author:    Vijayanand Jitta <vjitta@codeaurora.org>
Date:      2020-07-16 11:30:55 +05:30
Committer: Gerrit - the friendly Code Review server
Commit:    65caff8352 (parent 2ae883be00)

4 changed files with 100 additions and 2 deletions
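For context, the flow this patch follows is: reserve a seq_buf-backed buffer for the "SLABINFO" region at registration time (via md_register_panic_entries() in the first hunk), then fill it from the panic notifier through md_dump_slabinfo(). The sketch below illustrates that general pattern; it is not code from this change, and the helper name register_slabinfo_region(), the static seq_buf, and the exact struct md_region field names (name, virt_addr, phys_addr, size) are assumptions for illustration.

/*
 * Illustrative sketch only, not part of the patch: backing a minidump
 * region with a seq_buf and filling it from the panic path. struct
 * md_region field names are assumed from the declarations in the diff.
 */
#include <linux/io.h>
#include <linux/seq_buf.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <soc/qcom/minidump.h>

#define SLABINFO_BUF_PAGES 8			/* mirrors MD_SLABINFO_PAGES */

static struct seq_buf slabinfo_seq_buf;		/* illustrative, not md_slabinfo_seq_buf */

static int register_slabinfo_region(void)
{
	size_t size = SLABINFO_BUF_PAGES * PAGE_SIZE;
	struct md_region entry = {};
	void *buf;

	buf = kzalloc(size, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	/* Back the seq_buf with the freshly allocated buffer. */
	seq_buf_init(&slabinfo_seq_buf, buf, size);

	/* Describe the buffer so the minidump driver captures it on panic. */
	strscpy(entry.name, "SLABINFO", sizeof(entry.name));
	entry.virt_addr = (u64)(uintptr_t)buf;
	entry.phys_addr = virt_to_phys(buf);
	entry.size = size;

	return msm_minidump_add_region(&entry);
}

/* Panic-notifier side: write slabinfo text into the registered buffer. */
static void fill_slabinfo_region(void)
{
	seq_buf_printf(&slabinfo_seq_buf, "slabinfo - version: 2.1\n");
	/* ...walk the slab caches with seq_buf_printf(), as md_dump_slabinfo() does... */
}

In the patch itself the buffer is shared through the md_slabinfo_seq_buf pointer that md_register_panic_entries() fills in, so the panic notifier only has to check that pointer before calling md_dump_slabinfo().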


@@ -106,6 +106,11 @@ static struct seq_buf *md_cntxt_seq_buf;
struct seq_buf *md_meminfo_seq_buf;
/* Slabinfo */
#define MD_SLABINFO_PAGES 8
struct seq_buf *md_slabinfo_seq_buf;
/* Modules information */
#ifdef CONFIG_MODULES
#define NUM_MD_MODULES 200
@@ -968,6 +973,10 @@ dump_rq:
#endif
if (md_meminfo_seq_buf)
md_dump_meminfo();
if (md_slabinfo_seq_buf)
md_dump_slabinfo();
md_in_oops_handler = false;
return NOTIFY_DONE;
}
@@ -1040,6 +1049,8 @@ static void md_register_panic_data(void)
#endif
md_register_panic_entries(MD_MEMINFO_PAGES, "MEMINFO",
&md_meminfo_seq_buf);
md_register_panic_entries(MD_SLABINFO_PAGES, "SLABINFO",
&md_slabinfo_seq_buf);
}
#ifdef CONFIG_MODULES


@@ -33,6 +33,7 @@ struct md_region {
*/
#if IS_ENABLED(CONFIG_QCOM_MINIDUMP)
extern struct seq_buf *md_meminfo_seq_buf;
extern struct seq_buf *md_slabinfo_seq_buf;
extern int msm_minidump_add_region(const struct md_region *entry);
extern int msm_minidump_remove_region(const struct md_region *entry);
@@ -47,6 +48,7 @@ extern int msm_minidump_update_region(int regno, const struct md_region *entry);
extern bool msm_minidump_enabled(void);
extern void dump_stack_minidump(u64 sp);
extern void md_dump_meminfo(void);
extern void md_dump_slabinfo(void);
#else
static inline int msm_minidump_add_region(const struct md_region *entry)
{
@@ -61,6 +63,7 @@ static inline bool msm_minidump_enabled(void) { return false; }
static inline void dump_stack_minidump(u64 sp) {}
static inline void add_trace_event(char *buf, size_t size) {}
static inline void md_dump_meminfo(void) {}
static inline void md_dump_slabinfo(void) {}
#endif
#ifdef CONFIG_QCOM_MINIDUMP_FTRACE
extern void minidump_add_trace_event(char *buf, size_t size);


@@ -129,6 +129,10 @@
#include "internal.h"
#include "slab.h"
#ifdef CONFIG_QCOM_MINIDUMP_PANIC_DUMP
#include <soc/qcom/minidump.h>
#include <linux/seq_buf.h>
#endif
/*
* DEBUG - 1 for kmem_cache_create() to honour; SLAB_RED_ZONE & SLAB_POISON.
@@ -4084,10 +4088,21 @@ void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *cachep)
unsigned long node_frees = cachep->node_frees;
unsigned long overflows = cachep->node_overflow;
seq_printf(m, " : globalstat %7lu %6lu %5lu %4lu %4lu %4lu %4lu %4lu %4lu",
if (m) {
seq_printf(m, " : globalstat %7lu %6lu %5lu %4lu %4lu %4lu %4lu %4lu %4lu",
allocs, high, grown,
reaped, errors, max_freeable, node_allocs,
node_frees, overflows);
} else {
#ifdef CONFIG_QCOM_MINIDUMP_PANIC_DUMP
if (md_slabinfo_seq_buf)
seq_buf_printf(md_slabinfo_seq_buf,
" : globalstat %7lu %6lu %5lu %4lu %4lu %4lu %4lu %4lu %4lu",
allocs, high, grown,
reaped, errors, max_freeable,
node_allocs, node_frees, overflows);
#endif
}
}
/* cpu stats */
{
@@ -4096,8 +4111,17 @@ void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *cachep)
unsigned long freehit = atomic_read(&cachep->freehit);
unsigned long freemiss = atomic_read(&cachep->freemiss);
seq_printf(m, " : cpustat %6lu %6lu %6lu %6lu",
if (m) {
seq_printf(m, " : cpustat %6lu %6lu %6lu %6lu",
allochit, allocmiss, freehit, freemiss);
} else {
#ifdef CONFIG_QCOM_MINIDUMP_PANIC_DUMP
if (md_slabinfo_seq_buf)
seq_buf_printf(md_slabinfo_seq_buf,
" : cpustat %6lu %6lu %6lu %6lu",
allochit, allocmiss, freehit, freemiss);
#endif
}
}
#endif
}


@@ -27,6 +27,10 @@
#include <trace/events/kmem.h>
#include "slab.h"
#ifdef CONFIG_QCOM_MINIDUMP_PANIC_DUMP
#include <soc/qcom/minidump.h>
#include <linux/seq_buf.h>
#endif
enum slab_state slab_state;
LIST_HEAD(slab_caches);
@@ -1501,6 +1505,62 @@ static int slab_show(struct seq_file *m, void *p)
return 0;
}
#ifdef CONFIG_QCOM_MINIDUMP_PANIC_DUMP
void md_dump_slabinfo(void)
{
struct kmem_cache *s;
struct slabinfo sinfo;
if (!md_slabinfo_seq_buf)
return;
/* print_slabinfo_header */
#ifdef CONFIG_DEBUG_SLAB
seq_buf_printf(md_slabinfo_seq_buf,
"slabinfo - version: 2.1 (statistics)\n");
#else
seq_buf_printf(md_slabinfo_seq_buf,
"slabinfo - version: 2.1\n");
#endif
seq_buf_printf(md_slabinfo_seq_buf,
"# name <active_objs> <num_objs> <objsize> <objperslab> <pagesperslab>");
seq_buf_printf(md_slabinfo_seq_buf,
" : tunables <limit> <batchcount> <sharedfactor>");
seq_buf_printf(md_slabinfo_seq_buf,
" : slabdata <active_slabs> <num_slabs> <sharedavail>");
#ifdef CONFIG_DEBUG_SLAB
seq_buf_printf(md_slabinfo_seq_buf,
" : globalstat <listallocs> <maxobjs> <grown> <reaped> <error> <maxfreeable> <nodeallocs> <remotefrees> <alienoverflow>");
seq_buf_printf(md_slabinfo_seq_buf,
" : cpustat <allochit> <allocmiss> <freehit> <freemiss>");
#endif
seq_buf_printf(md_slabinfo_seq_buf, "\n");
/* Loop through all slabs */
mutex_lock(&slab_mutex);
list_for_each_entry(s, &slab_root_caches, root_caches_node) {
memset(&sinfo, 0, sizeof(sinfo));
get_slabinfo(s, &sinfo);
memcg_accumulate_slabinfo(s, &sinfo);
seq_buf_printf(md_slabinfo_seq_buf,
"%-17s %6lu %6lu %6u %4u %4d",
cache_name(s), sinfo.active_objs, sinfo.num_objs, s->size,
sinfo.objects_per_slab, (1 << sinfo.cache_order));
seq_buf_printf(md_slabinfo_seq_buf, " : tunables %4u %4u %4u",
sinfo.limit, sinfo.batchcount, sinfo.shared);
seq_buf_printf(md_slabinfo_seq_buf,
" : slabdata %6lu %6lu %6lu",
sinfo.active_slabs, sinfo.num_slabs, sinfo.shared_avail);
slabinfo_show_stats(NULL, s);
seq_buf_printf(md_slabinfo_seq_buf, "\n");
}
mutex_unlock(&slab_mutex);
}
#endif
void dump_unreclaimable_slab(void)
{
struct kmem_cache *s, *s2;