sched/debug: Remove mpol_get/put and task_lock/unlock from sched_show_numa

[ Upstream commit 28c988c3ec29db74a1dda631b18785958d57df4f ]

The older format of /proc/pid/sched printed home node info which
required the mempolicy and task lock around mpol_get(). However
the format has changed since then and there is no need for
sched_show_numa() any more to have the mempolicy argument,
associated mpol_get/put and task_lock/unlock. Remove them.

Fixes: 397f2378f1 ("sched/numa: Fix numa balancing stats in /proc/pid/sched")
Signed-off-by: Bharata B Rao <bharata@amd.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Srikar Dronamraju <srikar@linux.vnet.ibm.com>
Acked-by: Mel Gorman <mgorman@suse.de>
Link: https://lore.kernel.org/r/20220118050515.2973-1-bharata@amd.com
Signed-off-by: Sasha Levin <sashal@kernel.org>
This commit is contained in:
Bharata B Rao 2022-01-18 10:35:15 +05:30 committed by Greg Kroah-Hartman
parent 715a343172
commit 841f5b235d

View File

@@ -847,25 +847,15 @@ void print_numa_stats(struct seq_file *m, int node, unsigned long tsf,
 static void sched_show_numa(struct task_struct *p, struct seq_file *m)
 {
 #ifdef CONFIG_NUMA_BALANCING
-	struct mempolicy *pol;
-
 	if (p->mm)
 		P(mm->numa_scan_seq);
 
-	task_lock(p);
-	pol = p->mempolicy;
-	if (pol && !(pol->flags & MPOL_F_MORON))
-		pol = NULL;
-	mpol_get(pol);
-	task_unlock(p);
-
 	P(numa_pages_migrated);
 	P(numa_preferred_nid);
 	P(total_numa_faults);
 	SEQ_printf(m, "current_node=%d, numa_group_id=%d\n",
 			task_node(p), task_numa_group_id(p));
 	show_numa_stats(p, m);
-	mpol_put(pol);
 #endif
 }