android_kernel_xiaomi_sm8350/fs/proc/array.c
Serge E. Hallyn 289f8e27ed capabilities: add bounding set to /proc/self/status
There is currently no way to query the bounding set of another task.  There
appears to be no security reason not to allow this, and Michael Kerrisk points
out the following valid reasons to do so:

* consistency (I can see all of the other per-thread/process sets in
  /proc/.../status)

* debugging -- I could imagine that it would make the job of debugging an
  application that uses capabilities a little simpler.

This patch therefore adds the bounding set to /proc/self/status, right after
the effective set.

Signed-off-by: Serge E. Hallyn <serue@us.ibm.com>
Acked-by: Michael Kerrisk <mtk.manpages@gmail.com>
Acked-by: Andrew G. Morgan <morgan@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2008-05-13 08:02:24 -07:00
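
For context, the new field is read from userspace like any other line in
/proc/<pid>/status. The snippet below is only a hypothetical usage sketch, not
part of the patch: it dumps the capability lines (CapInh, CapPrm, CapEff, and
the new CapBnd) for the current process.

#include <stdio.h>
#include <string.h>

/* Hypothetical usage sketch, not part of the kernel patch. */
int main(void)
{
        FILE *f = fopen("/proc/self/status", "r");
        char line[256];

        if (!f) {
                perror("fopen");
                return 1;
        }
        while (fgets(line, sizeof(line), f)) {
                /* the capability lines all start with "Cap" */
                if (strncmp(line, "Cap", 3) == 0)
                        fputs(line, stdout);
        }
        fclose(f);
        return 0;
}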


/*
 *  linux/fs/proc/array.c
 *
 *  Copyright (C) 1992 by Linus Torvalds
 *  based on ideas by Darren Senn
 *
 *  Fixes:
 *  Michael. K. Johnson  :  stat,statm extensions.
 *                          <johnsonm@stolaf.edu>
 *
 *  Pauline Middelink    :  Made cmdline,envline only break at '\0's, to
 *                          make sure SET_PROCTITLE works. Also removed
 *                          bad '!' which forced address recalculation for
 *                          EVERY character on the current page.
 *                          <middelin@polyware.iaf.nl>
 *
 *  Danny ter Haar       :  added cpuinfo
 *                          <dth@cistron.nl>
 *
 *  Alessandro Rubini    :  profile extension.
 *                          <rubini@ipvvis.unipv.it>
 *
 *  Jeff Tranter         :  added BogoMips field to cpuinfo
 *                          <Jeff_Tranter@Mitel.COM>
 *
 *  Bruno Haible         :  remove 4K limit for the maps file
 *                          <haible@ma2s2.mathematik.uni-karlsruhe.de>
 *
 *  Yves Arrouye         :  remove removal of trailing spaces in get_array.
 *                          <Yves.Arrouye@marin.fdn.fr>
 *
 *  Jerome Forissier     :  added per-CPU time information to /proc/stat
 *                          and /proc/<pid>/cpu extension
 *                          <forissier@isia.cma.fr>
 *                          - Incorporation and non-SMP safe operation
 *                          of forissier patch in 2.1.78 by
 *                          Hans Marcus <crowbar@concepts.nl>
 *
 *  aeb@cwi.nl           :  /proc/partitions
 *
 *  Alan Cox             :  security fixes.
 *                          <Alan.Cox@linux.org>
 *
 *  Al Viro              :  safe handling of mm_struct
 *
 *  Gerhard Wichert      :  added BIGMEM support
 *                          Siemens AG <Gerhard.Wichert@pdb.siemens.de>
 *
 *  Al Viro & Jeff Garzik : moved most of the thing into base.c and
 *                          proc_misc.c. The rest may eventually go into
 *                          base.c too.
 */

#include <linux/types.h>
#include <linux/errno.h>
#include <linux/time.h>
#include <linux/kernel.h>
#include <linux/kernel_stat.h>
#include <linux/tty.h>
#include <linux/string.h>
#include <linux/mman.h>
#include <linux/proc_fs.h>
#include <linux/ioport.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/signal.h>
#include <linux/highmem.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/times.h>
#include <linux/cpuset.h>
#include <linux/rcupdate.h>
#include <linux/delayacct.h>
#include <linux/seq_file.h>
#include <linux/pid_namespace.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
#include "internal.h"

/* Gcc optimizes away "strlen(x)" for constant x */
#define ADDBUF(buffer, string) \
do { memcpy(buffer, string, strlen(string)); \
     buffer += strlen(string); } while (0)

static inline void task_name(struct seq_file *m, struct task_struct *p)
{
        int i;
        char *buf, *end;
        char *name;
        char tcomm[sizeof(p->comm)];

        get_task_comm(tcomm, p);

        seq_printf(m, "Name:\t");
        end = m->buf + m->size;
        buf = m->buf + m->count;
        name = tcomm;
        i = sizeof(tcomm);
        while (i && (buf < end)) {
                unsigned char c = *name;
                name++;
                i--;
                *buf = c;
                if (!c)
                        break;
                if (c == '\\') {
                        buf++;
                        if (buf < end)
                                *buf++ = c;
                        continue;
                }
                if (c == '\n') {
                        *buf++ = '\\';
                        if (buf < end)
                                *buf++ = 'n';
                        continue;
                }
                buf++;
        }
        m->count = buf - m->buf;
        seq_printf(m, "\n");
}

/*
 * The task state array is a strange "bitmap" of
 * reasons to sleep. Thus "running" is zero, and
 * you can test for combinations of others with
 * simple bit tests.
 */
static const char *task_state_array[] = {
        "R (running)",          /*  0 */
        "S (sleeping)",         /*  1 */
        "D (disk sleep)",       /*  2 */
        "T (stopped)",          /*  4 */
        "T (tracing stop)",     /*  8 */
        "Z (zombie)",           /* 16 */
        "X (dead)"              /* 32 */
};

static inline const char *get_task_state(struct task_struct *tsk)
{
        unsigned int state = (tsk->state & TASK_REPORT) | tsk->exit_state;
        const char **p = &task_state_array[0];

        while (state) {
                p++;
                state >>= 1;
        }
        return *p;
}

static inline void task_state(struct seq_file *m, struct pid_namespace *ns,
                              struct pid *pid, struct task_struct *p)
{
        struct group_info *group_info;
        int g;
        struct fdtable *fdt = NULL;
        pid_t ppid, tpid;

        rcu_read_lock();
        ppid = pid_alive(p) ?
                task_tgid_nr_ns(rcu_dereference(p->real_parent), ns) : 0;
        tpid = pid_alive(p) && p->ptrace ?
                task_pid_nr_ns(rcu_dereference(p->parent), ns) : 0;
        seq_printf(m,
                "State:\t%s\n"
                "Tgid:\t%d\n"
                "Pid:\t%d\n"
                "PPid:\t%d\n"
                "TracerPid:\t%d\n"
                "Uid:\t%d\t%d\t%d\t%d\n"
                "Gid:\t%d\t%d\t%d\t%d\n",
                get_task_state(p),
                task_tgid_nr_ns(p, ns),
                pid_nr_ns(pid, ns),
                ppid, tpid,
                p->uid, p->euid, p->suid, p->fsuid,
                p->gid, p->egid, p->sgid, p->fsgid);

        task_lock(p);
        if (p->files)
                fdt = files_fdtable(p->files);
        seq_printf(m,
                "FDSize:\t%d\n"
                "Groups:\t",
                fdt ? fdt->max_fds : 0);
        rcu_read_unlock();

        group_info = p->group_info;
        get_group_info(group_info);
        task_unlock(p);

        for (g = 0; g < min(group_info->ngroups, NGROUPS_SMALL); g++)
                seq_printf(m, "%d ", GROUP_AT(group_info, g));
        put_group_info(group_info);

        seq_printf(m, "\n");
}
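
/*
 * Illustrative example (assuming _NSIG == 64): render_sigset_t() below emits
 * 16 hex digits, highest-numbered signals first, four signals per digit.  A
 * set containing only SIGINT (2) and SIGTERM (15) would be rendered as
 * "0000000000004002".
 */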
static void render_sigset_t(struct seq_file *m, const char *header,
                            sigset_t *set)
{
        int i;

        seq_printf(m, "%s", header);

        i = _NSIG;
        do {
                int x = 0;

                i -= 4;
                if (sigismember(set, i+1)) x |= 1;
                if (sigismember(set, i+2)) x |= 2;
                if (sigismember(set, i+3)) x |= 4;
                if (sigismember(set, i+4)) x |= 8;
                seq_printf(m, "%x", x);
        } while (i >= 4);

        seq_printf(m, "\n");
}

static void collect_sigign_sigcatch(struct task_struct *p, sigset_t *ign,
                                    sigset_t *catch)
{
        struct k_sigaction *k;
        int i;

        k = p->sighand->action;
        for (i = 1; i <= _NSIG; ++i, ++k) {
                if (k->sa.sa_handler == SIG_IGN)
                        sigaddset(ign, i);
                else if (k->sa.sa_handler != SIG_DFL)
                        sigaddset(catch, i);
        }
}

static inline void task_sig(struct seq_file *m, struct task_struct *p)
{
        unsigned long flags;
        sigset_t pending, shpending, blocked, ignored, caught;
        int num_threads = 0;
        unsigned long qsize = 0;
        unsigned long qlim = 0;

        sigemptyset(&pending);
        sigemptyset(&shpending);
        sigemptyset(&blocked);
        sigemptyset(&ignored);
        sigemptyset(&caught);

        rcu_read_lock();
        if (lock_task_sighand(p, &flags)) {
                pending = p->pending.signal;
                shpending = p->signal->shared_pending.signal;
                blocked = p->blocked;
                collect_sigign_sigcatch(p, &ignored, &caught);
                num_threads = atomic_read(&p->signal->count);
                qsize = atomic_read(&p->user->sigpending);
                qlim = p->signal->rlim[RLIMIT_SIGPENDING].rlim_cur;
                unlock_task_sighand(p, &flags);
        }
        rcu_read_unlock();

        seq_printf(m, "Threads:\t%d\n", num_threads);
        seq_printf(m, "SigQ:\t%lu/%lu\n", qsize, qlim);

        /* render them all */
        render_sigset_t(m, "SigPnd:\t", &pending);
        render_sigset_t(m, "ShdPnd:\t", &shpending);
        render_sigset_t(m, "SigBlk:\t", &blocked);
        render_sigset_t(m, "SigIgn:\t", &ignored);
        render_sigset_t(m, "SigCgt:\t", &caught);
}

static void render_cap_t(struct seq_file *m, const char *header,
                         kernel_cap_t *a)
{
        unsigned __capi;

        seq_printf(m, "%s", header);
        CAP_FOR_EACH_U32(__capi) {
                seq_printf(m, "%08x",
                           a->cap[(_LINUX_CAPABILITY_U32S-1) - __capi]);
        }
        seq_printf(m, "\n");
}

static inline void task_cap(struct seq_file *m, struct task_struct *p)
{
        render_cap_t(m, "CapInh:\t", &p->cap_inheritable);
        render_cap_t(m, "CapPrm:\t", &p->cap_permitted);
        render_cap_t(m, "CapEff:\t", &p->cap_effective);
        /* bounding set; this patch adds it right after the effective set */
        render_cap_t(m, "CapBnd:\t", &p->cap_bset);
}

static inline void task_context_switch_counts(struct seq_file *m,
                                               struct task_struct *p)
{
        seq_printf(m,   "voluntary_ctxt_switches:\t%lu\n"
                        "nonvoluntary_ctxt_switches:\t%lu\n",
                        p->nvcsw,
                        p->nivcsw);
}

int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
                    struct pid *pid, struct task_struct *task)
{
        struct mm_struct *mm = get_task_mm(task);

        task_name(m, task);
        task_state(m, ns, pid, task);

        if (mm) {
                task_mem(m, mm);
                mmput(mm);
        }
        task_sig(m, task);
        task_cap(m, task);
        cpuset_task_status_allowed(m, task);
#if defined(CONFIG_S390)
        task_show_regs(m, task);
#endif
        task_context_switch_counts(m, task);
        return 0;
}

/*
 * Use precise platform statistics if available:
 */
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
static cputime_t task_utime(struct task_struct *p)
{
        return p->utime;
}

static cputime_t task_stime(struct task_struct *p)
{
        return p->stime;
}
#else
static cputime_t task_utime(struct task_struct *p)
{
        clock_t utime = cputime_to_clock_t(p->utime),
                total = utime + cputime_to_clock_t(p->stime);
        u64 temp;

        /*
         * Use CFS's precise accounting:
         */
        temp = (u64)nsec_to_clock_t(p->se.sum_exec_runtime);

        if (total) {
                temp *= utime;
                do_div(temp, total);
        }
        utime = (clock_t)temp;

        p->prev_utime = max(p->prev_utime, clock_t_to_cputime(utime));
        return p->prev_utime;
}

static cputime_t task_stime(struct task_struct *p)
{
        clock_t stime;

        /*
         * Use CFS's precise accounting. (we subtract utime from
         * the total, to make sure the total observed by userspace
         * grows monotonically - apps rely on that):
         */
        stime = nsec_to_clock_t(p->se.sum_exec_runtime) -
                        cputime_to_clock_t(task_utime(p));

        if (stime >= 0)
                p->prev_stime = max(p->prev_stime, clock_t_to_cputime(stime));

        return p->prev_stime;
}
#endif

static cputime_t task_gtime(struct task_struct *p)
{
        return p->gtime;
}

static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
                        struct pid *pid, struct task_struct *task, int whole)
{
        unsigned long vsize, eip, esp, wchan = ~0UL;
        long priority, nice;
        int tty_pgrp = -1, tty_nr = 0;
        sigset_t sigign, sigcatch;
        char state;
        pid_t ppid = 0, pgid = -1, sid = -1;
        int num_threads = 0;
        struct mm_struct *mm;
        unsigned long long start_time;
        unsigned long cmin_flt = 0, cmaj_flt = 0;
        unsigned long min_flt = 0, maj_flt = 0;
        cputime_t cutime, cstime, utime, stime;
        cputime_t cgtime, gtime;
        unsigned long rsslim = 0;
        char tcomm[sizeof(task->comm)];
        unsigned long flags;

        state = *get_task_state(task);
        vsize = eip = esp = 0;
        mm = get_task_mm(task);
        if (mm) {
                vsize = task_vsize(mm);
                eip = KSTK_EIP(task);
                esp = KSTK_ESP(task);
        }

        get_task_comm(tcomm, task);

        sigemptyset(&sigign);
        sigemptyset(&sigcatch);
        cutime = cstime = utime = stime = cputime_zero;
        cgtime = gtime = cputime_zero;

        if (lock_task_sighand(task, &flags)) {
                struct signal_struct *sig = task->signal;

                if (sig->tty) {
                        struct pid *pgrp = tty_get_pgrp(sig->tty);
                        tty_pgrp = pid_nr_ns(pgrp, ns);
                        put_pid(pgrp);
                        tty_nr = new_encode_dev(tty_devnum(sig->tty));
                }

                num_threads = atomic_read(&sig->count);
                collect_sigign_sigcatch(task, &sigign, &sigcatch);

                cmin_flt = sig->cmin_flt;
                cmaj_flt = sig->cmaj_flt;
                cutime = sig->cutime;
                cstime = sig->cstime;
                cgtime = sig->cgtime;
                rsslim = sig->rlim[RLIMIT_RSS].rlim_cur;

                /* add up live thread stats at the group level */
                if (whole) {
                        struct task_struct *t = task;
                        do {
                                min_flt += t->min_flt;
                                maj_flt += t->maj_flt;
                                utime = cputime_add(utime, task_utime(t));
                                stime = cputime_add(stime, task_stime(t));
                                gtime = cputime_add(gtime, task_gtime(t));
                                t = next_thread(t);
                        } while (t != task);

                        min_flt += sig->min_flt;
                        maj_flt += sig->maj_flt;
                        utime = cputime_add(utime, sig->utime);
                        stime = cputime_add(stime, sig->stime);
                        gtime = cputime_add(gtime, sig->gtime);
                }

                sid = task_session_nr_ns(task, ns);
                ppid = task_tgid_nr_ns(task->real_parent, ns);
                pgid = task_pgrp_nr_ns(task, ns);

                unlock_task_sighand(task, &flags);
        }

        if (!whole || num_threads < 2)
                wchan = get_wchan(task);
        if (!whole) {
                min_flt = task->min_flt;
                maj_flt = task->maj_flt;
                utime = task_utime(task);
                stime = task_stime(task);
                gtime = task_gtime(task);
        }

        /* scale priority and nice values from timeslices to -20..20 */
        /* to make it look like a "normal" Unix priority/nice value  */
        priority = task_prio(task);
        nice = task_nice(task);

        /* Temporary variable needed for gcc-2.96 */
        /* convert timespec -> nsec*/
        start_time =
                (unsigned long long)task->real_start_time.tv_sec * NSEC_PER_SEC
                                + task->real_start_time.tv_nsec;
        /* convert nsec -> ticks */
        start_time = nsec_to_clock_t(start_time);

        seq_printf(m, "%d (%s) %c %d %d %d %d %d %u %lu \
%lu %lu %lu %lu %lu %ld %ld %ld %ld %d 0 %llu %lu %ld %lu %lu %lu %lu %lu \
%lu %lu %lu %lu %lu %lu %lu %lu %d %d %u %u %llu %lu %ld\n",
                pid_nr_ns(pid, ns),
                tcomm,
                state,
                ppid,
                pgid,
                sid,
                tty_nr,
                tty_pgrp,
                task->flags,
                min_flt,
                cmin_flt,
                maj_flt,
                cmaj_flt,
                cputime_to_clock_t(utime),
                cputime_to_clock_t(stime),
                cputime_to_clock_t(cutime),
                cputime_to_clock_t(cstime),
                priority,
                nice,
                num_threads,
                start_time,
                vsize,
                mm ? get_mm_rss(mm) : 0,
                rsslim,
                mm ? mm->start_code : 0,
                mm ? mm->end_code : 0,
                mm ? mm->start_stack : 0,
                esp,
                eip,
                /* The signal information here is obsolete.
                 * It must be decimal for Linux 2.0 compatibility.
                 * Use /proc/#/status for real-time signals.
                 */
                task->pending.signal.sig[0] & 0x7fffffffUL,
                task->blocked.sig[0] & 0x7fffffffUL,
                sigign      .sig[0] & 0x7fffffffUL,
                sigcatch    .sig[0] & 0x7fffffffUL,
                wchan,
                0UL,
                0UL,
                task->exit_signal,
                task_cpu(task),
                task->rt_priority,
                task->policy,
                (unsigned long long)delayacct_blkio_ticks(task),
                cputime_to_clock_t(gtime),
                cputime_to_clock_t(cgtime));

        if (mm)
                mmput(mm);

        return 0;
}

int proc_tid_stat(struct seq_file *m, struct pid_namespace *ns,
                  struct pid *pid, struct task_struct *task)
{
        return do_task_stat(m, ns, pid, task, 0);
}

int proc_tgid_stat(struct seq_file *m, struct pid_namespace *ns,
                   struct pid *pid, struct task_struct *task)
{
        return do_task_stat(m, ns, pid, task, 1);
}

int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
                   struct pid *pid, struct task_struct *task)
{
        int size = 0, resident = 0, shared = 0, text = 0, lib = 0, data = 0;
        struct mm_struct *mm = get_task_mm(task);

        if (mm) {
                size = task_statm(mm, &shared, &text, &data, &resident);
                mmput(mm);
        }

        seq_printf(m, "%d %d %d %d %d %d %d\n",
                   size, resident, shared, text, lib, data, 0);

        return 0;
}