Merge tag 'ASB-2024-02-05_11-5.4' of https://android.googlesource.com/kernel/common into android13-5.4-lahaina

https://source.android.com/docs/security/bulletin/2024-02-01

* tag 'ASB-2024-02-05_11-5.4' of https://android.googlesource.com/kernel/common:
  Reapply "perf: Fix perf_event_validate_size()"
  UPSTREAM: ida: Fix crash in ida_free when the bitmap is empty
  UPSTREAM: netfilter: nf_tables: Reject tables of unsupported family
  Reapply "perf: Disallow mis-matched inherited group reads"
  UPSTREAM: ath10k: Get rid of "per_ce_irq" hw param
  UPSTREAM: ath10k: Keep track of which interrupts fired, don't poll them
  UPSTREAM: ath10k: Add interrupt summary based CE processing
  UPSTREAM: ath10k: Wait until copy complete is actually done before completing
  FROMGIT: clk: qcom: gcc-sdm845: Add soft dependency on rpmhpd

 Conflicts:
	kernel/events/core.c
	net/netfilter/nf_tables_api.c

Change-Id: Id54c39eddcf408eff81a27a89621447e6b5f0e8e
Bruno Martins, 2024-02-08 19:04:53 +00:00 (committed by Michael Bestas)
commit a74c928fd6
GPG Key ID: CC95044519BE6669 (no known key found for this signature in database)
4 changed files with 81175 additions and 84593 deletions

File diff suppressed because it is too large.

include/linux/perf_event.h

@@ -608,6 +608,9 @@ struct perf_event {
         /* The cumulative AND of all event_caps for events in this group. */
         int                             group_caps;
 
+#ifndef __GENKSYMS__
+        unsigned int                    group_generation;
+#endif
         struct perf_event               *group_leader;
         struct pmu                      *pmu;
         void                            *pmu_private;
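
The #ifndef __GENKSYMS__ guard around the new group_generation member is the usual Android common-kernel trick for adding a field without disturbing the CRCs that genksyms computes for CONFIG_MODVERSIONS, so the recorded symbol versions stay the same. A minimal sketch of the same pattern follows; the struct and field names are made up for illustration and are not taken from this tree.

/*
 * genksyms defines __GENKSYMS__ while it parses sources to compute symbol
 * CRCs, so a member hidden behind this guard is invisible to that pass and
 * the recorded type signature does not change.
 */
struct example_state {
        int             existing_field;
#ifndef __GENKSYMS__
        unsigned int    new_counter;    /* new member, hidden from genksyms */
#endif
        void            *existing_ptr;
};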

kernel/events/core.c

@@ -1732,28 +1732,31 @@ static inline void perf_event__state_init(struct perf_event *event)
                                               PERF_EVENT_STATE_INACTIVE;
 }
 
-static void __perf_event_read_size(struct perf_event *event, int nr_siblings)
+static int __perf_event_read_size(u64 read_format, int nr_siblings)
 {
         int entry = sizeof(u64); /* value */
         int size = 0;
         int nr = 1;
 
-        if (event->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
+        if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
                 size += sizeof(u64);
 
-        if (event->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
+        if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
                 size += sizeof(u64);
 
-        if (event->attr.read_format & PERF_FORMAT_ID)
+        if (read_format & PERF_FORMAT_ID)
                 entry += sizeof(u64);
 
-        if (event->attr.read_format & PERF_FORMAT_GROUP) {
+        if (read_format & PERF_FORMAT_GROUP) {
                 nr += nr_siblings;
                 size += sizeof(u64);
         }
 
-        size += entry * nr;
-        event->read_size = size;
+        /*
+         * Since perf_event_validate_size() limits this to 16k and inhibits
+         * adding more siblings, this will never overflow.
+         */
+        return size + nr * entry;
 }
 
 static void __perf_event_header_size(struct perf_event *event, u64 sample_type)
@@ -1794,8 +1797,9 @@ static void __perf_event_header_size(struct perf_event *event, u64 sample_type)
  */
 static void perf_event__header_size(struct perf_event *event)
 {
-        __perf_event_read_size(event,
-                               event->group_leader->nr_siblings);
+        event->read_size =
+                __perf_event_read_size(event->attr.read_format,
+                                       event->group_leader->nr_siblings);
         __perf_event_header_size(event, event->attr.sample_type);
 }
@@ -1826,24 +1830,35 @@ static void perf_event__id_header_size(struct perf_event *event)
         event->id_header_size = size;
 }
 
+/*
+ * Check that adding an event to the group does not result in anybody
+ * overflowing the 64k event limit imposed by the output buffer.
+ *
+ * Specifically, check that the read_size for the event does not exceed 16k,
+ * read_size being the one term that grows with groups size. Since read_size
+ * depends on per-event read_format, also (re)check the existing events.
+ *
+ * This leaves 48k for the constant size fields and things like callchains,
+ * branch stacks and register sets.
+ */
 static bool perf_event_validate_size(struct perf_event *event)
 {
-        /*
-         * The values computed here will be over-written when we actually
-         * attach the event.
-         */
-        __perf_event_read_size(event, event->group_leader->nr_siblings + 1);
-        __perf_event_header_size(event, event->attr.sample_type & ~PERF_SAMPLE_READ);
-        perf_event__id_header_size(event);
+        struct perf_event *sibling, *group_leader = event->group_leader;
 
-        /*
-         * Sum the lot; should not exceed the 64k limit we have on records.
-         * Conservative limit to allow for callchains and other variable fields.
-         */
-        if (event->read_size + event->header_size +
-            event->id_header_size + sizeof(struct perf_event_header) >= 16*1024)
+        if (__perf_event_read_size(event->attr.read_format,
+                                   group_leader->nr_siblings + 1) > 16*1024)
                 return false;
 
+        if (__perf_event_read_size(group_leader->attr.read_format,
+                                   group_leader->nr_siblings + 1) > 16*1024)
+                return false;
+
+        for_each_sibling_event(sibling, group_leader) {
+                if (__perf_event_read_size(sibling->attr.read_format,
+                                           group_leader->nr_siblings + 1) > 16*1024)
+                        return false;
+        }
+
         return true;
 }
@@ -1870,6 +1885,7 @@ static void perf_group_attach(struct perf_event *event)
 
         list_add_tail(&event->sibling_list, &group_leader->sibling_list);
         group_leader->nr_siblings++;
+        group_leader->group_generation++;
 
         perf_event__header_size(group_leader);
@@ -2025,6 +2041,7 @@ static void perf_group_detach(struct perf_event *event)
         if (event->group_leader != event) {
                 list_del_init(&event->sibling_list);
                 event->group_leader->nr_siblings--;
+                event->group_leader->group_generation++;
 #ifdef CONFIG_PERF_KERNEL_SHARE
                 if (event->shared)
                         event->group_leader = event;
@@ -4925,7 +4942,7 @@ static int __perf_read_group_add(struct perf_event *leader,
                                         u64 read_format, u64 *values)
 {
         struct perf_event_context *ctx = leader->ctx;
-        struct perf_event *sub;
+        struct perf_event *sub, *parent;
         unsigned long flags;
         int n = 1; /* skip @nr */
         int ret;
@@ -4935,6 +4952,33 @@ static int __perf_read_group_add(struct perf_event *leader,
                 return ret;
 
         raw_spin_lock_irqsave(&ctx->lock, flags);
+        /*
+         * Verify the grouping between the parent and child (inherited)
+         * events is still in tact.
+         *
+         * Specifically:
+         *  - leader->ctx->lock pins leader->sibling_list
+         *  - parent->child_mutex pins parent->child_list
+         *  - parent->ctx->mutex pins parent->sibling_list
+         *
+         * Because parent->ctx != leader->ctx (and child_list nests inside
+         * ctx->mutex), group destruction is not atomic between children, also
+         * see perf_event_release_kernel(). Additionally, parent can grow the
+         * group.
+         *
+         * Therefore it is possible to have parent and child groups in a
+         * different configuration and summing over such a beast makes no sense
+         * what so ever.
+         *
+         * Reject this.
+         */
+        parent = leader->parent;
+        if (parent &&
+            (parent->group_generation != leader->group_generation ||
+             parent->nr_siblings != leader->nr_siblings)) {
+                ret = -ECHILD;
+                goto unlock;
+        }
 
         /*
          * Since we co-schedule groups, {enabled,running} times of siblings
@@ -4964,8 +5008,9 @@ static int __perf_read_group_add(struct perf_event *leader,
                 values[n++] = primary_event_id(sub);
         }
 
+unlock:
         raw_spin_unlock_irqrestore(&ctx->lock, flags);
-        return 0;
+        return ret;
 }
 
 static int perf_read_group(struct perf_event *event,
@@ -4984,10 +5029,6 @@ static int perf_read_group(struct perf_event *event,
 
         values[0] = 1 + leader->nr_siblings;
 
-        /*
-         * By locking the child_mutex of the leader we effectively
-         * lock the child list of all siblings.. XXX explain how.
-         */
         mutex_lock(&leader->child_mutex);
 
         ret = __perf_read_group_add(leader, read_format, values);
@@ -12288,6 +12329,7 @@ static int inherit_group(struct perf_event *parent_event,
                     !perf_get_aux_event(child_ctr, leader))
                         return -EINVAL;
         }
+        leader->group_generation = parent_event->group_generation;
         return 0;
 }
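
For a concrete feel of the 16k cap enforced by the reworked perf_event_validate_size() above, here is a small userspace sketch of the same arithmetic; the helper name and the main() driver are made up for illustration, while the PERF_FORMAT_* values are the ones from include/uapi/linux/perf_event.h. With PERF_FORMAT_GROUP | PERF_FORMAT_ID and no time fields, each group member contributes a 16-byte {value, id} pair on top of the 8-byte nr field, so with this read_format the check effectively caps a group at 1023 events.

#include <stdio.h>
#include <stdint.h>

#define PERF_FORMAT_TOTAL_TIME_ENABLED  (1U << 0)
#define PERF_FORMAT_TOTAL_TIME_RUNNING  (1U << 1)
#define PERF_FORMAT_ID                  (1U << 2)
#define PERF_FORMAT_GROUP               (1U << 3)

/* Mirrors the arithmetic of the reworked __perf_event_read_size(). */
static int read_size(uint64_t read_format, int nr_siblings)
{
        int entry = sizeof(uint64_t);   /* value */
        int size = 0;
        int nr = 1;

        if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
                size += sizeof(uint64_t);
        if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
                size += sizeof(uint64_t);
        if (read_format & PERF_FORMAT_ID)
                entry += sizeof(uint64_t);
        if (read_format & PERF_FORMAT_GROUP) {
                nr += nr_siblings;
                size += sizeof(uint64_t);
        }
        return size + nr * entry;
}

int main(void)
{
        uint64_t fmt = PERF_FORMAT_GROUP | PERF_FORMAT_ID;

        /* 8 + 16 * 1023 = 16376 bytes: a leader plus 1022 siblings still passes... */
        printf("%d\n", read_size(fmt, 1022));
        /* ...while one more sibling (16392 bytes) trips the "> 16*1024" check. */
        printf("%d\n", read_size(fmt, 1023));
        return 0;
}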

net/netfilter/nf_tables_api.c

@@ -7117,14 +7117,9 @@ struct nft_trans_gc *nft_trans_gc_alloc(struct nft_set *set,
         if (!trans)
                 return NULL;
 
-        trans->net = maybe_get_net(net);
-        if (!trans->net) {
-                kfree(trans);
-                return NULL;
-        }
-
         refcount_inc(&set->refs);
         trans->set = set;
+        trans->net = get_net(net);
         trans->seq = gc_seq;
 
         return trans;