Merge tag 'LA.UM.9.14.r1-24200-LAHAINA.QSSI13.0' of https://git.codelinaro.org/clo/la/kernel/msm-5.4 into android13-5.4-lahaina

"LA.UM.9.14.r1-24200-LAHAINA.QSSI13.0"

* tag 'LA.UM.9.14.r1-24200-LAHAINA.QSSI13.0' of https://git.codelinaro.org/clo/la/kernel/msm-5.4:
  bus: mhi: Fix potential out-of-bound access
  rpmsg: slatecom: maintain rx_size to read
  rpmsg: slatecom: out of bound read from process_cmd
  soc: qcom: add out of bound check for AON fifo
  soc: qcom: smem: Add boundary checks for partitions
  msm: kgsl: Do not release dma and anon buffers if unmap fails
  msm: kgsl: Do not release dma and anon buffers if unmap fails
  memshare: Prevent possible integer overflow
  msm: kgsl: Keep the timeline fence valid for logging
  msm: ipa: Add support for Private IP Forwarding
  msm: ipa3: add support to identify wifi attach
  soc: qcom: minidump_log: Protect md_dump_slabinfo under SLUB_DEBUG
  mm: slub: Declare slab_owner_ops only when SLUB DEBUG is enabled
  soc: qcom: Add BLAIR-LITE SoC information to socinfo
  soc: qcom: socinfo: Add soc information for BLAIR LTE
  msm_serial_hs: Fix race between mod_timer and del_timer calls

Change-Id: I5ab9a7732af0be4754b506f2c815ab29b236fb91
commit 58d91073d9
Author: Michael Bestas
Date:   2024-03-23 17:36:59 +02:00
GPG Key ID: CC95044519BE6669 (no known key found for this signature)
15 changed files with 174 additions and 53 deletions

View File

@@ -1 +1 @@
-LTS_5.4.259_81334f26ac70
+LTS_5.4.259_70db018a109

View File

@@ -357,7 +357,8 @@ static struct mhi_sat_device *find_sat_dev_by_id(
 static bool mhi_sat_isvalid_header(struct sat_header *hdr, int len)
 {
         /* validate payload size */
-        if (len >= sizeof(*hdr) && (len != hdr->payload_size + sizeof(*hdr)))
+        if ((len < sizeof(*hdr)) ||
+            (len >= sizeof(*hdr) && (len != hdr->payload_size + sizeof(*hdr))))
                 return false;
 
         /* validate SAT IPC version */
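
The fix closes a gap where a buffer shorter than the fixed header was accepted, so hdr->payload_size was read from memory the caller never provided. A standalone sketch of the same validation pattern, with demo_hdr standing in for struct sat_header:

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>

    struct demo_hdr {
            uint32_t payload_size;
            /* ... other fixed header fields ... */
    };

    static bool demo_header_is_valid(const void *buf, size_t len)
    {
            const struct demo_hdr *hdr = buf;

            /* reject short buffers before trusting any header field */
            if (len < sizeof(*hdr))
                    return false;

            /* total length must be exactly header + self-described payload */
            return len == sizeof(*hdr) + hdr->payload_size;
    }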

View File

@@ -344,6 +344,9 @@ static void kgsl_destroy_ion(struct kgsl_memdesc *memdesc)
                         struct kgsl_mem_entry, memdesc);
         struct kgsl_dma_buf_meta *meta = entry->priv_data;
 
+        if (memdesc->priv & KGSL_MEMDESC_MAPPED)
+                return;
+
         if (meta != NULL) {
                 remove_dmabuf_list(meta);
                 dma_buf_unmap_attachment(meta->attach, meta->table,
@@ -367,6 +370,9 @@ static void kgsl_destroy_anon(struct kgsl_memdesc *memdesc)
         struct scatterlist *sg;
         struct page *page;
 
+        if (memdesc->priv & KGSL_MEMDESC_MAPPED)
+                return;
+
         for_each_sg(memdesc->sgt->sgl, sg, memdesc->sgt->nents, i) {
                 page = sg_page(sg);
                 for (j = 0; j < (sg->length >> PAGE_SHIFT); j++) {
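
Both teardown paths now bail out while KGSL_MEMDESC_MAPPED is still set: if the GPU unmap failed, freeing the dma-buf attachment or the anonymous pages would leave a live IOMMU mapping pointing at freed memory. A minimal sketch of the guard pattern, not the kgsl code itself:

    struct demo_memdesc {
            unsigned long priv;     /* flag bits */
    };
    #define DEMO_MEMDESC_MAPPED (1UL << 0)

    static void demo_destroy(struct demo_memdesc *memdesc)
    {
            /* unmap failed earlier: leaking the buffer is safer than
             * freeing pages the device can still reach */
            if (memdesc->priv & DEMO_MEMDESC_MAPPED)
                    return;

            /* ... release backing pages / detach dma-buf here ... */
    }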

View File

@@ -1,7 +1,7 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
  * Copyright (c) 2016-2021, The Linux Foundation. All rights reserved.
- * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2023-2024 Qualcomm Innovation Center, Inc. All rights reserved.
  */
 
 /*
@@ -510,6 +510,8 @@ static int drawobj_add_sync_timeline(struct kgsl_device *device,
         /* Set pending flag before adding callback to avoid race */
         set_bit(event->id, &syncobj->pending);
 
+        /* Get a dma_fence refcount to hand over to the callback */
+        dma_fence_get(event->fence);
         ret = dma_fence_add_callback(event->fence,
                 &event->cb, drawobj_sync_timeline_fence_callback);
 
@@ -522,11 +524,16 @@ static int drawobj_add_sync_timeline(struct kgsl_device *device,
                         ret = 0;
                 }
 
+                /* Put the refcount from fence creation */
+                dma_fence_put(event->fence);
                 kgsl_drawobj_put(drawobj);
                 return ret;
         }
 
         trace_syncpoint_timeline_fence(event->syncobj, event->fence, false);
 
+        /* Put the refcount from fence creation */
+        dma_fence_put(event->fence);
+
         return 0;
 }
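
Once dma_fence_add_callback() succeeds, the callback can fire and drop its reference at any time, so the function takes an extra reference up front and releases only its own creation reference on each exit path. A hedged kernel-style sketch of that contract (demo code, not the kgsl function; it assumes the registered callback itself puts the reference handed to it):

    static int demo_add_cb(struct dma_fence *fence, struct dma_fence_cb *cb,
                           dma_fence_func_t func)
    {
            int ret;

            /* reference that the callback path will consume */
            dma_fence_get(fence);

            ret = dma_fence_add_callback(fence, cb, func);
            if (ret == -ENOENT) {
                    /* fence already signaled: callback will never run,
                     * so reclaim the reference meant for it */
                    dma_fence_put(fence);
                    ret = 0;
            }
            return ret;
    }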

View File

@@ -1900,7 +1900,7 @@ static void glink_slatecom_handle_rx_done(struct glink_slatecom *glink,
         mutex_unlock(&channel->intent_lock);
 }
 
-static void glink_slatecom_process_cmd(struct glink_slatecom *glink, void *rx_data,
+static int glink_slatecom_process_cmd(struct glink_slatecom *glink, void *rx_data,
                        u32 rx_size)
 {
         struct glink_slatecom_msg *msg;
@@ -1909,12 +1909,18 @@ static void glink_slatecom_process_cmd(struct glink_slatecom *glink, void *rx_da
         unsigned int param3;
         unsigned int param4;
         unsigned int cmd;
-        int offset = 0;
-        int ret;
+        u32 offset = 0;
+        int ret = 0;
         u16 name_len;
         char *name;
 
         while (offset < rx_size) {
+                if (rx_size - offset < sizeof(struct glink_slatecom_msg)) {
+                        ret = -EBADMSG;
+                        GLINK_ERR(glink, "%s: Error %d process cmd\n", __func__, ret);
+                        return ret;
+                }
+
                 msg = (struct glink_slatecom_msg *)(rx_data + offset);
                 offset += sizeof(*msg);
 
@@ -1935,7 +1941,7 @@ static void glink_slatecom_process_cmd(struct glink_slatecom *glink, void *rx_da
         case SLATECOM_CMD_CLOSE_ACK:
                 glink_slatecom_rx_defer(glink,
                                 rx_data + offset - sizeof(*msg),
-                                rx_size + offset - sizeof(*msg), 0);
+                                rx_size - offset + sizeof(*msg), 0);
                 break;
         case SLATECOM_CMD_RX_INTENT_REQ:
                 glink_slatecom_handle_intent_req(glink, param1, param2);
@@ -1948,7 +1954,7 @@ static void glink_slatecom_process_cmd(struct glink_slatecom *glink, void *rx_da
                 name = rx_data + offset;
                 glink_slatecom_rx_defer(glink,
                                 rx_data + offset - sizeof(*msg),
-                                rx_size + offset - sizeof(*msg),
+                                rx_size - offset + sizeof(*msg),
                                 ALIGN(name_len, SLATECOM_ALIGNMENT));
                 offset += ALIGN(name_len, SLATECOM_ALIGNMENT);
 
@@ -1997,6 +2003,7 @@ static void glink_slatecom_process_cmd(struct glink_slatecom *glink, void *rx_da
                         break;
                 }
         }
+        return ret;
 }
 
 /**

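The out-of-bound read happened when rx_size left fewer bytes than a full glink_slatecom_msg header; the cast at the top of the loop then read past the receive buffer. A simplified userspace sketch of the fixed framing check (demo_msg is a stand-in, not the real wire format):

    #include <errno.h>
    #include <stdint.h>
    #include <string.h>

    struct demo_msg {
            uint32_t cmd;
            uint32_t param1;
            uint32_t param2;
    };

    static int demo_process(const uint8_t *rx_data, uint32_t rx_size)
    {
            uint32_t offset = 0;
            struct demo_msg msg;

            while (offset < rx_size) {
                    /* a full header must fit in what is left */
                    if (rx_size - offset < sizeof(msg))
                            return -EBADMSG;

                    memcpy(&msg, rx_data + offset, sizeof(msg));
                    offset += sizeof(msg);
                    /* ... dispatch on msg.cmd, validating any payload
                     * length before consuming it ... */
            }
            return 0;
    }
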
View File

@@ -494,8 +494,12 @@ static void handle_alloc_generic_req(struct qmi_handle *handle,
                 }
         }
 
-        if (!memblock[index].allotted) {
-                if (memblock[index].guard_band && alloc_req->num_bytes > 0)
+        if (!memblock[index].allotted && alloc_req->num_bytes > 0) {
+                if (alloc_req->num_bytes > memblock[index].init_size)
+                        alloc_req->num_bytes = memblock[index].init_size;
+
+                if (memblock[index].guard_band)
                         size = alloc_req->num_bytes + MEMSHARE_GUARD_BYTES;
                 else
                         size = alloc_req->num_bytes;
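
num_bytes arrives in an untrusted QMI request, so adding MEMSHARE_GUARD_BYTES to it directly could wrap a 32-bit size and allocate far less than the caller later touches. Clamping to init_size first bounds the sum. A small sketch of the arithmetic (names are illustrative, and it assumes init_size is far below UINT32_MAX, as a provisioned region size would be):

    #include <stdint.h>

    #define DEMO_GUARD_BYTES 4096u

    static uint32_t demo_alloc_size(uint32_t num_bytes, uint32_t init_size,
                                    int guard_band)
    {
            /* clamp the untrusted request first: the addition below
             * can no longer overflow past the provisioned region */
            if (num_bytes > init_size)
                    num_bytes = init_size;

            return guard_band ? num_bytes + DEMO_GUARD_BYTES : num_bytes;
    }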

View File

@@ -1012,10 +1012,10 @@ dump_rq:
         if (md_meminfo_seq_buf)
                 md_dump_meminfo();
 
+#ifdef CONFIG_SLUB_DEBUG
         if (md_slabinfo_seq_buf)
                 md_dump_slabinfo();
 
-#ifdef CONFIG_SLUB_DEBUG
         if (md_slabowner_dump_addr)
                 md_dump_slabowner();
 #endif

View File

@@ -303,7 +303,7 @@ void slatecom_slatedown_handler(void)
 }
 EXPORT_SYMBOL(slatecom_slatedown_handler);
 
-static void parse_fifo(uint8_t *data, union slatecom_event_data_type *event_data)
+static void parse_fifo(uint8_t *data, uint16_t data_len, union slatecom_event_data_type *event_data)
 {
         uint16_t p_len;
         uint8_t sub_id;
@@ -314,11 +314,16 @@ static void parse_fifo(uint8_t *data, union slatecom_event_data_type *event_data
         struct event_list *data_list;
 
         while (*data != '\0') {
+                if (data_len < HED_EVENT_ID_LEN)
+                        break;
                 event_id = *((uint16_t *) data);
                 data = data + HED_EVENT_ID_LEN;
+                data_len = data_len - HED_EVENT_ID_LEN;
 
+                if (data_len < HED_EVENT_SIZE_LEN)
+                        break;
                 p_len = *((uint16_t *) data);
                 data = data + HED_EVENT_SIZE_LEN;
+                data_len = data_len - HED_EVENT_SIZE_LEN;
 
                 if (event_id == 0xFFFE) {
@@ -346,8 +351,14 @@ static void parse_fifo(uint8_t *data, union slatecom_event_data_type *event_data
                                 send_event(SLATECOM_EVENT_TO_MASTER_FIFO_USED,
                                         event_data);
                         }
                 } else if (event_id == 0xc8) {
                         data = data + 12;
+                        data_len = data_len - 12;
                         pr_err("Packet Received = 0x%X, len = %u\n", event_id, p_len);
                 }
                 data = data + p_len;
+                data_len = data_len - p_len;
         }
 
         if (!list_empty(&pr_lst_hd))
                 queue_work(wq, &input_work);
@@ -451,7 +462,8 @@ static void send_back_notification(uint32_t slav_status_reg,
                 if (!ret) {
                         augmnt_fifo((uint8_t *)ptr,
                                 master_fifo_used*SLATE_SPI_WORD_SIZE);
-                        parse_fifo((uint8_t *)ptr, &event_data);
+                        parse_fifo((uint8_t *)ptr,
+                                master_fifo_used*SLATE_SPI_WORD_SIZE, &event_data);
                 }
                 kfree(ptr);
         }
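
parse_fifo() previously trusted the event stream to be well formed, so a crafted p_len could march the cursor past the AON fifo buffer. The fix threads data_len through the walk and checks it before every read. A simplified sketch of the bounded TLV-style loop (it mirrors the kernel code's direct 2-byte reads):

    #include <stdint.h>
    #include <string.h>

    #define DEMO_ID_LEN   2
    #define DEMO_SIZE_LEN 2

    static void demo_parse(const uint8_t *data, uint16_t data_len)
    {
            while (data_len >= DEMO_ID_LEN + DEMO_SIZE_LEN) {
                    uint16_t event_id, p_len;

                    memcpy(&event_id, data, DEMO_ID_LEN);
                    data += DEMO_ID_LEN;
                    data_len -= DEMO_ID_LEN;

                    memcpy(&p_len, data, DEMO_SIZE_LEN);
                    data += DEMO_SIZE_LEN;
                    data_len -= DEMO_SIZE_LEN;

                    /* payload must fit in what is left of the fifo */
                    if (p_len > data_len)
                            break;

                    /* ... handle event_id / payload ... */
                    data += p_len;
                    data_len -= p_len;
            }
    }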

View File

@@ -2,6 +2,7 @@
 /*
  * Copyright (c) 2015, Sony Mobile Communications AB.
  * Copyright (c) 2012-2013, 2019-2020 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2023, Qualcomm Innovation Center, Inc. All rights reserved.
  */
 
 #include <linux/hwspinlock.h>
@@ -86,6 +87,17 @@
 /* Max number of processors/hosts in a system */
 #define SMEM_HOST_COUNT 14
 
+/* Entry range check
+ * ptr >= start : Checks if ptr is greater than the start of access region
+ * ptr + size >= ptr: Check for integer overflow (On 32bit system where ptr
+ * and size are 32bits, ptr + size can wrap around to be a small integer)
+ * ptr + size <= end: Checks if ptr+size is less than the end of access region
+ */
+#define IN_PARTITION_RANGE(ptr, size, start, end) \
+        (((void *)(ptr) >= (void *)(start)) && \
+         (((void *)(ptr) + (size)) >= (void *)(ptr)) && \
+         (((void *)(ptr) + (size)) <= (void *)(end)))
+
 /**
  * struct smem_proc_comm - proc_comm communication struct (legacy)
  * @command: current command to be executed
@@ -353,6 +365,7 @@ static int qcom_smem_alloc_private(struct qcom_smem *smem,
                                    size_t size)
 {
         struct smem_private_entry *hdr, *end;
+        struct smem_private_entry *next_hdr;
         struct smem_partition_header *phdr;
         size_t alloc_size;
         void *cached;
@@ -365,18 +378,25 @@ static int qcom_smem_alloc_private(struct qcom_smem *smem,
         end = phdr_to_last_uncached_entry(phdr);
         cached = phdr_to_last_cached_entry(phdr);
 
-        if (WARN_ON((void *)end > p_end || (void *)cached > p_end))
+        if (WARN_ON(!IN_PARTITION_RANGE(end, 0, phdr, cached) ||
+                                cached > p_end))
                 return -EINVAL;
 
-        while (hdr < end) {
+        while ((hdr < end) && ((hdr + 1) < end)) {
                 if (hdr->canary != SMEM_PRIVATE_CANARY)
                         goto bad_canary;
                 if (le16_to_cpu(hdr->item) == item)
                         return -EEXIST;
 
-                hdr = uncached_entry_next(hdr);
+                next_hdr = uncached_entry_next(hdr);
+                if (WARN_ON(next_hdr <= hdr))
+                        return -EINVAL;
+                hdr = next_hdr;
         }
 
-        if (WARN_ON((void *)hdr > p_end))
+        if (WARN_ON((void *)hdr > (void *)end))
                 return -EINVAL;
 
         /* Check that we don't grow into the cached region */
@@ -534,9 +554,11 @@ static void *qcom_smem_get_private(struct qcom_smem *smem,
                                    unsigned item,
                                    size_t *size)
 {
-        struct smem_private_entry *e, *end;
+        struct smem_private_entry *e, *uncached_end, *cached_end;
+        struct smem_private_entry *next_e;
         struct smem_partition_header *phdr;
         void *item_ptr, *p_end;
+        size_t entry_size = 0;
         u32 partition_size;
         size_t cacheline;
         u32 padding_data;
@@ -548,72 +570,87 @@ static void *qcom_smem_get_private(struct qcom_smem *smem,
         cacheline = le32_to_cpu(entry->cacheline);
 
         e = phdr_to_first_uncached_entry(phdr);
-        end = phdr_to_last_uncached_entry(phdr);
+        uncached_end = phdr_to_last_uncached_entry(phdr);
+        cached_end = phdr_to_last_cached_entry(phdr);
 
-        if (WARN_ON((void *)end > p_end))
+        if (WARN_ON(!IN_PARTITION_RANGE(uncached_end, 0, phdr, cached_end)
+                                || (void *)cached_end > p_end))
                 return ERR_PTR(-EINVAL);
 
-        while (e < end) {
+        while ((e < uncached_end) && ((e + 1) < uncached_end)) {
                 if (e->canary != SMEM_PRIVATE_CANARY)
                         goto invalid_canary;
 
                 if (le16_to_cpu(e->item) == item) {
-                        if (size != NULL) {
-                                e_size = le32_to_cpu(e->size);
-                                padding_data = le16_to_cpu(e->padding_data);
+                        e_size = le32_to_cpu(e->size);
+                        padding_data = le16_to_cpu(e->padding_data);
 
-                                if (e_size < partition_size
-                                    && padding_data < e_size)
-                                        *size = e_size - padding_data;
-                                else
-                                        return ERR_PTR(-EINVAL);
-                        }
+                        if (e_size < partition_size && padding_data < e_size)
+                                entry_size = e_size - padding_data;
+                        else
+                                return ERR_PTR(-EINVAL);
 
                         item_ptr = uncached_entry_to_item(e);
-                        if (WARN_ON(item_ptr > p_end))
+
+                        if (WARN_ON(!IN_PARTITION_RANGE(item_ptr, entry_size, e, uncached_end)))
                                 return ERR_PTR(-EINVAL);
 
+                        if (size != NULL)
+                                *size = entry_size;
+
                         return item_ptr;
                 }
 
-                e = uncached_entry_next(e);
+                next_e = uncached_entry_next(e);
+                if (WARN_ON(next_e <= e))
+                        return ERR_PTR(-EINVAL);
+                e = next_e;
         }
 
-        if (WARN_ON((void *)e > p_end))
+        if (WARN_ON((void *)e > (void *)uncached_end))
                 return ERR_PTR(-EINVAL);
 
         /* Item was not found in the uncached list, search the cached list */
-        e = phdr_to_first_cached_entry(phdr, cacheline);
-        end = phdr_to_last_cached_entry(phdr);
+        if (cached_end == p_end)
+                return ERR_PTR(-ENOENT);
 
-        if (WARN_ON((void *)e < (void *)phdr || (void *)end > p_end))
+        e = phdr_to_first_cached_entry(phdr, cacheline);
+
+        if (WARN_ON(!IN_PARTITION_RANGE(cached_end, 0, uncached_end, p_end) ||
+                        !IN_PARTITION_RANGE(e, sizeof(*e), cached_end, p_end)))
                 return ERR_PTR(-EINVAL);
 
-        while (e > end) {
+        while (e > cached_end) {
                 if (e->canary != SMEM_PRIVATE_CANARY)
                         goto invalid_canary;
 
                 if (le16_to_cpu(e->item) == item) {
-                        if (size != NULL) {
-                                e_size = le32_to_cpu(e->size);
-                                padding_data = le16_to_cpu(e->padding_data);
+                        e_size = le32_to_cpu(e->size);
+                        padding_data = le16_to_cpu(e->padding_data);
 
-                                if (e_size < partition_size
-                                    && padding_data < e_size)
-                                        *size = e_size - padding_data;
-                                else
-                                        return ERR_PTR(-EINVAL);
-                        }
+                        if (e_size < partition_size && padding_data < e_size)
+                                entry_size = e_size - padding_data;
+                        else
+                                return ERR_PTR(-EINVAL);
 
                         item_ptr = cached_entry_to_item(e);
-                        if (WARN_ON(item_ptr < (void *)phdr))
+
+                        if (WARN_ON(!IN_PARTITION_RANGE(item_ptr, entry_size, cached_end, e)))
                                 return ERR_PTR(-EINVAL);
 
+                        if (size != NULL)
+                                *size = entry_size;
+
                         return item_ptr;
                 }
 
-                e = cached_entry_next(e, cacheline);
+                next_e = cached_entry_next(e, cacheline);
+                if (WARN_ON(next_e >= e))
+                        return ERR_PTR(-EINVAL);
+                e = next_e;
         }
 
         if (WARN_ON((void *)e < (void *)phdr))
                 return ERR_PTR(-EINVAL);
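
The IN_PARTITION_RANGE() macro introduced above encodes three checks, and the middle one is the subtle part: on a 32-bit system, a pointer near the top of the address space plus a large attacker-controlled size wraps around to a small value, so a naive "ptr + size <= end" test passes even though the range is bogus. The same test written out as a plain function, for illustration only:

    #include <stddef.h>
    #include <stdint.h>

    static int demo_in_range(const char *ptr, size_t size,
                             const char *start, const char *end)
    {
            return ptr >= start &&                            /* not before the region */
                   (uintptr_t)ptr + size >= (uintptr_t)ptr && /* no wraparound */
                   ptr + size <= end;                         /* not past the region */
    }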

View File

@@ -781,8 +781,10 @@ static const struct soc_id soc_id[] = {
         { 450, "SHIMA" },
         { 454, "HOLI" },
         { 507, "BLAIR" },
+        { 578, "BLAIR-LITE" },
         { 565, "BLAIRP" },
         { 628, "BLAIRP-XR" },
+        { 647, "BLAIRP-LTE" },
         { 486, "MONACO" },
         { 458, "SDXLEMUR" },
         { 483, "SDXLEMUR-SD"},

View File

@@ -1849,7 +1849,7 @@ static void msm_hs_sps_tx_callback(struct sps_event_notify *notify)
                 &addr, notify->data.transfer.iovec.size,
                 notify->data.transfer.iovec.flags);
 
-        del_timer(&msm_uport->tx.tx_timeout_timer);
+        del_timer_sync(&msm_uport->tx.tx_timeout_timer);
 
         MSM_HS_DBG("%s(): Queue kthread work\n", __func__);
         kthread_queue_work(&msm_uport->tx.kworker, &msm_uport->tx.kwork);
 }
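
del_timer() only deactivates a pending timer; if the timeout handler is already running on another CPU, or mod_timer() re-arms it concurrently, teardown can race with it. del_timer_sync() additionally waits for a running handler to finish. A minimal kernel-style sketch of the pattern, using the generic timer API rather than the msm_serial_hs code:

    #include <linux/jiffies.h>
    #include <linux/timer.h>

    static struct timer_list demo_timer;

    static void demo_timeout(struct timer_list *t)
    {
            /* may be mid-flight on another CPU when teardown starts */
    }

    static void demo_start(void)
    {
            timer_setup(&demo_timer, demo_timeout, 0);
            mod_timer(&demo_timer, jiffies + msecs_to_jiffies(100));
    }

    static void demo_stop(void)
    {
            /* waits for a concurrently running demo_timeout() to return,
             * so state freed afterwards cannot be touched by it */
            del_timer_sync(&demo_timer);
    }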

View File

@@ -1,6 +1,7 @@
 /* SPDX-License-Identifier: GPL-2.0-only */
 /*
  * Copyright (c) 2012-2021, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
  */
 
 #ifndef _IPA_H_
@@ -490,6 +491,17 @@ struct ipa_ep_cfg_seq {
         int seq_type;
 };
 
+/**
+ * struct ipa_ep_cfg_ucp - uCP config register
+ * @command: Command ID at uCP, that the packets should hit
+ *
+ * @enable: 0 - Disabled
+ *          1- Enabled
+ */
+struct ipa_ep_cfg_ucp {
+        u16 command;
+        u32 enable;
+};
+
 /**
  * struct ipa_ep_cfg_ulso - ULSO configurations
  * @ipid_min_max_idx: A value in the range [0, 2]. Determines the registers

View File

@@ -1,7 +1,7 @@
 /* SPDX-License-Identifier: GPL-2.0-only */
 /*
  * Copyright (c) 2018 - 2021, The Linux Foundation. All rights reserved.
- * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2022 - 2023 Qualcomm Innovation Center, Inc. All rights reserved.
  */
 
 #ifndef _IPA_WDI3_H_
@@ -73,6 +73,7 @@ typedef void (*ipa_wdi_mesh_ast_notifier_cb)(void *priv, unsigned long data);
  * @wdi_notify: bw notification cb
  * inst_id: Instance ID
  * @ast_update: AST update needed or not.
+ * @is_hsp: need to identify if attach is hsp/pine.
  */
 struct ipa_wdi_init_in_params {
         enum ipa_wdi_version wdi_version;
@@ -83,6 +84,7 @@ struct ipa_wdi_init_in_params {
 #endif
         int inst_id;
         bool ast_update;
+        bool is_hsp;
 };
 
 /**
/**

View File

@@ -2,7 +2,7 @@
 /*
  * Copyright (c) 2012-2021, The Linux Foundation. All rights reserved.
  *
- * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
  */
 
 #ifndef _UAPI_MSM_IPA_H_
@@ -151,6 +151,7 @@
 #define IPA_IOCTL_QUERY_CACHED_DRIVER_MSG 94
 #define IPA_IOCTL_SET_EXT_ROUTER_MODE 95
 #define IPA_IOCTL_ADD_DEL_DSCP_PCP_MAPPING 96
+#define IPA_IOCTL_SEND_VLAN_MUXID_MAPPING 97
 
 /**
  * max size of the header to be inserted
  */
@@ -1755,6 +1756,27 @@ struct IpaDscpVlanPcpMap_t {
         uint16_t vlan_s[IPA_EoGRE_MAX_VLAN * IPA_GRE_MAX_S_VLAN];
 } __packed;
 
+#define MAX_VLAN_CONFIGURE 16
+
+enum pkt_path {
+        SW_PATH = 0,
+        HW_PATH_OUTSIDE_TUNNEL = 1,
+        HW_PATH_INSIDE_TUNNEL = 2
+};
+
+/* VLANID - Action - Tunnel_ID - MUX_ID */
+struct singletag_mux_map_entry {
+        uint16_t vlan_id;
+        uint8_t mux_id;
+        uint8_t pkt_path;
+        uint32_t tunnel_id;
+};
+
+struct ipa_ioc_mux_mapping_table {
+        struct singletag_mux_map_entry map_entries[MAX_VLAN_CONFIGURE];
+};
+
 /**
  * struct ipa_exception
  *
@@ -4077,6 +4099,11 @@ struct ipa_ioc_dscp_pcp_map_info {
                                 IPA_IOCTL_ADD_DEL_DSCP_PCP_MAPPING, \
                                 struct ipa_ioc_dscp_pcp_map_info)
 
+#define IPA_IOC_SEND_VLAN_MUXID_MAPPING _IOWR(IPA_IOC_MAGIC, \
+                                IPA_IOCTL_SEND_VLAN_MUXID_MAPPING, \
+                                struct ipa_ioc_mux_mapping_table)
+
 /*
  * unique magic number of the Tethering bridge ioctls
  */
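
For context, a hypothetical userspace sketch of driving the new ioctl with one mapping entry; the device node path is an assumption, not something this diff confirms:

    #include <fcntl.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <unistd.h>
    #include <linux/msm_ipa.h>

    static int demo_send_vlan_mux_map(void)
    {
            struct ipa_ioc_mux_mapping_table table;
            int fd, ret;

            memset(&table, 0, sizeof(table));
            table.map_entries[0].vlan_id = 100;
            table.map_entries[0].mux_id = 1;
            table.map_entries[0].pkt_path = HW_PATH_INSIDE_TUNNEL;
            table.map_entries[0].tunnel_id = 1;

            fd = open("/dev/ipa", O_RDWR);  /* assumed node name */
            if (fd < 0)
                    return -1;

            ret = ioctl(fd, IPA_IOC_SEND_VLAN_MUXID_MAPPING, &table);
            close(fd);
            return ret;
    }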

View File

@@ -6240,6 +6240,7 @@ static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
         return 0;
 }
 
+#ifdef CONFIG_SLUB_DEBUG
 #ifdef CONFIG_QCOM_MINIDUMP_PANIC_DUMP
 static ssize_t slab_owner_filter_write(struct file *file,
                                        const char __user *ubuf,
@@ -6321,7 +6322,8 @@ static const struct file_operations proc_slab_owner_handle_ops = {
         .write = slab_owner_handle_write,
         .read = slab_owner_handle_read,
 };
-#endif
+#endif /* CONFIG_QCOM_MINIDUMP_PANIC_DUMP */
+#endif /* CONFIG_SLUB_DEBUG */
 
 static int __init slab_sysfs_init(void)
 {
@@ -6366,6 +6368,7 @@ static int __init slab_sysfs_init(void)
                 kfree(al);
         }
 
+#ifdef CONFIG_SLUB_DEBUG
 #ifdef CONFIG_QCOM_MINIDUMP_PANIC_DUMP
         if (slub_debug) {
                 int i;
@@ -6380,6 +6383,7 @@ static int __init slab_sysfs_init(void)
                                 set_bit(i, &slab_owner_filter);
                         }
                 }
 #endif
+#endif
         mutex_unlock(&slab_mutex);
         resiliency_test();