From 7a811175cab8be7390447c0c77645a3799e46d12 Mon Sep 17 00:00:00 2001
From: Panicker Harish
Date: Tue, 12 Dec 2023 10:56:46 +0530
Subject: [PATCH 01/16] msm_serial_hs: Fix race between mod_timer and
 del_timer calls

Deletion of the timer and synchronization with its handler are expected
to happen when the timer is deleted, but in the issue case the timer got
deleted and was then modified or accessed before the handler had
synchronized, which leads to a kernel panic. Fix this by changing
del_timer to del_timer_sync so that the timer is protected until
synchronization happens.

Change-Id: If5dec66b9bbcb5369898536ba1c727b487a13018
Signed-off-by: Venkata Manasa Kakarla
Signed-off-by: Yatish Kumar Singh
Signed-off-by: Panicker Harish
---
 drivers/tty/serial/msm_serial_hs.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/tty/serial/msm_serial_hs.c b/drivers/tty/serial/msm_serial_hs.c
index c55036f2b87d..cfee5b5799b6 100644
--- a/drivers/tty/serial/msm_serial_hs.c
+++ b/drivers/tty/serial/msm_serial_hs.c
@@ -1849,7 +1849,7 @@ static void msm_hs_sps_tx_callback(struct sps_event_notify *notify)
 		&addr, notify->data.transfer.iovec.size,
 		notify->data.transfer.iovec.flags);
 
-	del_timer(&msm_uport->tx.tx_timeout_timer);
+	del_timer_sync(&msm_uport->tx.tx_timeout_timer);
 	MSM_HS_DBG("%s(): Queue kthread work\n", __func__);
 	kthread_queue_work(&msm_uport->tx.kworker, &msm_uport->tx.kwork);
 }

From 40d06eb6f1f4462665f848e8c94830f5cff5e3a7 Mon Sep 17 00:00:00 2001
From: Swetha Chikkaboraiah
Date: Mon, 8 Jan 2024 19:52:01 +0530
Subject: [PATCH 02/16] soc: qcom: socinfo: Add soc information for BLAIR LTE

Add the SOC ID to support socinfo for the BLAIR LTE platform.

Change-Id: I5223272ef20eac2396e52fa910628ec8236eb1ed
Signed-off-by: Swetha Chikkaboraiah
---
 drivers/soc/qcom/socinfo.c | 1 +
 1 file changed, 1 insertion(+)

diff --git a/drivers/soc/qcom/socinfo.c b/drivers/soc/qcom/socinfo.c
index acd296faa0c4..7767793072f1 100644
--- a/drivers/soc/qcom/socinfo.c
+++ b/drivers/soc/qcom/socinfo.c
@@ -783,6 +783,7 @@ static const struct soc_id soc_id[] = {
 	{ 507, "BLAIR" },
 	{ 565, "BLAIRP" },
 	{ 628, "BLAIRP-XR" },
+	{ 647, "BLAIRP-LTE" },
 	{ 486, "MONACO" },
 	{ 458, "SDXLEMUR" },
 	{ 483, "SDXLEMUR-SD"},

From c7fecf048114b69aca984a1068139e164da10792 Mon Sep 17 00:00:00 2001
From: Saranya R
Date: Wed, 10 Jan 2024 17:28:00 +0530
Subject: [PATCH 03/16] soc: qcom: Add BLAIR-LITE SoC information to socinfo

Add BLAIR-LITE SoC information to socinfo.

Change-Id: I334a80508434ea3aaa972ecb0e2b72586e81dfca
Signed-off-by: Saranya R
---
 drivers/soc/qcom/socinfo.c | 1 +
 1 file changed, 1 insertion(+)

diff --git a/drivers/soc/qcom/socinfo.c b/drivers/soc/qcom/socinfo.c
index 7767793072f1..9db6b91b5d7b 100644
--- a/drivers/soc/qcom/socinfo.c
+++ b/drivers/soc/qcom/socinfo.c
@@ -781,6 +781,7 @@ static const struct soc_id soc_id[] = {
 	{ 450, "SHIMA" },
 	{ 454, "HOLI" },
 	{ 507, "BLAIR" },
+	{ 578, "BLAIR-LITE" },
 	{ 565, "BLAIRP" },
 	{ 628, "BLAIRP-XR" },
 	{ 647, "BLAIRP-LTE" },

From 40d396eb36139df03ea00eb1b70b4c18d283ce34 Mon Sep 17 00:00:00 2001
From: Rohit Agarwal
Date: Thu, 18 Jan 2024 13:04:58 +0530
Subject: [PATCH 04/16] mm: slub: Declare slab_owner_ops only when SLUB DEBUG
 is enabled

Declare the ops struct and its corresponding callbacks only when
SLUB_DEBUG is enabled. Currently, the ops struct is defined only under
the MINIDUMP_PANIC_DUMP config, but the variables it uses are protected
under SLUB_DEBUG as well.
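To make the build dependency concrete, the following is a minimal stand-alone C sketch of the nesting rule, not the actual mm/slub.c code; CONFIG_A and CONFIG_B stand in for CONFIG_QCOM_MINIDUMP_PANIC_DUMP and CONFIG_SLUB_DEBUG, and every name in it is a placeholder. Code that touches state declared only under the second option has to be compiled out together with it, otherwise a build that enables only the first option fails with an undeclared-identifier error.

#include <stdio.h>

#ifdef CONFIG_B
/* This state only exists when CONFIG_B is enabled. */
static unsigned long filter_state;

#ifdef CONFIG_A
/* Uses filter_state, so it must be nested under CONFIG_B as well. */
static void dump_filter(void)
{
	printf("filter: %lx\n", filter_state);
}
#endif /* CONFIG_A */
#endif /* CONFIG_B */

int main(void)
{
#if defined(CONFIG_A) && defined(CONFIG_B)
	dump_filter();
#endif
	return 0;
}

Compiling with only -DCONFIG_A is the combination that used to break; with the nested guards, any combination of the two flags builds cleanly.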
Change-Id: I11f29564e1d65edc506a50e2c12aac374dbca6d5 Signed-off-by: Rohit Agarwal --- mm/slub.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/mm/slub.c b/mm/slub.c index 3cc6ee044f27..f558c25a6206 100644 --- a/mm/slub.c +++ b/mm/slub.c @@ -6240,6 +6240,7 @@ static int sysfs_slab_alias(struct kmem_cache *s, const char *name) return 0; } +#ifdef CONFIG_SLUB_DEBUG #ifdef CONFIG_QCOM_MINIDUMP_PANIC_DUMP static ssize_t slab_owner_filter_write(struct file *file, const char __user *ubuf, @@ -6321,7 +6322,8 @@ static const struct file_operations proc_slab_owner_handle_ops = { .write = slab_owner_handle_write, .read = slab_owner_handle_read, }; -#endif +#endif /* CONFIG_QCOM_MINIDUMP_PANIC_DUMP */ +#endif /* CONFIG_SLUB_DEBUG */ static int __init slab_sysfs_init(void) { @@ -6366,6 +6368,7 @@ static int __init slab_sysfs_init(void) kfree(al); } +#ifdef CONFIG_SLUB_DEBUG #ifdef CONFIG_QCOM_MINIDUMP_PANIC_DUMP if (slub_debug) { int i; @@ -6380,6 +6383,7 @@ static int __init slab_sysfs_init(void) set_bit(i, &slab_owner_filter); } } +#endif #endif mutex_unlock(&slab_mutex); resiliency_test(); From ae146b9a201e7b8fea96f4ea618474c6832fe7d1 Mon Sep 17 00:00:00 2001 From: Rohit Agarwal Date: Fri, 19 Jan 2024 14:06:33 +0530 Subject: [PATCH 05/16] soc: qcom: minidump_log: Protect md_dump_slabinfo under SLUB_DEBUG Protect md_dump_slabinfo call only if SLUB_DEBUG is enabled. Change-Id: I1f703e039517dd24fad303f830a6a30a3f32c3f7 Signed-off-by: Rohit Agarwal --- drivers/soc/qcom/minidump_log.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/soc/qcom/minidump_log.c b/drivers/soc/qcom/minidump_log.c index 921e2a6ea63b..771e6f4f03ed 100644 --- a/drivers/soc/qcom/minidump_log.c +++ b/drivers/soc/qcom/minidump_log.c @@ -1006,10 +1006,10 @@ dump_rq: if (md_meminfo_seq_buf) md_dump_meminfo(); +#ifdef CONFIG_SLUB_DEBUG if (md_slabinfo_seq_buf) md_dump_slabinfo(); -#ifdef CONFIG_SLUB_DEBUG if (md_slabowner_dump_addr) md_dump_slabowner(); #endif From fc5843f99a40d59899d8066f516f4f1c7b1af040 Mon Sep 17 00:00:00 2001 From: Fakruddin Vohra Date: Mon, 18 Dec 2023 09:19:23 +0530 Subject: [PATCH 06/16] msm: ipa3: add support to identify wifi attach add change to identify the wifi attach as communicated by wlan at wdi init. Change-Id: Iea73ce1037bdbe1064570173c0792f0fe139ac4f Signed-off-by: Fakruddin Vohra --- include/linux/ipa_wdi3.h | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/include/linux/ipa_wdi3.h b/include/linux/ipa_wdi3.h index 38465301fab3..8fbd994cb26a 100644 --- a/include/linux/ipa_wdi3.h +++ b/include/linux/ipa_wdi3.h @@ -1,7 +1,7 @@ /* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (c) 2018 - 2021, The Linux Foundation. All rights reserved. - * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2022 - 2023 Qualcomm Innovation Center, Inc. All rights reserved. */ #ifndef _IPA_WDI3_H_ @@ -73,6 +73,7 @@ typedef void (*ipa_wdi_mesh_ast_notifier_cb)(void *priv, unsigned long data); * @wdi_notify: bw notification cb * inst_id: Instance ID * @ast_update: AST update needed or not. + * @is_hsp: need to identify if attach is hsp/pine. 
*/ struct ipa_wdi_init_in_params { enum ipa_wdi_version wdi_version; @@ -83,6 +84,7 @@ struct ipa_wdi_init_in_params { #endif int inst_id; bool ast_update; + bool is_hsp; }; /** From fd3f99504d712da144638d6d22fcc9663cc88aed Mon Sep 17 00:00:00 2001 From: Krishna Nagaraja Date: Tue, 23 Jan 2024 01:47:28 +0530 Subject: [PATCH 07/16] msm: ipa: Add support for Private IP Forwarding Changes for new uCP commands, and IOCTL to support this feature Change-Id: Idd7de3f18fc557b54b3c2d965802065b3f6dd982 Signed-off-by: Krishna Nagaraja --- include/linux/ipa.h | 12 ++++++++++++ include/uapi/linux/msm_ipa.h | 29 ++++++++++++++++++++++++++++- 2 files changed, 40 insertions(+), 1 deletion(-) diff --git a/include/linux/ipa.h b/include/linux/ipa.h index fa528096b133..a0c4d9387e4f 100644 --- a/include/linux/ipa.h +++ b/include/linux/ipa.h @@ -1,6 +1,7 @@ /* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (c) 2012-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. */ #ifndef _IPA_H_ @@ -490,6 +491,17 @@ struct ipa_ep_cfg_seq { int seq_type; }; +/** + * struct ipa_ep_cfg_ucp - uCP config register + * @command: Command ID at uCP, that the packets should hit + * + * @enable: 0 - Disabled + * 1- Enabled + */ +struct ipa_ep_cfg_ucp { + u16 command; + u32 enable; +}; /** * struct ipa_ep_cfg_ulso - ULSO configurations * @ipid_min_max_idx: A value in the range [0, 2]. Determines the registers diff --git a/include/uapi/linux/msm_ipa.h b/include/uapi/linux/msm_ipa.h index 3e258d6293ce..dcd4980ea987 100644 --- a/include/uapi/linux/msm_ipa.h +++ b/include/uapi/linux/msm_ipa.h @@ -2,7 +2,7 @@ /* * Copyright (c) 2012-2021, The Linux Foundation. All rights reserved. * - * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved. */ #ifndef _UAPI_MSM_IPA_H_ @@ -151,6 +151,7 @@ #define IPA_IOCTL_QUERY_CACHED_DRIVER_MSG 94 #define IPA_IOCTL_SET_EXT_ROUTER_MODE 95 #define IPA_IOCTL_ADD_DEL_DSCP_PCP_MAPPING 96 +#define IPA_IOCTL_SEND_VLAN_MUXID_MAPPING 97 /** * max size of the header to be inserted */ @@ -1755,6 +1756,27 @@ struct IpaDscpVlanPcpMap_t { uint16_t vlan_s[IPA_EoGRE_MAX_VLAN * IPA_GRE_MAX_S_VLAN]; } __packed; + +#define MAX_VLAN_CONFIGURE 16 +enum pkt_path { + SW_PATH = 0, + HW_PATH_OUTSIDE_TUNNEL = 1, + HW_PATH_INSIDE_TUNNEL = 2 +}; + +/* VLANID - Action - Tunnel_ID - MUX_ID */ +struct singletag_mux_map_entry { + uint16_t vlan_id; + uint8_t mux_id; + uint8_t pkt_path; + uint32_t tunnel_id; +}; + +struct ipa_ioc_mux_mapping_table { + struct singletag_mux_map_entry map_entries[MAX_VLAN_CONFIGURE]; +}; + + /** * struct ipa_exception * @@ -4077,6 +4099,11 @@ struct ipa_ioc_dscp_pcp_map_info { IPA_IOCTL_ADD_DEL_DSCP_PCP_MAPPING, \ struct ipa_ioc_dscp_pcp_map_info) +#define IPA_IOC_SEND_VLAN_MUXID_MAPPING _IOWR(IPA_IOC_MAGIC, \ + IPA_IOCTL_SEND_VLAN_MUXID_MAPPING, \ + struct ipa_ioc_mux_mapping_table) + + /* * unique magic number of the Tethering bridge ioctls */ From c2eae40b63aabc52bf346cca55ba81d7be4fcf79 Mon Sep 17 00:00:00 2001 From: Lynus Vaz Date: Fri, 19 Jan 2024 08:38:17 -0800 Subject: [PATCH 08/16] msm: kgsl: Keep the timeline fence valid for logging The timeline fence needs to remain valid for logging purposes. Take an extra refcount on the timeline dma_fence to make sure it doesn't go away till we're done with it. 
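As a user-space model of the refcount hand-off, not the kgsl or dma_fence API itself, the sketch below shows the discipline the patch follows: take one extra reference for the callback before the creator drops its own, so the object stays valid until the callback has finished logging. All names here are illustrative.

#include <stdio.h>
#include <stdlib.h>

struct fence {
	int refcount;
};

static struct fence *fence_create(void)
{
	struct fence *f = malloc(sizeof(*f));

	if (!f)
		exit(1);
	f->refcount = 1;	/* creation reference */
	return f;
}

static void fence_get(struct fence *f)
{
	f->refcount++;
}

static void fence_put(struct fence *f)
{
	if (--f->refcount == 0) {
		printf("fence freed\n");
		free(f);
	}
}

/* The callback owns one reference and drops it once logging is done. */
static void fence_callback(struct fence *f)
{
	printf("callback sees a valid fence (refcount=%d)\n", f->refcount);
	fence_put(f);
}

int main(void)
{
	struct fence *f = fence_create();

	fence_get(f);		/* extra reference handed to the callback */
	fence_put(f);		/* creator drops the creation reference */
	fence_callback(f);	/* still valid: the callback holds the last reference */
	return 0;
}

Without the extra fence_get(), the creator's put would free the object while the callback still expects to read it, which is the use-after-free the change avoids.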
Change-Id: I6670ef7add099a72684c1fe20ed009dff85d1f27 Signed-off-by: Lynus Vaz Signed-off-by: Deepak Kumar --- drivers/gpu/msm/kgsl_drawobj.c | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/msm/kgsl_drawobj.c b/drivers/gpu/msm/kgsl_drawobj.c index 9e00d2c08234..7ecae4f7cebd 100644 --- a/drivers/gpu/msm/kgsl_drawobj.c +++ b/drivers/gpu/msm/kgsl_drawobj.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0-only /* * Copyright (c) 2016-2021, The Linux Foundation. All rights reserved. - * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2023-2024 Qualcomm Innovation Center, Inc. All rights reserved. */ /* @@ -510,6 +510,8 @@ static int drawobj_add_sync_timeline(struct kgsl_device *device, /* Set pending flag before adding callback to avoid race */ set_bit(event->id, &syncobj->pending); + /* Get a dma_fence refcount to hand over to the callback */ + dma_fence_get(event->fence); ret = dma_fence_add_callback(event->fence, &event->cb, drawobj_sync_timeline_fence_callback); @@ -522,11 +524,16 @@ static int drawobj_add_sync_timeline(struct kgsl_device *device, ret = 0; } + /* Put the refcount from fence creation */ + dma_fence_put(event->fence); kgsl_drawobj_put(drawobj); return ret; } trace_syncpoint_timeline_fence(event->syncobj, event->fence, false); + + /* Put the refcount from fence creation */ + dma_fence_put(event->fence); return 0; } From f555e9e4ad112c390f3f26674558276f5210deff Mon Sep 17 00:00:00 2001 From: Manoj Prabhu B Date: Mon, 9 Oct 2023 12:32:30 +0530 Subject: [PATCH 09/16] memshare: Prevent possible integer overflow Prevent possible integer overflow by sanitizing the alloc request size coming from the client against allottable amount of memory. Change-Id: I74cb0f7b0808f20299586969fd5c810d44c3e576 Signed-off-by: Manoj Prabhu B Signed-off-by: Madhab Sharma --- drivers/soc/qcom/memshare/msm_memshare.c | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/drivers/soc/qcom/memshare/msm_memshare.c b/drivers/soc/qcom/memshare/msm_memshare.c index 43ca69072932..aa747aa4e433 100644 --- a/drivers/soc/qcom/memshare/msm_memshare.c +++ b/drivers/soc/qcom/memshare/msm_memshare.c @@ -494,8 +494,12 @@ static void handle_alloc_generic_req(struct qmi_handle *handle, } } - if (!memblock[index].allotted) { - if (memblock[index].guard_band && alloc_req->num_bytes > 0) + if (!memblock[index].allotted && alloc_req->num_bytes > 0) { + + if (alloc_req->num_bytes > memblock[index].init_size) + alloc_req->num_bytes = memblock[index].init_size; + + if (memblock[index].guard_band) size = alloc_req->num_bytes + MEMSHARE_GUARD_BYTES; else size = alloc_req->num_bytes; From e7c4bb239ba84e45c05ccd1105d400f0a61a7613 Mon Sep 17 00:00:00 2001 From: Lynus Vaz Date: Wed, 20 Dec 2023 13:52:55 -0800 Subject: [PATCH 10/16] msm: kgsl: Do not release dma and anon buffers if unmap fails If iommu unmap fails and leaves dma or anon buffers still mapped in the iommu, do not free them. 
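The rule can be modeled outside the kernel as a destructor that refuses to free backing memory while a mapping may still reference it; the sketch below is illustrative only (DESC_MAPPED and the struct stand in for KGSL_MEMDESC_MAPPED and the kgsl memdesc), and it deliberately chooses a leak over a potential use-after-free.

#include <stdio.h>
#include <stdlib.h>

#define DESC_MAPPED	0x1	/* stands in for KGSL_MEMDESC_MAPPED */

struct memdesc {
	unsigned int priv;
	void *pages;
};

static void destroy_buffer(struct memdesc *desc)
{
	/* Unmap failed earlier and left the flag set: do not free the pages. */
	if (desc->priv & DESC_MAPPED) {
		fprintf(stderr, "buffer still mapped, leaking instead of freeing\n");
		return;
	}

	free(desc->pages);
	desc->pages = NULL;
}

int main(void)
{
	struct memdesc ok = { .priv = 0, .pages = malloc(64) };
	struct memdesc stuck = { .priv = DESC_MAPPED, .pages = malloc(64) };

	destroy_buffer(&ok);	/* freed normally */
	destroy_buffer(&stuck);	/* intentionally left alone: safer than reuse under a stale mapping */
	return 0;
}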
Change-Id: Ice0e1a59c1ac0ee7a9d62d8899966b84fa63d5ca Signed-off-by: Lynus Vaz Signed-off-by: Deepak Kumar --- drivers/gpu/msm/kgsl.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/drivers/gpu/msm/kgsl.c b/drivers/gpu/msm/kgsl.c index 964b8536666b..ff7e3e5468ea 100644 --- a/drivers/gpu/msm/kgsl.c +++ b/drivers/gpu/msm/kgsl.c @@ -344,6 +344,9 @@ static void kgsl_destroy_ion(struct kgsl_memdesc *memdesc) struct kgsl_mem_entry, memdesc); struct kgsl_dma_buf_meta *meta = entry->priv_data; + if (memdesc->priv & KGSL_MEMDESC_MAPPED) + return; + if (meta != NULL) { remove_dmabuf_list(meta); dma_buf_unmap_attachment(meta->attach, meta->table, @@ -367,6 +370,9 @@ static void kgsl_destroy_anon(struct kgsl_memdesc *memdesc) struct scatterlist *sg; struct page *page; + if (memdesc->priv & KGSL_MEMDESC_MAPPED) + return; + for_each_sg(memdesc->sgt->sgl, sg, memdesc->sgt->nents, i) { page = sg_page(sg); for (j = 0; j < (sg->length >> PAGE_SHIFT); j++) { From 26a82ee4663c89e364df1212b9cc9fbf4a86af74 Mon Sep 17 00:00:00 2001 From: Lynus Vaz Date: Wed, 20 Dec 2023 13:52:55 -0800 Subject: [PATCH 11/16] msm: kgsl: Do not release dma and anon buffers if unmap fails If iommu unmap fails and leaves dma or anon buffers still mapped in the iommu, do not free them. Change-Id: Ice0e1a59c1ac0ee7a9d62d8899966b84fa63d5ca Signed-off-by: Lynus Vaz Signed-off-by: Deepak Kumar (cherry picked from commit e7c4bb239ba84e45c05ccd1105d400f0a61a7613) --- drivers/gpu/msm/kgsl.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/drivers/gpu/msm/kgsl.c b/drivers/gpu/msm/kgsl.c index 964b8536666b..ff7e3e5468ea 100644 --- a/drivers/gpu/msm/kgsl.c +++ b/drivers/gpu/msm/kgsl.c @@ -344,6 +344,9 @@ static void kgsl_destroy_ion(struct kgsl_memdesc *memdesc) struct kgsl_mem_entry, memdesc); struct kgsl_dma_buf_meta *meta = entry->priv_data; + if (memdesc->priv & KGSL_MEMDESC_MAPPED) + return; + if (meta != NULL) { remove_dmabuf_list(meta); dma_buf_unmap_attachment(meta->attach, meta->table, @@ -367,6 +370,9 @@ static void kgsl_destroy_anon(struct kgsl_memdesc *memdesc) struct scatterlist *sg; struct page *page; + if (memdesc->priv & KGSL_MEMDESC_MAPPED) + return; + for_each_sg(memdesc->sgt->sgl, sg, memdesc->sgt->nents, i) { page = sg_page(sg); for (j = 0; j < (sg->length >> PAGE_SHIFT); j++) { From 58e401790ae9f1bbaab96eda7d2e21fb4b020247 Mon Sep 17 00:00:00 2001 From: Sarannya S Date: Fri, 22 Sep 2023 13:13:58 +0530 Subject: [PATCH 12/16] soc: qcom: smem: Add boundary checks for partitions Add condition check to make sure that the end address of private entry does not go out of partition. Change-Id: I88b3c69d86d90905b214c13a8c632b134b487a49 Signed-off-by: Sarannya S Signed-off-by: Pranav Mahesh Phansalkar --- drivers/soc/qcom/smem.c | 107 +++++++++++++++++++++++++++------------- 1 file changed, 72 insertions(+), 35 deletions(-) diff --git a/drivers/soc/qcom/smem.c b/drivers/soc/qcom/smem.c index 0c65d83e8096..04a38617d643 100644 --- a/drivers/soc/qcom/smem.c +++ b/drivers/soc/qcom/smem.c @@ -2,6 +2,7 @@ /* * Copyright (c) 2015, Sony Mobile Communications AB. * Copyright (c) 2012-2013, 2019-2020 The Linux Foundation. All rights reserved. + * Copyright (c) 2023, Qualcomm Innovation Center, Inc. All rights reserved. 
*/ #include @@ -86,6 +87,17 @@ /* Max number of processors/hosts in a system */ #define SMEM_HOST_COUNT 14 +/* Entry range check + * ptr >= start : Checks if ptr is greater than the start of access region + * ptr + size >= ptr: Check for integer overflow (On 32bit system where ptr + * and size are 32bits, ptr + size can wrap around to be a small integer) + * ptr + size <= end: Checks if ptr+size is less than the end of access region + */ +#define IN_PARTITION_RANGE(ptr, size, start, end) \ + (((void *)(ptr) >= (void *)(start)) && \ + (((void *)(ptr) + (size)) >= (void *)(ptr)) && \ + (((void *)(ptr) + (size)) <= (void *)(end))) + /** * struct smem_proc_comm - proc_comm communication struct (legacy) * @command: current command to be executed @@ -353,6 +365,7 @@ static int qcom_smem_alloc_private(struct qcom_smem *smem, size_t size) { struct smem_private_entry *hdr, *end; + struct smem_private_entry *next_hdr; struct smem_partition_header *phdr; size_t alloc_size; void *cached; @@ -365,18 +378,25 @@ static int qcom_smem_alloc_private(struct qcom_smem *smem, end = phdr_to_last_uncached_entry(phdr); cached = phdr_to_last_cached_entry(phdr); - if (WARN_ON((void *)end > p_end || (void *)cached > p_end)) + if (WARN_ON(!IN_PARTITION_RANGE(end, 0, phdr, cached) || + cached > p_end)) return -EINVAL; - while (hdr < end) { + while ((hdr < end) && ((hdr + 1) < end)) { if (hdr->canary != SMEM_PRIVATE_CANARY) goto bad_canary; if (le16_to_cpu(hdr->item) == item) return -EEXIST; - hdr = uncached_entry_next(hdr); + next_hdr = uncached_entry_next(hdr); + + if (WARN_ON(next_hdr <= hdr)) + return -EINVAL; + + hdr = next_hdr; } - if (WARN_ON((void *)hdr > p_end)) + + if (WARN_ON((void *)hdr > (void *)end)) return -EINVAL; /* Check that we don't grow into the cached region */ @@ -534,9 +554,11 @@ static void *qcom_smem_get_private(struct qcom_smem *smem, unsigned item, size_t *size) { - struct smem_private_entry *e, *end; + struct smem_private_entry *e, *uncached_end, *cached_end; + struct smem_private_entry *next_e; struct smem_partition_header *phdr; void *item_ptr, *p_end; + size_t entry_size = 0; u32 partition_size; size_t cacheline; u32 padding_data; @@ -548,72 +570,87 @@ static void *qcom_smem_get_private(struct qcom_smem *smem, cacheline = le32_to_cpu(entry->cacheline); e = phdr_to_first_uncached_entry(phdr); - end = phdr_to_last_uncached_entry(phdr); + uncached_end = phdr_to_last_uncached_entry(phdr); + cached_end = phdr_to_last_cached_entry(phdr); - if (WARN_ON((void *)end > p_end)) + if (WARN_ON(!IN_PARTITION_RANGE(uncached_end, 0, phdr, cached_end) + || (void *)cached_end > p_end)) return ERR_PTR(-EINVAL); - while (e < end) { + while ((e < uncached_end) && ((e + 1) < uncached_end)) { if (e->canary != SMEM_PRIVATE_CANARY) goto invalid_canary; if (le16_to_cpu(e->item) == item) { - if (size != NULL) { - e_size = le32_to_cpu(e->size); - padding_data = le16_to_cpu(e->padding_data); + e_size = le32_to_cpu(e->size); + padding_data = le16_to_cpu(e->padding_data); - if (e_size < partition_size - && padding_data < e_size) - *size = e_size - padding_data; - else - return ERR_PTR(-EINVAL); - } + if (e_size < partition_size && padding_data < e_size) + entry_size = e_size - padding_data; + else + return ERR_PTR(-EINVAL); item_ptr = uncached_entry_to_item(e); - if (WARN_ON(item_ptr > p_end)) + + if (WARN_ON(!IN_PARTITION_RANGE(item_ptr, entry_size, e, uncached_end))) return ERR_PTR(-EINVAL); + if (size != NULL) + *size = entry_size; + return item_ptr; } - e = uncached_entry_next(e); + next_e = 
uncached_entry_next(e); + if (WARN_ON(next_e <= e)) + return ERR_PTR(-EINVAL); + + e = next_e; } - if (WARN_ON((void *)e > p_end)) + if (WARN_ON((void *)e > (void *)uncached_end)) return ERR_PTR(-EINVAL); /* Item was not found in the uncached list, search the cached list */ - e = phdr_to_first_cached_entry(phdr, cacheline); - end = phdr_to_last_cached_entry(phdr); + if (cached_end == p_end) + return ERR_PTR(-ENOENT); - if (WARN_ON((void *)e < (void *)phdr || (void *)end > p_end)) + e = phdr_to_first_cached_entry(phdr, cacheline); + + if (WARN_ON(!IN_PARTITION_RANGE(cached_end, 0, uncached_end, p_end) || + !IN_PARTITION_RANGE(e, sizeof(*e), cached_end, p_end))) return ERR_PTR(-EINVAL); - while (e > end) { + while (e > cached_end) { if (e->canary != SMEM_PRIVATE_CANARY) goto invalid_canary; if (le16_to_cpu(e->item) == item) { - if (size != NULL) { - e_size = le32_to_cpu(e->size); - padding_data = le16_to_cpu(e->padding_data); + e_size = le32_to_cpu(e->size); + padding_data = le16_to_cpu(e->padding_data); - if (e_size < partition_size - && padding_data < e_size) - *size = e_size - padding_data; - else - return ERR_PTR(-EINVAL); - } + if (e_size < partition_size && padding_data < e_size) + entry_size = e_size - padding_data; + else + return ERR_PTR(-EINVAL); item_ptr = cached_entry_to_item(e); - if (WARN_ON(item_ptr < (void *)phdr)) + if (WARN_ON(!IN_PARTITION_RANGE(item_ptr, entry_size, cached_end, e))) return ERR_PTR(-EINVAL); + if (size != NULL) + *size = entry_size; + return item_ptr; } - e = cached_entry_next(e, cacheline); + next_e = cached_entry_next(e, cacheline); + if (WARN_ON(next_e >= e)) + return ERR_PTR(-EINVAL); + + e = next_e; } + if (WARN_ON((void *)e < (void *)phdr)) return ERR_PTR(-EINVAL); From d830cc50b9bfc55b9a3da96de560962556fda132 Mon Sep 17 00:00:00 2001 From: Ajit Kumar Date: Mon, 21 Aug 2023 12:36:55 +0530 Subject: [PATCH 13/16] soc: qcom: add out of bound check for AON fifo Add out of bound check while parsing the SPI slave-to-master fifo. 
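A stand-alone sketch of the bounded parse loop is shown below; the record layout (two-byte id, two-byte length, then payload) and all sizes are illustrative, not the actual slatecom event format, and the payload-fit check is an extra precaution in this sketch. The point is that every read first checks the bytes that remain, so a short or corrupted FIFO cannot push the cursor past the end of the buffer.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define ID_LEN		2u
#define SIZE_LEN	2u

static void parse_fifo(const uint8_t *data, uint16_t data_len)
{
	while (data_len >= ID_LEN + SIZE_LEN) {
		uint16_t event_id, p_len;

		memcpy(&event_id, data, ID_LEN);
		data += ID_LEN;
		data_len -= ID_LEN;

		memcpy(&p_len, data, SIZE_LEN);
		data += SIZE_LEN;
		data_len -= SIZE_LEN;

		/* The declared payload must also fit in what is left. */
		if (p_len > data_len)
			break;

		printf("event 0x%04x, payload %u bytes\n",
		       (unsigned int)event_id, (unsigned int)p_len);
		data += p_len;
		data_len -= p_len;
	}
}

int main(void)
{
	/* One complete 4-byte event followed by a truncated header. */
	const uint8_t fifo[] = { 0x01, 0x00, 0x04, 0x00, 0xaa, 0xbb, 0xcc, 0xdd,
				 0x02, 0x00 };

	parse_fifo(fifo, sizeof(fifo));
	return 0;
}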
Change-Id: I14f707307fa277b2f8a7b543d3cc5e9ebac885db Signed-off-by: Ajit Kumar (cherry picked from commit 0950011ce6a06720105c0849bb35e33492d629e7) --- drivers/soc/qcom/slatecom_spi.c | 18 +++++++++++++++--- 1 file changed, 15 insertions(+), 3 deletions(-) diff --git a/drivers/soc/qcom/slatecom_spi.c b/drivers/soc/qcom/slatecom_spi.c index de30f47a1288..a728ba43c90f 100644 --- a/drivers/soc/qcom/slatecom_spi.c +++ b/drivers/soc/qcom/slatecom_spi.c @@ -303,7 +303,7 @@ void slatecom_slatedown_handler(void) } EXPORT_SYMBOL(slatecom_slatedown_handler); -static void parse_fifo(uint8_t *data, union slatecom_event_data_type *event_data) +static void parse_fifo(uint8_t *data, uint16_t data_len, union slatecom_event_data_type *event_data) { uint16_t p_len; uint8_t sub_id; @@ -314,11 +314,16 @@ static void parse_fifo(uint8_t *data, union slatecom_event_data_type *event_data struct event_list *data_list; while (*data != '\0') { - + if (data_len < HED_EVENT_ID_LEN) + break; event_id = *((uint16_t *) data); data = data + HED_EVENT_ID_LEN; + data_len = data_len - HED_EVENT_ID_LEN; + if (data_len < HED_EVENT_SIZE_LEN) + break; p_len = *((uint16_t *) data); data = data + HED_EVENT_SIZE_LEN; + data_len = data_len - HED_EVENT_SIZE_LEN; if (event_id == 0xFFFE) { @@ -346,8 +351,14 @@ static void parse_fifo(uint8_t *data, union slatecom_event_data_type *event_data send_event(SLATECOM_EVENT_TO_MASTER_FIFO_USED, event_data); } + } else if (event_id == 0xc8) { + data = data + 12; + data_len = data_len - 12; + pr_err("Packet Received = 0x%X, len = %u\n", event_id, p_len); } + data = data + p_len; + data_len = data_len - p_len; } if (!list_empty(&pr_lst_hd)) queue_work(wq, &input_work); @@ -451,7 +462,8 @@ static void send_back_notification(uint32_t slav_status_reg, if (!ret) { augmnt_fifo((uint8_t *)ptr, master_fifo_used*SLATE_SPI_WORD_SIZE); - parse_fifo((uint8_t *)ptr, &event_data); + parse_fifo((uint8_t *)ptr, + master_fifo_used*SLATE_SPI_WORD_SIZE, &event_data); } kfree(ptr); } From 0106549bb994956320ed766f687edde09bd2a2c3 Mon Sep 17 00:00:00 2001 From: Kaushal Hooda Date: Fri, 2 Jun 2023 20:21:06 +0530 Subject: [PATCH 14/16] rpmsg: slatecom: out of bound read from process_cmd When dereferencing "rx_data" as type "glink_slatecom_msg" , we didn't check if "rx_data" has enough room to hold that type. The "rx_size" is read from slate to master fifo and if received rx_size is less then "glink_slatecom_msg" then it could lead to heap out of bounds read. If received rx_size is less then the expected glink_slatecom_msg then return back as a bad message. 
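A minimal stand-alone model of that guard, with an illustrative message layout rather than the real glink_slatecom_msg, is given below: before the cursor is interpreted as a header, the remaining byte count is checked, so a short receive is rejected as a bad message instead of being read past the end of the buffer.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct msg_hdr {
	uint16_t cmd;
	uint16_t param1;
	uint32_t param2;
};

static int process_cmds(const void *rx_data, uint32_t rx_size)
{
	uint32_t offset = 0;

	while (offset < rx_size) {
		struct msg_hdr msg;

		/* Bad message: not enough bytes left for a full header. */
		if (rx_size - offset < sizeof(msg))
			return -1;

		memcpy(&msg, (const uint8_t *)rx_data + offset, sizeof(msg));
		offset += sizeof(msg);
		printf("cmd %u, params %u/%u\n", (unsigned int)msg.cmd,
		       (unsigned int)msg.param1, (unsigned int)msg.param2);
	}
	return 0;
}

int main(void)
{
	uint8_t buf[12] = { 0 };	/* one full 8-byte header plus 4 stray bytes */

	if (process_cmds(buf, sizeof(buf)))
		fprintf(stderr, "rejected truncated command stream\n");
	return 0;
}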
Change-Id: Idde757ee70c7c88c22e4f036e6da0280e3b385d0 Signed-off-by: Kaushal Hooda (cherry picked from commit 7ddb61a6ac27c795177021b05f60377482043222) --- drivers/rpmsg/qcom_glink_slatecom.c | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) diff --git a/drivers/rpmsg/qcom_glink_slatecom.c b/drivers/rpmsg/qcom_glink_slatecom.c index d57adfa3c92f..18ae3934aa15 100644 --- a/drivers/rpmsg/qcom_glink_slatecom.c +++ b/drivers/rpmsg/qcom_glink_slatecom.c @@ -1900,7 +1900,7 @@ static void glink_slatecom_handle_rx_done(struct glink_slatecom *glink, mutex_unlock(&channel->intent_lock); } -static void glink_slatecom_process_cmd(struct glink_slatecom *glink, void *rx_data, +static int glink_slatecom_process_cmd(struct glink_slatecom *glink, void *rx_data, u32 rx_size) { struct glink_slatecom_msg *msg; @@ -1909,12 +1909,18 @@ static void glink_slatecom_process_cmd(struct glink_slatecom *glink, void *rx_da unsigned int param3; unsigned int param4; unsigned int cmd; - int offset = 0; - int ret; + u32 offset = 0; + int ret = 0; u16 name_len; char *name; while (offset < rx_size) { + if (rx_size - offset < sizeof(struct glink_slatecom_msg)) { + ret = -EBADMSG; + GLINK_ERR(glink, "%s: Error %d process cmd\n", __func__, ret); + return ret; + } + msg = (struct glink_slatecom_msg *)(rx_data + offset); offset += sizeof(*msg); @@ -1997,6 +2003,7 @@ static void glink_slatecom_process_cmd(struct glink_slatecom *glink, void *rx_da break; } } + return ret; } /** From bff9163e239a8469e20c480e189dc87ac953cdea Mon Sep 17 00:00:00 2001 From: Kaushal Hooda Date: Tue, 20 Jun 2023 11:06:00 +0530 Subject: [PATCH 15/16] rpmsg: slatecom: maintain rx_size to read For cmd close_ack or open request where rx_size is being incrementing with respect to offset might lead to out of bound read from rx_data. Decrease rx_size as we process commands. Change-Id: I492eadcbebb78386fc20f744eb9ad8db4a2914fc Signed-off-by: Kaushal Hooda (cherry picked from commit ab0f86134fcb5d4e066dffcc6e1684138e96337d) --- drivers/rpmsg/qcom_glink_slatecom.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/rpmsg/qcom_glink_slatecom.c b/drivers/rpmsg/qcom_glink_slatecom.c index d57adfa3c92f..79e241a04ce5 100644 --- a/drivers/rpmsg/qcom_glink_slatecom.c +++ b/drivers/rpmsg/qcom_glink_slatecom.c @@ -1935,7 +1935,7 @@ static void glink_slatecom_process_cmd(struct glink_slatecom *glink, void *rx_da case SLATECOM_CMD_CLOSE_ACK: glink_slatecom_rx_defer(glink, rx_data + offset - sizeof(*msg), - rx_size + offset - sizeof(*msg), 0); + rx_size - offset + sizeof(*msg), 0); break; case SLATECOM_CMD_RX_INTENT_REQ: glink_slatecom_handle_intent_req(glink, param1, param2); @@ -1948,7 +1948,7 @@ static void glink_slatecom_process_cmd(struct glink_slatecom *glink, void *rx_da name = rx_data + offset; glink_slatecom_rx_defer(glink, rx_data + offset - sizeof(*msg), - rx_size + offset - sizeof(*msg), + rx_size - offset + sizeof(*msg), ALIGN(name_len, SLATECOM_ALIGNMENT)); offset += ALIGN(name_len, SLATECOM_ALIGNMENT); From 6df00291cbc46b9d1e6127503e6b1f6e35388721 Mon Sep 17 00:00:00 2001 From: Krishna chaitanya chundru Date: Fri, 16 Jun 2023 10:27:03 +0530 Subject: [PATCH 16/16] bus: mhi: Fix potential out-of-bound access In mhi_sat_isvalid_header function if the length is less than the size of header then there can be out-of-bound access. So fix the len check in the function. 
Change-Id: I80f1556557b1bf2f30c07f6377bd6e3db48712b3
Signed-off-by: Krishna chaitanya chundru
(cherry picked from commit d7601393dce0e11ab823fe530e8d5f2277b5fdc0)
---
 drivers/bus/mhi/devices/mhi_satellite.c | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/drivers/bus/mhi/devices/mhi_satellite.c b/drivers/bus/mhi/devices/mhi_satellite.c
index f7c6c07c30ab..2e33051d910a 100644
--- a/drivers/bus/mhi/devices/mhi_satellite.c
+++ b/drivers/bus/mhi/devices/mhi_satellite.c
@@ -357,7 +357,8 @@ static struct mhi_sat_device *find_sat_dev_by_id(
 static bool mhi_sat_isvalid_header(struct sat_header *hdr, int len)
 {
 	/* validate payload size */
-	if (len >= sizeof(*hdr) && (len != hdr->payload_size + sizeof(*hdr)))
+	if ((len < sizeof(*hdr)) ||
+		(len >= sizeof(*hdr) && (len != hdr->payload_size + sizeof(*hdr))))
 		return false;
 
 	/* validate SAT IPC version */
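A stand-alone sketch of the corrected validation, with an illustrative header layout rather than the real sat_header, shows both halves of the check: a buffer shorter than the header itself is rejected before payload_size is ever trusted, and an adequately sized buffer must still match the declared total length exactly.

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct sat_hdr {
	uint32_t version;
	uint32_t payload_size;
};

static bool header_is_valid(const struct sat_hdr *hdr, size_t len)
{
	/* Too short to even contain the header: reject before reading fields. */
	if (len < sizeof(*hdr))
		return false;

	/* Total length must be exactly the header plus the declared payload. */
	return len == sizeof(*hdr) + hdr->payload_size;
}

int main(void)
{
	struct sat_hdr hdr = { .version = 1, .payload_size = 16 };

	printf("short buffer: %d\n", header_is_valid(&hdr, 4));			/* 0 */
	printf("exact length: %d\n", header_is_valid(&hdr, sizeof(hdr) + 16));	/* 1 */
	printf("wrong length: %d\n", header_is_valid(&hdr, sizeof(hdr) + 8));	/* 0 */
	return 0;
}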