From 73d3217026f69f4c6aee493ed112507e8675cb22 Mon Sep 17 00:00:00 2001 From: Pratham Pratap Date: Tue, 27 Sep 2022 14:28:12 +0530 Subject: [PATCH 01/30] usb: gadget: cdev: Requeue the request outside spinlock 'Commit I87015f00267af ("usb: gadget: cdev: Requeue the request in cser_resume if start_rx fails")' is requeuing the request in if start_rx fails as dut is in suspend mode. This needs to done outside of spinlock as usb_cser_start_rx will again acquire the port_lock to queue the request causing a deadlock condition. Fix this by requeuing the request outside the spinlock. Change-Id: Ifa1cdb338d6a7e6a2d98ced55c4dbca9caf6ba0e Signed-off-by: Pratham Pratap --- drivers/usb/gadget/function/f_cdev.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/usb/gadget/function/f_cdev.c b/drivers/usb/gadget/function/f_cdev.c index bef28623462d..9357949a1525 100644 --- a/drivers/usb/gadget/function/f_cdev.c +++ b/drivers/usb/gadget/function/f_cdev.c @@ -573,7 +573,9 @@ static void usb_cser_resume(struct usb_function *f) if (port->setup_pending) { pr_info("%s: start_rx called due to rx_out error.\n", __func__); port->setup_pending = false; + spin_unlock_irqrestore(&port->port_lock, flags); usb_cser_start_rx(port); + spin_lock_irqsave(&port->port_lock, flags); } in = port->port_usb.in; /* process any pending requests */ From 553b73b6ae612bf320294b1c22f6415cd016c746 Mon Sep 17 00:00:00 2001 From: Srinivasarao Pathipati Date: Fri, 2 Sep 2022 17:12:28 +0530 Subject: [PATCH 02/30] ANDROID: ABI: Update allowed list for QCOM Update the android/abi_gki_aarch64_qcom with API refcount_warn_saturate. Bug: 244699106 Change-Id: Iee45f9e7ab3cc24b678fac28b5981c0e44745769 Signed-off-by: Srinivasarao Pathipati --- android/abi_gki_aarch64_qcom | 1 + 1 file changed, 1 insertion(+) diff --git a/android/abi_gki_aarch64_qcom b/android/abi_gki_aarch64_qcom index a2acb78eadaf..d6f756e415e4 100644 --- a/android/abi_gki_aarch64_qcom +++ b/android/abi_gki_aarch64_qcom @@ -1942,6 +1942,7 @@ refcount_dec_not_one refcount_inc_checked refcount_inc_not_zero_checked + refcount_warn_saturate __refrigerator regcache_cache_bypass regcache_cache_only From a508c150551f1d2ab1fd585a3a2eba2d822e18b8 Mon Sep 17 00:00:00 2001 From: Jyothi Kumar Seerapu Date: Thu, 22 Sep 2022 23:52:01 +0530 Subject: [PATCH 03/30] pci: msm: Disable BDF halt for sdxlemur This change will disable BDF halt based on BDF halt device tree flag for sdxlemur. 
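For reference, the feature is opt-in per controller through devicetree. A minimal sketch of how a board might enable it (the node label below is a placeholder; only the qcom,bdf-halt-dis property is defined by this patch):

	&pcie_rc0 {
		qcom,bdf-halt-dis;
	};

When the property is present, the driver clears BIT(0) of the PARF AXI_MSTR_WR_NS_BDF_HALT register while enabling the link.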
Change-Id: Ic2cc57d4e7938aaa492c9842abb24e4e8e68b651 Signed-off-by: Jyothi Kumar Seerapu --- drivers/pci/controller/pci-msm.c | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/drivers/pci/controller/pci-msm.c b/drivers/pci/controller/pci-msm.c index f27e6e68ed24..bf1718dfe5a1 100644 --- a/drivers/pci/controller/pci-msm.c +++ b/drivers/pci/controller/pci-msm.c @@ -74,6 +74,7 @@ #define PCIE20_PARF_MHI_CLOCK_RESET_CTRL (0x174) #define PCIE20_PARF_AXI_MSTR_RD_ADDR_HALT (0x1a4) #define PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT (0x1a8) +#define PCIE20_PCIE_PARF_AXI_MSTR_WR_NS_BDF_HALT (0x4a0) #define PCIE20_PARF_LTSSM (0x1b0) #define PCIE20_PARF_INT_ALL_STATUS (0x224) #define PCIE20_PARF_INT_ALL_CLEAR (0x228) @@ -847,6 +848,7 @@ struct msm_pcie_dev_t { bool linkdown_panic; uint32_t boot_option; bool pcie_halt_feature_dis; + bool pcie_bdf_halt_dis; uint32_t rc_idx; uint32_t phy_ver; @@ -1684,6 +1686,8 @@ static void msm_pcie_show_status(struct msm_pcie_dev_t *dev) dev->slv_addr_space_size); PCIE_DBG_FS(dev, "PCIe: halt_feature_dis is %d\n", dev->pcie_halt_feature_dis); + PCIE_DBG_FS(dev, "PCIe: bdf_halt_dis is %d\n", + dev->pcie_bdf_halt_dis); PCIE_DBG_FS(dev, "phy_status_offset: 0x%x\n", dev->phy_status_offset); PCIE_DBG_FS(dev, "phy_status_bit: %u\n", @@ -4761,6 +4765,12 @@ static int msm_pcie_enable(struct msm_pcie_dev_t *dev) BIT(31) | val); } + if (dev->pcie_bdf_halt_dis) { + val = readl_relaxed(dev->parf + PCIE20_PCIE_PARF_AXI_MSTR_WR_NS_BDF_HALT); + msm_pcie_write_reg(dev->parf, PCIE20_PCIE_PARF_AXI_MSTR_WR_NS_BDF_HALT, + (~BIT(0)) & val); + } + /* init tcsr */ if (dev->tcsr_config) pcie_tcsr_init(dev); @@ -6312,6 +6322,11 @@ static int msm_pcie_probe(struct platform_device *pdev) PCIE_DBG(pcie_dev, "PCIe halt feature is %s enabled.\n", pcie_dev->pcie_halt_feature_dis ? "not" : ""); + pcie_dev->pcie_bdf_halt_dis = of_property_read_bool(of_node, + "qcom,bdf-halt-dis"); + PCIE_DBG(pcie_dev, "PCIe BDF halt feature is %s enabled.\n", + pcie_dev->pcie_bdf_halt_dis ? "not" : ""); + of_property_read_u32(of_node, "qcom,phy-status-offset", &pcie_dev->phy_status_offset); PCIE_DBG(pcie_dev, "RC%d: phy-status-offset: 0x%x.\n", pcie_dev->rc_idx, From 840766fa596db606d81d5cc13822e3a758001532 Mon Sep 17 00:00:00 2001 From: Srinivasarao Pathipati Date: Mon, 10 Oct 2022 11:56:51 +0530 Subject: [PATCH 04/30] virt: haven: rm_core: Validate notifications using payload size only Preparation for cleaning up the resource manager core. Notification size validation needs to only be performed on the payload size. The RM RPC header will always be present, else the RM driver would not have been able to determine that the message is a notification. Thus, considering the RPC header is not required. 
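In effect, every case in the notification switch moves from checking the full message size to checking only the payload. A simplified sketch of the new pattern (illustrative; the actual code handles each notification type in its own case):

	size_t payload_size = recv_buff_size - sizeof(struct hh_rm_rpc_hdr);

	if (payload_size != sizeof(struct hh_rm_notif_vm_status_payload))
		return -EINVAL;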
Change-Id: Id6a561a77d693e4f8f307d9d0d96b8d173940c60 Signed-off-by: Elliot Berman Signed-off-by: Srinivasarao Pathipati --- drivers/virt/haven/hh_rm_core.c | 44 +++++++++++++++++++-------------- 1 file changed, 25 insertions(+), 19 deletions(-) diff --git a/drivers/virt/haven/hh_rm_core.c b/drivers/virt/haven/hh_rm_core.c index 5bb93ea78643..157513a1b1ff 100644 --- a/drivers/virt/haven/hh_rm_core.c +++ b/drivers/virt/haven/hh_rm_core.c @@ -170,6 +170,7 @@ static int hh_rm_process_notif(void *recv_buff, size_t recv_buff_size) struct hh_rm_rpc_hdr *hdr = recv_buff; u32 notification = hdr->msg_id; void *payload = NULL; + size_t payload_size; int ret = 0; pr_debug("Notification received from RM-VM: %x\n", notification); @@ -197,87 +198,92 @@ static int hh_rm_process_notif(void *recv_buff, size_t recv_buff_size) payload = connection->recv_buff; recv_buff_size = connection->recv_buff_size; } + payload_size = recv_buff_size - sizeof(*hdr); switch (notification) { case HH_RM_NOTIF_VM_STATUS: - if (recv_buff_size != sizeof(*hdr) + + if (payload_size != sizeof(struct hh_rm_notif_vm_status_payload)) { pr_err("%s: Invalid size for VM_STATUS notif: %u\n", - __func__, recv_buff_size - sizeof(*hdr)); + __func__, payload_size); ret = -EINVAL; goto err; } break; case HH_RM_NOTIF_VM_IRQ_LENT: - if (recv_buff_size != sizeof(*hdr) + + if (payload_size != sizeof(struct hh_rm_notif_vm_irq_lent_payload)) { pr_err("%s: Invalid size for VM_IRQ_LENT notif: %u\n", - __func__, recv_buff_size - sizeof(*hdr)); + __func__, payload_size); ret = -EINVAL; goto err; } break; case HH_RM_NOTIF_VM_IRQ_RELEASED: - if (recv_buff_size != sizeof(*hdr) + + if (payload_size != sizeof(struct hh_rm_notif_vm_irq_released_payload)) { pr_err("%s: Invalid size for VM_IRQ_REL notif: %u\n", - __func__, recv_buff_size - sizeof(*hdr)); + __func__, payload_size); ret = -EINVAL; goto err; } break; case HH_RM_NOTIF_VM_IRQ_ACCEPTED: - if (recv_buff_size != sizeof(*hdr) + + if (payload_size != sizeof(struct hh_rm_notif_vm_irq_accepted_payload)) { pr_err("%s: Invalid size for VM_IRQ_ACCEPTED notif: %u\n", - __func__, recv_buff_size - sizeof(*hdr)); + __func__, payload_size); ret = -EINVAL; goto err; } break; case HH_RM_NOTIF_MEM_SHARED: - if (recv_buff_size < sizeof(*hdr) + + if (payload_size < sizeof(struct hh_rm_notif_mem_shared_payload)) { pr_err("%s: Invalid size for MEM_SHARED notif: %u\n", - __func__, recv_buff_size - sizeof(*hdr)); + __func__, payload_size); ret = -EINVAL; goto err; } break; case HH_RM_NOTIF_MEM_RELEASED: - if (recv_buff_size != sizeof(*hdr) + + if (payload_size != sizeof(struct hh_rm_notif_mem_released_payload)) { pr_err("%s: Invalid size for MEM_RELEASED notif: %u\n", - __func__, recv_buff_size - sizeof(*hdr)); + __func__, payload_size); ret = -EINVAL; goto err; } break; case HH_RM_NOTIF_MEM_ACCEPTED: - if (recv_buff_size != sizeof(*hdr) + + if (payload_size != sizeof(struct hh_rm_notif_mem_accepted_payload)) { pr_err("%s: Invalid size for MEM_ACCEPTED notif: %u\n", - __func__, recv_buff_size - sizeof(*hdr)); + __func__, payload_size); ret = -EINVAL; goto err; } break; case HH_RM_NOTIF_VM_CONSOLE_CHARS: - if (recv_buff_size < sizeof(*hdr) + + if (payload_size >= sizeof(struct hh_rm_notif_vm_console_chars)) { struct hh_rm_notif_vm_console_chars *console_chars; u16 num_bytes; - console_chars = recv_buff + sizeof(*hdr); + console_chars = payload; num_bytes = console_chars->num_bytes; - if (sizeof(*hdr) + sizeof(*console_chars) + num_bytes != - recv_buff_size) { + if (sizeof(*console_chars) + num_bytes != + payload_size) { 
pr_err("%s: Invalid size for VM_CONSOLE_CHARS notify %u\n", - __func__, recv_buff_size - sizeof(*hdr)); + __func__, payload_size); ret = -EINVAL; goto err; } + } else { + pr_err("%s: Invalid size for VM_CONSOLE_CHARS notify %u\n", + __func__, payload_size); + goto err; } break; default: From 0178bcb75fb2f1e36d5970961a1f6ff721e339db Mon Sep 17 00:00:00 2001 From: Sandeep Singh Date: Mon, 26 Sep 2022 16:11:35 +0530 Subject: [PATCH 05/30] icnss2: Enable power supply framework for 5.4 kernel Icnss driver indicates vbatt voltage level to wlan fw based on battery percentage. This change adds power supply framework code to enable icnss2 driver to measure battery percentage and update vbatt volatge level to wlan fw. Change-Id: I376805da38222a5e7a20ae4a1737bc978ee6a9ae Signed-off-by: Sandeep Singh --- drivers/soc/qcom/icnss2/main.c | 27 ++++++++ drivers/soc/qcom/icnss2/main.h | 12 ++++ drivers/soc/qcom/icnss2/power.c | 115 ++++++++++++++++++++++++++++++++ drivers/soc/qcom/icnss2/power.h | 2 +- 4 files changed, 155 insertions(+), 1 deletion(-) diff --git a/drivers/soc/qcom/icnss2/main.c b/drivers/soc/qcom/icnss2/main.c index 7d7021a33e14..51aff64aaa5c 100644 --- a/drivers/soc/qcom/icnss2/main.c +++ b/drivers/soc/qcom/icnss2/main.c @@ -722,6 +722,9 @@ static int icnss_driver_event_server_arrive(struct icnss_priv *priv, if (priv->vbatt_supported) icnss_init_vph_monitor(priv); + if (priv->psf_supported) + queue_work(priv->soc_update_wq, &priv->soc_update_work); + return ret; device_info_failure: @@ -745,6 +748,9 @@ static int icnss_driver_event_server_exit(struct icnss_priv *priv) adc_tm_disable_chan_meas(priv->adc_tm_dev, &priv->vph_monitor_params); + if (priv->psf_supported) + priv->last_updated_voltage = 0; + return 0; } @@ -3610,6 +3616,13 @@ static int icnss_resource_parse(struct icnss_priv *priv) goto put_vreg; } + if (of_property_read_bool(pdev->dev.of_node, "qcom,psf-supported")) { + ret = icnss_get_psf_info(priv); + if (ret < 0) + goto out; + priv->psf_supported = true; + } + if (priv->device_id == ADRASTEA_DEVICE_ID) { res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "membase"); @@ -4145,6 +4158,18 @@ out_reset_drvdata: return ret; } +static void icnss_unregister_power_supply_notifier(struct icnss_priv *priv) +{ + if (priv->batt_psy) + power_supply_put(penv->batt_psy); + + if (priv->psf_supported) { + flush_workqueue(priv->soc_update_wq); + destroy_workqueue(priv->soc_update_wq); + power_supply_unreg_notifier(&priv->psf_nb); + } +} + static int icnss_remove(struct platform_device *pdev) { struct icnss_priv *priv = dev_get_drvdata(&pdev->dev); @@ -4164,6 +4189,8 @@ static int icnss_remove(struct platform_device *pdev) icnss_debugfs_destroy(priv); + icnss_unregister_power_supply_notifier(penv); + icnss_sysfs_destroy(priv); complete_all(&priv->unblock_shutdown); diff --git a/drivers/soc/qcom/icnss2/main.h b/drivers/soc/qcom/icnss2/main.h index 2c13b3b442a4..b62029408be9 100644 --- a/drivers/soc/qcom/icnss2/main.h +++ b/drivers/soc/qcom/icnss2/main.h @@ -13,6 +13,7 @@ #include #include #include +#include #include #include #include @@ -167,6 +168,11 @@ struct icnss_clk_cfg { u32 required; }; +struct icnss_battery_level { + int lower_battery_threshold; + int ldo_voltage; +}; + struct icnss_clk_info { struct list_head list; struct clk *clk; @@ -466,6 +472,12 @@ struct icnss_priv { struct icnss_dms_data dms; u8 use_nv_mac; u32 wlan_en_delay_ms; + bool psf_supported; + struct notifier_block psf_nb; + struct power_supply *batt_psy; + int last_updated_voltage; + struct work_struct 
soc_update_work; + struct workqueue_struct *soc_update_wq; unsigned long device_config; struct timer_list recovery_timer; }; diff --git a/drivers/soc/qcom/icnss2/power.c b/drivers/soc/qcom/icnss2/power.c index 86b33c5a5ad9..0f85f010e2b1 100644 --- a/drivers/soc/qcom/icnss2/power.c +++ b/drivers/soc/qcom/icnss2/power.c @@ -29,6 +29,14 @@ static struct icnss_vreg_cfg icnss_adrestea_vreg_list[] = { {"vdd-smps", 984000, 984000, 0, 0, 0, false, true}, }; +static struct icnss_battery_level icnss_battery_level[] = { + {70, 3300000}, + {60, 3200000}, + {50, 3100000}, + {25, 3000000}, + {0, 2850000}, +}; + static struct icnss_clk_cfg icnss_clk_list[] = { {"rf_clk", 0, 0}, }; @@ -55,6 +63,9 @@ static struct icnss_clk_cfg icnss_adrestea_clk_list[] = { #define MAX_TCS_CMD_NUM 5 #define BT_CXMX_VOLTAGE_MV 950 +#define ICNSS_BATTERY_LEVEL_COUNT ARRAY_SIZE(icnss_battery_level) +#define ICNSS_MAX_BATTERY_LEVEL 100 + static int icnss_get_vreg_single(struct icnss_priv *priv, struct icnss_vreg_info *vreg) { @@ -821,6 +832,110 @@ out: return ret; } +static int icnss_get_battery_level(struct icnss_priv *priv) +{ + int err = 0, battery_percentage = 0; + union power_supply_propval psp = {0,}; + + if (!priv->batt_psy) + priv->batt_psy = power_supply_get_by_name("battery"); + + if (priv->batt_psy) { + err = power_supply_get_property(priv->batt_psy, + POWER_SUPPLY_PROP_CAPACITY, + &psp); + if (err) { + icnss_pr_err("battery percentage read error:%d\n", err); + goto out; + } + battery_percentage = psp.intval; + } + + icnss_pr_info("Battery Percentage: %d\n", battery_percentage); +out: + return battery_percentage; +} + +static void icnss_update_soc_level(struct work_struct *work) +{ + int battery_percentage = 0, current_updated_voltage = 0, err = 0; + int level_count; + struct icnss_priv *priv = container_of(work, struct icnss_priv, soc_update_work); + + battery_percentage = icnss_get_battery_level(priv); + if (!battery_percentage || + battery_percentage > ICNSS_MAX_BATTERY_LEVEL) { + icnss_pr_err("Battery percentage read failure\n"); + return; + } + + for (level_count = 0; level_count < ICNSS_BATTERY_LEVEL_COUNT; + level_count++) { + if (battery_percentage >= + icnss_battery_level[level_count].lower_battery_threshold) { + current_updated_voltage = + icnss_battery_level[level_count].ldo_voltage; + break; + } + } + + if (level_count != ICNSS_BATTERY_LEVEL_COUNT && + priv->last_updated_voltage != current_updated_voltage) { + err = icnss_send_vbatt_update(priv, current_updated_voltage); + if (err < 0) { + icnss_pr_err("Unable to update ldo voltage"); + return; + } + priv->last_updated_voltage = current_updated_voltage; + } +} + +static int icnss_battery_supply_callback(struct notifier_block *nb, + unsigned long event, void *data) +{ + struct power_supply *psy = data; + struct icnss_priv *priv = container_of(nb, struct icnss_priv, + psf_nb); + if (strcmp(psy->desc->name, "battery")) + return NOTIFY_OK; + + if (test_bit(ICNSS_WLFW_CONNECTED, &priv->state) && + !test_bit(ICNSS_FW_DOWN, &priv->state)) + queue_work(priv->soc_update_wq, &priv->soc_update_work); + + return NOTIFY_OK; +} + +int icnss_get_psf_info(struct icnss_priv *priv) +{ + int ret = 0; + + priv->soc_update_wq = alloc_workqueue("icnss_soc_update", + WQ_UNBOUND, 1); + if (!priv->soc_update_wq) { + icnss_pr_err("Workqueue creation failed for soc update\n"); + ret = -EFAULT; + goto out; + } + + priv->psf_nb.notifier_call = icnss_battery_supply_callback; + ret = power_supply_reg_notifier(&priv->psf_nb); + if (ret < 0) { + icnss_pr_err("Power supply framework 
registration err: %d\n", + ret); + goto err_psf_registration; + } + + INIT_WORK(&priv->soc_update_work, icnss_update_soc_level); + + return 0; + +err_psf_registration: + destroy_workqueue(priv->soc_update_wq); +out: + return ret; +} + int icnss_get_cpr_info(struct icnss_priv *priv) { struct platform_device *plat_dev = priv->pdev; diff --git a/drivers/soc/qcom/icnss2/power.h b/drivers/soc/qcom/icnss2/power.h index 003694135435..490728f0e9a7 100644 --- a/drivers/soc/qcom/icnss2/power.h +++ b/drivers/soc/qcom/icnss2/power.h @@ -15,5 +15,5 @@ void icnss_put_resources(struct icnss_priv *priv); void icnss_put_vreg(struct icnss_priv *priv); void icnss_put_clk(struct icnss_priv *priv); int icnss_vreg_unvote(struct icnss_priv *priv); - +int icnss_get_psf_info(struct icnss_priv *priv); #endif From 6824b7f6ea5572de4420ee2ac68e0f2bd94dd842 Mon Sep 17 00:00:00 2001 From: Sandeep Singh Date: Mon, 26 Sep 2022 17:05:29 +0530 Subject: [PATCH 06/30] icnss: Add debug fs entry to call PSF callback This change adds an entry to call a callback from power supply framework for debugging and testing purpose. Change-Id: I5fdd4b1568524dd3926611ca1cfd49ed7884b8c1 Signed-off-by: Sandeep Singh --- drivers/soc/qcom/icnss2/debug.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/drivers/soc/qcom/icnss2/debug.c b/drivers/soc/qcom/icnss2/debug.c index e12cee7dfc04..88dda1cb3065 100644 --- a/drivers/soc/qcom/icnss2/debug.c +++ b/drivers/soc/qcom/icnss2/debug.c @@ -521,7 +521,8 @@ static int icnss_fw_debug_show(struct seq_file *s, void *data) seq_puts(s, " VAL: 2 (CCPM test)\n"); seq_puts(s, " VAL: 3 (Trigger Recovery)\n"); seq_puts(s, " VAL: 4 (allow recursive recovery)\n"); - seq_puts(s, " VAL: 3 (Disallow recursive recovery)\n"); + seq_puts(s, " VAL: 5 (Disallow recursive recovery)\n"); + seq_puts(s, " VAL: 6 (Trigger power supply callback)\n"); seq_puts(s, "\nCMD: dynamic_feature_mask\n"); seq_puts(s, " VAL: (64 bit feature mask)\n"); @@ -682,6 +683,9 @@ static ssize_t icnss_fw_debug_write(struct file *fp, case 5: icnss_disallow_recursive_recovery(&priv->pdev->dev); break; + case 6: + power_supply_changed(priv->batt_psy); + break; default: return -EINVAL; } From 2541b4e89190f0b12bcc7e4d35f0ccd97df674b8 Mon Sep 17 00:00:00 2001 From: Balaji Pothunoori Date: Mon, 12 Sep 2022 13:25:32 +0530 Subject: [PATCH 07/30] defconfig : Enable xt_multiport module for iptables Currently iptables doesn't has support for multiport config options and due to WFA 5.6.1 test is failing as part of QOS-R2. This change is to enable xt_multiport modules to support multiport config. 
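With CONFIG_NETFILTER_XT_MATCH_MULTIPORT=y, userspace can install a single rule that matches several ports at once, for example (port numbers here are illustrative only):

	iptables -A INPUT -p tcp -m multiport --dports 5001,5002,5003 -j ACCEPT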
Change-Id: If4bfdad7b6755b5f5532ec5b21d6272d7894fe34 Signed-off-by: Balaji Pothunoori --- arch/arm64/configs/vendor/lahaina_QGKI.config | 1 + 1 file changed, 1 insertion(+) diff --git a/arch/arm64/configs/vendor/lahaina_QGKI.config b/arch/arm64/configs/vendor/lahaina_QGKI.config index 2eb59768edd1..5358cae0d6d2 100644 --- a/arch/arm64/configs/vendor/lahaina_QGKI.config +++ b/arch/arm64/configs/vendor/lahaina_QGKI.config @@ -271,3 +271,4 @@ CONFIG_PCIEASPM_POWER_SUPERSAVE=y CONFIG_TLB_CONF_HANDLER=y CONFIG_PRINTK_CALLER=y CONFIG_QCOM_SUBSYSTEM_SLEEP_STATS=y +CONFIG_NETFILTER_XT_MATCH_MULTIPORT=y From faf7c82fabcf97d4abb4eddda18001dda2de93ae Mon Sep 17 00:00:00 2001 From: Srinivasarao Pathipati Date: Mon, 10 Oct 2022 14:30:57 +0530 Subject: [PATCH 08/30] virt: haven: rm_core: Always allocate a connection for notifications Currently, there is an optimized path to directly use the message queue buffer when a received notification fits within a single message and there aren't any fragments. This greatly complicates buffer management and might not provide any measurable benefit. Suppose Linux receives a notification with one fragment then a notification with 3 fragments. In current flow, there is 1 alloc for the first notification and 4 allocations for the 2nd notification (1 for each message + 1 for overall combined buffer). In the new flow, we could optimize this to 0 allocations for each message because we can re-use the same message buffer for each fragment and then a single alloc for the entire payload. There is, however, a slightly increased cost for single-message notifications in the form of an extra memcpy from the message buffer to notification buffer. Under the code optimizations and simplifications that come with this cost, it is a worthy cost. 
Change-Id: Ib52bd332f59b108d7dab9121c0beefed0aa66994 Signed-off-by: Elliot Berman Signed-off-by: Srinivasarao Pathipati --- drivers/virt/haven/hh_rm_core.c | 75 +++++++++------------------------ 1 file changed, 19 insertions(+), 56 deletions(-) diff --git a/drivers/virt/haven/hh_rm_core.c b/drivers/virt/haven/hh_rm_core.c index 157513a1b1ff..c4246b6ca3f6 100644 --- a/drivers/virt/haven/hh_rm_core.c +++ b/drivers/virt/haven/hh_rm_core.c @@ -132,73 +132,36 @@ int hh_rm_unregister_notifier(struct notifier_block *nb) } EXPORT_SYMBOL(hh_rm_unregister_notifier); -static struct hh_rm_connection * -hh_rm_wait_for_notif_fragments(void *recv_buff, size_t recv_buff_size) +static int hh_rm_process_notif(void *msg, size_t msg_size) { - struct hh_rm_rpc_hdr *hdr = recv_buff; + struct hh_rm_rpc_hdr *hdr = msg; struct hh_rm_connection *connection; + u32 notification; size_t payload_size; + void *payload; int ret = 0; - connection = hh_rm_alloc_connection(hdr->msg_id); - if (IS_ERR_OR_NULL(connection)) - return connection; - - payload_size = recv_buff_size - sizeof(*hdr); - curr_connection = connection; - - ret = hh_rm_init_connection_buff(connection, recv_buff, - sizeof(*hdr), payload_size); - if (ret < 0) - goto out; - - if (wait_for_completion_interruptible(&connection->seq_done)) { - ret = -ERESTARTSYS; - goto out; - } - - return connection; - -out: - kfree(connection); - return ERR_PTR(ret); -} - -static int hh_rm_process_notif(void *recv_buff, size_t recv_buff_size) -{ - struct hh_rm_connection *connection = NULL; - struct hh_rm_rpc_hdr *hdr = recv_buff; - u32 notification = hdr->msg_id; - void *payload = NULL; - size_t payload_size; - int ret = 0; - - pr_debug("Notification received from RM-VM: %x\n", notification); - if (curr_connection) { pr_err("Received new notification from RM-VM before completing last connection\n"); return -EINVAL; } - if (recv_buff_size > sizeof(*hdr)) - payload = recv_buff + sizeof(*hdr); + connection = hh_rm_alloc_connection(hdr->msg_id); + if (!connection) + return -EINVAL; - /* If the notification payload is split-up into - * fragments, wait until all them arrive. - */ - if (hdr->fragments) { - connection = hh_rm_wait_for_notif_fragments(recv_buff, - recv_buff_size); - if (IS_ERR_OR_NULL(connection)) - return PTR_ERR(connection); - - /* In the case of fragments, the complete payload - * is contained under connection->recv_buff - */ - payload = connection->recv_buff; - recv_buff_size = connection->recv_buff_size; + if (hh_rm_init_connection_buff(connection, msg, sizeof(*hdr), msg_size - sizeof(*hdr))) { + kfree(connection); + return -EINVAL; } - payload_size = recv_buff_size - sizeof(*hdr); + + if (hdr->fragments) + return PTR_ERR(connection); + + payload = connection->recv_buff; + payload_size = connection->recv_buff_size; + notification = connection->msg_id; + pr_debug("Notification received from RM-VM: %x\n", notification); switch (notification) { case HH_RM_NOTIF_VM_STATUS: @@ -297,7 +260,7 @@ static int hh_rm_process_notif(void *recv_buff, size_t recv_buff_size) err: if (connection) { - kfree(connection->recv_buff); + kfree(payload); kfree(connection); } From 9a6e5e12be416bf996adee17b1a59d0428fa40d4 Mon Sep 17 00:00:00 2001 From: Srinivasarao Pathipati Date: Mon, 10 Oct 2022 14:47:07 +0530 Subject: [PATCH 09/30] virt: haven: rm_core: Remove current_recv_buffer tracking current_recv_buffer tracks the current end of an ongoing message from resource manager. It can be easily tracked by "conection->recv_buff + connection->recv_buff_size". 
Do that instead to make code clearer. Change-Id: I191dbdbf39c802868e1c6a262ef5420c02f127b2 Signed-off-by: Elliot Berman Signed-off-by: Srinivasarao Pathipati --- drivers/virt/haven/hh_rm_core.c | 12 +++++------- 1 file changed, 5 insertions(+), 7 deletions(-) diff --git a/drivers/virt/haven/hh_rm_core.c b/drivers/virt/haven/hh_rm_core.c index c4246b6ca3f6..2f7bbc5921bc 100644 --- a/drivers/virt/haven/hh_rm_core.c +++ b/drivers/virt/haven/hh_rm_core.c @@ -48,7 +48,6 @@ struct hh_rm_connection { u8 num_fragments; u8 fragments_received; - void *current_recv_buff; }; static struct task_struct *hh_rm_drv_recv_task; @@ -92,6 +91,9 @@ hh_rm_init_connection_buff(struct hh_rm_connection *connection, struct hh_rm_rpc_hdr *hdr = recv_buff; size_t max_buf_size; + connection->num_fragments = hdr->fragments; + connection->fragments_received = 0; + /* Some of the 'reply' types doesn't contain any payload */ if (!payload_size) return 0; @@ -107,15 +109,12 @@ hh_rm_init_connection_buff(struct hh_rm_connection *connection, /* If the data is split into multiple fragments, allocate a large * enough buffer to hold the payloads for all the fragments. */ - connection->recv_buff = connection->current_recv_buff = - kzalloc(max_buf_size, GFP_KERNEL); + connection->recv_buff = kzalloc(max_buf_size, GFP_KERNEL); if (!connection->recv_buff) return -ENOMEM; memcpy(connection->recv_buff, recv_buff + hdr_size, payload_size); - connection->current_recv_buff += payload_size; connection->recv_buff_size = payload_size; - connection->num_fragments = hdr->fragments; return 0; } @@ -352,9 +351,8 @@ static int hh_rm_process_cont(void *recv_buff, size_t recv_buff_size) payload_size = recv_buff_size - sizeof(*hdr); /* Keep appending the data to the previous fragment's end */ - memcpy(connection->current_recv_buff, + memcpy(connection->recv_buff + connection->recv_buff_size, recv_buff + sizeof(*hdr), payload_size); - connection->current_recv_buff += payload_size; connection->recv_buff_size += payload_size; connection->fragments_received++; From d6cfdf1189b350e855de075907c378da8afbe3ed Mon Sep 17 00:00:00 2001 From: Srinivasarao Pathipati Date: Mon, 10 Oct 2022 15:19:41 +0530 Subject: [PATCH 10/30] virt: haven: rm_core: Rename connection variables Rename "recv_buff" and "recv_buff_size" to "payload" and "size", respectively. hh_rm_connection on tracks received buffers, so it is redundant to add "recv". Rename "reply_err_code" to "rm_error" to make it clear that this field is used for the resource manager's return error code, and not some other value. While here, re-org and document the hh_rm_connection struct. Change-Id: I101262dd907a0e678d8d2c5232a17745f057edaf Signed-off-by: Elliot Berman Signed-off-by: Srinivasarao Pathipati --- drivers/virt/haven/hh_rm_core.c | 63 +++++++++++++++++++-------------- 1 file changed, 37 insertions(+), 26 deletions(-) diff --git a/drivers/virt/haven/hh_rm_core.c b/drivers/virt/haven/hh_rm_core.c index 2f7bbc5921bc..5a0a0fb747d6 100644 --- a/drivers/virt/haven/hh_rm_core.c +++ b/drivers/virt/haven/hh_rm_core.c @@ -37,17 +37,28 @@ #define HH_RM_MAX_MSG_SIZE_BYTES \ (HH_MSGQ_MAX_MSG_SIZE_BYTES - sizeof(struct hh_rm_rpc_hdr)) +/** + * struct hh_rm_connection - Represents a complete message from resource manager + * @payload: Combined payload of all the fragments without any RPC headers + * @size: Size of the payload. + * @msg_id: Message ID from the header. + * @num_fragments: total number of fragments expected to be received for this connection. + * @fragments_received: fragments received so far. 
+ * @rm_error: For request/reply sequences with standard replies. + * @seq: Sequence ID for the main message. + */ struct hh_rm_connection { + void *payload; + size_t size; u32 msg_id; - u16 seq; - void *recv_buff; - u32 reply_err_code; - size_t recv_buff_size; - - struct completion seq_done; u8 num_fragments; u8 fragments_received; + + /* only for req/reply sequence */ + u32 rm_error; + u16 seq; + struct completion seq_done; }; static struct task_struct *hh_rm_drv_recv_task; @@ -109,12 +120,12 @@ hh_rm_init_connection_buff(struct hh_rm_connection *connection, /* If the data is split into multiple fragments, allocate a large * enough buffer to hold the payloads for all the fragments. */ - connection->recv_buff = kzalloc(max_buf_size, GFP_KERNEL); - if (!connection->recv_buff) + connection->payload = kzalloc(max_buf_size, GFP_KERNEL); + if (!connection->payload) return -ENOMEM; - memcpy(connection->recv_buff, recv_buff + hdr_size, payload_size); - connection->recv_buff_size = payload_size; + memcpy(connection->payload, recv_buff + hdr_size, payload_size); + connection->size = payload_size; return 0; } @@ -157,8 +168,8 @@ static int hh_rm_process_notif(void *msg, size_t msg_size) if (hdr->fragments) return PTR_ERR(connection); - payload = connection->recv_buff; - payload_size = connection->recv_buff_size; + payload = connection->payload; + payload_size = connection->size; notification = connection->msg_id; pr_debug("Notification received from RM-VM: %x\n", notification); @@ -303,7 +314,7 @@ static int hh_rm_process_rply(void *recv_buff, size_t recv_buff_size) if (ret < 0) return ret; - connection->reply_err_code = reply_hdr->err_code; + connection->rm_error = reply_hdr->err_code; /* * If the data is composed of a single message, wakeup the @@ -351,9 +362,9 @@ static int hh_rm_process_cont(void *recv_buff, size_t recv_buff_size) payload_size = recv_buff_size - sizeof(*hdr); /* Keep appending the data to the previous fragment's end */ - memcpy(connection->recv_buff + connection->recv_buff_size, + memcpy(connection->payload + connection->size, recv_buff + sizeof(*hdr), payload_size); - connection->recv_buff_size += payload_size; + connection->size += payload_size; connection->fragments_received++; if (connection->fragments_received == connection->num_fragments) { @@ -546,7 +557,7 @@ static int hh_rm_send_request(u32 message_id, * @req_buff: Request buffer that contains the payload * @req_buff_size: Total size of the payload * @resp_buff_size: Size of the response buffer - * @reply_err_code: Returns Haven standard error code for the response + * @rm_error: Returns Haven standard error code for the response * * Make a request to the RM-VM and expect a reply back. For a successful * response, the function returns the payload and its size for the response. 
@@ -558,13 +569,13 @@ static int hh_rm_send_request(u32 message_id, */ void *hh_rm_call(hh_rm_msgid_t message_id, void *req_buff, size_t req_buff_size, - size_t *resp_buff_size, int *reply_err_code) + size_t *resp_buff_size, int *rm_error) { struct hh_rm_connection *connection; int req_ret; void *ret; - if (!message_id || !req_buff || !resp_buff_size || !reply_err_code) + if (!message_id || !req_buff || !resp_buff_size || !rm_error) return ERR_PTR(-EINVAL); connection = hh_rm_alloc_connection(message_id); @@ -599,21 +610,21 @@ void *hh_rm_call(hh_rm_msgid_t message_id, goto out; } - *reply_err_code = connection->reply_err_code; - if (connection->reply_err_code) { + *rm_error = connection->rm_error; + if (connection->rm_error) { pr_err("%s: Reply for seq:%d failed with RM err: %d\n", - __func__, connection->seq, connection->reply_err_code); - ret = ERR_PTR(hh_remap_error(connection->reply_err_code)); - kfree(connection->recv_buff); + __func__, connection->seq, connection->rm_error); + ret = ERR_PTR(hh_remap_error(connection->rm_error)); + kfree(connection->payload); goto out; } print_hex_dump_debug("hh_rm_call RX: ", DUMP_PREFIX_OFFSET, 4, 1, - connection->recv_buff, connection->recv_buff_size, + connection->payload, connection->size, false); - ret = connection->recv_buff; - *resp_buff_size = connection->recv_buff_size; + ret = connection->payload; + *resp_buff_size = connection->size; out: mutex_lock(&hh_rm_call_idr_lock); From 83c548eb972a1f805259ef0ec53e9b8fcfdc6656 Mon Sep 17 00:00:00 2001 From: Srinivasarao Pathipati Date: Thu, 13 Oct 2022 00:21:23 +0530 Subject: [PATCH 11/30] virt: haven: rm_core: Guard against unexpected messages Gracefully handle non-standard sequences of messages such as continuation messages without receiving a reply/notification message first, or a new reply/notification message without having finished the last message sequence. Change-Id: I52922fc9ccefb1f7dbb40fbffd2ad6b7fd1162b7 Signed-off-by: Elliot Berman Signed-off-by: Srinivasarao Pathipati --- drivers/virt/haven/hh_rm_core.c | 41 +++++++++++++++++++++++++++++++++ 1 file changed, 41 insertions(+) diff --git a/drivers/virt/haven/hh_rm_core.c b/drivers/virt/haven/hh_rm_core.c index 5a0a0fb747d6..6645eea471da 100644 --- a/drivers/virt/haven/hh_rm_core.c +++ b/drivers/virt/haven/hh_rm_core.c @@ -42,6 +42,8 @@ * @payload: Combined payload of all the fragments without any RPC headers * @size: Size of the payload. * @msg_id: Message ID from the header. + * @ret: Linux return code, set in case there was an error processing the connection. + * @type: HH_RM_RPC_TYPE_RPLY or HH_RM_RPC_TYPE_NOTIF. * @num_fragments: total number of fragments expected to be received for this connection. * @fragments_received: fragments received so far. * @rm_error: For request/reply sequences with standard replies. 
@@ -51,6 +53,8 @@ struct hh_rm_connection { void *payload; size_t size; u32 msg_id; + int ret; + u8 type; u8 num_fragments; u8 fragments_received; @@ -104,6 +108,7 @@ hh_rm_init_connection_buff(struct hh_rm_connection *connection, connection->num_fragments = hdr->fragments; connection->fragments_received = 0; + connection->type = hdr->type; /* Some of the 'reply' types doesn't contain any payload */ if (!payload_size) @@ -381,6 +386,21 @@ struct hh_rm_msgq_data { struct work_struct recv_work; }; +static void hh_rm_abort_connection(struct hh_rm_connection *connection) +{ + switch (connection->type) { + case HH_RM_RPC_TYPE_RPLY: + connection->ret = -EIO; + complete(&connection->seq_done); + break; + case HH_RM_RPC_TYPE_NOTIF: + fallthrough; + default: + kfree(connection->payload); + kfree(connection); + } +} + static void hh_rm_process_recv_work(struct work_struct *work) { struct hh_rm_msgq_data *msgq_data; @@ -392,12 +412,27 @@ static void hh_rm_process_recv_work(struct work_struct *work) switch (hdr->type) { case HH_RM_RPC_TYPE_NOTIF: + if (curr_connection) { + /* Not possible per protocol. Do something better than BUG_ON */ + pr_warn("Received start of new notification without finishing existing message series.\n"); + hh_rm_abort_connection(curr_connection); + } hh_rm_process_notif(recv_buff, msgq_data->recv_buff_size); break; case HH_RM_RPC_TYPE_RPLY: + if (curr_connection) { + /* Not possible per protocol. Do something better than BUG_ON */ + pr_warn("Received start of new reply without finishing existing message series.\n"); + hh_rm_abort_connection(curr_connection); + } hh_rm_process_rply(recv_buff, msgq_data->recv_buff_size); break; case HH_RM_RPC_TYPE_CONT: + if (!curr_connection) { + /* Not possible per protocol. Do something better than BUG_ON */ + pr_warn("Received a continuation message without receiving initial message\n"); + break; + } hh_rm_process_cont(recv_buff, msgq_data->recv_buff_size); break; default: @@ -619,6 +654,12 @@ void *hh_rm_call(hh_rm_msgid_t message_id, goto out; } + if (connection->ret) { + ret = ERR_PTR(connection->ret); + kfree(connection->payload); + goto out; + } + print_hex_dump_debug("hh_rm_call RX: ", DUMP_PREFIX_OFFSET, 4, 1, connection->payload, connection->size, false); From 8651cbc06a16bd36ff96dfee165aacefb66279bf Mon Sep 17 00:00:00 2001 From: Srinivasarao Pathipati Date: Mon, 10 Oct 2022 15:59:12 +0530 Subject: [PATCH 12/30] virt: haven: rm_core: Re-use alloc'd message when sending requests No need to allocate a message queue buffer for each message queue fragment, re-use the same one. 
Change-Id: I5743a704ec08732a2e405724ba795d2d8cea90e3 Signed-off-by: Elliot Berman Signed-off-by: Srinivasarao Pathipati --- drivers/virt/haven/hh_rm_core.c | 48 +++++++++++++++------------------ 1 file changed, 21 insertions(+), 27 deletions(-) diff --git a/drivers/virt/haven/hh_rm_core.c b/drivers/virt/haven/hh_rm_core.c index 6645eea471da..5c31130b81bc 100644 --- a/drivers/virt/haven/hh_rm_core.c +++ b/drivers/virt/haven/hh_rm_core.c @@ -507,8 +507,8 @@ static int hh_rm_send_request(u32 message_id, unsigned long tx_flags; u32 num_fragments = 0; size_t payload_size; - void *send_buff; - int i, ret; + void *msg; + int i, ret = 0; num_fragments = (req_buff_size + HH_RM_MAX_MSG_SIZE_BYTES - 1) / HH_RM_MAX_MSG_SIZE_BYTES; @@ -525,11 +525,15 @@ static int hh_rm_send_request(u32 message_id, return -E2BIG; } + msg = kzalloc(HH_RM_MAX_MSG_SIZE_BYTES, GFP_KERNEL); + if (!msg) + return -ENOMEM; + if (mutex_lock_interruptible(&hh_rm_send_lock)) { - return -ERESTARTSYS; + ret = -ERESTARTSYS; + goto free_msg; } - /* Consider also the 'request' packet for the loop count */ for (i = 0; i <= num_fragments; i++) { if (buff_size_remaining > HH_RM_MAX_MSG_SIZE_BYTES) { payload_size = HH_RM_MAX_MSG_SIZE_BYTES; @@ -538,13 +542,10 @@ static int hh_rm_send_request(u32 message_id, payload_size = buff_size_remaining; } - send_buff = kzalloc(sizeof(*hdr) + payload_size, GFP_KERNEL); - if (!send_buff) { - mutex_unlock(&hh_rm_send_lock); - return -ENOMEM; - } + memset(msg, 0, HH_RM_MAX_MSG_SIZE_BYTES); + /* Fill header */ + hdr = msg; - hdr = send_buff; hdr->version = HH_RM_RPC_HDR_VERSION_ONE; hdr->hdr_words = HH_RM_RPC_HDR_WORDS; hdr->type = i == 0 ? HH_RM_RPC_TYPE_REQ : HH_RM_RPC_TYPE_CONT; @@ -552,11 +553,12 @@ static int hh_rm_send_request(u32 message_id, hdr->seq = connection->seq; hdr->msg_id = message_id; - memcpy(send_buff + sizeof(*hdr), req_buff_curr, payload_size); + /* Copy payload */ + memcpy(msg + sizeof(*hdr), req_buff_curr, payload_size); req_buff_curr += payload_size; - /* Force the last fragment (or the request type) - * to be sent immediately to the receiver + /* Force the last fragment to be sent immediately to + * the receiver */ tx_flags = (i == num_fragments) ? HH_MSGQ_TX_PUSH : 0; @@ -565,25 +567,17 @@ static int hh_rm_send_request(u32 message_id, message_id == HH_RM_RPC_MSG_ID_CALL_VM_CONSOLE_FLUSH) udelay(800); - ret = hh_msgq_send(hh_rm_msgq_desc, send_buff, + ret = hh_msgq_send(hh_rm_msgq_desc, msg, sizeof(*hdr) + payload_size, tx_flags); - /* - * In the case of a success, the hypervisor would have consumed - * the buffer. While in the case of a failure, we are going to - * quit anyways. Hence, free the buffer regardless of the - * return value. - */ - kfree(send_buff); - - if (ret) { - mutex_unlock(&hh_rm_send_lock); - return ret; - } + if (ret) + break; } mutex_unlock(&hh_rm_send_lock); - return 0; +free_msg: + kfree(msg); + return ret; } /** From bcea72ceab494f22dfc9788566ed9d06354c32a7 Mon Sep 17 00:00:00 2001 From: Srinivasarao Pathipati Date: Mon, 10 Oct 2022 16:33:22 +0530 Subject: [PATCH 13/30] virt: haven: rm_core: Clean up sequence idr earlier When RM sends an error for a given call, Linux can re-use that sequence number later. Presently, the sequence number is kept as reserved and could cause a denial of service if many messages are crafted which always return an RM error. 
Change-Id: Icc2b054ab8d110c63759e30d62650ebc9d02b234 Signed-off-by: Elliot Berman Signed-off-by: Srinivasarao Pathipati --- drivers/virt/haven/hh_rm_core.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/virt/haven/hh_rm_core.c b/drivers/virt/haven/hh_rm_core.c index 5c31130b81bc..02fcfcb8ef82 100644 --- a/drivers/virt/haven/hh_rm_core.c +++ b/drivers/virt/haven/hh_rm_core.c @@ -639,6 +639,10 @@ void *hh_rm_call(hh_rm_msgid_t message_id, goto out; } + mutex_lock(&hh_rm_call_idr_lock); + idr_remove(&hh_rm_call_idr, connection->seq); + mutex_unlock(&hh_rm_call_idr_lock); + *rm_error = connection->rm_error; if (connection->rm_error) { pr_err("%s: Reply for seq:%d failed with RM err: %d\n", @@ -662,10 +666,6 @@ void *hh_rm_call(hh_rm_msgid_t message_id, *resp_buff_size = connection->size; out: - mutex_lock(&hh_rm_call_idr_lock); - idr_remove(&hh_rm_call_idr, connection->seq); - mutex_unlock(&hh_rm_call_idr_lock); - kfree(connection); return ret; } From 1de735143f58b2c98206ccd46d8c40dc5a5ca4b0 Mon Sep 17 00:00:00 2001 From: Srinivasarao Pathipati Date: Fri, 14 Oct 2022 11:42:36 +0530 Subject: [PATCH 14/30] Revert "arm64: mm: Don't invalidate FROM_DEVICE buffers at start of DMA transfer" This reverts commit 7b9c3bfbad2510478ac29221d2c40a09184ce6a1. Change-Id: Ib18fb945b6bbdfc19e249e93a38f1979dd564626 Signed-off-by: Srinivasarao Pathipati --- arch/arm64/mm/cache.S | 2 ++ 1 file changed, 2 insertions(+) diff --git a/arch/arm64/mm/cache.S b/arch/arm64/mm/cache.S index 7b054c67acd8..db767b072601 100644 --- a/arch/arm64/mm/cache.S +++ b/arch/arm64/mm/cache.S @@ -228,6 +228,8 @@ ENDPIPROC(__dma_flush_area) * - dir - DMA direction */ ENTRY(__dma_map_area) + cmp w2, #DMA_FROM_DEVICE + b.eq __dma_inv_area b __dma_clean_area ENDPIPROC(__dma_map_area) From 6186cb14ccf66adf0d53e1210bc285be22fa30bf Mon Sep 17 00:00:00 2001 From: Druva Kumar Charipalli Date: Mon, 1 Aug 2022 13:17:09 +0530 Subject: [PATCH 15/30] msm: kgsl: Cleanup correctly when platform probe fails When kgsl_device_platform_probe fails with EPROBE_DEFER, do proper cleanup to avoid issues in next probe call. 
Change-Id: Icd998b1299a64d53458bcd452f754903710c4ca0 Signed-off-by: Druva Kumar Charipalli Signed-off-by: Pankaj Gupta --- drivers/gpu/msm/adreno.c | 1 + drivers/gpu/msm/kgsl.c | 18 +++++++++--------- drivers/gpu/msm/kgsl_bus.c | 2 ++ 3 files changed, 12 insertions(+), 9 deletions(-) diff --git a/drivers/gpu/msm/adreno.c b/drivers/gpu/msm/adreno.c index 8ab1514ebe6a..3a898033c178 100644 --- a/drivers/gpu/msm/adreno.c +++ b/drivers/gpu/msm/adreno.c @@ -1585,6 +1585,7 @@ err: if (of_find_matching_node(dev->of_node, adreno_gmu_match)) component_unbind_all(dev, NULL); + idr_destroy(&device->context_idr); kgsl_bus_close(device); return status; diff --git a/drivers/gpu/msm/kgsl.c b/drivers/gpu/msm/kgsl.c index 196b6ed7fd92..de5c52d3a54b 100644 --- a/drivers/gpu/msm/kgsl.c +++ b/drivers/gpu/msm/kgsl.c @@ -4293,6 +4293,9 @@ static void _unregister_device(struct kgsl_device *device) { int minor; + if (device->gpu_sysfs_kobj.state_initialized) + kobject_del(&device->gpu_sysfs_kobj); + mutex_lock(&kgsl_driver.devlock); for (minor = 0; minor < ARRAY_SIZE(kgsl_driver.devp); minor++) { if (device == kgsl_driver.devp[minor]) { @@ -4484,12 +4487,6 @@ int kgsl_device_platform_probe(struct kgsl_device *device) device->pwrctrl.interrupt_num = status; disable_irq(device->pwrctrl.interrupt_num); - rwlock_init(&device->context_lock); - spin_lock_init(&device->submit_lock); - - idr_init(&device->timelines); - spin_lock_init(&device->timelines_lock); - device->events_wq = alloc_workqueue("kgsl-events", WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_SYSFS | WQ_HIGHPRI, 0); @@ -4504,6 +4501,12 @@ int kgsl_device_platform_probe(struct kgsl_device *device) if (status != 0) goto error_pwrctrl_close; + rwlock_init(&device->context_lock); + spin_lock_init(&device->submit_lock); + + idr_init(&device->timelines); + spin_lock_init(&device->timelines_lock); + kgsl_device_debugfs_init(device); dma_set_coherent_mask(&pdev->dev, KGSL_DMA_BIT_MASK); @@ -4537,9 +4540,6 @@ void kgsl_device_platform_remove(struct kgsl_device *device) kgsl_device_snapshot_close(device); - if (device->gpu_sysfs_kobj.state_initialized) - kobject_del(&device->gpu_sysfs_kobj); - idr_destroy(&device->context_idr); idr_destroy(&device->timelines); diff --git a/drivers/gpu/msm/kgsl_bus.c b/drivers/gpu/msm/kgsl_bus.c index 8cae22d18b54..0839c474f1de 100644 --- a/drivers/gpu/msm/kgsl_bus.c +++ b/drivers/gpu/msm/kgsl_bus.c @@ -1,6 +1,7 @@ // SPDX-License-Identifier: GPL-2.0-only /* * Copyright (c) 2019-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved. 
*/ #include @@ -224,6 +225,7 @@ void kgsl_bus_close(struct kgsl_device *device) kfree(device->pwrctrl.ddr_table); device->pwrctrl.ddr_table = NULL; icc_put(device->pwrctrl.icc_path); + device->pwrctrl.icc_path = NULL; if (device->pwrctrl.ddr_qos_devfreq) put_device(&device->pwrctrl.ddr_qos_devfreq->dev); } From 29d7ace1da9fddb89d01e544602d68bf8a980c0c Mon Sep 17 00:00:00 2001 From: Eliad Ben Yishay Date: Sun, 29 May 2022 10:49:36 +0300 Subject: [PATCH 16/30] kernel-msm: ipa_eth: Dynamic IPA NTN3 ETH client moderation configuration * Giving flexibility in moderation configuration for clients Change-Id: Ic072d2682289e1ed0dfda62f67d5402a134a5d53 Signed-off-by: Eliad Ben Yishay --- include/linux/ipa_eth.h | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/include/linux/ipa_eth.h b/include/linux/ipa_eth.h index f746ce965777..e64fca58d1fe 100644 --- a/include/linux/ipa_eth.h +++ b/include/linux/ipa_eth.h @@ -1,6 +1,8 @@ /* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (c) 2019-2021, The Linux Foundation. All rights reserved. + * + * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved. */ #ifndef _IPA_ETH_H_ @@ -73,10 +75,13 @@ enum ipa_eth_pipe_direction { * * @bar_addr: bar PA to access NTN register * @tail_ptr_offs: tail ptr offset + * @ioc_mod_threshold: Descriptors # per interrupt request from + * NTN3 HW via descriptor bit as part of the protocol. */ struct ipa_eth_ntn_setup_info { phys_addr_t bar_addr; phys_addr_t tail_ptr_offs; + uint16_t ioc_mod_threshold; }; /** @@ -112,6 +117,7 @@ struct ipa_eth_realtek_setup_info { phys_addr_t dest_tail_ptr_offs; }; + /** * struct ipa_eth_buff_smmu_map - IPA iova->pa SMMU mapping * @iova: virtual address of the data buffer From 3ae07bc5589b369ee42659ce745db845bdd4ef1e Mon Sep 17 00:00:00 2001 From: Gaurav Kashyap Date: Mon, 12 Sep 2022 11:28:15 +0530 Subject: [PATCH 17/30] qcedev: check num_fds during unmap check the num_fds passed into unmap buf ioctl, or else it can lead to an out of bounds access. Test: Build compilation. qcedev tests. Change-Id: I206ba01dfa989346ade769a0f68b372b21f84043 Signed-off-by: Gaurav Kashyap Signed-off-by: Pranav Lavhate --- drivers/crypto/msm/qcedev.c | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/drivers/crypto/msm/qcedev.c b/drivers/crypto/msm/qcedev.c index 6221510f7997..d81ce43e3fd6 100644 --- a/drivers/crypto/msm/qcedev.c +++ b/drivers/crypto/msm/qcedev.c @@ -1916,7 +1916,9 @@ long qcedev_ioctl(struct file *file, goto exit_free_qcedev_areq; } - if (map_buf.num_fds > QCEDEV_MAX_BUFFERS) { + if (map_buf.num_fds > ARRAY_SIZE(map_buf.fd)) { + pr_err("%s: err: num_fds = %d exceeds max value\n", + __func__, map_buf.num_fds); err = -EINVAL; goto exit_free_qcedev_areq; } @@ -1956,6 +1958,12 @@ long qcedev_ioctl(struct file *file, err = -EFAULT; goto exit_free_qcedev_areq; } + if (unmap_buf.num_fds > ARRAY_SIZE(unmap_buf.fd)) { + pr_err("%s: err: num_fds = %d exceeds max value\n", + __func__, unmap_buf.num_fds); + err = -EINVAL; + goto exit_free_qcedev_areq; + } for (i = 0; i < unmap_buf.num_fds; i++) { err = qcedev_check_and_unmap_buffer(handle, From b4ea3e4d0a61fa186a8873df6c607c7be25b99cc Mon Sep 17 00:00:00 2001 From: Will Huang Date: Mon, 20 Jun 2022 09:56:01 +0800 Subject: [PATCH 18/30] cnss2: Add support for handling AFC memory request from FW Add APIs for handling AFC memory request from FW: cnss_send_buffer_to_afcmem() and cnss_reset_afcmem(). 
cnss_send_buffer_to_afcmem() will be called if receive valid AFC response data, cnss_reset_afcmem() will be called if receive invalid AFC response data. After memory copy done, another WMI command will indicate FW ready to read. Add FW memory type QMI_WLFW_AFC_MEM_V01. Change-Id: I34b9add3d7721d778e5474d9b11ad64adb4f04f0 CRs-Fixed: 3223607 Signed-off-by: Balamurugan Mahalingam Signed-off-by: Will Huang --- drivers/net/wireless/cnss2/pci.c | 95 +++++++++++++++++++ .../cnss_utils/wlan_firmware_service_v01.h | 1 + include/net/cnss2.h | 3 + 3 files changed, 99 insertions(+) diff --git a/drivers/net/wireless/cnss2/pci.c b/drivers/net/wireless/cnss2/pci.c index 4b8f78da518d..1428c2493d56 100644 --- a/drivers/net/wireless/cnss2/pci.c +++ b/drivers/net/wireless/cnss2/pci.c @@ -89,6 +89,13 @@ static DEFINE_SPINLOCK(time_sync_lock); #define MHI_SUSPEND_RETRY_CNT 3 +#define AFC_SLOT_SIZE 0x1000 +#define AFC_MAX_SLOT 2 +#define AFC_MEM_SIZE (AFC_SLOT_SIZE * AFC_MAX_SLOT) +#define AFC_AUTH_STATUS_OFFSET 1 +#define AFC_AUTH_SUCCESS 1 +#define AFC_AUTH_ERROR 0 + static struct cnss_pci_reg ce_src[] = { { "SRC_RING_BASE_LSB", QCA6390_CE_SRC_RING_BASE_LSB_OFFSET }, { "SRC_RING_BASE_MSB", QCA6390_CE_SRC_RING_BASE_MSB_OFFSET }, @@ -3977,6 +3984,94 @@ int cnss_pci_qmi_send_put(struct cnss_pci_data *pci_priv) return ret; } +int cnss_send_buffer_to_afcmem(struct device *dev, char *afcdb, uint32_t len, + uint8_t slotid) +{ + struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(dev); + struct cnss_fw_mem *fw_mem; + void *mem = NULL; + int i, ret; + u32 *status; + + if (!plat_priv) + return -EINVAL; + + fw_mem = plat_priv->fw_mem; + if (slotid >= AFC_MAX_SLOT) { + cnss_pr_err("Invalid slot id %d\n", slotid); + ret = -EINVAL; + goto err; + } + if (len > AFC_SLOT_SIZE) { + cnss_pr_err("len %d greater than slot size", len); + ret = -EINVAL; + goto err; + } + + for (i = 0; i < plat_priv->fw_mem_seg_len; i++) { + if (fw_mem[i].type == QMI_WLFW_AFC_MEM_V01) { + mem = fw_mem[i].va; + status = mem + (slotid * AFC_SLOT_SIZE); + break; + } + } + + if (!mem) { + cnss_pr_err("AFC mem is not available\n"); + ret = -ENOMEM; + goto err; + } + + memcpy(mem + (slotid * AFC_SLOT_SIZE), afcdb, len); + if (len < AFC_SLOT_SIZE) + memset(mem + (slotid * AFC_SLOT_SIZE) + len, + 0, AFC_SLOT_SIZE - len); + status[AFC_AUTH_STATUS_OFFSET] = cpu_to_le32(AFC_AUTH_SUCCESS); + + return 0; +err: + return ret; +} +EXPORT_SYMBOL(cnss_send_buffer_to_afcmem); + +int cnss_reset_afcmem(struct device *dev, uint8_t slotid) +{ + struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(dev); + struct cnss_fw_mem *fw_mem; + void *mem = NULL; + int i, ret; + + if (!plat_priv) + return -EINVAL; + + fw_mem = plat_priv->fw_mem; + if (slotid >= AFC_MAX_SLOT) { + cnss_pr_err("Invalid slot id %d\n", slotid); + ret = -EINVAL; + goto err; + } + + for (i = 0; i < plat_priv->fw_mem_seg_len; i++) { + if (fw_mem[i].type == QMI_WLFW_AFC_MEM_V01) { + mem = fw_mem[i].va; + break; + } + } + + if (!mem) { + cnss_pr_err("AFC mem is not available\n"); + ret = -ENOMEM; + goto err; + } + + memset(mem + (slotid * AFC_SLOT_SIZE), 0, AFC_SLOT_SIZE); + return 0; + +err: + return ret; +} +EXPORT_SYMBOL(cnss_reset_afcmem); + int cnss_pci_alloc_fw_mem(struct cnss_pci_data *pci_priv) { struct cnss_plat_data *plat_priv = pci_priv->plat_priv; diff --git a/drivers/net/wireless/cnss_utils/wlan_firmware_service_v01.h b/drivers/net/wireless/cnss_utils/wlan_firmware_service_v01.h index eb89ad4176ad..4145358c86bd 100644 --- 
a/drivers/net/wireless/cnss_utils/wlan_firmware_service_v01.h +++ b/drivers/net/wireless/cnss_utils/wlan_firmware_service_v01.h @@ -168,6 +168,7 @@ enum wlfw_mem_type_enum_v01 { QMI_WLFW_MEM_HANG_DATA_V01 = 7, QMI_WLFW_MLO_GLOBAL_MEM_V01 = 8, QMI_WLFW_PAGEABLE_MEM_V01 = 9, + QMI_WLFW_AFC_MEM_V01 = 10, WLFW_MEM_TYPE_ENUM_MAX_VAL_V01 = INT_MAX, }; diff --git a/include/net/cnss2.h b/include/net/cnss2.h index 2cbfd3136d82..dbb0ea5978fb 100644 --- a/include/net/cnss2.h +++ b/include/net/cnss2.h @@ -278,4 +278,7 @@ extern int cnss_get_mem_seg_count(enum cnss_remote_mem_type type, u32 *seg); extern int cnss_get_mem_segment_info(enum cnss_remote_mem_type type, struct cnss_mem_segment segment[], u32 segment_count); +extern int cnss_send_buffer_to_afcmem(struct device *dev, char *afcdb, + uint32_t len, uint8_t slotid); +extern int cnss_reset_afcmem(struct device *dev, uint8_t slotid); #endif /* _NET_CNSS2_H */ From cfac1c34666e29d4e3f1a3bd4250d6dcdcca4a94 Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Wed, 28 Sep 2022 21:56:15 +0200 Subject: [PATCH 19/30] wifi: cfg80211: fix u8 overflow in cfg80211_update_notlisted_nontrans() In the copy code of the elements, we do the following calculation to reach the end of the MBSSID element: /* copy the IEs after MBSSID */ cpy_len = mbssid[1] + 2; This looks fine, however, cpy_len is a u8, the same as mbssid[1], so the addition of two can overflow. In this case the subsequent memcpy() will overflow the allocated buffer, since it copies 256 bytes too much due to the way the allocation and memcpy() sizes are calculated. Fix this by using size_t for the cpy_len variable. This fixes CVE-2022-41674. Reported-by: Soenke Huster Tested-by: Soenke Huster Fixes: 0b8fb8235be8 ("cfg80211: Parsing of Multiple BSSID information in scanning") Link: https://lore.kernel.org/lkml/20221013175147.067414219@linuxfoundation.org/ Reviewed-by: Kees Cook Signed-off-by: Johannes Berg Git-commit: aebe9f4639b13a1f4e9a6b42cdd2e38c617b442d Git-repo: https://android.googlesource.com/kernel/common Change-Id: If6ed330dc65fdf387ee8584b5a69840242edf5cf Signed-off-by: Vulupala Shashank Reddy --- net/wireless/scan.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/wireless/scan.c b/net/wireless/scan.c index 86c6066c42e5..81e7469c65b5 100644 --- a/net/wireless/scan.c +++ b/net/wireless/scan.c @@ -1717,7 +1717,7 @@ cfg80211_update_notlisted_nontrans(struct wiphy *wiphy, size_t new_ie_len; struct cfg80211_bss_ies *new_ies; const struct cfg80211_bss_ies *old; - u8 cpy_len; + size_t cpy_len; lockdep_assert_held(&wiphy_to_rdev(wiphy)->bss_lock); From 060227f10d13c4b8ebc5dbfda94bfc056a7cbb62 Mon Sep 17 00:00:00 2001 From: Srikanth Marepalli Date: Wed, 2 Nov 2022 20:33:04 +0530 Subject: [PATCH 20/30] wifi: cfg80211: avoid nontransmitted BSS list corruption MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit If a non-transmitted BSS shares enough information (both SSID and BSSID!) with another non-transmitted BSS of a different AP, then we can find and update it, and then try to add it to the non-transmitted BSS list. We do a search for it on the transmitted BSS, but if it's not there (but belongs to another transmitted BSS), the list gets corrupted. Since this is an erroneous situation, simply fail the list insertion in this case and free the non-transmitted BSS. This fixes CVE-2022-42721. 
Reported-by: Sönke Huster Tested-by: Sönke Huster Fixes: 0b8fb8235be8 ("cfg80211: Parsing of Multiple BSSID information in scanning") Link: https://lore.kernel.org/all/20221013175145.382242160@linuxfoundation.org/ Signed-off-by: Johannes Berg Git-commit: bcca852027e5878aec911a347407ecc88d6fff7f Git-repo: https://android.googlesource.com/kernel/common Change-Id: Icb2106b5ac5ff5e3ecb50bd09440bce5560fbb05 Signed-off-by: Srikanth Marepalli --- net/wireless/scan.c | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/net/wireless/scan.c b/net/wireless/scan.c index 86c6066c42e5..2bdf93d28d3d 100644 --- a/net/wireless/scan.c +++ b/net/wireless/scan.c @@ -390,6 +390,14 @@ cfg80211_add_nontrans_list(struct cfg80211_bss *trans_bss, rcu_read_unlock(); + /* This is a bit weird - it's not on the list, but already on another + * one! The only way that could happen is if there's some BSSID/SSID + * shared by multiple APs in their multi-BSSID profiles, potentially + * with hidden SSID mixed in ... ignore it. + */ + if (!list_empty(&nontrans_bss->nontrans_list)) + return -EINVAL; + /* add to the list */ list_add_tail(&nontrans_bss->nontrans_list, &trans_bss->nontrans_list); return 0; From d67ce86e48b997653800f2e96695df6454dd4357 Mon Sep 17 00:00:00 2001 From: Balaji Pothunoori Date: Wed, 2 Nov 2022 19:36:09 +0530 Subject: [PATCH 21/30] wifi: cfg80211: fix BSS refcounting bugs MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit There are multiple refcounting bugs related to multi-BSSID: - In bss_ref_get(), if the BSS has a hidden_beacon_bss, then the bss pointer is overwritten before checking for the transmitted BSS, which is clearly wrong. Fix this by using the bss_from_pub() macro. - In cfg80211_bss_update() we copy the transmitted_bss pointer from tmp into new, but then if we release new, we'll unref it erroneously. We already set the pointer and ref it, but need to NULL it since it was copied from the tmp data. - In cfg80211_inform_single_bss_data(), if adding to the non- transmitted list fails, we unlink the BSS and yet still we return it, but this results in returning an entry without a reference. We shouldn't return it anyway if it was broken enough to not get added there. This fixes CVE-2022-42720. 
Reported-by: Sönke Huster Tested-by: Sönke Huster Fixes: a3584f56de1c ("cfg80211: Properly track transmitting and non-transmitting BSS") Link: https://lore.kernel.org/lkml/20221013175147.168042993@linuxfoundation.org/ Signed-off-by: Johannes Berg Change-Id: If6ed330dc65fdf387ee8584b5a69840242edf5cc Signed-off-by: Balaji Pothunoori --- net/wireless/scan.c | 27 ++++++++++++++------------- 1 file changed, 14 insertions(+), 13 deletions(-) diff --git a/net/wireless/scan.c b/net/wireless/scan.c index 81e7469c65b5..261f5866ebec 100644 --- a/net/wireless/scan.c +++ b/net/wireless/scan.c @@ -104,18 +104,12 @@ static inline void bss_ref_get(struct cfg80211_registered_device *rdev, lockdep_assert_held(&rdev->bss_lock); bss->refcount++; - if (bss->pub.hidden_beacon_bss) { - bss = container_of(bss->pub.hidden_beacon_bss, - struct cfg80211_internal_bss, - pub); - bss->refcount++; - } - if (bss->pub.transmitted_bss) { - bss = container_of(bss->pub.transmitted_bss, - struct cfg80211_internal_bss, - pub); - bss->refcount++; - } + + if (bss->pub.hidden_beacon_bss) + bss_from_pub(bss->pub.hidden_beacon_bss)->refcount++; + + if (bss->pub.transmitted_bss) + bss_from_pub(bss->pub.transmitted_bss)->refcount++; } static inline void bss_ref_put(struct cfg80211_registered_device *rdev, @@ -1231,6 +1225,8 @@ cfg80211_bss_update(struct cfg80211_registered_device *rdev, new->refcount = 1; INIT_LIST_HEAD(&new->hidden_list); INIT_LIST_HEAD(&new->pub.nontrans_list); + /* we'll set this later if it was non-NULL */ + new->pub.transmitted_bss = NULL; if (rcu_access_pointer(tmp->pub.proberesp_ies)) { hidden = rb_find_bss(rdev, tmp, BSS_CMP_HIDE_ZLEN); @@ -1460,10 +1456,15 @@ cfg80211_inform_single_bss_data(struct wiphy *wiphy, spin_lock_bh(&rdev->bss_lock); if (cfg80211_add_nontrans_list(non_tx_data->tx_bss, &res->pub)) { - if (__cfg80211_unlink_bss(rdev, res)) + if (__cfg80211_unlink_bss(rdev, res)) { rdev->bss_generation++; + res = NULL; + } } spin_unlock_bh(&rdev->bss_lock); + + if (!res) + return NULL; } trace_cfg80211_return_bss(&res->pub); From 3b9c44ebffdd43e64606ebc99ded3f548ef8274c Mon Sep 17 00:00:00 2001 From: Michael Adisumarta Date: Wed, 16 Nov 2022 17:26:08 -0800 Subject: [PATCH 22/30] msm: ipa3: add MPLS support to existing IPA GRE struct Add kernel support to handle IPA offload for MPLS packets over GRE tunnel. Add new struct that specify tuple parameters for packets that will take the exception path for MPLS over GRE. 
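A usage sketch of the new struct (the values are illustrative; the struct and enum members are the ones introduced by this patch, embedded in struct ipa_rule_attrib as fld_val_eq):

	struct ipa_field_val_equation_gen eq = {
		.flow         = FLOW_UPLINK,
		.inner_iptype = IPA_IP_v4,
		.field        = FIELD_UDP_DST_PORT,
		.value        = 4789,
	};

This describes an uplink flow whose inner IPv4 UDP destination port equals 4789, i.e. a tuple that should take the MPLS over GRE exception path.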
Change-Id: I27a668af59af02dd08dd74f19782e28a7a40c6c2 Signed-off-by: Michael Adisumarta --- include/uapi/linux/msm_ipa.h | 239 +++++++++++++++++++++++++++++++---- 1 file changed, 216 insertions(+), 23 deletions(-) diff --git a/include/uapi/linux/msm_ipa.h b/include/uapi/linux/msm_ipa.h index b6ad02a7ea3c..0b7c1cd6015a 100644 --- a/include/uapi/linux/msm_ipa.h +++ b/include/uapi/linux/msm_ipa.h @@ -261,12 +261,13 @@ #define IPA_FLT_L2TP_UDP_INNER_MAC_DST_ADDR (1ul << 31) /* Extended attributes for the rule (routing or filtering) */ -#define IPA_FLT_EXT_L2TP_UDP_TCP_SYN (1ul << 0) -#define IPA_FLT_EXT_L2TP_UDP_INNER_ETHER_TYPE (1ul << 1) -#define IPA_FLT_EXT_MTU (1ul << 2) -#define IPA_FLT_EXT_L2TP_UDP_INNER_NEXT_HDR (1ul << 3) -#define IPA_FLT_EXT_NEXT_HDR (1ul << 4) - +#define IPA_FLT_EXT_L2TP_UDP_TCP_SYN (1ul << 0) +#define IPA_FLT_EXT_L2TP_UDP_INNER_ETHER_TYPE (1ul << 1) +#define IPA_FLT_EXT_MTU (1ul << 2) +#define IPA_FLT_EXT_L2TP_UDP_INNER_NEXT_HDR (1ul << 3) +#define IPA_FLT_EXT_NEXT_HDR (1ul << 4) +/* ip protocol/udp ports/eth-type */ +#define IPA_FLT_EXT_MPLS_GRE_GENERAL (1ul << 5) /** * maximal number of NAT PDNs in the PDN config table @@ -712,6 +713,17 @@ enum ipa_ip_type { #define VALID_IPA_IP_TYPE(t) \ ((t) >= IPA_IP_v4 && (t) < IPA_IP_MAX) +static inline const char *ipa_ip_type_as_str(enum ipa_ip_type t) +{ + return + (t == IPA_IP_v4) ? "v4" : + (t == IPA_IP_v6) ? "v6" : + (t == IPA_IP_MAX) ? "max|v4_vlan" : + (t == IPA_IP_v6_VLAN) ? "v6_vlan" : + (t == IPA_IP_MAX_WLAN) ? "max_wlan" : + "???"; +} + /** * enum ipa_rule_type - Type of routing or filtering rule * Hashable: Rule will be located at the hashable tables @@ -1056,6 +1068,100 @@ enum ipa_hw_feature_support { #define IPA_HW_ETH_BRIDGING_SUPPORT_BMSK 0x1 +/** + * enum ipa_exception_type + * + * This enum is for describing which field is to be looked at for + * exception path consideration. + * + * NOTE 1: The field implies an offset into the packet under + * consideration. This offset will be calculated on behalf of + * the user of this API. + * + * NOTE 2: When exceptions are generated/sent in an ipa_exception + * structure, they will considered to be from the upload + * perspective. And when appropriate, a corresponding, and + * perhaps inverted, downlink exception will be automatically + * created on the callers behalf. As an example: If a + * FIELD_UDP_SRC_PORT is sent, an uplink exception will be + * created for udp source port, and a corresponding + * FIELD_UDP_DST_PORT will be automatically created for the + * downlink. + */ +enum ipa_exception_type { + FIELD_IP_PROTOCOL = 0, + FIELD_TCP_SRC_PORT = 1, + FIELD_TCP_DST_PORT = 2, + FIELD_UDP_SRC_PORT = 3, + FIELD_UDP_DST_PORT = 4, + FIELD_ETHER_TYPE = 5, + FIELD_MAX +}; + +#define VALID_EXCEPTION_TYPE(x) \ + ((x) >= FIELD_IP_PROTOCOL && (x) < FIELD_MAX) + +static inline const char *exception_type_as_str(enum ipa_exception_type t) +{ + return + (t == FIELD_IP_PROTOCOL) ? "ip_protocol" : + (t == FIELD_TCP_SRC_PORT) ? "tcp_src_port" : + (t == FIELD_TCP_DST_PORT) ? "tcp_dst_port" : + (t == FIELD_UDP_SRC_PORT) ? "udp_src_port" : + (t == FIELD_UDP_DST_PORT) ? "udp_dst_port" : + (t == FIELD_ETHER_TYPE) ? "ether_type" : + (t == FIELD_MAX) ? "max" : + "???"; +} + +#define IP_TYPE_EXCEPTION(x) \ + ((x) == FIELD_IP_PROTOCOL || \ + (x) == FIELD_TCP_SRC_PORT || \ + (x) == FIELD_TCP_DST_PORT || \ + (x) == FIELD_UDP_SRC_PORT || \ + (x) == FIELD_UDP_DST_PORT) + +/** + * enum ipa_data_flow_type + * + * This enum is for describing whether a data flow is uplink or down. 
+ */ +enum ipa_data_flow_type { + FLOW_UPLINK = 0, + FLOW_DOWNLINK = 1, + FLOW_MAX +}; + +#define VALID_FLOW_TYPE(x) \ + ((x) >= FLOW_UPLINK && (x) < FLOW_MAX) + +static inline const char *flow_type_as_str(enum ipa_data_flow_type t) +{ + return + (t == FLOW_UPLINK) ? "uplink" : + (t == FLOW_DOWNLINK) ? "downlink" : + (t == FLOW_MAX) ? "max" : + "???"; +} + +/** + * struct ipa_field_val_equation_gen + * + * Needed for generating an equation for peering into and finding + * data in a packet or frame. + * + * @flow: Are we generating an equation for uplink or downlink + * @inner_iptype: What's the ip type of the inner packet + * @field: Field we're generating equation for + * @value: The value at that field we're looking for + */ +struct ipa_field_val_equation_gen { + enum ipa_data_flow_type flow; + enum ipa_ip_type inner_iptype; + enum ipa_exception_type field; + uint32_t value; +}; + /** * struct ipa_rule_attrib - attributes of a routing/filtering * rule, all in LE @@ -1088,6 +1194,7 @@ enum ipa_hw_feature_support { * @payload_length: Payload length. * @ext_attrib_mask: Extended attributes. * @l2tp_udp_next_hdr: next header in L2TP tunneling + * @field_val_equ: for finding a value at a particular offset */ struct ipa_rule_attrib { uint32_t attrib_mask; @@ -1133,7 +1240,7 @@ struct ipa_rule_attrib { __u32 ext_attrib_mask; __u8 l2tp_udp_next_hdr; __u8 padding1; - __u32 padding2; + struct ipa_field_val_equation_gen fld_val_eq; }; @@ -1396,8 +1503,8 @@ enum ipa_hdr_l2_type { * IPA_HDR_PROC_L2TP_UDP_HEADER_REMOVE: Process Ethernet To WLAN packets to * remove L2TP UDP header. * IPA_HDR_PROC_SET_DSCP: - * IPA_HDR_PROC_EoGRE_HEADER_ADD: Add IPV[46] GRE header - * IPA_HDR_PROC_EoGRE_HEADER_REMOVE: Remove IPV[46] GRE header + * IPA_HDR_PROC_EoGRE_HEADER_ADD: Add IPV[46] and GRE header + * IPA_HDR_PROC_EoGRE_HEADER_REMOVE: Remove IPV[46] and GRE header */ enum ipa_hdr_proc_type { IPA_HDR_PROC_NONE, @@ -1576,7 +1683,8 @@ struct ipa_l2tp_hdr_proc_ctx_params { }; #define IPA_EoGRE_MAX_PCP_IDX 8 /* From 802.1Q tag format (reflects IEEE P802.1p) */ -#define IPA_EoGRE_MAX_VLAN 8 /* Our supported number of VLAN id's */ +#define IPA_EoGRE_MAX_VLAN 8 /* Supported number of VLAN id's (C-Tag when MPLSoGRE) */ +#define IPA_GRE_MAX_S_VLAN 4 /* We'll support up to 4 vlans from the S-Tag */ /* vlan 12 bits + pcp 3 bites <-> dscp 6 bits */ struct IpaDscpVlanPcpMap_t { @@ -1587,23 +1695,98 @@ struct IpaDscpVlanPcpMap_t { /* * dscp[vlan][pcp], valid only lower 6 bits, using pcp as index */ - uint8_t dscp[IPA_EoGRE_MAX_VLAN][IPA_EoGRE_MAX_PCP_IDX]; - uint8_t num_vlan; /* indicate how many vlans valid */ - uint8_t reserved0; + uint8_t dscp[IPA_EoGRE_MAX_VLAN][IPA_EoGRE_MAX_PCP_IDX]; + uint8_t num_vlan; /* indicate how many vlans valid vlan above */ + uint8_t num_s_vlan; /* indicate how many vlans valid in s_vlan below */ + uint8_t dscp_opt; /* indicates if dscp is required or optional */ + uint8_t pad1; /* for alignment */ + /* + * The same lookup scheme, using vlan[] above, is used for + * generating the first index of mpls below; and in addition, + * we've added a similar array, s_vlan[], for generating the + * second index below. + */ + uint16_t s_vlan[IPA_GRE_MAX_S_VLAN]; + /* + * mpls[C-Tag vlan][S-Tag vlan] -> Only the lower 20 bits of the + * 32-bit mpls value will be used, since that's the size of the + * MPLS label component. 
+ */ + uint32_t mpls[IPA_EoGRE_MAX_VLAN][IPA_GRE_MAX_S_VLAN]; + /* + * following three arrays are used for DL vlan tag lookup + * mpls_val_sorted is in ascending order, by mpls label values in mpls array + * vlan_c and vlan_s are vlan id values that are corresponding to the mpls label + */ + uint16_t pad2; /* for alignment */ + uint8_t pad3; /* for alignment */ + uint8_t num_mpls_val_sorted; /* num of elements in mpls_val_sorted */ + uint32_t mpls_val_sorted[IPA_EoGRE_MAX_VLAN * IPA_GRE_MAX_S_VLAN]; + uint8_t vlan_c[IPA_EoGRE_MAX_VLAN * IPA_GRE_MAX_S_VLAN]; + uint8_t vlan_s[IPA_EoGRE_MAX_VLAN * IPA_GRE_MAX_S_VLAN]; } __packed; +/** + * struct ipa_exception + * + * This structure is used to generate an exception filter rule (see + * exception_list in struct ipa_ipgre_info). + * + * NOTE: + * + * When generating the exception list, generate them from the + * perepctive of the UL data flow. The inner workings of the system + * will generate the complimentary DL rule, if necessary. For + * example, UL TCP or UDP source/destination ports will have the + * complimentary DL rule generated with the addresses and ports + * reversed. + * + * For finding/matching @value at @field. + * + * Please always set @inner_iptype even when looking for an element in + * an Ethernet header. In that case, inner_iptype would be set to the + * ip header type following the Ethernet header. + */ +struct ipa_exception { + enum ipa_exception_type field; + uint32_t value; + enum ipa_ip_type inner_iptype; +} __packed; + +#define IPA_MAX_EXCEPTIONS 10 /* Max exception rule tuples */ + struct ipa_ipgre_info { - /* ip address type */ + /* + * GRE tunnel's ip address type. + */ enum ipa_ip_type iptype; - /* ipv4 */ + /* + * If GRE tunnel is ipv4, then address goes here. + */ uint32_t ipv4_src; uint32_t ipv4_dst; - /* ipv6 */ + /* + * Else if GRE tunnel is ipv6, then address goes here. + */ uint32_t ipv6_src[4]; uint32_t ipv6_dst[4]; - /* gre header info */ + /* + * If running MPLS over GRE, please provide mpls header value + * here. + */ + uint32_t mpls_protocol; + /* + * GRE header value. + */ uint16_t gre_protocol; -}; + uint8_t unused; /* for alignment */ + /* + * The number of valid elements in, and the accompanying + * exception_list, below + */ + uint8_t num_exceptions; + struct ipa_exception exception_list[IPA_MAX_EXCEPTIONS]; +} __packed; struct ipa_ioc_eogre_info { /* ip and gre info */ @@ -1618,23 +1801,33 @@ struct ipa_ioc_eogre_info { * @input_ip_version: Specifies if Input header is IPV4(0) or IPV6(1) * @output_ip_version: Specifies if template header's outer IP is IPV4(0) or IPV6(1) * @second_pass: Specifies if the data should be processed again. 
+ * @is_mpls: Specifies if ucp cmd is for legacy EoGRE(0) or MPLSoGRE(1) + * @tag_remove_len: Specifies amount to be removed for the tags */ struct ipa_eogre_header_add_procparams { uint32_t eth_hdr_retained :1; uint32_t input_ip_version :1; uint32_t output_ip_version :1; uint32_t second_pass :1; - uint32_t reserved :28; + uint32_t is_mpls :1; + uint32_t tag_remove_len :4; + uint32_t reserved :23; }; /** * struct ipa_eogre_header_remove_procparams - - * @hdr_len_remove: Specifies how much (in bytes) of the header needs - * to be removed + * @hdr_len_remove: Specifies how much (in bytes) of the header needs + * to be removed + * @outer_ip_version: Specifies if template header's outer IP is IPV4(0) or IPV6(1) + * @is_mpls: Specifies if ucp cmd is for legacy EoGRE(0) or MPLSoGRE(1) + * @tag_add_len: Specifies amount to be added for the tags */ struct ipa_eogre_header_remove_procparams { - uint32_t hdr_len_remove:8; /* 44 bytes for IPV6, 24 for IPV4 */ - uint32_t reserved:24; + uint32_t hdr_len_remove :8; /* 44 bytes for IPV6, 24 for IPV4 */ + uint32_t outer_ip_version :1; + uint32_t is_mpls :1; + uint32_t tag_add_len :4; + uint32_t reserved :18; }; /** From 64ca5a9d315d45f03c60ffd24de1264793514a9f Mon Sep 17 00:00:00 2001 From: Skylar Chang Date: Mon, 29 Aug 2022 15:42:35 -0700 Subject: [PATCH 23/30] msm: ipa3: rmnet: header file update for eth pdu Introduce new ioctls and structures for ETH PDU Change-Id: I5b6a98f90b8b3816a27aa9e6020b1a863d24aa22 Signed-off-by: Skylar Chang --- include/uapi/linux/msm_rmnet.h | 26 ++++++++++++++++++++++---- 1 file changed, 22 insertions(+), 4 deletions(-) diff --git a/include/uapi/linux/msm_rmnet.h b/include/uapi/linux/msm_rmnet.h index 25267c3c9b09..7ea067075c64 100644 --- a/include/uapi/linux/msm_rmnet.h +++ b/include/uapi/linux/msm_rmnet.h @@ -1,6 +1,8 @@ /* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */ /* * Copyright (c) 2018-2021, The Linux Foundation. All rights reserved. + * + * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved. */ #ifndef _UAPI_MSM_RMNET_H_ @@ -72,6 +74,10 @@ #define RMNET_IOCTL_SET_MTU 0x0020 /* Set v4/v6 MTU */ #define RMNET_IOCTL_GET_EPID_LL 0x0021 /* Get LL ep ID */ #define RMNET_IOCTL_GET_EP_PAIR_LL 0x0022 /* LL ep pair */ +#define RMNET_IOCTL_SET_ETH_VLAN 0x0023 /* Set ETH Vlan */ +#define RMNET_IOCTL_ADD_MUX_CHANNEL_v2 0x0024 /* Add MUX ID + mac*/ +#define RMNET_IOCTL_GET_EPID_ETH 0x0025 /* Get ETH ep ID */ +#define RMNET_IOCTL_GET_EP_PAIR_ETH 0x0026 /* ETH data ep pair*/ /** * RMNET_IOCTL_EXTENDED_V2 ioctl types. 
@@ -100,6 +106,7 @@ #define RMNET_IOCTL_FEAT_FLOW_CONTROL (1<<7) #define RMNET_IOCTL_FEAT_GET_DFLT_CONTROL_CHANNEL (1<<8) #define RMNET_IOCTL_FEAT_GET_HWSW_MAP (1<<9) +#define RMNET_IOCTL_FEAT_ETH_PDU (1<<10) /* Input values for the RMNET_IOCTL_SET_EGRESS_DATA_FORMAT IOCTL */ #define RMNET_IOCTL_EGRESS_FORMAT_MAP (1<<1) @@ -124,6 +131,9 @@ #define IFNAMSIZ 16 #endif +/* size of the mac address */ +#define MAC_ADDR_SIZE 6 + /** * enum rmnet_egress_ep_type - To specify pipe type for egress * @RMNET_EGRESS_DEFAULT: WAN Producer pipe @@ -132,10 +142,11 @@ * Add any number of pipes before max */ enum rmnet_egress_ep_type { - RMNET_EGRESS_DEFAULT = 0x0000, + RMNET_EGRESS_DEFAULT = 0x0000, RMNET_EGRESS_LOW_LAT_CTRL = 0x0001, RMNET_EGRESS_LOW_LAT_DATA = 0x0002, - RMNET_EGRESS_MAX = 0x0003, + RMNET_EGRESS_ETH_DATA = 0x0003, + RMNET_EGRESS_MAX = 0x0004, }; @@ -300,7 +311,7 @@ struct rmnet_ioctl_extended_s { /* Input values for the RMNET_IOCTL_ADD_MUX_CHANNEL IOCTL */ struct { __u32 mux_id; - __s8 vchannel_name[IFNAMSIZ]; + __s8 vchannel_name[IFNAMSIZ]; } rmnet_mux_val; /* Input values for the RMNET_IOCTL_FLOW_CONTROL IOCTL */ @@ -309,7 +320,7 @@ struct rmnet_ioctl_extended_s { __u8 mux_id; } flow_control_prop; - /* Return values for RMNET_IOCTL_GET_EP_PAIR */ + /* Return values for RMNET_IOCTL_GET_EP_PAIR/LL/ETH */ struct { __u32 consumer_pipe_num; __u32 producer_pipe_num; @@ -334,6 +345,13 @@ struct rmnet_ioctl_extended_s { __u16 mtu_v4; __u16 mtu_v6; } mtu_params; + + /* Input values for the RMNET_IOCTL_ADD_MUX_CHANNEL_v2 IOCTL */ + struct { + __u32 mux_id; + __s8 vchannel_name[IFNAMSIZ]; + __u8 mac[MAC_ADDR_SIZE]; + } rmnet_mux_val_v2; } u; }; From 7610cc416f940b666c4d594201f0dec3382b647b Mon Sep 17 00:00:00 2001 From: Michael Adisumarta Date: Mon, 21 Nov 2022 16:29:51 -0800 Subject: [PATCH 24/30] msm: ipa3: add new ETH PDU pipes and ETH PDU event Define the UL and DL pipe for ETH PDU E2E transfers. Add new event for ENABLE_ETH_PDU_MODE. Change-Id: I1ebb943d63cee47c686a857343b1974ead376be0 Signed-off-by: Michael Adisumarta --- include/uapi/linux/msm_ipa.h | 25 +++++++++++++++++++++---- 1 file changed, 21 insertions(+), 4 deletions(-) diff --git a/include/uapi/linux/msm_ipa.h b/include/uapi/linux/msm_ipa.h index b6ad02a7ea3c..c97b7da86f0c 100644 --- a/include/uapi/linux/msm_ipa.h +++ b/include/uapi/linux/msm_ipa.h @@ -1,6 +1,8 @@ /* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */ /* * Copyright (c) 2012-2021, The Linux Foundation. All rights reserved. + * + * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved. 
*/ #ifndef _UAPI_MSM_IPA_H_ @@ -517,9 +519,15 @@ enum ipa_client_type { /* RESERVED PROD = 124, */ IPA_CLIENT_TPUT_CONS = 125, + + IPA_CLIENT_Q6_DL_NLO_ETH_DATA_PROD = 126, + /* RESERVED CONS = 127, */ + + IPA_CLIENT_APPS_WAN_ETH_PROD = 128, + /* RESERVED CONS = 129, */ }; -#define IPA_CLIENT_MAX (IPA_CLIENT_TPUT_CONS + 1) +#define IPA_CLIENT_MAX (IPA_CLIENT_APPS_WAN_ETH_PROD + 1) #define IPA_CLIENT_WLAN2_PROD IPA_CLIENT_A5_WLAN_AMPDU_PROD #define IPA_CLIENT_Q6_DL_NLO_DATA_PROD IPA_CLIENT_Q6_DL_NLO_DATA_PROD @@ -562,7 +570,8 @@ enum ipa_client_type { ((client) == IPA_CLIENT_APPS_LAN_PROD || \ (client) == IPA_CLIENT_APPS_WAN_PROD || \ (client) == IPA_CLIENT_APPS_WAN_LOW_LAT_PROD || \ - (client) == IPA_CLIENT_APPS_WAN_LOW_LAT_DATA_PROD) + (client) == IPA_CLIENT_APPS_WAN_LOW_LAT_DATA_PROD || \ + (client) == IPA_CLIENT_APPS_WAN_ETH_PROD) #define IPA_CLIENT_IS_USB_CONS(client) \ ((client) == IPA_CLIENT_USB_CONS || \ @@ -611,7 +620,8 @@ enum ipa_client_type { (client) == IPA_CLIENT_Q6_DL_NLO_LL_DATA_PROD || \ (client) == IPA_CLIENT_Q6_DL_NLO_DATA_PROD || \ (client) == IPA_CLIENT_Q6_CV2X_PROD || \ - (client) == IPA_CLIENT_Q6_AUDIO_DMA_MHI_PROD) + (client) == IPA_CLIENT_Q6_AUDIO_DMA_MHI_PROD || \ + (client) == IPA_CLIENT_Q6_DL_NLO_ETH_DATA_PROD) #define IPA_CLIENT_IS_Q6_NON_ZIP_CONS(client) \ ((client) == IPA_CLIENT_Q6_LAN_CONS || \ @@ -939,7 +949,14 @@ enum ipa_ext_route_evt { #define IPA_SET_EXT_ROUTER_MODE_EVENT_MAX IPA_SET_EXT_ROUTER_MODE_EVENT_MAX }; -#define IPA_EVENT_MAX_NUM (IPA_SET_EXT_ROUTER_MODE_EVENT_MAX) +enum ipa_eth_pdu_evt { + IPA_ENABLE_ETH_PDU_MODE_EVENT = IPA_SET_EXT_ROUTER_MODE_EVENT_MAX, + IPA_ENABLE_ETH_PDU_MODE_EVENT_MAX +#define IPA_ENABLE_ETH_PDU_MODE_EVENT_MAX IPA_ENABLE_ETH_PDU_MODE_EVENT_MAX +}; + + +#define IPA_EVENT_MAX_NUM (IPA_ENABLE_ETH_PDU_MODE_EVENT_MAX) #define IPA_EVENT_MAX ((int)IPA_EVENT_MAX_NUM) /** From 16d47db67bd7397e10d0f449f5d0e911e0995d80 Mon Sep 17 00:00:00 2001 From: Michael Adisumarta Date: Tue, 22 Nov 2022 12:14:28 -0800 Subject: [PATCH 25/30] msm: ipa3: add new ETH PDU QMI Add new QMI messages and struct to support ETH PDU FR. Change-Id: I2fa969330898e7e08b7ab3dcf0fea6b4239b253a Signed-off-by: Michael Adisumarta --- include/uapi/linux/ipa_qmi_service_v01.h | 106 +++++++++++++++++++++++ 1 file changed, 106 insertions(+) diff --git a/include/uapi/linux/ipa_qmi_service_v01.h b/include/uapi/linux/ipa_qmi_service_v01.h index 55148fd4f494..d79855aa3cc2 100644 --- a/include/uapi/linux/ipa_qmi_service_v01.h +++ b/include/uapi/linux/ipa_qmi_service_v01.h @@ -1,6 +1,8 @@ /* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */ /* * Copyright (c) 2013-2021, The Linux Foundation. All rights reserved. + * + * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved. */ /* @@ -52,6 +54,9 @@ #define QMI_IPA_IPFLTR_NUM_MEQ_32_EQNS_V01 2 #define QMI_IPA_MAX_PIPES_V01 20 #define QMI_IPA_MAX_PER_CLIENTS_V01 64 +#define QMI_IPA_MAX_RMNET_ETH_INFO_V01 19 +#define QMI_IPA_MAX_MAC_ADDR_LEN_V01 6 +#define QMI_IPA_MAX_IPV4_ADDR_LEN_V01 4 /* * Indicates presence of newly added member to support HW stats. @@ -113,6 +118,19 @@ enum ipa_platform_type_enum_v01 { /* To force a 32 bit signed enum. Do not change or use */ }; +enum ipa_eth_hw_config_enum_v01 { + IPA_QMI_ETH_HW_CONFIG_ENUM_MIN_ENUM_VAL_V01 = -2147483647, + /**< To force a 32 bit signed enum. 
Do not change or use*/ + IPA_QMI_ETH_HW_NONE_V01 = 0x00, + /**< Ethernet Setting HW Default \n */ + IPA_QMI_ETH_HW_VLAN_IP_V01 = 0x01, + /**< Ethernet HW VLAN+IP supported \n */ + IPA_QMI_ETH_HW_NON_VLAN_IP_V01 = 0x02, + /**< Ethernet HW NON_VLAN+IP supported */ + IPA_QMI_ETH_HW_CONFIG_ENUM_MAX_ENUM_VAL_V01 = 2147483647 + /**< To force a 32 bit signed enum. Do not change or use*/ +}; + #define QMI_IPA_PLATFORM_TYPE_LE_MHI_V01 \ QMI_IPA_PLATFORM_TYPE_LE_MHI_V01 @@ -538,6 +556,18 @@ struct ipa_indication_reg_req_msg_v01 { * message makes sense only when the QMI_IPA_INDICATION_REGISTER_REQ * is being originated from the master driver. */ + + /* Optional */ + /* Rmnet Ethernet MAC Information */ + __u8 rmnet_eth_mac_info_valid; + /* Must be set to true if rmnet_eth_mac_info is being passed */ + __u8 rmnet_eth_mac_info; + /* If set to TRUE, this field indicates that the client wants to + * receive indications about embeddd rmnet_eth mac info via + * QMI_IPA_RMNET_ETH_INFO_INDICATION. Setting this field in the request + * message makes sense only when the QMI_IPA_INDICATION_REGISTER_REQ is + * being originated from the master driver. + */ }; /* Message */ @@ -2559,6 +2589,7 @@ enum ipa_ic_type_enum_v01 { DATA_IC_TYPE_AP_V01 = 0x04, DATA_IC_TYPE_Q6_V01 = 0x05, DATA_IC_TYPE_UC_V01 = 0x06, + DATA_IC_TYPE_ETH_V01 = 0x07, IPA_IC_TYPE_ENUM_MAX_VAL_V01 = IPA_INT_MAX, }; @@ -2781,6 +2812,78 @@ struct ipa_move_nat_table_complt_ind_msg_v01 { }; /* Message */ #define QMI_IPA_NAT_TABLE_MOVE_COMPLETE_IND_MAX_MSG_LEN_V01 7 +/* + * Request Message; QMI request message to request for a dual-backhaul traffic + * offloading with ethernet and WWAN and notify the eth-header + */ +struct ipa_eth_backhaul_info_req_msg_v01 { + /* Mandatory */ + /* SRC MAC ADDR */ + __u8 src_mac_addr[QMI_IPA_MAX_MAC_ADDR_LEN_V01]; + /* src mac addr of eth hdr */ + /* Mandatory */ + /* DST MAC ADDR */ + __u8 dst_mac_addr[QMI_IPA_MAX_MAC_ADDR_LEN_V01]; + /* dst mac addr of eth hdr */ + /* Mandatory */ + /* IPv4 ADDR of ETH0 */ + __u32 ipv4_addr_eth0[QMI_IPA_MAX_IPV4_ADDR_LEN_V01]; + /* ipv4 addr of eth0 */ + /* Mandatory */ + /* ETH PIPE */ + __u8 eth_pipe; + /* Specifies eth pipe for Q6 to route UL pkts for ETH backhaul */ + /* Mandatory */ + /* ACTION */ + __u8 enable; + /* Specifies whether eth backhaul is enabled or disabled */ +}; /* Message */ +#define IPA_ETH_BACKHAUL_INFO_REQ_MSG_V01_MAX_MSG_LEN 45 + +/* Response Message; to notify the status of the dual-backhaul traffic + * offloading request using QMI_IPA_ETH_BACKHAUL_INFO_REQ + */ +struct ipa_eth_backhaul_info_resp_msg_v01 { + /* Mandatory */ + /* Result Code */ + struct ipa_qmi_response_type_v01 resp; + /* + * Standard response type. + * Standard response type. Contains the following data members: + * qmi_result_type -- QMI_RESULT_SUCCESS or QMI_RESULT_FAILURE + * qmi_error_type -- Error code. Possible error code values are + * described in the error codes section of each message definition. + */ +}; /* Message */ +#define IPA_ETH_BACKHAUL_INFO_RESP_MSG_V01_MAX_MSG_LEN 7 + +struct ipa_rmnet_eth_info_type_v01 { + __u8 mac_addr[QMI_IPA_MAX_MAC_ADDR_LEN_V01]; + /* mac addr of rmnet_eth */ + __u32 mux_id; + /* QMAP Mux ID. As a part of the QMAP protocol, + * several data calls may be multiplexed over the same physical transport + * channel. This identifier is used to identify one such data call. + * The maximum value for this identifier is 255. 
+ */ +}; /* Type */ + +/* Indication Message; This is an indication to send rmnet ethernet information + * for embedded wireless ethernet PDU from the modem processor to + * the application processor + */ +struct ipa_rmnet_eth_info_indication_msg_v01 { + /* Optional */ + /* Rmnet Ethernet Information */ + __u8 rmnet_eth_info_valid; + /* Must be set to true if rmnet_eth_info is being passed */ + __u32 rmnet_eth_info_len; + /* Must be set to # of elements in rmnet_eth_info */ + struct ipa_rmnet_eth_info_type_v01 rmnet_eth_info[QMI_IPA_MAX_RMNET_ETH_INFO_V01]; + /* Rmnet Ethernet Info array */ +}; /* Message */ +#define IPA_RMNET_ETH_INFO_INDICATION_MSG_V01_MAX_MSG_LEN 194 + /*Service Message Definition*/ #define QMI_IPA_INDICATION_REGISTER_REQ_V01 0x0020 #define QMI_IPA_INDICATION_REGISTER_RESP_V01 0x0020 @@ -2839,6 +2942,9 @@ struct ipa_move_nat_table_complt_ind_msg_v01 { #define QMI_IPA_MOVE_NAT_REQ_V01 0x0046 #define QMI_IPA_MOVE_NAT_RESP_V01 0x0046 #define QMI_IPA_MOVE_NAT_COMPLETE_IND_V01 0x0046 +#define QMI_IPA_ETH_BACKHAUL_INFO_REQ_V01 0x0047 +#define QMI_IPA_ETH_BACKHAUL_INFO_RESP_V01 0x0047 +#define QMI_IPA_RMNET_ETH_INFO_INDICATION_V01 0x0048 /* add for max length*/ #define QMI_IPA_INIT_MODEM_DRIVER_REQ_MAX_MSG_LEN_V01 197 From 4dd23ac1ac2a7cc819769efadf5f4df863e622bf Mon Sep 17 00:00:00 2001 From: Sai Chaitanya Kaveti Date: Tue, 15 Nov 2022 15:50:12 +0530 Subject: [PATCH 26/30] msm: mhi_dev: Avoiding double free in MHI UCI layer In mhi_uci_ctrl_set_tiocm(), the control message is submitted to MHI/IPA using mhi_uci_send_packet(). Device waits for completion after this. If the wait is interrupted or is timed out, control message buffer is freed using kfree(). But as the message is already sent, write completion callback is invoked after buffer is freed and same pointer is being freed again. To avoid this double free issue, removing kfree() in mhi_uci_ctrl_set_tiocm(). Once sending of the message is completed, buffer is freed as part of the write completion callback. Change-Id: I6e33ce46fc5506ac45256102221fafb08050a5b5 Signed-off-by: Sai Chaitanya Kaveti --- drivers/platform/msm/mhi_dev/mhi_uci.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/drivers/platform/msm/mhi_dev/mhi_uci.c b/drivers/platform/msm/mhi_dev/mhi_uci.c index d8c8f8655f25..222c71854d3b 100644 --- a/drivers/platform/msm/mhi_dev/mhi_uci.c +++ b/drivers/platform/msm/mhi_dev/mhi_uci.c @@ -1748,8 +1748,12 @@ static int mhi_uci_ctrl_set_tiocm(struct uci_client *client, reinit_completion(ctrl_client->write_done); ret_val = mhi_uci_send_packet(ctrl_client, ctrl_msg, sizeof(*ctrl_msg)); - if (ret_val != sizeof(*ctrl_msg)) + if (ret_val != sizeof(*ctrl_msg)) { + uci_log(UCI_DBG_ERROR, "Failed to send ctrl msg\n"); + kfree(ctrl_msg); + ctrl_msg = NULL; goto tiocm_error; + } compl_ret = wait_for_completion_interruptible_timeout( ctrl_client->write_done, MHI_UCI_ASYNC_WRITE_TIMEOUT); @@ -1768,7 +1772,6 @@ static int mhi_uci_ctrl_set_tiocm(struct uci_client *client, return 0; tiocm_error: - kfree(ctrl_msg); return ret_val; } From a56e91363f83b875a478a85b2fd5d7e2d6b989e8 Mon Sep 17 00:00:00 2001 From: Amit Kushwaha Date: Thu, 24 Nov 2022 17:30:18 +0530 Subject: [PATCH 27/30] msm: kgsl: Remove protected GPUCC registers from snapshot This change is to prevent data abort when HLOS accessing protected registers used in SoftSKU. 
Change-Id: Ia7facc5cf78453f75f829ae7b6626a7f182c8f91 Signed-off-by: Amit Kushwaha --- drivers/gpu/msm/adreno_a6xx_gmu_snapshot.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/msm/adreno_a6xx_gmu_snapshot.c b/drivers/gpu/msm/adreno_a6xx_gmu_snapshot.c index cc7ad875a598..5b19a616ee3e 100644 --- a/drivers/gpu/msm/adreno_a6xx_gmu_snapshot.c +++ b/drivers/gpu/msm/adreno_a6xx_gmu_snapshot.c @@ -43,7 +43,7 @@ static const unsigned int a6xx_gmu_registers[] = { 0x24000, 0x24012, 0x24040, 0x24052, 0x24400, 0x24404, 0x24407, 0x2440B, 0x24415, 0x2441C, 0x2441E, 0x2442D, 0x2443C, 0x2443D, 0x2443F, 0x24440, 0x24442, 0x24449, 0x24458, 0x2445A, 0x24540, 0x2455E, 0x24800, 0x24802, - 0x24C00, 0x24C02, 0x25400, 0x25402, 0x25800, 0x25802, 0x25C00, 0x25C02, + 0x24C00, 0x24C02, 0x25400, 0x25402, 0x25800, 0x25802, 0x26000, 0x26002, /* GPU CC ACD */ 0x26400, 0x26416, 0x26420, 0x26427, From 7bb7779ce4d629f8d06d0f24fd474e9b0e5d262d Mon Sep 17 00:00:00 2001 From: Avaneesh Kumar Dwivedi Date: Wed, 2 Nov 2022 15:48:06 +0530 Subject: [PATCH 28/30] core_ctl: Add check for available cpus before accessing per_cpus For qultivate target its trying to call per_cpu() for cpu's which are fused out and leading to crash So, Add a check for available cpu's before calling per_cpu. Change-Id: Idfd97fcfc83baa59afe9010396e7b6314087bf13 Signed-off-by: Avaneesh Kumar Dwivedi Signed-off-by: Chetan C R --- kernel/sched/walt/core_ctl.c | 28 ++++++++++++++++++++-------- 1 file changed, 20 insertions(+), 8 deletions(-) diff --git a/kernel/sched/walt/core_ctl.c b/kernel/sched/walt/core_ctl.c index 44b0fef98e4c..b3e4e5548ace 100644 --- a/kernel/sched/walt/core_ctl.c +++ b/kernel/sched/walt/core_ctl.c @@ -340,7 +340,7 @@ static ssize_t store_not_preferred(struct cluster_data *state, const char *buf, size_t count) { struct cpu_data *c; - unsigned int i; + unsigned int i, mask; unsigned int val[MAX_CPUS_PER_CLUSTER]; unsigned long flags; int ret; @@ -353,10 +353,16 @@ static ssize_t store_not_preferred(struct cluster_data *state, return -EINVAL; spin_lock_irqsave(&state_lock, flags); - for (i = 0; i < state->num_cpus; i++) { - c = &per_cpu(cpu_state, i + state->first_cpu); + for (i = 0, mask = 0; i < state->num_cpus;) { + if (!cpumask_test_cpu(i + mask + state->first_cpu, cpu_possible_mask)) { + mask++; + continue; + } + + c = &per_cpu(cpu_state, i + mask + state->first_cpu); c->not_preferred = val[i]; not_preferred_count += !!val[i]; + i++; } state->nr_not_preferred_cpus = not_preferred_count; spin_unlock_irqrestore(&state_lock, flags); @@ -369,20 +375,26 @@ static ssize_t show_not_preferred(const struct cluster_data *state, char *buf) struct cpu_data *c; ssize_t count = 0; unsigned long flags; - int i; + int i, mask; spin_lock_irqsave(&state_lock, flags); - for (i = 0; i < state->num_cpus; i++) { - c = &per_cpu(cpu_state, i + state->first_cpu); + for (i = 0, mask = 0; i < state->num_cpus;) { + if (!cpumask_test_cpu(i + mask + state->first_cpu, cpu_possible_mask)) { + mask++; + continue; + } + + c = &per_cpu(cpu_state, i + mask + state->first_cpu); count += scnprintf(buf + count, PAGE_SIZE - count, - "CPU#%d: %u\n", c->cpu, c->not_preferred); + "CPU#%d: %u\n", c->cpu, c->not_preferred); + i++; } + spin_unlock_irqrestore(&state_lock, flags); return count; } - struct core_ctl_attr { struct attribute attr; ssize_t (*show)(const struct cluster_data *, char *); From 6da027eb3af592f5ad3c18b8de8918cb56e0420a Mon Sep 17 00:00:00 2001 From: Krishna Kurapati Date: Sun, 13 Nov 2022 19:40:39 +0530 Subject: [PATCH 29/30] 
net: usbnet: Add mechanism to throttle usb0 RX traffic in USB SS ECM traffic is non-aggregated which makes it send huge number of packets in high throughput usecases. This causes the CPU to dedicate majority of the cycles to ISR context and the softIRQ contexts stay suspended. The rx_complete (hardIRQ) keeps adding SKBs to the 'done' list. The usbnet_bh (tasklet) is supposed to dequeue SKBs from 'done' list and provide them to NW stack so that they can be consumed. Because of the above issue, the 'done' list keeps increasing and the system eventually runs out of memory. When the WD_pet task also does not get a chance to run because of hardIRQ storm, WD bite is also observed. Introduce a mechanism to balance the time between hardIRQ and softIRQ contexts. Add module parameter 'usb0_rx_skb_threshold' to stop the submission of URBs to HW from the ISR. This will allow the usbnet_bh to run and consume SKBs, thereby avoiding the OOM scenario and the WD_pet task also gets a chance to pet the WD. The default value of this threshold is 500, which can be changed runtime to suit the requirements according to CPU load. Since in the failing case, ASIX AX88179/178A USB 3.0/2.0 to Gigabit Ethernet adapters are being used, throttle handling has been added in that driver. Usage: echo > /sys/module/usbnet/parameters/usb0_rx_skb_threshold Change-Id: I4667f0ad67d5605b132a0e6062be27e01ef75a08 Signed-off-by: Ajay Agarwal Signed-off-by: Krishna Kurapati --- drivers/net/usb/ax88179_178a.c | 16 ++++++++-------- drivers/net/usb/usbnet.c | 14 +++++++++++--- include/linux/usb/usbnet.h | 1 + 3 files changed, 20 insertions(+), 11 deletions(-) diff --git a/drivers/net/usb/ax88179_178a.c b/drivers/net/usb/ax88179_178a.c index dd5cff14f214..f8dbe908d4c5 100644 --- a/drivers/net/usb/ax88179_178a.c +++ b/drivers/net/usb/ax88179_178a.c @@ -1698,7 +1698,7 @@ static const struct driver_info ax88179_info = { .link_reset = ax88179_link_reset, .reset = ax88179_reset, .stop = ax88179_stop, - .flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_SEND_ZLP, + .flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_SEND_ZLP | FLAG_THROTTLE_RX, .rx_fixup = ax88179_rx_fixup, .tx_fixup = ax88179_tx_fixup, }; @@ -1711,7 +1711,7 @@ static const struct driver_info ax88178a_info = { .link_reset = ax88179_link_reset, .reset = ax88179_reset, .stop = ax88179_stop, - .flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_SEND_ZLP, + .flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_SEND_ZLP | FLAG_THROTTLE_RX, .rx_fixup = ax88179_rx_fixup, .tx_fixup = ax88179_tx_fixup, }; @@ -1724,7 +1724,7 @@ static const struct driver_info cypress_GX3_info = { .link_reset = ax88179_link_reset, .reset = ax88179_reset, .stop = ax88179_stop, - .flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_SEND_ZLP, + .flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_SEND_ZLP | FLAG_THROTTLE_RX, .rx_fixup = ax88179_rx_fixup, .tx_fixup = ax88179_tx_fixup, }; @@ -1737,7 +1737,7 @@ static const struct driver_info dlink_dub1312_info = { .link_reset = ax88179_link_reset, .reset = ax88179_reset, .stop = ax88179_stop, - .flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_SEND_ZLP, + .flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_SEND_ZLP | FLAG_THROTTLE_RX, .rx_fixup = ax88179_rx_fixup, .tx_fixup = ax88179_tx_fixup, }; @@ -1750,7 +1750,7 @@ static const struct driver_info sitecom_info = { .link_reset = ax88179_link_reset, .reset = ax88179_reset, .stop = ax88179_stop, - .flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_SEND_ZLP, + .flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_SEND_ZLP | FLAG_THROTTLE_RX, .rx_fixup = ax88179_rx_fixup, .tx_fixup 
= ax88179_tx_fixup, }; @@ -1763,7 +1763,7 @@ static const struct driver_info samsung_info = { .link_reset = ax88179_link_reset, .reset = ax88179_reset, .stop = ax88179_stop, - .flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_SEND_ZLP, + .flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_SEND_ZLP | FLAG_THROTTLE_RX, .rx_fixup = ax88179_rx_fixup, .tx_fixup = ax88179_tx_fixup, }; @@ -1776,7 +1776,7 @@ static const struct driver_info lenovo_info = { .link_reset = ax88179_link_reset, .reset = ax88179_reset, .stop = ax88179_stop, - .flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_SEND_ZLP, + .flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_SEND_ZLP | FLAG_THROTTLE_RX, .rx_fixup = ax88179_rx_fixup, .tx_fixup = ax88179_tx_fixup, }; @@ -1789,7 +1789,7 @@ static const struct driver_info belkin_info = { .link_reset = ax88179_link_reset, .reset = ax88179_reset, .stop = ax88179_stop, - .flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_SEND_ZLP, + .flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_SEND_ZLP | FLAG_THROTTLE_RX, .rx_fixup = ax88179_rx_fixup, .tx_fixup = ax88179_tx_fixup, }; diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c index af3a3ccf2e3c..c3ca4b7b501f 100644 --- a/drivers/net/usb/usbnet.c +++ b/drivers/net/usb/usbnet.c @@ -75,6 +75,10 @@ static int msg_level = -1; module_param (msg_level, int, 0); MODULE_PARM_DESC (msg_level, "Override default message level"); +static int usb0_rx_skb_threshold = 500; +module_param(usb0_rx_skb_threshold, int, 0644); +MODULE_PARM_DESC(usb0_rx_skb_threshold, "Throttle rx traffic in USB3"); + /*-------------------------------------------------------------------------*/ /* handles CDC Ethernet and many other network "bulk data" interfaces */ @@ -654,9 +658,13 @@ block: if (netif_running (dev->net) && !test_bit (EVENT_RX_HALT, &dev->flags) && state != unlink_start) { - rx_submit (dev, urb, GFP_ATOMIC); - usb_mark_last_busy(dev->udev); - return; + if ((!(dev->driver_info->flags & FLAG_THROTTLE_RX)) || + ((dev->driver_info->flags & FLAG_THROTTLE_RX) && + (dev->done.qlen < usb0_rx_skb_threshold))) { + rx_submit(dev, urb, GFP_ATOMIC); + usb_mark_last_busy(dev->udev); + return; + } } usb_free_urb (urb); } diff --git a/include/linux/usb/usbnet.h b/include/linux/usb/usbnet.h index a90e2a9dfdd3..73f5f58d6e72 100644 --- a/include/linux/usb/usbnet.h +++ b/include/linux/usb/usbnet.h @@ -129,6 +129,7 @@ struct driver_info { #define FLAG_MULTI_PACKET 0x2000 #define FLAG_RX_ASSEMBLE 0x4000 /* rx packets may span >1 frames */ #define FLAG_NOARP 0x8000 /* device can't do ARP */ +#define FLAG_THROTTLE_RX 0x10000 /* Throttle RX traffic in USB SS */ /* init device ... can sleep, or cause probe() failure */ int (*bind)(struct usbnet *, struct usb_interface *); From 272e244efaa1da4d65d47baba42a003f618b9367 Mon Sep 17 00:00:00 2001 From: Sharath Chandra Vurukala Date: Mon, 28 Nov 2022 11:44:06 +0530 Subject: [PATCH 30/30] net: rmnet: add ioctl support for IP route utility Add a new define to support rmnet ioctl for IP ROUTE. 
Change-Id: I9927ff28dd0a377eef132586ea7794cc01279fc0 Signed-off-by: Sharath Chandra Vurukala --- include/uapi/linux/msm_rmnet.h | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/include/uapi/linux/msm_rmnet.h b/include/uapi/linux/msm_rmnet.h index 7ea067075c64..fb00ec04b48a 100644 --- a/include/uapi/linux/msm_rmnet.h +++ b/include/uapi/linux/msm_rmnet.h @@ -113,13 +113,14 @@ #define RMNET_IOCTL_EGRESS_FORMAT_AGGREGATION (1<<2) #define RMNET_IOCTL_EGRESS_FORMAT_MUXING (1<<3) #define RMNET_IOCTL_EGRESS_FORMAT_CHECKSUM (1<<4) - +#define RMNET_IOCTL_EGRESS_FORMAT_IP_ROUTE (1<<5) /* Input values for the RMNET_IOCTL_SET_INGRESS_DATA_FORMAT IOCTL */ #define RMNET_IOCTL_INGRESS_FORMAT_MAP (1<<1) #define RMNET_IOCTL_INGRESS_FORMAT_DEAGGREGATION (1<<2) #define RMNET_IOCTL_INGRESS_FORMAT_DEMUXING (1<<3) #define RMNET_IOCTL_INGRESS_FORMAT_CHECKSUM (1<<4) #define RMNET_IOCTL_INGRESS_FORMAT_AGG_DATA (1<<5) +#define RMNET_IOCTL_INGRESS_FORMAT_IP_ROUTE (1<<6) /* Input values for the RMNET_IOCTL_SET_OFFLOAD */ #define RMNET_IOCTL_OFFLOAD_FORMAT_NONE (0)