Merge tag 'LA.UM.9.14.r1-21000-LAHAINA.QSSI13.0' of https://git.codelinaro.org/clo/la/kernel/msm-5.4 into android13-5.4-lahaina

"LA.UM.9.14.r1-21000-LAHAINA.QSSI13.0"

* tag 'LA.UM.9.14.r1-21000-LAHAINA.QSSI13.0' of https://git.codelinaro.org/clo/la/kernel/msm-5.4:
  net: rmnet: add ioctl support for IP route utility
  net: usbnet: Add mechanism to throttle usb0 RX traffic in USB SS
  core_ctl: Add check for available cpus before accessing per_cpus
  msm: kgsl: Remove protected GPUCC registers from snapshot
  msm: mhi_dev: Avoiding double free in MHI UCI layer
  msm: ipa3: add new ETH PDU QMI
  msm: ipa3: add new ETH PDU pipes and ETH PDU event
  msm: ipa3: rmnet: header file update for eth pdu
  msm: ipa3: add MPLS support to existing IPA GRE struct
  wifi: cfg80211: fix BSS refcounting bugs
  wifi: cfg80211: avoid nontransmitted BSS list corruption
  wifi: cfg80211: fix u8 overflow in cfg80211_update_notlisted_nontrans()
  cnss2: Add support for handling AFC memory request from FW
  qcedev: check num_fds during unmap
  kernel-msm: ipa_eth: Dynamic IPA NTN3 ETH client moderation configuration
  msm: kgsl: Cleanup correctly when platform probe fails
  Revert "arm64: mm: Don't invalidate FROM_DEVICE buffers at start of DMA transfer"
  virt: haven: rm_core: Clean up sequence idr earlier
  virt: haven: rm_core: Re-use alloc'd message when sending requests
  virt: haven: rm_core: Guard against unexpected messages
  virt: haven: rm_core: Rename connection variables
  virt: haven: rm_core: Remove current_recv_buffer tracking
  virt: haven: rm_core: Always allocate a connection for notifications
  defconfig : Enable xt_multiport module for iptables
  icnss: Add debug fs entry to call PSF callback
  icnss2: Enable power supply framework for 5.4 kernel
  virt: haven: rm_core: Validate notifications using payload size only
  pci: msm: Disable BDF halt for sdxlemur
  ANDROID: ABI: Update allowed list for QCOM
  usb: gadget: cdev: Requeue the request outside spinlock

 Conflicts:
	arch/arm64/boot/dts/vendor/bindings/gpio/gpio-altera.txt
	arch/arm64/mm/cache.S
	drivers/net/usb/ax88179_178a.c
	net/wireless/scan.c

Change-Id: I432c73b27169086d5a0431d2b049a8d98e0432fa
commit 7714527f2d by Michael Bestas, 2022-12-17 02:49:28 +02:00
GPG Key ID: CC95044519BE6669
28 changed files with 2105 additions and 1424 deletions

View File

@ -1 +1 @@
-LTS_5.4.197_26eb689452c8
+LTS_5.4.210_7e6cbbe7e59a

View File

@ -271,3 +271,4 @@ CONFIG_PCIEASPM_POWER_SUPERSAVE=y
CONFIG_TLB_CONF_HANDLER=y
CONFIG_PRINTK_CALLER=y
CONFIG_QCOM_SUBSYSTEM_SLEEP_STATS=y
CONFIG_NETFILTER_XT_MATCH_MULTIPORT=y

View File

@ -1916,7 +1916,9 @@ long qcedev_ioctl(struct file *file,
goto exit_free_qcedev_areq;
}
-if (map_buf.num_fds > QCEDEV_MAX_BUFFERS) {
+if (map_buf.num_fds > ARRAY_SIZE(map_buf.fd)) {
pr_err("%s: err: num_fds = %d exceeds max value\n",
__func__, map_buf.num_fds);
err = -EINVAL;
goto exit_free_qcedev_areq;
}
@ -1956,6 +1958,12 @@ long qcedev_ioctl(struct file *file,
err = -EFAULT;
goto exit_free_qcedev_areq;
}
if (unmap_buf.num_fds > ARRAY_SIZE(unmap_buf.fd)) {
pr_err("%s: err: num_fds = %d exceeds max value\n",
__func__, unmap_buf.num_fds);
err = -EINVAL;
goto exit_free_qcedev_areq;
}
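/*
 * Sketch (illustrative, not part of the change): both the map and unmap
 * ioctl paths now clamp the user-supplied num_fds against ARRAY_SIZE()
 * of the fd array they index, so the unmap loop below can no longer
 * walk past the end of unmap_buf.fd.
 */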
for (i = 0; i < unmap_buf.num_fds; i++) {
err = qcedev_check_and_unmap_buffer(handle,

View File

@ -1585,6 +1585,7 @@ err:
if (of_find_matching_node(dev->of_node, adreno_gmu_match))
component_unbind_all(dev, NULL);
idr_destroy(&device->context_idr);
kgsl_bus_close(device);
return status;

View File

@ -43,7 +43,7 @@ static const unsigned int a6xx_gmu_registers[] = {
0x24000, 0x24012, 0x24040, 0x24052, 0x24400, 0x24404, 0x24407, 0x2440B,
0x24415, 0x2441C, 0x2441E, 0x2442D, 0x2443C, 0x2443D, 0x2443F, 0x24440,
0x24442, 0x24449, 0x24458, 0x2445A, 0x24540, 0x2455E, 0x24800, 0x24802,
-0x24C00, 0x24C02, 0x25400, 0x25402, 0x25800, 0x25802, 0x25C00, 0x25C02,
+0x24C00, 0x24C02, 0x25400, 0x25402, 0x25800, 0x25802,
0x26000, 0x26002,
/* GPU CC ACD */
0x26400, 0x26416, 0x26420, 0x26427,

View File

@ -4293,6 +4293,9 @@ static void _unregister_device(struct kgsl_device *device)
{
int minor;
if (device->gpu_sysfs_kobj.state_initialized)
kobject_del(&device->gpu_sysfs_kobj);
mutex_lock(&kgsl_driver.devlock);
for (minor = 0; minor < ARRAY_SIZE(kgsl_driver.devp); minor++) {
if (device == kgsl_driver.devp[minor]) {
@ -4484,12 +4487,6 @@ int kgsl_device_platform_probe(struct kgsl_device *device)
device->pwrctrl.interrupt_num = status;
disable_irq(device->pwrctrl.interrupt_num);
-rwlock_init(&device->context_lock);
-spin_lock_init(&device->submit_lock);
-idr_init(&device->timelines);
-spin_lock_init(&device->timelines_lock);
device->events_wq = alloc_workqueue("kgsl-events",
WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_SYSFS | WQ_HIGHPRI, 0);
@ -4504,6 +4501,12 @@ int kgsl_device_platform_probe(struct kgsl_device *device)
if (status != 0)
goto error_pwrctrl_close;
+rwlock_init(&device->context_lock);
+spin_lock_init(&device->submit_lock);
+idr_init(&device->timelines);
+spin_lock_init(&device->timelines_lock);
kgsl_device_debugfs_init(device);
dma_set_coherent_mask(&pdev->dev, KGSL_DMA_BIT_MASK);
@ -4537,9 +4540,6 @@ void kgsl_device_platform_remove(struct kgsl_device *device)
kgsl_device_snapshot_close(device);
if (device->gpu_sysfs_kobj.state_initialized)
kobject_del(&device->gpu_sysfs_kobj);
idr_destroy(&device->context_idr);
idr_destroy(&device->timelines);

View File

@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2019-2021, The Linux Foundation. All rights reserved.
* Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/interconnect.h>
@ -224,6 +225,7 @@ void kgsl_bus_close(struct kgsl_device *device)
kfree(device->pwrctrl.ddr_table);
device->pwrctrl.ddr_table = NULL;
icc_put(device->pwrctrl.icc_path);
device->pwrctrl.icc_path = NULL;
if (device->pwrctrl.ddr_qos_devfreq)
put_device(&device->pwrctrl.ddr_qos_devfreq->dev);
}

View File

@ -1698,7 +1698,7 @@ static const struct driver_info ax88179_info = {
.link_reset = ax88179_link_reset,
.reset = ax88179_reset,
.stop = ax88179_stop,
-.flags = FLAG_ETHER | FLAG_FRAMING_AX,
+.flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_THROTTLE_RX,
.rx_fixup = ax88179_rx_fixup,
.tx_fixup = ax88179_tx_fixup,
};
@ -1711,7 +1711,7 @@ static const struct driver_info ax88178a_info = {
.link_reset = ax88179_link_reset,
.reset = ax88179_reset,
.stop = ax88179_stop,
-.flags = FLAG_ETHER | FLAG_FRAMING_AX,
+.flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_THROTTLE_RX,
.rx_fixup = ax88179_rx_fixup,
.tx_fixup = ax88179_tx_fixup,
};
@ -1724,7 +1724,7 @@ static const struct driver_info cypress_GX3_info = {
.link_reset = ax88179_link_reset,
.reset = ax88179_reset,
.stop = ax88179_stop,
-.flags = FLAG_ETHER | FLAG_FRAMING_AX,
+.flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_THROTTLE_RX,
.rx_fixup = ax88179_rx_fixup,
.tx_fixup = ax88179_tx_fixup,
};
@ -1737,7 +1737,7 @@ static const struct driver_info dlink_dub1312_info = {
.link_reset = ax88179_link_reset,
.reset = ax88179_reset,
.stop = ax88179_stop,
-.flags = FLAG_ETHER | FLAG_FRAMING_AX,
+.flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_THROTTLE_RX,
.rx_fixup = ax88179_rx_fixup,
.tx_fixup = ax88179_tx_fixup,
};
@ -1750,7 +1750,7 @@ static const struct driver_info sitecom_info = {
.link_reset = ax88179_link_reset,
.reset = ax88179_reset,
.stop = ax88179_stop,
-.flags = FLAG_ETHER | FLAG_FRAMING_AX,
+.flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_THROTTLE_RX,
.rx_fixup = ax88179_rx_fixup,
.tx_fixup = ax88179_tx_fixup,
};
@ -1763,7 +1763,7 @@ static const struct driver_info samsung_info = {
.link_reset = ax88179_link_reset,
.reset = ax88179_reset,
.stop = ax88179_stop,
-.flags = FLAG_ETHER | FLAG_FRAMING_AX,
+.flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_THROTTLE_RX,
.rx_fixup = ax88179_rx_fixup,
.tx_fixup = ax88179_tx_fixup,
};
@ -1776,7 +1776,7 @@ static const struct driver_info lenovo_info = {
.link_reset = ax88179_link_reset,
.reset = ax88179_reset,
.stop = ax88179_stop,
-.flags = FLAG_ETHER | FLAG_FRAMING_AX,
+.flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_THROTTLE_RX,
.rx_fixup = ax88179_rx_fixup,
.tx_fixup = ax88179_tx_fixup,
};
@ -1789,7 +1789,7 @@ static const struct driver_info belkin_info = {
.link_reset = ax88179_link_reset,
.reset = ax88179_reset,
.stop = ax88179_stop,
-.flags = FLAG_ETHER | FLAG_FRAMING_AX,
+.flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_THROTTLE_RX,
.rx_fixup = ax88179_rx_fixup,
.tx_fixup = ax88179_tx_fixup,
};

View File

@ -75,6 +75,10 @@ static int msg_level = -1;
module_param (msg_level, int, 0);
MODULE_PARM_DESC (msg_level, "Override default message level");
static int usb0_rx_skb_threshold = 500;
module_param(usb0_rx_skb_threshold, int, 0644);
MODULE_PARM_DESC(usb0_rx_skb_threshold, "Throttle rx traffic in USB3");
/*-------------------------------------------------------------------------*/
/* handles CDC Ethernet and many other network "bulk data" interfaces */
@ -654,9 +658,13 @@ block:
if (netif_running (dev->net) &&
!test_bit (EVENT_RX_HALT, &dev->flags) &&
state != unlink_start) {
-rx_submit (dev, urb, GFP_ATOMIC);
-usb_mark_last_busy(dev->udev);
-return;
+if ((!(dev->driver_info->flags & FLAG_THROTTLE_RX)) ||
+((dev->driver_info->flags & FLAG_THROTTLE_RX) &&
+(dev->done.qlen < usb0_rx_skb_threshold))) {
+rx_submit(dev, urb, GFP_ATOMIC);
+usb_mark_last_busy(dev->udev);
+return;
+}
}
usb_free_urb (urb);
}
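For drivers that set FLAG_THROTTLE_RX, the resubmit gate above simplifies: since !A || (A && B) is equivalent to !A || B, the test reduces to a single queue-length check. A minimal restatement of the gate (sketch, same identifiers as the hunk):

	if (!(dev->driver_info->flags & FLAG_THROTTLE_RX) ||
	    dev->done.qlen < usb0_rx_skb_threshold) {
		/* resubmit path: rx_submit(), usb_mark_last_busy(), return */
	}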

View File

@ -89,6 +89,13 @@ static DEFINE_SPINLOCK(time_sync_lock);
#define MHI_SUSPEND_RETRY_CNT 3
#define AFC_SLOT_SIZE 0x1000
#define AFC_MAX_SLOT 2
#define AFC_MEM_SIZE (AFC_SLOT_SIZE * AFC_MAX_SLOT)
#define AFC_AUTH_STATUS_OFFSET 1
#define AFC_AUTH_SUCCESS 1
#define AFC_AUTH_ERROR 0
static struct cnss_pci_reg ce_src[] = {
{ "SRC_RING_BASE_LSB", QCA6390_CE_SRC_RING_BASE_LSB_OFFSET },
{ "SRC_RING_BASE_MSB", QCA6390_CE_SRC_RING_BASE_MSB_OFFSET },
@ -3977,6 +3984,94 @@ int cnss_pci_qmi_send_put(struct cnss_pci_data *pci_priv)
return ret;
}
int cnss_send_buffer_to_afcmem(struct device *dev, char *afcdb, uint32_t len,
uint8_t slotid)
{
struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(dev);
struct cnss_fw_mem *fw_mem;
void *mem = NULL;
int i, ret;
u32 *status;
if (!plat_priv)
return -EINVAL;
fw_mem = plat_priv->fw_mem;
if (slotid >= AFC_MAX_SLOT) {
cnss_pr_err("Invalid slot id %d\n", slotid);
ret = -EINVAL;
goto err;
}
if (len > AFC_SLOT_SIZE) {
cnss_pr_err("len %d greater than slot size", len);
ret = -EINVAL;
goto err;
}
for (i = 0; i < plat_priv->fw_mem_seg_len; i++) {
if (fw_mem[i].type == QMI_WLFW_AFC_MEM_V01) {
mem = fw_mem[i].va;
status = mem + (slotid * AFC_SLOT_SIZE);
break;
}
}
if (!mem) {
cnss_pr_err("AFC mem is not available\n");
ret = -ENOMEM;
goto err;
}
memcpy(mem + (slotid * AFC_SLOT_SIZE), afcdb, len);
if (len < AFC_SLOT_SIZE)
memset(mem + (slotid * AFC_SLOT_SIZE) + len,
0, AFC_SLOT_SIZE - len);
status[AFC_AUTH_STATUS_OFFSET] = cpu_to_le32(AFC_AUTH_SUCCESS);
return 0;
err:
return ret;
}
EXPORT_SYMBOL(cnss_send_buffer_to_afcmem);
int cnss_reset_afcmem(struct device *dev, uint8_t slotid)
{
struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(dev);
struct cnss_fw_mem *fw_mem;
void *mem = NULL;
int i, ret;
if (!plat_priv)
return -EINVAL;
fw_mem = plat_priv->fw_mem;
if (slotid >= AFC_MAX_SLOT) {
cnss_pr_err("Invalid slot id %d\n", slotid);
ret = -EINVAL;
goto err;
}
for (i = 0; i < plat_priv->fw_mem_seg_len; i++) {
if (fw_mem[i].type == QMI_WLFW_AFC_MEM_V01) {
mem = fw_mem[i].va;
break;
}
}
if (!mem) {
cnss_pr_err("AFC mem is not available\n");
ret = -ENOMEM;
goto err;
}
memset(mem + (slotid * AFC_SLOT_SIZE), 0, AFC_SLOT_SIZE);
return 0;
err:
return ret;
}
EXPORT_SYMBOL(cnss_reset_afcmem);
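/*
 * Sketch of the slot layout both helpers assume (illustrative, not part
 * of the change; fw_mem_va stands in for the matched fw_mem[i].va):
 *
 *   void *slot   = fw_mem_va + slotid * AFC_SLOT_SIZE;    slot base
 *   u32  *status = slot;                                  status words
 *   status[AFC_AUTH_STATUS_OFFSET] = cpu_to_le32(AFC_AUTH_SUCCESS);
 *
 * AFC_MEM_SIZE caps the region at AFC_MAX_SLOT (2) slots of 0x1000
 * bytes each, which is why slotid and len are range-checked first.
 */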
int cnss_pci_alloc_fw_mem(struct cnss_pci_data *pci_priv)
{
struct cnss_plat_data *plat_priv = pci_priv->plat_priv;

View File

@ -168,6 +168,7 @@ enum wlfw_mem_type_enum_v01 {
QMI_WLFW_MEM_HANG_DATA_V01 = 7,
QMI_WLFW_MLO_GLOBAL_MEM_V01 = 8,
QMI_WLFW_PAGEABLE_MEM_V01 = 9,
QMI_WLFW_AFC_MEM_V01 = 10,
WLFW_MEM_TYPE_ENUM_MAX_VAL_V01 = INT_MAX,
};

View File

@ -74,6 +74,7 @@
#define PCIE20_PARF_MHI_CLOCK_RESET_CTRL (0x174)
#define PCIE20_PARF_AXI_MSTR_RD_ADDR_HALT (0x1a4)
#define PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT (0x1a8)
#define PCIE20_PCIE_PARF_AXI_MSTR_WR_NS_BDF_HALT (0x4a0)
#define PCIE20_PARF_LTSSM (0x1b0)
#define PCIE20_PARF_INT_ALL_STATUS (0x224)
#define PCIE20_PARF_INT_ALL_CLEAR (0x228)
@ -847,6 +848,7 @@ struct msm_pcie_dev_t {
bool linkdown_panic;
uint32_t boot_option;
bool pcie_halt_feature_dis;
bool pcie_bdf_halt_dis;
uint32_t rc_idx;
uint32_t phy_ver;
@ -1684,6 +1686,8 @@ static void msm_pcie_show_status(struct msm_pcie_dev_t *dev)
dev->slv_addr_space_size);
PCIE_DBG_FS(dev, "PCIe: halt_feature_dis is %d\n",
dev->pcie_halt_feature_dis);
PCIE_DBG_FS(dev, "PCIe: bdf_halt_dis is %d\n",
dev->pcie_bdf_halt_dis);
PCIE_DBG_FS(dev, "phy_status_offset: 0x%x\n",
dev->phy_status_offset);
PCIE_DBG_FS(dev, "phy_status_bit: %u\n",
@ -4761,6 +4765,12 @@ static int msm_pcie_enable(struct msm_pcie_dev_t *dev)
BIT(31) | val);
}
if (dev->pcie_bdf_halt_dis) {
val = readl_relaxed(dev->parf + PCIE20_PCIE_PARF_AXI_MSTR_WR_NS_BDF_HALT);
msm_pcie_write_reg(dev->parf, PCIE20_PCIE_PARF_AXI_MSTR_WR_NS_BDF_HALT,
(~BIT(0)) & val);
}
/* init tcsr */
if (dev->tcsr_config)
pcie_tcsr_init(dev);
@ -6312,6 +6322,11 @@ static int msm_pcie_probe(struct platform_device *pdev)
PCIE_DBG(pcie_dev, "PCIe halt feature is %s enabled.\n",
pcie_dev->pcie_halt_feature_dis ? "not" : "");
pcie_dev->pcie_bdf_halt_dis = of_property_read_bool(of_node,
"qcom,bdf-halt-dis");
PCIE_DBG(pcie_dev, "PCIe BDF halt feature is %s enabled.\n",
pcie_dev->pcie_bdf_halt_dis ? "not" : "");
of_property_read_u32(of_node, "qcom,phy-status-offset",
&pcie_dev->phy_status_offset);
PCIE_DBG(pcie_dev, "RC%d: phy-status-offset: 0x%x.\n", pcie_dev->rc_idx,

View File

@ -1748,8 +1748,12 @@ static int mhi_uci_ctrl_set_tiocm(struct uci_client *client,
reinit_completion(ctrl_client->write_done);
ret_val = mhi_uci_send_packet(ctrl_client, ctrl_msg, sizeof(*ctrl_msg));
-if (ret_val != sizeof(*ctrl_msg))
+if (ret_val != sizeof(*ctrl_msg)) {
uci_log(UCI_DBG_ERROR, "Failed to send ctrl msg\n");
kfree(ctrl_msg);
ctrl_msg = NULL;
goto tiocm_error;
}
compl_ret = wait_for_completion_interruptible_timeout(
ctrl_client->write_done,
MHI_UCI_ASYNC_WRITE_TIMEOUT);
@ -1768,7 +1772,6 @@ static int mhi_uci_ctrl_set_tiocm(struct uci_client *client,
return 0;
tiocm_error:
-kfree(ctrl_msg);
return ret_val;
}

View File

@ -521,7 +521,8 @@ static int icnss_fw_debug_show(struct seq_file *s, void *data)
seq_puts(s, " VAL: 2 (CCPM test)\n");
seq_puts(s, " VAL: 3 (Trigger Recovery)\n");
seq_puts(s, " VAL: 4 (allow recursive recovery)\n");
seq_puts(s, " VAL: 3 (Disallow recursive recovery)\n");
seq_puts(s, " VAL: 5 (Disallow recursive recovery)\n");
seq_puts(s, " VAL: 6 (Trigger power supply callback)\n");
seq_puts(s, "\nCMD: dynamic_feature_mask\n");
seq_puts(s, " VAL: (64 bit feature mask)\n");
@ -682,6 +683,9 @@ static ssize_t icnss_fw_debug_write(struct file *fp,
case 5:
icnss_disallow_recursive_recovery(&priv->pdev->dev);
break;
case 6:
power_supply_changed(priv->batt_psy);
break;
default:
return -EINVAL;
}

View File

@ -722,6 +722,9 @@ static int icnss_driver_event_server_arrive(struct icnss_priv *priv,
if (priv->vbatt_supported)
icnss_init_vph_monitor(priv);
if (priv->psf_supported)
queue_work(priv->soc_update_wq, &priv->soc_update_work);
return ret;
device_info_failure:
@ -745,6 +748,9 @@ static int icnss_driver_event_server_exit(struct icnss_priv *priv)
adc_tm_disable_chan_meas(priv->adc_tm_dev,
&priv->vph_monitor_params);
if (priv->psf_supported)
priv->last_updated_voltage = 0;
return 0;
}
@ -3610,6 +3616,13 @@ static int icnss_resource_parse(struct icnss_priv *priv)
goto put_vreg;
}
if (of_property_read_bool(pdev->dev.of_node, "qcom,psf-supported")) {
ret = icnss_get_psf_info(priv);
if (ret < 0)
goto out;
priv->psf_supported = true;
}
if (priv->device_id == ADRASTEA_DEVICE_ID) {
res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
"membase");
@ -4145,6 +4158,18 @@ out_reset_drvdata:
return ret;
}
static void icnss_unregister_power_supply_notifier(struct icnss_priv *priv)
{
if (priv->batt_psy)
power_supply_put(penv->batt_psy);
if (priv->psf_supported) {
flush_workqueue(priv->soc_update_wq);
destroy_workqueue(priv->soc_update_wq);
power_supply_unreg_notifier(&priv->psf_nb);
}
}
static int icnss_remove(struct platform_device *pdev)
{
struct icnss_priv *priv = dev_get_drvdata(&pdev->dev);
@ -4164,6 +4189,8 @@ static int icnss_remove(struct platform_device *pdev)
icnss_debugfs_destroy(priv);
icnss_unregister_power_supply_notifier(penv);
icnss_sysfs_destroy(priv);
complete_all(&priv->unblock_shutdown);

View File

@ -13,6 +13,7 @@
#include <linux/kobject.h>
#include <linux/platform_device.h>
#include <linux/ipc_logging.h>
#include <linux/power_supply.h>
#include <dt-bindings/iio/qcom,spmi-vadc.h>
#include <soc/qcom/icnss2.h>
#include <soc/qcom/service-locator.h>
@ -167,6 +168,11 @@ struct icnss_clk_cfg {
u32 required;
};
struct icnss_battery_level {
int lower_battery_threshold;
int ldo_voltage;
};
struct icnss_clk_info {
struct list_head list;
struct clk *clk;
@ -466,6 +472,12 @@ struct icnss_priv {
struct icnss_dms_data dms;
u8 use_nv_mac;
u32 wlan_en_delay_ms;
bool psf_supported;
struct notifier_block psf_nb;
struct power_supply *batt_psy;
int last_updated_voltage;
struct work_struct soc_update_work;
struct workqueue_struct *soc_update_wq;
unsigned long device_config;
struct timer_list recovery_timer;
};

View File

@ -29,6 +29,14 @@ static struct icnss_vreg_cfg icnss_adrestea_vreg_list[] = {
{"vdd-smps", 984000, 984000, 0, 0, 0, false, true},
};
static struct icnss_battery_level icnss_battery_level[] = {
{70, 3300000},
{60, 3200000},
{50, 3100000},
{25, 3000000},
{0, 2850000},
};
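/*
 * Worked example (illustrative, not part of the change): the table maps
 * the first (highest) threshold the reading satisfies to an LDO value in
 * microvolts, e.g. a 55% reading fails 70 and 60, matches 50, and
 * selects 3100000 uV.
 */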
static struct icnss_clk_cfg icnss_clk_list[] = {
{"rf_clk", 0, 0},
};
@ -55,6 +63,9 @@ static struct icnss_clk_cfg icnss_adrestea_clk_list[] = {
#define MAX_TCS_CMD_NUM 5
#define BT_CXMX_VOLTAGE_MV 950
#define ICNSS_BATTERY_LEVEL_COUNT ARRAY_SIZE(icnss_battery_level)
#define ICNSS_MAX_BATTERY_LEVEL 100
static int icnss_get_vreg_single(struct icnss_priv *priv,
struct icnss_vreg_info *vreg)
{
@ -821,6 +832,110 @@ out:
return ret;
}
static int icnss_get_battery_level(struct icnss_priv *priv)
{
int err = 0, battery_percentage = 0;
union power_supply_propval psp = {0,};
if (!priv->batt_psy)
priv->batt_psy = power_supply_get_by_name("battery");
if (priv->batt_psy) {
err = power_supply_get_property(priv->batt_psy,
POWER_SUPPLY_PROP_CAPACITY,
&psp);
if (err) {
icnss_pr_err("battery percentage read error:%d\n", err);
goto out;
}
battery_percentage = psp.intval;
}
icnss_pr_info("Battery Percentage: %d\n", battery_percentage);
out:
return battery_percentage;
}
static void icnss_update_soc_level(struct work_struct *work)
{
int battery_percentage = 0, current_updated_voltage = 0, err = 0;
int level_count;
struct icnss_priv *priv = container_of(work, struct icnss_priv, soc_update_work);
battery_percentage = icnss_get_battery_level(priv);
if (!battery_percentage ||
battery_percentage > ICNSS_MAX_BATTERY_LEVEL) {
icnss_pr_err("Battery percentage read failure\n");
return;
}
for (level_count = 0; level_count < ICNSS_BATTERY_LEVEL_COUNT;
level_count++) {
if (battery_percentage >=
icnss_battery_level[level_count].lower_battery_threshold) {
current_updated_voltage =
icnss_battery_level[level_count].ldo_voltage;
break;
}
}
if (level_count != ICNSS_BATTERY_LEVEL_COUNT &&
priv->last_updated_voltage != current_updated_voltage) {
err = icnss_send_vbatt_update(priv, current_updated_voltage);
if (err < 0) {
icnss_pr_err("Unable to update ldo voltage");
return;
}
priv->last_updated_voltage = current_updated_voltage;
}
}
static int icnss_battery_supply_callback(struct notifier_block *nb,
unsigned long event, void *data)
{
struct power_supply *psy = data;
struct icnss_priv *priv = container_of(nb, struct icnss_priv,
psf_nb);
if (strcmp(psy->desc->name, "battery"))
return NOTIFY_OK;
if (test_bit(ICNSS_WLFW_CONNECTED, &priv->state) &&
!test_bit(ICNSS_FW_DOWN, &priv->state))
queue_work(priv->soc_update_wq, &priv->soc_update_work);
return NOTIFY_OK;
}
int icnss_get_psf_info(struct icnss_priv *priv)
{
int ret = 0;
priv->soc_update_wq = alloc_workqueue("icnss_soc_update",
WQ_UNBOUND, 1);
if (!priv->soc_update_wq) {
icnss_pr_err("Workqueue creation failed for soc update\n");
ret = -EFAULT;
goto out;
}
priv->psf_nb.notifier_call = icnss_battery_supply_callback;
ret = power_supply_reg_notifier(&priv->psf_nb);
if (ret < 0) {
icnss_pr_err("Power supply framework registration err: %d\n",
ret);
goto err_psf_registration;
}
INIT_WORK(&priv->soc_update_work, icnss_update_soc_level);
return 0;
err_psf_registration:
destroy_workqueue(priv->soc_update_wq);
out:
return ret;
}
int icnss_get_cpr_info(struct icnss_priv *priv)
{
struct platform_device *plat_dev = priv->pdev;

View File

@ -15,5 +15,5 @@ void icnss_put_resources(struct icnss_priv *priv);
void icnss_put_vreg(struct icnss_priv *priv);
void icnss_put_clk(struct icnss_priv *priv);
int icnss_vreg_unvote(struct icnss_priv *priv);
int icnss_get_psf_info(struct icnss_priv *priv);
#endif

View File

@ -573,7 +573,9 @@ static void usb_cser_resume(struct usb_function *f)
if (port->setup_pending) {
pr_info("%s: start_rx called due to rx_out error.\n", __func__);
port->setup_pending = false;
spin_unlock_irqrestore(&port->port_lock, flags);
usb_cser_start_rx(port);
spin_lock_irqsave(&port->port_lock, flags);
}
in = port->port_usb.in;
/* process any pending requests */

View File

@ -37,18 +37,32 @@
#define HH_RM_MAX_MSG_SIZE_BYTES \
(HH_MSGQ_MAX_MSG_SIZE_BYTES - sizeof(struct hh_rm_rpc_hdr))
/**
* struct hh_rm_connection - Represents a complete message from resource manager
* @payload: Combined payload of all the fragments without any RPC headers
* @size: Size of the payload.
* @msg_id: Message ID from the header.
* @ret: Linux return code, set in case there was an error processing the connection.
* @type: HH_RM_RPC_TYPE_RPLY or HH_RM_RPC_TYPE_NOTIF.
* @num_fragments: total number of fragments expected to be received for this connection.
* @fragments_received: fragments received so far.
* @rm_error: For request/reply sequences with standard replies.
* @seq: Sequence ID for the main message.
*/
struct hh_rm_connection {
+void *payload;
+size_t size;
u32 msg_id;
-u16 seq;
-void *recv_buff;
-u32 reply_err_code;
-size_t recv_buff_size;
-struct completion seq_done;
+int ret;
+u8 type;
u8 num_fragments;
u8 fragments_received;
-void *current_recv_buff;
+/* only for req/reply sequence */
+u32 rm_error;
+u16 seq;
+struct completion seq_done;
};
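/*
 * Sketch of the reassembly these fields drive (illustrative, not part
 * of the change; conn/frag/frag_len are stand-in names, compare
 * hh_rm_process_cont() below):
 *
 *   memcpy(conn->payload + conn->size, frag + sizeof(*hdr), frag_len);
 *   conn->size += frag_len;
 *   if (++conn->fragments_received == conn->num_fragments)
 *           the completed connection is handed off (reply waiters are
 *           woken via seq_done).
 */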
static struct task_struct *hh_rm_drv_recv_task;
@ -92,6 +106,10 @@ hh_rm_init_connection_buff(struct hh_rm_connection *connection,
struct hh_rm_rpc_hdr *hdr = recv_buff;
size_t max_buf_size;
connection->num_fragments = hdr->fragments;
connection->fragments_received = 0;
connection->type = hdr->type;
/* Some of the 'reply' types don't contain any payload */
if (!payload_size)
return 0;
@ -107,15 +125,12 @@ hh_rm_init_connection_buff(struct hh_rm_connection *connection,
/* If the data is split into multiple fragments, allocate a large
* enough buffer to hold the payloads for all the fragments.
*/
-connection->recv_buff = connection->current_recv_buff =
-kzalloc(max_buf_size, GFP_KERNEL);
-if (!connection->recv_buff)
+connection->payload = kzalloc(max_buf_size, GFP_KERNEL);
+if (!connection->payload)
return -ENOMEM;
-memcpy(connection->recv_buff, recv_buff + hdr_size, payload_size);
-connection->current_recv_buff += payload_size;
-connection->recv_buff_size = payload_size;
-connection->num_fragments = hdr->fragments;
+memcpy(connection->payload, recv_buff + hdr_size, payload_size);
+connection->size = payload_size;
return 0;
}
@ -132,152 +147,121 @@ int hh_rm_unregister_notifier(struct notifier_block *nb)
}
EXPORT_SYMBOL(hh_rm_unregister_notifier);
-static struct hh_rm_connection *
-hh_rm_wait_for_notif_fragments(void *recv_buff, size_t recv_buff_size)
+static int hh_rm_process_notif(void *msg, size_t msg_size)
{
-struct hh_rm_rpc_hdr *hdr = recv_buff;
+struct hh_rm_rpc_hdr *hdr = msg;
struct hh_rm_connection *connection;
u32 notification;
size_t payload_size;
void *payload;
int ret = 0;
connection = hh_rm_alloc_connection(hdr->msg_id);
if (IS_ERR_OR_NULL(connection))
return connection;
payload_size = recv_buff_size - sizeof(*hdr);
curr_connection = connection;
ret = hh_rm_init_connection_buff(connection, recv_buff,
sizeof(*hdr), payload_size);
if (ret < 0)
goto out;
if (wait_for_completion_interruptible(&connection->seq_done)) {
ret = -ERESTARTSYS;
goto out;
}
return connection;
out:
kfree(connection);
return ERR_PTR(ret);
}
static int hh_rm_process_notif(void *recv_buff, size_t recv_buff_size)
{
struct hh_rm_connection *connection = NULL;
struct hh_rm_rpc_hdr *hdr = recv_buff;
u32 notification = hdr->msg_id;
void *payload = NULL;
int ret = 0;
pr_debug("Notification received from RM-VM: %x\n", notification);
if (curr_connection) {
pr_err("Received new notification from RM-VM before completing last connection\n");
return -EINVAL;
}
if (recv_buff_size > sizeof(*hdr))
payload = recv_buff + sizeof(*hdr);
connection = hh_rm_alloc_connection(hdr->msg_id);
if (!connection)
return -EINVAL;
/* If the notification payload is split-up into
* fragments, wait until all them arrive.
*/
if (hdr->fragments) {
connection = hh_rm_wait_for_notif_fragments(recv_buff,
recv_buff_size);
if (IS_ERR_OR_NULL(connection))
return PTR_ERR(connection);
/* In the case of fragments, the complete payload
* is contained under connection->recv_buff
*/
payload = connection->recv_buff;
recv_buff_size = connection->recv_buff_size;
if (hh_rm_init_connection_buff(connection, msg, sizeof(*hdr), msg_size - sizeof(*hdr))) {
kfree(connection);
return -EINVAL;
}
if (hdr->fragments)
return PTR_ERR(connection);
payload = connection->payload;
payload_size = connection->size;
notification = connection->msg_id;
pr_debug("Notification received from RM-VM: %x\n", notification);
switch (notification) {
case HH_RM_NOTIF_VM_STATUS:
-if (recv_buff_size != sizeof(*hdr) +
+if (payload_size !=
sizeof(struct hh_rm_notif_vm_status_payload)) {
pr_err("%s: Invalid size for VM_STATUS notif: %u\n",
-__func__, recv_buff_size - sizeof(*hdr));
+__func__, payload_size);
ret = -EINVAL;
goto err;
}
break;
case HH_RM_NOTIF_VM_IRQ_LENT:
-if (recv_buff_size != sizeof(*hdr) +
+if (payload_size !=
sizeof(struct hh_rm_notif_vm_irq_lent_payload)) {
pr_err("%s: Invalid size for VM_IRQ_LENT notif: %u\n",
-__func__, recv_buff_size - sizeof(*hdr));
+__func__, payload_size);
ret = -EINVAL;
goto err;
}
break;
case HH_RM_NOTIF_VM_IRQ_RELEASED:
-if (recv_buff_size != sizeof(*hdr) +
+if (payload_size !=
sizeof(struct hh_rm_notif_vm_irq_released_payload)) {
pr_err("%s: Invalid size for VM_IRQ_REL notif: %u\n",
-__func__, recv_buff_size - sizeof(*hdr));
+__func__, payload_size);
ret = -EINVAL;
goto err;
}
break;
case HH_RM_NOTIF_VM_IRQ_ACCEPTED:
-if (recv_buff_size != sizeof(*hdr) +
+if (payload_size !=
sizeof(struct hh_rm_notif_vm_irq_accepted_payload)) {
pr_err("%s: Invalid size for VM_IRQ_ACCEPTED notif: %u\n",
-__func__, recv_buff_size - sizeof(*hdr));
+__func__, payload_size);
ret = -EINVAL;
goto err;
}
break;
case HH_RM_NOTIF_MEM_SHARED:
-if (recv_buff_size < sizeof(*hdr) +
+if (payload_size <
sizeof(struct hh_rm_notif_mem_shared_payload)) {
pr_err("%s: Invalid size for MEM_SHARED notif: %u\n",
-__func__, recv_buff_size - sizeof(*hdr));
+__func__, payload_size);
ret = -EINVAL;
goto err;
}
break;
case HH_RM_NOTIF_MEM_RELEASED:
-if (recv_buff_size != sizeof(*hdr) +
+if (payload_size !=
sizeof(struct hh_rm_notif_mem_released_payload)) {
pr_err("%s: Invalid size for MEM_RELEASED notif: %u\n",
-__func__, recv_buff_size - sizeof(*hdr));
+__func__, payload_size);
ret = -EINVAL;
goto err;
}
break;
case HH_RM_NOTIF_MEM_ACCEPTED:
-if (recv_buff_size != sizeof(*hdr) +
+if (payload_size !=
sizeof(struct hh_rm_notif_mem_accepted_payload)) {
pr_err("%s: Invalid size for MEM_ACCEPTED notif: %u\n",
-__func__, recv_buff_size - sizeof(*hdr));
+__func__, payload_size);
ret = -EINVAL;
goto err;
}
break;
case HH_RM_NOTIF_VM_CONSOLE_CHARS:
-if (recv_buff_size < sizeof(*hdr) +
+if (payload_size >=
sizeof(struct hh_rm_notif_vm_console_chars)) {
struct hh_rm_notif_vm_console_chars *console_chars;
u16 num_bytes;
-console_chars = recv_buff + sizeof(*hdr);
+console_chars = payload;
num_bytes = console_chars->num_bytes;
-if (sizeof(*hdr) + sizeof(*console_chars) + num_bytes !=
-recv_buff_size) {
+if (sizeof(*console_chars) + num_bytes !=
+payload_size) {
pr_err("%s: Invalid size for VM_CONSOLE_CHARS notify %u\n",
-__func__, recv_buff_size - sizeof(*hdr));
+__func__, payload_size);
ret = -EINVAL;
goto err;
}
} else {
pr_err("%s: Invalid size for VM_CONSOLE_CHARS notify %u\n",
__func__, payload_size);
goto err;
}
break;
default:
@ -291,7 +275,7 @@ static int hh_rm_process_notif(void *recv_buff, size_t recv_buff_size)
err:
if (connection) {
-kfree(connection->recv_buff);
+kfree(payload);
kfree(connection);
}
@ -335,7 +319,7 @@ static int hh_rm_process_rply(void *recv_buff, size_t recv_buff_size)
if (ret < 0)
return ret;
-connection->reply_err_code = reply_hdr->err_code;
+connection->rm_error = reply_hdr->err_code;
/*
* If the data is composed of a single message, wakeup the
@ -383,10 +367,9 @@ static int hh_rm_process_cont(void *recv_buff, size_t recv_buff_size)
payload_size = recv_buff_size - sizeof(*hdr);
/* Keep appending the data to the previous fragment's end */
-memcpy(connection->current_recv_buff,
+memcpy(connection->payload + connection->size,
recv_buff + sizeof(*hdr), payload_size);
-connection->current_recv_buff += payload_size;
-connection->recv_buff_size += payload_size;
+connection->size += payload_size;
connection->fragments_received++;
if (connection->fragments_received == connection->num_fragments) {
@ -403,6 +386,21 @@ struct hh_rm_msgq_data {
struct work_struct recv_work;
};
static void hh_rm_abort_connection(struct hh_rm_connection *connection)
{
switch (connection->type) {
case HH_RM_RPC_TYPE_RPLY:
connection->ret = -EIO;
complete(&connection->seq_done);
break;
case HH_RM_RPC_TYPE_NOTIF:
fallthrough;
default:
kfree(connection->payload);
kfree(connection);
}
}
static void hh_rm_process_recv_work(struct work_struct *work)
{
struct hh_rm_msgq_data *msgq_data;
@ -414,12 +412,27 @@ static void hh_rm_process_recv_work(struct work_struct *work)
switch (hdr->type) {
case HH_RM_RPC_TYPE_NOTIF:
if (curr_connection) {
/* Not possible per protocol. Do something better than BUG_ON */
pr_warn("Received start of new notification without finishing existing message series.\n");
hh_rm_abort_connection(curr_connection);
}
hh_rm_process_notif(recv_buff, msgq_data->recv_buff_size);
break;
case HH_RM_RPC_TYPE_RPLY:
if (curr_connection) {
/* Not possible per protocol. Do something better than BUG_ON */
pr_warn("Received start of new reply without finishing existing message series.\n");
hh_rm_abort_connection(curr_connection);
}
hh_rm_process_rply(recv_buff, msgq_data->recv_buff_size);
break;
case HH_RM_RPC_TYPE_CONT:
if (!curr_connection) {
/* Not possible per protocol. Do something better than BUG_ON */
pr_warn("Received a continuation message without receiving initial message\n");
break;
}
hh_rm_process_cont(recv_buff, msgq_data->recv_buff_size);
break;
default:
@ -494,8 +507,8 @@ static int hh_rm_send_request(u32 message_id,
unsigned long tx_flags;
u32 num_fragments = 0;
size_t payload_size;
-void *send_buff;
-int i, ret;
+void *msg;
+int i, ret = 0;
num_fragments = (req_buff_size + HH_RM_MAX_MSG_SIZE_BYTES - 1) /
HH_RM_MAX_MSG_SIZE_BYTES;
@ -512,11 +525,15 @@ static int hh_rm_send_request(u32 message_id,
return -E2BIG;
}
msg = kzalloc(HH_RM_MAX_MSG_SIZE_BYTES, GFP_KERNEL);
if (!msg)
return -ENOMEM;
if (mutex_lock_interruptible(&hh_rm_send_lock)) {
-return -ERESTARTSYS;
+ret = -ERESTARTSYS;
+goto free_msg;
}
/* Consider also the 'request' packet for the loop count */
for (i = 0; i <= num_fragments; i++) {
if (buff_size_remaining > HH_RM_MAX_MSG_SIZE_BYTES) {
payload_size = HH_RM_MAX_MSG_SIZE_BYTES;
@ -525,13 +542,10 @@ static int hh_rm_send_request(u32 message_id,
payload_size = buff_size_remaining;
}
-send_buff = kzalloc(sizeof(*hdr) + payload_size, GFP_KERNEL);
-if (!send_buff) {
-mutex_unlock(&hh_rm_send_lock);
-return -ENOMEM;
-}
+memset(msg, 0, HH_RM_MAX_MSG_SIZE_BYTES);
+/* Fill header */
-hdr = send_buff;
+hdr = msg;
hdr->version = HH_RM_RPC_HDR_VERSION_ONE;
hdr->hdr_words = HH_RM_RPC_HDR_WORDS;
hdr->type = i == 0 ? HH_RM_RPC_TYPE_REQ : HH_RM_RPC_TYPE_CONT;
@ -539,11 +553,12 @@ static int hh_rm_send_request(u32 message_id,
hdr->seq = connection->seq;
hdr->msg_id = message_id;
-memcpy(send_buff + sizeof(*hdr), req_buff_curr, payload_size);
+/* Copy payload */
+memcpy(msg + sizeof(*hdr), req_buff_curr, payload_size);
req_buff_curr += payload_size;
-/* Force the last fragment (or the request type)
-* to be sent immediately to the receiver
+/* Force the last fragment to be sent immediately to
+* the receiver
*/
tx_flags = (i == num_fragments) ? HH_MSGQ_TX_PUSH : 0;
@ -552,25 +567,17 @@ static int hh_rm_send_request(u32 message_id,
message_id == HH_RM_RPC_MSG_ID_CALL_VM_CONSOLE_FLUSH)
udelay(800);
-ret = hh_msgq_send(hh_rm_msgq_desc, send_buff,
+ret = hh_msgq_send(hh_rm_msgq_desc, msg,
sizeof(*hdr) + payload_size, tx_flags);
-/*
-* In the case of a success, the hypervisor would have consumed
-* the buffer. While in the case of a failure, we are going to
-* quit anyways. Hence, free the buffer regardless of the
-* return value.
-*/
-kfree(send_buff);
-if (ret) {
-mutex_unlock(&hh_rm_send_lock);
-return ret;
-}
+if (ret)
+break;
}
mutex_unlock(&hh_rm_send_lock);
-return 0;
+free_msg:
+kfree(msg);
+return ret;
}
/**
@ -579,7 +586,7 @@ static int hh_rm_send_request(u32 message_id,
* @req_buff: Request buffer that contains the payload
* @req_buff_size: Total size of the payload
* @resp_buff_size: Size of the response buffer
-* @reply_err_code: Returns Haven standard error code for the response
+* @rm_error: Returns Haven standard error code for the response
*
* Make a request to the RM-VM and expect a reply back. For a successful
* response, the function returns the payload and its size for the response.
@ -591,13 +598,13 @@ static int hh_rm_send_request(u32 message_id,
*/
void *hh_rm_call(hh_rm_msgid_t message_id,
void *req_buff, size_t req_buff_size,
-size_t *resp_buff_size, int *reply_err_code)
+size_t *resp_buff_size, int *rm_error)
{
struct hh_rm_connection *connection;
int req_ret;
void *ret;
-if (!message_id || !req_buff || !resp_buff_size || !reply_err_code)
+if (!message_id || !req_buff || !resp_buff_size || !rm_error)
return ERR_PTR(-EINVAL);
connection = hh_rm_alloc_connection(message_id);
@ -632,27 +639,33 @@ void *hh_rm_call(hh_rm_msgid_t message_id,
goto out;
}
-*reply_err_code = connection->reply_err_code;
-if (connection->reply_err_code) {
-pr_err("%s: Reply for seq:%d failed with RM err: %d\n",
-__func__, connection->seq, connection->reply_err_code);
-ret = ERR_PTR(hh_remap_error(connection->reply_err_code));
-kfree(connection->recv_buff);
-goto out;
-}
-print_hex_dump_debug("hh_rm_call RX: ", DUMP_PREFIX_OFFSET, 4, 1,
-connection->recv_buff, connection->recv_buff_size,
-false);
-ret = connection->recv_buff;
-*resp_buff_size = connection->recv_buff_size;
-out:
mutex_lock(&hh_rm_call_idr_lock);
idr_remove(&hh_rm_call_idr, connection->seq);
mutex_unlock(&hh_rm_call_idr_lock);
+*rm_error = connection->rm_error;
+if (connection->rm_error) {
+pr_err("%s: Reply for seq:%d failed with RM err: %d\n",
+__func__, connection->seq, connection->rm_error);
+ret = ERR_PTR(hh_remap_error(connection->rm_error));
+kfree(connection->payload);
+goto out;
+}
+if (connection->ret) {
+ret = ERR_PTR(connection->ret);
+kfree(connection->payload);
+goto out;
+}
+print_hex_dump_debug("hh_rm_call RX: ", DUMP_PREFIX_OFFSET, 4, 1,
+connection->payload, connection->size,
+false);
+ret = connection->payload;
+*resp_buff_size = connection->size;
+out:
kfree(connection);
return ret;
}

View File

@ -1,6 +1,8 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2019-2021, The Linux Foundation. All rights reserved.
*
* Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef _IPA_ETH_H_
@ -73,10 +75,13 @@ enum ipa_eth_pipe_direction {
*
* @bar_addr: bar PA to access NTN register
* @tail_ptr_offs: tail ptr offset
* @ioc_mod_threshold: Descriptors # per interrupt request from
* NTN3 HW via descriptor bit as part of the protocol.
*/
struct ipa_eth_ntn_setup_info {
phys_addr_t bar_addr;
phys_addr_t tail_ptr_offs;
uint16_t ioc_mod_threshold;
};
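/*
 * Example (illustrative placeholder values, not part of the change): a
 * client picks its NTN3 interrupt moderation at setup time, e.g.
 *
 *   struct ipa_eth_ntn_setup_info ntn_info = {
 *           .bar_addr = 0xa0000000,
 *           .tail_ptr_offs = 0x2000,
 *           .ioc_mod_threshold = 8,   request an IRQ every 8 descriptors
 *   };
 */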
/**
@ -112,6 +117,7 @@ struct ipa_eth_realtek_setup_info {
phys_addr_t dest_tail_ptr_offs;
};
/**
* struct ipa_eth_buff_smmu_map - IPA iova->pa SMMU mapping
* @iova: virtual address of the data buffer

View File

@ -129,6 +129,7 @@ struct driver_info {
#define FLAG_MULTI_PACKET 0x2000
#define FLAG_RX_ASSEMBLE 0x4000 /* rx packets may span >1 frames */
#define FLAG_NOARP 0x8000 /* device can't do ARP */
#define FLAG_THROTTLE_RX 0x10000 /* Throttle RX traffic in USB SS */
/* init device ... can sleep, or cause probe() failure */
int (*bind)(struct usbnet *, struct usb_interface *);

View File

@ -278,4 +278,7 @@ extern int cnss_get_mem_seg_count(enum cnss_remote_mem_type type, u32 *seg);
extern int cnss_get_mem_segment_info(enum cnss_remote_mem_type type,
struct cnss_mem_segment segment[],
u32 segment_count);
extern int cnss_send_buffer_to_afcmem(struct device *dev, char *afcdb,
uint32_t len, uint8_t slotid);
extern int cnss_reset_afcmem(struct device *dev, uint8_t slotid);
#endif /* _NET_CNSS2_H */

View File

@ -1,6 +1,8 @@
/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */
/*
* Copyright (c) 2013-2021, The Linux Foundation. All rights reserved.
*
* Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
*/
/*
@ -52,6 +54,9 @@
#define QMI_IPA_IPFLTR_NUM_MEQ_32_EQNS_V01 2
#define QMI_IPA_MAX_PIPES_V01 20
#define QMI_IPA_MAX_PER_CLIENTS_V01 64
#define QMI_IPA_MAX_RMNET_ETH_INFO_V01 19
#define QMI_IPA_MAX_MAC_ADDR_LEN_V01 6
#define QMI_IPA_MAX_IPV4_ADDR_LEN_V01 4
/*
* Indicates presence of newly added member to support HW stats.
@ -113,6 +118,19 @@ enum ipa_platform_type_enum_v01 {
/* To force a 32 bit signed enum. Do not change or use */
};
enum ipa_eth_hw_config_enum_v01 {
IPA_QMI_ETH_HW_CONFIG_ENUM_MIN_ENUM_VAL_V01 = -2147483647,
/**< To force a 32 bit signed enum. Do not change or use*/
IPA_QMI_ETH_HW_NONE_V01 = 0x00,
/**< Ethernet Setting HW Default \n */
IPA_QMI_ETH_HW_VLAN_IP_V01 = 0x01,
/**< Ethernet HW VLAN+IP supported \n */
IPA_QMI_ETH_HW_NON_VLAN_IP_V01 = 0x02,
/**< Ethernet HW NON_VLAN+IP supported */
IPA_QMI_ETH_HW_CONFIG_ENUM_MAX_ENUM_VAL_V01 = 2147483647
/**< To force a 32 bit signed enum. Do not change or use*/
};
#define QMI_IPA_PLATFORM_TYPE_LE_MHI_V01 \
QMI_IPA_PLATFORM_TYPE_LE_MHI_V01
@ -538,6 +556,18 @@ struct ipa_indication_reg_req_msg_v01 {
* message makes sense only when the QMI_IPA_INDICATION_REGISTER_REQ
* is being originated from the master driver.
*/
/* Optional */
/* Rmnet Ethernet MAC Information */
__u8 rmnet_eth_mac_info_valid;
/* Must be set to true if rmnet_eth_mac_info is being passed */
__u8 rmnet_eth_mac_info;
/* If set to TRUE, this field indicates that the client wants to
* receive indications about embedded rmnet_eth mac info via
* QMI_IPA_RMNET_ETH_INFO_INDICATION. Setting this field in the request
* message makes sense only when the QMI_IPA_INDICATION_REGISTER_REQ is
* being originated from the master driver.
*/
}; /* Message */
@ -2559,6 +2589,7 @@ enum ipa_ic_type_enum_v01 {
DATA_IC_TYPE_AP_V01 = 0x04,
DATA_IC_TYPE_Q6_V01 = 0x05,
DATA_IC_TYPE_UC_V01 = 0x06,
DATA_IC_TYPE_ETH_V01 = 0x07,
IPA_IC_TYPE_ENUM_MAX_VAL_V01 = IPA_INT_MAX,
};
@ -2781,6 +2812,78 @@ struct ipa_move_nat_table_complt_ind_msg_v01 {
}; /* Message */
#define QMI_IPA_NAT_TABLE_MOVE_COMPLETE_IND_MAX_MSG_LEN_V01 7
/*
* Request Message; QMI request message to request for a dual-backhaul traffic
* offloading with ethernet and WWAN and notify the eth-header
*/
struct ipa_eth_backhaul_info_req_msg_v01 {
/* Mandatory */
/* SRC MAC ADDR */
__u8 src_mac_addr[QMI_IPA_MAX_MAC_ADDR_LEN_V01];
/* src mac addr of eth hdr */
/* Mandatory */
/* DST MAC ADDR */
__u8 dst_mac_addr[QMI_IPA_MAX_MAC_ADDR_LEN_V01];
/* dst mac addr of eth hdr */
/* Mandatory */
/* IPv4 ADDR of ETH0 */
__u32 ipv4_addr_eth0[QMI_IPA_MAX_IPV4_ADDR_LEN_V01];
/* ipv4 addr of eth0 */
/* Mandatory */
/* ETH PIPE */
__u8 eth_pipe;
/* Specifies eth pipe for Q6 to route UL pkts for ETH backhaul */
/* Mandatory */
/* ACTION */
__u8 enable;
/* Specifies whether eth backhaul is enabled or disabled */
}; /* Message */
#define IPA_ETH_BACKHAUL_INFO_REQ_MSG_V01_MAX_MSG_LEN 45
/* Response Message; to notify the status of the dual-backhaul traffic
* offloading request using QMI_IPA_ETH_BACKHAUL_INFO_REQ
*/
struct ipa_eth_backhaul_info_resp_msg_v01 {
/* Mandatory */
/* Result Code */
struct ipa_qmi_response_type_v01 resp;
/*
* Standard response type.
* Standard response type. Contains the following data members:
* qmi_result_type -- QMI_RESULT_SUCCESS or QMI_RESULT_FAILURE
* qmi_error_type -- Error code. Possible error code values are
* described in the error codes section of each message definition.
*/
}; /* Message */
#define IPA_ETH_BACKHAUL_INFO_RESP_MSG_V01_MAX_MSG_LEN 7
struct ipa_rmnet_eth_info_type_v01 {
__u8 mac_addr[QMI_IPA_MAX_MAC_ADDR_LEN_V01];
/* mac addr of rmnet_eth */
__u32 mux_id;
/* QMAP Mux ID. As a part of the QMAP protocol,
* several data calls may be multiplexed over the same physical transport
* channel. This identifier is used to identify one such data call.
* The maximum value for this identifier is 255.
*/
}; /* Type */
/* Indication Message; This is an indication to send rmnet ethernet information
* for embedded wireless ethernet PDU from the modem processor to
* the application processor
*/
struct ipa_rmnet_eth_info_indication_msg_v01 {
/* Optional */
/* Rmnet Ethernet Information */
__u8 rmnet_eth_info_valid;
/* Must be set to true if rmnet_eth_info is being passed */
__u32 rmnet_eth_info_len;
/* Must be set to # of elements in rmnet_eth_info */
struct ipa_rmnet_eth_info_type_v01 rmnet_eth_info[QMI_IPA_MAX_RMNET_ETH_INFO_V01];
/* Rmnet Ethernet Info array */
}; /* Message */
#define IPA_RMNET_ETH_INFO_INDICATION_MSG_V01_MAX_MSG_LEN 194
/*Service Message Definition*/
#define QMI_IPA_INDICATION_REGISTER_REQ_V01 0x0020
#define QMI_IPA_INDICATION_REGISTER_RESP_V01 0x0020
@ -2839,6 +2942,9 @@ struct ipa_move_nat_table_complt_ind_msg_v01 {
#define QMI_IPA_MOVE_NAT_REQ_V01 0x0046
#define QMI_IPA_MOVE_NAT_RESP_V01 0x0046
#define QMI_IPA_MOVE_NAT_COMPLETE_IND_V01 0x0046
#define QMI_IPA_ETH_BACKHAUL_INFO_REQ_V01 0x0047
#define QMI_IPA_ETH_BACKHAUL_INFO_RESP_V01 0x0047
#define QMI_IPA_RMNET_ETH_INFO_INDICATION_V01 0x0048
/* add for max length*/
#define QMI_IPA_INIT_MODEM_DRIVER_REQ_MAX_MSG_LEN_V01 197

View File

@ -1,6 +1,8 @@
/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */
/*
* Copyright (c) 2012-2021, The Linux Foundation. All rights reserved.
*
* Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef _UAPI_MSM_IPA_H_
@ -261,12 +263,13 @@
#define IPA_FLT_L2TP_UDP_INNER_MAC_DST_ADDR (1ul << 31)
/* Extended attributes for the rule (routing or filtering) */
#define IPA_FLT_EXT_L2TP_UDP_TCP_SYN (1ul << 0)
#define IPA_FLT_EXT_L2TP_UDP_INNER_ETHER_TYPE (1ul << 1)
#define IPA_FLT_EXT_MTU (1ul << 2)
#define IPA_FLT_EXT_L2TP_UDP_INNER_NEXT_HDR (1ul << 3)
#define IPA_FLT_EXT_NEXT_HDR (1ul << 4)
+/* ip protocol/udp ports/eth-type */
+#define IPA_FLT_EXT_MPLS_GRE_GENERAL (1ul << 5)
/**
* maximal number of NAT PDNs in the PDN config table
@ -517,9 +520,15 @@ enum ipa_client_type {
/* RESERVED PROD = 124, */
IPA_CLIENT_TPUT_CONS = 125,
IPA_CLIENT_Q6_DL_NLO_ETH_DATA_PROD = 126,
/* RESERVED CONS = 127, */
IPA_CLIENT_APPS_WAN_ETH_PROD = 128,
/* RESERVED CONS = 129, */
};
-#define IPA_CLIENT_MAX (IPA_CLIENT_TPUT_CONS + 1)
+#define IPA_CLIENT_MAX (IPA_CLIENT_APPS_WAN_ETH_PROD + 1)
#define IPA_CLIENT_WLAN2_PROD IPA_CLIENT_A5_WLAN_AMPDU_PROD
#define IPA_CLIENT_Q6_DL_NLO_DATA_PROD IPA_CLIENT_Q6_DL_NLO_DATA_PROD
@ -562,7 +571,8 @@ enum ipa_client_type {
((client) == IPA_CLIENT_APPS_LAN_PROD || \
(client) == IPA_CLIENT_APPS_WAN_PROD || \
(client) == IPA_CLIENT_APPS_WAN_LOW_LAT_PROD || \
-(client) == IPA_CLIENT_APPS_WAN_LOW_LAT_DATA_PROD)
+(client) == IPA_CLIENT_APPS_WAN_LOW_LAT_DATA_PROD || \
+(client) == IPA_CLIENT_APPS_WAN_ETH_PROD)
#define IPA_CLIENT_IS_USB_CONS(client) \
((client) == IPA_CLIENT_USB_CONS || \
@ -611,7 +621,8 @@ enum ipa_client_type {
(client) == IPA_CLIENT_Q6_DL_NLO_LL_DATA_PROD || \
(client) == IPA_CLIENT_Q6_DL_NLO_DATA_PROD || \
(client) == IPA_CLIENT_Q6_CV2X_PROD || \
-(client) == IPA_CLIENT_Q6_AUDIO_DMA_MHI_PROD)
+(client) == IPA_CLIENT_Q6_AUDIO_DMA_MHI_PROD || \
+(client) == IPA_CLIENT_Q6_DL_NLO_ETH_DATA_PROD)
#define IPA_CLIENT_IS_Q6_NON_ZIP_CONS(client) \
((client) == IPA_CLIENT_Q6_LAN_CONS || \
@ -712,6 +723,17 @@ enum ipa_ip_type {
#define VALID_IPA_IP_TYPE(t) \
((t) >= IPA_IP_v4 && (t) < IPA_IP_MAX)
static inline const char *ipa_ip_type_as_str(enum ipa_ip_type t)
{
return
(t == IPA_IP_v4) ? "v4" :
(t == IPA_IP_v6) ? "v6" :
(t == IPA_IP_MAX) ? "max|v4_vlan" :
(t == IPA_IP_v6_VLAN) ? "v6_vlan" :
(t == IPA_IP_MAX_WLAN) ? "max_wlan" :
"???";
}
/**
* enum ipa_rule_type - Type of routing or filtering rule
* Hashable: Rule will be located at the hashable tables
@ -939,7 +961,14 @@ enum ipa_ext_route_evt {
#define IPA_SET_EXT_ROUTER_MODE_EVENT_MAX IPA_SET_EXT_ROUTER_MODE_EVENT_MAX
};
-#define IPA_EVENT_MAX_NUM (IPA_SET_EXT_ROUTER_MODE_EVENT_MAX)
+enum ipa_eth_pdu_evt {
+IPA_ENABLE_ETH_PDU_MODE_EVENT = IPA_SET_EXT_ROUTER_MODE_EVENT_MAX,
+IPA_ENABLE_ETH_PDU_MODE_EVENT_MAX
+#define IPA_ENABLE_ETH_PDU_MODE_EVENT_MAX IPA_ENABLE_ETH_PDU_MODE_EVENT_MAX
+};
+#define IPA_EVENT_MAX_NUM (IPA_ENABLE_ETH_PDU_MODE_EVENT_MAX)
#define IPA_EVENT_MAX ((int)IPA_EVENT_MAX_NUM)
/**
@ -1056,6 +1085,100 @@ enum ipa_hw_feature_support {
#define IPA_HW_ETH_BRIDGING_SUPPORT_BMSK 0x1
/**
* enum ipa_exception_type
*
* This enum is for describing which field is to be looked at for
* exception path consideration.
*
* NOTE 1: The field implies an offset into the packet under
* consideration. This offset will be calculated on behalf of
* the user of this API.
*
* NOTE 2: When exceptions are generated/sent in an ipa_exception
* structure, they will be considered to be from the uplink
* perspective. And when appropriate, a corresponding, and
* perhaps inverted, downlink exception will be automatically
* created on the caller's behalf. As an example: If a
* FIELD_UDP_SRC_PORT is sent, an uplink exception will be
* created for udp source port, and a corresponding
* FIELD_UDP_DST_PORT will be automatically created for the
* downlink.
*/
enum ipa_exception_type {
FIELD_IP_PROTOCOL = 0,
FIELD_TCP_SRC_PORT = 1,
FIELD_TCP_DST_PORT = 2,
FIELD_UDP_SRC_PORT = 3,
FIELD_UDP_DST_PORT = 4,
FIELD_ETHER_TYPE = 5,
FIELD_MAX
};
#define VALID_EXCEPTION_TYPE(x) \
((x) >= FIELD_IP_PROTOCOL && (x) < FIELD_MAX)
static inline const char *exception_type_as_str(enum ipa_exception_type t)
{
return
(t == FIELD_IP_PROTOCOL) ? "ip_protocol" :
(t == FIELD_TCP_SRC_PORT) ? "tcp_src_port" :
(t == FIELD_TCP_DST_PORT) ? "tcp_dst_port" :
(t == FIELD_UDP_SRC_PORT) ? "udp_src_port" :
(t == FIELD_UDP_DST_PORT) ? "udp_dst_port" :
(t == FIELD_ETHER_TYPE) ? "ether_type" :
(t == FIELD_MAX) ? "max" :
"???";
}
#define IP_TYPE_EXCEPTION(x) \
((x) == FIELD_IP_PROTOCOL || \
(x) == FIELD_TCP_SRC_PORT || \
(x) == FIELD_TCP_DST_PORT || \
(x) == FIELD_UDP_SRC_PORT || \
(x) == FIELD_UDP_DST_PORT)
/**
* enum ipa_data_flow_type
*
* This enum is for describing whether a data flow is uplink or down.
*/
enum ipa_data_flow_type {
FLOW_UPLINK = 0,
FLOW_DOWNLINK = 1,
FLOW_MAX
};
#define VALID_FLOW_TYPE(x) \
((x) >= FLOW_UPLINK && (x) < FLOW_MAX)
static inline const char *flow_type_as_str(enum ipa_data_flow_type t)
{
return
(t == FLOW_UPLINK) ? "uplink" :
(t == FLOW_DOWNLINK) ? "downlink" :
(t == FLOW_MAX) ? "max" :
"???";
}
/**
* struct ipa_field_val_equation_gen
*
* Needed for generating an equation for peering into and finding
* data in a packet or frame.
*
* @flow: Are we generating an equation for uplink or downlink
* @inner_iptype: What's the ip type of the inner packet
* @field: Field we're generating equation for
* @value: The value at that field we're looking for
*/
struct ipa_field_val_equation_gen {
enum ipa_data_flow_type flow;
enum ipa_ip_type inner_iptype;
enum ipa_exception_type field;
uint32_t value;
};
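/*
 * Example (illustrative values, not part of the change): matching inner
 * IPv4 UDP destination port 4500 on the uplink would be described as
 *
 *   struct ipa_field_val_equation_gen eq = {
 *           .flow = FLOW_UPLINK,
 *           .inner_iptype = IPA_IP_v4,
 *           .field = FIELD_UDP_DST_PORT,
 *           .value = 4500,
 *   };
 */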
/**
* struct ipa_rule_attrib - attributes of a routing/filtering
* rule, all in LE
@ -1088,6 +1211,7 @@ enum ipa_hw_feature_support {
* @payload_length: Payload length.
* @ext_attrib_mask: Extended attributes.
* @l2tp_udp_next_hdr: next header in L2TP tunneling
* @field_val_equ: for finding a value at a particular offset
*/
struct ipa_rule_attrib {
uint32_t attrib_mask;
@ -1133,7 +1257,7 @@ struct ipa_rule_attrib {
__u32 ext_attrib_mask;
__u8 l2tp_udp_next_hdr;
__u8 padding1;
-__u32 padding2;
+struct ipa_field_val_equation_gen fld_val_eq;
};
@ -1396,8 +1520,8 @@ enum ipa_hdr_l2_type {
* IPA_HDR_PROC_L2TP_UDP_HEADER_REMOVE: Process Ethernet To WLAN packets to
* remove L2TP UDP header.
* IPA_HDR_PROC_SET_DSCP:
-* IPA_HDR_PROC_EoGRE_HEADER_ADD: Add IPV[46] GRE header
-* IPA_HDR_PROC_EoGRE_HEADER_REMOVE: Remove IPV[46] GRE header
+* IPA_HDR_PROC_EoGRE_HEADER_ADD: Add IPV[46] and GRE header
+* IPA_HDR_PROC_EoGRE_HEADER_REMOVE: Remove IPV[46] and GRE header
*/
enum ipa_hdr_proc_type {
IPA_HDR_PROC_NONE,
@ -1576,7 +1700,8 @@ struct ipa_l2tp_hdr_proc_ctx_params {
};
#define IPA_EoGRE_MAX_PCP_IDX 8 /* From 802.1Q tag format (reflects IEEE P802.1p) */
-#define IPA_EoGRE_MAX_VLAN 8 /* Our supported number of VLAN id's */
+#define IPA_EoGRE_MAX_VLAN 8 /* Supported number of VLAN id's (C-Tag when MPLSoGRE) */
+#define IPA_GRE_MAX_S_VLAN 4 /* We'll support up to 4 vlans from the S-Tag */
/* vlan 12 bits + pcp 3 bites <-> dscp 6 bits */
struct IpaDscpVlanPcpMap_t {
@ -1587,23 +1712,98 @@ struct IpaDscpVlanPcpMap_t {
/*
* dscp[vlan][pcp], valid only lower 6 bits, using pcp as index
*/
uint8_t dscp[IPA_EoGRE_MAX_VLAN][IPA_EoGRE_MAX_PCP_IDX];
-uint8_t num_vlan; /* indicate how many vlans valid */
-uint8_t reserved0;
+uint8_t num_vlan; /* indicate how many vlans valid in vlan above */
+uint8_t num_s_vlan; /* indicate how many vlans valid in s_vlan below */
+uint8_t dscp_opt; /* indicates if dscp is required or optional */
+uint8_t pad1; /* for alignment */
/*
* The same lookup scheme, using vlan[] above, is used for
* generating the first index of mpls below; and in addition,
* we've added a similar array, s_vlan[], for generating the
* second index below.
*/
uint16_t s_vlan[IPA_GRE_MAX_S_VLAN];
/*
* mpls[C-Tag vlan][S-Tag vlan] -> Only the lower 20 bits of the
* 32-bit mpls value will be used, since that's the size of the
* MPLS label component.
*/
uint32_t mpls[IPA_EoGRE_MAX_VLAN][IPA_GRE_MAX_S_VLAN];
/*
* following three arrays are used for DL vlan tag lookup
* mpls_val_sorted is in ascending order, by mpls label values in mpls array
* vlan_c and vlan_s are vlan id values that are corresponding to the mpls label
*/
uint16_t pad2; /* for alignment */
uint8_t pad3; /* for alignment */
uint8_t num_mpls_val_sorted; /* num of elements in mpls_val_sorted */
uint32_t mpls_val_sorted[IPA_EoGRE_MAX_VLAN * IPA_GRE_MAX_S_VLAN];
uint8_t vlan_c[IPA_EoGRE_MAX_VLAN * IPA_GRE_MAX_S_VLAN];
uint8_t vlan_s[IPA_EoGRE_MAX_VLAN * IPA_GRE_MAX_S_VLAN];
} __packed;
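/*
 * Sketch of the lookups described above (illustrative, not part of the
 * change; c_idx/s_idx stand in for indices already resolved from the
 * C-Tag vlan[] and S-Tag s_vlan[] tables):
 *
 *   UL: label = map->mpls[c_idx][s_idx] & 0xFFFFF;   20-bit MPLS label
 *   DL: a label found at mpls_val_sorted[k] maps back to the pair
 *       vlan_c[k] / vlan_s[k].
 */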
/**
* struct ipa_exception
*
* This structure is used to generate an exception filter rule (see
* exception_list in struct ipa_ipgre_info).
*
* NOTE:
*
* When generating the exception list, generate them from the
* perspective of the UL data flow. The inner workings of the system
* will generate the complementary DL rule, if necessary. For
* example, UL TCP or UDP source/destination ports will have the
* complementary DL rule generated with the addresses and ports
* reversed.
*
* For finding/matching @value at @field.
*
* Please always set @inner_iptype even when looking for an element in
* an Ethernet header. In that case, inner_iptype would be set to the
* ip header type following the Ethernet header.
*/
struct ipa_exception {
enum ipa_exception_type field;
uint32_t value;
enum ipa_ip_type inner_iptype;
} __packed;
#define IPA_MAX_EXCEPTIONS 10 /* Max exception rule tuples */
struct ipa_ipgre_info {
-/* ip address type */
+/*
+* GRE tunnel's ip address type.
+*/
enum ipa_ip_type iptype;
-/* ipv4 */
+/*
+* If GRE tunnel is ipv4, then address goes here.
+*/
uint32_t ipv4_src;
uint32_t ipv4_dst;
-/* ipv6 */
+/*
+* Else if GRE tunnel is ipv6, then address goes here.
+*/
uint32_t ipv6_src[4];
uint32_t ipv6_dst[4];
-/* gre header info */
+/*
+* If running MPLS over GRE, please provide mpls header value
+* here.
+*/
+uint32_t mpls_protocol;
+/*
+* GRE header value.
+*/
uint16_t gre_protocol;
-};
+uint8_t unused; /* for alignment */
+/*
+* The number of valid elements in, and the accompanying
+* exception_list, below
+*/
+uint8_t num_exceptions;
+struct ipa_exception exception_list[IPA_MAX_EXCEPTIONS];
+} __packed;
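/*
 * Example (all values illustrative and an assumption of this sketch;
 * 0x8847 is the standard MPLS-unicast ethertype and 0x6558 transparent
 * Ethernet bridging, but whether the driver expects exactly these
 * values is not confirmed by this hunk):
 *
 *   struct ipa_ipgre_info tun = {
 *           .iptype = IPA_IP_v4,
 *           .ipv4_src = 0x0a010101,
 *           .ipv4_dst = 0x0a010102,
 *           .mpls_protocol = 0x8847,
 *           .gre_protocol = 0x6558,
 *           .num_exceptions = 1,
 *           .exception_list[0] = {
 *                   .field = FIELD_UDP_DST_PORT,  UL view; DL twin generated
 *                   .value = 53,
 *                   .inner_iptype = IPA_IP_v4,
 *           },
 *   };
 */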
struct ipa_ioc_eogre_info {
/* ip and gre info */
@ -1618,23 +1818,33 @@ struct ipa_ioc_eogre_info {
* @input_ip_version: Specifies if Input header is IPV4(0) or IPV6(1)
* @output_ip_version: Specifies if template header's outer IP is IPV4(0) or IPV6(1)
* @second_pass: Specifies if the data should be processed again.
* @is_mpls: Specifies if ucp cmd is for legacy EoGRE(0) or MPLSoGRE(1)
* @tag_remove_len: Specifies amount to be removed for the tags
*/
struct ipa_eogre_header_add_procparams {
uint32_t eth_hdr_retained :1;
uint32_t input_ip_version :1;
uint32_t output_ip_version :1;
uint32_t second_pass :1;
-uint32_t reserved :28;
+uint32_t is_mpls :1;
+uint32_t tag_remove_len :4;
+uint32_t reserved :23;
};
/**
* struct ipa_eogre_header_remove_procparams -
* @hdr_len_remove: Specifies how much (in bytes) of the header needs
*                  to be removed
* @outer_ip_version: Specifies if template header's outer IP is IPV4(0) or IPV6(1)
* @is_mpls: Specifies if ucp cmd is for legacy EoGRE(0) or MPLSoGRE(1)
* @tag_add_len: Specifies amount to be added for the tags
*/
struct ipa_eogre_header_remove_procparams {
-uint32_t hdr_len_remove:8; /* 44 bytes for IPV6, 24 for IPV4 */
-uint32_t reserved:24;
+uint32_t hdr_len_remove :8; /* 44 bytes for IPV6, 24 for IPV4 */
+uint32_t outer_ip_version :1;
+uint32_t is_mpls :1;
+uint32_t tag_add_len :4;
+uint32_t reserved :18;
};
/**

View File

@ -1,6 +1,8 @@
/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */
/*
* Copyright (c) 2018-2021, The Linux Foundation. All rights reserved.
*
* Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef _UAPI_MSM_RMNET_H_
@ -72,6 +74,10 @@
#define RMNET_IOCTL_SET_MTU 0x0020 /* Set v4/v6 MTU */
#define RMNET_IOCTL_GET_EPID_LL 0x0021 /* Get LL ep ID */
#define RMNET_IOCTL_GET_EP_PAIR_LL 0x0022 /* LL ep pair */
#define RMNET_IOCTL_SET_ETH_VLAN 0x0023 /* Set ETH Vlan */
#define RMNET_IOCTL_ADD_MUX_CHANNEL_v2 0x0024 /* Add MUX ID + mac*/
#define RMNET_IOCTL_GET_EPID_ETH 0x0025 /* Get ETH ep ID */
#define RMNET_IOCTL_GET_EP_PAIR_ETH 0x0026 /* ETH data ep pair*/
/**
* RMNET_IOCTL_EXTENDED_V2 ioctl types.
@ -100,19 +106,21 @@
#define RMNET_IOCTL_FEAT_FLOW_CONTROL (1<<7)
#define RMNET_IOCTL_FEAT_GET_DFLT_CONTROL_CHANNEL (1<<8)
#define RMNET_IOCTL_FEAT_GET_HWSW_MAP (1<<9)
#define RMNET_IOCTL_FEAT_ETH_PDU (1<<10)
/* Input values for the RMNET_IOCTL_SET_EGRESS_DATA_FORMAT IOCTL */
#define RMNET_IOCTL_EGRESS_FORMAT_MAP (1<<1)
#define RMNET_IOCTL_EGRESS_FORMAT_AGGREGATION (1<<2)
#define RMNET_IOCTL_EGRESS_FORMAT_MUXING (1<<3)
#define RMNET_IOCTL_EGRESS_FORMAT_CHECKSUM (1<<4)
#define RMNET_IOCTL_EGRESS_FORMAT_IP_ROUTE (1<<5)
/* Input values for the RMNET_IOCTL_SET_INGRESS_DATA_FORMAT IOCTL */
#define RMNET_IOCTL_INGRESS_FORMAT_MAP (1<<1)
#define RMNET_IOCTL_INGRESS_FORMAT_DEAGGREGATION (1<<2)
#define RMNET_IOCTL_INGRESS_FORMAT_DEMUXING (1<<3)
#define RMNET_IOCTL_INGRESS_FORMAT_CHECKSUM (1<<4)
#define RMNET_IOCTL_INGRESS_FORMAT_AGG_DATA (1<<5)
#define RMNET_IOCTL_INGRESS_FORMAT_IP_ROUTE (1<<6)
/* Input values for the RMNET_IOCTL_SET_OFFLOAD */
#define RMNET_IOCTL_OFFLOAD_FORMAT_NONE (0)
@ -124,6 +132,9 @@
#define IFNAMSIZ 16
#endif
/* size of the mac address */
#define MAC_ADDR_SIZE 6
/**
* enum rmnet_egress_ep_type - To specify pipe type for egress
* @RMNET_EGRESS_DEFAULT: WAN Producer pipe
@ -132,10 +143,11 @@
* Add any number of pipes before max
*/
enum rmnet_egress_ep_type {
RMNET_EGRESS_DEFAULT = 0x0000,
RMNET_EGRESS_LOW_LAT_CTRL = 0x0001,
RMNET_EGRESS_LOW_LAT_DATA = 0x0002,
-RMNET_EGRESS_MAX = 0x0003,
+RMNET_EGRESS_ETH_DATA = 0x0003,
+RMNET_EGRESS_MAX = 0x0004,
};
@ -300,7 +312,7 @@ struct rmnet_ioctl_extended_s {
/* Input values for the RMNET_IOCTL_ADD_MUX_CHANNEL IOCTL */
struct {
__u32 mux_id;
__s8 vchannel_name[IFNAMSIZ];
} rmnet_mux_val;
/* Input values for the RMNET_IOCTL_FLOW_CONTROL IOCTL */
@ -309,7 +321,7 @@ struct rmnet_ioctl_extended_s {
__u8 mux_id;
} flow_control_prop;
-/* Return values for RMNET_IOCTL_GET_EP_PAIR */
+/* Return values for RMNET_IOCTL_GET_EP_PAIR/LL/ETH */
struct {
__u32 consumer_pipe_num;
__u32 producer_pipe_num;
@ -334,6 +346,13 @@ struct rmnet_ioctl_extended_s {
__u16 mtu_v4;
__u16 mtu_v6;
} mtu_params;
/* Input values for the RMNET_IOCTL_ADD_MUX_CHANNEL_v2 IOCTL */
struct {
__u32 mux_id;
__s8 vchannel_name[IFNAMSIZ];
__u8 mac[MAC_ADDR_SIZE];
} rmnet_mux_val_v2;
} u;
};
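A sketch of how userspace would drive the new v2 channel add. It assumes the header's usual RMNET_IOCTL_EXTENDED entry point with its ifreq-based calling convention and the struct's leading extended_ioctl selector field, none of which appear in this hunk; the device and channel names are placeholders.

	#include <string.h>
	#include <net/if.h>
	#include <sys/ioctl.h>
	#include <linux/types.h>
	/* plus this UAPI header for struct rmnet_ioctl_extended_s */

	static int add_mux_channel_v2(int fd, const char *dev, const char *vname,
				      __u32 mux_id, const __u8 mac[MAC_ADDR_SIZE])
	{
		struct rmnet_ioctl_extended_s ext = {0};
		struct ifreq ifr = {0};

		ext.extended_ioctl = RMNET_IOCTL_ADD_MUX_CHANNEL_v2;
		ext.u.rmnet_mux_val_v2.mux_id = mux_id;
		strncpy((char *)ext.u.rmnet_mux_val_v2.vchannel_name, vname,
			IFNAMSIZ - 1);
		memcpy(ext.u.rmnet_mux_val_v2.mac, mac, MAC_ADDR_SIZE);

		strncpy(ifr.ifr_name, dev, IFNAMSIZ - 1);
		ifr.ifr_ifru.ifru_data = (void *)&ext; /* extended args */
		return ioctl(fd, RMNET_IOCTL_EXTENDED, &ifr);
	}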

View File

@ -340,7 +340,7 @@ static ssize_t store_not_preferred(struct cluster_data *state,
const char *buf, size_t count)
{
struct cpu_data *c;
-unsigned int i;
+unsigned int i, mask;
unsigned int val[MAX_CPUS_PER_CLUSTER];
unsigned long flags;
int ret;
@ -353,10 +353,16 @@ static ssize_t store_not_preferred(struct cluster_data *state,
return -EINVAL;
spin_lock_irqsave(&state_lock, flags);
-for (i = 0; i < state->num_cpus; i++) {
-c = &per_cpu(cpu_state, i + state->first_cpu);
+for (i = 0, mask = 0; i < state->num_cpus;) {
if (!cpumask_test_cpu(i + mask + state->first_cpu, cpu_possible_mask)) {
mask++;
continue;
}
c = &per_cpu(cpu_state, i + mask + state->first_cpu);
c->not_preferred = val[i];
not_preferred_count += !!val[i];
i++;
}
state->nr_not_preferred_cpus = not_preferred_count;
spin_unlock_irqrestore(&state_lock, flags);
@ -369,20 +375,26 @@ static ssize_t show_not_preferred(const struct cluster_data *state, char *buf)
struct cpu_data *c;
ssize_t count = 0;
unsigned long flags;
-int i;
+int i, mask;
spin_lock_irqsave(&state_lock, flags);
-for (i = 0; i < state->num_cpus; i++) {
-c = &per_cpu(cpu_state, i + state->first_cpu);
+for (i = 0, mask = 0; i < state->num_cpus;) {
if (!cpumask_test_cpu(i + mask + state->first_cpu, cpu_possible_mask)) {
mask++;
continue;
}
c = &per_cpu(cpu_state, i + mask + state->first_cpu);
count += scnprintf(buf + count, PAGE_SIZE - count,
"CPU#%d: %u\n", c->cpu, c->not_preferred);
"CPU#%d: %u\n", c->cpu, c->not_preferred);
i++;
}
spin_unlock_irqrestore(&state_lock, flags);
return count;
}
struct core_ctl_attr {
struct attribute attr;
ssize_t (*show)(const struct cluster_data *, char *);