Update IPA and GSI driver techpack snapshot

Update the driver to msm-5.4 kernel snapshot as of
'commit <e1007d065f6> ("sched/fair: consider uclamp
 boost while deciding the start_cpu")'.

Change-Id: If63ae521dd26cbf72514cd0b95acfe38ed68cf82
Signed-off-by: Ghanim Fodi <gfodi@codeaurora.org>
This commit is contained in:
Ghanim Fodi 2020-01-29 21:22:33 +02:00
parent 2bf0baa500
commit d0ea932ccf
9 changed files with 170 additions and 58 deletions

View File

@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2015-2019, The Linux Foundation. All rights reserved.
* Copyright (c) 2015-2020, The Linux Foundation. All rights reserved.
*/
#include <linux/mutex.h>
@ -1590,6 +1590,17 @@ static int ipa3_usb_xdci_connect_internal(
return result;
}
/* Start MHIP UL channel before starting USB UL channel
* DL channel will be started when voting for PCIe -> LPM Exit.
*/
if (ipa3_is_mhip_offload_enabled()) {
result = ipa_mpm_mhip_xdci_pipe_enable(params->teth_prot);
if (result) {
IPA_USB_ERR("failed to enable MHIP UL channel\n");
goto connect_fail;
}
}
if (params->teth_prot != IPA_USB_DIAG) {
/* Start UL channel */
result = ipa3_xdci_start(params->usb_to_ipa_clnt_hdl,
@ -1610,20 +1621,11 @@ static int ipa3_usb_xdci_connect_internal(
goto connect_dl_fail;
}
/* MHIP pipe enablement */
if (ipa3_is_mhip_offload_enabled()) {
result = ipa_mpm_mhip_xdci_pipe_enable(params->teth_prot);
if (result) {
IPA_USB_ERR("failed to enable MHIP channel\n");
goto connect_teth_prot_fail;
}
}
/* Connect tethering protocol */
result = ipa3_usb_connect_teth_prot(params->teth_prot);
if (result) {
IPA_USB_ERR("failed to connect teth protocol\n");
goto connect_mhip_prot_fail;
goto connect_teth_prot_fail;
}
if (!ipa3_usb_set_state(IPA_USB_CONNECTED, false, ttype)) {
@ -1637,9 +1639,6 @@ static int ipa3_usb_xdci_connect_internal(
state_change_connected_fail:
ipa3_usb_disconnect_teth_prot(params->teth_prot);
connect_mhip_prot_fail:
if (ipa3_is_mhip_offload_enabled())
ipa_mpm_mhip_xdci_pipe_disable(params->teth_prot);
connect_teth_prot_fail:
ipa3_xdci_disconnect(params->ipa_to_usb_clnt_hdl, false, -1);
ipa3_reset_gsi_channel(params->ipa_to_usb_clnt_hdl);
@ -1651,8 +1650,12 @@ connect_dl_fail:
ipa3_reset_gsi_event_ring(params->usb_to_ipa_clnt_hdl);
}
connect_ul_fail:
if (ipa3_is_mhip_offload_enabled())
ipa_mpm_mhip_xdci_pipe_disable(params->teth_prot);
connect_fail:
ipa_pm_deactivate_sync(
ipa3_usb_ctx->ttype_ctx[ttype].pm_ctx.hdl);
return result;
}

View File

@ -3862,22 +3862,23 @@ int _ipa_init_sram_v3(void)
if (ipa_get_hw_type() >= IPA_HW_v4_5) {
ipa3_sram_set_canary(ipa_sram_mmio,
IPA_MEM_PART(nat_tbl_ofst) - 12);
ipa3_sram_set_canary(ipa_sram_mmio,
IPA_MEM_PART(nat_tbl_ofst) - 8);
ipa3_sram_set_canary(ipa_sram_mmio,
IPA_MEM_PART(nat_tbl_ofst) - 4);
ipa3_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(nat_tbl_ofst));
}
if (ipa_get_hw_type() >= IPA_HW_v4_0) {
if (ipa_get_hw_type() < IPA_HW_v4_5) {
ipa3_sram_set_canary(ipa_sram_mmio,
IPA_MEM_PART(pdn_config_ofst) - 4);
ipa3_sram_set_canary(ipa_sram_mmio,
IPA_MEM_PART(pdn_config_ofst));
ipa3_sram_set_canary(ipa_sram_mmio,
IPA_MEM_PART(stats_quota_ofst) - 4);
IPA_MEM_PART(stats_quota_q6_ofst) - 4);
ipa3_sram_set_canary(ipa_sram_mmio,
IPA_MEM_PART(stats_quota_ofst));
IPA_MEM_PART(stats_quota_q6_ofst));
} else {
ipa3_sram_set_canary(ipa_sram_mmio,
IPA_MEM_PART(stats_quota_q6_ofst) - 12);
}
}
if (ipa_get_hw_type() <= IPA_HW_v3_5 ||
ipa_get_hw_type() >= IPA_HW_v4_5) {
ipa3_sram_set_canary(ipa_sram_mmio,

View File

@ -1427,6 +1427,7 @@ static ssize_t ipa3_read_stats(struct file *file, char __user *ubuf,
"lan_repl_rx_empty=%u\n"
"flow_enable=%u\n"
"flow_disable=%u\n",
"rx_page_drop_cnt=%u\n",
ipa3_ctx->stats.tx_sw_pkts,
ipa3_ctx->stats.tx_hw_pkts,
ipa3_ctx->stats.tx_non_linear,
@ -1442,7 +1443,8 @@ static ssize_t ipa3_read_stats(struct file *file, char __user *ubuf,
ipa3_ctx->stats.lan_rx_empty,
ipa3_ctx->stats.lan_repl_rx_empty,
ipa3_ctx->stats.flow_enable,
ipa3_ctx->stats.flow_disable);
ipa3_ctx->stats.flow_disable,
ipa3_ctx->stats.rx_page_drop_cnt);
cnt += nbytes;
for (i = 0; i < IPAHAL_PKT_STATUS_EXCEPTION_MAX; i++) {

View File

@ -3398,10 +3398,17 @@ static struct sk_buff *handle_page_completion(struct gsi_chan_xfer_notify
IPAERR("update_truesize not supported\n");
if (notify->veid >= GSI_VEID_MAX) {
rx_pkt->sys->free_rx_wrapper(rx_pkt);
if (!rx_page.is_tmp_alloc)
init_page_count(rx_page.page);
IPAERR("notify->veid > GSI_VEID_MAX\n");
if (!rx_page.is_tmp_alloc) {
init_page_count(rx_page.page);
} else {
dma_unmap_page(ipa3_ctx->pdev, rx_page.dma_addr,
rx_pkt->len, DMA_FROM_DEVICE);
__free_pages(rx_pkt->page_data.page,
IPA_WAN_PAGE_ORDER);
}
rx_pkt->sys->free_rx_wrapper(rx_pkt);
IPA_STATS_INC_CNT(ipa3_ctx->stats.rx_page_drop_cnt);
return NULL;
}
@ -3415,10 +3422,18 @@ static struct sk_buff *handle_page_completion(struct gsi_chan_xfer_notify
sys->ep->client == IPA_CLIENT_APPS_LAN_CONS) {
rx_skb = alloc_skb(0, GFP_ATOMIC);
if (unlikely(!rx_skb)) {
rx_pkt->sys->free_rx_wrapper(rx_pkt);
if (!rx_page.is_tmp_alloc)
init_page_count(rx_page.page);
IPAERR("skb alloc failure\n");
list_del(&rx_pkt->link);
if (!rx_page.is_tmp_alloc) {
init_page_count(rx_page.page);
} else {
dma_unmap_page(ipa3_ctx->pdev, rx_page.dma_addr,
rx_pkt->len, DMA_FROM_DEVICE);
__free_pages(rx_pkt->page_data.page,
IPA_WAN_PAGE_ORDER);
}
rx_pkt->sys->free_rx_wrapper(rx_pkt);
IPA_STATS_INC_CNT(ipa3_ctx->stats.rx_page_drop_cnt);
return NULL;
}
/* go over the list backward to save computations on updating length */

View File

@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
* Copyright (c) 2017-2020, The Linux Foundation. All rights reserved.
*/
#include <linux/debugfs.h>
@ -271,6 +271,27 @@ static void ipa_close_coal_frame(struct ipahal_imm_cmd_pyld **coal_cmd_pyld)
&reg_write_coal_close, false);
}
static bool ipa_validate_quota_stats_sram_size(u32 needed_len)
{
u32 sram_size;
/* Starting IPA4.5 Quota stats is split between Q6 and AP */
if (ipa3_ctx->ipa_hw_type < IPA_HW_v4_5) {
IPAERR("Not supported ipa_ver=%d\n", ipa3_ctx->ipa_hw_type);
return false;
}
sram_size = IPA_MEM_PART(stats_quota_ap_size);
if (needed_len > sram_size) {
IPAERR("SRAM partition too small: %u needed %u\n",
sram_size, needed_len);
return false;
}
return true;
}
int ipa_init_quota_stats(u32 pipe_bitmask)
{
struct ipahal_stats_init_pyld *pyld;
@ -301,9 +322,7 @@ int ipa_init_quota_stats(u32 pipe_bitmask)
return -EPERM;
}
if (pyld->len > IPA_MEM_PART(stats_quota_size)) {
IPAERR("SRAM partition too small: %d needed %d\n",
IPA_MEM_PART(stats_quota_size), pyld->len);
if (!ipa_validate_quota_stats_sram_size(pyld->len)) {
ret = -EPERM;
goto destroy_init_pyld;
}
@ -356,7 +375,7 @@ int ipa_init_quota_stats(u32 pipe_bitmask)
quota_base.offset = ipahal_get_reg_n_ofst(IPA_STAT_QUOTA_BASE_n,
ipa3_ctx->ee);
quota_base.value = ipa3_ctx->smem_restricted_bytes +
IPA_MEM_PART(stats_quota_ofst);
IPA_MEM_PART(stats_quota_ap_ofst);
quota_base.value_mask = ~0;
quota_base_pyld = ipahal_construct_imm_cmd(IPA_IMM_CMD_REGISTER_WRITE,
&quota_base, false);
@ -377,7 +396,7 @@ int ipa_init_quota_stats(u32 pipe_bitmask)
cmd.size = pyld->len;
cmd.system_addr = dma_address;
cmd.local_addr = ipa3_ctx->smem_restricted_bytes +
IPA_MEM_PART(stats_quota_ofst);
IPA_MEM_PART(stats_quota_ap_ofst);
cmd_pyld = ipahal_construct_imm_cmd(
IPA_IMM_CMD_DMA_SHARED_MEM, &cmd, false);
if (!cmd_pyld) {
@ -476,7 +495,7 @@ int ipa_get_quota_stats(struct ipa_quota_stats_all *out)
cmd.size = mem.size;
cmd.system_addr = mem.phys_base;
cmd.local_addr = ipa3_ctx->smem_restricted_bytes +
IPA_MEM_PART(stats_quota_ofst) + offset.offset;
IPA_MEM_PART(stats_quota_ap_ofst) + offset.offset;
cmd_pyld[num_cmd] = ipahal_construct_imm_cmd(
IPA_IMM_CMD_DMA_SHARED_MEM, &cmd, false);
if (!cmd_pyld[num_cmd]) {

View File

@ -1397,6 +1397,7 @@ struct ipa3_stats {
u32 flow_enable;
u32 flow_disable;
u32 tx_non_linear;
u32 rx_page_drop_cnt;
struct ipa3_page_recycle_stats page_recycle_stats[2];
};
@ -2318,8 +2319,10 @@ struct ipa3_mem_partition {
u32 uc_descriptor_ram_size;
u32 pdn_config_ofst;
u32 pdn_config_size;
u32 stats_quota_ofst;
u32 stats_quota_size;
u32 stats_quota_q6_ofst;
u32 stats_quota_q6_size;
u32 stats_quota_ap_ofst;
u32 stats_quota_ap_size;
u32 stats_tethering_ofst;
u32 stats_tethering_size;
u32 stats_fnr_ofst;

View File

@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2019, The Linux Foundation. All rights reserved.
* Copyright (c) 2019-2020, The Linux Foundation. All rights reserved.
*/
#include <linux/dma-mapping.h>
@ -74,6 +74,7 @@
#define IPA_MHIP_HOLB_TMO 31 /* value to match granularity on ipa HW 4.5 */
#define IPA_MPM_FLOW_CTRL_ADD 1
#define IPA_MPM_FLOW_CTRL_DELETE 0
#define IPA_MPM_NUM_OF_INIT_CMD_DESC 2
enum mhip_re_type {
MHIP_RE_XFER = 0x2,
@ -478,12 +479,56 @@ static void ipa_mpm_gsi_chan_err_cb(struct gsi_chan_err_notify *err_data)
static int ipa_mpm_set_dma_mode(enum ipa_client_type src_pipe,
enum ipa_client_type dst_pipe, bool reset)
{
int result = 0;
struct ipahal_imm_cmd_pyld *cmd_pyld[IPA_MPM_NUM_OF_INIT_CMD_DESC];
struct ipahal_imm_cmd_register_write reg_write_coal_close;
struct ipahal_reg_valmask valmask;
struct ipa3_desc desc[IPA_MPM_NUM_OF_INIT_CMD_DESC];
int i, num_cmd = 0, result = 0;
struct ipa_ep_cfg ep_cfg = { { 0 } };
IPA_MPM_FUNC_ENTRY();
IPA_MPM_DBG("DMA from %d to %d reset=%d\n", src_pipe, dst_pipe, reset);
memset(desc, 0, sizeof(desc));
memset(cmd_pyld, 0, sizeof(cmd_pyld));
/* First step is to clear IPA Pipeline before changing DMA mode */
if (ipa3_get_ep_mapping(src_pipe) != IPA_EP_NOT_ALLOCATED) {
i = ipa3_get_ep_mapping(src_pipe);
reg_write_coal_close.skip_pipeline_clear = false;
reg_write_coal_close.pipeline_clear_options = IPAHAL_HPS_CLEAR;
reg_write_coal_close.offset = ipahal_get_reg_ofst(
IPA_AGGR_FORCE_CLOSE);
ipahal_get_aggr_force_close_valmask(i, &valmask);
reg_write_coal_close.value = valmask.val;
reg_write_coal_close.value_mask = valmask.mask;
cmd_pyld[num_cmd] = ipahal_construct_imm_cmd(
IPA_IMM_CMD_REGISTER_WRITE,
&reg_write_coal_close, false);
if (!cmd_pyld[num_cmd]) {
IPA_MPM_ERR("failed to construct coal close IC\n");
result = -ENOMEM;
goto destroy_imm_cmd;
}
ipa3_init_imm_cmd_desc(&desc[num_cmd], cmd_pyld[num_cmd]);
++num_cmd;
}
/* NO-OP IC for ensuring that IPA pipeline is empty */
cmd_pyld[num_cmd] =
ipahal_construct_nop_imm_cmd(false, IPAHAL_HPS_CLEAR, false);
if (!cmd_pyld[num_cmd]) {
IPA_MPM_ERR("failed to construct NOP imm cmd\n");
result = -ENOMEM;
goto destroy_imm_cmd;
}
result = ipa3_send_cmd(num_cmd, desc);
if (result) {
IPAERR("fail to send Reset Pipeline immediate command\n");
goto destroy_imm_cmd;
}
/* Reset to basic if reset = 1, otherwise set to DMA */
if (reset)
ep_cfg.mode.mode = IPA_BASIC;
@ -495,6 +540,10 @@ static int ipa_mpm_set_dma_mode(enum ipa_client_type src_pipe,
result = ipa_cfg_ep(ipa_get_ep_mapping(src_pipe), &ep_cfg);
IPA_MPM_FUNC_EXIT();
destroy_imm_cmd:
for (i = 0; i < num_cmd; ++i)
ipahal_destroy_imm_cmd(cmd_pyld[i]);
return result;
}

View File

@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2013-2019, The Linux Foundation. All rights reserved.
* Copyright (c) 2013-2020, The Linux Foundation. All rights reserved.
*/
#include <linux/module.h>
@ -589,10 +589,10 @@ static int ipa3_qmi_init_modem_send_sync_msg(void)
}
req.hw_stats_quota_base_addr_valid = true;
req.hw_stats_quota_base_addr =
IPA_MEM_PART(stats_quota_ofst) + smem_restr_bytes;
IPA_MEM_PART(stats_quota_q6_ofst) + smem_restr_bytes;
req.hw_stats_quota_size_valid = true;
req.hw_stats_quota_size = IPA_MEM_PART(stats_quota_size);
req.hw_stats_quota_size = IPA_MEM_PART(stats_quota_q6_size);
req.hw_drop_stats_base_addr_valid = true;
req.hw_drop_stats_base_addr =

View File

@ -3144,8 +3144,10 @@ static struct ipa3_mem_partition ipa_4_1_mem_part = {
.uc_descriptor_ram_size = 0x400,
.pdn_config_ofst = 0xbd8,
.pdn_config_size = 0x50,
.stats_quota_ofst = 0xc30,
.stats_quota_size = 0x60,
.stats_quota_q6_ofst = 0xc30,
.stats_quota_q6_size = 0x60,
.stats_quota_ap_ofst = 0,
.stats_quota_ap_size = 0,
.stats_tethering_ofst = 0xc90,
.stats_tethering_size = 0x140,
.stats_flt_v4_ofst = 0xdd0,
@ -3233,8 +3235,10 @@ static struct ipa3_mem_partition ipa_4_2_mem_part = {
.uc_descriptor_ram_size = 0x0,
.pdn_config_ofst = 0x9F8,
.pdn_config_size = 0x50,
.stats_quota_ofst = 0xa50,
.stats_quota_size = 0x60,
.stats_quota_q6_ofst = 0xa50,
.stats_quota_q6_size = 0x60,
.stats_quota_ap_ofst = 0,
.stats_quota_ap_size = 0,
.stats_tethering_ofst = 0xab0,
.stats_tethering_size = 0x140,
.stats_flt_v4_ofst = 0xbf0,
@ -3299,8 +3303,10 @@ static struct ipa3_mem_partition ipa_4_5_mem_part = {
.apps_hdr_proc_ctx_size_ddr = 0x0,
.nat_tbl_ofst = 0x1800,
.nat_tbl_size = 0xd00,
.stats_quota_ofst = 0x2510,
.stats_quota_size = 0x78,
.stats_quota_q6_ofst = 0x2510,
.stats_quota_q6_size = 0x30,
.stats_quota_ap_ofst = 0x2540,
.stats_quota_ap_size = 0x48,
.stats_tethering_ofst = 0x2588,
.stats_tethering_size = 0x238,
.stats_flt_v4_ofst = 0,
@ -3394,8 +3400,10 @@ static struct ipa3_mem_partition ipa_4_7_mem_part = {
.nat_tbl_size = 0xd00,
.pdn_config_ofst = 0x1be8,
.pdn_config_size = 0x50,
.stats_quota_ofst = 0x1c40,
.stats_quota_size = 0x78,
.stats_quota_q6_ofst = 0x1c40,
.stats_quota_q6_size = 0x30,
.stats_quota_ap_ofst = 0x1c70,
.stats_quota_ap_size = 0x48,
.stats_tethering_ofst = 0x1cb8,
.stats_tethering_size = 0x238,
.stats_flt_v4_ofst = 0,
@ -3485,8 +3493,10 @@ static struct ipa3_mem_partition ipa_4_9_mem_part = {
.apps_hdr_proc_ctx_size_ddr = 0x0,
.nat_tbl_ofst = 0x1800,
.nat_tbl_size = 0xd00,
.stats_quota_ofst = 0x2510,
.stats_quota_size = 0x78,
.stats_quota_q6_ofst = 0x2510,
.stats_quota_q6_size = 0x30,
.stats_quota_ap_ofst = 0x2540,
.stats_quota_ap_size = 0x48,
.stats_tethering_ofst = 0x2588,
.stats_tethering_size = 0x238,
.stats_flt_v4_ofst = 0,
@ -6131,13 +6141,23 @@ int ipa3_init_mem_partition(enum ipa_hw_type type)
return -ENODEV;
}
IPADBG("QUOTA STATS OFST 0x%x SIZE 0x%x\n",
IPA_MEM_PART(stats_quota_ofst),
IPA_MEM_PART(stats_quota_size));
IPADBG("Q6 QUOTA STATS OFST 0x%x SIZE 0x%x\n",
IPA_MEM_PART(stats_quota_q6_ofst),
IPA_MEM_PART(stats_quota_q6_size));
if (IPA_MEM_PART(stats_quota_ofst) & 7) {
IPAERR("QUOTA STATS OFST 0x%x is unaligned\n",
IPA_MEM_PART(stats_quota_ofst));
if (IPA_MEM_PART(stats_quota_q6_ofst) & 7) {
IPAERR("Q6 QUOTA STATS OFST 0x%x is unaligned\n",
IPA_MEM_PART(stats_quota_q6_ofst));
return -ENODEV;
}
IPADBG("AP QUOTA STATS OFST 0x%x SIZE 0x%x\n",
IPA_MEM_PART(stats_quota_ap_ofst),
IPA_MEM_PART(stats_quota_ap_size));
if (IPA_MEM_PART(stats_quota_ap_ofst) & 7) {
IPAERR("AP QUOTA STATS OFST 0x%x is unaligned\n",
IPA_MEM_PART(stats_quota_ap_ofst));
return -ENODEV;
}