qcacld-3.0: Add GRO support to NAPI+Rx_thread processing model

GRO aggregations hang off a NAPI instance, and we cannot reuse the
hif NAPI instance in the Rx thread: the gro_list inside a NAPI
instance is flushed at the start and end of NAPI poll, which would
corrupt the gro_list the Rx thread is working on. Address this by
creating a dummy NAPI instance mapped to each hif NAPI; the dummy
instances are never scheduled.
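
For illustration, a minimal sketch of that dummy-NAPI idea follows
(hif_dummy_poll, hif_init_rx_thread_napi and the rx_thread_netdev
field are assumed names for this sketch, not necessarily what this
change adds): each hif NAPI gets a shadow napi_struct registered
against a dummy netdev and never scheduled, so its gro_list is only
ever touched from the Rx thread.

#include <linux/netdevice.h>

/* Sketch only, with assumed names: a per-hif-NAPI dummy NAPI that is
 * never scheduled, so its gro_list is driven solely from the Rx
 * thread via napi_gro_receive()/napi_gro_flush().
 */
static int hif_dummy_poll(struct napi_struct *napi, int budget)
{
	/* Never runs: this NAPI instance is never scheduled. */
	return 0;
}

static void hif_init_rx_thread_napi(struct qca_napi_info *napii)
{
	/* Back the dummy NAPI with a dummy netdev (assumed field). */
	init_dummy_netdev(&napii->rx_thread_netdev);
	netif_napi_add(&napii->rx_thread_netdev, &napii->rx_thread_napi,
		       hif_dummy_poll, 64);
	napi_enable(&napii->rx_thread_napi);
}

The Rx thread then drives napi_gro_receive()/napi_gro_flush() only on
this dummy instance, while the hif NAPI's own gro_list stays private
to its poll cycle.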

Change-Id: I6e1de57a669c56206a7863e86c3840bd0a9eabc1
CRs-Fixed: 2128450
Manjunathappa Prakash, 2018-03-28 20:05:46 -07:00 (committed by nshrivas)
parent af8b633fcb
commit fb5f25b24a
3 changed files with 240 additions and 11 deletions


@@ -5462,6 +5462,148 @@ static QDF_STATUS ol_txrx_register_pause_cb(struct cdp_soc_t *soc,
}
#endif
#ifdef RECEIVE_OFFLOAD
/**
* ol_txrx_offld_flush_handler() - offld flush handler
* @context: dev handle
* @rxpkt: rx data
* @staid: station id
*
* This function handles an offld flush indication.
* If the rx thread is enabled, it is invoked from the rx thread;
* otherwise it is called in the tasklet context.
*
* Return: none
*/
static void ol_txrx_offld_flush_handler(void *context,
void *rxpkt,
uint16_t staid)
{
ol_txrx_pdev_handle pdev = cds_get_context(QDF_MODULE_ID_TXRX);
if (qdf_unlikely(!pdev)) {
ol_txrx_err("Invalid context");
qdf_assert(0);
return;
}
if (pdev->offld_flush_cb)
pdev->offld_flush_cb(context);
else
ol_txrx_err("offld_flush_cb NULL");
}
/**
* ol_txrx_offld_flush() - offld flush callback
* @data: opaque data pointer
*
* This is the callback registered with CE to trigger
* an offld flush
*
* Return: none
*/
static void ol_txrx_offld_flush(void *data)
{
p_cds_sched_context sched_ctx = get_cds_sched_ctxt();
struct cds_ol_rx_pkt *pkt;
ol_txrx_pdev_handle pdev = cds_get_context(QDF_MODULE_ID_TXRX);
if (qdf_unlikely(!sched_ctx))
return;
if (qdf_unlikely(!pdev)) {
ol_txrx_err("Invalid pdev context");
return;
}
if (!ol_cfg_is_rx_thread_enabled(pdev->ctrl_pdev)) {
ol_txrx_offld_flush_handler(data, NULL, 0);
} else {
pkt = cds_alloc_ol_rx_pkt(sched_ctx);
if (qdf_unlikely(!pkt)) {
ol_txrx_err("Not able to allocate context");
return;
}
pkt->callback = ol_txrx_offld_flush_handler;
pkt->context = data;
pkt->Rxpkt = NULL;
pkt->staId = 0;
cds_indicate_rxpkt(sched_ctx, pkt);
}
}
/**
* ol_register_offld_flush_cb() - register the offld flush callback
* @offld_flush_cb: flush callback function
*
* Store the offld flush callback provided and in turn
* register OL's offld flush handler with CE
*
* Return: none
*/
static void ol_register_offld_flush_cb(void (offld_flush_cb)(void *))
{
struct hif_opaque_softc *hif_device;
struct ol_txrx_pdev_t *pdev = cds_get_context(QDF_MODULE_ID_TXRX);
if (pdev == NULL) {
ol_txrx_err("pdev NULL!");
TXRX_ASSERT2(0);
goto out;
}
if (pdev->offld_flush_cb != NULL) {
ol_txrx_info("offld already initialised");
if (pdev->offld_flush_cb != offld_flush_cb) {
ol_txrx_err("offld_flush_cb differs from previously registered callback");
TXRX_ASSERT2(0);
goto out;
}
goto out;
}
pdev->offld_flush_cb = offld_flush_cb;
hif_device = cds_get_context(QDF_MODULE_ID_HIF);
if (qdf_unlikely(hif_device == NULL)) {
ol_txrx_err("hif_device NULL!");
qdf_assert(0);
goto out;
}
hif_offld_flush_cb_register(hif_device, ol_txrx_offld_flush);
out:
return;
}
/**
* ol_deregister_offld_flush_cb() - deregister the offld flush callback
*
* Remove the offld flush callback provided and in turn
* deregister OL's offld flush handler with CE
*
* Return: none
*/
static void ol_deregister_offld_flush_cb(void)
{
struct hif_opaque_softc *hif_device;
struct ol_txrx_pdev_t *pdev = cds_get_context(QDF_MODULE_ID_TXRX);
if (pdev == NULL) {
ol_txrx_err("pdev NULL!");
return;
}
hif_device = cds_get_context(QDF_MODULE_ID_HIF);
if (qdf_unlikely(hif_device == NULL)) {
ol_txrx_err("hif_device NULL!");
qdf_assert(0);
return;
}
hif_offld_flush_cb_deregister(hif_device);
pdev->offld_flush_cb = NULL;
}
#endif /* RECEIVE_OFFLOAD */
/**
* ol_register_data_stall_detect_cb() - register data stall callback
* @data_stall_detect_callback: data stall callback function
@@ -6009,6 +6151,13 @@ static struct cdp_ipa_ops ol_ops_ipa = {
};
#endif
#ifdef RECEIVE_OFFLOAD
static struct cdp_rx_offld_ops ol_rx_offld_ops = {
.register_rx_offld_flush_cb = ol_register_offld_flush_cb,
.deregister_rx_offld_flush_cb = ol_deregister_offld_flush_cb
};
#endif
static struct cdp_bus_ops ol_ops_bus = {
.bus_suspend = ol_txrx_bus_suspend,
.bus_resume = ol_txrx_bus_resume
@@ -6131,6 +6280,9 @@ static struct cdp_ops ol_txrx_ops = {
.l_flowctl_ops = &ol_ops_l_flowctl,
#ifdef IPA_OFFLOAD
.ipa_ops = &ol_ops_ipa,
#endif
#ifdef RECEIVE_OFFLOAD
.rx_offld_ops = &ol_rx_offld_ops,
#endif
.bus_ops = &ol_ops_bus,
.ocb_ops = &ol_ops_ocb,


@@ -995,10 +995,7 @@ struct ol_txrx_pdev_t {
struct ol_txrx_peer_t *ocb_peer;
tx_pause_callback pause_cb;
-struct {
-void (*lro_flush_cb)(void *);
-qdf_atomic_t lro_dev_cnt;
-} lro_info;
void (*offld_flush_cb)(void *);
struct ol_txrx_peer_t *self_peer;
qdf_work_t peer_unmap_timer_work;


@@ -1516,8 +1516,9 @@ static void hdd_resolve_rx_ol_mode(struct hdd_context *hdd_ctx)
*/
static QDF_STATUS hdd_gro_rx(struct hdd_adapter *adapter, struct sk_buff *skb)
{
-struct napi_struct *napi;
struct qca_napi_info *qca_napii;
struct qca_napi_data *napid;
struct napi_struct *napi_to_use;
QDF_STATUS status = QDF_STATUS_E_FAILURE;
/* Only enabling it for STA mode like LRO today */
@@ -1525,15 +1526,87 @@ static QDF_STATUS hdd_gro_rx(struct hdd_adapter *adapter, struct sk_buff *skb)
return QDF_STATUS_E_NOSUPPORT;
napid = hdd_napi_get_all();
-napi = hif_get_napi(QDF_NBUF_CB_RX_CTX_ID(skb), napid);
-skb_set_hash(skb, QDF_NBUF_CB_RX_FLOW_ID(skb), PKT_HASH_TYPE_L4);
if (unlikely(napid == NULL))
goto out;
-if (GRO_DROP != napi_gro_receive(napi, skb))
-status = QDF_STATUS_SUCCESS;
qca_napii = hif_get_napi(QDF_NBUF_CB_RX_CTX_ID(skb), napid);
if (unlikely(qca_napii == NULL))
goto out;
skb_set_hash(skb, QDF_NBUF_CB_RX_FLOW_ID(skb), PKT_HASH_TYPE_L4);
/*
* In Rx thread mode we break out of the hif NAPI poll context, so use
* the dummy rx_thread NAPI that corresponds to this hif NAPI.
*/
if (adapter->hdd_ctx->enable_rxthread)
napi_to_use = &qca_napii->rx_thread_napi;
else
napi_to_use = &qca_napii->napi;
local_bh_disable();
napi_gro_receive(napi_to_use, skb);
local_bh_enable();
status = QDF_STATUS_SUCCESS;
out:
return status;
}
/**
* hdd_rxthread_napi_gro_flush() - GRO flush callback for NAPI+Rx_Thread Rx mode
* @data: hif NAPI context
*
* Return: none
*/
static void hdd_rxthread_napi_gro_flush(void *data)
{
struct qca_napi_info *qca_napii = (struct qca_napi_info *)data;
local_bh_disable();
/*
* In Rx thread mode context is broken out of the hif NAPI poll, so
* flush the dummy rx_thread NAPI that corresponds to this hif NAPI.
*/
napi_gro_flush(&qca_napii->rx_thread_napi, false);
local_bh_enable();
}
/**
* hdd_hif_napi_gro_flush() - GRO flush callback for NAPI Rx mode
* @data: hif NAPI context
*
* Return: none
*/
static void hdd_hif_napi_gro_flush(void *data)
{
struct qca_napi_info *qca_napii = (struct qca_napi_info *)data;
local_bh_disable();
napi_gro_flush(&qca_napii->napi, false);
local_bh_enable();
}
#ifdef FEATURE_LRO
/**
* hdd_qdf_lro_flush() - LRO flush wrapper
* @data: hif NAPI context
*
* Return: none
*/
static void hdd_qdf_lro_flush(void *data)
{
struct qca_napi_info *qca_napii = (struct qca_napi_info *)data;
qdf_lro_ctx_t qdf_lro_ctx = qca_napii->lro_ctx;
qdf_lro_flush(qdf_lro_ctx);
}
#else
static void hdd_qdf_lro_flush(void *data)
{
}
#endif
/**
* hdd_register_rx_ol() - Register LRO/GRO rx processing callbacks
*
@@ -1542,15 +1615,22 @@ static QDF_STATUS hdd_gro_rx(struct hdd_adapter *adapter, struct sk_buff *skb)
static void hdd_register_rx_ol(void)
{
struct hdd_context *hdd_ctx = cds_get_context(QDF_MODULE_ID_HDD);
void *soc = cds_get_context(QDF_MODULE_ID_SOC);
if (!hdd_ctx) {
hdd_err("HDD context is NULL");
return;
}
if (hdd_ctx->ol_enable == CFG_LRO_ENABLED) {
/* Register the flush callback */
cdp_register_rx_offld_flush_cb(soc, hdd_qdf_lro_flush);
hdd_ctx->receive_offload_cb = hdd_lro_rx;
hdd_debug("LRO is enabled");
} else if (hdd_ctx->ol_enable == CFG_GRO_ENABLED) {
if (hdd_ctx->enable_rxthread)
cdp_register_rx_offld_flush_cb(soc,
hdd_rxthread_napi_gro_flush);
else
cdp_register_rx_offld_flush_cb(soc,
hdd_hif_napi_gro_flush);
hdd_ctx->receive_offload_cb = hdd_gro_rx;
hdd_debug("GRO is enabled");
}
@@ -1587,7 +1667,7 @@ int hdd_rx_ol_init(struct hdd_context *hdd_ctx)
LRO_IPV6_SEED_ARR_SZ));
if (0 != wma_lro_init(&lro_config)) {
hdd_err("Failed to send LRO configuration!");
hdd_err("Failed to send LRO/GRO configuration!");
hdd_ctx->ol_enable = 0;
return -EAGAIN;
}