qcacld-3.0: Add multi-queue NAPI function

Add throughput policy handler which triggers irq/CPU management
functions as required.
Adapt NAPI enablement code to recent changes to the init sequence.
Add the new files to the build.

Change-Id: I8c16667a08982e419c2f2152cf31afcd3a535eb4
CRs-Fixed: 1064917
Author: Orhan K AKYILDIZ
Date:   2016-05-16 12:40:13 -07:00
Parent: 38e8ce8849
Commit: 1481aff562
4 changed files with 98 additions and 14 deletions

Kbuild

@@ -865,6 +865,7 @@ HIF_SDIO_NATIVE_OBJS := $(WLAN_COMMON_ROOT)/$(HIF_SDIO_NATIVE_SRC_DIR)/hif.o \
ifeq ($(CONFIG_WLAN_NAPI), y)
HIF_OBJS += $(WLAN_COMMON_ROOT)/$(HIF_DIR)/src/hif_napi.o
HIF_OBJS += $(WLAN_COMMON_ROOT)/$(HIF_DIR)/src/hif_napi_cpu.o
endif
HIF_PCIE_OBJS := $(WLAN_COMMON_ROOT)/$(HIF_PCIE_DIR)/if_pci.o
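
The conditional above pulls the NAPI objects into the build only when CONFIG_WLAN_NAPI=y; the header changes below pair this with FEATURE_NAPI compile guards so that call sites need no #ifdefs of their own. A minimal sketch of that pairing (example_napi_op is hypothetical, for illustration only):

#ifdef FEATURE_NAPI
int example_napi_op(void);	/* real symbol linked in from hif_napi.o */
#else
/* NAPI objects not built: callers get a zero-cost inline stub */
static inline int example_napi_op(void) { return 0; }
#endif /* FEATURE_NAPI */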

core/hdd/inc/wlan_hdd_napi.h

@@ -37,9 +37,12 @@
/* CLD headers */
#include "hif_napi.h"
/* Linux headers */
#include <linux/netdevice.h> /* net_device */
struct hdd_context_s;
#define HDD_NAPI_ANY (-1)
int hdd_napi_enabled(int id);
@@ -58,6 +61,18 @@ int hdd_napi_poll(struct napi_struct *napi, int budget);
struct qca_napi_data *hdd_napi_get_all(void);
#ifdef HELIUMPLUS
int hdd_napi_apply_throughput_policy(struct hdd_context_s *hddctx,
				     uint64_t tx_packets,
				     uint64_t rx_packets);
#else /* FEATURE_NAPI and NOT HELIUMPLUS */
static inline int hdd_napi_apply_throughput_policy(struct hdd_context_s *hddctx,
						   uint64_t tx_packets,
						   uint64_t rx_packets)
{
	return 0;
}
#endif /* HELIUMPLUS */
#else /* ! defined(FEATURE_NAPI) */
#include "hif_napi.h"
/**
@@ -71,11 +86,17 @@ static inline int hdd_napi_enabled(int id) { return 0; }
static inline int hdd_napi_create(void) { return 0; }
static inline int hdd_napi_destroy(int force) { return 0; }
static inline int hdd_napi_stats(char *buf, int max, char *indp,
				 struct qca_napi_data *napid) { return 0; }
static inline int hdd_napi_event(enum qca_napi_event event, void *data)
{
	return 0;
}
static inline struct qca_napi_data *hdd_napi_get_all(void) { return NULL; }
static inline int hdd_napi_apply_throughput_policy(struct hdd_context_s *hddctx,
						   uint64_t tx_packets,
						   uint64_t rx_packets)
{
	return 0;
}
#endif /* FEATURE_NAPI */
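
With HDD_NAPI_ANY (-1) defined as a wildcard id, hdd_napi_enabled() can report the aggregate NAPI state rather than that of a single CE instance, which is how the throughput policy below queries it. A minimal caller sketch (the call site is hypothetical; the API is as declared above):

#include "wlan_hdd_napi.h"

static void example_rx_path_setup(void)
{
	/* HDD_NAPI_ANY: is any NAPI instance currently enabled? */
	if (hdd_napi_enabled(HDD_NAPI_ANY)) {
		/* NAPI path: rx is polled from the NAPI context */
	} else {
		/* legacy interrupt/tasklet rx path */
	}
}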

core/hdd/src/wlan_hdd_main.c

@@ -110,6 +110,7 @@
#include "ol_rx_fwd.h"
#include "wlan_hdd_lpass.h"
#include "nan_api.h"
#include <wlan_hdd_napi.h>
#ifdef MODULE
#define WLAN_MODULE_NAME module_name(THIS_MODULE)
@@ -4894,7 +4895,6 @@ void hdd_pld_request_bus_bandwidth(hdd_context_t *hdd_ctx,
enum wlan_tp_level next_rx_level = WLAN_SVC_TP_NONE;
enum wlan_tp_level next_tx_level = WLAN_SVC_TP_NONE;
if (total > hdd_ctx->config->busBandwidthHighThreshold)
next_vote_level = PLD_BUS_WIDTH_HIGH;
else if (total > hdd_ctx->config->busBandwidthMediumThreshold)
@@ -4928,6 +4928,7 @@ void hdd_pld_request_bus_bandwidth(hdd_context_t *hdd_ctx,
if (cds_sched_handle_throughput_req(true))
hdd_err("high bandwidth set rx affinity fail");
}
hdd_napi_apply_throughput_policy(hdd_ctx, tx_packets, rx_packets);
}
/* fine-tuning parameters for RX Flows */
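
The hook added above hands the interval's tx/rx totals to the NAPI layer right after the bus-bandwidth vote is computed. The decision it delegates to hdd_napi_apply_throughput_policy() (shown in full in wlan_hdd_napi.c below) reduces to a single comparison; a simplified sketch, with an assumed threshold value and types taken from hif_napi.h:

/* Sketch only: the real code compares against
 * hddctx->config->busBandwidthHighThreshold. With a threshold of 500
 * packets per interval, tx=400 and rx=200 give 600 > 500 -> TPUT_HI. */
static enum qca_napi_tput_state example_tput_state(uint64_t tx, uint64_t rx,
						   uint64_t threshold)
{
	return (tx + rx > threshold) ? QCA_NAPI_TPUT_HI : QCA_NAPI_TPUT_LO;
}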

core/hdd/src/wlan_hdd_napi.c

@@ -105,6 +105,7 @@ int hdd_napi_create(void)
{
	struct hif_opaque_softc *hif_ctx;
	int rc = 0;
	hdd_context_t *hdd_ctx;

	NAPI_DEBUG("-->");
@@ -116,11 +117,20 @@
		rc = hif_napi_create(hif_ctx, hdd_napi_poll,
				     QCA_NAPI_BUDGET,
				     QCA_NAPI_DEF_SCALE);
		if (rc < 0) {
			hdd_err("ERR(%d) creating NAPI instances",
				rc);
		} else {
			hdd_info("napi instances were created. Map=0x%x", rc);
			hdd_ctx = cds_get_context(QDF_MODULE_ID_HDD);
			if (unlikely(NULL == hdd_ctx)) {
				QDF_ASSERT(0);
				rc = -EFAULT;
			} else {
				rc = hdd_napi_event(NAPI_EVT_INI_FILE,
						    (void *)hdd_ctx->napi_enable);
			}
		}
	}

	NAPI_DEBUG("<-- [rc=%d]", rc);
@@ -204,20 +214,16 @@ int hdd_napi_enabled(int id)
}
/**
 * hdd_napi_event() - relay the event detected by HDD to HIF NAPI event handler
 * @event: event code
 * @data : event-specific auxiliary data
 *
 * See the function documentation in hif_napi.c::hif_napi_event for the list
 * of events and how each of them is handled.
 *
 * Return:
 *  < 0: error code
 *  = 0: event handled successfully
 */
int hdd_napi_event(enum qca_napi_event event, void *data)
{
@@ -236,6 +242,61 @@ int hdd_napi_event(enum qca_napi_event event, void *data)
	return rc;
}
#ifdef HELIUMPLUS
/**
 * hdd_napi_apply_throughput_policy() - implement the throughput action policy
 * @hddctx:     HDD context
 * @tx_packets: number of tx packets in the last interval
 * @rx_packets: number of rx packets in the last interval
 *
 * Called by hdd_bus_bw_compute_cb, checks the number of packets in the last
 * interval, and determines the desired napi throughput state (HI/LO). If
 * the desired state is different from the current one, it invokes the event
 * handler to switch to the desired state.
 *
 * The policy implementation is limited to this function.
 * The current policy is: determine the NAPI mode based on the condition
 * (total number of packets > medium threshold), where
 * - tx packets are included because:
 *   a- tx-completions arrive at one of the rx CEs
 *   b- in TCP, a lot of TX implies ~(tx/2) rx (ACKs)
 *   c- it lets us use the same normalized criteria as the ini file
 * - the medium threshold (default: 500 packets / 10 ms) is used because
 *   we would like to be more reactive.
 *
 * Return: 0 : no action taken, or action return code
 *         !0: error, or action error code
 */
int hdd_napi_apply_throughput_policy(struct hdd_context_s *hddctx,
				     uint64_t tx_packets,
				     uint64_t rx_packets)
{
	int rc = 0;
	uint64_t packets = tx_packets + rx_packets;
	enum qca_napi_tput_state req_state;
	struct qca_napi_data *napid = hdd_napi_get_all();
	int enabled = 0;	/* initialized: reported in the error path below */

	NAPI_DEBUG("-->%s(tx=%lld, rx=%lld)", __func__, tx_packets, rx_packets);

	if ((napid != NULL) &&
	    (enabled = hdd_napi_enabled(HDD_NAPI_ANY))) {
		if (packets > hddctx->config->busBandwidthHighThreshold)
			req_state = QCA_NAPI_TPUT_HI;
		else
			req_state = QCA_NAPI_TPUT_LO;

		if (req_state != napid->napi_mode)
			rc = hdd_napi_event(NAPI_EVT_TPUT_STATE,
					    (void *)req_state);
	} else {
		hdd_err("ERR: napid (%p) NULL or napi_enabled (%d) FALSE",
			napid, enabled);
	}

	return rc;
}
#endif /* HELIUMPLUS */
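
When the requested state differs from napid->napi_mode, the policy does not reprogram irqs itself: it funnels the request through the same event entry point used for all other NAPI events, and hif_napi_event() on the HIF side performs the irq/CPU management this commit refers to. A condensed sketch of that relay (hif-side behavior summarized from the hif_napi.c documentation referenced above):

	/* as issued by the policy above on a LO -> HI transition */
	rc = hdd_napi_event(NAPI_EVT_TPUT_STATE, (void *)QCA_NAPI_TPUT_HI);
	/* hdd_napi_event() resolves the HIF context and forwards the event
	 * to hif_napi_event(), which applies the irq-affinity/CPU changes */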
/**
 * hdd_napi_poll() - NAPI poll function
 * @napi : pointer to NAPI struct