qcacld-3.0: Affine RX thread to big cluster during uplink

During uplink traffic, affine the RX thread to the big cluster when
throughput is high and the rx_packet count is above the threshold.

Change-Id: I34034e79ee07c229966a279919acc11d1942b890
CRs-Fixed: 2557745
Alok Kumar 2019-11-21 16:37:36 +05:30 committed by nshrivas
parent 6387417f47
commit 68127f6594
6 changed files with 155 additions and 0 deletions
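The mask uses the same convention as the existing RX_THREAD_CPU_AFFINITY_MASK ini: bit N of the bitmap selects CPU N. A minimal standalone sketch (not driver code; the 4 LITTLE + 4 big core layout and the 0xF0 value are assumptions for illustration) of how such a mask names the big-cluster cores:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        /* Hypothetical 4+4 big.LITTLE SoC: CPUs 0-3 LITTLE, CPUs 4-7 big */
        uint8_t ul_mask = 0xF0; /* example RX_THREAD_UL_CPU_AFFINITY_MASK value */
        int cpu;

        for (cpu = 0; cpu < 8; cpu++)
                if (ul_mask & (1u << cpu))
                        printf("uplink RX thread may run on CPU%d\n", cpu);
        return 0;
}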


@@ -135,6 +135,10 @@ typedef struct _cds_sched_context {
        /* high throughput required */
        bool high_throughput_required;
        /* affinity required during uplink traffic */
        bool rx_affinity_required;
        uint8_t conf_rx_thread_ul_affinity;
#endif
} cds_sched_context, *p_cds_sched_context;
@@ -219,6 +223,25 @@ struct cds_context {
int cds_sched_handle_cpu_hot_plug(void);
int cds_sched_handle_throughput_req(bool high_tput_required);

/**
 * cds_sched_handle_rx_thread_affinity_req() - RX thread affinity request handler
 * @high_throughput: true if high throughput (big-cluster affinity) is required
 *
 * The handler finds the online cores and assigns the RX thread to a
 * suitable core based on the performance requirement.
 *
 * Return: None
 */
void cds_sched_handle_rx_thread_affinity_req(bool high_throughput);

/**
 * cds_set_rx_thread_ul_cpu_mask() - Rx_thread affinity for UL from INI
 * @cpu_affinity_mask: CPU affinity bitmap
 *
 * Return: None
 */
void cds_set_rx_thread_ul_cpu_mask(uint8_t cpu_affinity_mask);

/**
 * cds_set_rx_thread_cpu_mask() - Rx_thread affinity from INI
 * @cpu_affinity_mask: CPU affinity bitmap
@@ -296,6 +319,26 @@ void cds_free_ol_rx_pkt(p_cds_sched_context pSchedContext,
-------------------------------------------------------------------------*/
void cds_free_ol_rx_pkt_freeq(p_cds_sched_context pSchedContext);
#else

/**
 * cds_sched_handle_rx_thread_affinity_req() - RX thread affinity request handler
 * @high_throughput: true if high throughput (big-cluster affinity) is required
 *
 * The handler finds the online cores and assigns the RX thread to a
 * suitable core based on the performance requirement.
 *
 * Return: None
 */
static inline void cds_sched_handle_rx_thread_affinity_req(
                bool high_throughput) {}

/**
 * cds_set_rx_thread_ul_cpu_mask() - Rx_thread affinity for UL from INI
 * @cpu_affinity_mask: CPU affinity bitmap
 *
 * Return: None
 */
static inline void cds_set_rx_thread_ul_cpu_mask(uint8_t cpu_affinity_mask) {}

/**
 * cds_set_rx_thread_cpu_mask() - Rx_thread affinity from INI
 * @cpu_affinity_mask: CPU affinity bitmap

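The empty static inline definitions in the #else branch above keep call sites buildable when the RX thread is compiled out, so callers such as the bus-bandwidth handler need no ifdefs of their own. A minimal sketch of that pattern (the MY_RX_THREAD macro and my_affinity_req name are hypothetical stand-ins):

#include <stdbool.h>
#include <stdio.h>

#ifdef MY_RX_THREAD
/* Real implementation when the feature is built in */
void my_affinity_req(bool high_throughput)
{
        printf("affinity requested: %d\n", high_throughput);
}
#else
/* Stub: the call compiles away when the feature is disabled */
static inline void my_affinity_req(bool high_throughput) {}
#endif

int main(void)
{
        my_affinity_req(true);  /* identical call either way */
        return 0;
}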

@@ -93,6 +93,17 @@ void cds_set_rx_thread_cpu_mask(uint8_t cpu_affinity_mask)
        sched_context->conf_rx_thread_cpu_mask = cpu_affinity_mask;
}

void cds_set_rx_thread_ul_cpu_mask(uint8_t cpu_affinity_mask)
{
        p_cds_sched_context sched_context = get_cds_sched_ctxt();

        if (!sched_context) {
                qdf_err("invalid context");
                return;
        }
        sched_context->conf_rx_thread_ul_affinity = cpu_affinity_mask;
}

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0))
/**
 * cds_rx_thread_log_cpu_affinity_change - Log Rx thread affinity change
@@ -236,6 +247,62 @@ int cds_sched_handle_cpu_hot_plug(void)
        return 0;
}

void cds_sched_handle_rx_thread_affinity_req(bool high_throughput)
{
        p_cds_sched_context pschedcontext = get_cds_sched_ctxt();
        unsigned long cpus;
        qdf_cpu_mask new_mask;
        unsigned char core_affine_count = 0;

        if (!pschedcontext || !pschedcontext->ol_rx_thread)
                return;

        if (cds_is_load_or_unload_in_progress()) {
                cds_err("load or unload in progress");
                return;
        }

        if (pschedcontext->rx_affinity_required == high_throughput)
                return;

        pschedcontext->rx_affinity_required = high_throughput;
        qdf_cpumask_clear(&new_mask);
        if (!high_throughput) {
                /* Attach to all cores, let scheduler decide */
                qdf_cpumask_setall(&new_mask);
                goto affine_thread;
        }
        for_each_online_cpu(cpus) {
                if (topology_physical_package_id(cpus) >
                    CDS_MAX_CPU_CLUSTERS) {
                        cds_err("can handle max %d clusters ",
                                CDS_MAX_CPU_CLUSTERS);
                        return;
                }
                if (pschedcontext->conf_rx_thread_ul_affinity &&
                    (pschedcontext->conf_rx_thread_ul_affinity &
                     (1 << cpus)))
                        qdf_cpumask_set_cpu(cpus, &new_mask);

                core_affine_count++;
        }

affine_thread:
        cds_rx_thread_log_cpu_affinity_change(
                core_affine_count,
                (int)pschedcontext->rx_affinity_required,
                &pschedcontext->rx_thread_cpu_mask,
                &new_mask);

        mutex_lock(&pschedcontext->affinity_lock);
        if (!cpumask_equal(&pschedcontext->rx_thread_cpu_mask, &new_mask)) {
                cpumask_copy(&pschedcontext->rx_thread_cpu_mask, &new_mask);
                cds_set_cpus_allowed_ptr_with_mask(pschedcontext->ol_rx_thread,
                                                   &new_mask);
        }
        mutex_unlock(&pschedcontext->affinity_lock);
}
/**
 * cds_sched_handle_throughput_req - cpu throughput requirement handler
 * @high_tput_required: high throughput is required or not
@@ -438,6 +505,7 @@ QDF_STATUS cds_sched_open(void *p_cds_context,
                                    cds_cpu_before_offline_cb);
        mutex_init(&pSchedContext->affinity_lock);
        pSchedContext->high_throughput_required = false;
        pSchedContext->rx_affinity_required = false;
#endif
        gp_cds_sched_context = pSchedContext;

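cds_sched_handle_rx_thread_affinity_req() therefore acts as a two-state switch: the first high-throughput request pins ol_rx_thread to the configured UL mask, the first low-throughput request releases it to all CPUs, and repeated requests in the same state return early via rx_affinity_required. A standalone model of that behaviour (the 8-bit mask values are illustrative; the real code builds a qdf_cpu_mask and applies it through cds_set_cpus_allowed_ptr_with_mask):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool affinity_active;            /* models rx_affinity_required */
static uint8_t conf_ul_mask = 0xF0;     /* models conf_rx_thread_ul_affinity */

static void affinity_req_model(bool high_throughput)
{
        uint8_t new_mask;

        if (affinity_active == high_throughput)
                return;                 /* no state change, nothing to do */
        affinity_active = high_throughput;

        /* high throughput: configured UL cores; otherwise any online CPU */
        new_mask = high_throughput ? conf_ul_mask : 0xFF;
        printf("RX thread cpumask -> 0x%02x\n", new_mask);
}

int main(void)
{
        affinity_req_model(true);       /* prints 0xf0: pinned to big cluster */
        affinity_req_model(true);       /* ignored, already pinned */
        affinity_req_model(false);      /* prints 0xff: released to all CPUs */
        return 0;
}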

@@ -865,6 +865,31 @@
                "CPU mask to affine Rx_thread")
#endif

/*
 * <ini>
 * RX_THREAD_UL_CPU_AFFINITY_MASK - CPU mask to affine Rx_thread for
 * uplink traffic
 *
 * @Min: 0
 * @Max: 0xFF
 * @Default: 0x0
 *
 * This ini is used to set the Rx_thread CPU affinity for uplink traffic.
 *
 * Supported Feature: Rx_thread
 *
 * Usage: Internal
 *
 * </ini>
 */
#define CFG_DP_RX_THREAD_UL_CPU_MASK \
        CFG_INI_UINT( \
                "RX_THREAD_UL_CPU_AFFINITY_MASK", \
                0, \
                0xFF, \
                0x0, \
                CFG_VALUE_OR_DEFAULT, \
                "CPU mask to affine Rx_thread for uplink traffic")

/*
 * <ini>
 * rpsRxQueueCpuMapList - RPS map for different RX queues
@@ -1354,6 +1379,7 @@
#define CFG_HDD_DP_ALL \
        CFG(CFG_DP_NAPI_CE_CPU_MASK) \
        CFG(CFG_DP_RX_THREAD_CPU_MASK) \
        CFG(CFG_DP_RX_THREAD_UL_CPU_MASK) \
        CFG(CFG_DP_RPS_RX_QUEUE_CPU_MAP_LIST) \
        CFG(CFG_DP_TX_ORPHAN_ENABLE) \
        CFG(CFG_DP_RX_MODE) \

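As a usage example, a mask of 0xF0 would keep the uplink RX thread on CPUs 4-7, the big cluster on a typical 4+4 layout (the value is illustrative; qcacld builds commonly pick the ini up from WCNSS_qcom_cfg.ini):

RX_THREAD_UL_CPU_AFFINITY_MASK=0xF0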

@@ -195,6 +195,7 @@ struct hdd_config {
#endif /* QCA_LL_LEGACY_TX_FLOW_CONTROL */
        uint32_t napi_cpu_affinity_mask;
        /* CPU affinity mask for rx_thread during uplink traffic */
        uint32_t rx_thread_ul_affinity_mask;
        /* CPU affinity mask for rx_thread */
        uint32_t rx_thread_affinity_mask;
        uint8_t cpu_map_list[CFG_DP_RPS_RX_QUEUE_CPU_MAP_LIST_LEN];
        bool multicast_replay_filter;


@@ -3265,6 +3265,10 @@ int hdd_wlan_start_modules(struct hdd_context *hdd_ctx, bool reinit)
                cds_set_rx_thread_cpu_mask(
                        hdd_ctx->config->rx_thread_affinity_mask);

                if (hdd_ctx->config->rx_thread_ul_affinity_mask)
                        cds_set_rx_thread_ul_cpu_mask(
                                hdd_ctx->config->rx_thread_ul_affinity_mask);

                /* initialize components configurations after psoc open */
                ret = hdd_update_components_config(hdd_ctx);
                if (ret) {
@@ -8393,6 +8397,17 @@ static void hdd_pld_request_bus_bandwidth(struct hdd_context *hdd_ctx,
                hdd_set_rps_cpu_mask(hdd_ctx);
        }

        if (hdd_ctx->config->rx_thread_ul_affinity_mask) {
                if (next_vote_level == PLD_BUS_WIDTH_HIGH &&
                    tx_packets >
                    hdd_ctx->config->bus_bw_high_threshold &&
                    rx_packets >
                    hdd_ctx->config->bus_bw_low_threshold)
                        cds_sched_handle_rx_thread_affinity_req(true);
                else if (next_vote_level != PLD_BUS_WIDTH_HIGH)
                        cds_sched_handle_rx_thread_affinity_req(false);
        }

        if (hdd_ctx->config->napi_cpu_affinity_mask)
                hdd_napi_apply_throughput_policy(hdd_ctx,
                                                 tx_packets,

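The trigger reuses the existing bus-bandwidth accounting: big-cluster affinity is requested only while the vote is PLD_BUS_WIDTH_HIGH with the per-window TX count above bus_bw_high_threshold and the RX count above bus_bw_low_threshold, and it is dropped as soon as the vote falls below HIGH. A worked example of that predicate with made-up threshold and counter values:

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical per-window thresholds (the driver reads them from ini) */
#define BUS_BW_HIGH_THRESHOLD 9000
#define BUS_BW_LOW_THRESHOLD   512

static bool ul_affinity_wanted(bool vote_is_high, unsigned long tx_packets,
                               unsigned long rx_packets)
{
        return vote_is_high &&
               tx_packets > BUS_BW_HIGH_THRESHOLD &&
               rx_packets > BUS_BW_LOW_THRESHOLD;
}

int main(void)
{
        /* Uplink-heavy window: plenty of TX, RX is mostly TCP ACKs */
        printf("%d\n", ul_affinity_wanted(true, 12000, 2000));  /* 1 */
        /* Downlink-heavy window: TX stays below the high threshold */
        printf("%d\n", ul_affinity_wanted(true, 3000, 20000));  /* 0 */
        return 0;
}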

@@ -3183,6 +3183,8 @@ void hdd_dp_cfg_update(struct wlan_objmgr_psoc *psoc,
        config->napi_cpu_affinity_mask =
                cfg_get(psoc, CFG_DP_NAPI_CE_CPU_MASK);
        config->rx_thread_ul_affinity_mask =
                cfg_get(psoc, CFG_DP_RX_THREAD_UL_CPU_MASK);
        config->rx_thread_affinity_mask =
                cfg_get(psoc, CFG_DP_RX_THREAD_CPU_MASK);
        qdf_uint8_array_parse(cfg_get(psoc, CFG_DP_RPS_RX_QUEUE_CPU_MAP_LIST),