qcacld-3.0: Affine threads to perf core in high tput

Affine DP RX threads to the perf cluster in high-throughput scenarios.
High throughput is detected using the existing logic from the bandwidth
timer.

Change-Id: Ieb98c6930807ba42be7f5b4d0b8a78dfb197ba27
CRs-Fixed: 2474322
Author: Mohit Khanna
Date: 2019-06-18 20:00:50 -07:00
Committed by: nshrivas
Parent: 9330148987
Commit: c77d69d19b
4 changed files with 83 additions and 5 deletions
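For orientation, a minimal sketch of the flow this change plugs into, as it would sit inside cds_sched.c: the bandwidth timer's periodic throughput vote is assumed to be reduced to a boolean before it reaches the scheduler. The helper name below is hypothetical; only cds_sched_find_attach_cpu() and its high_throughput flag come from this change.

static void example_on_tput_vote_change(p_cds_sched_context sched_ctx,
                                        bool high_tput_required)
{
        /*
         * Hypothetical glue: a change in the bandwidth timer's vote triggers
         * a re-evaluation of which cluster the DP RX threads may run on.
         */
        cds_sched_find_attach_cpu(sched_ctx, high_tput_required);
}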

cds_sched.c

@@ -32,6 +32,7 @@
#include "cds_sched.h"
#include <wlan_hdd_power.h>
#include "wma_types.h"
#include <dp_txrx.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/cpu.h>
@@ -68,11 +69,18 @@ static QDF_STATUS cds_alloc_ol_rx_pkt_freeq(p_cds_sched_context pSchedContext);
#define CDS_CPU_CLUSTER_TYPE_PERF 1
static inline
int cds_set_cpus_allowed_ptr(struct task_struct *task, unsigned long cpu)
int cds_set_cpus_allowed_ptr_with_cpu(struct task_struct *task,
unsigned long cpu)
{
return set_cpus_allowed_ptr(task, cpumask_of(cpu));
}
static inline
int cds_set_cpus_allowed_ptr_with_mask(struct task_struct *task,
qdf_cpu_mask *new_mask)
{
return set_cpus_allowed_ptr(task, new_mask);
}
void cds_set_rx_thread_cpu_mask(uint8_t cpu_affinity_mask)
{
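A usage sketch of the split introduced above, as it might sit inside cds_sched.c (the function and variable names here are hypothetical): the hotplug path keeps pinning the RX thread to one preferred CPU, while the high-throughput path hands over a whole cluster mask.

static void example_affine_rx_thread(struct task_struct *rx_task,
                                     unsigned long pref_cpu,
                                     qdf_cpu_mask *cluster_mask,
                                     bool high_tput)
{
        if (high_tput)
                /* allow every CPU in the (perf) cluster mask */
                cds_set_cpus_allowed_ptr_with_mask(rx_task, cluster_mask);
        else
                /* fall back to a single preferred CPU */
                cds_set_cpus_allowed_ptr_with_cpu(rx_task, pref_cpu);
}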
@@ -109,7 +117,7 @@ static void cds_rx_thread_log_cpu_affinity_change(unsigned char core_affine_cnt,
cpumap_print_to_pagebuf(false, old_mask_str, old_mask);
cpumap_print_to_pagebuf(false, new_mask_str, new_mask);
cds_debug("num online perf cores %d, high tput req %d, Rx_thread old mask %s new mask %s",
cds_debug("num online cores %d, high tput req %d, Rx_thread old mask %s new mask %s",
core_affine_cnt, tput_req, old_mask_str, new_mask_str);
}
#else
@@ -141,8 +149,9 @@ static int cds_sched_find_attach_cpu(p_cds_sched_context pSchedContext,
bool high_throughput)
{
unsigned char core_affine_count = 0;
struct cpumask new_mask;
qdf_cpu_mask new_mask;
unsigned long cpus;
struct cds_config_info *cds_cfg;
cds_debug("num possible cpu %d", num_possible_cpus());
@@ -180,8 +189,14 @@ static int cds_sched_find_attach_cpu(p_cds_sched_context pSchedContext,
&new_mask);
if (!cpumask_equal(&pSchedContext->rx_thread_cpu_mask, &new_mask)) {
cds_cfg = cds_get_ini_config();
cpumask_copy(&pSchedContext->rx_thread_cpu_mask, &new_mask);
set_cpus_allowed_ptr(pSchedContext->ol_rx_thread, &new_mask);
if (cds_cfg->enable_dp_rx_threads)
dp_txrx_set_cpu_mask(cds_get_context(QDF_MODULE_ID_SOC),
&new_mask);
else
cds_set_cpus_allowed_ptr_with_mask(pSchedContext->ol_rx_thread,
&new_mask);
}
return 0;
@@ -335,7 +350,8 @@ static void __cds_cpu_hotplug_notify(uint32_t cpu, bool cpu_up)
return;
if (pSchedContext->ol_rx_thread &&
!cds_set_cpus_allowed_ptr(pSchedContext->ol_rx_thread, pref_cpu))
!cds_set_cpus_allowed_ptr_with_cpu(pSchedContext->ol_rx_thread,
pref_cpu))
affine_cpu = pref_cpu;
}

dp_rx_thread.c

@@ -761,3 +761,17 @@ struct napi_struct *dp_rx_tm_get_napi_context(struct dp_rx_tm_handle *rx_tm_hdl,
return &rx_tm_hdl->rx_thread[rx_ctx_id]->napi;
}
QDF_STATUS dp_rx_tm_set_cpu_mask(struct dp_rx_tm_handle *rx_tm_hdl,
qdf_cpu_mask *new_mask)
{
int i = 0;
for (i = 0; i < rx_tm_hdl->num_dp_rx_threads; i++) {
if (!rx_tm_hdl->rx_thread[i])
continue;
qdf_thread_set_cpus_allowed_mask(rx_tm_hdl->rx_thread[i]->task,
new_mask);
}
return QDF_STATUS_SUCCESS;
}
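The QDF call used in the loop above is not part of this change; on Linux builds it is assumed to boil down to the kernel primitive. A minimal stand-in, for illustration only (the helper name is hypothetical):

static inline int example_set_thread_cpus_allowed(struct task_struct *task,
                                                  struct cpumask *new_mask)
{
        /* assumed Linux-side equivalent of qdf_thread_set_cpus_allowed_mask() */
        return set_cpus_allowed_ptr(task, new_mask);
}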

dp_rx_thread.h

@@ -224,4 +224,15 @@ dp_rx_thread_get_wait_queue(struct dp_rx_tm_handle_cmn *rx_tm_handle_cmn)
*/
struct napi_struct *dp_rx_tm_get_napi_context(struct dp_rx_tm_handle *rx_tm_hdl,
uint8_t rx_ctx_id);
/**
* dp_rx_tm_set_cpu_mask() - set CPU mask for RX threads
* @rx_tm_hdl: dp_rx_tm_handle containing the RX thread infrastructure
* @new_mask: New CPU mask pointer
*
* Return: QDF_STATUS_SUCCESS on success, error qdf status on failure
*/
QDF_STATUS dp_rx_tm_set_cpu_mask(struct dp_rx_tm_handle *rx_tm_hdl,
qdf_cpu_mask *new_mask);
#endif /* __DP_RX_THREAD_H */
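For reference, the call chain this declaration slots into; every call below is either introduced or used by this change, the bottom one via the QDF layer:

/*
 * cds_sched_find_attach_cpu()                        cds_sched.c, on tput vote
 *   -> dp_txrx_set_cpu_mask(soc, new_mask)           dp_txrx.h, resolves handle
 *     -> dp_rx_tm_set_cpu_mask(rx_tm_hdl, new_mask)  declared here
 *       -> qdf_thread_set_cpus_allowed_mask()        applied per DP RX thread
 */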

dp_txrx.h

@@ -257,6 +257,36 @@ struct napi_struct *dp_rx_get_napi_context(ol_txrx_soc_handle soc,
return dp_rx_tm_get_napi_context(&dp_ext_hdl->rx_tm_hdl, rx_ctx_id);
}
/**
* dp_txrx_set_cpu_mask() - set CPU mask for RX threads
* @soc: ol_txrx_soc_handle object
* @new_mask: New CPU mask pointer
*
* Return: QDF_STATUS_SUCCESS on success, error qdf status on failure
*/
static inline
QDF_STATUS dp_txrx_set_cpu_mask(ol_txrx_soc_handle soc, qdf_cpu_mask *new_mask)
{
struct dp_txrx_handle *dp_ext_hdl;
QDF_STATUS qdf_status = QDF_STATUS_SUCCESS;
if (!soc) {
qdf_status = QDF_STATUS_E_INVAL;
goto ret;
}
dp_ext_hdl = cdp_soc_get_dp_txrx_handle(soc);
if (!dp_ext_hdl) {
qdf_status = QDF_STATUS_E_FAULT;
goto ret;
}
qdf_status = dp_rx_tm_set_cpu_mask(&dp_ext_hdl->rx_tm_hdl, new_mask);
ret:
return qdf_status;
}
#else
static inline
@@ -299,5 +329,12 @@ struct napi_struct *dp_rx_get_napi_context(ol_txrx_soc_handle soc,
{
return NULL;
}
static inline
QDF_STATUS dp_txrx_set_cpu_mask(ol_txrx_soc_handle soc, qdf_cpu_mask *new_mask)
{
return QDF_STATUS_SUCCESS;
}
#endif /* FEATURE_WLAN_DP_RX_THREADS */
#endif /* _DP_TXRX_H */
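Finally, a caller-side sketch mirroring the cds_sched.c hunk at the top of this change; the error handling is illustrative, since the actual caller ignores the return value, and the function name is hypothetical.

static void example_apply_rx_thread_mask(qdf_cpu_mask *new_mask)
{
        QDF_STATUS status;

        /* resolve the dp_txrx handle and fan the mask out to all RX threads */
        status = dp_txrx_set_cpu_mask(cds_get_context(QDF_MODULE_ID_SOC),
                                      new_mask);
        if (QDF_IS_STATUS_ERROR(status))
                cds_debug("failed to set DP RX thread cpu mask");
}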