dfc: reset tx queue

When a bearer is removed, calling qdisc_reset() on a TX queue can
race with qdisc dequeue for lockless qdiscs such as pfifo_fast.

This change instead purges packets on bearer removal with a mechanism
that does not depend on the qdisc implementation: the affected MQs are
flagged and stale packets are dropped in the transmit path.

Change-Id: I8f9201809853b07293896d6cb8e010e9e0904e46
Signed-off-by: Weiyi Chen <quic_weiyic@quicinc.com>
Weiyi Chen, 2021-10-14 12:31:07 -07:00
commit 8c4afb2b42 (parent 37ee4eb551)
4 changed files with 50 additions and 23 deletions
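As background for the review, below is a minimal userspace sketch of the ordering
the new mechanism relies on. It is only an analogue: the struct, the thread setup,
the function names bearer_put()/xmit() and the build line are assumptions for
illustration, and C11 seq_cst atomics stand in for the driver's smp_mb()/READ_ONCE().
The teardown side publishes the cleared bearer and the drop flag before re-enabling
flow control, so a transmit path that observes the enabled queue also observes the
drop flag and frees the packet instead of queueing it toward a bearer that no longer
exists.

/*
 * Illustrative only -- not driver code.
 * Build (assumed): cc -std=c11 -pthread sketch.c
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static struct {
        _Atomic(void *) bearer;       /* NULL once the bearer is torn down */
        atomic_bool drop_on_remove;   /* tells the xmit path to drop packets */
        atomic_bool flow_enabled;     /* stands in for flow-control enable */
} mq;

/* Teardown side (mirrors __qmi_rmnet_bearer_put): clear the bearer and set
 * the drop flag before the queue is re-enabled. */
static void *bearer_put(void *unused)
{
        (void)unused;
        atomic_store(&mq.bearer, NULL);
        atomic_store(&mq.drop_on_remove, true);
        atomic_store(&mq.flow_enabled, true);
        return NULL;
}

/* Transmit side (mirrors qmi_rmnet_get_flow_state + rmnet_vnd_start_xmit):
 * if the enabled queue is observed, the cleared bearer and the drop flag are
 * observed as well, so the packet is dropped rather than queued. */
static void *xmit(void *unused)
{
        (void)unused;
        if (atomic_load(&mq.flow_enabled) &&
            !atomic_load(&mq.bearer) &&
            atomic_load(&mq.drop_on_remove))
                puts("drop: bearer removed");
        else
                puts("transmit (or still flow-controlled)");
        return NULL;
}

int main(void)
{
        pthread_t t1, t2;

        atomic_store(&mq.bearer, (void *)&mq);  /* pretend a bearer exists */
        pthread_create(&t1, NULL, bearer_put, NULL);
        pthread_create(&t2, NULL, xmit, NULL);
        pthread_join(t1, NULL);
        pthread_join(t2, NULL);
        return 0;
}

In the driver itself the writer side is __qmi_rmnet_bearer_put() and the reader side
is rmnet_vnd_start_xmit() via qmi_rmnet_get_flow_state(), as the hunks below show.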


@@ -222,21 +222,6 @@ int qmi_rmnet_flow_control(struct net_device *dev, u32 mq_idx, int enable)
         return 0;
 }
 
-static void qmi_rmnet_reset_txq(struct net_device *dev, unsigned int txq)
-{
-        struct Qdisc *qdisc;
-
-        if (unlikely(txq >= dev->num_tx_queues))
-                return;
-
-        qdisc = rtnl_dereference(netdev_get_tx_queue(dev, txq)->qdisc);
-        if (qdisc) {
-                spin_lock_bh(qdisc_lock(qdisc));
-                qdisc_reset(qdisc);
-                spin_unlock_bh(qdisc_lock(qdisc));
-        }
-}
-
 /**
  * qmi_rmnet_watchdog_fn - watchdog timer func
  */
@@ -362,17 +347,15 @@ static void __qmi_rmnet_bearer_put(struct net_device *dev,
                                 continue;
 
                         mq->bearer = NULL;
                         if (reset) {
-                                qmi_rmnet_reset_txq(dev, i);
-                                qmi_rmnet_flow_control(dev, i, 1);
+                                mq->drop_on_remove = reset;
+                                smp_mb();
 
+                                qmi_rmnet_flow_control(dev, i, 1);
                                 if (dfc_mode == DFC_MODE_SA) {
                                         j = i + ACK_MQ_OFFSET;
-                                        qmi_rmnet_reset_txq(dev, j);
                                         qmi_rmnet_flow_control(dev, j, 1);
                                 }
                         }
                 }
 
         /* Remove from bearer map */
         list_del(&bearer->list);
@@ -394,6 +377,8 @@ static void __qmi_rmnet_update_mq(struct net_device *dev,
         mq = &qos_info->mq[itm->mq_idx];
         if (!mq->bearer) {
                 mq->bearer = bearer;
+                mq->drop_on_remove = false;
+                smp_mb();
 
                 if (dfc_mode == DFC_MODE_SA) {
                         bearer->mq_idx = itm->mq_idx;
@@ -877,6 +862,26 @@ bool qmi_rmnet_all_flows_enabled(struct net_device *dev)
 EXPORT_SYMBOL(qmi_rmnet_all_flows_enabled);
 
 #ifdef CONFIG_QTI_QMI_DFC
+bool qmi_rmnet_get_flow_state(struct net_device *dev, struct sk_buff *skb,
+                              bool *drop)
+{
+        struct qos_info *qos = rmnet_get_qos_pt(dev);
+        int txq = skb->queue_mapping;
+
+        if (txq > ACK_MQ_OFFSET)
+                txq -= ACK_MQ_OFFSET;
+
+        if (unlikely(!qos || txq >= MAX_MQ_NUM))
+                return false;
+
+        /* If the bearer is gone, packets may need to be dropped */
+        *drop = (txq != DEFAULT_MQ_NUM && !READ_ONCE(qos->mq[txq].bearer) &&
+                 READ_ONCE(qos->mq[txq].drop_on_remove));
+
+        return true;
+}
+EXPORT_SYMBOL(qmi_rmnet_get_flow_state);
+
 void qmi_rmnet_burst_fc_check(struct net_device *dev,
                               int ip_type, u32 mark, unsigned int len)
 {


@@ -1,5 +1,6 @@
 /*
  * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021 Qualcomm Innovation Center, Inc. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -59,6 +60,8 @@ void *qmi_rmnet_qos_init(struct net_device *real_dev,
                          struct net_device *vnd_dev, u8 mux_id);
 void qmi_rmnet_qos_exit_pre(void *qos);
 void qmi_rmnet_qos_exit_post(void);
+bool qmi_rmnet_get_flow_state(struct net_device *dev, struct sk_buff *skb,
+                              bool *drop);
 void qmi_rmnet_burst_fc_check(struct net_device *dev,
                               int ip_type, u32 mark, unsigned int len);
 int qmi_rmnet_get_queue(struct net_device *dev, struct sk_buff *skb);
@@ -78,6 +81,13 @@ static inline void qmi_rmnet_qos_exit_post(void)
 {
 }
 
+static inline bool qmi_rmnet_get_flow_state(struct net_device *dev,
+                                            struct sk_buff *skb,
+                                            bool *drop)
+{
+        return false;
+}
+
 static inline void
 qmi_rmnet_burst_fc_check(struct net_device *dev,
                          int ip_type, u32 mark, unsigned int len)


@@ -1,5 +1,6 @@
 /*
  * Copyright (c) 2018-2021, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021 Qualcomm Innovation Center, Inc. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -82,6 +83,7 @@ struct svc_info {
 
 struct mq_map {
         struct rmnet_bearer_map *bearer;
+        bool drop_on_remove;
 };
 
 struct qos_info {


@@ -1,4 +1,5 @@
 /* Copyright (c) 2013-2020, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021 Qualcomm Innovation Center, Inc. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -68,6 +69,7 @@ static netdev_tx_t rmnet_vnd_start_xmit(struct sk_buff *skb,
         int ip_type;
         u32 mark;
         unsigned int len;
+        bool need_to_drop = false;
 
         priv = netdev_priv(dev);
         if (priv->real_dev) {
@@ -76,6 +78,14 @@ static netdev_tx_t rmnet_vnd_start_xmit(struct sk_buff *skb,
                 mark = skb->mark;
                 len = skb->len;
                 trace_rmnet_xmit_skb(skb);
+
+                qmi_rmnet_get_flow_state(dev, skb, &need_to_drop);
+                if (unlikely(need_to_drop)) {
+                        this_cpu_inc(priv->pcpu_stats->stats.tx_drops);
+                        kfree_skb(skb);
+                        return NETDEV_TX_OK;
+                }
+
                 rmnet_egress_handler(skb);
                 qmi_rmnet_burst_fc_check(dev, ip_type, mark, len);
                 qmi_rmnet_work_maybe_restart(rmnet_get_rmnet_port(dev));