qcacld-3.0: Fix for race condition in peer map/unmap handlers
When there are multiple copy engines (for example in ihelium), the peer map and unmap handlers may execute concurrently on different CPUs. These handlers access global peer data structures and are not SMP-safe. Refactor the existing code to make it SMP-safe.

Change-Id: I3f52e17a80d7eae8d4c2fb88da7f57f64455b031
CRs-Fixed: 1050653
parent b257bff22b
commit 37ffb29390
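To see the shape of the fix in isolation before reading the diff: the sketch below is a hypothetical miniature of the pattern this commit introduces, a dedicated lock serializing the map/unmap handlers plus a per-ID mapping count, with pthread mutexes and plain ints standing in for qdf_spinlock_t and qdf_atomic_t. All demo_* names are invented for illustration; none of this is the qcacld-3.0 API.

#include <pthread.h>
#include <stdio.h>

#define DEMO_MAX_PEERS 4

struct demo_peer {
	int ref_cnt;	/* mirrors peer->ref_cnt; unused in this miniature */
};

struct demo_pdev {
	struct {
		struct demo_peer *peer;
		int peer_id_ref_cnt;	/* how many map events reference this ID */
	} peer_id_to_obj_map[DEMO_MAX_PEERS];
	pthread_mutex_t peer_map_unmap_lock;	/* role of the new qdf spinlock */
};

/* Map handler: publish the peer and bump the per-ID count under the lock. */
static void demo_map(struct demo_pdev *pdev, struct demo_peer *peer, int id)
{
	pthread_mutex_lock(&pdev->peer_map_unmap_lock);
	if (!pdev->peer_id_to_obj_map[id].peer) {
		pdev->peer_id_to_obj_map[id].peer = peer;
		pdev->peer_id_to_obj_map[id].peer_id_ref_cnt = 0;
	}
	pdev->peer_id_to_obj_map[id].peer_id_ref_cnt++;
	pthread_mutex_unlock(&pdev->peer_map_unmap_lock);
}

/* Unmap handler: only the unmap that drops the count to zero clears the entry. */
static void demo_unmap(struct demo_pdev *pdev, int id)
{
	pthread_mutex_lock(&pdev->peer_map_unmap_lock);
	if (--pdev->peer_id_to_obj_map[id].peer_id_ref_cnt == 0)
		pdev->peer_id_to_obj_map[id].peer = NULL;
	pthread_mutex_unlock(&pdev->peer_map_unmap_lock);
}

static struct demo_pdev g_pdev = {
	.peer_map_unmap_lock = PTHREAD_MUTEX_INITIALIZER,
};
static struct demo_peer g_peer;

/* Two workers play the role of map/unmap events arriving on two CPUs. */
static void *worker(void *arg)
{
	int i;

	(void)arg;
	for (i = 0; i < 100000; i++) {
		demo_map(&g_pdev, &g_peer, 1);
		demo_unmap(&g_pdev, 1);
	}
	return NULL;
}

int main(void)
{
	pthread_t t1, t2;

	pthread_create(&t1, NULL, worker, NULL);
	pthread_create(&t2, NULL, worker, NULL);
	pthread_join(t1, NULL);
	pthread_join(t2, NULL);
	printf("final peer_id_ref_cnt: %d (expect 0)\n",
	       g_pdev.peer_id_to_obj_map[1].peer_id_ref_cnt);
	return 0;
}

Without the lock, the read-modify-write of peer_id_ref_cnt and the publish/clear of the map entry can interleave across CPUs, which is exactly the race the commit message describes.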
@@ -1384,6 +1384,7 @@ ol_txrx_pdev_post_attach(ol_txrx_pdev_handle pdev)
 	qdf_spinlock_create(&pdev->peer_ref_mutex);
 	qdf_spinlock_create(&pdev->rx.mutex);
 	qdf_spinlock_create(&pdev->last_real_peer_mutex);
+	qdf_spinlock_create(&pdev->peer_map_unmap_lock);
 	OL_TXRX_PEER_STATS_MUTEX_INIT(pdev);
 
 	if (OL_RX_REORDER_TRACE_ATTACH(pdev) != A_OK)
@@ -1660,6 +1661,7 @@ void ol_txrx_pdev_detach(ol_txrx_pdev_handle pdev, int force)
 	qdf_spinlock_destroy(&pdev->peer_ref_mutex);
 	qdf_spinlock_destroy(&pdev->last_real_peer_mutex);
 	qdf_spinlock_destroy(&pdev->rx.mutex);
+	qdf_spinlock_destroy(&pdev->peer_map_unmap_lock);
 #ifdef QCA_SUPPORT_TX_THROTTLE
 	/* Thermal Mitigation */
 	qdf_spinlock_destroy(&pdev->tx_throttle.mutex);
@@ -69,20 +69,6 @@ static int ol_txrx_log2_ceil(unsigned value)
 	return log2;
 }
 
-static int
-ol_txrx_peer_find_add_id_to_obj(struct ol_txrx_peer_t *peer, uint16_t peer_id)
-{
-	int i;
-
-	for (i = 0; i < MAX_NUM_PEER_ID_PER_PEER; i++) {
-		if (peer->peer_ids[i] == HTT_INVALID_PEER) {
-			peer->peer_ids[i] = peer_id;
-			return 0; /* success */
-		}
-	}
-	return 1; /* failure */
-}
-
 /*=== function definitions for peer MAC addr --> peer object hash table =====*/
 
 /*
@@ -343,55 +329,82 @@ ol_txrx_peer_find_add_id(struct ol_txrx_pdev_t *pdev,
 			 uint8_t *peer_mac_addr, uint16_t peer_id)
 {
 	struct ol_txrx_peer_t *peer;
+	int status;
+	int del_peer_ref = 0;
+	int i;
 
 	/* check if there's already a peer object with this MAC address */
 	peer =
 		ol_txrx_peer_find_hash_find(pdev, peer_mac_addr,
 					    1 /* is aligned */, 0);
 	TXRX_PRINT(TXRX_PRINT_LEVEL_INFO1, "%s: peer %p ID %d\n", __func__,
 		   peer, peer_id);
-	if (peer) {
-		/* peer's ref count was already incremented by
-		   peer_find_hash_find */
-		if (!pdev->peer_id_to_obj_map[peer_id].peer) {
-			pdev->peer_id_to_obj_map[peer_id].peer = peer;
-			qdf_atomic_init
-			(&pdev->peer_id_to_obj_map[peer_id].peer_id_ref_cnt);
-		}
-		qdf_atomic_inc
-			(&pdev->peer_id_to_obj_map[peer_id].peer_id_ref_cnt);
-
-		TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
-			   "%s: peer %p ID %d peer_id_ref_cnt %d peer->ref_cnt %d\n",
-			   __func__,
-			   peer, peer_id,
-			   qdf_atomic_read(&pdev->
-					   peer_id_to_obj_map[peer_id].
-					   peer_id_ref_cnt),
-			   qdf_atomic_read(&peer->ref_cnt));
-
-		/*
-		 * remove the reference added in ol_txrx_peer_find_hash_find.
-		 * the reference for the first peer id is already added in
-		 * ol_txrx_peer_attach.
-		 * Riva/Pronto has one peer id for each peer.
-		 * Peregrine/Rome has two peer id for each peer.
-		 */
-		if (peer->peer_ids[0] == HTT_INVALID_PEER) {
-			ol_txrx_peer_unref_delete(peer);
-		}
-		if (ol_txrx_peer_find_add_id_to_obj(peer, peer_id)) {
-			/* TBDXXX: assert for now */
-			qdf_assert(0);
-		}
-		return;
-	}
-	/*
-	 * Currently peer IDs are assigned for vdevs as well as peers.
-	 * If the peer ID is for a vdev, then we will fail to find a peer
-	 * with a matching MAC address.
-	 */
-	/* TXRX_ASSERT2(0); */
-	TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
-		   "%s: peer not found for ID %d\n", __func__, peer_id);
+
+	if (!peer) {
+		/*
+		 * Currently peer IDs are assigned for vdevs as well as peers.
+		 * If the peer ID is for a vdev, then we will fail to find a
+		 * peer with a matching MAC address.
+		 */
+		TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
+			   "%s: peer not found for ID %d\n", __func__, peer_id);
+		return;
+	}
+
+	qdf_spin_lock(&pdev->peer_map_unmap_lock);
+
+	/* peer's ref count was already incremented by
+	 * peer_find_hash_find
+	 */
+	if (!pdev->peer_id_to_obj_map[peer_id].peer) {
+		pdev->peer_id_to_obj_map[peer_id].peer = peer;
+		qdf_atomic_init
+			(&pdev->peer_id_to_obj_map[peer_id].peer_id_ref_cnt);
+	}
+	qdf_atomic_inc
+		(&pdev->peer_id_to_obj_map[peer_id].peer_id_ref_cnt);
+
+	if (peer->peer_ids[0] == HTT_INVALID_PEER)
+		del_peer_ref = 1;
+
+	status = 1;
+	for (i = 0; i < MAX_NUM_PEER_ID_PER_PEER; i++) {
+		if (peer->peer_ids[i] == HTT_INVALID_PEER) {
+			peer->peer_ids[i] = peer_id;
+			status = 0;
+			break;
+		}
+	}
+
+	qdf_spin_unlock(&pdev->peer_map_unmap_lock);
+
+	/*
+	 * remove the reference added in ol_txrx_peer_find_hash_find.
+	 * the reference for the first peer id is already added in
+	 * ol_txrx_peer_attach.
+	 * Riva/Pronto has one peer id for each peer.
+	 * Peregrine/Rome has two peer id for each peer.
+	 */
+	if (del_peer_ref) {
+		TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
+			   "%s: peer %p ID %d peer_id_ref_cnt %d peer->ref_cnt %d\n",
+			   __func__, peer, peer_id,
+			   qdf_atomic_read(&pdev->
+					   peer_id_to_obj_map[peer_id].
+					   peer_id_ref_cnt),
+			   qdf_atomic_read(&peer->ref_cnt));
+		/* Keeping ol_txrx_peer_unref_delete outside the
+		 * peer_map_unmap_spinlock as the unref function is protected by
+		 * peer_ref_mutex
+		 */
+		ol_txrx_peer_unref_delete(peer);
+	}
+
+	if (status) {
+		/* TBDXXX: assert for now */
+		qdf_assert(0);
+	}
 }
 
 /*=== allocation / deallocation function definitions ========================*/
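A note on the ordering above: the map path records its decision in del_peer_ref under peer_map_unmap_lock, but calls ol_txrx_peer_unref_delete() only after dropping it, since (per the in-code comment) the unref path is protected by peer_ref_mutex; calling it under the map lock would nest the two locks and risk an AB/BA deadlock against any path that acquires them in the opposite order. A minimal sketch of this "decide under the lock, act after dropping it" idiom, with pthread mutexes and invented names (lock_a, lock_b, do_unref), not the driver's actual locks:

#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t lock_a = PTHREAD_MUTEX_INITIALIZER;	/* peer_map_unmap_lock role */
static pthread_mutex_t lock_b = PTHREAD_MUTEX_INITIALIZER;	/* peer_ref_mutex role */
static int ref_cnt = 2;

/* Takes lock_b internally, as ol_txrx_peer_unref_delete() takes peer_ref_mutex. */
static void do_unref(void)
{
	pthread_mutex_lock(&lock_b);
	ref_cnt--;
	pthread_mutex_unlock(&lock_b);
}

static void map_path(bool first_mapping)
{
	bool drop_ref = false;

	pthread_mutex_lock(&lock_a);
	/* ... update the ID map ... */
	if (first_mapping)
		drop_ref = true;	/* record the decision under lock_a */
	pthread_mutex_unlock(&lock_a);

	if (drop_ref)
		do_unref();	/* act only once lock_a is released */
}

int main(void)
{
	map_path(true);	/* first mapping: the extra hash-find reference is dropped */
	return ref_cnt == 1 ? 0 : 1;
}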
@@ -498,32 +511,51 @@ void ol_txrx_peer_tx_ready_handler(ol_txrx_pdev_handle pdev, uint16_t peer_id)
 void ol_rx_peer_unmap_handler(ol_txrx_pdev_handle pdev, uint16_t peer_id)
 {
 	struct ol_txrx_peer_t *peer;
-	peer = (peer_id == HTT_INVALID_PEER) ? NULL :
-	       pdev->peer_id_to_obj_map[peer_id].peer;
-	TXRX_PRINT(TXRX_PRINT_LEVEL_INFO1,
-		   "%s: peer %p with ID %d to be unmapped.\n", __func__, peer,
-		   peer_id);
+	int i = 0;
 
-	if (qdf_atomic_dec_and_test
-		(&pdev->peer_id_to_obj_map[peer_id].peer_id_ref_cnt)) {
-		pdev->peer_id_to_obj_map[peer_id].peer = NULL;
+	if (peer_id == HTT_INVALID_PEER) {
+		TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
+			   "%s: invalid peer ID %d\n", __func__, peer_id);
+		return;
 	}
-	/*
-	 * Currently peer IDs are assigned for vdevs as well as peers.
-	 * If the peer ID is for a vdev, then the peer pointer stored
-	 * in peer_id_to_obj_map will be NULL.
-	 */
-	if (!peer)
-		return;
-	/*
-	 * Remove a reference to the peer.
-	 * If there are no more references, delete the peer object.
-	 */
+
+	peer = pdev->peer_id_to_obj_map[peer_id].peer;
+
+	if (peer == NULL) {
+		/*
+		 * Currently peer IDs are assigned for vdevs as well as peers.
+		 * If the peer ID is for a vdev, then the peer pointer stored
+		 * in peer_id_to_obj_map will be NULL.
+		 */
+		return;
+	}
+
+	qdf_spin_lock(&pdev->peer_map_unmap_lock);
+
+	if (qdf_atomic_dec_and_test
+		(&pdev->peer_id_to_obj_map[peer_id].peer_id_ref_cnt)) {
+		pdev->peer_id_to_obj_map[peer_id].peer = NULL;
+
+		for (i = 0; i < MAX_NUM_PEER_ID_PER_PEER; i++) {
+			if (peer->peer_ids[i] == peer_id) {
+				peer->peer_ids[i] = HTT_INVALID_PEER;
+				break;
+			}
+		}
+	}
+
+	qdf_spin_unlock(&pdev->peer_map_unmap_lock);
+
 	TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
 		   "%s: Remove the ID %d reference to peer %p peer_id_ref_cnt %d\n",
 		   __func__, peer_id, peer,
 		   qdf_atomic_read
 			(&pdev->peer_id_to_obj_map[peer_id].peer_id_ref_cnt));
+
+	/*
+	 * Remove a reference to the peer.
+	 * If there are no more references, delete the peer object.
+	 */
 	ol_txrx_peer_unref_delete(peer);
 }
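The unmap side keys its cleanup off qdf_atomic_dec_and_test(), which atomically decrements the counter and reports whether it reached zero, so exactly one of several racing unmaps wins the right to clear the map entry and the peer_ids slot. A C11 sketch of that dec-and-test idiom (demo_dec_and_test is an invented stand-in, not the qdf implementation):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Returns true only for the caller whose decrement moved the count to zero. */
static bool demo_dec_and_test(atomic_int *cnt)
{
	/* atomic_fetch_sub returns the previous value, so seeing 1 means
	 * this caller performed the 1 -> 0 transition. */
	return atomic_fetch_sub(cnt, 1) == 1;
}

int main(void)
{
	atomic_int map_cnt = 2;	/* e.g. two peer IDs mapped to one peer */

	printf("%d\n", demo_dec_and_test(&map_cnt));	/* 0: still referenced */
	printf("%d\n", demo_dec_and_test(&map_cnt));	/* 1: last unmap, clean up */
	return 0;
}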
@@ -712,6 +712,8 @@ struct ol_txrx_pdev_t {
 	 */
 	OL_RX_MUTEX_TYPE last_real_peer_mutex;
 
+	qdf_spinlock_t peer_map_unmap_lock;
+
 	struct {
 		struct {
 			struct {