wifi: iwlwifi: mvm: protect TXQ list manipulation

[ Upstream commit 923bf981eb ]

Some recent upstream debugging uncovered the fact that in
iwlwifi, the TXQ list manipulation is racy.

Introduce a new state bit for when the TXQ is completely
ready and can be used without locking; if that's not set
yet, acquire the lock and check everything correctly.
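
The fix amounts to double-checked locking: a lock-free fast path
gated on the READY bit, and a locked re-check before the queue is
put on the allocation list. Below is a minimal userspace sketch of
that pattern (C11 atomics plus a pthread mutex); every name in it
is an illustrative stand-in, not the driver's own code:

	#include <pthread.h>
	#include <stdatomic.h>
	#include <stdbool.h>
	#include <stdio.h>

	/* Illustrative stand-ins for the driver's state. */
	static atomic_bool queue_ready = false;  /* like IWL_MVM_TXQ_STATE_READY */
	static pthread_mutex_t add_stream_lock = PTHREAD_MUTEX_INITIALIZER;
	static bool on_pending_list;             /* like !list_empty(&mvmtxq->list) */

	static void transmit(void)        { puts("fast path: transmit directly"); }
	static void schedule_worker(void) { puts("slow path: defer to worker"); }

	/* Fast path: no lock once the queue is marked ready. */
	static void wake_tx_queue(void)
	{
		if (atomic_load(&queue_ready)) {
			transmit();
			return;
		}

		pthread_mutex_lock(&add_stream_lock);
		/* Re-check under the lock: the worker may have marked the
		 * queue ready (or it may already be queued) between our
		 * unlocked check and taking the lock. */
		if (!on_pending_list && !atomic_load(&queue_ready)) {
			on_pending_list = true;
			schedule_worker();
		}
		pthread_mutex_unlock(&add_stream_lock);
	}

	/* Worker side: publish READY first, then unlink under the lock. */
	static void worker_allocated_queue(void)
	{
		atomic_store(&queue_ready, true);
		pthread_mutex_lock(&add_stream_lock);
		on_pending_list = false;
		pthread_mutex_unlock(&add_stream_lock);
	}

	int main(void)
	{
		wake_tx_queue();          /* not ready: defers to the worker */
		worker_allocated_queue(); /* queue becomes usable */
		wake_tx_queue();          /* ready: lock-free transmit */
		return 0;
	}

As in the patch, the worker publishes READY before unlinking the
entry under the lock, so a waker that misses the bit either finds
the entry still queued or sees the bit on its locked re-check;
either way the entry cannot be double-added.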

Reviewed-by: Benjamin Berg <benjamin.berg@intel.com>
Tested-by: Jose Ignacio Tornos Martinez <jtornosm@redhat.com>
Signed-off-by: Johannes Berg <johannes.berg@intel.com>
Signed-off-by: Sasha Levin <sashal@kernel.org>
commit 6c2103d9a8 (parent 742ae1a6c6)
Author:    Johannes Berg, 2023-03-17 10:53:25 +01:00
Committer: Greg Kroah-Hartman

4 changed files, 39 insertions(+), 34 deletions(-)

--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
@@ -757,42 +757,25 @@ static void iwl_mvm_mac_wake_tx_queue(struct ieee80211_hw *hw,
 	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
 	struct iwl_mvm_txq *mvmtxq = iwl_mvm_txq_from_mac80211(txq);
 
-	/*
-	 * Please note that racing is handled very carefully here:
-	 * mvmtxq->txq_id is updated during allocation, and mvmtxq->list is
-	 * deleted afterwards.
-	 * This means that if:
-	 * mvmtxq->txq_id != INVALID_QUEUE && list_empty(&mvmtxq->list):
-	 *	queue is allocated and we can TX.
-	 * mvmtxq->txq_id != INVALID_QUEUE && !list_empty(&mvmtxq->list):
-	 *	a race, should defer the frame.
-	 * mvmtxq->txq_id == INVALID_QUEUE && list_empty(&mvmtxq->list):
-	 *	need to allocate the queue and defer the frame.
-	 * mvmtxq->txq_id == INVALID_QUEUE && !list_empty(&mvmtxq->list):
-	 *	queue is already scheduled for allocation, no need to allocate,
-	 *	should defer the frame.
-	 */
-
-	/* If the queue is allocated TX and return. */
-	if (!txq->sta || mvmtxq->txq_id != IWL_MVM_INVALID_QUEUE) {
-		/*
-		 * Check that list is empty to avoid a race where txq_id is
-		 * already updated, but the queue allocation work wasn't
-		 * finished
-		 */
-		if (unlikely(txq->sta && !list_empty(&mvmtxq->list)))
-			return;
-
+	if (likely(test_bit(IWL_MVM_TXQ_STATE_READY, &mvmtxq->state)) ||
+	    !txq->sta) {
 		iwl_mvm_mac_itxq_xmit(hw, txq);
 		return;
 	}
 
-	/* The list is being deleted only after the queue is fully allocated. */
-	if (!list_empty(&mvmtxq->list))
-		return;
+	/* iwl_mvm_mac_itxq_xmit() will later be called by the worker
+	 * to handle any packets we leave on the txq now
+	 */
 
-	list_add_tail(&mvmtxq->list, &mvm->add_stream_txqs);
-	schedule_work(&mvm->add_stream_wk);
+	spin_lock_bh(&mvm->add_stream_lock);
+	/* The list is being deleted only after the queue is fully allocated. */
+	if (list_empty(&mvmtxq->list) &&
+	    /* recheck under lock */
+	    !test_bit(IWL_MVM_TXQ_STATE_READY, &mvmtxq->state)) {
+		list_add_tail(&mvmtxq->list, &mvm->add_stream_txqs);
+		schedule_work(&mvm->add_stream_wk);
+	}
+	spin_unlock_bh(&mvm->add_stream_lock);
 }
 
 #define CHECK_BA_TRIGGER(_mvm, _trig, _tid_bm, _tid, _fmt...)

--- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
@@ -731,6 +731,7 @@ struct iwl_mvm_txq {
 	atomic_t tx_request;
 #define IWL_MVM_TXQ_STATE_STOP_FULL	0
 #define IWL_MVM_TXQ_STATE_STOP_REDIRECT	1
+#define IWL_MVM_TXQ_STATE_READY		2
 	unsigned long state;
 };
 
@@ -829,6 +830,7 @@ struct iwl_mvm {
 		struct iwl_mvm_tvqm_txq_info tvqm_info[IWL_MAX_TVQM_QUEUES];
 	};
 	struct work_struct add_stream_wk; /* To add streams to queues */
+	spinlock_t add_stream_lock;
 
 	const char *nvm_file_name;
 	struct iwl_nvm_data *nvm_data;

--- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
@@ -1184,6 +1184,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
 	INIT_DELAYED_WORK(&mvm->scan_timeout_dwork, iwl_mvm_scan_timeout_wk);
 	INIT_WORK(&mvm->add_stream_wk, iwl_mvm_add_new_dqa_stream_wk);
 	INIT_LIST_HEAD(&mvm->add_stream_txqs);
+	spin_lock_init(&mvm->add_stream_lock);
 
 	init_waitqueue_head(&mvm->rx_sync_waitq);

--- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
@@ -383,8 +383,11 @@ static int iwl_mvm_disable_txq(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
 		struct iwl_mvm_txq *mvmtxq =
 			iwl_mvm_txq_from_tid(sta, tid);
 
-		mvmtxq->txq_id = IWL_MVM_INVALID_QUEUE;
+		spin_lock_bh(&mvm->add_stream_lock);
 		list_del_init(&mvmtxq->list);
+		clear_bit(IWL_MVM_TXQ_STATE_READY, &mvmtxq->state);
+		mvmtxq->txq_id = IWL_MVM_INVALID_QUEUE;
+		spin_unlock_bh(&mvm->add_stream_lock);
 	}
 
 	/* Regardless if this is a reserved TXQ for a STA - mark it as false */
@@ -478,8 +481,11 @@ static int iwl_mvm_remove_sta_queue_marking(struct iwl_mvm *mvm, int queue)
 			disable_agg_tids |= BIT(tid);
 		mvmsta->tid_data[tid].txq_id = IWL_MVM_INVALID_QUEUE;
 
-		mvmtxq->txq_id = IWL_MVM_INVALID_QUEUE;
+		spin_lock_bh(&mvm->add_stream_lock);
 		list_del_init(&mvmtxq->list);
+		clear_bit(IWL_MVM_TXQ_STATE_READY, &mvmtxq->state);
+		mvmtxq->txq_id = IWL_MVM_INVALID_QUEUE;
+		spin_unlock_bh(&mvm->add_stream_lock);
 	}
 
 	mvmsta->tfd_queue_msk &= ~BIT(queue); /* Don't use this queue anymore */
@@ -1443,12 +1449,22 @@ void iwl_mvm_add_new_dqa_stream_wk(struct work_struct *wk)
 		 * a queue in the function itself.
 		 */
 		if (iwl_mvm_sta_alloc_queue(mvm, txq->sta, txq->ac, tid)) {
+			spin_lock_bh(&mvm->add_stream_lock);
 			list_del_init(&mvmtxq->list);
+			spin_unlock_bh(&mvm->add_stream_lock);
 			continue;
 		}
 
-		list_del_init(&mvmtxq->list);
+		/* now we're ready, any remaining races/concurrency will be
+		 * handled in iwl_mvm_mac_itxq_xmit()
+		 */
+		set_bit(IWL_MVM_TXQ_STATE_READY, &mvmtxq->state);
+
 		local_bh_disable();
+		spin_lock(&mvm->add_stream_lock);
+		list_del_init(&mvmtxq->list);
+		spin_unlock(&mvm->add_stream_lock);
 		iwl_mvm_mac_itxq_xmit(mvm->hw, txq);
 		local_bh_enable();
 	}
@@ -1862,8 +1878,11 @@ static void iwl_mvm_disable_sta_queues(struct iwl_mvm *mvm,
 		struct iwl_mvm_txq *mvmtxq =
 			iwl_mvm_txq_from_mac80211(sta->txq[i]);
 
+		spin_lock_bh(&mvm->add_stream_lock);
 		mvmtxq->txq_id = IWL_MVM_INVALID_QUEUE;
 		list_del_init(&mvmtxq->list);
+		clear_bit(IWL_MVM_TXQ_STATE_READY, &mvmtxq->state);
+		spin_unlock_bh(&mvm->add_stream_lock);
 	}
 }