/* async_handlers_wk is now blocked */
- /*
- * The work item could be running or queued if the
- * ROC time event stops just as we get here.
- */
- flush_work(&mvm->roc_done_wk);
-
iwl_mvm_rm_aux_sta(mvm);
iwl_mvm_stop_device(mvm);
cancel_delayed_work_sync(&mvm->cs_tx_unblock_dwork);
cancel_delayed_work_sync(&mvm->scan_timeout_dwork);
+ /*
+ * The work item could be running or queued if the
+ * ROC time event stops just as we get here.
+ */
+ flush_work(&mvm->roc_done_wk);
+
mutex_lock(&mvm->mutex);
__iwl_mvm_mac_stop(mvm);
mutex_unlock(&mvm->mutex);
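Aside (illustrative, not part of the patch): the reason flush_work(&mvm->roc_done_wk) has to run before mutex_lock(&mvm->mutex) here, and likewise in the restart-path hunk further down, is that the reworked handler takes mvm->mutex itself. A minimal sketch of the ordering constraint, using a hypothetical demo_mvm struct as a pared-down stand-in for struct iwl_mvm:

	#include <linux/kernel.h>
	#include <linux/mutex.h>
	#include <linux/workqueue.h>

	struct demo_mvm {
		struct mutex mutex;
		struct work_struct roc_done_wk;
	};

	static void demo_roc_done_wk(struct work_struct *wk)
	{
		struct demo_mvm *mvm = container_of(wk, struct demo_mvm,
						    roc_done_wk);

		/* after this patch, the real handler takes the mutex itself */
		mutex_lock(&mvm->mutex);
		/* ... flush internal stations synchronously ... */
		mutex_unlock(&mvm->mutex);
	}

	static void demo_stop(struct demo_mvm *mvm)
	{
		/*
		 * Wait for a running/queued work item with no locks held.
		 * Calling flush_work() after mutex_lock() below would
		 * deadlock, since the handler blocks on the same mutex.
		 */
		flush_work(&mvm->roc_done_wk);

		mutex_lock(&mvm->mutex);
		/* ... stop the MAC ... */
		mutex_unlock(&mvm->mutex);
	}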
WARN_ON(i != mvmvif->ap_sta_id && !sta->tdls);
if (drop) {
- if (iwl_mvm_flush_sta(mvm, mvmsta, false, 0))
+ if (iwl_mvm_flush_sta(mvm, mvmsta, false))
IWL_ERR(mvm, "flush request fail\n");
} else {
msk |= mvmsta->tfd_queue_msk;
static inline const char *iwl_mvm_get_tx_fail_reason(u32 status) { return ""; }
#endif
int iwl_mvm_flush_tx_path(struct iwl_mvm *mvm, u32 tfd_msk, u32 flags);
-int iwl_mvm_flush_sta(struct iwl_mvm *mvm, void *sta, bool internal, u32 flags);
+int iwl_mvm_flush_sta(struct iwl_mvm *mvm, void *sta, bool internal);
int iwl_mvm_flush_sta_tids(struct iwl_mvm *mvm, u32 sta_id,
u16 tids, u32 flags);
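For reference, the call-site rewrite this prototype change implies, taken from the hunks elsewhere in this patch: callers simply drop the trailing argument, and the flags are hardwired to 0 inside iwl_mvm_flush_sta(), since the flush is now always issued synchronously:

	/* before */
	iwl_mvm_flush_sta(mvm, &mvm->aux_sta, true, CMD_ASYNC);
	/* after */
	iwl_mvm_flush_sta(mvm, &mvm->aux_sta, true);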
return ret;
/* flush its queues here since we are freeing mvm_sta */
- ret = iwl_mvm_flush_sta(mvm, mvm_sta, false, 0);
+ ret = iwl_mvm_flush_sta(mvm, mvm_sta, false);
if (ret)
return ret;
if (iwl_mvm_has_new_tx_api(mvm)) {
lockdep_assert_held(&mvm->mutex);
- iwl_mvm_flush_sta(mvm, &mvmvif->bcast_sta, true, 0);
+ iwl_mvm_flush_sta(mvm, &mvmvif->bcast_sta, true);
switch (vif->type) {
case NL80211_IFTYPE_AP:
lockdep_assert_held(&mvm->mutex);
- iwl_mvm_flush_sta(mvm, &mvmvif->mcast_sta, true, 0);
+ iwl_mvm_flush_sta(mvm, &mvmvif->mcast_sta, true);
iwl_mvm_disable_txq(mvm, NULL, mvmvif->cab_queue, 0, 0);
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 - 2019 Intel Corporation
+ * Copyright(c) 2012 - 2014, 2018 - 2020 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
*
* BSD LICENSE
*
- * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 - 2019 Intel Corporation
+ * Copyright(c) 2012 - 2014, 2018 - 2020 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* event finishes or is canceled, so that frames queued for it
* won't get stuck on the queue and be transmitted in the next
* time event.
- * We have to send the command asynchronously since this cannot
- * be under the mutex for locking reasons, but that's not an
- * issue as it will have to complete before the next command is
- * executed, and a new time event means a new command.
*/
- iwl_mvm_flush_sta(mvm, &mvm->aux_sta, true, CMD_ASYNC);
- /* Do the same for the P2P device queue (STA) */
+ mutex_lock(&mvm->mutex);
if (test_and_clear_bit(IWL_MVM_STATUS_NEED_FLUSH_P2P, &mvm->status)) {
struct iwl_mvm_vif *mvmvif;
if (!WARN_ON(!mvm->p2p_device_vif)) {
mvmvif = iwl_mvm_vif_from_mac80211(mvm->p2p_device_vif);
- iwl_mvm_flush_sta(mvm, &mvmvif->bcast_sta, true,
- CMD_ASYNC);
+ iwl_mvm_flush_sta(mvm, &mvmvif->bcast_sta, true);
}
+ } else {
+ /* do the same for the aux STA in the Hotspot 2.0 case */
+ iwl_mvm_flush_sta(mvm, &mvm->aux_sta, true);
}
+
+ mutex_unlock(&mvm->mutex);
}
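Assembled, the tail of iwl_mvm_roc_done_wk() now reads as below (reconstructed from the hunk above; the earlier parts of the function are elided). Because the work item takes mvm->mutex itself and flushes synchronously, the CMD_ASYNC rationale in the deleted comment no longer applies:

	mutex_lock(&mvm->mutex);
	if (test_and_clear_bit(IWL_MVM_STATUS_NEED_FLUSH_P2P, &mvm->status)) {
		struct iwl_mvm_vif *mvmvif;

		if (!WARN_ON(!mvm->p2p_device_vif)) {
			mvmvif = iwl_mvm_vif_from_mac80211(mvm->p2p_device_vif);
			iwl_mvm_flush_sta(mvm, &mvmvif->bcast_sta, true);
		}
	} else {
		/* do the same for the aux STA in the Hotspot 2.0 case */
		iwl_mvm_flush_sta(mvm, &mvm->aux_sta, true);
	}
	mutex_unlock(&mvm->mutex);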
static void iwl_mvm_roc_finished(struct iwl_mvm *mvm)
duration = tt->params.ct_kill_duration;
+ flush_work(&mvm->roc_done_wk);
+
mutex_lock(&mvm->mutex);
if (__iwl_mvm_mac_start(mvm))
return ret;
}
-int iwl_mvm_flush_sta(struct iwl_mvm *mvm, void *sta, bool internal, u32 flags)
+int iwl_mvm_flush_sta(struct iwl_mvm *mvm, void *sta, bool internal)
{
struct iwl_mvm_int_sta *int_sta = sta;
struct iwl_mvm_sta *mvm_sta = sta;
offsetof(struct iwl_mvm_sta, sta_id));
if (iwl_mvm_has_new_tx_api(mvm))
- return iwl_mvm_flush_sta_tids(mvm, mvm_sta->sta_id,
- 0xffff, flags);
+ return iwl_mvm_flush_sta_tids(mvm, mvm_sta->sta_id, 0xffff, 0);
if (internal)
- return iwl_mvm_flush_tx_path(mvm, int_sta->tfd_queue_msk,
- flags);
+ return iwl_mvm_flush_tx_path(mvm, int_sta->tfd_queue_msk, 0);
- return iwl_mvm_flush_tx_path(mvm, mvm_sta->tfd_queue_msk, flags);
+ return iwl_mvm_flush_tx_path(mvm, mvm_sta->tfd_queue_msk, 0);
}
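Finally, the resulting iwl_mvm_flush_sta(), assembled from the hunk above. The void *sta parameter is deliberately untyped so one helper serves both struct iwl_mvm_int_sta (internal == true) and struct iwl_mvm_sta; the truncated context line ending in offsetof(struct iwl_mvm_sta, sta_id)) is presumably the tail of a BUILD_BUG_ON asserting that sta_id sits at the same offset in both structs, and it is reconstructed below on that assumption:

	int iwl_mvm_flush_sta(struct iwl_mvm *mvm, void *sta, bool internal)
	{
		struct iwl_mvm_int_sta *int_sta = sta;
		struct iwl_mvm_sta *mvm_sta = sta;

		BUILD_BUG_ON(offsetof(struct iwl_mvm_int_sta, sta_id) !=
			     offsetof(struct iwl_mvm_sta, sta_id));

		if (iwl_mvm_has_new_tx_api(mvm))
			return iwl_mvm_flush_sta_tids(mvm, mvm_sta->sta_id,
						      0xffff, 0);

		if (internal)
			return iwl_mvm_flush_tx_path(mvm,
						     int_sta->tfd_queue_msk, 0);

		return iwl_mvm_flush_tx_path(mvm, mvm_sta->tfd_queue_msk, 0);
	}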