1 /******************************************************************************
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
8 * Copyright(c) 2012 - 2015 Intel Corporation. All rights reserved.
9 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
10 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of version 2 of the GNU General Public License as
14 * published by the Free Software Foundation.
16 * This program is distributed in the hope that it will be useful, but
17 * WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19 * General Public License for more details.
21 * You should have received a copy of the GNU General Public License
22 * along with this program; if not, write to the Free Software
23 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
26 * The full GNU General Public License is included in this distribution
27 * in the file called COPYING.
29 * Contact Information:
30 * Intel Linux Wireless <linuxwifi@intel.com>
31 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
35 * Copyright(c) 2012 - 2015 Intel Corporation. All rights reserved.
36 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
37 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
38 * All rights reserved.
40 * Redistribution and use in source and binary forms, with or without
41 * modification, are permitted provided that the following conditions
44 * * Redistributions of source code must retain the above copyright
45 * notice, this list of conditions and the following disclaimer.
46 * * Redistributions in binary form must reproduce the above copyright
47 * notice, this list of conditions and the following disclaimer in
48 * the documentation and/or other materials provided with the
50 * * Neither the name Intel Corporation nor the names of its
51 * contributors may be used to endorse or promote products derived
52 * from this software without specific prior written permission.
54 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
55 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
56 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
57 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
58 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
59 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
60 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
61 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
62 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
63 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
64 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
66 *****************************************************************************/
67 #include <net/mac80211.h>
74 * A new version of the ADD_STA command added new fields at the end of the
75 * structure, so sending the size of the relevant API's structure is enough to
76 * support both API versions.
78 static inline int iwl_mvm_add_sta_cmd_size(struct iwl_mvm *mvm)
80 if (iwl_mvm_has_new_rx_api(mvm) ||
81 fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE))
82 return sizeof(struct iwl_mvm_add_sta_cmd);
84 return sizeof(struct iwl_mvm_add_sta_cmd_v7);
87 static int iwl_mvm_find_free_sta_id(struct iwl_mvm *mvm,
88 enum nl80211_iftype iftype)
93 BUILD_BUG_ON(IWL_MVM_STATION_COUNT > 32);
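/* sta_id is matched below against a 32-bit bitmap of reserved IDs, hence the build-time limit of 32 stations */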
94 WARN_ON_ONCE(test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status));
96 lockdep_assert_held(&mvm->mutex);
98 /* d0i3/d3 assumes the AP's sta_id (of sta vif) is 0. reserve it. */
99 if (iftype != NL80211_IFTYPE_STATION)
100 reserved_ids = BIT(0);
102 /* Don't take rcu_read_lock() since we are protected by mvm->mutex */
103 for (sta_id = 0; sta_id < ARRAY_SIZE(mvm->fw_id_to_mac_id); sta_id++) {
104 if (BIT(sta_id) & reserved_ids)
107 if (!rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
108 lockdep_is_held(&mvm->mutex)))
111 return IWL_MVM_INVALID_STA;
114 /* send station add/update command to firmware */
115 int iwl_mvm_sta_send_to_fw(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
116 bool update, unsigned int flags)
118 struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
119 struct iwl_mvm_add_sta_cmd add_sta_cmd = {
120 .sta_id = mvm_sta->sta_id,
121 .mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color),
122 .add_modify = update ? 1 : 0,
123 .station_flags_msk = cpu_to_le32(STA_FLG_FAT_EN_MSK |
124 STA_FLG_MIMO_EN_MSK |
125 STA_FLG_RTS_MIMO_PROT),
126 .tid_disable_tx = cpu_to_le16(mvm_sta->tid_disable_agg),
130 u32 agg_size = 0, mpdu_dens = 0;
132 if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE))
133 add_sta_cmd.station_type = mvm_sta->sta_type;
135 if (!update || (flags & STA_MODIFY_QUEUES)) {
136 memcpy(&add_sta_cmd.addr, sta->addr, ETH_ALEN);
138 if (!iwl_mvm_has_new_tx_api(mvm)) {
139 add_sta_cmd.tfd_queue_msk =
140 cpu_to_le32(mvm_sta->tfd_queue_msk);
142 if (flags & STA_MODIFY_QUEUES)
143 add_sta_cmd.modify_mask |= STA_MODIFY_QUEUES;
145 WARN_ON(flags & STA_MODIFY_QUEUES);
149 switch (sta->bandwidth) {
150 case IEEE80211_STA_RX_BW_160:
151 add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_FAT_EN_160MHZ);
153 case IEEE80211_STA_RX_BW_80:
154 add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_FAT_EN_80MHZ);
156 case IEEE80211_STA_RX_BW_40:
157 add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_FAT_EN_40MHZ);
159 case IEEE80211_STA_RX_BW_20:
160 if (sta->ht_cap.ht_supported)
161 add_sta_cmd.station_flags |=
162 cpu_to_le32(STA_FLG_FAT_EN_20MHZ);
166 switch (sta->rx_nss) {
168 add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_SISO);
171 add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_MIMO2);
174 add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_MIMO3);
178 switch (sta->smps_mode) {
179 case IEEE80211_SMPS_AUTOMATIC:
180 case IEEE80211_SMPS_NUM_MODES:
183 case IEEE80211_SMPS_STATIC:
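/* in static SMPS the peer uses only a single spatial stream, so override any MIMO setting from above with SISO */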
185 add_sta_cmd.station_flags &= ~cpu_to_le32(STA_FLG_MIMO_EN_MSK);
186 add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_SISO);
188 case IEEE80211_SMPS_DYNAMIC:
189 add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_RTS_MIMO_PROT);
191 case IEEE80211_SMPS_OFF:
196 if (sta->ht_cap.ht_supported) {
197 add_sta_cmd.station_flags_msk |=
198 cpu_to_le32(STA_FLG_MAX_AGG_SIZE_MSK |
199 STA_FLG_AGG_MPDU_DENS_MSK);
201 mpdu_dens = sta->ht_cap.ampdu_density;
204 if (sta->vht_cap.vht_supported) {
205 agg_size = sta->vht_cap.cap &
206 IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK;
208 IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT;
209 } else if (sta->ht_cap.ht_supported) {
210 agg_size = sta->ht_cap.ampdu_factor;
213 add_sta_cmd.station_flags |=
214 cpu_to_le32(agg_size << STA_FLG_MAX_AGG_SIZE_SHIFT);
215 add_sta_cmd.station_flags |=
216 cpu_to_le32(mpdu_dens << STA_FLG_AGG_MPDU_DENS_SHIFT);
217 if (mvm_sta->associated)
218 add_sta_cmd.assoc_id = cpu_to_le16(sta->aid);
221 add_sta_cmd.modify_mask |= STA_MODIFY_UAPSD_ACS;
223 if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BK)
224 add_sta_cmd.uapsd_acs |= BIT(AC_BK);
225 if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BE)
226 add_sta_cmd.uapsd_acs |= BIT(AC_BE);
227 if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VI)
228 add_sta_cmd.uapsd_acs |= BIT(AC_VI);
229 if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VO)
230 add_sta_cmd.uapsd_acs |= BIT(AC_VO);
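/* the FW keeps two AC bitmaps in uapsd_acs (trigger-enabled and delivery-enabled ACs), so mirror the bits into the upper nibble as well */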
231 add_sta_cmd.uapsd_acs |= add_sta_cmd.uapsd_acs << 4;
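/* mac80211's max_sp is in units of 2 frames; 0 means "deliver all buffered frames", which the FW apparently encodes as 128 */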
232 add_sta_cmd.sp_length = sta->max_sp ? sta->max_sp * 2 : 128;
235 status = ADD_STA_SUCCESS;
236 ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
237 iwl_mvm_add_sta_cmd_size(mvm),
238 &add_sta_cmd, &status);
242 switch (status & IWL_ADD_STA_STATUS_MASK) {
243 case ADD_STA_SUCCESS:
244 IWL_DEBUG_ASSOC(mvm, "ADD_STA PASSED\n");
248 IWL_ERR(mvm, "ADD_STA failed\n");
255 static void iwl_mvm_rx_agg_session_expired(unsigned long data)
257 struct iwl_mvm_baid_data __rcu **rcu_ptr = (void *)data;
258 struct iwl_mvm_baid_data *ba_data;
259 struct ieee80211_sta *sta;
260 struct iwl_mvm_sta *mvm_sta;
261 unsigned long timeout;
265 ba_data = rcu_dereference(*rcu_ptr);
267 if (WARN_ON(!ba_data))
270 if (!ba_data->timeout)
273 timeout = ba_data->last_rx + TU_TO_JIFFIES(ba_data->timeout * 2);
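/* only tear the session down after 2 * timeout TUs without RX; if frames arrived recently, just re-arm the timer to the new deadline */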
274 if (time_is_after_jiffies(timeout)) {
275 mod_timer(&ba_data->session_timer, timeout);
280 sta = rcu_dereference(ba_data->mvm->fw_id_to_mac_id[ba_data->sta_id]);
283 * sta should be valid unless the following happens:
284 * The firmware asserts which triggers a reconfig flow, but
285 * the reconfig fails before we set the pointer to sta into
286 * the fw_id_to_mac_id pointer table. Mac80211 can't stop
287 * A-MPDU and hence the timer continues to run. Then, the
288 * timer expires and sta is NULL.
293 mvm_sta = iwl_mvm_sta_from_mac80211(sta);
294 ieee80211_rx_ba_timer_expired(mvm_sta->vif,
295 sta->addr, ba_data->tid);
300 /* Disable aggregations for a bitmap of TIDs for a given station */
301 static int iwl_mvm_invalidate_sta_queue(struct iwl_mvm *mvm, int queue,
302 unsigned long disable_agg_tids,
305 struct iwl_mvm_add_sta_cmd cmd = {};
306 struct ieee80211_sta *sta;
307 struct iwl_mvm_sta *mvmsta;
312 if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
315 spin_lock_bh(&mvm->queue_info_lock);
316 sta_id = mvm->queue_info[queue].ra_sta_id;
317 spin_unlock_bh(&mvm->queue_info_lock);
321 sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
323 if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) {
328 mvmsta = iwl_mvm_sta_from_mac80211(sta);
330 mvmsta->tid_disable_agg |= disable_agg_tids;
332 cmd.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color);
333 cmd.sta_id = mvmsta->sta_id;
334 cmd.add_modify = STA_MODE_MODIFY;
335 cmd.modify_mask = STA_MODIFY_QUEUES;
336 if (disable_agg_tids)
337 cmd.modify_mask |= STA_MODIFY_TID_DISABLE_TX;
338 if (remove_queue)
339 cmd.modify_mask |= STA_MODIFY_QUEUE_REMOVAL;
340 cmd.tfd_queue_msk = cpu_to_le32(mvmsta->tfd_queue_msk);
341 cmd.tid_disable_tx = cpu_to_le16(mvmsta->tid_disable_agg);
345 /* Notify FW of queue removal from the STA queues */
346 status = ADD_STA_SUCCESS;
347 ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
348 iwl_mvm_add_sta_cmd_size(mvm),
354 static int iwl_mvm_get_queue_agg_tids(struct iwl_mvm *mvm, int queue)
356 struct ieee80211_sta *sta;
357 struct iwl_mvm_sta *mvmsta;
358 unsigned long tid_bitmap;
359 unsigned long agg_tids = 0;
363 lockdep_assert_held(&mvm->mutex);
365 if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
368 spin_lock_bh(&mvm->queue_info_lock);
369 sta_id = mvm->queue_info[queue].ra_sta_id;
370 tid_bitmap = mvm->queue_info[queue].tid_bitmap;
371 spin_unlock_bh(&mvm->queue_info_lock);
373 sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
374 lockdep_is_held(&mvm->mutex));
376 if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta)))
379 mvmsta = iwl_mvm_sta_from_mac80211(sta);
381 spin_lock_bh(&mvmsta->lock);
382 for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
383 if (mvmsta->tid_data[tid].state == IWL_AGG_ON)
384 agg_tids |= BIT(tid);
386 spin_unlock_bh(&mvmsta->lock);
392 * Remove a queue from a station's resources.
393 * Note that this only marks the queue as free. It DOESN'T delete a BA agreement, and
394 * doesn't disable the queue
396 static int iwl_mvm_remove_sta_queue_marking(struct iwl_mvm *mvm, int queue)
398 struct ieee80211_sta *sta;
399 struct iwl_mvm_sta *mvmsta;
400 unsigned long tid_bitmap;
401 unsigned long disable_agg_tids = 0;
405 lockdep_assert_held(&mvm->mutex);
407 if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
410 spin_lock_bh(&mvm->queue_info_lock);
411 sta_id = mvm->queue_info[queue].ra_sta_id;
412 tid_bitmap = mvm->queue_info[queue].tid_bitmap;
413 spin_unlock_bh(&mvm->queue_info_lock);
417 sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
419 if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) {
424 mvmsta = iwl_mvm_sta_from_mac80211(sta);
426 spin_lock_bh(&mvmsta->lock);
427 /* Unmap MAC queues and TIDs from this queue */
428 for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
429 if (mvmsta->tid_data[tid].state == IWL_AGG_ON)
430 disable_agg_tids |= BIT(tid);
431 mvmsta->tid_data[tid].txq_id = IWL_MVM_INVALID_QUEUE;
434 mvmsta->tfd_queue_msk &= ~BIT(queue); /* Don't use this queue anymore */
435 spin_unlock_bh(&mvmsta->lock);
440 * The TX path may have been using this TXQ_ID from the tid_data,
441 * so make sure it's no longer running so that we can safely reuse
442 * this TXQ later. We've set all the TIDs to IWL_MVM_INVALID_QUEUE
443 * above, but nothing guarantees we've stopped using them. Thus,
444 * without this, we could get to iwl_mvm_disable_txq() and remove
445 * the queue while still sending frames to it.
449 return disable_agg_tids;
452 static int iwl_mvm_free_inactive_queue(struct iwl_mvm *mvm, int queue,
455 struct iwl_mvm_sta *mvmsta;
456 u8 txq_curr_ac, sta_id, tid;
457 unsigned long disable_agg_tids = 0;
460 lockdep_assert_held(&mvm->mutex);
462 if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
465 spin_lock_bh(&mvm->queue_info_lock);
466 txq_curr_ac = mvm->queue_info[queue].mac80211_ac;
467 sta_id = mvm->queue_info[queue].ra_sta_id;
468 tid = mvm->queue_info[queue].txq_tid;
469 spin_unlock_bh(&mvm->queue_info_lock);
471 mvmsta = iwl_mvm_sta_from_staid_protected(mvm, sta_id);
472 if (WARN_ON(!mvmsta))
475 disable_agg_tids = iwl_mvm_remove_sta_queue_marking(mvm, queue);
476 /* Disable the queue */
477 if (disable_agg_tids)
478 iwl_mvm_invalidate_sta_queue(mvm, queue,
479 disable_agg_tids, false);
481 ret = iwl_mvm_disable_txq(mvm, queue,
482 mvmsta->vif->hw_queue[txq_curr_ac],
485 /* Re-mark the inactive queue as inactive */
486 spin_lock_bh(&mvm->queue_info_lock);
487 mvm->queue_info[queue].status = IWL_MVM_QUEUE_INACTIVE;
488 spin_unlock_bh(&mvm->queue_info_lock);
490 "Failed to free inactive queue %d (ret=%d)\n",
496 /* If TXQ is allocated to another STA, update removal in FW */
498 iwl_mvm_invalidate_sta_queue(mvm, queue, 0, true);
503 static int iwl_mvm_get_shared_queue(struct iwl_mvm *mvm,
504 unsigned long tfd_queue_mask, u8 ac)
507 u8 ac_to_queue[IEEE80211_NUM_ACS];
510 lockdep_assert_held(&mvm->queue_info_lock);
511 if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
514 memset(&ac_to_queue, IEEE80211_INVAL_HW_QUEUE, sizeof(ac_to_queue));
516 /* See what ACs the existing queues for this STA have */
517 for_each_set_bit(i, &tfd_queue_mask, IWL_MVM_DQA_MAX_DATA_QUEUE) {
518 /* Only DATA queues can be shared */
519 if (i < IWL_MVM_DQA_MIN_DATA_QUEUE &&
520 i != IWL_MVM_DQA_BSS_CLIENT_QUEUE)
523 /* Don't try and take queues being reconfigured */
524 if (mvm->queue_info[queue].status ==
525 IWL_MVM_QUEUE_RECONFIGURING)
528 ac_to_queue[mvm->queue_info[i].mac80211_ac] = i;
532 * The queue to share is chosen only from DATA queues as follows (in
533 * descending priority):
534 * 1. An AC_BE queue
535 * 2. Same AC queue
536 * 3. Highest AC queue that is lower than new AC
537 * 4. Any existing AC (there always is at least 1 DATA queue)
540 /* Priority 1: An AC_BE queue */
541 if (ac_to_queue[IEEE80211_AC_BE] != IEEE80211_INVAL_HW_QUEUE)
542 queue = ac_to_queue[IEEE80211_AC_BE];
543 /* Priority 2: Same AC queue */
544 else if (ac_to_queue[ac] != IEEE80211_INVAL_HW_QUEUE)
545 queue = ac_to_queue[ac];
546 /* Priority 3a: If new AC is VO and VI exists - use VI */
547 else if (ac == IEEE80211_AC_VO &&
548 ac_to_queue[IEEE80211_AC_VI] != IEEE80211_INVAL_HW_QUEUE)
549 queue = ac_to_queue[IEEE80211_AC_VI];
550 /* Priority 3b: No BE so only AC less than the new one is BK */
551 else if (ac_to_queue[IEEE80211_AC_BK] != IEEE80211_INVAL_HW_QUEUE)
552 queue = ac_to_queue[IEEE80211_AC_BK];
553 /* Priority 4a: No BE nor BK - use VI if exists */
554 else if (ac_to_queue[IEEE80211_AC_VI] != IEEE80211_INVAL_HW_QUEUE)
555 queue = ac_to_queue[IEEE80211_AC_VI];
556 /* Priority 4b: No BE, BK nor VI - use VO if exists */
557 else if (ac_to_queue[IEEE80211_AC_VO] != IEEE80211_INVAL_HW_QUEUE)
558 queue = ac_to_queue[IEEE80211_AC_VO];
560 /* Make sure queue found (or not) is legal */
561 if (!iwl_mvm_is_dqa_data_queue(mvm, queue) &&
562 !iwl_mvm_is_dqa_mgmt_queue(mvm, queue) &&
563 (queue != IWL_MVM_DQA_BSS_CLIENT_QUEUE)) {
564 IWL_ERR(mvm, "No DATA queues available to share\n");
568 /* Make sure the queue isn't in the middle of being reconfigured */
569 if (mvm->queue_info[queue].status == IWL_MVM_QUEUE_RECONFIGURING) {
571 "TXQ %d is in the middle of re-config - try again\n",
580 * If a given queue has a higher AC than the TID stream that is being compared
581 * to, the queue needs to be redirected to the lower AC. This function does that
582 * in such a case, otherwise - if no redirection is required - it does nothing,
583 * unless the %force param is true.
585 int iwl_mvm_scd_queue_redirect(struct iwl_mvm *mvm, int queue, int tid,
586 int ac, int ssn, unsigned int wdg_timeout,
589 struct iwl_scd_txq_cfg_cmd cmd = {
591 .action = SCD_CFG_DISABLE_QUEUE,
597 if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
601 * If the AC is lower than the current one - the FIFO needs to be redirected to
602 * the lowest one of the streams in the queue. Check if this is needed
604 * Notice that the enum ieee80211_ac_numbers is "flipped", so BK is with
605 * value 3 and VO with value 0, so to check if ac X is lower than ac Y
606 * we need to check if the numerical value of X is LARGER than of Y.
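* For example, IEEE80211_AC_VO is 0 while IEEE80211_AC_BK is 3, so a numerically larger value means a lower-priority AC.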
608 spin_lock_bh(&mvm->queue_info_lock);
609 if (ac <= mvm->queue_info[queue].mac80211_ac && !force) {
610 spin_unlock_bh(&mvm->queue_info_lock);
612 IWL_DEBUG_TX_QUEUES(mvm,
613 "No redirection needed on TXQ #%d\n",
618 cmd.sta_id = mvm->queue_info[queue].ra_sta_id;
619 cmd.tx_fifo = iwl_mvm_ac_to_tx_fifo[mvm->queue_info[queue].mac80211_ac];
620 cmd.tid = mvm->queue_info[queue].txq_tid;
621 mq = mvm->hw_queue_to_mac80211[queue];
622 shared_queue = (mvm->queue_info[queue].hw_queue_refcount > 1);
623 spin_unlock_bh(&mvm->queue_info_lock);
625 IWL_DEBUG_TX_QUEUES(mvm, "Redirecting TXQ #%d to FIFO #%d\n",
626 queue, iwl_mvm_ac_to_tx_fifo[ac]);
628 /* Stop MAC queues and wait for this queue to empty */
629 iwl_mvm_stop_mac_queues(mvm, mq);
630 ret = iwl_trans_wait_tx_queues_empty(mvm->trans, BIT(queue));
632 IWL_ERR(mvm, "Error draining queue %d before reconfig\n",
638 /* Before redirecting the queue we need to de-activate it */
639 iwl_trans_txq_disable(mvm->trans, queue, false);
640 ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd);
642 IWL_ERR(mvm, "Failed SCD disable TXQ %d (ret=%d)\n", queue,
645 /* Make sure the SCD wrptr is correctly set before reconfiguring */
646 iwl_trans_txq_enable_cfg(mvm->trans, queue, ssn, NULL, wdg_timeout);
648 /* Update the TID "owner" of the queue */
649 spin_lock_bh(&mvm->queue_info_lock);
650 mvm->queue_info[queue].txq_tid = tid;
651 spin_unlock_bh(&mvm->queue_info_lock);
653 /* TODO: Work-around SCD bug when moving back by multiples of 0x40 */
655 /* Redirect to lower AC */
656 iwl_mvm_reconfig_scd(mvm, queue, iwl_mvm_ac_to_tx_fifo[ac],
657 cmd.sta_id, tid, LINK_QUAL_AGG_FRAME_LIMIT_DEF,
660 /* Update AC marking of the queue */
661 spin_lock_bh(&mvm->queue_info_lock);
662 mvm->queue_info[queue].mac80211_ac = ac;
663 spin_unlock_bh(&mvm->queue_info_lock);
666 * Mark queue as shared in transport if shared
667 * Note this has to be done after queue enablement because enablement
668 * can also set this value, and there is no indication there to shared queues
672 iwl_trans_txq_set_shared_mode(mvm->trans, queue, true);
675 /* Continue using the MAC queues */
676 iwl_mvm_start_mac_queues(mvm, mq);
681 static int iwl_mvm_sta_alloc_queue_tvqm(struct iwl_mvm *mvm,
682 struct ieee80211_sta *sta, u8 ac,
685 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
686 unsigned int wdg_timeout =
687 iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false);
688 u8 mac_queue = mvmsta->vif->hw_queue[ac];
691 lockdep_assert_held(&mvm->mutex);
693 IWL_DEBUG_TX_QUEUES(mvm,
694 "Allocating queue for sta %d on tid %d\n",
695 mvmsta->sta_id, tid);
696 queue = iwl_mvm_tvqm_enable_txq(mvm, mac_queue, mvmsta->sta_id, tid,
701 IWL_DEBUG_TX_QUEUES(mvm, "Allocated queue is %d\n", queue);
703 spin_lock_bh(&mvmsta->lock);
704 mvmsta->tid_data[tid].txq_id = queue;
705 mvmsta->tid_data[tid].is_tid_active = true;
706 spin_unlock_bh(&mvmsta->lock);
711 static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm,
712 struct ieee80211_sta *sta, u8 ac, int tid,
713 struct ieee80211_hdr *hdr)
715 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
716 struct iwl_trans_txq_scd_cfg cfg = {
717 .fifo = iwl_mvm_mac_ac_to_tx_fifo(mvm, ac),
718 .sta_id = mvmsta->sta_id,
720 .frame_limit = IWL_FRAME_LIMIT,
722 unsigned int wdg_timeout =
723 iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false);
724 u8 mac_queue = mvmsta->vif->hw_queue[ac];
726 bool using_inactive_queue = false, same_sta = false;
727 unsigned long disable_agg_tids = 0;
728 enum iwl_mvm_agg_state queue_state;
729 bool shared_queue = false, inc_ssn;
731 unsigned long tfd_queue_mask;
734 lockdep_assert_held(&mvm->mutex);
736 if (iwl_mvm_has_new_tx_api(mvm))
737 return iwl_mvm_sta_alloc_queue_tvqm(mvm, sta, ac, tid);
739 spin_lock_bh(&mvmsta->lock);
740 tfd_queue_mask = mvmsta->tfd_queue_msk;
741 spin_unlock_bh(&mvmsta->lock);
743 spin_lock_bh(&mvm->queue_info_lock);
746 * Non-QoS, QoS NDP and MGMT frames should go to a MGMT queue, if one exists
749 if (!ieee80211_is_data_qos(hdr->frame_control) ||
750 ieee80211_is_qos_nullfunc(hdr->frame_control)) {
751 queue = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
752 IWL_MVM_DQA_MIN_MGMT_QUEUE,
753 IWL_MVM_DQA_MAX_MGMT_QUEUE);
754 if (queue >= IWL_MVM_DQA_MIN_MGMT_QUEUE)
755 IWL_DEBUG_TX_QUEUES(mvm, "Found free MGMT queue #%d\n",
758 /* If no such queue is found, we'll use a DATA queue instead */
761 if ((queue < 0 && mvmsta->reserved_queue != IEEE80211_INVAL_HW_QUEUE) &&
762 (mvm->queue_info[mvmsta->reserved_queue].status ==
763 IWL_MVM_QUEUE_RESERVED ||
764 mvm->queue_info[mvmsta->reserved_queue].status ==
765 IWL_MVM_QUEUE_INACTIVE)) {
766 queue = mvmsta->reserved_queue;
767 mvm->queue_info[queue].reserved = true;
768 IWL_DEBUG_TX_QUEUES(mvm, "Using reserved queue #%d\n", queue);
772 queue = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
773 IWL_MVM_DQA_MIN_DATA_QUEUE,
774 IWL_MVM_DQA_MAX_DATA_QUEUE);
777 * Check if this queue is already allocated but inactive.
778 * In such a case, we'll need to first free this queue before enabling
779 * it again, so we'll mark it as reserved to make sure no new traffic arrives on it
783 mvm->queue_info[queue].status == IWL_MVM_QUEUE_INACTIVE) {
784 mvm->queue_info[queue].status = IWL_MVM_QUEUE_RESERVED;
785 using_inactive_queue = true;
786 same_sta = mvm->queue_info[queue].ra_sta_id == mvmsta->sta_id;
787 IWL_DEBUG_TX_QUEUES(mvm,
788 "Re-assigning TXQ %d: sta_id=%d, tid=%d\n",
789 queue, mvmsta->sta_id, tid);
792 /* No free queue - we'll have to share */
794 queue = iwl_mvm_get_shared_queue(mvm, tfd_queue_mask, ac);
797 mvm->queue_info[queue].status = IWL_MVM_QUEUE_SHARED;
802 * Mark TXQ as ready, even though it hasn't been fully configured yet,
803 * to make sure no one else takes it.
804 * This will allow avoiding re-acquiring the lock at the end of the
805 * configuration. On error we'll mark it back as free.
807 if ((queue > 0) && !shared_queue)
808 mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY;
810 spin_unlock_bh(&mvm->queue_info_lock);
812 /* This shouldn't happen - out of queues */
813 if (WARN_ON(queue <= 0)) {
814 IWL_ERR(mvm, "No available queues for tid %d on sta_id %d\n",
820 * Actual en/disablement of aggregations is through the ADD_STA HCMD,
821 * but for configuring the SCD to send A-MPDUs we need to mark the queue
822 * as aggregated as well.
823 * Mark all DATA queues as allowing to be aggregated at some point
825 cfg.aggregate = (queue >= IWL_MVM_DQA_MIN_DATA_QUEUE ||
826 queue == IWL_MVM_DQA_BSS_CLIENT_QUEUE);
829 * If this queue was previously inactive (idle) - we need to free it
832 if (using_inactive_queue) {
833 ret = iwl_mvm_free_inactive_queue(mvm, queue, same_sta);
838 IWL_DEBUG_TX_QUEUES(mvm,
839 "Allocating %squeue #%d to sta %d on tid %d\n",
840 shared_queue ? "shared " : "", queue,
841 mvmsta->sta_id, tid);
844 /* Disable any open aggs on this queue */
845 disable_agg_tids = iwl_mvm_get_queue_agg_tids(mvm, queue);
847 if (disable_agg_tids) {
848 IWL_DEBUG_TX_QUEUES(mvm, "Disabling aggs on queue %d\n",
850 iwl_mvm_invalidate_sta_queue(mvm, queue,
851 disable_agg_tids, false);
855 ssn = IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl));
856 inc_ssn = iwl_mvm_enable_txq(mvm, queue, mac_queue,
857 ssn, &cfg, wdg_timeout);
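/* if enabling the queue forced the SSN forward (inc_ssn), advance both our local ssn and the frame's sequence number; the sequence number lives in the upper 12 bits of seq_ctrl, hence the step of 0x10 */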
859 ssn = (ssn + 1) & IEEE80211_SCTL_SEQ;
860 le16_add_cpu(&hdr->seq_ctrl, 0x10);
864 * Mark queue as shared in transport if shared
865 * Note this has to be done after queue enablement because enablement
866 * can also set this value, and there is no indication there to shared queues
870 iwl_trans_txq_set_shared_mode(mvm->trans, queue, true);
872 spin_lock_bh(&mvmsta->lock);
874 * This looks racy, but it is not. We have only one packet for
875 * this ra/tid in our Tx path since we stop the Qdisc when we
876 * need to allocate a new TFD queue.
878 if (inc_ssn)
879 mvmsta->tid_data[tid].seq_number += 0x10;
880 mvmsta->tid_data[tid].txq_id = queue;
881 mvmsta->tid_data[tid].is_tid_active = true;
882 mvmsta->tfd_queue_msk |= BIT(queue);
883 queue_state = mvmsta->tid_data[tid].state;
885 if (mvmsta->reserved_queue == queue)
886 mvmsta->reserved_queue = IEEE80211_INVAL_HW_QUEUE;
887 spin_unlock_bh(&mvmsta->lock);
890 ret = iwl_mvm_sta_send_to_fw(mvm, sta, true, STA_MODIFY_QUEUES);
894 /* If we need to re-enable aggregations... */
895 if (queue_state == IWL_AGG_ON) {
896 ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true);
901 /* Redirect queue, if needed */
902 ret = iwl_mvm_scd_queue_redirect(mvm, queue, tid, ac, ssn,
911 iwl_mvm_disable_txq(mvm, queue, mac_queue, tid, 0);
916 static void iwl_mvm_change_queue_owner(struct iwl_mvm *mvm, int queue)
918 struct iwl_scd_txq_cfg_cmd cmd = {
920 .action = SCD_CFG_UPDATE_QUEUE_TID,
923 unsigned long tid_bitmap;
926 lockdep_assert_held(&mvm->mutex);
928 if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
931 spin_lock_bh(&mvm->queue_info_lock);
932 tid_bitmap = mvm->queue_info[queue].tid_bitmap;
933 spin_unlock_bh(&mvm->queue_info_lock);
935 if (WARN(!tid_bitmap, "TXQ %d has no tids assigned to it\n", queue))
938 /* Find any TID for queue */
939 tid = find_first_bit(&tid_bitmap, IWL_MAX_TID_COUNT + 1);
941 cmd.tx_fifo = iwl_mvm_ac_to_tx_fifo[tid_to_mac80211_ac[tid]];
943 ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd);
945 IWL_ERR(mvm, "Failed to update owner of TXQ %d (ret=%d)\n",
950 spin_lock_bh(&mvm->queue_info_lock);
951 mvm->queue_info[queue].txq_tid = tid;
952 spin_unlock_bh(&mvm->queue_info_lock);
953 IWL_DEBUG_TX_QUEUES(mvm, "Changed TXQ %d ownership to tid %d\n",
957 static void iwl_mvm_unshare_queue(struct iwl_mvm *mvm, int queue)
959 struct ieee80211_sta *sta;
960 struct iwl_mvm_sta *mvmsta;
963 unsigned long tid_bitmap;
964 unsigned int wdg_timeout;
968 /* queue sharing is disabled on new TX path */
969 if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
972 lockdep_assert_held(&mvm->mutex);
974 spin_lock_bh(&mvm->queue_info_lock);
975 sta_id = mvm->queue_info[queue].ra_sta_id;
976 tid_bitmap = mvm->queue_info[queue].tid_bitmap;
977 spin_unlock_bh(&mvm->queue_info_lock);
979 /* Find TID for queue, and make sure it is the only one on the queue */
980 tid = find_first_bit(&tid_bitmap, IWL_MAX_TID_COUNT + 1);
981 if (tid_bitmap != BIT(tid)) {
982 IWL_ERR(mvm, "Failed to unshare q %d, active tids=0x%lx\n",
987 IWL_DEBUG_TX_QUEUES(mvm, "Unsharing TXQ %d, keeping tid %d\n", queue,
990 sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
991 lockdep_is_held(&mvm->mutex));
993 if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta)))
996 mvmsta = iwl_mvm_sta_from_mac80211(sta);
997 wdg_timeout = iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false);
999 ssn = IEEE80211_SEQ_TO_SN(mvmsta->tid_data[tid].seq_number);
1001 ret = iwl_mvm_scd_queue_redirect(mvm, queue, tid,
1002 tid_to_mac80211_ac[tid], ssn,
1005 IWL_ERR(mvm, "Failed to redirect TXQ %d\n", queue);
1009 /* If aggs should be turned back on - do it */
1010 if (mvmsta->tid_data[tid].state == IWL_AGG_ON) {
1011 struct iwl_mvm_add_sta_cmd cmd = {0};
1013 mvmsta->tid_disable_agg &= ~BIT(tid);
1015 cmd.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color);
1016 cmd.sta_id = mvmsta->sta_id;
1017 cmd.add_modify = STA_MODE_MODIFY;
1018 cmd.modify_mask = STA_MODIFY_TID_DISABLE_TX;
1019 cmd.tfd_queue_msk = cpu_to_le32(mvmsta->tfd_queue_msk);
1020 cmd.tid_disable_tx = cpu_to_le16(mvmsta->tid_disable_agg);
1022 ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC,
1023 iwl_mvm_add_sta_cmd_size(mvm), &cmd);
1025 IWL_DEBUG_TX_QUEUES(mvm,
1026 "TXQ #%d is now aggregated again\n",
1029 /* Mark queue internally as aggregating again */
1030 iwl_trans_txq_set_shared_mode(mvm->trans, queue, false);
1034 spin_lock_bh(&mvm->queue_info_lock);
1035 mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY;
1036 spin_unlock_bh(&mvm->queue_info_lock);
1039 static inline u8 iwl_mvm_tid_to_ac_queue(int tid)
1041 if (tid == IWL_MAX_TID_COUNT)
1042 return IEEE80211_AC_VO; /* MGMT */
1044 return tid_to_mac80211_ac[tid];
1047 static void iwl_mvm_tx_deferred_stream(struct iwl_mvm *mvm,
1048 struct ieee80211_sta *sta, int tid)
1050 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
1051 struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
1052 struct sk_buff *skb;
1053 struct ieee80211_hdr *hdr;
1054 struct sk_buff_head deferred_tx;
1056 bool no_queue = false; /* Marks if there is a problem with the queue */
1059 lockdep_assert_held(&mvm->mutex);
1061 skb = skb_peek(&tid_data->deferred_tx_frames);
1064 hdr = (void *)skb->data;
1066 ac = iwl_mvm_tid_to_ac_queue(tid);
1067 mac_queue = IEEE80211_SKB_CB(skb)->hw_queue;
1069 if (tid_data->txq_id == IWL_MVM_INVALID_QUEUE &&
1070 iwl_mvm_sta_alloc_queue(mvm, sta, ac, tid, hdr)) {
1072 "Can't alloc TXQ for sta %d tid %d - dropping frame\n",
1073 mvmsta->sta_id, tid);
1076 * Mark queue as problematic so later the deferred traffic is
1077 * freed, as we can do nothing with it
1082 __skb_queue_head_init(&deferred_tx);
1084 /* Disable bottom-halves when entering TX path */
1085 local_bh_disable();
1086 spin_lock(&mvmsta->lock);
1087 skb_queue_splice_init(&tid_data->deferred_tx_frames, &deferred_tx);
1088 mvmsta->deferred_traffic_tid_map &= ~BIT(tid);
1089 spin_unlock(&mvmsta->lock);
1091 while ((skb = __skb_dequeue(&deferred_tx)))
1092 if (no_queue || iwl_mvm_tx_skb(mvm, skb, sta))
1093 ieee80211_free_txskb(mvm->hw, skb);
1094 local_bh_enable();
1097 iwl_mvm_start_mac_queues(mvm, BIT(mac_queue));
1100 void iwl_mvm_add_new_dqa_stream_wk(struct work_struct *wk)
1102 struct iwl_mvm *mvm = container_of(wk, struct iwl_mvm,
1104 struct ieee80211_sta *sta;
1105 struct iwl_mvm_sta *mvmsta;
1106 unsigned long deferred_tid_traffic;
1107 int queue, sta_id, tid;
1109 /* Check inactivity of queues */
1110 iwl_mvm_inactivity_check(mvm);
1112 mutex_lock(&mvm->mutex);
1114 /* No queue reconfiguration in TVQM mode */
1115 if (iwl_mvm_has_new_tx_api(mvm))
1118 /* Reconfigure queues requiring reconfiguration */
1119 for (queue = 0; queue < ARRAY_SIZE(mvm->queue_info); queue++) {
1123 spin_lock_bh(&mvm->queue_info_lock);
1124 reconfig = (mvm->queue_info[queue].status ==
1125 IWL_MVM_QUEUE_RECONFIGURING);
1128 * We need to take into account a situation in which a TXQ was
1129 * allocated to TID x, and then turned shared by adding TIDs y
1130 * and z. If TID x becomes inactive and is removed from the TXQ,
1131 * ownership must be given to one of the remaining TIDs.
1132 * This is mainly because if TID x later resumes - a new queue can't
1133 * be allocated for it as long as it is still the owner of another TXQ.
1135 change_owner = !(mvm->queue_info[queue].tid_bitmap &
1136 BIT(mvm->queue_info[queue].txq_tid)) &&
1137 (mvm->queue_info[queue].status ==
1138 IWL_MVM_QUEUE_SHARED);
1139 spin_unlock_bh(&mvm->queue_info_lock);
1142 iwl_mvm_unshare_queue(mvm, queue);
1143 else if (change_owner)
1144 iwl_mvm_change_queue_owner(mvm, queue);
1148 /* Go over all stations with deferred traffic */
1149 for_each_set_bit(sta_id, mvm->sta_deferred_frames,
1150 IWL_MVM_STATION_COUNT) {
1151 clear_bit(sta_id, mvm->sta_deferred_frames);
1152 sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
1153 lockdep_is_held(&mvm->mutex));
1154 if (IS_ERR_OR_NULL(sta))
1157 mvmsta = iwl_mvm_sta_from_mac80211(sta);
1158 deferred_tid_traffic = mvmsta->deferred_traffic_tid_map;
1160 for_each_set_bit(tid, &deferred_tid_traffic,
1161 IWL_MAX_TID_COUNT + 1)
1162 iwl_mvm_tx_deferred_stream(mvm, sta, tid);
1165 mutex_unlock(&mvm->mutex);
1168 static int iwl_mvm_reserve_sta_stream(struct iwl_mvm *mvm,
1169 struct ieee80211_sta *sta,
1170 enum nl80211_iftype vif_type)
1172 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
1174 bool using_inactive_queue = false, same_sta = false;
1176 /* queue reserving is disabled on new TX path */
1177 if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
1181 * Check for inactive queues, so we don't reach a situation where we
1182 * can't add a STA due to a shortage of queues that doesn't really exist
1184 iwl_mvm_inactivity_check(mvm);
1186 spin_lock_bh(&mvm->queue_info_lock);
1188 /* Make sure we have free resources for this STA */
1189 if (vif_type == NL80211_IFTYPE_STATION && !sta->tdls &&
1190 !mvm->queue_info[IWL_MVM_DQA_BSS_CLIENT_QUEUE].hw_queue_refcount &&
1191 (mvm->queue_info[IWL_MVM_DQA_BSS_CLIENT_QUEUE].status ==
1192 IWL_MVM_QUEUE_FREE))
1193 queue = IWL_MVM_DQA_BSS_CLIENT_QUEUE;
1195 queue = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
1196 IWL_MVM_DQA_MIN_DATA_QUEUE,
1197 IWL_MVM_DQA_MAX_DATA_QUEUE);
1199 spin_unlock_bh(&mvm->queue_info_lock);
1200 IWL_ERR(mvm, "No available queues for new station\n");
1202 } else if (mvm->queue_info[queue].status == IWL_MVM_QUEUE_INACTIVE) {
1204 * If this queue is already allocated but inactive we'll need to
1205 * first free this queue before enabling it again, so we'll mark
1206 * it as reserved to make sure no new traffic arrives on it
1208 using_inactive_queue = true;
1209 same_sta = mvm->queue_info[queue].ra_sta_id == mvmsta->sta_id;
1211 mvm->queue_info[queue].status = IWL_MVM_QUEUE_RESERVED;
1213 spin_unlock_bh(&mvm->queue_info_lock);
1215 mvmsta->reserved_queue = queue;
1217 if (using_inactive_queue)
1218 iwl_mvm_free_inactive_queue(mvm, queue, same_sta);
1220 IWL_DEBUG_TX_QUEUES(mvm, "Reserving data queue #%d for sta_id %d\n",
1221 queue, mvmsta->sta_id);
1227 * In DQA mode, after a HW restart the queues should be allocated as before, in
1228 * order to avoid race conditions when there are shared queues. This function
1229 * does the re-mapping and queue allocation.
1231 * Note that re-enabling aggregations isn't done in this function.
1233 static void iwl_mvm_realloc_queues_after_restart(struct iwl_mvm *mvm,
1234 struct iwl_mvm_sta *mvm_sta)
1236 unsigned int wdg_timeout =
1237 iwl_mvm_get_wd_timeout(mvm, mvm_sta->vif, false, false);
1239 struct iwl_trans_txq_scd_cfg cfg = {
1240 .sta_id = mvm_sta->sta_id,
1241 .frame_limit = IWL_FRAME_LIMIT,
1244 /* Make sure reserved queue is still marked as such (if allocated) */
1245 if (mvm_sta->reserved_queue != IEEE80211_INVAL_HW_QUEUE)
1246 mvm->queue_info[mvm_sta->reserved_queue].status =
1247 IWL_MVM_QUEUE_RESERVED;
1249 for (i = 0; i <= IWL_MAX_TID_COUNT; i++) {
1250 struct iwl_mvm_tid_data *tid_data = &mvm_sta->tid_data[i];
1251 int txq_id = tid_data->txq_id;
1255 if (txq_id == IWL_MVM_INVALID_QUEUE)
1258 skb_queue_head_init(&tid_data->deferred_tx_frames);
1260 ac = tid_to_mac80211_ac[i];
1261 mac_queue = mvm_sta->vif->hw_queue[ac];
1263 if (iwl_mvm_has_new_tx_api(mvm)) {
1264 IWL_DEBUG_TX_QUEUES(mvm,
1265 "Re-mapping sta %d tid %d\n",
1266 mvm_sta->sta_id, i);
1267 txq_id = iwl_mvm_tvqm_enable_txq(mvm, mac_queue,
1270 tid_data->txq_id = txq_id;
1272 u16 seq = IEEE80211_SEQ_TO_SN(tid_data->seq_number);
1275 cfg.fifo = iwl_mvm_mac_ac_to_tx_fifo(mvm, ac);
1276 cfg.aggregate = (txq_id >= IWL_MVM_DQA_MIN_DATA_QUEUE ||
1278 IWL_MVM_DQA_BSS_CLIENT_QUEUE);
1280 IWL_DEBUG_TX_QUEUES(mvm,
1281 "Re-mapping sta %d tid %d to queue %d\n",
1282 mvm_sta->sta_id, i, txq_id);
1284 iwl_mvm_enable_txq(mvm, txq_id, mac_queue, seq, &cfg,
1286 mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_READY;
1291 static int iwl_mvm_add_int_sta_common(struct iwl_mvm *mvm,
1292 struct iwl_mvm_int_sta *sta,
1294 u16 mac_id, u16 color)
1296 struct iwl_mvm_add_sta_cmd cmd;
1298 u32 status = ADD_STA_SUCCESS;
1300 lockdep_assert_held(&mvm->mutex);
1302 memset(&cmd, 0, sizeof(cmd));
1303 cmd.sta_id = sta->sta_id;
1304 cmd.mac_id_n_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mac_id,
1306 if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE))
1307 cmd.station_type = sta->type;
1309 if (!iwl_mvm_has_new_tx_api(mvm))
1310 cmd.tfd_queue_msk = cpu_to_le32(sta->tfd_queue_msk);
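/* internal stations never use aggregation - keep TX aggregation disabled on all TIDs */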
1311 cmd.tid_disable_tx = cpu_to_le16(0xffff);
1314 memcpy(cmd.addr, addr, ETH_ALEN);
1316 ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
1317 iwl_mvm_add_sta_cmd_size(mvm),
1322 switch (status & IWL_ADD_STA_STATUS_MASK) {
1323 case ADD_STA_SUCCESS:
1324 IWL_DEBUG_INFO(mvm, "Internal station added.\n");
1328 IWL_ERR(mvm, "Add internal station failed, status=0x%x\n",
1335 int iwl_mvm_add_sta(struct iwl_mvm *mvm,
1336 struct ieee80211_vif *vif,
1337 struct ieee80211_sta *sta)
1339 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
1340 struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
1341 struct iwl_mvm_rxq_dup_data *dup_data;
1343 bool sta_update = false;
1344 unsigned int sta_flags = 0;
1346 lockdep_assert_held(&mvm->mutex);
1348 if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
1349 sta_id = iwl_mvm_find_free_sta_id(mvm,
1350 ieee80211_vif_type_p2p(vif));
1352 sta_id = mvm_sta->sta_id;
1354 if (sta_id == IWL_MVM_INVALID_STA)
1357 spin_lock_init(&mvm_sta->lock);
1359 /* if this is a HW restart re-alloc existing queues */
1360 if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
1361 struct iwl_mvm_int_sta tmp_sta = {
1363 .type = mvm_sta->sta_type,
1367 * First add an empty station since allocating
1368 * a queue requires a valid station
1370 ret = iwl_mvm_add_int_sta_common(mvm, &tmp_sta, sta->addr,
1371 mvmvif->id, mvmvif->color);
1375 iwl_mvm_realloc_queues_after_restart(mvm, mvm_sta);
1377 sta_flags = iwl_mvm_has_new_tx_api(mvm) ? 0 : STA_MODIFY_QUEUES;
1381 mvm_sta->sta_id = sta_id;
1382 mvm_sta->mac_id_n_color = FW_CMD_ID_AND_COLOR(mvmvif->id,
1385 if (!mvm->trans->cfg->gen2)
1386 mvm_sta->max_agg_bufsize = LINK_QUAL_AGG_FRAME_LIMIT_DEF;
1388 mvm_sta->max_agg_bufsize = LINK_QUAL_AGG_FRAME_LIMIT_GEN2_DEF;
1389 mvm_sta->tx_protection = 0;
1390 mvm_sta->tt_tx_protection = false;
1391 mvm_sta->sta_type = sta->tdls ? IWL_STA_TDLS_LINK : IWL_STA_LINK;
1393 /* HW restart, don't assume the memory has been zeroed */
1394 mvm_sta->tid_disable_agg = 0xffff; /* No aggs at first */
1395 mvm_sta->tfd_queue_msk = 0;
1397 /* for HW restart - reset everything but the sequence number */
1398 for (i = 0; i <= IWL_MAX_TID_COUNT; i++) {
1399 u16 seq = mvm_sta->tid_data[i].seq_number;
1400 memset(&mvm_sta->tid_data[i], 0, sizeof(mvm_sta->tid_data[i]));
1401 mvm_sta->tid_data[i].seq_number = seq;
1404 * Mark all queues for this STA as unallocated and defer TX
1405 * frames until the queue is allocated
1407 mvm_sta->tid_data[i].txq_id = IWL_MVM_INVALID_QUEUE;
1408 skb_queue_head_init(&mvm_sta->tid_data[i].deferred_tx_frames);
1410 mvm_sta->deferred_traffic_tid_map = 0;
1411 mvm_sta->agg_tids = 0;
1413 if (iwl_mvm_has_new_rx_api(mvm) &&
1414 !test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
1417 dup_data = kcalloc(mvm->trans->num_rx_queues,
1418 sizeof(*dup_data), GFP_KERNEL);
1422 * Initialize all the last_seq values to 0xffff which can never
1423 * compare equal to the frame's seq_ctrl in the check in
1424 * iwl_mvm_is_dup() since the lower 4 bits are the fragment
1425 * number and fragmented packets don't reach that function.
1427 * This thus allows receiving a packet with seqno 0 and the
1428 * retry bit set as the very first packet on a new TID.
1430 for (q = 0; q < mvm->trans->num_rx_queues; q++)
1431 memset(dup_data[q].last_seq, 0xff,
1432 sizeof(dup_data[q].last_seq));
1433 mvm_sta->dup_data = dup_data;
1436 if (!iwl_mvm_has_new_tx_api(mvm)) {
1437 ret = iwl_mvm_reserve_sta_stream(mvm, sta,
1438 ieee80211_vif_type_p2p(vif));
1444 ret = iwl_mvm_sta_send_to_fw(mvm, sta, sta_update, sta_flags);
1448 if (vif->type == NL80211_IFTYPE_STATION) {
1450 WARN_ON(mvmvif->ap_sta_id != IWL_MVM_INVALID_STA);
1451 mvmvif->ap_sta_id = sta_id;
1453 WARN_ON(mvmvif->ap_sta_id == IWL_MVM_INVALID_STA);
1457 rcu_assign_pointer(mvm->fw_id_to_mac_id[sta_id], sta);
1465 int iwl_mvm_drain_sta(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta,
1468 struct iwl_mvm_add_sta_cmd cmd = {};
1472 lockdep_assert_held(&mvm->mutex);
1474 cmd.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color);
1475 cmd.sta_id = mvmsta->sta_id;
1476 cmd.add_modify = STA_MODE_MODIFY;
1477 cmd.station_flags = drain ? cpu_to_le32(STA_FLG_DRAIN_FLOW) : 0;
1478 cmd.station_flags_msk = cpu_to_le32(STA_FLG_DRAIN_FLOW);
1480 status = ADD_STA_SUCCESS;
1481 ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
1482 iwl_mvm_add_sta_cmd_size(mvm),
1487 switch (status & IWL_ADD_STA_STATUS_MASK) {
1488 case ADD_STA_SUCCESS:
1489 IWL_DEBUG_INFO(mvm, "Frames for staid %d will be drained in fw\n",
1494 IWL_ERR(mvm, "Couldn't drain frames for staid %d\n",
1503 * Remove a station from the FW table. Before sending the command to remove
1504 * the station, validate that the station is indeed known to the driver (a
1505 * sanity check only).
1507 static int iwl_mvm_rm_sta_common(struct iwl_mvm *mvm, u8 sta_id)
1509 struct ieee80211_sta *sta;
1510 struct iwl_mvm_rm_sta_cmd rm_sta_cmd = {
1515 sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
1516 lockdep_is_held(&mvm->mutex));
1518 /* Note: internal stations are marked as error values */
1520 IWL_ERR(mvm, "Invalid station id\n");
1524 ret = iwl_mvm_send_cmd_pdu(mvm, REMOVE_STA, 0,
1525 sizeof(rm_sta_cmd), &rm_sta_cmd);
1527 IWL_ERR(mvm, "Failed to remove station. Id=%d\n", sta_id);
1534 static void iwl_mvm_disable_sta_queues(struct iwl_mvm *mvm,
1535 struct ieee80211_vif *vif,
1536 struct iwl_mvm_sta *mvm_sta)
1541 lockdep_assert_held(&mvm->mutex);
1543 for (i = 0; i < ARRAY_SIZE(mvm_sta->tid_data); i++) {
1544 if (mvm_sta->tid_data[i].txq_id == IWL_MVM_INVALID_QUEUE)
1547 ac = iwl_mvm_tid_to_ac_queue(i);
1548 iwl_mvm_disable_txq(mvm, mvm_sta->tid_data[i].txq_id,
1549 vif->hw_queue[ac], i, 0);
1550 mvm_sta->tid_data[i].txq_id = IWL_MVM_INVALID_QUEUE;
1554 int iwl_mvm_wait_sta_queues_empty(struct iwl_mvm *mvm,
1555 struct iwl_mvm_sta *mvm_sta)
1559 for (i = 0; i < ARRAY_SIZE(mvm_sta->tid_data); i++) {
1563 spin_lock_bh(&mvm_sta->lock);
1564 txq_id = mvm_sta->tid_data[i].txq_id;
1565 spin_unlock_bh(&mvm_sta->lock);
1567 if (txq_id == IWL_MVM_INVALID_QUEUE)
1570 ret = iwl_trans_wait_txq_empty(mvm->trans, txq_id);
1578 int iwl_mvm_rm_sta(struct iwl_mvm *mvm,
1579 struct ieee80211_vif *vif,
1580 struct ieee80211_sta *sta)
1582 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
1583 struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
1584 u8 sta_id = mvm_sta->sta_id;
1587 lockdep_assert_held(&mvm->mutex);
1589 if (iwl_mvm_has_new_rx_api(mvm))
1590 kfree(mvm_sta->dup_data);
1592 ret = iwl_mvm_drain_sta(mvm, mvm_sta, true);
1596 /* flush its queues here since we are freeing mvm_sta */
1597 ret = iwl_mvm_flush_sta(mvm, mvm_sta, false, 0);
1600 if (iwl_mvm_has_new_tx_api(mvm)) {
1601 ret = iwl_mvm_wait_sta_queues_empty(mvm, mvm_sta);
1603 u32 q_mask = mvm_sta->tfd_queue_msk;
1605 ret = iwl_trans_wait_tx_queues_empty(mvm->trans,
1611 ret = iwl_mvm_drain_sta(mvm, mvm_sta, false);
1613 iwl_mvm_disable_sta_queues(mvm, vif, mvm_sta);
1615 /* If there is a TXQ still marked as reserved - free it */
1616 if (mvm_sta->reserved_queue != IEEE80211_INVAL_HW_QUEUE) {
1617 u8 reserved_txq = mvm_sta->reserved_queue;
1618 enum iwl_mvm_queue_status *status;
1621 * If no traffic has gone through the reserved TXQ - it
1622 * is still marked as IWL_MVM_QUEUE_RESERVED, and
1623 * should be manually marked as free again
1625 spin_lock_bh(&mvm->queue_info_lock);
1626 status = &mvm->queue_info[reserved_txq].status;
1627 if (WARN((*status != IWL_MVM_QUEUE_RESERVED) &&
1628 (*status != IWL_MVM_QUEUE_FREE),
1629 "sta_id %d reserved txq %d status %d",
1630 sta_id, reserved_txq, *status)) {
1631 spin_unlock_bh(&mvm->queue_info_lock);
1635 *status = IWL_MVM_QUEUE_FREE;
1636 spin_unlock_bh(&mvm->queue_info_lock);
1639 if (vif->type == NL80211_IFTYPE_STATION &&
1640 mvmvif->ap_sta_id == sta_id) {
1641 /* if associated - we can't remove the AP STA now */
1642 if (vif->bss_conf.assoc)
1645 /* unassoc - go ahead - remove the AP STA now */
1646 mvmvif->ap_sta_id = IWL_MVM_INVALID_STA;
1648 /* clear d0i3_ap_sta_id if no longer relevant */
1649 if (mvm->d0i3_ap_sta_id == sta_id)
1650 mvm->d0i3_ap_sta_id = IWL_MVM_INVALID_STA;
1654 * This shouldn't happen - the TDLS channel switch should be canceled
1655 * before the STA is removed.
1657 if (WARN_ON_ONCE(mvm->tdls_cs.peer.sta_id == sta_id)) {
1658 mvm->tdls_cs.peer.sta_id = IWL_MVM_INVALID_STA;
1659 cancel_delayed_work(&mvm->tdls_cs.dwork);
1663 * Make sure that the tx response code sees the station as -EBUSY and
1664 * calls the drain worker.
1666 spin_lock_bh(&mvm_sta->lock);
1667 spin_unlock_bh(&mvm_sta->lock);
1669 ret = iwl_mvm_rm_sta_common(mvm, mvm_sta->sta_id);
1670 RCU_INIT_POINTER(mvm->fw_id_to_mac_id[mvm_sta->sta_id], NULL);
1675 int iwl_mvm_rm_sta_id(struct iwl_mvm *mvm,
1676 struct ieee80211_vif *vif,
1679 int ret = iwl_mvm_rm_sta_common(mvm, sta_id);
1681 lockdep_assert_held(&mvm->mutex);
1683 RCU_INIT_POINTER(mvm->fw_id_to_mac_id[sta_id], NULL);
1687 int iwl_mvm_allocate_int_sta(struct iwl_mvm *mvm,
1688 struct iwl_mvm_int_sta *sta,
1689 u32 qmask, enum nl80211_iftype iftype,
1690 enum iwl_sta_type type)
1692 if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) ||
1693 sta->sta_id == IWL_MVM_INVALID_STA) {
1694 sta->sta_id = iwl_mvm_find_free_sta_id(mvm, iftype);
1695 if (WARN_ON_ONCE(sta->sta_id == IWL_MVM_INVALID_STA))
1699 sta->tfd_queue_msk = qmask;
1702 /* put a non-NULL value so iterating over the stations won't stop */
1703 rcu_assign_pointer(mvm->fw_id_to_mac_id[sta->sta_id], ERR_PTR(-EINVAL));
1707 void iwl_mvm_dealloc_int_sta(struct iwl_mvm *mvm, struct iwl_mvm_int_sta *sta)
1709 RCU_INIT_POINTER(mvm->fw_id_to_mac_id[sta->sta_id], NULL);
1710 memset(sta, 0, sizeof(struct iwl_mvm_int_sta));
1711 sta->sta_id = IWL_MVM_INVALID_STA;
1714 static void iwl_mvm_enable_aux_snif_queue(struct iwl_mvm *mvm, u16 *queue,
1717 unsigned int wdg_timeout = iwlmvm_mod_params.tfd_q_hang_detect ?
1718 mvm->cfg->base_params->wd_timeout :
1719 IWL_WATCHDOG_DISABLED;
1721 if (iwl_mvm_has_new_tx_api(mvm)) {
1723 iwl_mvm_tvqm_enable_txq(mvm, *queue, sta_id,
1726 *queue = tvqm_queue;
1728 struct iwl_trans_txq_scd_cfg cfg = {
1731 .tid = IWL_MAX_TID_COUNT,
1733 .frame_limit = IWL_FRAME_LIMIT,
1736 iwl_mvm_enable_txq(mvm, *queue, *queue, 0, &cfg, wdg_timeout);
1740 int iwl_mvm_add_aux_sta(struct iwl_mvm *mvm)
1744 lockdep_assert_held(&mvm->mutex);
1746 /* Allocate aux station and assign to it the aux queue */
1747 ret = iwl_mvm_allocate_int_sta(mvm, &mvm->aux_sta, BIT(mvm->aux_queue),
1748 NL80211_IFTYPE_UNSPECIFIED,
1749 IWL_STA_AUX_ACTIVITY);
1753 /* Map Aux queue to fifo - needs to happen before adding Aux station */
1754 if (!iwl_mvm_has_new_tx_api(mvm))
1755 iwl_mvm_enable_aux_snif_queue(mvm, &mvm->aux_queue,
1756 mvm->aux_sta.sta_id,
1757 IWL_MVM_TX_FIFO_MCAST);
1759 ret = iwl_mvm_add_int_sta_common(mvm, &mvm->aux_sta, NULL,
1762 iwl_mvm_dealloc_int_sta(mvm, &mvm->aux_sta);
1767 * For a000 firmware and on we cannot add a queue to a station unknown
1768 * to the firmware, so enable the queue here - after the station was added
1770 if (iwl_mvm_has_new_tx_api(mvm))
1771 iwl_mvm_enable_aux_snif_queue(mvm, &mvm->aux_queue,
1772 mvm->aux_sta.sta_id,
1773 IWL_MVM_TX_FIFO_MCAST);
1778 int iwl_mvm_add_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
1780 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
1783 lockdep_assert_held(&mvm->mutex);
1785 /* Map snif queue to fifo - must happen before adding snif station */
1786 if (!iwl_mvm_has_new_tx_api(mvm))
1787 iwl_mvm_enable_aux_snif_queue(mvm, &mvm->snif_queue,
1788 mvm->snif_sta.sta_id,
1789 IWL_MVM_TX_FIFO_BE);
1791 ret = iwl_mvm_add_int_sta_common(mvm, &mvm->snif_sta, vif->addr,
1797 * For 22000 firmware and on we cannot add a queue to a station unknown
1798 * to the firmware, so enable the queue here - after the station was added
1800 if (iwl_mvm_has_new_tx_api(mvm))
1801 iwl_mvm_enable_aux_snif_queue(mvm, &mvm->snif_queue,
1802 mvm->snif_sta.sta_id,
1803 IWL_MVM_TX_FIFO_BE);
1808 int iwl_mvm_rm_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
1812 lockdep_assert_held(&mvm->mutex);
1814 iwl_mvm_disable_txq(mvm, mvm->snif_queue, mvm->snif_queue,
1815 IWL_MAX_TID_COUNT, 0);
1816 ret = iwl_mvm_rm_sta_common(mvm, mvm->snif_sta.sta_id);
1818 IWL_WARN(mvm, "Failed sending remove station\n");
1823 void iwl_mvm_dealloc_snif_sta(struct iwl_mvm *mvm)
1825 iwl_mvm_dealloc_int_sta(mvm, &mvm->snif_sta);
1828 void iwl_mvm_del_aux_sta(struct iwl_mvm *mvm)
1830 lockdep_assert_held(&mvm->mutex);
1832 iwl_mvm_dealloc_int_sta(mvm, &mvm->aux_sta);
1836 * Send the add station command for the vif's broadcast station.
1837 * Assumes that the station was already allocated.
1839 * @mvm: the mvm component
1840 * @vif: the interface to which the broadcast station is added
1841 * @bsta: the broadcast station to add.
1843 int iwl_mvm_send_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
1845 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
1846 struct iwl_mvm_int_sta *bsta = &mvmvif->bcast_sta;
1847 static const u8 _baddr[] = {0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF};
1848 const u8 *baddr = _baddr;
1851 unsigned int wdg_timeout =
1852 iwl_mvm_get_wd_timeout(mvm, vif, false, false);
1853 struct iwl_trans_txq_scd_cfg cfg = {
1854 .fifo = IWL_MVM_TX_FIFO_VO,
1855 .sta_id = mvmvif->bcast_sta.sta_id,
1856 .tid = IWL_MAX_TID_COUNT,
1858 .frame_limit = IWL_FRAME_LIMIT,
1861 lockdep_assert_held(&mvm->mutex);
1863 if (!iwl_mvm_has_new_tx_api(mvm)) {
1864 if (vif->type == NL80211_IFTYPE_AP ||
1865 vif->type == NL80211_IFTYPE_ADHOC)
1866 queue = mvm->probe_queue;
1867 else if (vif->type == NL80211_IFTYPE_P2P_DEVICE)
1868 queue = mvm->p2p_dev_queue;
1869 else if (WARN(1, "Missing required TXQ for adding bcast STA\n"))
1872 bsta->tfd_queue_msk |= BIT(queue);
1874 iwl_mvm_enable_txq(mvm, queue, vif->hw_queue[0], 0,
1878 if (vif->type == NL80211_IFTYPE_ADHOC)
1879 baddr = vif->bss_conf.bssid;
1881 if (WARN_ON_ONCE(bsta->sta_id == IWL_MVM_INVALID_STA))
1884 ret = iwl_mvm_add_int_sta_common(mvm, bsta, baddr,
1885 mvmvif->id, mvmvif->color);
1890 * For a000 firmware and on we cannot add a queue to a station unknown
1891 * to the firmware, so enable the queue here - after the station was added
1893 if (iwl_mvm_has_new_tx_api(mvm)) {
1894 queue = iwl_mvm_tvqm_enable_txq(mvm, vif->hw_queue[0],
1899 if (vif->type == NL80211_IFTYPE_AP ||
1900 vif->type == NL80211_IFTYPE_ADHOC)
1901 mvm->probe_queue = queue;
1902 else if (vif->type == NL80211_IFTYPE_P2P_DEVICE)
1903 mvm->p2p_dev_queue = queue;
1909 static void iwl_mvm_free_bcast_sta_queues(struct iwl_mvm *mvm,
1910 struct ieee80211_vif *vif)
1912 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
1915 lockdep_assert_held(&mvm->mutex);
1917 iwl_mvm_flush_sta(mvm, &mvmvif->bcast_sta, true, 0);
1919 switch (vif->type) {
1920 case NL80211_IFTYPE_AP:
1921 case NL80211_IFTYPE_ADHOC:
1922 queue = mvm->probe_queue;
1924 case NL80211_IFTYPE_P2P_DEVICE:
1925 queue = mvm->p2p_dev_queue;
1928 WARN(1, "Can't free bcast queue on vif type %d\n",
1933 iwl_mvm_disable_txq(mvm, queue, vif->hw_queue[0], IWL_MAX_TID_COUNT, 0);
1934 if (iwl_mvm_has_new_tx_api(mvm))
1937 WARN_ON(!(mvmvif->bcast_sta.tfd_queue_msk & BIT(queue)));
1938 mvmvif->bcast_sta.tfd_queue_msk &= ~BIT(queue);
1941 /* Send the FW a request to remove the station from its internal data
1942 * structures, but DO NOT remove the entry from the local data structures. */
1943 int iwl_mvm_send_rm_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
1945 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
1948 lockdep_assert_held(&mvm->mutex);
1950 iwl_mvm_free_bcast_sta_queues(mvm, vif);
1952 ret = iwl_mvm_rm_sta_common(mvm, mvmvif->bcast_sta.sta_id);
1954 IWL_WARN(mvm, "Failed sending remove station\n");
1958 int iwl_mvm_alloc_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
1960 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
1962 lockdep_assert_held(&mvm->mutex);
1964 return iwl_mvm_allocate_int_sta(mvm, &mvmvif->bcast_sta, 0,
1965 ieee80211_vif_type_p2p(vif),
1966 IWL_STA_GENERAL_PURPOSE);
1969 /* Allocate a new station entry for the broadcast station to the given vif,
1970 * and send it to the FW.
1971 * Note that each P2P mac should have its own broadcast station.
1973 * @mvm: the mvm component
1974 * @vif: the interface to which the broadcast station is added
1975 * @bsta: the broadcast station to add. */
1976 int iwl_mvm_add_p2p_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
1978 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
1979 struct iwl_mvm_int_sta *bsta = &mvmvif->bcast_sta;
1982 lockdep_assert_held(&mvm->mutex);
1984 ret = iwl_mvm_alloc_bcast_sta(mvm, vif);
1988 ret = iwl_mvm_send_add_bcast_sta(mvm, vif);
1991 iwl_mvm_dealloc_int_sta(mvm, bsta);
1996 void iwl_mvm_dealloc_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
1998 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
2000 iwl_mvm_dealloc_int_sta(mvm, &mvmvif->bcast_sta);
2004 * Send the FW a request to remove the station from its internal data
2005 * structures, and in addition remove it from the local data structure.
2007 int iwl_mvm_rm_p2p_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
2011 lockdep_assert_held(&mvm->mutex);
2013 ret = iwl_mvm_send_rm_bcast_sta(mvm, vif);
2015 iwl_mvm_dealloc_bcast_sta(mvm, vif);
2021 * Allocate a new station entry for the multicast station to the given vif,
2022 * and send it to the FW.
2023 * Note that each AP/GO mac should have its own multicast station.
2025 * @mvm: the mvm component
2026 * @vif: the interface to which the multicast station is added
2028 int iwl_mvm_add_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
2030 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
2031 struct iwl_mvm_int_sta *msta = &mvmvif->mcast_sta;
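/* 03:00:00:00:00:00 is a dummy group (multicast) address used as the RA of the internal mcast station */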
2032 static const u8 _maddr[] = {0x03, 0x00, 0x00, 0x00, 0x00, 0x00};
2033 const u8 *maddr = _maddr;
2034 struct iwl_trans_txq_scd_cfg cfg = {
2035 .fifo = IWL_MVM_TX_FIFO_MCAST,
2036 .sta_id = msta->sta_id,
2039 .frame_limit = IWL_FRAME_LIMIT,
2041 unsigned int timeout = iwl_mvm_get_wd_timeout(mvm, vif, false, false);
2044 lockdep_assert_held(&mvm->mutex);
2046 if (WARN_ON(vif->type != NL80211_IFTYPE_AP &&
2047 vif->type != NL80211_IFTYPE_ADHOC))
2051 * In IBSS, ieee80211_check_queues() sets the cab_queue to be
2052 * invalid, so make sure we use the queue we want.
2053 * Note that this is done here as we want to avoid making DQA
2054 * changes in mac80211 layer.
2056 if (vif->type == NL80211_IFTYPE_ADHOC) {
2057 vif->cab_queue = IWL_MVM_DQA_GCAST_QUEUE;
2058 mvmvif->cab_queue = vif->cab_queue;
2062 * While in previous FWs we had to exclude cab queue from TFD queue
2063 * mask, now it is needed like any other queue.
2065 if (!iwl_mvm_has_new_tx_api(mvm) &&
2066 fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE)) {
2067 iwl_mvm_enable_txq(mvm, vif->cab_queue, vif->cab_queue, 0,
2069 msta->tfd_queue_msk |= BIT(vif->cab_queue);
2071 ret = iwl_mvm_add_int_sta_common(mvm, msta, maddr,
2072 mvmvif->id, mvmvif->color);
2074 iwl_mvm_dealloc_int_sta(mvm, msta);
2079 * Enable cab queue after the ADD_STA command is sent.
2080 * This is needed for a000 firmware which won't accept SCD_QUEUE_CFG
2081 * command with unknown station id, and for FW that doesn't support
2082 * station API since the cab queue is not included in the
2083 * TFD queue mask.
2085 if (iwl_mvm_has_new_tx_api(mvm)) {
2086 int queue = iwl_mvm_tvqm_enable_txq(mvm, vif->cab_queue,
2090 mvmvif->cab_queue = queue;
2091 } else if (!fw_has_api(&mvm->fw->ucode_capa,
2092 IWL_UCODE_TLV_API_STA_TYPE))
2093 iwl_mvm_enable_txq(mvm, vif->cab_queue, vif->cab_queue, 0,
2100 * Send the FW a request to remove the station from its internal data
2101 * structures, and in addition remove it from the local data structure.
2103 int iwl_mvm_rm_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
2105 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
2108 lockdep_assert_held(&mvm->mutex);
2110 iwl_mvm_flush_sta(mvm, &mvmvif->mcast_sta, true, 0);
2112 iwl_mvm_disable_txq(mvm, mvmvif->cab_queue, vif->cab_queue,
2115 ret = iwl_mvm_rm_sta_common(mvm, mvmvif->mcast_sta.sta_id);
2117 IWL_WARN(mvm, "Failed sending remove station\n");
2122 #define IWL_MAX_RX_BA_SESSIONS 16
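/*
 * Notify all RX queues that the given BAID is going away, so that any
 * per-queue reorder state for it can be torn down in sync.
 */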
2124 static void iwl_mvm_sync_rxq_del_ba(struct iwl_mvm *mvm, u8 baid)
2126 struct iwl_mvm_delba_notif notif = {
2127 .metadata.type = IWL_MVM_RXQ_NOTIF_DEL_BA,
2131 	iwl_mvm_sync_rx_queues_internal(mvm, (void *)&notif, sizeof(notif));
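/*
 * Free the per-queue reorder buffers of a BA session: sync the RX queues,
 * purge any frames still held and make sure the reorder timer cannot be
 * re-armed afterwards.
 */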
2134 static void iwl_mvm_free_reorder(struct iwl_mvm *mvm,
2135 struct iwl_mvm_baid_data *data)
2139 iwl_mvm_sync_rxq_del_ba(mvm, data->baid);
2141 for (i = 0; i < mvm->trans->num_rx_queues; i++) {
2143 struct iwl_mvm_reorder_buffer *reorder_buf =
2144 &data->reorder_buf[i];
2146 spin_lock_bh(&reorder_buf->lock);
2147 if (likely(!reorder_buf->num_stored)) {
2148 spin_unlock_bh(&reorder_buf->lock);
2153 * This shouldn't happen in regular DELBA since the internal
2154 * delBA notification should trigger a release of all frames in
2155 * the reorder buffer.
2159 for (j = 0; j < reorder_buf->buf_size; j++)
2160 __skb_queue_purge(&reorder_buf->entries[j]);
2162 		 * Prevent timer re-arm. This prevents a very far-fetched case
2163 		 * where we timed out on the notification. There may be prior
2164 		 * RX frames pending in the RX queue before the notification
2165 		 * that might get processed between now and the actual deletion
2166 		 * and we would re-arm the timer although we are deleting the reorder buffer.
2169 reorder_buf->removed = true;
2170 spin_unlock_bh(&reorder_buf->lock);
2171 del_timer_sync(&reorder_buf->reorder_timer);
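/* Initialize the per-RX-queue reorder buffers for a new BA session */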
2175 static void iwl_mvm_init_reorder_buffer(struct iwl_mvm *mvm,
2177 struct iwl_mvm_baid_data *data,
2178 u16 ssn, u8 buf_size)
2182 for (i = 0; i < mvm->trans->num_rx_queues; i++) {
2183 struct iwl_mvm_reorder_buffer *reorder_buf =
2184 &data->reorder_buf[i];
2187 reorder_buf->num_stored = 0;
2188 reorder_buf->head_sn = ssn;
2189 reorder_buf->buf_size = buf_size;
2190 /* rx reorder timer */
2191 reorder_buf->reorder_timer.function =
2192 iwl_mvm_reorder_timer_expired;
2193 reorder_buf->reorder_timer.data = (unsigned long)reorder_buf;
2194 init_timer(&reorder_buf->reorder_timer);
2195 spin_lock_init(&reorder_buf->lock);
2196 reorder_buf->mvm = mvm;
2197 reorder_buf->queue = i;
2198 reorder_buf->sta_id = sta_id;
2199 reorder_buf->valid = false;
2200 for (j = 0; j < reorder_buf->buf_size; j++)
2201 __skb_queue_head_init(&reorder_buf->entries[j]);
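/*
 * Start or stop an RX aggregation (BA) session for the given station and
 * TID by sending an ADD_STA command; on new-RX-API firmware this also
 * allocates and publishes the BAID reorder data.
 */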
2205 int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
2206 int tid, u16 ssn, bool start, u8 buf_size, u16 timeout)
2208 struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
2209 struct iwl_mvm_add_sta_cmd cmd = {};
2210 struct iwl_mvm_baid_data *baid_data = NULL;
2214 lockdep_assert_held(&mvm->mutex);
2216 if (start && mvm->rx_ba_sessions >= IWL_MAX_RX_BA_SESSIONS) {
2217 IWL_WARN(mvm, "Not enough RX BA SESSIONS\n");
2221 if (iwl_mvm_has_new_rx_api(mvm) && start) {
2223 		 * Allocate here so that if allocation fails we can bail out early
2224 * before starting the BA session in the firmware
2226 baid_data = kzalloc(sizeof(*baid_data) +
2227 mvm->trans->num_rx_queues *
2228 sizeof(baid_data->reorder_buf[0]),
2234 cmd.mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color);
2235 cmd.sta_id = mvm_sta->sta_id;
2236 cmd.add_modify = STA_MODE_MODIFY;
2238 cmd.add_immediate_ba_tid = (u8) tid;
2239 cmd.add_immediate_ba_ssn = cpu_to_le16(ssn);
2240 cmd.rx_ba_window = cpu_to_le16((u16)buf_size);
2242 cmd.remove_immediate_ba_tid = (u8) tid;
2244 cmd.modify_mask = start ? STA_MODIFY_ADD_BA_TID :
2245 STA_MODIFY_REMOVE_BA_TID;
2247 status = ADD_STA_SUCCESS;
2248 ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
2249 iwl_mvm_add_sta_cmd_size(mvm),
2254 switch (status & IWL_ADD_STA_STATUS_MASK) {
2255 case ADD_STA_SUCCESS:
2256 IWL_DEBUG_HT(mvm, "RX BA Session %sed in fw\n",
2257 start ? "start" : "stopp");
2259 case ADD_STA_IMMEDIATE_BA_FAILURE:
2260 IWL_WARN(mvm, "RX BA Session refused by fw\n");
2265 IWL_ERR(mvm, "RX BA Session failed %sing, status 0x%x\n",
2266 start ? "start" : "stopp", status);
2276 mvm->rx_ba_sessions++;
2278 if (!iwl_mvm_has_new_rx_api(mvm))
2281 if (WARN_ON(!(status & IWL_ADD_STA_BAID_VALID_MASK))) {
2285 baid = (u8)((status & IWL_ADD_STA_BAID_MASK) >>
2286 IWL_ADD_STA_BAID_SHIFT);
2287 baid_data->baid = baid;
2288 baid_data->timeout = timeout;
2289 baid_data->last_rx = jiffies;
2290 setup_timer(&baid_data->session_timer,
2291 iwl_mvm_rx_agg_session_expired,
2292 (unsigned long)&mvm->baid_map[baid]);
2293 baid_data->mvm = mvm;
2294 baid_data->tid = tid;
2295 baid_data->sta_id = mvm_sta->sta_id;
2297 mvm_sta->tid_to_baid[tid] = baid;
2299 mod_timer(&baid_data->session_timer,
2300 TU_TO_EXP_TIME(timeout * 2));
2302 iwl_mvm_init_reorder_buffer(mvm, mvm_sta->sta_id,
2303 baid_data, ssn, buf_size);
2305 * protect the BA data with RCU to cover a case where our
2306 		 * internal RX sync mechanism will time out (not that it's
2307 * supposed to happen) and we will free the session data while
2308 * RX is being processed in parallel
2310 IWL_DEBUG_HT(mvm, "Sta %d(%d) is assigned to BAID %d\n",
2311 mvm_sta->sta_id, tid, baid);
2312 WARN_ON(rcu_access_pointer(mvm->baid_map[baid]));
2313 rcu_assign_pointer(mvm->baid_map[baid], baid_data);
2315 u8 baid = mvm_sta->tid_to_baid[tid];
2317 if (mvm->rx_ba_sessions > 0)
2318 /* check that restart flow didn't zero the counter */
2319 mvm->rx_ba_sessions--;
2320 if (!iwl_mvm_has_new_rx_api(mvm))
2323 if (WARN_ON(baid == IWL_RX_REORDER_DATA_INVALID_BAID))
2326 baid_data = rcu_access_pointer(mvm->baid_map[baid]);
2327 if (WARN_ON(!baid_data))
2330 /* synchronize all rx queues so we can safely delete */
2331 iwl_mvm_free_reorder(mvm, baid_data);
2332 del_timer_sync(&baid_data->session_timer);
2333 RCU_INIT_POINTER(mvm->baid_map[baid], NULL);
2334 kfree_rcu(baid_data, rcu_head);
2335 IWL_DEBUG_HT(mvm, "BAID %d is free\n", baid);
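/*
 * Modify the station in the firmware for TX aggregation on the given TID:
 * enable or disable aggregation on that TID and, when the firmware does not
 * use the new TX API, also update the station's TFD queue mask.
 */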
2344 int iwl_mvm_sta_tx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
2345 int tid, u8 queue, bool start)
2347 struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
2348 struct iwl_mvm_add_sta_cmd cmd = {};
2352 lockdep_assert_held(&mvm->mutex);
2355 mvm_sta->tfd_queue_msk |= BIT(queue);
2356 mvm_sta->tid_disable_agg &= ~BIT(tid);
2358 /* In DQA-mode the queue isn't removed on agg termination */
2359 mvm_sta->tid_disable_agg |= BIT(tid);
2362 cmd.mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color);
2363 cmd.sta_id = mvm_sta->sta_id;
2364 cmd.add_modify = STA_MODE_MODIFY;
2365 if (!iwl_mvm_has_new_tx_api(mvm))
2366 cmd.modify_mask = STA_MODIFY_QUEUES;
2367 cmd.modify_mask |= STA_MODIFY_TID_DISABLE_TX;
2368 cmd.tfd_queue_msk = cpu_to_le32(mvm_sta->tfd_queue_msk);
2369 cmd.tid_disable_tx = cpu_to_le16(mvm_sta->tid_disable_agg);
2371 status = ADD_STA_SUCCESS;
2372 ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
2373 iwl_mvm_add_sta_cmd_size(mvm),
2378 switch (status & IWL_ADD_STA_STATUS_MASK) {
2379 case ADD_STA_SUCCESS:
2383 IWL_ERR(mvm, "TX BA Session failed %sing, status 0x%x\n",
2384 start ? "start" : "stopp", status);
2391 const u8 tid_to_mac80211_ac[] = {
2400 IEEE80211_AC_VO, /* We treat MGMT as TID 8, which is set as AC_VO */
2403 static const u8 tid_to_ucode_ac[] = {
2414 int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
2415 struct ieee80211_sta *sta, u16 tid, u16 *ssn)
2417 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
2418 struct iwl_mvm_tid_data *tid_data;
2423 if (WARN_ON_ONCE(tid >= IWL_MAX_TID_COUNT))
2426 if (mvmsta->tid_data[tid].state != IWL_AGG_QUEUED &&
2427 mvmsta->tid_data[tid].state != IWL_AGG_OFF) {
2429 "Start AGG when state is not IWL_AGG_QUEUED or IWL_AGG_OFF %d!\n",
2430 mvmsta->tid_data[tid].state);
2434 lockdep_assert_held(&mvm->mutex);
2436 spin_lock_bh(&mvmsta->lock);
2438 /* possible race condition - we entered D0i3 while starting agg */
2439 if (test_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status)) {
2440 spin_unlock_bh(&mvmsta->lock);
2441 IWL_ERR(mvm, "Entered D0i3 while starting Tx agg\n");
2445 spin_lock(&mvm->queue_info_lock);
2448 * Note the possible cases:
2449 * 1. An enabled TXQ - TXQ needs to become agg'ed
2450 	 * 2. The TXQ hasn't yet been enabled, so find a free one and mark it as reserved
2453 txq_id = mvmsta->tid_data[tid].txq_id;
2454 if (txq_id == IWL_MVM_INVALID_QUEUE) {
2455 ret = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
2456 IWL_MVM_DQA_MIN_DATA_QUEUE,
2457 IWL_MVM_DQA_MAX_DATA_QUEUE);
2459 IWL_ERR(mvm, "Failed to allocate agg queue\n");
2465 /* TXQ hasn't yet been enabled, so mark it only as reserved */
2466 mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_RESERVED;
2467 } else if (WARN_ON(txq_id >= IWL_MAX_HW_QUEUES)) {
2469 		IWL_ERR(mvm, "txq_id %d out of range (0, %d)!\n",
2470 			txq_id, IWL_MAX_HW_QUEUES - 1);
2473 } else if (unlikely(mvm->queue_info[txq_id].status ==
2474 IWL_MVM_QUEUE_SHARED)) {
2476 IWL_DEBUG_TX_QUEUES(mvm,
2477 "Can't start tid %d agg on shared queue!\n",
2482 spin_unlock(&mvm->queue_info_lock);
2484 IWL_DEBUG_TX_QUEUES(mvm,
2485 "AGG for tid %d will be on queue #%d\n",
2488 tid_data = &mvmsta->tid_data[tid];
2489 tid_data->ssn = IEEE80211_SEQ_TO_SN(tid_data->seq_number);
2490 tid_data->txq_id = txq_id;
2491 *ssn = tid_data->ssn;
2493 IWL_DEBUG_TX_QUEUES(mvm,
2494 "Start AGG: sta %d tid %d queue %d - ssn = %d, next_recl = %d\n",
2495 mvmsta->sta_id, tid, txq_id, tid_data->ssn,
2496 tid_data->next_reclaimed);
2499 	 * In A000 HW, the next_reclaimed index is only 8 bits wide, so we need
2500 	 * to align the wrap-around of the ssn so that we compare relevant values.
2502 normalized_ssn = tid_data->ssn;
2503 if (mvm->trans->cfg->gen2)
2504 normalized_ssn &= 0xff;
2506 if (normalized_ssn == tid_data->next_reclaimed) {
2507 tid_data->state = IWL_AGG_STARTING;
2508 ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
2510 tid_data->state = IWL_EMPTYING_HW_QUEUE_ADDBA;
2517 spin_unlock(&mvm->queue_info_lock);
2519 spin_unlock_bh(&mvmsta->lock);
2524 int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
2525 struct ieee80211_sta *sta, u16 tid, u8 buf_size,
2528 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
2529 struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
2530 unsigned int wdg_timeout =
2531 iwl_mvm_get_wd_timeout(mvm, vif, sta->tdls, false);
2533 bool alloc_queue = true;
2534 enum iwl_mvm_queue_status queue_status;
2537 struct iwl_trans_txq_scd_cfg cfg = {
2538 .sta_id = mvmsta->sta_id,
2540 .frame_limit = buf_size,
2544 BUILD_BUG_ON((sizeof(mvmsta->agg_tids) * BITS_PER_BYTE)
2545 != IWL_MAX_TID_COUNT);
2547 if (!mvm->trans->cfg->gen2)
2548 buf_size = min_t(int, buf_size, LINK_QUAL_AGG_FRAME_LIMIT_DEF);
2550 buf_size = min_t(int, buf_size,
2551 LINK_QUAL_AGG_FRAME_LIMIT_GEN2_DEF);
2553 spin_lock_bh(&mvmsta->lock);
2554 ssn = tid_data->ssn;
2555 queue = tid_data->txq_id;
2556 tid_data->state = IWL_AGG_ON;
2557 mvmsta->agg_tids |= BIT(tid);
2558 tid_data->ssn = 0xffff;
2559 tid_data->amsdu_in_ampdu_allowed = amsdu;
2560 spin_unlock_bh(&mvmsta->lock);
2562 if (iwl_mvm_has_new_tx_api(mvm)) {
2564 		 * If there were no queue, iwl_mvm_sta_tx_agg_start() would have failed,
2565 		 * so there is no need to check the queue's status here.
2567 if (buf_size < mvmsta->max_agg_bufsize)
2570 ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true);
2576 cfg.fifo = iwl_mvm_ac_to_tx_fifo[tid_to_mac80211_ac[tid]];
2578 spin_lock_bh(&mvm->queue_info_lock);
2579 queue_status = mvm->queue_info[queue].status;
2580 spin_unlock_bh(&mvm->queue_info_lock);
2582 /* Maybe there is no need to even alloc a queue... */
2583 	if (queue_status == IWL_MVM_QUEUE_READY)
2584 alloc_queue = false;
2587 * Only reconfig the SCD for the queue if the window size has
2588 * changed from current (become smaller)
2590 if (!alloc_queue && buf_size < mvmsta->max_agg_bufsize) {
2592 		 * If reconfiguring an existing queue, it first must be drained.
2595 ret = iwl_trans_wait_tx_queues_empty(mvm->trans,
2599 "Error draining queue before reconfig\n");
2603 ret = iwl_mvm_reconfig_scd(mvm, queue, cfg.fifo,
2604 mvmsta->sta_id, tid,
2608 "Error reconfiguring TXQ #%d\n", queue);
2614 iwl_mvm_enable_txq(mvm, queue,
2615 vif->hw_queue[tid_to_mac80211_ac[tid]], ssn,
2618 /* Send ADD_STA command to enable aggs only if the queue isn't shared */
2619 if (queue_status != IWL_MVM_QUEUE_SHARED) {
2620 ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true);
2625 /* No need to mark as reserved */
2626 spin_lock_bh(&mvm->queue_info_lock);
2627 mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY;
2628 spin_unlock_bh(&mvm->queue_info_lock);
2632 * Even though in theory the peer could have different
2633 * aggregation reorder buffer sizes for different sessions,
2634 * our ucode doesn't allow for that and has a global limit
2635 * for each station. Therefore, use the minimum of all the
2636 * aggregation sessions and our default value.
2638 mvmsta->max_agg_bufsize =
2639 min(mvmsta->max_agg_bufsize, buf_size);
2640 mvmsta->lq_sta.lq.agg_frame_cnt_limit = mvmsta->max_agg_bufsize;
2642 IWL_DEBUG_HT(mvm, "Tx aggregation enabled on ra = %pM tid = %d\n",
2645 return iwl_mvm_send_lq_cmd(mvm, &mvmsta->lq_sta.lq, false);
2648 static void iwl_mvm_unreserve_agg_queue(struct iwl_mvm *mvm,
2649 struct iwl_mvm_sta *mvmsta,
2650 struct iwl_mvm_tid_data *tid_data)
2652 u16 txq_id = tid_data->txq_id;
2654 if (iwl_mvm_has_new_tx_api(mvm))
2657 spin_lock_bh(&mvm->queue_info_lock);
2659 	 * The TXQ is marked as reserved only if no traffic came through yet.
2660 	 * This means no traffic has been sent on this TID (agg'd or not), so
2661 	 * we no longer have use for the queue. Since it hasn't even been
2662 	 * allocated through iwl_mvm_enable_txq, we can just mark it back as free.
2665 if (mvm->queue_info[txq_id].status == IWL_MVM_QUEUE_RESERVED) {
2666 mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_FREE;
2667 tid_data->txq_id = IWL_MVM_INVALID_QUEUE;
2670 spin_unlock_bh(&mvm->queue_info_lock);
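/*
 * Stop a TX aggregation session in an orderly fashion, handling the
 * different states the session may be in when mac80211 asks for the stop.
 */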
2673 int iwl_mvm_sta_tx_agg_stop(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
2674 struct ieee80211_sta *sta, u16 tid)
2676 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
2677 struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
2682 * If mac80211 is cleaning its state, then say that we finished since
2683 * our state has been cleared anyway.
2685 if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
2686 ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
2690 spin_lock_bh(&mvmsta->lock);
2692 txq_id = tid_data->txq_id;
2694 IWL_DEBUG_TX_QUEUES(mvm, "Stop AGG: sta %d tid %d q %d state %d\n",
2695 mvmsta->sta_id, tid, txq_id, tid_data->state);
2697 mvmsta->agg_tids &= ~BIT(tid);
2699 iwl_mvm_unreserve_agg_queue(mvm, mvmsta, tid_data);
2701 switch (tid_data->state) {
2703 tid_data->ssn = IEEE80211_SEQ_TO_SN(tid_data->seq_number);
2705 IWL_DEBUG_TX_QUEUES(mvm,
2706 "ssn = %d, next_recl = %d\n",
2707 tid_data->ssn, tid_data->next_reclaimed);
2709 tid_data->ssn = 0xffff;
2710 tid_data->state = IWL_AGG_OFF;
2711 spin_unlock_bh(&mvmsta->lock);
2713 ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
2715 iwl_mvm_sta_tx_agg(mvm, sta, tid, txq_id, false);
2717 case IWL_AGG_STARTING:
2718 case IWL_EMPTYING_HW_QUEUE_ADDBA:
2720 * The agg session has been stopped before it was set up. This
2721 * can happen when the AddBA timer times out for example.
2724 /* No barriers since we are under mutex */
2725 lockdep_assert_held(&mvm->mutex);
2727 ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
2728 tid_data->state = IWL_AGG_OFF;
2733 "Stopping AGG while state not ON or starting for %d on %d (%d)\n",
2734 mvmsta->sta_id, tid, tid_data->state);
2736 "\ttid_data->txq_id = %d\n", tid_data->txq_id);
2740 spin_unlock_bh(&mvmsta->lock);
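/*
 * Stop a TX aggregation session immediately: mark it off, then drain and
 * flush the aggregation queue.
 */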
2745 int iwl_mvm_sta_tx_agg_flush(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
2746 struct ieee80211_sta *sta, u16 tid)
2748 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
2749 struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
2751 enum iwl_mvm_agg_state old_state;
2754 * First set the agg state to OFF to avoid calling
2755 * ieee80211_stop_tx_ba_cb in iwl_mvm_check_ratid_empty.
2757 spin_lock_bh(&mvmsta->lock);
2758 txq_id = tid_data->txq_id;
2759 IWL_DEBUG_TX_QUEUES(mvm, "Flush AGG: sta %d tid %d q %d state %d\n",
2760 mvmsta->sta_id, tid, txq_id, tid_data->state);
2761 old_state = tid_data->state;
2762 tid_data->state = IWL_AGG_OFF;
2763 mvmsta->agg_tids &= ~BIT(tid);
2764 spin_unlock_bh(&mvmsta->lock);
2766 iwl_mvm_unreserve_agg_queue(mvm, mvmsta, tid_data);
2768 if (old_state >= IWL_AGG_ON) {
2769 iwl_mvm_drain_sta(mvm, mvmsta, true);
2771 if (iwl_mvm_has_new_tx_api(mvm)) {
2772 if (iwl_mvm_flush_sta_tids(mvm, mvmsta->sta_id,
2774 IWL_ERR(mvm, "Couldn't flush the AGG queue\n");
2775 iwl_trans_wait_txq_empty(mvm->trans, txq_id);
2777 if (iwl_mvm_flush_tx_path(mvm, BIT(txq_id), 0))
2778 IWL_ERR(mvm, "Couldn't flush the AGG queue\n");
2779 iwl_trans_wait_tx_queues_empty(mvm->trans, BIT(txq_id));
2782 iwl_mvm_drain_sta(mvm, mvmsta, false);
2784 iwl_mvm_sta_tx_agg(mvm, sta, tid, txq_id, false);
2790 static int iwl_mvm_set_fw_key_idx(struct iwl_mvm *mvm)
2792 int i, max = -1, max_offs = -1;
2794 lockdep_assert_held(&mvm->mutex);
2796 /* Pick the unused key offset with the highest 'deleted'
2797 * counter. Every time a key is deleted, all the counters
2798 * are incremented and the one that was just deleted is
2799 * reset to zero. Thus, the highest counter is the one
2800 * that was deleted longest ago. Pick that one.
2802 for (i = 0; i < STA_KEY_MAX_NUM; i++) {
2803 if (test_bit(i, mvm->fw_key_table))
2805 if (mvm->fw_key_deleted[i] > max) {
2806 max = mvm->fw_key_deleted[i];
2812 return STA_KEY_IDX_INVALID;
2817 static struct iwl_mvm_sta *iwl_mvm_get_key_sta(struct iwl_mvm *mvm,
2818 struct ieee80211_vif *vif,
2819 struct ieee80211_sta *sta)
2821 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
2824 return iwl_mvm_sta_from_mac80211(sta);
2827 * The device expects GTKs for station interfaces to be
2828 * installed as GTKs for the AP station. If we have no
2829 	 * station ID, then use the AP's station ID.
2831 if (vif->type == NL80211_IFTYPE_STATION &&
2832 mvmvif->ap_sta_id != IWL_MVM_INVALID_STA) {
2833 u8 sta_id = mvmvif->ap_sta_id;
2835 sta = rcu_dereference_check(mvm->fw_id_to_mac_id[sta_id],
2836 lockdep_is_held(&mvm->mutex));
2839 * It is possible that the 'sta' parameter is NULL,
2840 * for example when a GTK is removed - the sta_id will then
2841 * be the AP ID, and no station was passed by mac80211.
2843 if (IS_ERR_OR_NULL(sta))
2846 return iwl_mvm_sta_from_mac80211(sta);
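/*
 * Build and send an ADD_STA_KEY command installing a key for the given
 * station, handling the per-cipher key material layout and both the old
 * (v1) and the TKIP-MIC-keys command formats.
 */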
2852 static int iwl_mvm_send_sta_key(struct iwl_mvm *mvm,
2854 struct ieee80211_key_conf *key, bool mcast,
2855 u32 tkip_iv32, u16 *tkip_p1k, u32 cmd_flags,
2859 struct iwl_mvm_add_sta_key_cmd_v1 cmd_v1;
2860 struct iwl_mvm_add_sta_key_cmd cmd;
2868 bool new_api = fw_has_api(&mvm->fw->ucode_capa,
2869 IWL_UCODE_TLV_API_TKIP_MIC_KEYS);
2871 if (sta_id == IWL_MVM_INVALID_STA)
2874 keyidx = (key->keyidx << STA_KEY_FLG_KEYID_POS) &
2875 STA_KEY_FLG_KEYID_MSK;
2876 key_flags = cpu_to_le16(keyidx);
2877 key_flags |= cpu_to_le16(STA_KEY_FLG_WEP_KEY_MAP);
2879 switch (key->cipher) {
2880 case WLAN_CIPHER_SUITE_TKIP:
2881 key_flags |= cpu_to_le16(STA_KEY_FLG_TKIP);
2883 memcpy((void *)&u.cmd.tx_mic_key,
2884 &key->key[NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY],
2887 memcpy((void *)&u.cmd.rx_mic_key,
2888 &key->key[NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY],
2890 pn = atomic64_read(&key->tx_pn);
2893 u.cmd_v1.tkip_rx_tsc_byte2 = tkip_iv32;
2894 for (i = 0; i < 5; i++)
2895 u.cmd_v1.tkip_rx_ttak[i] =
2896 cpu_to_le16(tkip_p1k[i]);
2898 memcpy(u.cmd.common.key, key->key, key->keylen);
2900 case WLAN_CIPHER_SUITE_CCMP:
2901 key_flags |= cpu_to_le16(STA_KEY_FLG_CCM);
2902 memcpy(u.cmd.common.key, key->key, key->keylen);
2904 pn = atomic64_read(&key->tx_pn);
2906 case WLAN_CIPHER_SUITE_WEP104:
2907 key_flags |= cpu_to_le16(STA_KEY_FLG_WEP_13BYTES);
2909 case WLAN_CIPHER_SUITE_WEP40:
2910 key_flags |= cpu_to_le16(STA_KEY_FLG_WEP);
2911 memcpy(u.cmd.common.key + 3, key->key, key->keylen);
2913 case WLAN_CIPHER_SUITE_GCMP_256:
2914 key_flags |= cpu_to_le16(STA_KEY_FLG_KEY_32BYTES);
2916 case WLAN_CIPHER_SUITE_GCMP:
2917 key_flags |= cpu_to_le16(STA_KEY_FLG_GCMP);
2918 memcpy(u.cmd.common.key, key->key, key->keylen);
2920 pn = atomic64_read(&key->tx_pn);
2923 key_flags |= cpu_to_le16(STA_KEY_FLG_EXT);
2924 memcpy(u.cmd.common.key, key->key, key->keylen);
2928 key_flags |= cpu_to_le16(STA_KEY_MULTICAST);
2930 u.cmd.common.key_offset = key_offset;
2931 u.cmd.common.key_flags = key_flags;
2932 u.cmd.common.sta_id = sta_id;
2935 u.cmd.transmit_seq_cnt = cpu_to_le64(pn);
2936 size = sizeof(u.cmd);
2938 size = sizeof(u.cmd_v1);
2941 status = ADD_STA_SUCCESS;
2942 if (cmd_flags & CMD_ASYNC)
2943 ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA_KEY, CMD_ASYNC, size,
2946 ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA_KEY, size,
2950 case ADD_STA_SUCCESS:
2951 IWL_DEBUG_WEP(mvm, "MODIFY_STA: set dynamic key passed\n");
2955 IWL_ERR(mvm, "MODIFY_STA: set dynamic key failed\n");
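/*
 * Install or remove an IGTK (CMAC/GMAC) for the given station via the
 * MGMT_MCAST_KEY command, falling back to the v1 layout on older firmware.
 */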
2962 static int iwl_mvm_send_sta_igtk(struct iwl_mvm *mvm,
2963 struct ieee80211_key_conf *keyconf,
2964 u8 sta_id, bool remove_key)
2966 struct iwl_mvm_mgmt_mcast_key_cmd igtk_cmd = {};
2968 /* verify the key details match the required command's expectations */
2969 if (WARN_ON((keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE) ||
2970 (keyconf->keyidx != 4 && keyconf->keyidx != 5) ||
2971 (keyconf->cipher != WLAN_CIPHER_SUITE_AES_CMAC &&
2972 keyconf->cipher != WLAN_CIPHER_SUITE_BIP_GMAC_128 &&
2973 keyconf->cipher != WLAN_CIPHER_SUITE_BIP_GMAC_256)))
2976 if (WARN_ON(!iwl_mvm_has_new_rx_api(mvm) &&
2977 keyconf->cipher != WLAN_CIPHER_SUITE_AES_CMAC))
2980 igtk_cmd.key_id = cpu_to_le32(keyconf->keyidx);
2981 igtk_cmd.sta_id = cpu_to_le32(sta_id);
2984 /* This is a valid situation for IGTK */
2985 if (sta_id == IWL_MVM_INVALID_STA)
2988 igtk_cmd.ctrl_flags |= cpu_to_le32(STA_KEY_NOT_VALID);
2990 struct ieee80211_key_seq seq;
2993 switch (keyconf->cipher) {
2994 case WLAN_CIPHER_SUITE_AES_CMAC:
2995 igtk_cmd.ctrl_flags |= cpu_to_le32(STA_KEY_FLG_CCM);
2997 case WLAN_CIPHER_SUITE_BIP_GMAC_128:
2998 case WLAN_CIPHER_SUITE_BIP_GMAC_256:
2999 igtk_cmd.ctrl_flags |= cpu_to_le32(STA_KEY_FLG_GCMP);
3005 memcpy(igtk_cmd.igtk, keyconf->key, keyconf->keylen);
3006 if (keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256)
3007 igtk_cmd.ctrl_flags |=
3008 cpu_to_le32(STA_KEY_FLG_KEY_32BYTES);
3009 ieee80211_get_key_rx_seq(keyconf, 0, &seq);
3010 pn = seq.aes_cmac.pn;
3011 igtk_cmd.receive_seq_cnt = cpu_to_le64(((u64) pn[5] << 0) |
3012 ((u64) pn[4] << 8) |
3013 ((u64) pn[3] << 16) |
3014 ((u64) pn[2] << 24) |
3015 ((u64) pn[1] << 32) |
3016 ((u64) pn[0] << 40));
3019 IWL_DEBUG_INFO(mvm, "%s igtk for sta %u\n",
3020 remove_key ? "removing" : "installing",
3023 if (!iwl_mvm_has_new_rx_api(mvm)) {
3024 struct iwl_mvm_mgmt_mcast_key_cmd_v1 igtk_cmd_v1 = {
3025 .ctrl_flags = igtk_cmd.ctrl_flags,
3026 .key_id = igtk_cmd.key_id,
3027 .sta_id = igtk_cmd.sta_id,
3028 .receive_seq_cnt = igtk_cmd.receive_seq_cnt
3031 memcpy(igtk_cmd_v1.igtk, igtk_cmd.igtk,
3032 ARRAY_SIZE(igtk_cmd_v1.igtk));
3033 return iwl_mvm_send_cmd_pdu(mvm, MGMT_MCAST_KEY, 0,
3034 sizeof(igtk_cmd_v1), &igtk_cmd_v1);
3036 return iwl_mvm_send_cmd_pdu(mvm, MGMT_MCAST_KEY, 0,
3037 sizeof(igtk_cmd), &igtk_cmd);
3041 static inline u8 *iwl_mvm_get_mac_addr(struct iwl_mvm *mvm,
3042 struct ieee80211_vif *vif,
3043 struct ieee80211_sta *sta)
3045 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
3050 if (vif->type == NL80211_IFTYPE_STATION &&
3051 mvmvif->ap_sta_id != IWL_MVM_INVALID_STA) {
3052 u8 sta_id = mvmvif->ap_sta_id;
3053 sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
3054 lockdep_is_held(&mvm->mutex));
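/*
 * Resolve the station id the key belongs to and program the key into the
 * firmware, deriving the TKIP phase-1 key from mac80211 when needed.
 */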
3062 static int __iwl_mvm_set_sta_key(struct iwl_mvm *mvm,
3063 struct ieee80211_vif *vif,
3064 struct ieee80211_sta *sta,
3065 struct ieee80211_key_conf *keyconf,
3071 struct ieee80211_key_seq seq;
3076 struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
3078 sta_id = mvm_sta->sta_id;
3079 } else if (vif->type == NL80211_IFTYPE_AP &&
3080 !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE)) {
3081 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
3083 sta_id = mvmvif->mcast_sta.sta_id;
3085 IWL_ERR(mvm, "Failed to find station id\n");
3089 switch (keyconf->cipher) {
3090 case WLAN_CIPHER_SUITE_TKIP:
3091 addr = iwl_mvm_get_mac_addr(mvm, vif, sta);
3092 /* get phase 1 key from mac80211 */
3093 ieee80211_get_key_rx_seq(keyconf, 0, &seq);
3094 ieee80211_get_tkip_rx_p1k(keyconf, addr, seq.tkip.iv32, p1k);
3095 ret = iwl_mvm_send_sta_key(mvm, sta_id, keyconf, mcast,
3096 seq.tkip.iv32, p1k, 0, key_offset);
3098 case WLAN_CIPHER_SUITE_CCMP:
3099 case WLAN_CIPHER_SUITE_WEP40:
3100 case WLAN_CIPHER_SUITE_WEP104:
3101 case WLAN_CIPHER_SUITE_GCMP:
3102 case WLAN_CIPHER_SUITE_GCMP_256:
3103 ret = iwl_mvm_send_sta_key(mvm, sta_id, keyconf, mcast,
3104 0, NULL, 0, key_offset);
3107 ret = iwl_mvm_send_sta_key(mvm, sta_id, keyconf, mcast,
3108 0, NULL, 0, key_offset);
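/*
 * Remove a previously installed key from the firmware by sending an
 * ADD_STA_KEY command with the STA_KEY_NOT_VALID flag set.
 */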
3114 static int __iwl_mvm_remove_sta_key(struct iwl_mvm *mvm, u8 sta_id,
3115 struct ieee80211_key_conf *keyconf,
3119 struct iwl_mvm_add_sta_key_cmd_v1 cmd_v1;
3120 struct iwl_mvm_add_sta_key_cmd cmd;
3122 bool new_api = fw_has_api(&mvm->fw->ucode_capa,
3123 IWL_UCODE_TLV_API_TKIP_MIC_KEYS);
3128 /* This is a valid situation for GTK removal */
3129 if (sta_id == IWL_MVM_INVALID_STA)
3132 key_flags = cpu_to_le16((keyconf->keyidx << STA_KEY_FLG_KEYID_POS) &
3133 STA_KEY_FLG_KEYID_MSK);
3134 key_flags |= cpu_to_le16(STA_KEY_FLG_NO_ENC | STA_KEY_FLG_WEP_KEY_MAP);
3135 key_flags |= cpu_to_le16(STA_KEY_NOT_VALID);
3138 key_flags |= cpu_to_le16(STA_KEY_MULTICAST);
3141 * The fields assigned here are in the same location at the start
3142 * of the command, so we can do this union trick.
3144 u.cmd.common.key_flags = key_flags;
3145 u.cmd.common.key_offset = keyconf->hw_key_idx;
3146 u.cmd.common.sta_id = sta_id;
3148 size = new_api ? sizeof(u.cmd) : sizeof(u.cmd_v1);
3150 status = ADD_STA_SUCCESS;
3151 ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA_KEY, size, &u.cmd,
3155 case ADD_STA_SUCCESS:
3156 IWL_DEBUG_WEP(mvm, "MODIFY_STA: remove sta key passed\n");
3160 IWL_ERR(mvm, "MODIFY_STA: remove sta key failed\n");
3167 int iwl_mvm_set_sta_key(struct iwl_mvm *mvm,
3168 struct ieee80211_vif *vif,
3169 struct ieee80211_sta *sta,
3170 struct ieee80211_key_conf *keyconf,
3173 bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE);
3174 struct iwl_mvm_sta *mvm_sta;
3175 u8 sta_id = IWL_MVM_INVALID_STA;
3177 static const u8 __maybe_unused zero_addr[ETH_ALEN] = {0};
3179 lockdep_assert_held(&mvm->mutex);
3181 if (vif->type != NL80211_IFTYPE_AP ||
3182 keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE) {
3183 /* Get the station id from the mvm local station table */
3184 mvm_sta = iwl_mvm_get_key_sta(mvm, vif, sta);
3186 IWL_ERR(mvm, "Failed to find station\n");
3189 sta_id = mvm_sta->sta_id;
3192 * It is possible that the 'sta' parameter is NULL, and thus
3193 		 * there is a need to retrieve the sta from the local station table.
3197 sta = rcu_dereference_protected(
3198 mvm->fw_id_to_mac_id[sta_id],
3199 lockdep_is_held(&mvm->mutex));
3200 if (IS_ERR_OR_NULL(sta)) {
3201 IWL_ERR(mvm, "Invalid station id\n");
3206 if (WARN_ON_ONCE(iwl_mvm_sta_from_mac80211(sta)->vif != vif))
3209 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
3211 sta_id = mvmvif->mcast_sta.sta_id;
3214 if (keyconf->cipher == WLAN_CIPHER_SUITE_AES_CMAC ||
3215 keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 ||
3216 keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256) {
3217 ret = iwl_mvm_send_sta_igtk(mvm, keyconf, sta_id, false);
3221 /* If the key_offset is not pre-assigned, we need to find a
3222 * new offset to use. In normal cases, the offset is not
3223 * pre-assigned, but during HW_RESTART we want to reuse the
3224 * same indices, so we pass them when this function is called.
3226 	 * In D3 entry, we need to hard-code the indices (because the
3227 * firmware hardcodes the PTK offset to 0). In this case, we
3228 * need to make sure we don't overwrite the hw_key_idx in the
3229 * keyconf structure, because otherwise we cannot configure
3230 * the original ones back when resuming.
3232 if (key_offset == STA_KEY_IDX_INVALID) {
3233 key_offset = iwl_mvm_set_fw_key_idx(mvm);
3234 if (key_offset == STA_KEY_IDX_INVALID)
3236 keyconf->hw_key_idx = key_offset;
3239 ret = __iwl_mvm_set_sta_key(mvm, vif, sta, keyconf, key_offset, mcast);
3244 * For WEP, the same key is used for multicast and unicast. Upload it
3245 * again, using the same key offset, and now pointing the other one
3246 * to the same key slot (offset).
3247 * If this fails, remove the original as well.
3249 if ((keyconf->cipher == WLAN_CIPHER_SUITE_WEP40 ||
3250 keyconf->cipher == WLAN_CIPHER_SUITE_WEP104) &&
3252 ret = __iwl_mvm_set_sta_key(mvm, vif, sta, keyconf,
3253 key_offset, !mcast);
3255 __iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, mcast);
3260 __set_bit(key_offset, mvm->fw_key_table);
3263 IWL_DEBUG_WEP(mvm, "key: cipher=%x len=%d idx=%d sta=%pM ret=%d\n",
3264 keyconf->cipher, keyconf->keylen, keyconf->keyidx,
3265 sta ? sta->addr : zero_addr, ret);
3269 int iwl_mvm_remove_sta_key(struct iwl_mvm *mvm,
3270 struct ieee80211_vif *vif,
3271 struct ieee80211_sta *sta,
3272 struct ieee80211_key_conf *keyconf)
3274 bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE);
3275 struct iwl_mvm_sta *mvm_sta;
3276 u8 sta_id = IWL_MVM_INVALID_STA;
3279 lockdep_assert_held(&mvm->mutex);
3281 /* Get the station from the mvm local station table */
3282 mvm_sta = iwl_mvm_get_key_sta(mvm, vif, sta);
3284 sta_id = mvm_sta->sta_id;
3285 else if (!sta && vif->type == NL80211_IFTYPE_AP && mcast)
3286 sta_id = iwl_mvm_vif_from_mac80211(vif)->mcast_sta.sta_id;
3289 IWL_DEBUG_WEP(mvm, "mvm remove dynamic key: idx=%d sta=%d\n",
3290 keyconf->keyidx, sta_id);
3292 if (keyconf->cipher == WLAN_CIPHER_SUITE_AES_CMAC ||
3293 keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 ||
3294 keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256)
3295 return iwl_mvm_send_sta_igtk(mvm, keyconf, sta_id, true);
3297 if (!__test_and_clear_bit(keyconf->hw_key_idx, mvm->fw_key_table)) {
3298 IWL_ERR(mvm, "offset %d not used in fw key table.\n",
3299 keyconf->hw_key_idx);
3303 /* track which key was deleted last */
3304 for (i = 0; i < STA_KEY_MAX_NUM; i++) {
3305 if (mvm->fw_key_deleted[i] < U8_MAX)
3306 mvm->fw_key_deleted[i]++;
3308 mvm->fw_key_deleted[keyconf->hw_key_idx] = 0;
3310 if (sta && !mvm_sta) {
3311 IWL_DEBUG_WEP(mvm, "station non-existent, early return.\n");
3315 ret = __iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, mcast);
3319 /* delete WEP key twice to get rid of (now useless) offset */
3320 if (keyconf->cipher == WLAN_CIPHER_SUITE_WEP40 ||
3321 keyconf->cipher == WLAN_CIPHER_SUITE_WEP104)
3322 ret = __iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, !mcast);
3327 void iwl_mvm_update_tkip_key(struct iwl_mvm *mvm,
3328 struct ieee80211_vif *vif,
3329 struct ieee80211_key_conf *keyconf,
3330 struct ieee80211_sta *sta, u32 iv32,
3333 struct iwl_mvm_sta *mvm_sta;
3334 bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE);
3338 mvm_sta = iwl_mvm_get_key_sta(mvm, vif, sta);
3339 if (WARN_ON_ONCE(!mvm_sta))
3341 iwl_mvm_send_sta_key(mvm, mvm_sta->sta_id, keyconf, mcast,
3342 iv32, phase1key, CMD_ASYNC, keyconf->hw_key_idx);
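/* Clear the PS flag in the firmware for a station that woke up (async) */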
3348 void iwl_mvm_sta_modify_ps_wake(struct iwl_mvm *mvm,
3349 struct ieee80211_sta *sta)
3351 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
3352 struct iwl_mvm_add_sta_cmd cmd = {
3353 .add_modify = STA_MODE_MODIFY,
3354 .sta_id = mvmsta->sta_id,
3355 .station_flags_msk = cpu_to_le32(STA_FLG_PS),
3356 .mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color),
3360 ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC,
3361 iwl_mvm_add_sta_cmd_size(mvm), &cmd);
3363 IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
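/*
 * Tell the firmware how many frames may be released to a sleeping station
 * during a service period (U-APSD or PS-Poll), adjusting the count to what
 * is actually queued when all frames come from a single station queue.
 */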
3366 void iwl_mvm_sta_modify_sleep_tx_count(struct iwl_mvm *mvm,
3367 struct ieee80211_sta *sta,
3368 enum ieee80211_frame_release_type reason,
3369 u16 cnt, u16 tids, bool more_data,
3370 bool single_sta_queue)
3372 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
3373 struct iwl_mvm_add_sta_cmd cmd = {
3374 .add_modify = STA_MODE_MODIFY,
3375 .sta_id = mvmsta->sta_id,
3376 .modify_mask = STA_MODIFY_SLEEPING_STA_TX_COUNT,
3377 .sleep_tx_count = cpu_to_le16(cnt),
3378 .mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color),
3381 unsigned long _tids = tids;
3383 /* convert TIDs to ACs - we don't support TSPEC so that's OK
3384 * Note that this field is reserved and unused by firmware not
3385 * supporting GO uAPSD, so it's safe to always do this.
3387 for_each_set_bit(tid, &_tids, IWL_MAX_TID_COUNT)
3388 cmd.awake_acs |= BIT(tid_to_ucode_ac[tid]);
3390 /* If we're releasing frames from aggregation or dqa queues then check
3391 * if all the queues that we're releasing frames from, combined, have:
3392 	 *  - more frames than the service period, in which case more_data needs to be set
3394 * - fewer than 'cnt' frames, in which case we need to adjust the
3395 * firmware command (but do that unconditionally)
3397 if (single_sta_queue) {
3398 int remaining = cnt;
3401 spin_lock_bh(&mvmsta->lock);
3402 for_each_set_bit(tid, &_tids, IWL_MAX_TID_COUNT) {
3403 struct iwl_mvm_tid_data *tid_data;
3406 tid_data = &mvmsta->tid_data[tid];
3408 n_queued = iwl_mvm_tid_queued(mvm, tid_data);
3409 if (n_queued > remaining) {
3414 remaining -= n_queued;
3416 sleep_tx_count = cnt - remaining;
3417 if (reason == IEEE80211_FRAME_RELEASE_UAPSD)
3418 mvmsta->sleep_tx_count = sleep_tx_count;
3419 spin_unlock_bh(&mvmsta->lock);
3421 cmd.sleep_tx_count = cpu_to_le16(sleep_tx_count);
3422 if (WARN_ON(cnt - remaining == 0)) {
3423 ieee80211_sta_eosp(sta);
3428 /* Note: this is ignored by firmware not supporting GO uAPSD */
3430 cmd.sleep_state_flags |= STA_SLEEP_STATE_MOREDATA;
3432 if (reason == IEEE80211_FRAME_RELEASE_PSPOLL) {
3433 mvmsta->next_status_eosp = true;
3434 cmd.sleep_state_flags |= STA_SLEEP_STATE_PS_POLL;
3436 cmd.sleep_state_flags |= STA_SLEEP_STATE_UAPSD;
3439 	/* block the Tx queues until the FW has updated the sleep Tx count */
3440 iwl_trans_block_txq_ptrs(mvm->trans, true);
3442 ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA,
3443 CMD_ASYNC | CMD_WANT_ASYNC_CALLBACK,
3444 iwl_mvm_add_sta_cmd_size(mvm), &cmd);
3446 IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
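/*
 * Handle the firmware's end-of-service-period notification and forward it
 * to mac80211 for the indicated station.
 */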
3449 void iwl_mvm_rx_eosp_notif(struct iwl_mvm *mvm,
3450 struct iwl_rx_cmd_buffer *rxb)
3452 struct iwl_rx_packet *pkt = rxb_addr(rxb);
3453 struct iwl_mvm_eosp_notification *notif = (void *)pkt->data;
3454 struct ieee80211_sta *sta;
3455 u32 sta_id = le32_to_cpu(notif->sta_id);
3457 if (WARN_ON_ONCE(sta_id >= IWL_MVM_STATION_COUNT))
3461 sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
3462 if (!IS_ERR_OR_NULL(sta))
3463 ieee80211_sta_eosp(sta);
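/* Set or clear STA_FLG_DISABLE_TX for the given station in the firmware */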
3467 void iwl_mvm_sta_modify_disable_tx(struct iwl_mvm *mvm,
3468 struct iwl_mvm_sta *mvmsta, bool disable)
3470 struct iwl_mvm_add_sta_cmd cmd = {
3471 .add_modify = STA_MODE_MODIFY,
3472 .sta_id = mvmsta->sta_id,
3473 .station_flags = disable ? cpu_to_le32(STA_FLG_DISABLE_TX) : 0,
3474 .station_flags_msk = cpu_to_le32(STA_FLG_DISABLE_TX),
3475 .mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color),
3479 ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC,
3480 iwl_mvm_add_sta_cmd_size(mvm), &cmd);
3482 IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
3485 void iwl_mvm_sta_modify_disable_tx_ap(struct iwl_mvm *mvm,
3486 struct ieee80211_sta *sta,
3489 struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
3491 spin_lock_bh(&mvm_sta->lock);
3493 if (mvm_sta->disable_tx == disable) {
3494 spin_unlock_bh(&mvm_sta->lock);
3498 mvm_sta->disable_tx = disable;
3500 /* Tell mac80211 to start/stop queuing tx for this station */
3501 ieee80211_sta_block_awake(mvm->hw, sta, disable);
3503 iwl_mvm_sta_modify_disable_tx(mvm, mvm_sta, disable);
3505 spin_unlock_bh(&mvm_sta->lock);
3508 static void iwl_mvm_int_sta_modify_disable_tx(struct iwl_mvm *mvm,
3509 struct iwl_mvm_vif *mvmvif,
3510 struct iwl_mvm_int_sta *sta,
3513 u32 id = FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color);
3514 struct iwl_mvm_add_sta_cmd cmd = {
3515 .add_modify = STA_MODE_MODIFY,
3516 .sta_id = sta->sta_id,
3517 .station_flags = disable ? cpu_to_le32(STA_FLG_DISABLE_TX) : 0,
3518 .station_flags_msk = cpu_to_le32(STA_FLG_DISABLE_TX),
3519 .mac_id_n_color = cpu_to_le32(id),
3523 ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, 0,
3524 iwl_mvm_add_sta_cmd_size(mvm), &cmd);
3526 IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
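/*
 * Block or unblock TX to all stations that belong to the given interface,
 * including the internal multicast and broadcast stations where the
 * firmware supports them (typically used around a channel switch).
 */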
3529 void iwl_mvm_modify_all_sta_disable_tx(struct iwl_mvm *mvm,
3530 struct iwl_mvm_vif *mvmvif,
3533 struct ieee80211_sta *sta;
3534 struct iwl_mvm_sta *mvm_sta;
3537 lockdep_assert_held(&mvm->mutex);
3539 /* Block/unblock all the stations of the given mvmvif */
3540 for (i = 0; i < ARRAY_SIZE(mvm->fw_id_to_mac_id); i++) {
3541 sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i],
3542 lockdep_is_held(&mvm->mutex));
3543 if (IS_ERR_OR_NULL(sta))
3546 mvm_sta = iwl_mvm_sta_from_mac80211(sta);
3547 if (mvm_sta->mac_id_n_color !=
3548 FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color))
3551 iwl_mvm_sta_modify_disable_tx_ap(mvm, sta, disable);
3554 if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE))
3557 	/* Also need to block/unblock the multicast station */
3558 if (mvmvif->mcast_sta.sta_id != IWL_MVM_INVALID_STA)
3559 iwl_mvm_int_sta_modify_disable_tx(mvm, mvmvif,
3560 &mvmvif->mcast_sta, disable);
3563 * Only unblock the broadcast station (FW blocks it for immediate
3564 * quiet, not the driver)
3566 if (!disable && mvmvif->bcast_sta.sta_id != IWL_MVM_INVALID_STA)
3567 iwl_mvm_int_sta_modify_disable_tx(mvm, mvmvif,
3568 &mvmvif->bcast_sta, disable);
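/* During a channel switch, block TX towards this interface's AP station */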
3571 void iwl_mvm_csa_client_absent(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
3573 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
3574 struct iwl_mvm_sta *mvmsta;
3578 mvmsta = iwl_mvm_sta_from_staid_rcu(mvm, mvmvif->ap_sta_id);
3580 if (!WARN_ON(!mvmsta))
3581 iwl_mvm_sta_modify_disable_tx(mvm, mvmsta, true);
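/* Number of frames on a TID that were sent but not yet reclaimed */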
3586 u16 iwl_mvm_tid_queued(struct iwl_mvm *mvm, struct iwl_mvm_tid_data *tid_data)
3588 u16 sn = IEEE80211_SEQ_TO_SN(tid_data->seq_number);
3591 	 * In A000 HW, the next_reclaimed index is only 8 bits wide, so we need
3592 	 * to align the wrap-around of the ssn so that we compare relevant values.
3594 if (mvm->trans->cfg->gen2)
3597 return ieee80211_sn_sub(sn, tid_data->next_reclaimed);