/*
 * Copyright (c) 2005-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
 * Copyright (c) 2018, The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "core.h"
#include "htc.h"
#include "htt.h"
#include "txrx.h"
#include "debug.h"
#include "trace.h"
#include "mac.h"

#include <linux/log2.h>
#include <linux/bitfield.h>

/* when under memory pressure rx ring refill may fail and needs a retry */
#define HTT_RX_RING_REFILL_RETRY_MS 50

#define HTT_RX_RING_REFILL_RESCHED_MS 5
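
/* Together these two intervals implement a two-speed refill policy (see
 * ath10k_htt_rx_msdu_buff_replenish() below): a refill that failed with
 * -ENOMEM is retried only after 50 ms, while a refill that was merely
 * capped at ATH10K_HTT_MAX_NUM_REFILL buffers is rescheduled after 5 ms,
 * so a large deficit is worked off in several short passes rather than
 * in one long burst.
 */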

static int ath10k_htt_rx_get_csum_state(struct sk_buff *skb);

static struct sk_buff *
ath10k_htt_rx_find_skb_paddr(struct ath10k *ar, u64 paddr)
{
	struct ath10k_skb_rxcb *rxcb;

	hash_for_each_possible(ar->htt.rx_ring.skb_table, rxcb, hlist, paddr)
		if (rxcb->paddr == paddr)
			return ATH10K_RXCB_SKB(rxcb);

	WARN_ON_ONCE(1);
	return NULL;
}

static void ath10k_htt_rx_ring_free(struct ath10k_htt *htt)
{
	struct sk_buff *skb;
	struct ath10k_skb_rxcb *rxcb;
	struct hlist_node *n;
	int i;

	if (htt->rx_ring.in_ord_rx) {
		hash_for_each_safe(htt->rx_ring.skb_table, i, n, rxcb, hlist) {
			skb = ATH10K_RXCB_SKB(rxcb);
			dma_unmap_single(htt->ar->dev, rxcb->paddr,
					 skb->len + skb_tailroom(skb),
					 DMA_FROM_DEVICE);
			hash_del(&rxcb->hlist);
			dev_kfree_skb_any(skb);
		}
	} else {
		for (i = 0; i < htt->rx_ring.size; i++) {
			skb = htt->rx_ring.netbufs_ring[i];
			if (!skb)
				continue;

			rxcb = ATH10K_SKB_RXCB(skb);
			dma_unmap_single(htt->ar->dev, rxcb->paddr,
					 skb->len + skb_tailroom(skb),
					 DMA_FROM_DEVICE);
			dev_kfree_skb_any(skb);
		}
	}

	htt->rx_ring.fill_cnt = 0;
	hash_init(htt->rx_ring.skb_table);
	memset(htt->rx_ring.netbufs_ring, 0,
	       htt->rx_ring.size * sizeof(htt->rx_ring.netbufs_ring[0]));
}

static size_t ath10k_htt_get_rx_ring_size_32(struct ath10k_htt *htt)
{
	return htt->rx_ring.size * sizeof(htt->rx_ring.paddrs_ring_32);
}

static size_t ath10k_htt_get_rx_ring_size_64(struct ath10k_htt *htt)
{
	return htt->rx_ring.size * sizeof(htt->rx_ring.paddrs_ring_64);
}
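
/* Many helpers in this file come in _32/_64 pairs: older targets use
 * 32 bit rx ring paddrs while 64 bit targets (e.g. WCN3990, see the
 * ar->hw_params.target_64bit check further below) use 64 bit ones. The
 * unsuffixed ath10k_htt_*() calls used throughout this file are expected
 * to be dispatched to the matching variant through per-target ops set up
 * at probe time.
 */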

static void ath10k_htt_config_paddrs_ring_32(struct ath10k_htt *htt,
					     void *vaddr)
{
	htt->rx_ring.paddrs_ring_32 = vaddr;
}

static void ath10k_htt_config_paddrs_ring_64(struct ath10k_htt *htt,
					     void *vaddr)
{
	htt->rx_ring.paddrs_ring_64 = vaddr;
}

static void ath10k_htt_set_paddrs_ring_32(struct ath10k_htt *htt,
					  dma_addr_t paddr, int idx)
{
	htt->rx_ring.paddrs_ring_32[idx] = __cpu_to_le32(paddr);
}

static void ath10k_htt_set_paddrs_ring_64(struct ath10k_htt *htt,
					  dma_addr_t paddr, int idx)
{
	htt->rx_ring.paddrs_ring_64[idx] = __cpu_to_le64(paddr);
}

static void ath10k_htt_reset_paddrs_ring_32(struct ath10k_htt *htt, int idx)
{
	htt->rx_ring.paddrs_ring_32[idx] = 0;
}

static void ath10k_htt_reset_paddrs_ring_64(struct ath10k_htt *htt, int idx)
{
	htt->rx_ring.paddrs_ring_64[idx] = 0;
}

static void *ath10k_htt_get_vaddr_ring_32(struct ath10k_htt *htt)
{
	return (void *)htt->rx_ring.paddrs_ring_32;
}

static void *ath10k_htt_get_vaddr_ring_64(struct ath10k_htt *htt)
{
	return (void *)htt->rx_ring.paddrs_ring_64;
}

static int __ath10k_htt_rx_ring_fill_n(struct ath10k_htt *htt, int num)
{
	struct htt_rx_desc *rx_desc;
	struct ath10k_skb_rxcb *rxcb;
	struct sk_buff *skb;
	dma_addr_t paddr;
	int ret = 0, idx;

	/* The Full Rx Reorder firmware has no way of telling the host
	 * implicitly when it copied HTT Rx Ring buffers to MAC Rx Ring.
	 * To keep things simple make sure ring is always half empty. This
	 * guarantees there'll be no replenishment overruns possible.
	 */
	BUILD_BUG_ON(HTT_RX_RING_FILL_LEVEL >= HTT_RX_RING_SIZE / 2);

	idx = __le32_to_cpu(*htt->rx_ring.alloc_idx.vaddr);

	if (idx < 0 || idx >= htt->rx_ring.size) {
		ath10k_err(htt->ar, "rx ring index is not valid, firmware malfunctioning?\n");
		idx &= htt->rx_ring.size_mask;
		ret = -ENOMEM;
		goto fail;
	}

	while (num > 0) {
		skb = dev_alloc_skb(HTT_RX_BUF_SIZE + HTT_RX_DESC_ALIGN);
		if (!skb) {
			ret = -ENOMEM;
			goto fail;
		}

		if (!IS_ALIGNED((unsigned long)skb->data, HTT_RX_DESC_ALIGN))
			skb_pull(skb,
				 PTR_ALIGN(skb->data, HTT_RX_DESC_ALIGN) -
				 skb->data);

		/* Clear rx_desc attention word before posting to Rx ring */
		rx_desc = (struct htt_rx_desc *)skb->data;
		rx_desc->attention.flags = __cpu_to_le32(0);

		paddr = dma_map_single(htt->ar->dev, skb->data,
				       skb->len + skb_tailroom(skb),
				       DMA_FROM_DEVICE);

		if (unlikely(dma_mapping_error(htt->ar->dev, paddr))) {
			dev_kfree_skb_any(skb);
			ret = -ENOMEM;
			goto fail;
		}

		rxcb = ATH10K_SKB_RXCB(skb);
		rxcb->paddr = paddr;
		htt->rx_ring.netbufs_ring[idx] = skb;
		ath10k_htt_set_paddrs_ring(htt, paddr, idx);
		htt->rx_ring.fill_cnt++;

		if (htt->rx_ring.in_ord_rx) {
			hash_add(htt->rx_ring.skb_table,
				 &ATH10K_SKB_RXCB(skb)->hlist,
				 paddr);
		}

		num--;
		idx++;
		idx &= htt->rx_ring.size_mask;
	}

fail:
	/*
	 * Make sure the rx buffer is updated before available buffer
	 * index to avoid any potential rx ring corruption.
	 */
	mb();
	*htt->rx_ring.alloc_idx.vaddr = __cpu_to_le32(idx);
	return ret;
}
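
/* Index arithmetic sketch: because HTT_RX_RING_SIZE is a power of 2,
 * stepping the ring with
 *
 *	idx++;
 *	idx &= htt->rx_ring.size_mask;
 *
 * is equivalent to idx = (idx + 1) % HTT_RX_RING_SIZE, e.g. with a ring
 * of 512 entries index 511 wraps back to 0. This is also why
 * ath10k_htt_rx_alloc() below rejects non-power-of-2 ring sizes.
 */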

static int ath10k_htt_rx_ring_fill_n(struct ath10k_htt *htt, int num)
{
	lockdep_assert_held(&htt->rx_ring.lock);
	return __ath10k_htt_rx_ring_fill_n(htt, num);
}

static void ath10k_htt_rx_msdu_buff_replenish(struct ath10k_htt *htt)
{
	int ret, num_deficit, num_to_fill;

	/* Refilling the whole RX ring buffer proves to be a bad idea. The
	 * reason is RX may take up significant amount of CPU cycles and starve
	 * other tasks, e.g. TX on an ethernet device while acting as a bridge
	 * with ath10k wlan interface. This ended up with very poor performance
	 * once the host CPU was overwhelmed with RX on ath10k.
	 *
	 * By limiting the number of refills the replenishing occurs
	 * progressively. This in turn makes use of the fact that tasklets are
	 * processed in FIFO order. This means actual RX processing can starve
	 * out refilling. If there aren't enough buffers on the RX ring the FW
	 * will not report RX until the ring is refilled with enough buffers.
	 * This automatically balances load with respect to CPU power.
	 *
	 * This probably comes at a cost of lower maximum throughput but
	 * improves the average and stability.
	 */
	spin_lock_bh(&htt->rx_ring.lock);
	num_deficit = htt->rx_ring.fill_level - htt->rx_ring.fill_cnt;
	num_to_fill = min(ATH10K_HTT_MAX_NUM_REFILL, num_deficit);
	num_deficit -= num_to_fill;
	ret = ath10k_htt_rx_ring_fill_n(htt, num_to_fill);
	if (ret == -ENOMEM) {
		/*
		 * Failed to fill it to the desired level -
		 * we'll start a timer and try again next time.
		 * As long as enough buffers are left in the ring for
		 * another A-MPDU rx, no special recovery is needed.
		 */
		mod_timer(&htt->rx_ring.refill_retry_timer, jiffies +
			  msecs_to_jiffies(HTT_RX_RING_REFILL_RETRY_MS));
	} else if (num_deficit > 0) {
		mod_timer(&htt->rx_ring.refill_retry_timer, jiffies +
			  msecs_to_jiffies(HTT_RX_RING_REFILL_RESCHED_MS));
	}
	spin_unlock_bh(&htt->rx_ring.lock);
}

static void ath10k_htt_rx_ring_refill_retry(struct timer_list *t)
{
	struct ath10k_htt *htt = from_timer(htt, t, rx_ring.refill_retry_timer);

	ath10k_htt_rx_msdu_buff_replenish(htt);
}

int ath10k_htt_rx_ring_refill(struct ath10k *ar)
{
	struct ath10k_htt *htt = &ar->htt;
	int ret;

	spin_lock_bh(&htt->rx_ring.lock);
	ret = ath10k_htt_rx_ring_fill_n(htt, (htt->rx_ring.fill_level -
					      htt->rx_ring.fill_cnt));

	if (ret)
		ath10k_htt_rx_ring_free(htt);

	spin_unlock_bh(&htt->rx_ring.lock);

	return ret;
}

void ath10k_htt_rx_free(struct ath10k_htt *htt)
{
	del_timer_sync(&htt->rx_ring.refill_retry_timer);

	skb_queue_purge(&htt->rx_msdus_q);
	skb_queue_purge(&htt->rx_in_ord_compl_q);
	skb_queue_purge(&htt->tx_fetch_ind_q);

	spin_lock_bh(&htt->rx_ring.lock);
	ath10k_htt_rx_ring_free(htt);
	spin_unlock_bh(&htt->rx_ring.lock);

	dma_free_coherent(htt->ar->dev,
			  ath10k_htt_get_rx_ring_size(htt),
			  ath10k_htt_get_vaddr_ring(htt),
			  htt->rx_ring.base_paddr);

	dma_free_coherent(htt->ar->dev,
			  sizeof(*htt->rx_ring.alloc_idx.vaddr),
			  htt->rx_ring.alloc_idx.vaddr,
			  htt->rx_ring.alloc_idx.paddr);

	kfree(htt->rx_ring.netbufs_ring);
}

static inline struct sk_buff *ath10k_htt_rx_netbuf_pop(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	int idx;
	struct sk_buff *msdu;

	lockdep_assert_held(&htt->rx_ring.lock);

	if (htt->rx_ring.fill_cnt == 0) {
		ath10k_warn(ar, "tried to pop sk_buff from an empty rx ring\n");
		return NULL;
	}

	idx = htt->rx_ring.sw_rd_idx.msdu_payld;
	msdu = htt->rx_ring.netbufs_ring[idx];
	htt->rx_ring.netbufs_ring[idx] = NULL;
	ath10k_htt_reset_paddrs_ring(htt, idx);

	idx++;
	idx &= htt->rx_ring.size_mask;
	htt->rx_ring.sw_rd_idx.msdu_payld = idx;
	htt->rx_ring.fill_cnt--;

	dma_unmap_single(htt->ar->dev,
			 ATH10K_SKB_RXCB(msdu)->paddr,
			 msdu->len + skb_tailroom(msdu),
			 DMA_FROM_DEVICE);
	ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt rx netbuf pop: ",
			msdu->data, msdu->len + skb_tailroom(msdu));

	return msdu;
}

/* return: < 0 fatal error, 0 - non chained msdu, 1 chained msdu */
static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt,
				   struct sk_buff_head *amsdu)
{
	struct ath10k *ar = htt->ar;
	int msdu_len, msdu_chaining = 0;
	struct sk_buff *msdu;
	struct htt_rx_desc *rx_desc;

	lockdep_assert_held(&htt->rx_ring.lock);

	for (;;) {
		int last_msdu, msdu_len_invalid, msdu_chained;

		msdu = ath10k_htt_rx_netbuf_pop(htt);
		if (!msdu) {
			__skb_queue_purge(amsdu);
			return -ENOENT;
		}

		__skb_queue_tail(amsdu, msdu);

		rx_desc = (struct htt_rx_desc *)msdu->data;

		/* FIXME: we must report msdu payload since this is what caller
		 * expects now
		 */
		skb_put(msdu, offsetof(struct htt_rx_desc, msdu_payload));
		skb_pull(msdu, offsetof(struct htt_rx_desc, msdu_payload));

		/*
		 * Sanity check - confirm the HW is finished filling in the
		 * rx data.
		 * If the HW and SW are working correctly, then it's guaranteed
		 * that the HW's MAC DMA is done before this point in the SW.
		 * To prevent the case that we handle a stale Rx descriptor,
		 * just assert for now until we have a way to recover.
		 */
		if (!(__le32_to_cpu(rx_desc->attention.flags)
				& RX_ATTENTION_FLAGS_MSDU_DONE)) {
			__skb_queue_purge(amsdu);
			return -EIO;
		}

		msdu_len_invalid = !!(__le32_to_cpu(rx_desc->attention.flags)
					& (RX_ATTENTION_FLAGS_MPDU_LENGTH_ERR |
					   RX_ATTENTION_FLAGS_MSDU_LENGTH_ERR));
		msdu_len = MS(__le32_to_cpu(rx_desc->msdu_start.common.info0),
			      RX_MSDU_START_INFO0_MSDU_LENGTH);
		msdu_chained = rx_desc->frag_info.ring2_more_count;

		if (msdu_len_invalid)
			msdu_len = 0;

		skb_trim(msdu, 0);
		skb_put(msdu, min(msdu_len, HTT_RX_MSDU_SIZE));
		msdu_len -= msdu->len;

		/* Note: Chained buffers do not contain rx descriptor */
		while (msdu_chained--) {
			msdu = ath10k_htt_rx_netbuf_pop(htt);
			if (!msdu) {
				__skb_queue_purge(amsdu);
				return -ENOENT;
			}

			__skb_queue_tail(amsdu, msdu);
			skb_trim(msdu, 0);
			skb_put(msdu, min(msdu_len, HTT_RX_BUF_SIZE));
			msdu_len -= msdu->len;
			msdu_chaining = 1;
		}

		last_msdu = __le32_to_cpu(rx_desc->msdu_end.common.info0) &
				RX_MSDU_END_INFO0_LAST_MSDU;

		trace_ath10k_htt_rx_desc(ar, &rx_desc->attention,
					 sizeof(*rx_desc) - sizeof(u32));

		if (last_msdu)
			break;
	}

	if (skb_queue_empty(amsdu))
		msdu_chaining = -1;

	/*
	 * Don't refill the ring yet.
	 *
	 * First, the elements popped here are still in use - it is not
	 * safe to overwrite them until the matching call to
	 * mpdu_desc_list_next. Second, for efficiency it is preferable to
	 * refill the rx ring with 1 PPDU's worth of rx buffers (something
	 * like 32 x 3 buffers), rather than one MPDU's worth of rx buffers
	 * (something like 3 buffers). Consequently, we'll rely on the txrx
	 * SW to tell us when it is done pulling all the PPDU's rx buffers
	 * out of the rx ring, and then refill it just once.
	 */

	return msdu_chaining;
}
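
/* Length accounting sketch for the chained case above: the first buffer
 * carries the rx descriptor plus up to HTT_RX_MSDU_SIZE payload bytes,
 * while each chained buffer carries up to HTT_RX_BUF_SIZE raw payload
 * bytes with no descriptor. So an msdu_len exceeding HTT_RX_MSDU_SIZE
 * with ring2_more_count == 2 is reassembled as the head skb plus two
 * chained skbs, all queued on the same amsdu list.
 */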

static struct sk_buff *ath10k_htt_rx_pop_paddr(struct ath10k_htt *htt,
					       u64 paddr)
{
	struct ath10k *ar = htt->ar;
	struct ath10k_skb_rxcb *rxcb;
	struct sk_buff *msdu;

	lockdep_assert_held(&htt->rx_ring.lock);

	msdu = ath10k_htt_rx_find_skb_paddr(ar, paddr);
	if (!msdu)
		return NULL;

	rxcb = ATH10K_SKB_RXCB(msdu);
	hash_del(&rxcb->hlist);
	htt->rx_ring.fill_cnt--;

	dma_unmap_single(htt->ar->dev, rxcb->paddr,
			 msdu->len + skb_tailroom(msdu),
			 DMA_FROM_DEVICE);
	ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt rx netbuf pop: ",
			msdu->data, msdu->len + skb_tailroom(msdu));

	return msdu;
}

static int ath10k_htt_rx_pop_paddr32_list(struct ath10k_htt *htt,
					  struct htt_rx_in_ord_ind *ev,
					  struct sk_buff_head *list)
{
	struct ath10k *ar = htt->ar;
	struct htt_rx_in_ord_msdu_desc *msdu_desc = ev->msdu_descs32;
	struct htt_rx_desc *rxd;
	struct sk_buff *msdu;
	int msdu_count;
	bool is_offload;
	u32 paddr;

	lockdep_assert_held(&htt->rx_ring.lock);

	msdu_count = __le16_to_cpu(ev->msdu_count);
	is_offload = !!(ev->info & HTT_RX_IN_ORD_IND_INFO_OFFLOAD_MASK);

	while (msdu_count--) {
		paddr = __le32_to_cpu(msdu_desc->msdu_paddr);

		msdu = ath10k_htt_rx_pop_paddr(htt, paddr);
		if (!msdu) {
			__skb_queue_purge(list);
			return -ENOENT;
		}

		__skb_queue_tail(list, msdu);

		if (!is_offload) {
			rxd = (void *)msdu->data;

			trace_ath10k_htt_rx_desc(ar, rxd, sizeof(*rxd));

			skb_put(msdu, sizeof(*rxd));
			skb_pull(msdu, sizeof(*rxd));
			skb_put(msdu, __le16_to_cpu(msdu_desc->msdu_len));

			if (!(__le32_to_cpu(rxd->attention.flags) &
			      RX_ATTENTION_FLAGS_MSDU_DONE)) {
				ath10k_warn(htt->ar, "tried to pop an incomplete frame, oops!\n");
				return -EIO;
			}
		}

		msdu_desc++;
	}

	return 0;
}

static int ath10k_htt_rx_pop_paddr64_list(struct ath10k_htt *htt,
					  struct htt_rx_in_ord_ind *ev,
					  struct sk_buff_head *list)
{
	struct ath10k *ar = htt->ar;
	struct htt_rx_in_ord_msdu_desc_ext *msdu_desc = ev->msdu_descs64;
	struct htt_rx_desc *rxd;
	struct sk_buff *msdu;
	int msdu_count;
	bool is_offload;
	u64 paddr;

	lockdep_assert_held(&htt->rx_ring.lock);

	msdu_count = __le16_to_cpu(ev->msdu_count);
	is_offload = !!(ev->info & HTT_RX_IN_ORD_IND_INFO_OFFLOAD_MASK);

	while (msdu_count--) {
		paddr = __le64_to_cpu(msdu_desc->msdu_paddr);
		msdu = ath10k_htt_rx_pop_paddr(htt, paddr);
		if (!msdu) {
			__skb_queue_purge(list);
			return -ENOENT;
		}

		__skb_queue_tail(list, msdu);

		if (!is_offload) {
			rxd = (void *)msdu->data;

			trace_ath10k_htt_rx_desc(ar, rxd, sizeof(*rxd));

			skb_put(msdu, sizeof(*rxd));
			skb_pull(msdu, sizeof(*rxd));
			skb_put(msdu, __le16_to_cpu(msdu_desc->msdu_len));

			if (!(__le32_to_cpu(rxd->attention.flags) &
			      RX_ATTENTION_FLAGS_MSDU_DONE)) {
				ath10k_warn(htt->ar, "tried to pop an incomplete frame, oops!\n");
				return -EIO;
			}
		}

		msdu_desc++;
	}

	return 0;
}

int ath10k_htt_rx_alloc(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	dma_addr_t paddr;
	void *vaddr, *vaddr_ring;
	size_t size;
	struct timer_list *timer = &htt->rx_ring.refill_retry_timer;

	htt->rx_confused = false;

	/* XXX: The fill level could be changed during runtime in response to
	 * the host processing latency. Is this really worth it?
	 */
	htt->rx_ring.size = HTT_RX_RING_SIZE;
	htt->rx_ring.size_mask = htt->rx_ring.size - 1;
	htt->rx_ring.fill_level = ar->hw_params.rx_ring_fill_level;

	if (!is_power_of_2(htt->rx_ring.size)) {
		ath10k_warn(ar, "htt rx ring size is not power of 2\n");
		return -EINVAL;
	}

	htt->rx_ring.netbufs_ring =
		kcalloc(htt->rx_ring.size, sizeof(struct sk_buff *),
			GFP_KERNEL);
	if (!htt->rx_ring.netbufs_ring)
		goto err_netbuf;

	size = ath10k_htt_get_rx_ring_size(htt);

	vaddr_ring = dma_alloc_coherent(htt->ar->dev, size, &paddr, GFP_KERNEL);
	if (!vaddr_ring)
		goto err_dma_ring;

	ath10k_htt_config_paddrs_ring(htt, vaddr_ring);
	htt->rx_ring.base_paddr = paddr;

	vaddr = dma_alloc_coherent(htt->ar->dev,
				   sizeof(*htt->rx_ring.alloc_idx.vaddr),
				   &paddr, GFP_KERNEL);
	if (!vaddr)
		goto err_dma_idx;

	htt->rx_ring.alloc_idx.vaddr = vaddr;
	htt->rx_ring.alloc_idx.paddr = paddr;
	htt->rx_ring.sw_rd_idx.msdu_payld = htt->rx_ring.size_mask;
	*htt->rx_ring.alloc_idx.vaddr = 0;

	/* Initialize the Rx refill retry timer */
	timer_setup(timer, ath10k_htt_rx_ring_refill_retry, 0);

	spin_lock_init(&htt->rx_ring.lock);

	htt->rx_ring.fill_cnt = 0;
	htt->rx_ring.sw_rd_idx.msdu_payld = 0;
	hash_init(htt->rx_ring.skb_table);

	skb_queue_head_init(&htt->rx_msdus_q);
	skb_queue_head_init(&htt->rx_in_ord_compl_q);
	skb_queue_head_init(&htt->tx_fetch_ind_q);
	atomic_set(&htt->num_mpdus_ready, 0);

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "htt rx ring size %d fill_level %d\n",
		   htt->rx_ring.size, htt->rx_ring.fill_level);
	return 0;

err_dma_idx:
	dma_free_coherent(htt->ar->dev,
			  ath10k_htt_get_rx_ring_size(htt),
			  vaddr_ring,
			  htt->rx_ring.base_paddr);
err_dma_ring:
	kfree(htt->rx_ring.netbufs_ring);
err_netbuf:
	return -ENOMEM;
}

static int ath10k_htt_rx_crypto_param_len(struct ath10k *ar,
					  enum htt_rx_mpdu_encrypt_type type)
{
	switch (type) {
	case HTT_RX_MPDU_ENCRYPT_NONE:
		return 0;
	case HTT_RX_MPDU_ENCRYPT_WEP40:
	case HTT_RX_MPDU_ENCRYPT_WEP104:
		return IEEE80211_WEP_IV_LEN;
	case HTT_RX_MPDU_ENCRYPT_TKIP_WITHOUT_MIC:
	case HTT_RX_MPDU_ENCRYPT_TKIP_WPA:
		return IEEE80211_TKIP_IV_LEN;
	case HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2:
		return IEEE80211_CCMP_HDR_LEN;
	case HTT_RX_MPDU_ENCRYPT_AES_CCM256_WPA2:
		return IEEE80211_CCMP_256_HDR_LEN;
	case HTT_RX_MPDU_ENCRYPT_AES_GCMP_WPA2:
	case HTT_RX_MPDU_ENCRYPT_AES_GCMP256_WPA2:
		return IEEE80211_GCMP_HDR_LEN;
	case HTT_RX_MPDU_ENCRYPT_WEP128:
	case HTT_RX_MPDU_ENCRYPT_WAPI:
		break;
	}

	ath10k_warn(ar, "unsupported encryption type %d\n", type);
	return 0;
}

#define MICHAEL_MIC_LEN 8

static int ath10k_htt_rx_crypto_mic_len(struct ath10k *ar,
					enum htt_rx_mpdu_encrypt_type type)
{
	switch (type) {
	case HTT_RX_MPDU_ENCRYPT_NONE:
	case HTT_RX_MPDU_ENCRYPT_WEP40:
	case HTT_RX_MPDU_ENCRYPT_WEP104:
	case HTT_RX_MPDU_ENCRYPT_TKIP_WITHOUT_MIC:
	case HTT_RX_MPDU_ENCRYPT_TKIP_WPA:
		return 0;
	case HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2:
		return IEEE80211_CCMP_MIC_LEN;
	case HTT_RX_MPDU_ENCRYPT_AES_CCM256_WPA2:
		return IEEE80211_CCMP_256_MIC_LEN;
	case HTT_RX_MPDU_ENCRYPT_AES_GCMP_WPA2:
	case HTT_RX_MPDU_ENCRYPT_AES_GCMP256_WPA2:
		return IEEE80211_GCMP_MIC_LEN;
	case HTT_RX_MPDU_ENCRYPT_WEP128:
	case HTT_RX_MPDU_ENCRYPT_WAPI:
		break;
	}

	ath10k_warn(ar, "unsupported encryption type %d\n", type);
	return 0;
}

static int ath10k_htt_rx_crypto_icv_len(struct ath10k *ar,
					enum htt_rx_mpdu_encrypt_type type)
{
	switch (type) {
	case HTT_RX_MPDU_ENCRYPT_NONE:
	case HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2:
	case HTT_RX_MPDU_ENCRYPT_AES_CCM256_WPA2:
	case HTT_RX_MPDU_ENCRYPT_AES_GCMP_WPA2:
	case HTT_RX_MPDU_ENCRYPT_AES_GCMP256_WPA2:
		return 0;
	case HTT_RX_MPDU_ENCRYPT_WEP40:
	case HTT_RX_MPDU_ENCRYPT_WEP104:
		return IEEE80211_WEP_ICV_LEN;
	case HTT_RX_MPDU_ENCRYPT_TKIP_WITHOUT_MIC:
	case HTT_RX_MPDU_ENCRYPT_TKIP_WPA:
		return IEEE80211_TKIP_ICV_LEN;
	case HTT_RX_MPDU_ENCRYPT_WEP128:
	case HTT_RX_MPDU_ENCRYPT_WAPI:
		break;
	}

	ath10k_warn(ar, "unsupported encryption type %d\n", type);
	return 0;
}
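
/* Per-cipher summary of the three helpers above (bytes, from the mac80211
 * IEEE80211_*_LEN constants they return):
 *
 *                 IV/hdr  MIC  ICV
 * WEP40/WEP104         4    0    4
 * TKIP                 8    0    4  (plus the 8 byte Michael MIC,
 *                                    see MICHAEL_MIC_LEN above)
 * CCMP-128             8    8    0
 * CCMP-256             8   16    0
 * GCMP/GCMP-256        8   16    0
 */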

struct amsdu_subframe_hdr {
	u8 dst[ETH_ALEN];
	u8 src[ETH_ALEN];
	__be16 len;
} __packed;

#define GROUP_ID_IS_SU_MIMO(x) ((x) == 0 || (x) == 63)

static inline u8 ath10k_bw_to_mac80211_bw(u8 bw)
{
	u8 ret = 0;

	switch (bw) {
	case 0:
		ret = RATE_INFO_BW_20;
		break;
	case 1:
		ret = RATE_INFO_BW_40;
		break;
	case 2:
		ret = RATE_INFO_BW_80;
		break;
	case 3:
		ret = RATE_INFO_BW_160;
		break;
	}

	return ret;
}

static void ath10k_htt_rx_h_rates(struct ath10k *ar,
				  struct ieee80211_rx_status *status,
				  struct htt_rx_desc *rxd)
{
	struct ieee80211_supported_band *sband;
	u8 cck, rate, bw, sgi, mcs, nss;
	u8 preamble = 0;
	u8 group_id;
	u32 info1, info2, info3;
	u32 stbc, nsts_su;

	info1 = __le32_to_cpu(rxd->ppdu_start.info1);
	info2 = __le32_to_cpu(rxd->ppdu_start.info2);
	info3 = __le32_to_cpu(rxd->ppdu_start.info3);

	preamble = MS(info1, RX_PPDU_START_INFO1_PREAMBLE_TYPE);

	switch (preamble) {
	case HTT_RX_LEGACY:
		/* To get legacy rate index band is required. Since band can't
		 * be undefined check if freq is non-zero.
		 */
		if (!status->freq)
			return;

		cck = info1 & RX_PPDU_START_INFO1_L_SIG_RATE_SELECT;
		rate = MS(info1, RX_PPDU_START_INFO1_L_SIG_RATE);
		rate &= ~RX_PPDU_START_RATE_FLAG;

		sband = &ar->mac.sbands[status->band];
		status->rate_idx = ath10k_mac_hw_rate_to_idx(sband, rate, cck);
		break;
	case HTT_RX_HT:
	case HTT_RX_HT_WITH_TXBF:
		/* HT-SIG - Table 20-11 in info2 and info3 */
		mcs = info2 & 0x1F;
		nss = mcs >> 3;
		bw = (info2 >> 7) & 1;
		sgi = (info3 >> 7) & 1;

		status->rate_idx = mcs;
		status->encoding = RX_ENC_HT;
		if (sgi)
			status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
		if (bw)
			status->bw = RATE_INFO_BW_40;
		break;
	case HTT_RX_VHT:
	case HTT_RX_VHT_WITH_TXBF:
		/* VHT-SIG-A1 in info2, VHT-SIG-A2 in info3
		 * TODO check this
		 */
		bw = info2 & 3;
		sgi = info3 & 1;
		stbc = (info2 >> 3) & 1;
		group_id = (info2 >> 4) & 0x3F;

		if (GROUP_ID_IS_SU_MIMO(group_id)) {
			mcs = (info3 >> 4) & 0x0F;
			nsts_su = ((info2 >> 10) & 0x07);
			if (stbc)
				nss = (nsts_su >> 2) + 1;
			else
				nss = (nsts_su + 1);
		} else {
			/* Hardware doesn't decode VHT-SIG-B into Rx descriptor
			 * so it's impossible to decode MCS. Also since
			 * firmware consumes Group Id Management frames host
			 * has no knowledge regarding group/user position
			 * mapping so it's impossible to pick the correct Nsts
			 * from VHT-SIG-A1.
			 *
			 * Bandwidth and SGI are valid so report the rateinfo
			 * on best-effort basis.
			 */
			mcs = 0;
			nss = 1;
		}

		if (mcs > 0x09) {
			ath10k_warn(ar, "invalid MCS received %u\n", mcs);
			ath10k_warn(ar, "rxd %08x mpdu start %08x %08x msdu start %08x %08x ppdu start %08x %08x %08x %08x %08x\n",
				    __le32_to_cpu(rxd->attention.flags),
				    __le32_to_cpu(rxd->mpdu_start.info0),
				    __le32_to_cpu(rxd->mpdu_start.info1),
				    __le32_to_cpu(rxd->msdu_start.common.info0),
				    __le32_to_cpu(rxd->msdu_start.common.info1),
				    rxd->ppdu_start.info0,
				    __le32_to_cpu(rxd->ppdu_start.info1),
				    __le32_to_cpu(rxd->ppdu_start.info2),
				    __le32_to_cpu(rxd->ppdu_start.info3),
				    __le32_to_cpu(rxd->ppdu_start.info4));

			ath10k_warn(ar, "msdu end %08x mpdu end %08x\n",
				    __le32_to_cpu(rxd->msdu_end.common.info0),
				    __le32_to_cpu(rxd->mpdu_end.info0));

			ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL,
					"rx desc msdu payload: ",
					rxd->msdu_payload, 50);
		}

		status->rate_idx = mcs;
		status->nss = nss;

		if (sgi)
			status->enc_flags |= RX_ENC_FLAG_SHORT_GI;

		status->bw = ath10k_bw_to_mac80211_bw(bw);
		status->encoding = RX_ENC_VHT;
		break;
	default:
		break;
	}
}
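
/* Worked example for the SU-MIMO branch above: a VHT-SIG-A1 NSTS field of
 * nsts_su = 2 without STBC yields nss = nsts_su + 1 = 3 spatial streams.
 * The STBC path derives nss from the space-time stream count instead,
 * since with STBC each spatial stream occupies two space-time streams.
 */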

static struct ieee80211_channel *
ath10k_htt_rx_h_peer_channel(struct ath10k *ar, struct htt_rx_desc *rxd)
{
	struct ath10k_peer *peer;
	struct ath10k_vif *arvif;
	struct cfg80211_chan_def def;
	u16 peer_id;

	lockdep_assert_held(&ar->data_lock);

	if (!rxd)
		return NULL;

	if (rxd->attention.flags &
	    __cpu_to_le32(RX_ATTENTION_FLAGS_PEER_IDX_INVALID))
		return NULL;

	if (!(rxd->msdu_end.common.info0 &
	      __cpu_to_le32(RX_MSDU_END_INFO0_FIRST_MSDU)))
		return NULL;

	peer_id = MS(__le32_to_cpu(rxd->mpdu_start.info0),
		     RX_MPDU_START_INFO0_PEER_IDX);

	peer = ath10k_peer_find_by_id(ar, peer_id);
	if (!peer)
		return NULL;

	arvif = ath10k_get_arvif(ar, peer->vdev_id);
	if (WARN_ON_ONCE(!arvif))
		return NULL;

	if (ath10k_mac_vif_chan(arvif->vif, &def))
		return NULL;

	return def.chan;
}

static struct ieee80211_channel *
ath10k_htt_rx_h_vdev_channel(struct ath10k *ar, u32 vdev_id)
{
	struct ath10k_vif *arvif;
	struct cfg80211_chan_def def;

	lockdep_assert_held(&ar->data_lock);

	list_for_each_entry(arvif, &ar->arvifs, list) {
		if (arvif->vdev_id == vdev_id &&
		    ath10k_mac_vif_chan(arvif->vif, &def) == 0)
			return def.chan;
	}

	return NULL;
}

static void
ath10k_htt_rx_h_any_chan_iter(struct ieee80211_hw *hw,
			      struct ieee80211_chanctx_conf *conf,
			      void *data)
{
	struct cfg80211_chan_def *def = data;

	*def = conf->def;
}

static struct ieee80211_channel *
ath10k_htt_rx_h_any_channel(struct ath10k *ar)
{
	struct cfg80211_chan_def def = {};

	ieee80211_iter_chan_contexts_atomic(ar->hw,
					    ath10k_htt_rx_h_any_chan_iter,
					    &def);

	return def.chan;
}

static bool ath10k_htt_rx_h_channel(struct ath10k *ar,
				    struct ieee80211_rx_status *status,
				    struct htt_rx_desc *rxd,
				    u32 vdev_id)
{
	struct ieee80211_channel *ch;

	spin_lock_bh(&ar->data_lock);
	ch = ar->scan_channel;
	if (!ch)
		ch = ar->rx_channel;
	if (!ch)
		ch = ath10k_htt_rx_h_peer_channel(ar, rxd);
	if (!ch)
		ch = ath10k_htt_rx_h_vdev_channel(ar, vdev_id);
	if (!ch)
		ch = ath10k_htt_rx_h_any_channel(ar);
	if (!ch)
		ch = ar->tgt_oper_chan;
	spin_unlock_bh(&ar->data_lock);

	if (!ch)
		return false;

	status->band = ch->band;
	status->freq = ch->center_freq;

	return true;
}

static void ath10k_htt_rx_h_signal(struct ath10k *ar,
				   struct ieee80211_rx_status *status,
				   struct htt_rx_desc *rxd)
{
	int i;

	for (i = 0; i < IEEE80211_MAX_CHAINS; i++) {
		status->chains &= ~BIT(i);

		if (rxd->ppdu_start.rssi_chains[i].pri20_mhz != 0x80) {
			status->chain_signal[i] = ATH10K_DEFAULT_NOISE_FLOOR +
				rxd->ppdu_start.rssi_chains[i].pri20_mhz;

			status->chains |= BIT(i);
		}
	}

	/* FIXME: Get real NF */
	status->signal = ATH10K_DEFAULT_NOISE_FLOOR +
			 rxd->ppdu_start.rssi_comb;
	status->flag &= ~RX_FLAG_NO_SIGNAL_VAL;
}
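
/* Example: the hardware reports rssi_comb relative to the noise floor, so
 * with the driver's default noise floor of -95 dBm a rssi_comb of 40 is
 * reported to mac80211 as -55 dBm. A per-chain value of 0x80 marks that
 * chain as unused, hence the check above.
 */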

static void ath10k_htt_rx_h_mactime(struct ath10k *ar,
				    struct ieee80211_rx_status *status,
				    struct htt_rx_desc *rxd)
{
	/* FIXME: TSF is known only at the end of PPDU, in the last MPDU. This
	 * means all prior MSDUs in a PPDU are reported to mac80211 without the
	 * TSF. Is it worth holding frames until end of PPDU is known?
	 *
	 * FIXME: Can we get/compute 64bit TSF?
	 */
	status->mactime = __le32_to_cpu(rxd->ppdu_end.common.tsf_timestamp);
	status->flag |= RX_FLAG_MACTIME_END;
}

static void ath10k_htt_rx_h_ppdu(struct ath10k *ar,
				 struct sk_buff_head *amsdu,
				 struct ieee80211_rx_status *status,
				 u32 vdev_id)
{
	struct sk_buff *first;
	struct htt_rx_desc *rxd;
	bool is_first_ppdu;
	bool is_last_ppdu;

	if (skb_queue_empty(amsdu))
		return;

	first = skb_peek(amsdu);
	rxd = (void *)first->data - sizeof(*rxd);

	is_first_ppdu = !!(rxd->attention.flags &
			   __cpu_to_le32(RX_ATTENTION_FLAGS_FIRST_MPDU));
	is_last_ppdu = !!(rxd->attention.flags &
			  __cpu_to_le32(RX_ATTENTION_FLAGS_LAST_MPDU));

	if (is_first_ppdu) {
		/* New PPDU starts so clear out the old per-PPDU status. */
		status->freq = 0;
		status->rate_idx = 0;
		status->nss = 0;
		status->encoding = RX_ENC_LEGACY;
		status->bw = RATE_INFO_BW_20;

		status->flag &= ~RX_FLAG_MACTIME_END;
		status->flag |= RX_FLAG_NO_SIGNAL_VAL;

		status->flag &= ~(RX_FLAG_AMPDU_IS_LAST);
		status->flag |= RX_FLAG_AMPDU_DETAILS | RX_FLAG_AMPDU_LAST_KNOWN;
		status->ampdu_reference = ar->ampdu_reference;

		ath10k_htt_rx_h_signal(ar, status, rxd);
		ath10k_htt_rx_h_channel(ar, status, rxd, vdev_id);
		ath10k_htt_rx_h_rates(ar, status, rxd);
	}

	if (is_last_ppdu) {
		ath10k_htt_rx_h_mactime(ar, status, rxd);

		/* set ampdu last segment flag */
		status->flag |= RX_FLAG_AMPDU_IS_LAST;
		ar->ampdu_reference++;
	}
}

static const char * const tid_to_ac[] = {
	"BE",
	"BK",
	"BK",
	"BE",
	"VI",
	"VI",
	"VO",
	"VO",
};

static char *ath10k_get_tid(struct ieee80211_hdr *hdr, char *out, size_t size)
{
	u8 *qc;
	int tid;

	if (!ieee80211_is_data_qos(hdr->frame_control))
		return "";

	qc = ieee80211_get_qos_ctl(hdr);
	tid = *qc & IEEE80211_QOS_CTL_TID_MASK;
	if (tid < 8)
		snprintf(out, size, "tid %d (%s)", tid, tid_to_ac[tid]);
	else
		snprintf(out, size, "tid %d", tid);

	return out;
}

static void ath10k_htt_rx_h_queue_msdu(struct ath10k *ar,
				       struct ieee80211_rx_status *rx_status,
				       struct sk_buff *skb)
{
	struct ieee80211_rx_status *status;

	status = IEEE80211_SKB_RXCB(skb);
	*status = *rx_status;

	skb_queue_tail(&ar->htt.rx_msdus_q, skb);
}

static void ath10k_process_rx(struct ath10k *ar, struct sk_buff *skb)
{
	struct ieee80211_rx_status *status;
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	char tid[32];

	status = IEEE80211_SKB_RXCB(skb);

	ath10k_dbg(ar, ATH10K_DBG_DATA,
		   "rx skb %pK len %u peer %pM %s %s sn %u %s%s%s%s%s%s %srate_idx %u vht_nss %u freq %u band %u flag 0x%x fcs-err %i mic-err %i amsdu-more %i\n",
		   skb,
		   skb->len,
		   ieee80211_get_SA(hdr),
		   ath10k_get_tid(hdr, tid, sizeof(tid)),
		   is_multicast_ether_addr(ieee80211_get_DA(hdr)) ?
							"mcast" : "ucast",
		   (__le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_SEQ) >> 4,
		   (status->encoding == RX_ENC_LEGACY) ? "legacy" : "",
		   (status->encoding == RX_ENC_HT) ? "ht" : "",
		   (status->encoding == RX_ENC_VHT) ? "vht" : "",
		   (status->bw == RATE_INFO_BW_40) ? "40" : "",
		   (status->bw == RATE_INFO_BW_80) ? "80" : "",
		   (status->bw == RATE_INFO_BW_160) ? "160" : "",
		   status->enc_flags & RX_ENC_FLAG_SHORT_GI ? "sgi " : "",
		   status->rate_idx,
		   status->nss,
		   status->freq,
		   status->band, status->flag,
		   !!(status->flag & RX_FLAG_FAILED_FCS_CRC),
		   !!(status->flag & RX_FLAG_MMIC_ERROR),
		   !!(status->flag & RX_FLAG_AMSDU_MORE));
	ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "rx skb: ",
			skb->data, skb->len);
	trace_ath10k_rx_hdr(ar, skb->data, skb->len);
	trace_ath10k_rx_payload(ar, skb->data, skb->len);

	ieee80211_rx_napi(ar->hw, NULL, skb, &ar->napi);
}

static int ath10k_htt_rx_nwifi_hdrlen(struct ath10k *ar,
				      struct ieee80211_hdr *hdr)
{
	int len = ieee80211_hdrlen(hdr->frame_control);

	if (!test_bit(ATH10K_FW_FEATURE_NO_NWIFI_DECAP_4ADDR_PADDING,
		      ar->running_fw->fw_file.fw_features))
		len = round_up(len, 4);

	return len;
}

static void ath10k_htt_rx_h_undecap_raw(struct ath10k *ar,
					struct sk_buff *msdu,
					struct ieee80211_rx_status *status,
					enum htt_rx_mpdu_encrypt_type enctype,
					bool is_decrypted)
{
	struct ieee80211_hdr *hdr;
	struct htt_rx_desc *rxd;
	size_t hdr_len;
	size_t crypto_len;
	bool is_first;
	bool is_last;

	rxd = (void *)msdu->data - sizeof(*rxd);
	is_first = !!(rxd->msdu_end.common.info0 &
		      __cpu_to_le32(RX_MSDU_END_INFO0_FIRST_MSDU));
	is_last = !!(rxd->msdu_end.common.info0 &
		     __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU));

	/* Delivered decapped frame:
	 * [802.11 header]
	 * [crypto param] <-- can be trimmed if !fcs_err &&
	 *                    !decrypt_err && !peer_idx_invalid
	 * [amsdu header] <-- only if A-MSDU
	 * [rfc1042/llc]
	 * [payload]
	 * [FCS] <-- at end, needs to be trimmed
	 */

	/* This probably shouldn't happen but warn just in case */
	if (unlikely(WARN_ON_ONCE(!is_first)))
		return;

	/* This probably shouldn't happen but warn just in case */
	if (unlikely(WARN_ON_ONCE(!(is_first && is_last))))
		return;

	skb_trim(msdu, msdu->len - FCS_LEN);

	/* In most cases this will be true for sniffed frames. It makes sense
	 * to deliver them as-is without stripping the crypto param. This is
	 * necessary for software based decryption.
	 *
	 * If there's no error then the frame is decrypted. At least that is
	 * the case for frames that come in via fragmented rx indication.
	 */
	if (!is_decrypted)
		return;

	/* The payload is decrypted so strip crypto params. Start from tail
	 * since hdr is used to compute some stuff.
	 */

	hdr = (void *)msdu->data;

	/* Tail */
	if (status->flag & RX_FLAG_IV_STRIPPED) {
		skb_trim(msdu, msdu->len -
			 ath10k_htt_rx_crypto_mic_len(ar, enctype));

		skb_trim(msdu, msdu->len -
			 ath10k_htt_rx_crypto_icv_len(ar, enctype));
	} else {
		/* MIC */
		if (status->flag & RX_FLAG_MIC_STRIPPED)
			skb_trim(msdu, msdu->len -
				 ath10k_htt_rx_crypto_mic_len(ar, enctype));

		/* ICV */
		if (status->flag & RX_FLAG_ICV_STRIPPED)
			skb_trim(msdu, msdu->len -
				 ath10k_htt_rx_crypto_icv_len(ar, enctype));
	}

	/* MMIC */
	if ((status->flag & RX_FLAG_MMIC_STRIPPED) &&
	    !ieee80211_has_morefrags(hdr->frame_control) &&
	    enctype == HTT_RX_MPDU_ENCRYPT_TKIP_WPA)
		skb_trim(msdu, msdu->len - MICHAEL_MIC_LEN);

	/* Head */
	if (status->flag & RX_FLAG_IV_STRIPPED) {
		hdr_len = ieee80211_hdrlen(hdr->frame_control);
		crypto_len = ath10k_htt_rx_crypto_param_len(ar, enctype);

		memmove((void *)msdu->data + crypto_len,
			(void *)msdu->data, hdr_len);
		skb_pull(msdu, crypto_len);
	}
}
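
/* Strip-order sketch for the raw decap path above, taking CCMP-128 as an
 * example: the tail is trimmed first (8 byte MIC; CCMP has no ICV), then
 * the 8 byte CCMP header is removed from the head by sliding the 802.11
 * header forward over it with memmove() + skb_pull(). Trimming the tail
 * first matters because the 802.11 header is still needed to compute the
 * header length.
 */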

static void ath10k_htt_rx_h_undecap_nwifi(struct ath10k *ar,
					  struct sk_buff *msdu,
					  struct ieee80211_rx_status *status,
					  const u8 first_hdr[64],
					  enum htt_rx_mpdu_encrypt_type enctype)
{
	struct ieee80211_hdr *hdr;
	struct htt_rx_desc *rxd;
	size_t hdr_len;
	u8 da[ETH_ALEN];
	u8 sa[ETH_ALEN];
	int l3_pad_bytes;
	int bytes_aligned = ar->hw_params.decap_align_bytes;

	/* Delivered decapped frame:
	 * [nwifi 802.11 header] <-- replaced with 802.11 hdr
	 * [rfc1042/llc]
	 *
	 * Note: The nwifi header doesn't have QoS Control and is
	 * (always?) a 3addr frame.
	 *
	 * Note2: There's no A-MSDU subframe header. Even if it's part
	 * of an A-MSDU.
	 */

	/* pull decapped header and copy SA & DA */
	rxd = (void *)msdu->data - sizeof(*rxd);

	l3_pad_bytes = ath10k_rx_desc_get_l3_pad_bytes(&ar->hw_params, rxd);
	skb_put(msdu, l3_pad_bytes);

	hdr = (struct ieee80211_hdr *)(msdu->data + l3_pad_bytes);

	hdr_len = ath10k_htt_rx_nwifi_hdrlen(ar, hdr);
	ether_addr_copy(da, ieee80211_get_DA(hdr));
	ether_addr_copy(sa, ieee80211_get_SA(hdr));
	skb_pull(msdu, hdr_len);

	/* push original 802.11 header */
	hdr = (struct ieee80211_hdr *)first_hdr;
	hdr_len = ieee80211_hdrlen(hdr->frame_control);

	if (!(status->flag & RX_FLAG_IV_STRIPPED)) {
		memcpy(skb_push(msdu,
				ath10k_htt_rx_crypto_param_len(ar, enctype)),
		       (void *)hdr + round_up(hdr_len, bytes_aligned),
		       ath10k_htt_rx_crypto_param_len(ar, enctype));
	}

	memcpy(skb_push(msdu, hdr_len), hdr, hdr_len);

	/* original 802.11 header has a different DA and in
	 * case of 4addr it may also have different SA
	 */
	hdr = (struct ieee80211_hdr *)msdu->data;
	ether_addr_copy(ieee80211_get_DA(hdr), da);
	ether_addr_copy(ieee80211_get_SA(hdr), sa);
}

static void *ath10k_htt_rx_h_find_rfc1042(struct ath10k *ar,
					  struct sk_buff *msdu,
					  enum htt_rx_mpdu_encrypt_type enctype)
{
	struct ieee80211_hdr *hdr;
	struct htt_rx_desc *rxd;
	size_t hdr_len, crypto_len;
	void *rfc1042;
	bool is_first, is_last, is_amsdu;
	int bytes_aligned = ar->hw_params.decap_align_bytes;

	rxd = (void *)msdu->data - sizeof(*rxd);
	hdr = (void *)rxd->rx_hdr_status;

	is_first = !!(rxd->msdu_end.common.info0 &
		      __cpu_to_le32(RX_MSDU_END_INFO0_FIRST_MSDU));
	is_last = !!(rxd->msdu_end.common.info0 &
		     __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU));
	is_amsdu = !(is_first && is_last);

	rfc1042 = hdr;

	if (is_first) {
		hdr_len = ieee80211_hdrlen(hdr->frame_control);
		crypto_len = ath10k_htt_rx_crypto_param_len(ar, enctype);

		rfc1042 += round_up(hdr_len, bytes_aligned) +
			   round_up(crypto_len, bytes_aligned);
	}

	if (is_amsdu)
		rfc1042 += sizeof(struct amsdu_subframe_hdr);

	return rfc1042;
}

static void ath10k_htt_rx_h_undecap_eth(struct ath10k *ar,
					struct sk_buff *msdu,
					struct ieee80211_rx_status *status,
					const u8 first_hdr[64],
					enum htt_rx_mpdu_encrypt_type enctype)
{
	struct ieee80211_hdr *hdr;
	struct ethhdr *eth;
	size_t hdr_len;
	void *rfc1042;
	u8 da[ETH_ALEN];
	u8 sa[ETH_ALEN];
	int l3_pad_bytes;
	struct htt_rx_desc *rxd;
	int bytes_aligned = ar->hw_params.decap_align_bytes;

	/* Delivered decapped frame:
	 * [eth header] <-- replaced with 802.11 hdr & rfc1042/llc
	 * [payload]
	 */

	rfc1042 = ath10k_htt_rx_h_find_rfc1042(ar, msdu, enctype);
	if (WARN_ON_ONCE(!rfc1042))
		return;

	rxd = (void *)msdu->data - sizeof(*rxd);
	l3_pad_bytes = ath10k_rx_desc_get_l3_pad_bytes(&ar->hw_params, rxd);
	skb_put(msdu, l3_pad_bytes);
	skb_pull(msdu, l3_pad_bytes);

	/* pull decapped header and copy SA & DA */
	eth = (struct ethhdr *)msdu->data;
	ether_addr_copy(da, eth->h_dest);
	ether_addr_copy(sa, eth->h_source);
	skb_pull(msdu, sizeof(struct ethhdr));

	/* push rfc1042/llc/snap */
	memcpy(skb_push(msdu, sizeof(struct rfc1042_hdr)), rfc1042,
	       sizeof(struct rfc1042_hdr));

	/* push original 802.11 header */
	hdr = (struct ieee80211_hdr *)first_hdr;
	hdr_len = ieee80211_hdrlen(hdr->frame_control);

	if (!(status->flag & RX_FLAG_IV_STRIPPED)) {
		memcpy(skb_push(msdu,
				ath10k_htt_rx_crypto_param_len(ar, enctype)),
		       (void *)hdr + round_up(hdr_len, bytes_aligned),
		       ath10k_htt_rx_crypto_param_len(ar, enctype));
	}

	memcpy(skb_push(msdu, hdr_len), hdr, hdr_len);

	/* original 802.11 header has a different DA and in
	 * case of 4addr it may also have different SA
	 */
	hdr = (struct ieee80211_hdr *)msdu->data;
	ether_addr_copy(ieee80211_get_DA(hdr), da);
	ether_addr_copy(ieee80211_get_SA(hdr), sa);
}

static void ath10k_htt_rx_h_undecap_snap(struct ath10k *ar,
					 struct sk_buff *msdu,
					 struct ieee80211_rx_status *status,
					 const u8 first_hdr[64],
					 enum htt_rx_mpdu_encrypt_type enctype)
{
	struct ieee80211_hdr *hdr;
	size_t hdr_len;
	int l3_pad_bytes;
	struct htt_rx_desc *rxd;
	int bytes_aligned = ar->hw_params.decap_align_bytes;

	/* Delivered decapped frame:
	 * [amsdu header] <-- replaced with 802.11 hdr
	 * [rfc1042/llc]
	 * [payload]
	 */

	rxd = (void *)msdu->data - sizeof(*rxd);
	l3_pad_bytes = ath10k_rx_desc_get_l3_pad_bytes(&ar->hw_params, rxd);

	skb_put(msdu, l3_pad_bytes);
	skb_pull(msdu, sizeof(struct amsdu_subframe_hdr) + l3_pad_bytes);

	hdr = (struct ieee80211_hdr *)first_hdr;
	hdr_len = ieee80211_hdrlen(hdr->frame_control);

	if (!(status->flag & RX_FLAG_IV_STRIPPED)) {
		memcpy(skb_push(msdu,
				ath10k_htt_rx_crypto_param_len(ar, enctype)),
		       (void *)hdr + round_up(hdr_len, bytes_aligned),
		       ath10k_htt_rx_crypto_param_len(ar, enctype));
	}

	memcpy(skb_push(msdu, hdr_len), hdr, hdr_len);
}

static void ath10k_htt_rx_h_undecap(struct ath10k *ar,
				    struct sk_buff *msdu,
				    struct ieee80211_rx_status *status,
				    u8 first_hdr[64],
				    enum htt_rx_mpdu_encrypt_type enctype,
				    bool is_decrypted)
{
	struct htt_rx_desc *rxd;
	enum rx_msdu_decap_format decap;

	/* First msdu's decapped header:
	 * [802.11 header] <-- padded to 4 bytes long
	 * [crypto param] <-- padded to 4 bytes long
	 * [amsdu header] <-- only if A-MSDU
	 * [rfc1042/llc]
	 *
	 * Other (2nd, 3rd, ..) msdu's decapped header:
	 * [amsdu header] <-- only if A-MSDU
	 * [rfc1042/llc]
	 */

	rxd = (void *)msdu->data - sizeof(*rxd);
	decap = MS(__le32_to_cpu(rxd->msdu_start.common.info1),
		   RX_MSDU_START_INFO1_DECAP_FORMAT);

	switch (decap) {
	case RX_MSDU_DECAP_RAW:
		ath10k_htt_rx_h_undecap_raw(ar, msdu, status, enctype,
					    is_decrypted);
		break;
	case RX_MSDU_DECAP_NATIVE_WIFI:
		ath10k_htt_rx_h_undecap_nwifi(ar, msdu, status, first_hdr,
					      enctype);
		break;
	case RX_MSDU_DECAP_ETHERNET2_DIX:
		ath10k_htt_rx_h_undecap_eth(ar, msdu, status, first_hdr,
					    enctype);
		break;
	case RX_MSDU_DECAP_8023_SNAP_LLC:
		ath10k_htt_rx_h_undecap_snap(ar, msdu, status, first_hdr,
					     enctype);
		break;
	}
}

static int ath10k_htt_rx_get_csum_state(struct sk_buff *skb)
{
	struct htt_rx_desc *rxd;
	u32 flags, info;
	bool is_ip4, is_ip6;
	bool is_tcp, is_udp;
	bool ip_csum_ok, tcpudp_csum_ok;

	rxd = (void *)skb->data - sizeof(*rxd);
	flags = __le32_to_cpu(rxd->attention.flags);
	info = __le32_to_cpu(rxd->msdu_start.common.info1);

	is_ip4 = !!(info & RX_MSDU_START_INFO1_IPV4_PROTO);
	is_ip6 = !!(info & RX_MSDU_START_INFO1_IPV6_PROTO);
	is_tcp = !!(info & RX_MSDU_START_INFO1_TCP_PROTO);
	is_udp = !!(info & RX_MSDU_START_INFO1_UDP_PROTO);
	ip_csum_ok = !(flags & RX_ATTENTION_FLAGS_IP_CHKSUM_FAIL);
	tcpudp_csum_ok = !(flags & RX_ATTENTION_FLAGS_TCP_UDP_CHKSUM_FAIL);

	if (!is_ip4 && !is_ip6)
		return CHECKSUM_NONE;
	if (!is_tcp && !is_udp)
		return CHECKSUM_NONE;
	if (!ip_csum_ok)
		return CHECKSUM_NONE;
	if (!tcpudp_csum_ok)
		return CHECKSUM_NONE;

	return CHECKSUM_UNNECESSARY;
}

static void ath10k_htt_rx_h_csum_offload(struct sk_buff *msdu)
{
	msdu->ip_summed = ath10k_htt_rx_get_csum_state(msdu);
}
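
/* The resulting ip_summed values follow the kernel's usual meaning:
 * CHECKSUM_UNNECESSARY says the hardware verified both the IP header
 * checksum and the TCP/UDP checksum, so the stack skips its own
 * verification. Anything the hardware did not fully validate falls back
 * to CHECKSUM_NONE and is checked in software.
 */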

static void ath10k_htt_rx_h_mpdu(struct ath10k *ar,
				 struct sk_buff_head *amsdu,
				 struct ieee80211_rx_status *status,
				 bool fill_crypt_header,
				 u8 *rx_hdr,
				 enum ath10k_pkt_rx_err *err)
{
	struct sk_buff *first;
	struct sk_buff *last;
	struct sk_buff *msdu;
	struct htt_rx_desc *rxd;
	struct ieee80211_hdr *hdr;
	enum htt_rx_mpdu_encrypt_type enctype;
	u8 first_hdr[RX_HTT_HDR_STATUS_LEN];
	u8 *qos;
	bool has_fcs_err;
	bool has_crypto_err;
	bool has_tkip_err;
	bool has_peer_idx_invalid;
	bool is_decrypted;
	bool is_mgmt;
	u32 attention;

	if (skb_queue_empty(amsdu))
		return;

	first = skb_peek(amsdu);
	rxd = (void *)first->data - sizeof(*rxd);

	is_mgmt = !!(rxd->attention.flags &
		     __cpu_to_le32(RX_ATTENTION_FLAGS_MGMT_TYPE));

	enctype = MS(__le32_to_cpu(rxd->mpdu_start.info0),
		     RX_MPDU_START_INFO0_ENCRYPT_TYPE);

	/* First MSDU's Rx descriptor in an A-MSDU contains full 802.11
	 * decapped header. It'll be used for undecapping of each MSDU.
	 */
	hdr = (void *)rxd->rx_hdr_status;
	memcpy(first_hdr, hdr, RX_HTT_HDR_STATUS_LEN);

	if (rx_hdr)
		memcpy(rx_hdr, hdr, RX_HTT_HDR_STATUS_LEN);

	/* Each A-MSDU subframe will use the original header as the base and be
	 * reported as a separate MSDU so strip the A-MSDU bit from QoS Ctl.
	 */
	hdr = (void *)first_hdr;

	if (ieee80211_is_data_qos(hdr->frame_control)) {
		qos = ieee80211_get_qos_ctl(hdr);
		qos[0] &= ~IEEE80211_QOS_CTL_A_MSDU_PRESENT;
	}

	/* Some attention flags are valid only in the last MSDU. */
	last = skb_peek_tail(amsdu);
	rxd = (void *)last->data - sizeof(*rxd);
	attention = __le32_to_cpu(rxd->attention.flags);

	has_fcs_err = !!(attention & RX_ATTENTION_FLAGS_FCS_ERR);
	has_crypto_err = !!(attention & RX_ATTENTION_FLAGS_DECRYPT_ERR);
	has_tkip_err = !!(attention & RX_ATTENTION_FLAGS_TKIP_MIC_ERR);
	has_peer_idx_invalid = !!(attention & RX_ATTENTION_FLAGS_PEER_IDX_INVALID);

	/* Note: If hardware captures an encrypted frame that it can't decrypt,
	 * e.g. due to fcs error, missing peer or invalid key data it will
	 * report the frame as raw.
	 */
	is_decrypted = (enctype != HTT_RX_MPDU_ENCRYPT_NONE &&
			!has_fcs_err &&
			!has_crypto_err &&
			!has_peer_idx_invalid);

	/* Clear per-MPDU flags while leaving per-PPDU flags intact. */
	status->flag &= ~(RX_FLAG_FAILED_FCS_CRC |
			  RX_FLAG_MMIC_ERROR |
			  RX_FLAG_DECRYPTED |
			  RX_FLAG_IV_STRIPPED |
			  RX_FLAG_ONLY_MONITOR |
			  RX_FLAG_MMIC_STRIPPED);

	if (has_fcs_err)
		status->flag |= RX_FLAG_FAILED_FCS_CRC;

	if (has_tkip_err)
		status->flag |= RX_FLAG_MMIC_ERROR;

	if (err) {
		if (has_fcs_err)
			*err = ATH10K_PKT_RX_ERR_FCS;
		else if (has_tkip_err)
			*err = ATH10K_PKT_RX_ERR_TKIP;
		else if (has_crypto_err)
			*err = ATH10K_PKT_RX_ERR_CRYPT;
		else if (has_peer_idx_invalid)
			*err = ATH10K_PKT_RX_ERR_PEER_IDX_INVAL;
	}

	/* Firmware reports all necessary management frames via WMI already.
	 * They are not reported to monitor interfaces at all so pass the ones
	 * coming via HTT to monitor interfaces instead. This simplifies
	 * matters a lot.
	 */
	if (is_mgmt)
		status->flag |= RX_FLAG_ONLY_MONITOR;

	if (is_decrypted) {
		status->flag |= RX_FLAG_DECRYPTED;

		if (likely(!is_mgmt))
			status->flag |= RX_FLAG_MMIC_STRIPPED;

		if (fill_crypt_header)
			status->flag |= RX_FLAG_MIC_STRIPPED |
					RX_FLAG_ICV_STRIPPED;
		else
			status->flag |= RX_FLAG_IV_STRIPPED;
	}

	skb_queue_walk(amsdu, msdu) {
		ath10k_htt_rx_h_csum_offload(msdu);
		ath10k_htt_rx_h_undecap(ar, msdu, status, first_hdr, enctype,
					is_decrypted);

		/* Undecapping involves copying the original 802.11 header back
		 * to sk_buff. If frame is protected and hardware has decrypted
		 * it then remove the protected bit.
		 */
		if (!is_decrypted)
			continue;
		if (is_mgmt)
			continue;

		if (fill_crypt_header)
			continue;

		hdr = (void *)msdu->data;
		hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_PROTECTED);
	}
}

static void ath10k_htt_rx_h_enqueue(struct ath10k *ar,
				    struct sk_buff_head *amsdu,
				    struct ieee80211_rx_status *status)
{
	struct sk_buff *msdu;
	struct sk_buff *first_subframe;

	first_subframe = skb_peek(amsdu);

	while ((msdu = __skb_dequeue(amsdu))) {
		/* Setup per-MSDU flags */
		if (skb_queue_empty(amsdu))
			status->flag &= ~RX_FLAG_AMSDU_MORE;
		else
			status->flag |= RX_FLAG_AMSDU_MORE;

		if (msdu == first_subframe) {
			first_subframe = NULL;
			status->flag &= ~RX_FLAG_ALLOW_SAME_PN;
		} else {
			status->flag |= RX_FLAG_ALLOW_SAME_PN;
		}

		ath10k_htt_rx_h_queue_msdu(ar, status, msdu);
	}
}
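
/* Why RX_FLAG_ALLOW_SAME_PN above: all subframes of one A-MSDU travel in a
 * single MPDU and therefore share one CCMP/TKIP packet number. Only the
 * first subframe takes part in mac80211's replay detection; the flag
 * tells mac80211 not to drop the remaining subframes as replays.
 */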

static int ath10k_unchain_msdu(struct sk_buff_head *amsdu,
			       unsigned long int *unchain_cnt)
{
	struct sk_buff *skb, *first;
	int space;
	int total_len = 0;
	int amsdu_len = skb_queue_len(amsdu);

	/* TODO: We might be able to optimize this by using
	 * skb_try_coalesce or similar method to
	 * decrease copying, or maybe get mac80211 to
	 * provide a way to just receive a list of
	 * skb?
	 */

	first = __skb_dequeue(amsdu);

	/* Allocate total length all at once. */
	skb_queue_walk(amsdu, skb)
		total_len += skb->len;

	space = total_len - skb_tailroom(first);
	if ((space > 0) &&
	    (pskb_expand_head(first, 0, space, GFP_ATOMIC) < 0)) {
		/* TODO: bump some rx-oom error stat */
		/* put it back together so we can free the
		 * whole list at once.
		 */
		__skb_queue_head(amsdu, first);
		return -1;
	}

	/* Walk list again, copying contents into
	 * msdu-head
	 */
	while ((skb = __skb_dequeue(amsdu))) {
		skb_copy_from_linear_data(skb, skb_put(first, skb->len),
					  skb->len);
		dev_kfree_skb_any(skb);
	}

	__skb_queue_head(amsdu, first);

	*unchain_cnt += amsdu_len - 1;

	return 0;
}

static void ath10k_htt_rx_h_unchain(struct ath10k *ar,
				    struct sk_buff_head *amsdu,
				    unsigned long int *drop_cnt,
				    unsigned long int *unchain_cnt)
{
	struct sk_buff *first;
	struct htt_rx_desc *rxd;
	enum rx_msdu_decap_format decap;

	first = skb_peek(amsdu);
	rxd = (void *)first->data - sizeof(*rxd);
	decap = MS(__le32_to_cpu(rxd->msdu_start.common.info1),
		   RX_MSDU_START_INFO1_DECAP_FORMAT);

	/* FIXME: Current unchaining logic can only handle simple case of raw
	 * msdu chaining. If decapping is other than raw the chaining may be
	 * more complex and this isn't handled by the current code. Don't even
	 * try re-constructing such frames - it'll be pretty much garbage.
	 */
	if (decap != RX_MSDU_DECAP_RAW ||
	    skb_queue_len(amsdu) != 1 + rxd->frag_info.ring2_more_count) {
		*drop_cnt += skb_queue_len(amsdu);
		__skb_queue_purge(amsdu);
		return;
	}

	ath10k_unchain_msdu(amsdu, unchain_cnt);
}

static bool ath10k_htt_rx_validate_amsdu(struct ath10k *ar,
					 struct sk_buff_head *amsdu)
{
	u8 *subframe_hdr;
	struct sk_buff *first;
	bool is_first, is_last;
	struct htt_rx_desc *rxd;
	struct ieee80211_hdr *hdr;
	size_t hdr_len, crypto_len;
	enum htt_rx_mpdu_encrypt_type enctype;
	int bytes_aligned = ar->hw_params.decap_align_bytes;

	first = skb_peek(amsdu);

	rxd = (void *)first->data - sizeof(*rxd);
	hdr = (void *)rxd->rx_hdr_status;

	is_first = !!(rxd->msdu_end.common.info0 &
		      __cpu_to_le32(RX_MSDU_END_INFO0_FIRST_MSDU));
	is_last = !!(rxd->msdu_end.common.info0 &
		     __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU));

	/* Return in case of non-aggregated msdu */
	if (is_first && is_last)
		return true;

	/* First msdu flag is not set for the first msdu of the list */
	if (!is_first)
		return false;

	enctype = MS(__le32_to_cpu(rxd->mpdu_start.info0),
		     RX_MPDU_START_INFO0_ENCRYPT_TYPE);

	hdr_len = ieee80211_hdrlen(hdr->frame_control);
	crypto_len = ath10k_htt_rx_crypto_param_len(ar, enctype);

	subframe_hdr = (u8 *)hdr + round_up(hdr_len, bytes_aligned) +
		       crypto_len;

	/* Validate if the amsdu has a proper first subframe.
	 * There are chances a single msdu can be received as amsdu when
	 * the unauthenticated amsdu flag of a QoS header
	 * gets flipped in non-SPP AMSDU's, in such cases the first
	 * subframe has llc/snap header in place of a valid da.
	 * return false if the da matches rfc1042 pattern
	 */
	if (ether_addr_equal(subframe_hdr, rfc1042_header))
		return false;

	return true;
}

static bool ath10k_htt_rx_amsdu_allowed(struct ath10k *ar,
					struct sk_buff_head *amsdu,
					struct ieee80211_rx_status *rx_status)
{
	if (!rx_status->freq) {
		ath10k_dbg(ar, ATH10K_DBG_HTT, "no channel configured; ignoring frame(s)!\n");
		return false;
	}

	if (test_bit(ATH10K_CAC_RUNNING, &ar->dev_flags)) {
		ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx cac running\n");
		return false;
	}

	if (!ath10k_htt_rx_validate_amsdu(ar, amsdu)) {
		ath10k_dbg(ar, ATH10K_DBG_HTT, "invalid amsdu received\n");
		return false;
	}

	return true;
}

static void ath10k_htt_rx_h_filter(struct ath10k *ar,
				   struct sk_buff_head *amsdu,
				   struct ieee80211_rx_status *rx_status,
				   unsigned long int *drop_cnt)
{
	if (skb_queue_empty(amsdu))
		return;

	if (ath10k_htt_rx_amsdu_allowed(ar, amsdu, rx_status))
		return;

	if (drop_cnt)
		*drop_cnt += skb_queue_len(amsdu);

	__skb_queue_purge(amsdu);
}

static int ath10k_htt_rx_handle_amsdu(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	struct ieee80211_rx_status *rx_status = &htt->rx_status;
	struct sk_buff_head amsdu;
	int ret;
	unsigned long int drop_cnt = 0;
	unsigned long int unchain_cnt = 0;
	unsigned long int drop_cnt_filter = 0;
	unsigned long int msdus_to_queue, num_msdus;
	enum ath10k_pkt_rx_err err = ATH10K_PKT_RX_ERR_MAX;
	u8 first_hdr[RX_HTT_HDR_STATUS_LEN];

	__skb_queue_head_init(&amsdu);

	spin_lock_bh(&htt->rx_ring.lock);
	if (htt->rx_confused) {
		spin_unlock_bh(&htt->rx_ring.lock);
		return -EIO;
	}
	ret = ath10k_htt_rx_amsdu_pop(htt, &amsdu);
	spin_unlock_bh(&htt->rx_ring.lock);

	if (ret < 0) {
		ath10k_warn(ar, "rx ring became corrupted: %d\n", ret);
		__skb_queue_purge(&amsdu);
		/* FIXME: It's probably a good idea to reboot the
		 * device instead of leaving it inoperable.
		 */
		htt->rx_confused = true;
		return ret;
	}

	num_msdus = skb_queue_len(&amsdu);

	ath10k_htt_rx_h_ppdu(ar, &amsdu, rx_status, 0xffff);

	/* ret == 1 indicates chained msdus */
	if (ret > 0)
		ath10k_htt_rx_h_unchain(ar, &amsdu, &drop_cnt, &unchain_cnt);

	ath10k_htt_rx_h_filter(ar, &amsdu, rx_status, &drop_cnt_filter);
	ath10k_htt_rx_h_mpdu(ar, &amsdu, rx_status, true, first_hdr, &err);
	msdus_to_queue = skb_queue_len(&amsdu);
	ath10k_htt_rx_h_enqueue(ar, &amsdu, rx_status);

	ath10k_sta_update_rx_tid_stats(ar, first_hdr, num_msdus, err,
				       unchain_cnt, drop_cnt, drop_cnt_filter,
				       msdus_to_queue);

	return 0;
}

static void ath10k_htt_rx_proc_rx_ind(struct ath10k_htt *htt,
				      struct htt_rx_indication *rx)
{
	struct ath10k *ar = htt->ar;
	struct htt_rx_indication_mpdu_range *mpdu_ranges;
	int num_mpdu_ranges;
	int i, mpdu_count = 0;
	u16 peer_id;
	u8 tid;

	num_mpdu_ranges = MS(__le32_to_cpu(rx->hdr.info1),
			     HTT_RX_INDICATION_INFO1_NUM_MPDU_RANGES);
	peer_id = __le16_to_cpu(rx->hdr.peer_id);
	tid = MS(rx->hdr.info0, HTT_RX_INDICATION_INFO0_EXT_TID);

	mpdu_ranges = htt_rx_ind_get_mpdu_ranges(rx);

	ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt rx ind: ",
			rx, sizeof(*rx) +
			(sizeof(struct htt_rx_indication_mpdu_range) *
				num_mpdu_ranges));

	for (i = 0; i < num_mpdu_ranges; i++)
		mpdu_count += mpdu_ranges[i].mpdu_count;

	atomic_add(mpdu_count, &htt->num_mpdus_ready);

	ath10k_sta_update_rx_tid_stats_ampdu(ar, peer_id, tid, mpdu_ranges,
					     num_mpdu_ranges);
}

static void ath10k_htt_rx_tx_compl_ind(struct ath10k *ar,
				       struct sk_buff *skb)
{
	struct ath10k_htt *htt = &ar->htt;
	struct htt_resp *resp = (struct htt_resp *)skb->data;
	struct htt_tx_done tx_done = {};
	int status = MS(resp->data_tx_completion.flags, HTT_DATA_TX_STATUS);
	__le16 msdu_id;
	int i;

	switch (status) {
	case HTT_DATA_TX_STATUS_NO_ACK:
		tx_done.status = HTT_TX_COMPL_STATE_NOACK;
		break;
	case HTT_DATA_TX_STATUS_OK:
		tx_done.status = HTT_TX_COMPL_STATE_ACK;
		break;
	case HTT_DATA_TX_STATUS_DISCARD:
	case HTT_DATA_TX_STATUS_POSTPONE:
	case HTT_DATA_TX_STATUS_DOWNLOAD_FAIL:
		tx_done.status = HTT_TX_COMPL_STATE_DISCARD;
		break;
	default:
		ath10k_warn(ar, "unhandled tx completion status %d\n", status);
		tx_done.status = HTT_TX_COMPL_STATE_DISCARD;
		break;
	}

	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx completion num_msdus %d\n",
		   resp->data_tx_completion.num_msdus);

	for (i = 0; i < resp->data_tx_completion.num_msdus; i++) {
		msdu_id = resp->data_tx_completion.msdus[i];
		tx_done.msdu_id = __le16_to_cpu(msdu_id);

		/* kfifo_put: In practice firmware shouldn't fire off per-CE
		 * interrupt and main interrupt (MSI/-X range case) for the same
		 * HTC service so it should be safe to use kfifo_put w/o lock.
		 *
		 * From kfifo_put() documentation:
		 *  Note that with only one concurrent reader and one concurrent
		 *  writer, you don't need extra locking to use these macros.
		 */
		if (!kfifo_put(&htt->txdone_fifo, tx_done)) {
			ath10k_warn(ar, "txdone fifo overrun, msdu_id %d status %d\n",
				    tx_done.msdu_id, tx_done.status);
			ath10k_txrx_tx_unref(htt, &tx_done);
		}
	}
}

static void ath10k_htt_rx_addba(struct ath10k *ar, struct htt_resp *resp)
{
	struct htt_rx_addba *ev = &resp->rx_addba;
	struct ath10k_peer *peer;
	struct ath10k_vif *arvif;
	u16 info0, tid, peer_id;

	info0 = __le16_to_cpu(ev->info0);
	tid = MS(info0, HTT_RX_BA_INFO0_TID);
	peer_id = MS(info0, HTT_RX_BA_INFO0_PEER_ID);

	ath10k_dbg(ar, ATH10K_DBG_HTT,
		   "htt rx addba tid %hu peer_id %hu size %hhu\n",
		   tid, peer_id, ev->window_size);

	spin_lock_bh(&ar->data_lock);
	peer = ath10k_peer_find_by_id(ar, peer_id);
	if (!peer) {
		ath10k_warn(ar, "received addba event for invalid peer_id: %hu\n",
			    peer_id);
		spin_unlock_bh(&ar->data_lock);
		return;
	}

	arvif = ath10k_get_arvif(ar, peer->vdev_id);
	if (!arvif) {
		ath10k_warn(ar, "received addba event for invalid vdev_id: %u\n",
			    peer->vdev_id);
		spin_unlock_bh(&ar->data_lock);
		return;
	}

	ath10k_dbg(ar, ATH10K_DBG_HTT,
		   "htt rx start rx ba session sta %pM tid %hu size %hhu\n",
		   peer->addr, tid, ev->window_size);

	ieee80211_start_rx_ba_session_offl(arvif->vif, peer->addr, tid);
	spin_unlock_bh(&ar->data_lock);
}

static void ath10k_htt_rx_delba(struct ath10k *ar, struct htt_resp *resp)
{
	struct htt_rx_delba *ev = &resp->rx_delba;
	struct ath10k_peer *peer;
	struct ath10k_vif *arvif;
	u16 info0, tid, peer_id;

	info0 = __le16_to_cpu(ev->info0);
	tid = MS(info0, HTT_RX_BA_INFO0_TID);
	peer_id = MS(info0, HTT_RX_BA_INFO0_PEER_ID);

	ath10k_dbg(ar, ATH10K_DBG_HTT,
		   "htt rx delba tid %hu peer_id %hu\n",
		   tid, peer_id);

	spin_lock_bh(&ar->data_lock);
	peer = ath10k_peer_find_by_id(ar, peer_id);
	if (!peer) {
		ath10k_warn(ar, "received delba event for invalid peer_id: %hu\n",
			    peer_id);
		spin_unlock_bh(&ar->data_lock);
		return;
	}

	arvif = ath10k_get_arvif(ar, peer->vdev_id);
	if (!arvif) {
		ath10k_warn(ar, "received delba event for invalid vdev_id: %u\n",
			    peer->vdev_id);
		spin_unlock_bh(&ar->data_lock);
		return;
	}

	ath10k_dbg(ar, ATH10K_DBG_HTT,
		   "htt rx stop rx ba session sta %pM tid %hu\n",
		   peer->addr, tid);

	ieee80211_stop_rx_ba_session_offl(arvif->vif, peer->addr, tid);
	spin_unlock_bh(&ar->data_lock);
}

static int ath10k_htt_rx_extract_amsdu(struct sk_buff_head *list,
				       struct sk_buff_head *amsdu)
{
	struct sk_buff *msdu;
	struct htt_rx_desc *rxd;

	if (skb_queue_empty(list))
		return -ENOBUFS;

	if (WARN_ON(!skb_queue_empty(amsdu)))
		return -EINVAL;

	while ((msdu = __skb_dequeue(list))) {
		__skb_queue_tail(amsdu, msdu);

		rxd = (void *)msdu->data - sizeof(*rxd);
		if (rxd->msdu_end.common.info0 &
		    __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU))
			break;
	}

	msdu = skb_peek_tail(amsdu);
	rxd = (void *)msdu->data - sizeof(*rxd);
	if (!(rxd->msdu_end.common.info0 &
	      __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU))) {
		skb_queue_splice_init(amsdu, list);
		return -EAGAIN;
	}

	return 0;
}
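
/* Example: with LAST_MSDU flags (L) on a list [m0, m1|L, m2|L] the first
 * call above extracts [m0, m1] as one A-MSDU and leaves [m2] on the list
 * for the next call. A list that ends without a LAST_MSDU marker is
 * spliced back and -EAGAIN is returned so the caller can treat it as a
 * truncated event.
 */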

static void ath10k_htt_rx_h_rx_offload_prot(struct ieee80211_rx_status *status,
					    struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;

	if (!ieee80211_has_protected(hdr->frame_control))
		return;

	/* Offloaded frames are already decrypted but firmware insists they are
	 * protected in the 802.11 header. Strip the flag. Otherwise mac80211
	 * will drop the frame.
	 */

	hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_PROTECTED);
	status->flag |= RX_FLAG_DECRYPTED |
			RX_FLAG_IV_STRIPPED |
			RX_FLAG_MMIC_STRIPPED;
}

static void ath10k_htt_rx_h_rx_offload(struct ath10k *ar,
				       struct sk_buff_head *list)
{
	struct ath10k_htt *htt = &ar->htt;
	struct ieee80211_rx_status *status = &htt->rx_status;
	struct htt_rx_offload_msdu *rx;
	struct sk_buff *msdu;
	size_t offset;

	while ((msdu = __skb_dequeue(list))) {
		/* Offloaded frames don't have Rx descriptor. Instead they have
		 * a short meta information header.
		 */

		rx = (void *)msdu->data;

		skb_put(msdu, sizeof(*rx));
		skb_pull(msdu, sizeof(*rx));

		if (skb_tailroom(msdu) < __le16_to_cpu(rx->msdu_len)) {
			ath10k_warn(ar, "dropping frame: offloaded rx msdu is too long!\n");
			dev_kfree_skb_any(msdu);
			continue;
		}

		skb_put(msdu, __le16_to_cpu(rx->msdu_len));

		/* Offloaded rx header length isn't multiple of 2 nor 4 so the
		 * actual payload is unaligned. Align the frame. Otherwise
		 * mac80211 complains. This shouldn't reduce performance much
		 * because these offloaded frames are rare.
		 */
		offset = 4 - ((unsigned long)msdu->data & 3);
		skb_put(msdu, offset);
		memmove(msdu->data + offset, msdu->data, msdu->len);
		skb_pull(msdu, offset);

		/* FIXME: The frame is NWifi. Re-construct QoS Control
		 * if possible later.
		 */

		memset(status, 0, sizeof(*status));
		status->flag |= RX_FLAG_NO_SIGNAL_VAL;

		ath10k_htt_rx_h_rx_offload_prot(status, msdu);
		ath10k_htt_rx_h_channel(ar, status, NULL, rx->vdev_id);
		ath10k_htt_rx_h_queue_msdu(ar, status, msdu);
	}
}

2179 static int ath10k_htt_rx_in_ord_ind(struct ath10k *ar, struct sk_buff *skb)
2181 struct ath10k_htt *htt = &ar->htt;
2182 struct htt_resp *resp = (void *)skb->data;
2183 struct ieee80211_rx_status *status = &htt->rx_status;
2184 struct sk_buff_head list;
2185 struct sk_buff_head amsdu;
2194 lockdep_assert_held(&htt->rx_ring.lock);
2196 if (htt->rx_confused)
2199 skb_pull(skb, sizeof(resp->hdr));
2200 skb_pull(skb, sizeof(resp->rx_in_ord_ind));
2202 peer_id = __le16_to_cpu(resp->rx_in_ord_ind.peer_id);
2203 msdu_count = __le16_to_cpu(resp->rx_in_ord_ind.msdu_count);
2204 vdev_id = resp->rx_in_ord_ind.vdev_id;
2205 tid = SM(resp->rx_in_ord_ind.info, HTT_RX_IN_ORD_IND_INFO_TID);
2206 offload = !!(resp->rx_in_ord_ind.info &
2207 HTT_RX_IN_ORD_IND_INFO_OFFLOAD_MASK);
2208 frag = !!(resp->rx_in_ord_ind.info & HTT_RX_IN_ORD_IND_INFO_FRAG_MASK);
2210 ath10k_dbg(ar, ATH10K_DBG_HTT,
2211 "htt rx in ord vdev %i peer %i tid %i offload %i frag %i msdu count %i\n",
2212 vdev_id, peer_id, tid, offload, frag, msdu_count);
2214 if (skb->len < msdu_count * sizeof(*resp->rx_in_ord_ind.msdu_descs32)) {
2215 ath10k_warn(ar, "dropping invalid in order rx indication\n");
2219 /* The event can deliver more than 1 A-MSDU. Each A-MSDU is later
2220 * extracted and processed.
2222 __skb_queue_head_init(&list);
2223 if (ar->hw_params.target_64bit)
2224 ret = ath10k_htt_rx_pop_paddr64_list(htt, &resp->rx_in_ord_ind,
2227 ret = ath10k_htt_rx_pop_paddr32_list(htt, &resp->rx_in_ord_ind,
2231 ath10k_warn(ar, "failed to pop paddr list: %d\n", ret);
2232 htt->rx_confused = true;
	/* Offloaded frames are very different and need to be handled
	 * separately.
	 */
	if (offload)
		ath10k_htt_rx_h_rx_offload(ar, &list);

	while (!skb_queue_empty(&list)) {
		__skb_queue_head_init(&amsdu);
		ret = ath10k_htt_rx_extract_amsdu(&list, &amsdu);
		switch (ret) {
		case 0:
			/* Note: The in-order indication may report interleaved
			 * frames from different PPDUs meaning reported rx rate
			 * to mac80211 isn't accurate/reliable. It's still
			 * better to report something than nothing though. This
			 * should still give an idea about rx rate to the user.
			 */
			ath10k_htt_rx_h_ppdu(ar, &amsdu, status, vdev_id);
			ath10k_htt_rx_h_filter(ar, &amsdu, status, NULL);
			ath10k_htt_rx_h_mpdu(ar, &amsdu, status, false, NULL,
					     NULL);
			ath10k_htt_rx_h_enqueue(ar, &amsdu, status);
			break;
		case -EAGAIN:
			/* fall through */
		default:
			/* Should not happen. */
			ath10k_warn(ar, "failed to extract amsdu: %d\n", ret);
			htt->rx_confused = true;
			__skb_queue_purge(&list);
			return -EIO;
		}
	}

	return ret;
}
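/* Note: rx_confused acts as a one-way latch during normal operation. Once
 * the host suspects it lost sync with the target's rx ring state it keeps
 * rejecting in-order indications with -EIO (see the check at the top of the
 * function above) rather than risk feeding corrupt buffers up the stack.
 */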
static void ath10k_htt_rx_tx_fetch_resp_id_confirm(struct ath10k *ar,
						   const __le32 *resp_ids,
						   int num_resp_ids)
{
	int i;
	u32 resp_id;

	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch confirm num_resp_ids %d\n",
		   num_resp_ids);

	for (i = 0; i < num_resp_ids; i++) {
		resp_id = le32_to_cpu(resp_ids[i]);

		ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch confirm resp_id %u\n",
			   resp_id);

		/* TODO: free resp_id */
	}
}
static void ath10k_htt_rx_tx_fetch_ind(struct ath10k *ar, struct sk_buff *skb)
{
	struct ieee80211_hw *hw = ar->hw;
	struct ieee80211_txq *txq;
	struct htt_resp *resp = (struct htt_resp *)skb->data;
	struct htt_tx_fetch_record *record;
	size_t len;
	size_t max_num_bytes;
	size_t max_num_msdus;
	size_t num_bytes;
	size_t num_msdus;
	const __le32 *resp_ids;
	u16 num_records;
	u16 num_resp_ids;
	u16 peer_id;
	u8 tid;
	int ret;
	int i;

	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch ind\n");

	len = sizeof(resp->hdr) + sizeof(resp->tx_fetch_ind);
	if (unlikely(skb->len < len)) {
		ath10k_warn(ar, "received corrupted tx_fetch_ind event: buffer too short\n");
		return;
	}

	num_records = le16_to_cpu(resp->tx_fetch_ind.num_records);
	num_resp_ids = le16_to_cpu(resp->tx_fetch_ind.num_resp_ids);

	len += sizeof(resp->tx_fetch_ind.records[0]) * num_records;
	len += sizeof(resp->tx_fetch_ind.resp_ids[0]) * num_resp_ids;

	if (unlikely(skb->len < len)) {
		ath10k_warn(ar, "received corrupted tx_fetch_ind event: too many records/resp_ids\n");
		return;
	}

	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch ind num records %hu num resps %hu seq %hu\n",
		   num_records, num_resp_ids,
		   le16_to_cpu(resp->tx_fetch_ind.fetch_seq_num));

	if (!ar->htt.tx_q_state.enabled) {
		ath10k_warn(ar, "received unexpected tx_fetch_ind event: not enabled\n");
		return;
	}

	if (ar->htt.tx_q_state.mode == HTT_TX_MODE_SWITCH_PUSH) {
		ath10k_warn(ar, "received unexpected tx_fetch_ind event: in push mode\n");
		return;
	}
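	/* In push-pull mode the target "pulls" frames by naming peer/tid
	 * queues and per-queue MSDU/byte budgets in the records below; the
	 * host pushes up to those limits and echoes the amounts actually
	 * pushed back in its fetch response.
	 */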
	rcu_read_lock();

	for (i = 0; i < num_records; i++) {
		record = &resp->tx_fetch_ind.records[i];
		peer_id = MS(le16_to_cpu(record->info),
			     HTT_TX_FETCH_RECORD_INFO_PEER_ID);
		tid = MS(le16_to_cpu(record->info),
			 HTT_TX_FETCH_RECORD_INFO_TID);
		max_num_msdus = le16_to_cpu(record->num_msdus);
		max_num_bytes = le32_to_cpu(record->num_bytes);

		ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch record %i peer_id %hu tid %hhu msdus %zu bytes %zu\n",
			   i, peer_id, tid, max_num_msdus, max_num_bytes);

		if (unlikely(peer_id >= ar->htt.tx_q_state.num_peers) ||
		    unlikely(tid >= ar->htt.tx_q_state.num_tids)) {
			ath10k_warn(ar, "received out of range peer_id %hu tid %hhu\n",
				    peer_id, tid);
			continue;
		}

		spin_lock_bh(&ar->data_lock);
		txq = ath10k_mac_txq_lookup(ar, peer_id, tid);
		spin_unlock_bh(&ar->data_lock);

		/* It is okay to release the lock and use txq because RCU read
		 * lock is held.
		 */

		if (unlikely(!txq)) {
			ath10k_warn(ar, "failed to lookup txq for peer_id %hu tid %hhu\n",
				    peer_id, tid);
			continue;
		}

		num_msdus = 0;
		num_bytes = 0;

		while (num_msdus < max_num_msdus &&
		       num_bytes < max_num_bytes) {
			ret = ath10k_mac_tx_push_txq(hw, txq);
			if (ret < 0)
				break;

			num_msdus++;
			num_bytes += ret;
		}

		record->num_msdus = cpu_to_le16(num_msdus);
		record->num_bytes = cpu_to_le32(num_bytes);

		ath10k_htt_tx_txq_recalc(hw, txq);
	}

	rcu_read_unlock();
	resp_ids = ath10k_htt_get_tx_fetch_ind_resp_ids(&resp->tx_fetch_ind);
	ath10k_htt_rx_tx_fetch_resp_id_confirm(ar, resp_ids, num_resp_ids);

	ret = ath10k_htt_tx_fetch_resp(ar,
				       resp->tx_fetch_ind.token,
				       resp->tx_fetch_ind.fetch_seq_num,
				       resp->tx_fetch_ind.records,
				       num_records);
	if (unlikely(ret)) {
		ath10k_warn(ar, "failed to submit tx fetch resp for token 0x%08x: %d\n",
			    le32_to_cpu(resp->tx_fetch_ind.token), ret);
		/* FIXME: request fw restart */
	}

	ath10k_htt_tx_txq_sync(ar);
}
static void ath10k_htt_rx_tx_fetch_confirm(struct ath10k *ar,
					   struct sk_buff *skb)
{
	const struct htt_resp *resp = (void *)skb->data;
	size_t len;
	int num_resp_ids;

	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch confirm\n");

	len = sizeof(resp->hdr) + sizeof(resp->tx_fetch_confirm);
	if (unlikely(skb->len < len)) {
		ath10k_warn(ar, "received corrupted tx_fetch_confirm event: buffer too short\n");
		return;
	}

	num_resp_ids = le16_to_cpu(resp->tx_fetch_confirm.num_resp_ids);
	len += sizeof(resp->tx_fetch_confirm.resp_ids[0]) * num_resp_ids;

	if (unlikely(skb->len < len)) {
		ath10k_warn(ar, "received corrupted tx_fetch_confirm event: resp_ids buffer overflow\n");
		return;
	}

	ath10k_htt_rx_tx_fetch_resp_id_confirm(ar,
					       resp->tx_fetch_confirm.resp_ids,
					       num_resp_ids);
}
static void ath10k_htt_rx_tx_mode_switch_ind(struct ath10k *ar,
					     struct sk_buff *skb)
{
	const struct htt_resp *resp = (void *)skb->data;
	const struct htt_tx_mode_switch_record *record;
	struct ieee80211_txq *txq;
	struct ath10k_txq *artxq;
	size_t len;
	size_t num_records;
	enum htt_tx_mode_switch_mode mode;
	bool enable;
	u16 info0;
	u16 info1;
	u16 threshold;
	u16 peer_id;
	u8 tid;
	int i;

	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx mode switch ind\n");

	len = sizeof(resp->hdr) + sizeof(resp->tx_mode_switch_ind);
	if (unlikely(skb->len < len)) {
		ath10k_warn(ar, "received corrupted tx_mode_switch_ind event: buffer too short\n");
		return;
	}

	info0 = le16_to_cpu(resp->tx_mode_switch_ind.info0);
	info1 = le16_to_cpu(resp->tx_mode_switch_ind.info1);

	enable = !!(info0 & HTT_TX_MODE_SWITCH_IND_INFO0_ENABLE);
	num_records = MS(info0, HTT_TX_MODE_SWITCH_IND_INFO0_NUM_RECORDS);
	mode = MS(info1, HTT_TX_MODE_SWITCH_IND_INFO1_MODE);
	threshold = MS(info1, HTT_TX_MODE_SWITCH_IND_INFO1_THRESHOLD);

	ath10k_dbg(ar, ATH10K_DBG_HTT,
		   "htt rx tx mode switch ind info0 0x%04hx info1 0x%04hx enable %d num records %zd mode %d threshold %hu\n",
		   info0, info1, enable, num_records, mode, threshold);

	len += sizeof(resp->tx_mode_switch_ind.records[0]) * num_records;

	if (unlikely(skb->len < len)) {
		ath10k_warn(ar, "received corrupted tx_mode_switch_mode_ind event: too many records\n");
		return;
	}
	switch (mode) {
	case HTT_TX_MODE_SWITCH_PUSH:
	case HTT_TX_MODE_SWITCH_PUSH_PULL:
		break;
	default:
		ath10k_warn(ar, "received invalid tx_mode_switch_mode_ind mode %d, ignoring\n",
			    mode);
		return;
	}

	if (!enable)
		return;

	ar->htt.tx_q_state.enabled = enable;
	ar->htt.tx_q_state.mode = mode;
	ar->htt.tx_q_state.num_push_allowed = threshold;

	rcu_read_lock();
	for (i = 0; i < num_records; i++) {
		record = &resp->tx_mode_switch_ind.records[i];
		info0 = le16_to_cpu(record->info0);
		peer_id = MS(info0, HTT_TX_MODE_SWITCH_RECORD_INFO0_PEER_ID);
		tid = MS(info0, HTT_TX_MODE_SWITCH_RECORD_INFO0_TID);

		if (unlikely(peer_id >= ar->htt.tx_q_state.num_peers) ||
		    unlikely(tid >= ar->htt.tx_q_state.num_tids)) {
			ath10k_warn(ar, "received out of range peer_id %hu tid %hhu\n",
				    peer_id, tid);
			continue;
		}

		spin_lock_bh(&ar->data_lock);
		txq = ath10k_mac_txq_lookup(ar, peer_id, tid);
		spin_unlock_bh(&ar->data_lock);

		/* It is okay to release the lock and use txq because RCU read
		 * lock is held.
		 */

		if (unlikely(!txq)) {
			ath10k_warn(ar, "failed to lookup txq for peer_id %hu tid %hhu\n",
				    peer_id, tid);
			continue;
		}

		spin_lock_bh(&ar->htt.tx_lock);
		artxq = (void *)txq->drv_priv;
		artxq->num_push_allowed = le16_to_cpu(record->num_max_msdus);
		spin_unlock_bh(&ar->htt.tx_lock);
	}

	rcu_read_unlock();

	ath10k_mac_tx_push_pending(ar);
}
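/* Rough semantics of the two modes as used by this driver: in PUSH mode the
 * host pushes tx frames on its own, while in PUSH_PULL mode it only pushes
 * up to num_push_allowed and relies on subsequent tx_fetch indications from
 * firmware to pull the rest.
 */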
void ath10k_htt_htc_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
{
	bool release;

	release = ath10k_htt_t2h_msg_handler(ar, skb);

	/* Free the indication buffer */
	if (release)
		dev_kfree_skb_any(skb);
}
static inline bool is_valid_legacy_rate(u8 rate)
{
	static const u8 legacy_rates[] = {1, 2, 5, 11, 6, 9, 12,
					  18, 24, 36, 48, 54};
	int i;

	for (i = 0; i < ARRAY_SIZE(legacy_rates); i++) {
		if (rate == legacy_rates[i])
			return true;
	}

	return false;
}
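/* The table above is in whole Mbps as reported in the firmware ratecode:
 * 1, 2, 5(.5) and 11 Mbps CCK plus 6-54 Mbps OFDM. Firmware is known to
 * report the 5.5 Mbps CCK rate as 6; that quirk is undone in
 * ath10k_update_per_peer_tx_stats() below.
 */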
static void
ath10k_update_per_peer_tx_stats(struct ath10k *ar,
				struct ieee80211_sta *sta,
				struct ath10k_per_peer_tx_stats *peer_stats)
{
	struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
	u8 rate = 0, sgi;
	struct rate_info txrate;

	lockdep_assert_held(&ar->data_lock);

	txrate.flags = ATH10K_HW_PREAMBLE(peer_stats->ratecode);
	txrate.bw = ATH10K_HW_BW(peer_stats->flags);
	txrate.nss = ATH10K_HW_NSS(peer_stats->ratecode);
	txrate.mcs = ATH10K_HW_MCS_RATE(peer_stats->ratecode);
	sgi = ATH10K_HW_GI(peer_stats->flags);

	if (txrate.flags == WMI_RATE_PREAMBLE_VHT && txrate.mcs > 9) {
		ath10k_warn(ar, "Invalid VHT mcs %hhd peer stats", txrate.mcs);
		return;
	}

	if (txrate.flags == WMI_RATE_PREAMBLE_HT &&
	    (txrate.mcs > 7 || txrate.nss < 1)) {
		ath10k_warn(ar, "Invalid HT mcs %hhd nss %hhd peer stats",
			    txrate.mcs, txrate.nss);
		return;
	}
	memset(&arsta->txrate, 0, sizeof(arsta->txrate));

	if (txrate.flags == WMI_RATE_PREAMBLE_CCK ||
	    txrate.flags == WMI_RATE_PREAMBLE_OFDM) {
		rate = ATH10K_HW_LEGACY_RATE(peer_stats->ratecode);

		if (!is_valid_legacy_rate(rate)) {
			ath10k_warn(ar, "Invalid legacy rate %hhd peer stats",
				    rate);
			return;
		}

		/* This is hacky, FW sends CCK rate 5.5Mbps as 6 */
		rate *= 10;
		if (rate == 60 && txrate.flags == WMI_RATE_PREAMBLE_CCK)
			rate = rate - 5;
		arsta->txrate.legacy = rate;
	} else if (txrate.flags == WMI_RATE_PREAMBLE_HT) {
		arsta->txrate.flags = RATE_INFO_FLAGS_MCS;
		arsta->txrate.mcs = txrate.mcs + 8 * (txrate.nss - 1);
	} else {
		arsta->txrate.flags = RATE_INFO_FLAGS_VHT_MCS;
		arsta->txrate.mcs = txrate.mcs;
	}

	if (sgi)
		arsta->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI;

	arsta->txrate.nss = txrate.nss;
	arsta->txrate.bw = ath10k_bw_to_mac80211_bw(txrate.bw);
}
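/* Mapping notes for the block above: HT folds the per-stream MCS (0-7) and
 * stream count into a single index, so e.g. nss 2 mcs 7 is reported as HT
 * MCS 15; VHT keeps mcs and nss separate; and rate_info.legacy is in units
 * of 100 kbit/s, hence the rate * 10 scaling.
 */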
static void ath10k_htt_fetch_peer_stats(struct ath10k *ar,
					struct sk_buff *skb)
{
	struct htt_resp *resp = (struct htt_resp *)skb->data;
	struct ath10k_per_peer_tx_stats *p_tx_stats = &ar->peer_tx_stats;
	struct htt_per_peer_tx_stats_ind *tx_stats;
	struct ieee80211_sta *sta;
	struct ath10k_peer *peer;
	int peer_id, i;
	u8 ppdu_len, num_ppdu;

	num_ppdu = resp->peer_tx_stats.num_ppdu;
	ppdu_len = resp->peer_tx_stats.ppdu_len * sizeof(__le32);

	if (skb->len < sizeof(struct htt_resp_hdr) + num_ppdu * ppdu_len) {
		ath10k_warn(ar, "Invalid peer stats buf length %d\n", skb->len);
		return;
	}

	tx_stats = (struct htt_per_peer_tx_stats_ind *)
			(resp->peer_tx_stats.payload);
	peer_id = __le16_to_cpu(tx_stats->peer_id);

	rcu_read_lock();
	spin_lock_bh(&ar->data_lock);
	peer = ath10k_peer_find_by_id(ar, peer_id);
	if (!peer || !peer->sta) {
		ath10k_warn(ar, "Invalid peer id %d peer stats buffer\n",
			    peer_id);
		goto out;
	}

	sta = peer->sta;
	for (i = 0; i < num_ppdu; i++) {
		tx_stats = (struct htt_per_peer_tx_stats_ind *)
			   (resp->peer_tx_stats.payload + i * ppdu_len);

		p_tx_stats->succ_bytes = __le32_to_cpu(tx_stats->succ_bytes);
		p_tx_stats->retry_bytes = __le32_to_cpu(tx_stats->retry_bytes);
		p_tx_stats->failed_bytes =
				__le32_to_cpu(tx_stats->failed_bytes);
		p_tx_stats->ratecode = tx_stats->ratecode;
		p_tx_stats->flags = tx_stats->flags;
		p_tx_stats->succ_pkts = __le16_to_cpu(tx_stats->succ_pkts);
		p_tx_stats->retry_pkts = __le16_to_cpu(tx_stats->retry_pkts);
		p_tx_stats->failed_pkts = __le16_to_cpu(tx_stats->failed_pkts);

		ath10k_update_per_peer_tx_stats(ar, sta, p_tx_stats);
	}

out:
	spin_unlock_bh(&ar->data_lock);
	rcu_read_unlock();
}
static void ath10k_fetch_10_2_tx_stats(struct ath10k *ar, u8 *data)
{
	struct ath10k_pktlog_hdr *hdr = (struct ath10k_pktlog_hdr *)data;
	struct ath10k_per_peer_tx_stats *p_tx_stats = &ar->peer_tx_stats;
	struct ath10k_10_2_peer_tx_stats *tx_stats;
	struct ieee80211_sta *sta;
	struct ath10k_peer *peer;
	u16 log_type = __le16_to_cpu(hdr->log_type);
	u32 peer_id = 0, i;

	if (log_type != ATH_PKTLOG_TYPE_TX_STAT)
		return;

	tx_stats = (struct ath10k_10_2_peer_tx_stats *)((hdr->payload) +
		    ATH10K_10_2_TX_STATS_OFFSET);

	if (!tx_stats->tx_ppdu_cnt)
		return;

	peer_id = tx_stats->peer_id;

	rcu_read_lock();
	spin_lock_bh(&ar->data_lock);
	peer = ath10k_peer_find_by_id(ar, peer_id);
	if (!peer || !peer->sta) {
		ath10k_warn(ar, "Invalid peer id %d in peer stats buffer\n",
			    peer_id);
		goto out;
	}

	sta = peer->sta;
	for (i = 0; i < tx_stats->tx_ppdu_cnt; i++) {
		p_tx_stats->succ_bytes =
			__le16_to_cpu(tx_stats->success_bytes[i]);
		p_tx_stats->retry_bytes =
			__le16_to_cpu(tx_stats->retry_bytes[i]);
		p_tx_stats->failed_bytes =
			__le16_to_cpu(tx_stats->failed_bytes[i]);
		p_tx_stats->ratecode = tx_stats->ratecode[i];
		p_tx_stats->flags = tx_stats->flags[i];
		p_tx_stats->succ_pkts = tx_stats->success_pkts[i];
		p_tx_stats->retry_pkts = tx_stats->retry_pkts[i];
		p_tx_stats->failed_pkts = tx_stats->failed_pkts[i];

		ath10k_update_per_peer_tx_stats(ar, sta, p_tx_stats);
	}
	spin_unlock_bh(&ar->data_lock);
	rcu_read_unlock();

	return;

out:
	spin_unlock_bh(&ar->data_lock);
	rcu_read_unlock();
}
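/* Unlike the HTT_T2H_MSG_TYPE_PEER_STATS path above, 10.2 firmware delivers
 * per-peer tx stats piggybacked on pktlog events, which is why this helper
 * parses an ath10k_pktlog_hdr and skips ATH10K_10_2_TX_STATS_OFFSET bytes
 * of pktlog payload before reading the stats.
 */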
bool ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
{
	struct ath10k_htt *htt = &ar->htt;
	struct htt_resp *resp = (struct htt_resp *)skb->data;
	enum htt_t2h_msg_type type;

	/* confirm alignment */
	if (!IS_ALIGNED((unsigned long)skb->data, 4))
		ath10k_warn(ar, "unaligned htt message, expect trouble\n");

	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx, msg_type: 0x%0X\n",
		   resp->hdr.msg_type);

	if (resp->hdr.msg_type >= ar->htt.t2h_msg_types_max) {
		ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx, unsupported msg_type: 0x%0X\n max: 0x%0X",
			   resp->hdr.msg_type, ar->htt.t2h_msg_types_max);
		return true;
	}
	type = ar->htt.t2h_msg_types[resp->hdr.msg_type];

	switch (type) {
	case HTT_T2H_MSG_TYPE_VERSION_CONF: {
		htt->target_version_major = resp->ver_resp.major;
		htt->target_version_minor = resp->ver_resp.minor;
		complete(&htt->target_version_received);
		break;
	}
	case HTT_T2H_MSG_TYPE_RX_IND:
		ath10k_htt_rx_proc_rx_ind(htt, &resp->rx_ind);
		break;
	case HTT_T2H_MSG_TYPE_PEER_MAP: {
		struct htt_peer_map_event ev = {
			.vdev_id = resp->peer_map.vdev_id,
			.peer_id = __le16_to_cpu(resp->peer_map.peer_id),
		};
		memcpy(ev.addr, resp->peer_map.addr, sizeof(ev.addr));
		ath10k_peer_map_event(htt, &ev);
		break;
	}
	case HTT_T2H_MSG_TYPE_PEER_UNMAP: {
		struct htt_peer_unmap_event ev = {
			.peer_id = __le16_to_cpu(resp->peer_unmap.peer_id),
		};
		ath10k_peer_unmap_event(htt, &ev);
		break;
	}
	case HTT_T2H_MSG_TYPE_MGMT_TX_COMPLETION: {
		struct htt_tx_done tx_done = {};
		int status = __le32_to_cpu(resp->mgmt_tx_completion.status);
		int info = __le32_to_cpu(resp->mgmt_tx_completion.info);

		tx_done.msdu_id = __le32_to_cpu(resp->mgmt_tx_completion.desc_id);

		switch (status) {
		case HTT_MGMT_TX_STATUS_OK:
			tx_done.status = HTT_TX_COMPL_STATE_ACK;
			if (test_bit(WMI_SERVICE_HTT_MGMT_TX_COMP_VALID_FLAGS,
				     ar->wmi.svc_map) &&
			    (resp->mgmt_tx_completion.flags &
			     HTT_MGMT_TX_CMPL_FLAG_ACK_RSSI)) {
				tx_done.ack_rssi =
				FIELD_GET(HTT_MGMT_TX_CMPL_INFO_ACK_RSSI_MASK,
					  info);
			}
			break;
		case HTT_MGMT_TX_STATUS_RETRY:
			tx_done.status = HTT_TX_COMPL_STATE_NOACK;
			break;
		case HTT_MGMT_TX_STATUS_DROP:
			tx_done.status = HTT_TX_COMPL_STATE_DISCARD;
			break;
		}

		status = ath10k_txrx_tx_unref(htt, &tx_done);
		if (!status) {
			spin_lock_bh(&htt->tx_lock);
			ath10k_htt_tx_mgmt_dec_pending(htt);
			spin_unlock_bh(&htt->tx_lock);
		}
		break;
	}
	case HTT_T2H_MSG_TYPE_TX_COMPL_IND:
		ath10k_htt_rx_tx_compl_ind(htt->ar, skb);
		break;
	case HTT_T2H_MSG_TYPE_SEC_IND: {
		struct ath10k *ar = htt->ar;
		struct htt_security_indication *ev = &resp->security_indication;

		ath10k_dbg(ar, ATH10K_DBG_HTT,
			   "sec ind peer_id %d unicast %d type %d\n",
			   __le16_to_cpu(ev->peer_id),
			   !!(ev->flags & HTT_SECURITY_IS_UNICAST),
			   MS(ev->flags, HTT_SECURITY_TYPE));
		complete(&ar->install_key_done);
		break;
	}
	case HTT_T2H_MSG_TYPE_RX_FRAG_IND: {
		ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt event: ",
				skb->data, skb->len);
		atomic_inc(&htt->num_mpdus_ready);
		break;
	}
	case HTT_T2H_MSG_TYPE_TEST:
		break;
	case HTT_T2H_MSG_TYPE_STATS_CONF:
		trace_ath10k_htt_stats(ar, skb->data, skb->len);
		break;
	case HTT_T2H_MSG_TYPE_TX_INSPECT_IND:
		/* Firmware can return tx frames if it's unable to fully
		 * process them and suspects host may be able to fix it. ath10k
		 * sends all tx frames as already inspected so this shouldn't
		 * happen unless fw has a bug.
		 */
		ath10k_warn(ar, "received an unexpected htt tx inspect event\n");
		break;
	case HTT_T2H_MSG_TYPE_RX_ADDBA:
		ath10k_htt_rx_addba(ar, resp);
		break;
	case HTT_T2H_MSG_TYPE_RX_DELBA:
		ath10k_htt_rx_delba(ar, resp);
		break;
	case HTT_T2H_MSG_TYPE_PKTLOG: {
		trace_ath10k_htt_pktlog(ar, resp->pktlog_msg.payload,
					skb->len -
					offsetof(struct htt_resp,
						 pktlog_msg.payload));

		if (ath10k_peer_stats_enabled(ar))
			ath10k_fetch_10_2_tx_stats(ar,
						   resp->pktlog_msg.payload);
		break;
	}
	case HTT_T2H_MSG_TYPE_RX_FLUSH: {
		/* Ignore this event because mac80211 takes care of Rx
		 * aggregation reordering.
		 */
		break;
	}
	case HTT_T2H_MSG_TYPE_RX_IN_ORD_PADDR_IND: {
		skb_queue_tail(&htt->rx_in_ord_compl_q, skb);
		return false;
	}
	case HTT_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND:
		break;
	case HTT_T2H_MSG_TYPE_CHAN_CHANGE: {
		u32 phymode = __le32_to_cpu(resp->chan_change.phymode);
		u32 freq = __le32_to_cpu(resp->chan_change.freq);

		ar->tgt_oper_chan = ieee80211_get_channel(ar->hw->wiphy, freq);
		ath10k_dbg(ar, ATH10K_DBG_HTT,
			   "htt chan change freq %u phymode %s\n",
			   freq, ath10k_wmi_phymode_str(phymode));
		break;
	}
	case HTT_T2H_MSG_TYPE_AGGR_CONF:
		break;
	case HTT_T2H_MSG_TYPE_TX_FETCH_IND: {
		struct sk_buff *tx_fetch_ind = skb_copy(skb, GFP_ATOMIC);

		if (!tx_fetch_ind) {
			ath10k_warn(ar, "failed to copy htt tx fetch ind\n");
			break;
		}
		skb_queue_tail(&htt->tx_fetch_ind_q, tx_fetch_ind);
		break;
	}
	case HTT_T2H_MSG_TYPE_TX_FETCH_CONFIRM:
		ath10k_htt_rx_tx_fetch_confirm(ar, skb);
		break;
	case HTT_T2H_MSG_TYPE_TX_MODE_SWITCH_IND:
		ath10k_htt_rx_tx_mode_switch_ind(ar, skb);
		break;
	case HTT_T2H_MSG_TYPE_PEER_STATS:
		ath10k_htt_fetch_peer_stats(ar, skb);
		break;
	case HTT_T2H_MSG_TYPE_EN_STATS:
	default:
		ath10k_warn(ar, "htt event (%d) not handled\n",
			    resp->hdr.msg_type);
		ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt event: ",
				skb->data, skb->len);
		break;
	}

	return true;
}
EXPORT_SYMBOL(ath10k_htt_t2h_msg_handler);
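/* Ownership convention for the handler above: returning true tells the
 * caller (e.g. ath10k_htt_htc_t2h_msg_handler()) that it still owns the skb
 * and may free it; returning false means the skb was queued elsewhere (such
 * as rx_in_ord_compl_q) and must not be freed by the caller.
 */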
void ath10k_htt_rx_pktlog_completion_handler(struct ath10k *ar,
					     struct sk_buff *skb)
{
	trace_ath10k_htt_pktlog(ar, skb->data, skb->len);
	dev_kfree_skb_any(skb);
}
EXPORT_SYMBOL(ath10k_htt_rx_pktlog_completion_handler);
static int ath10k_htt_rx_deliver_msdu(struct ath10k *ar, int quota, int budget)
{
	struct sk_buff *skb;

	while (quota < budget) {
		if (skb_queue_empty(&ar->htt.rx_msdus_q))
			break;

		skb = skb_dequeue(&ar->htt.rx_msdus_q);
		if (!skb)
			break;
		ath10k_process_rx(ar, skb);
		quota++;
	}

	return quota;
}
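/* Example of the quota accounting above: called with quota == 0 and
 * budget == 64, the loop delivers at most 64 queued msdus and returns the
 * number actually delivered, so the caller can tell whether the NAPI budget
 * was exhausted.
 */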
int ath10k_htt_txrx_compl_task(struct ath10k *ar, int budget)
{
	struct ath10k_htt *htt = &ar->htt;
	struct htt_tx_done tx_done = {};
	struct sk_buff_head tx_ind_q;
	struct sk_buff *skb;
	unsigned long flags;
	int quota = 0, done, ret;
	bool resched_napi = false;

	__skb_queue_head_init(&tx_ind_q);

	/* Process pending frames before dequeuing more data
	 * from hardware.
	 */
	quota = ath10k_htt_rx_deliver_msdu(ar, quota, budget);
	if (quota == budget) {
		resched_napi = true;
		goto exit;
	}
	while ((skb = skb_dequeue(&htt->rx_in_ord_compl_q))) {
		spin_lock_bh(&htt->rx_ring.lock);
		ret = ath10k_htt_rx_in_ord_ind(ar, skb);
		spin_unlock_bh(&htt->rx_ring.lock);

		dev_kfree_skb_any(skb);
		if (ret == -EIO) {
			resched_napi = true;
			goto exit;
		}
	}

	while (atomic_read(&htt->num_mpdus_ready)) {
		ret = ath10k_htt_rx_handle_amsdu(htt);
		if (ret == -EIO) {
			resched_napi = true;
			goto exit;
		}
		atomic_dec(&htt->num_mpdus_ready);
	}

	/* Deliver received data after processing data from hardware */
	quota = ath10k_htt_rx_deliver_msdu(ar, quota, budget);
	/* From NAPI documentation:
	 *  The napi poll() function may also process TX completions, in which
	 *  case if it processes the entire TX ring then it should count that
	 *  work as the rest of the budget.
	 */
	if ((quota < budget) && !kfifo_is_empty(&htt->txdone_fifo))
		quota = budget;

	/* kfifo_get: called only within txrx_tasklet so it's neatly serialized.
	 * From kfifo_get() documentation:
	 *  Note that with only one concurrent reader and one concurrent writer,
	 *  you don't need extra locking to use these macros.
	 */
	while (kfifo_get(&htt->txdone_fifo, &tx_done))
		ath10k_txrx_tx_unref(htt, &tx_done);
	ath10k_mac_tx_push_pending(ar);

	spin_lock_irqsave(&htt->tx_fetch_ind_q.lock, flags);
	skb_queue_splice_init(&htt->tx_fetch_ind_q, &tx_ind_q);
	spin_unlock_irqrestore(&htt->tx_fetch_ind_q.lock, flags);

	while ((skb = __skb_dequeue(&tx_ind_q))) {
		ath10k_htt_rx_tx_fetch_ind(ar, skb);
		dev_kfree_skb_any(skb);
	}

exit:
	ath10k_htt_rx_msdu_buff_replenish(htt);
	/* In case of rx failure or more data to read, report budget
	 * to reschedule NAPI poll
	 */
	done = resched_napi ? budget : quota;

	return done;
}
EXPORT_SYMBOL(ath10k_htt_txrx_compl_task);
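/* The rx ring accessors below come in 32-bit and 64-bit flavors because
 * older targets use 32-bit rx ring paddrs while 64-bit targets (e.g.
 * WCN3990) use 64-bit ones; ath10k_htt_set_rx_ops() picks one set based on
 * hw_params.target_64bit.
 */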
static const struct ath10k_htt_rx_ops htt_rx_ops_32 = {
	.htt_get_rx_ring_size = ath10k_htt_get_rx_ring_size_32,
	.htt_config_paddrs_ring = ath10k_htt_config_paddrs_ring_32,
	.htt_set_paddrs_ring = ath10k_htt_set_paddrs_ring_32,
	.htt_get_vaddr_ring = ath10k_htt_get_vaddr_ring_32,
	.htt_reset_paddrs_ring = ath10k_htt_reset_paddrs_ring_32,
};

static const struct ath10k_htt_rx_ops htt_rx_ops_64 = {
	.htt_get_rx_ring_size = ath10k_htt_get_rx_ring_size_64,
	.htt_config_paddrs_ring = ath10k_htt_config_paddrs_ring_64,
	.htt_set_paddrs_ring = ath10k_htt_set_paddrs_ring_64,
	.htt_get_vaddr_ring = ath10k_htt_get_vaddr_ring_64,
	.htt_reset_paddrs_ring = ath10k_htt_reset_paddrs_ring_64,
};
void ath10k_htt_set_rx_ops(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;

	if (ar->hw_params.target_64bit)
		htt->rx_ops = &htt_rx_ops_64;
	else
		htt->rx_ops = &htt_rx_ops_32;
}