1 /*
2  * Copyright (c) 2005-2011 Atheros Communications Inc.
3  * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
4  * Copyright (c) 2018, The Linux Foundation. All rights reserved.
5  *
6  * Permission to use, copy, modify, and/or distribute this software for any
7  * purpose with or without fee is hereby granted, provided that the above
8  * copyright notice and this permission notice appear in all copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17  */
18
19 #include "core.h"
20 #include "htc.h"
21 #include "htt.h"
22 #include "txrx.h"
23 #include "debug.h"
24 #include "trace.h"
25 #include "mac.h"
26
27 #include <linux/log2.h>
28 #include <linux/bitfield.h>
29
 30 /* When under memory pressure the rx ring refill may fail and need a retry */
31 #define HTT_RX_RING_REFILL_RETRY_MS 50
32
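/* If the ring could only be partially replenished, reschedule another
 * refill attempt after this short delay.
 */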
33 #define HTT_RX_RING_REFILL_RESCHED_MS 5
34
35 static int ath10k_htt_rx_get_csum_state(struct sk_buff *skb);
36
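/* Look up the rx skb whose DMA address matches @paddr in the rx ring's
 * skb hash table. Returns NULL (and warns once) if no match is found.
 */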
37 static struct sk_buff *
38 ath10k_htt_rx_find_skb_paddr(struct ath10k *ar, u64 paddr)
39 {
40         struct ath10k_skb_rxcb *rxcb;
41
42         hash_for_each_possible(ar->htt.rx_ring.skb_table, rxcb, hlist, paddr)
43                 if (rxcb->paddr == paddr)
44                         return ATH10K_RXCB_SKB(rxcb);
45
46         WARN_ON_ONCE(1);
47         return NULL;
48 }
49
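/* Unmap and free every buffer currently posted to the rx ring, covering
 * both the in-order (skb hash table) and the default (netbufs_ring) modes,
 * then reset the fill count and bookkeeping.
 */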
50 static void ath10k_htt_rx_ring_free(struct ath10k_htt *htt)
51 {
52         struct sk_buff *skb;
53         struct ath10k_skb_rxcb *rxcb;
54         struct hlist_node *n;
55         int i;
56
57         if (htt->rx_ring.in_ord_rx) {
58                 hash_for_each_safe(htt->rx_ring.skb_table, i, n, rxcb, hlist) {
59                         skb = ATH10K_RXCB_SKB(rxcb);
60                         dma_unmap_single(htt->ar->dev, rxcb->paddr,
61                                          skb->len + skb_tailroom(skb),
62                                          DMA_FROM_DEVICE);
63                         hash_del(&rxcb->hlist);
64                         dev_kfree_skb_any(skb);
65                 }
66         } else {
67                 for (i = 0; i < htt->rx_ring.size; i++) {
68                         skb = htt->rx_ring.netbufs_ring[i];
69                         if (!skb)
70                                 continue;
71
72                         rxcb = ATH10K_SKB_RXCB(skb);
73                         dma_unmap_single(htt->ar->dev, rxcb->paddr,
74                                          skb->len + skb_tailroom(skb),
75                                          DMA_FROM_DEVICE);
76                         dev_kfree_skb_any(skb);
77                 }
78         }
79
80         htt->rx_ring.fill_cnt = 0;
81         hash_init(htt->rx_ring.skb_table);
82         memset(htt->rx_ring.netbufs_ring, 0,
83                htt->rx_ring.size * sizeof(htt->rx_ring.netbufs_ring[0]));
84 }
85
86 static size_t ath10k_htt_get_rx_ring_size_32(struct ath10k_htt *htt)
87 {
88         return htt->rx_ring.size * sizeof(htt->rx_ring.paddrs_ring_32);
89 }
90
91 static size_t ath10k_htt_get_rx_ring_size_64(struct ath10k_htt *htt)
92 {
93         return htt->rx_ring.size * sizeof(htt->rx_ring.paddrs_ring_64);
94 }
95
96 static void ath10k_htt_config_paddrs_ring_32(struct ath10k_htt *htt,
97                                              void *vaddr)
98 {
99         htt->rx_ring.paddrs_ring_32 = vaddr;
100 }
101
102 static void ath10k_htt_config_paddrs_ring_64(struct ath10k_htt *htt,
103                                              void *vaddr)
104 {
105         htt->rx_ring.paddrs_ring_64 = vaddr;
106 }
107
108 static void ath10k_htt_set_paddrs_ring_32(struct ath10k_htt *htt,
109                                           dma_addr_t paddr, int idx)
110 {
111         htt->rx_ring.paddrs_ring_32[idx] = __cpu_to_le32(paddr);
112 }
113
114 static void ath10k_htt_set_paddrs_ring_64(struct ath10k_htt *htt,
115                                           dma_addr_t paddr, int idx)
116 {
117         htt->rx_ring.paddrs_ring_64[idx] = __cpu_to_le64(paddr);
118 }
119
120 static void ath10k_htt_reset_paddrs_ring_32(struct ath10k_htt *htt, int idx)
121 {
122         htt->rx_ring.paddrs_ring_32[idx] = 0;
123 }
124
125 static void ath10k_htt_reset_paddrs_ring_64(struct ath10k_htt *htt, int idx)
126 {
127         htt->rx_ring.paddrs_ring_64[idx] = 0;
128 }
129
130 static void *ath10k_htt_get_vaddr_ring_32(struct ath10k_htt *htt)
131 {
132         return (void *)htt->rx_ring.paddrs_ring_32;
133 }
134
135 static void *ath10k_htt_get_vaddr_ring_64(struct ath10k_htt *htt)
136 {
137         return (void *)htt->rx_ring.paddrs_ring_64;
138 }
139
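/* Allocate, align, DMA-map and post up to @num rx buffers, then publish the
 * updated alloc index to the firmware. Returns -ENOMEM if an allocation or
 * mapping fails part way; buffers posted so far remain on the ring.
 */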
140 static int __ath10k_htt_rx_ring_fill_n(struct ath10k_htt *htt, int num)
141 {
142         struct htt_rx_desc *rx_desc;
143         struct ath10k_skb_rxcb *rxcb;
144         struct sk_buff *skb;
145         dma_addr_t paddr;
146         int ret = 0, idx;
147
148         /* The Full Rx Reorder firmware has no way of telling the host
 149          * implicitly when it has copied HTT Rx Ring buffers to the MAC Rx Ring.
 150          * To keep things simple, make sure the ring is always at least half
 151          * empty. This guarantees that no replenishment overruns are possible.
152          */
153         BUILD_BUG_ON(HTT_RX_RING_FILL_LEVEL >= HTT_RX_RING_SIZE / 2);
154
155         idx = __le32_to_cpu(*htt->rx_ring.alloc_idx.vaddr);
156
157         if (idx < 0 || idx >= htt->rx_ring.size) {
158                 ath10k_err(htt->ar, "rx ring index is not valid, firmware malfunctioning?\n");
159                 idx &= htt->rx_ring.size_mask;
160                 ret = -ENOMEM;
161                 goto fail;
162         }
163
164         while (num > 0) {
165                 skb = dev_alloc_skb(HTT_RX_BUF_SIZE + HTT_RX_DESC_ALIGN);
166                 if (!skb) {
167                         ret = -ENOMEM;
168                         goto fail;
169                 }
170
171                 if (!IS_ALIGNED((unsigned long)skb->data, HTT_RX_DESC_ALIGN))
172                         skb_pull(skb,
173                                  PTR_ALIGN(skb->data, HTT_RX_DESC_ALIGN) -
174                                  skb->data);
175
176                 /* Clear rx_desc attention word before posting to Rx ring */
177                 rx_desc = (struct htt_rx_desc *)skb->data;
178                 rx_desc->attention.flags = __cpu_to_le32(0);
179
180                 paddr = dma_map_single(htt->ar->dev, skb->data,
181                                        skb->len + skb_tailroom(skb),
182                                        DMA_FROM_DEVICE);
183
184                 if (unlikely(dma_mapping_error(htt->ar->dev, paddr))) {
185                         dev_kfree_skb_any(skb);
186                         ret = -ENOMEM;
187                         goto fail;
188                 }
189
190                 rxcb = ATH10K_SKB_RXCB(skb);
191                 rxcb->paddr = paddr;
192                 htt->rx_ring.netbufs_ring[idx] = skb;
193                 ath10k_htt_set_paddrs_ring(htt, paddr, idx);
194                 htt->rx_ring.fill_cnt++;
195
196                 if (htt->rx_ring.in_ord_rx) {
197                         hash_add(htt->rx_ring.skb_table,
198                                  &ATH10K_SKB_RXCB(skb)->hlist,
199                                  paddr);
200                 }
201
202                 num--;
203                 idx++;
204                 idx &= htt->rx_ring.size_mask;
205         }
206
207 fail:
208         /*
 209          * Make sure the rx buffer is updated before the available buffer
 210          * index, to avoid any potential rx ring corruption.
211          */
212         mb();
213         *htt->rx_ring.alloc_idx.vaddr = __cpu_to_le32(idx);
214         return ret;
215 }
216
217 static int ath10k_htt_rx_ring_fill_n(struct ath10k_htt *htt, int num)
218 {
219         lockdep_assert_held(&htt->rx_ring.lock);
220         return __ath10k_htt_rx_ring_fill_n(htt, num);
221 }
222
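/* Top up the rx ring towards the configured fill level, limited to
 * ATH10K_HTT_MAX_NUM_REFILL buffers per call; any remaining deficit is
 * handled later via the refill retry timer.
 */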
223 static void ath10k_htt_rx_msdu_buff_replenish(struct ath10k_htt *htt)
224 {
225         int ret, num_deficit, num_to_fill;
226
 227         /* Refilling the whole RX ring buffer proves to be a bad idea. The
 228          * reason is that RX may take up a significant amount of CPU cycles
 229          * and starve other tasks, e.g. TX on an ethernet device acting as a
 230          * bridge with the ath10k wlan interface. This ended up with very poor
 231          * performance once the host system's CPU was overwhelmed with RX on
 232          * ath10k.
 233          *
 234          * By limiting the number of refills the replenishing occurs
 235          * progressively. This in turn makes use of the fact that tasklets are
 236          * processed in FIFO order. This means actual RX processing can starve
 237          * out refilling. If there aren't enough buffers on the RX ring the FW
 238          * will not report RX until it is refilled with enough buffers. This
 239          * automatically balances the load with respect to CPU power.
 240          *
 241          * This probably comes at the cost of lower maximum throughput but
 242          * improves the average and stability.
243         spin_lock_bh(&htt->rx_ring.lock);
244         num_deficit = htt->rx_ring.fill_level - htt->rx_ring.fill_cnt;
245         num_to_fill = min(ATH10K_HTT_MAX_NUM_REFILL, num_deficit);
246         num_deficit -= num_to_fill;
247         ret = ath10k_htt_rx_ring_fill_n(htt, num_to_fill);
248         if (ret == -ENOMEM) {
249                 /*
250                  * Failed to fill it to the desired level -
251                  * we'll start a timer and try again next time.
252                  * As long as enough buffers are left in the ring for
253                  * another A-MPDU rx, no special recovery is needed.
254                  */
255                 mod_timer(&htt->rx_ring.refill_retry_timer, jiffies +
256                           msecs_to_jiffies(HTT_RX_RING_REFILL_RETRY_MS));
257         } else if (num_deficit > 0) {
258                 mod_timer(&htt->rx_ring.refill_retry_timer, jiffies +
259                           msecs_to_jiffies(HTT_RX_RING_REFILL_RESCHED_MS));
260         }
261         spin_unlock_bh(&htt->rx_ring.lock);
262 }
263
264 static void ath10k_htt_rx_ring_refill_retry(struct timer_list *t)
265 {
266         struct ath10k_htt *htt = from_timer(htt, t, rx_ring.refill_retry_timer);
267
268         ath10k_htt_rx_msdu_buff_replenish(htt);
269 }
270
271 int ath10k_htt_rx_ring_refill(struct ath10k *ar)
272 {
273         struct ath10k_htt *htt = &ar->htt;
274         int ret;
275
276         spin_lock_bh(&htt->rx_ring.lock);
277         ret = ath10k_htt_rx_ring_fill_n(htt, (htt->rx_ring.fill_level -
278                                               htt->rx_ring.fill_cnt));
279
280         if (ret)
281                 ath10k_htt_rx_ring_free(htt);
282
283         spin_unlock_bh(&htt->rx_ring.lock);
284
285         return ret;
286 }
287
288 void ath10k_htt_rx_free(struct ath10k_htt *htt)
289 {
290         del_timer_sync(&htt->rx_ring.refill_retry_timer);
291
292         skb_queue_purge(&htt->rx_msdus_q);
293         skb_queue_purge(&htt->rx_in_ord_compl_q);
294         skb_queue_purge(&htt->tx_fetch_ind_q);
295
296         spin_lock_bh(&htt->rx_ring.lock);
297         ath10k_htt_rx_ring_free(htt);
298         spin_unlock_bh(&htt->rx_ring.lock);
299
300         dma_free_coherent(htt->ar->dev,
301                           ath10k_htt_get_rx_ring_size(htt),
302                           ath10k_htt_get_vaddr_ring(htt),
303                           htt->rx_ring.base_paddr);
304
305         dma_free_coherent(htt->ar->dev,
306                           sizeof(*htt->rx_ring.alloc_idx.vaddr),
307                           htt->rx_ring.alloc_idx.vaddr,
308                           htt->rx_ring.alloc_idx.paddr);
309
310         kfree(htt->rx_ring.netbufs_ring);
311 }
312
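/* Pop the buffer at the software read index off the rx ring, unmap it and
 * return it to the caller. Requires rx_ring.lock to be held.
 */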
313 static inline struct sk_buff *ath10k_htt_rx_netbuf_pop(struct ath10k_htt *htt)
314 {
315         struct ath10k *ar = htt->ar;
316         int idx;
317         struct sk_buff *msdu;
318
319         lockdep_assert_held(&htt->rx_ring.lock);
320
321         if (htt->rx_ring.fill_cnt == 0) {
322                 ath10k_warn(ar, "tried to pop sk_buff from an empty rx ring\n");
323                 return NULL;
324         }
325
326         idx = htt->rx_ring.sw_rd_idx.msdu_payld;
327         msdu = htt->rx_ring.netbufs_ring[idx];
328         htt->rx_ring.netbufs_ring[idx] = NULL;
329         ath10k_htt_reset_paddrs_ring(htt, idx);
330
331         idx++;
332         idx &= htt->rx_ring.size_mask;
333         htt->rx_ring.sw_rd_idx.msdu_payld = idx;
334         htt->rx_ring.fill_cnt--;
335
336         dma_unmap_single(htt->ar->dev,
337                          ATH10K_SKB_RXCB(msdu)->paddr,
338                          msdu->len + skb_tailroom(msdu),
339                          DMA_FROM_DEVICE);
340         ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt rx netbuf pop: ",
341                         msdu->data, msdu->len + skb_tailroom(msdu));
342
343         return msdu;
344 }
345
 346 /* return: < 0 fatal error, 0 - non-chained msdu, 1 - chained msdu */
347 static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt,
348                                    struct sk_buff_head *amsdu)
349 {
350         struct ath10k *ar = htt->ar;
351         int msdu_len, msdu_chaining = 0;
352         struct sk_buff *msdu;
353         struct htt_rx_desc *rx_desc;
354
355         lockdep_assert_held(&htt->rx_ring.lock);
356
357         for (;;) {
358                 int last_msdu, msdu_len_invalid, msdu_chained;
359
360                 msdu = ath10k_htt_rx_netbuf_pop(htt);
361                 if (!msdu) {
362                         __skb_queue_purge(amsdu);
363                         return -ENOENT;
364                 }
365
366                 __skb_queue_tail(amsdu, msdu);
367
368                 rx_desc = (struct htt_rx_desc *)msdu->data;
369
 370                 /* FIXME: we must report the msdu payload since this is what
 371                  * the caller expects now
372                  */
373                 skb_put(msdu, offsetof(struct htt_rx_desc, msdu_payload));
374                 skb_pull(msdu, offsetof(struct htt_rx_desc, msdu_payload));
375
376                 /*
377                  * Sanity check - confirm the HW is finished filling in the
378                  * rx data.
379                  * If the HW and SW are working correctly, then it's guaranteed
380                  * that the HW's MAC DMA is done before this point in the SW.
 381                  * To prevent handling a stale Rx descriptor, just drop the
 382                  * whole A-MSDU for now until we have a way to recover.
383                  */
384                 if (!(__le32_to_cpu(rx_desc->attention.flags)
385                                 & RX_ATTENTION_FLAGS_MSDU_DONE)) {
386                         __skb_queue_purge(amsdu);
387                         return -EIO;
388                 }
389
390                 msdu_len_invalid = !!(__le32_to_cpu(rx_desc->attention.flags)
391                                         & (RX_ATTENTION_FLAGS_MPDU_LENGTH_ERR |
392                                            RX_ATTENTION_FLAGS_MSDU_LENGTH_ERR));
393                 msdu_len = MS(__le32_to_cpu(rx_desc->msdu_start.common.info0),
394                               RX_MSDU_START_INFO0_MSDU_LENGTH);
395                 msdu_chained = rx_desc->frag_info.ring2_more_count;
396
397                 if (msdu_len_invalid)
398                         msdu_len = 0;
399
400                 skb_trim(msdu, 0);
401                 skb_put(msdu, min(msdu_len, HTT_RX_MSDU_SIZE));
402                 msdu_len -= msdu->len;
403
 404                 /* Note: Chained buffers do not contain an rx descriptor */
405                 while (msdu_chained--) {
406                         msdu = ath10k_htt_rx_netbuf_pop(htt);
407                         if (!msdu) {
408                                 __skb_queue_purge(amsdu);
409                                 return -ENOENT;
410                         }
411
412                         __skb_queue_tail(amsdu, msdu);
413                         skb_trim(msdu, 0);
414                         skb_put(msdu, min(msdu_len, HTT_RX_BUF_SIZE));
415                         msdu_len -= msdu->len;
416                         msdu_chaining = 1;
417                 }
418
419                 last_msdu = __le32_to_cpu(rx_desc->msdu_end.common.info0) &
420                                 RX_MSDU_END_INFO0_LAST_MSDU;
421
422                 trace_ath10k_htt_rx_desc(ar, &rx_desc->attention,
423                                          sizeof(*rx_desc) - sizeof(u32));
424
425                 if (last_msdu)
426                         break;
427         }
428
429         if (skb_queue_empty(amsdu))
430                 msdu_chaining = -1;
431
432         /*
433          * Don't refill the ring yet.
434          *
435          * First, the elements popped here are still in use - it is not
436          * safe to overwrite them until the matching call to
437          * mpdu_desc_list_next. Second, for efficiency it is preferable to
438          * refill the rx ring with 1 PPDU's worth of rx buffers (something
439          * like 32 x 3 buffers), rather than one MPDU's worth of rx buffers
440          * (something like 3 buffers). Consequently, we'll rely on the txrx
441          * SW to tell us when it is done pulling all the PPDU's rx buffers
442          * out of the rx ring, and then refill it just once.
443          */
444
445         return msdu_chaining;
446 }
447
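/* In-order rx: find the buffer mapped at @paddr, remove it from the skb
 * hash table, unmap it and return it.
 */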
448 static struct sk_buff *ath10k_htt_rx_pop_paddr(struct ath10k_htt *htt,
449                                                u64 paddr)
450 {
451         struct ath10k *ar = htt->ar;
452         struct ath10k_skb_rxcb *rxcb;
453         struct sk_buff *msdu;
454
455         lockdep_assert_held(&htt->rx_ring.lock);
456
457         msdu = ath10k_htt_rx_find_skb_paddr(ar, paddr);
458         if (!msdu)
459                 return NULL;
460
461         rxcb = ATH10K_SKB_RXCB(msdu);
462         hash_del(&rxcb->hlist);
463         htt->rx_ring.fill_cnt--;
464
465         dma_unmap_single(htt->ar->dev, rxcb->paddr,
466                          msdu->len + skb_tailroom(msdu),
467                          DMA_FROM_DEVICE);
468         ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt rx netbuf pop: ",
469                         msdu->data, msdu->len + skb_tailroom(msdu));
470
471         return msdu;
472 }
473
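/* Walk the 32-bit msdu descriptors of an in-order indication, popping the
 * buffer for each physical address onto @list. For non-offload frames the
 * HTT rx descriptor is stripped and the reported msdu length applied.
 */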
474 static int ath10k_htt_rx_pop_paddr32_list(struct ath10k_htt *htt,
475                                           struct htt_rx_in_ord_ind *ev,
476                                           struct sk_buff_head *list)
477 {
478         struct ath10k *ar = htt->ar;
479         struct htt_rx_in_ord_msdu_desc *msdu_desc = ev->msdu_descs32;
480         struct htt_rx_desc *rxd;
481         struct sk_buff *msdu;
482         int msdu_count;
483         bool is_offload;
484         u32 paddr;
485
486         lockdep_assert_held(&htt->rx_ring.lock);
487
488         msdu_count = __le16_to_cpu(ev->msdu_count);
489         is_offload = !!(ev->info & HTT_RX_IN_ORD_IND_INFO_OFFLOAD_MASK);
490
491         while (msdu_count--) {
492                 paddr = __le32_to_cpu(msdu_desc->msdu_paddr);
493
494                 msdu = ath10k_htt_rx_pop_paddr(htt, paddr);
495                 if (!msdu) {
496                         __skb_queue_purge(list);
497                         return -ENOENT;
498                 }
499
500                 __skb_queue_tail(list, msdu);
501
502                 if (!is_offload) {
503                         rxd = (void *)msdu->data;
504
505                         trace_ath10k_htt_rx_desc(ar, rxd, sizeof(*rxd));
506
507                         skb_put(msdu, sizeof(*rxd));
508                         skb_pull(msdu, sizeof(*rxd));
509                         skb_put(msdu, __le16_to_cpu(msdu_desc->msdu_len));
510
511                         if (!(__le32_to_cpu(rxd->attention.flags) &
512                               RX_ATTENTION_FLAGS_MSDU_DONE)) {
513                                 ath10k_warn(htt->ar, "tried to pop an incomplete frame, oops!\n");
514                                 return -EIO;
515                         }
516                 }
517
518                 msdu_desc++;
519         }
520
521         return 0;
522 }
523
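/* 64-bit target variant of ath10k_htt_rx_pop_paddr32_list(). */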
524 static int ath10k_htt_rx_pop_paddr64_list(struct ath10k_htt *htt,
525                                           struct htt_rx_in_ord_ind *ev,
526                                           struct sk_buff_head *list)
527 {
528         struct ath10k *ar = htt->ar;
529         struct htt_rx_in_ord_msdu_desc_ext *msdu_desc = ev->msdu_descs64;
530         struct htt_rx_desc *rxd;
531         struct sk_buff *msdu;
532         int msdu_count;
533         bool is_offload;
534         u64 paddr;
535
536         lockdep_assert_held(&htt->rx_ring.lock);
537
538         msdu_count = __le16_to_cpu(ev->msdu_count);
539         is_offload = !!(ev->info & HTT_RX_IN_ORD_IND_INFO_OFFLOAD_MASK);
540
541         while (msdu_count--) {
542                 paddr = __le64_to_cpu(msdu_desc->msdu_paddr);
543                 msdu = ath10k_htt_rx_pop_paddr(htt, paddr);
544                 if (!msdu) {
545                         __skb_queue_purge(list);
546                         return -ENOENT;
547                 }
548
549                 __skb_queue_tail(list, msdu);
550
551                 if (!is_offload) {
552                         rxd = (void *)msdu->data;
553
554                         trace_ath10k_htt_rx_desc(ar, rxd, sizeof(*rxd));
555
556                         skb_put(msdu, sizeof(*rxd));
557                         skb_pull(msdu, sizeof(*rxd));
558                         skb_put(msdu, __le16_to_cpu(msdu_desc->msdu_len));
559
560                         if (!(__le32_to_cpu(rxd->attention.flags) &
561                               RX_ATTENTION_FLAGS_MSDU_DONE)) {
562                                 ath10k_warn(htt->ar, "tried to pop an incomplete frame, oops!\n");
563                                 return -EIO;
564                         }
565                 }
566
567                 msdu_desc++;
568         }
569
570         return 0;
571 }
572
573 int ath10k_htt_rx_alloc(struct ath10k_htt *htt)
574 {
575         struct ath10k *ar = htt->ar;
576         dma_addr_t paddr;
577         void *vaddr, *vaddr_ring;
578         size_t size;
579         struct timer_list *timer = &htt->rx_ring.refill_retry_timer;
580
581         htt->rx_confused = false;
582
583         /* XXX: The fill level could be changed during runtime in response to
584          * the host processing latency. Is this really worth it?
585          */
586         htt->rx_ring.size = HTT_RX_RING_SIZE;
587         htt->rx_ring.size_mask = htt->rx_ring.size - 1;
588         htt->rx_ring.fill_level = ar->hw_params.rx_ring_fill_level;
589
590         if (!is_power_of_2(htt->rx_ring.size)) {
591                 ath10k_warn(ar, "htt rx ring size is not power of 2\n");
592                 return -EINVAL;
593         }
594
595         htt->rx_ring.netbufs_ring =
596                 kcalloc(htt->rx_ring.size, sizeof(struct sk_buff *),
597                         GFP_KERNEL);
598         if (!htt->rx_ring.netbufs_ring)
599                 goto err_netbuf;
600
601         size = ath10k_htt_get_rx_ring_size(htt);
602
603         vaddr_ring = dma_alloc_coherent(htt->ar->dev, size, &paddr, GFP_KERNEL);
604         if (!vaddr_ring)
605                 goto err_dma_ring;
606
607         ath10k_htt_config_paddrs_ring(htt, vaddr_ring);
608         htt->rx_ring.base_paddr = paddr;
609
610         vaddr = dma_alloc_coherent(htt->ar->dev,
611                                    sizeof(*htt->rx_ring.alloc_idx.vaddr),
612                                    &paddr, GFP_KERNEL);
613         if (!vaddr)
614                 goto err_dma_idx;
615
616         htt->rx_ring.alloc_idx.vaddr = vaddr;
617         htt->rx_ring.alloc_idx.paddr = paddr;
618         htt->rx_ring.sw_rd_idx.msdu_payld = htt->rx_ring.size_mask;
619         *htt->rx_ring.alloc_idx.vaddr = 0;
620
621         /* Initialize the Rx refill retry timer */
622         timer_setup(timer, ath10k_htt_rx_ring_refill_retry, 0);
623
624         spin_lock_init(&htt->rx_ring.lock);
625
626         htt->rx_ring.fill_cnt = 0;
627         htt->rx_ring.sw_rd_idx.msdu_payld = 0;
628         hash_init(htt->rx_ring.skb_table);
629
630         skb_queue_head_init(&htt->rx_msdus_q);
631         skb_queue_head_init(&htt->rx_in_ord_compl_q);
632         skb_queue_head_init(&htt->tx_fetch_ind_q);
633         atomic_set(&htt->num_mpdus_ready, 0);
634
635         ath10k_dbg(ar, ATH10K_DBG_BOOT, "htt rx ring size %d fill_level %d\n",
636                    htt->rx_ring.size, htt->rx_ring.fill_level);
637         return 0;
638
639 err_dma_idx:
640         dma_free_coherent(htt->ar->dev,
641                           ath10k_htt_get_rx_ring_size(htt),
642                           vaddr_ring,
643                           htt->rx_ring.base_paddr);
644 err_dma_ring:
645         kfree(htt->rx_ring.netbufs_ring);
646 err_netbuf:
647         return -ENOMEM;
648 }
649
650 static int ath10k_htt_rx_crypto_param_len(struct ath10k *ar,
651                                           enum htt_rx_mpdu_encrypt_type type)
652 {
653         switch (type) {
654         case HTT_RX_MPDU_ENCRYPT_NONE:
655                 return 0;
656         case HTT_RX_MPDU_ENCRYPT_WEP40:
657         case HTT_RX_MPDU_ENCRYPT_WEP104:
658                 return IEEE80211_WEP_IV_LEN;
659         case HTT_RX_MPDU_ENCRYPT_TKIP_WITHOUT_MIC:
660         case HTT_RX_MPDU_ENCRYPT_TKIP_WPA:
661                 return IEEE80211_TKIP_IV_LEN;
662         case HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2:
663                 return IEEE80211_CCMP_HDR_LEN;
664         case HTT_RX_MPDU_ENCRYPT_AES_CCM256_WPA2:
665                 return IEEE80211_CCMP_256_HDR_LEN;
666         case HTT_RX_MPDU_ENCRYPT_AES_GCMP_WPA2:
667         case HTT_RX_MPDU_ENCRYPT_AES_GCMP256_WPA2:
668                 return IEEE80211_GCMP_HDR_LEN;
669         case HTT_RX_MPDU_ENCRYPT_WEP128:
670         case HTT_RX_MPDU_ENCRYPT_WAPI:
671                 break;
672         }
673
674         ath10k_warn(ar, "unsupported encryption type %d\n", type);
675         return 0;
676 }
677
678 #define MICHAEL_MIC_LEN 8
679
680 static int ath10k_htt_rx_crypto_mic_len(struct ath10k *ar,
681                                         enum htt_rx_mpdu_encrypt_type type)
682 {
683         switch (type) {
684         case HTT_RX_MPDU_ENCRYPT_NONE:
685         case HTT_RX_MPDU_ENCRYPT_WEP40:
686         case HTT_RX_MPDU_ENCRYPT_WEP104:
687         case HTT_RX_MPDU_ENCRYPT_TKIP_WITHOUT_MIC:
688         case HTT_RX_MPDU_ENCRYPT_TKIP_WPA:
689                 return 0;
690         case HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2:
691                 return IEEE80211_CCMP_MIC_LEN;
692         case HTT_RX_MPDU_ENCRYPT_AES_CCM256_WPA2:
693                 return IEEE80211_CCMP_256_MIC_LEN;
694         case HTT_RX_MPDU_ENCRYPT_AES_GCMP_WPA2:
695         case HTT_RX_MPDU_ENCRYPT_AES_GCMP256_WPA2:
696                 return IEEE80211_GCMP_MIC_LEN;
697         case HTT_RX_MPDU_ENCRYPT_WEP128:
698         case HTT_RX_MPDU_ENCRYPT_WAPI:
699                 break;
700         }
701
702         ath10k_warn(ar, "unsupported encryption type %d\n", type);
703         return 0;
704 }
705
706 static int ath10k_htt_rx_crypto_icv_len(struct ath10k *ar,
707                                         enum htt_rx_mpdu_encrypt_type type)
708 {
709         switch (type) {
710         case HTT_RX_MPDU_ENCRYPT_NONE:
711         case HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2:
712         case HTT_RX_MPDU_ENCRYPT_AES_CCM256_WPA2:
713         case HTT_RX_MPDU_ENCRYPT_AES_GCMP_WPA2:
714         case HTT_RX_MPDU_ENCRYPT_AES_GCMP256_WPA2:
715                 return 0;
716         case HTT_RX_MPDU_ENCRYPT_WEP40:
717         case HTT_RX_MPDU_ENCRYPT_WEP104:
718                 return IEEE80211_WEP_ICV_LEN;
719         case HTT_RX_MPDU_ENCRYPT_TKIP_WITHOUT_MIC:
720         case HTT_RX_MPDU_ENCRYPT_TKIP_WPA:
721                 return IEEE80211_TKIP_ICV_LEN;
722         case HTT_RX_MPDU_ENCRYPT_WEP128:
723         case HTT_RX_MPDU_ENCRYPT_WAPI:
724                 break;
725         }
726
727         ath10k_warn(ar, "unsupported encryption type %d\n", type);
728         return 0;
729 }
730
731 struct amsdu_subframe_hdr {
732         u8 dst[ETH_ALEN];
733         u8 src[ETH_ALEN];
734         __be16 len;
735 } __packed;
736
737 #define GROUP_ID_IS_SU_MIMO(x) ((x) == 0 || (x) == 63)
738
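/* Map the hardware bandwidth field (0-3) to mac80211's RATE_INFO_BW_* values. */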
739 static inline u8 ath10k_bw_to_mac80211_bw(u8 bw)
740 {
741         u8 ret = 0;
742
743         switch (bw) {
744         case 0:
745                 ret = RATE_INFO_BW_20;
746                 break;
747         case 1:
748                 ret = RATE_INFO_BW_40;
749                 break;
750         case 2:
751                 ret = RATE_INFO_BW_80;
752                 break;
753         case 3:
754                 ret = RATE_INFO_BW_160;
755                 break;
756         }
757
758         return ret;
759 }
760
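/* Decode the PPDU start fields into mac80211 rate information: legacy rate
 * index, or HT/VHT MCS, NSS, bandwidth and short GI.
 */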
761 static void ath10k_htt_rx_h_rates(struct ath10k *ar,
762                                   struct ieee80211_rx_status *status,
763                                   struct htt_rx_desc *rxd)
764 {
765         struct ieee80211_supported_band *sband;
766         u8 cck, rate, bw, sgi, mcs, nss;
767         u8 preamble = 0;
768         u8 group_id;
769         u32 info1, info2, info3;
770         u32 stbc, nsts_su;
771
772         info1 = __le32_to_cpu(rxd->ppdu_start.info1);
773         info2 = __le32_to_cpu(rxd->ppdu_start.info2);
774         info3 = __le32_to_cpu(rxd->ppdu_start.info3);
775
776         preamble = MS(info1, RX_PPDU_START_INFO1_PREAMBLE_TYPE);
777
778         switch (preamble) {
779         case HTT_RX_LEGACY:
 780                 /* The band is required to get the legacy rate index. Since
 781                  * the band can't be undefined, check that freq is non-zero.
782                  */
783                 if (!status->freq)
784                         return;
785
786                 cck = info1 & RX_PPDU_START_INFO1_L_SIG_RATE_SELECT;
787                 rate = MS(info1, RX_PPDU_START_INFO1_L_SIG_RATE);
788                 rate &= ~RX_PPDU_START_RATE_FLAG;
789
790                 sband = &ar->mac.sbands[status->band];
791                 status->rate_idx = ath10k_mac_hw_rate_to_idx(sband, rate, cck);
792                 break;
793         case HTT_RX_HT:
794         case HTT_RX_HT_WITH_TXBF:
795                 /* HT-SIG - Table 20-11 in info2 and info3 */
796                 mcs = info2 & 0x1F;
797                 nss = mcs >> 3;
798                 bw = (info2 >> 7) & 1;
799                 sgi = (info3 >> 7) & 1;
800
801                 status->rate_idx = mcs;
802                 status->encoding = RX_ENC_HT;
803                 if (sgi)
804                         status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
805                 if (bw)
806                         status->bw = RATE_INFO_BW_40;
807                 break;
808         case HTT_RX_VHT:
809         case HTT_RX_VHT_WITH_TXBF:
810                 /* VHT-SIG-A1 in info2, VHT-SIG-A2 in info3
811                  * TODO check this
812                  */
813                 bw = info2 & 3;
814                 sgi = info3 & 1;
815                 stbc = (info2 >> 3) & 1;
816                 group_id = (info2 >> 4) & 0x3F;
817
818                 if (GROUP_ID_IS_SU_MIMO(group_id)) {
819                         mcs = (info3 >> 4) & 0x0F;
820                         nsts_su = ((info2 >> 10) & 0x07);
821                         if (stbc)
822                                 nss = (nsts_su >> 2) + 1;
823                         else
824                                 nss = (nsts_su + 1);
825                 } else {
 826                         /* Hardware doesn't decode VHT-SIG-B into the Rx
 827                          * descriptor so it's impossible to decode the MCS.
 828                          * Also, since the firmware consumes Group Id
 829                          * Management frames, the host has no knowledge of
 830                          * the group/user position mapping, so it's impossible
 831                          * to pick the correct Nsts from VHT-SIG-A1.
 832                          *
 833                          * Bandwidth and SGI are valid so report the rateinfo
 834                          * on a best-effort basis.
835                          */
836                         mcs = 0;
837                         nss = 1;
838                 }
839
840                 if (mcs > 0x09) {
841                         ath10k_warn(ar, "invalid MCS received %u\n", mcs);
842                         ath10k_warn(ar, "rxd %08x mpdu start %08x %08x msdu start %08x %08x ppdu start %08x %08x %08x %08x %08x\n",
843                                     __le32_to_cpu(rxd->attention.flags),
844                                     __le32_to_cpu(rxd->mpdu_start.info0),
845                                     __le32_to_cpu(rxd->mpdu_start.info1),
846                                     __le32_to_cpu(rxd->msdu_start.common.info0),
847                                     __le32_to_cpu(rxd->msdu_start.common.info1),
848                                     rxd->ppdu_start.info0,
849                                     __le32_to_cpu(rxd->ppdu_start.info1),
850                                     __le32_to_cpu(rxd->ppdu_start.info2),
851                                     __le32_to_cpu(rxd->ppdu_start.info3),
852                                     __le32_to_cpu(rxd->ppdu_start.info4));
853
854                         ath10k_warn(ar, "msdu end %08x mpdu end %08x\n",
855                                     __le32_to_cpu(rxd->msdu_end.common.info0),
856                                     __le32_to_cpu(rxd->mpdu_end.info0));
857
858                         ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL,
859                                         "rx desc msdu payload: ",
860                                         rxd->msdu_payload, 50);
861                 }
862
863                 status->rate_idx = mcs;
864                 status->nss = nss;
865
866                 if (sgi)
867                         status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
868
869                 status->bw = ath10k_bw_to_mac80211_bw(bw);
870                 status->encoding = RX_ENC_VHT;
871                 break;
872         default:
873                 break;
874         }
875 }
876
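/* Derive the operating channel from the peer index carried in the rx
 * descriptor, if it is valid. Caller must hold ar->data_lock.
 */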
877 static struct ieee80211_channel *
878 ath10k_htt_rx_h_peer_channel(struct ath10k *ar, struct htt_rx_desc *rxd)
879 {
880         struct ath10k_peer *peer;
881         struct ath10k_vif *arvif;
882         struct cfg80211_chan_def def;
883         u16 peer_id;
884
885         lockdep_assert_held(&ar->data_lock);
886
887         if (!rxd)
888                 return NULL;
889
890         if (rxd->attention.flags &
891             __cpu_to_le32(RX_ATTENTION_FLAGS_PEER_IDX_INVALID))
892                 return NULL;
893
894         if (!(rxd->msdu_end.common.info0 &
895               __cpu_to_le32(RX_MSDU_END_INFO0_FIRST_MSDU)))
896                 return NULL;
897
898         peer_id = MS(__le32_to_cpu(rxd->mpdu_start.info0),
899                      RX_MPDU_START_INFO0_PEER_IDX);
900
901         peer = ath10k_peer_find_by_id(ar, peer_id);
902         if (!peer)
903                 return NULL;
904
905         arvif = ath10k_get_arvif(ar, peer->vdev_id);
906         if (WARN_ON_ONCE(!arvif))
907                 return NULL;
908
909         if (ath10k_mac_vif_chan(arvif->vif, &def))
910                 return NULL;
911
912         return def.chan;
913 }
914
915 static struct ieee80211_channel *
916 ath10k_htt_rx_h_vdev_channel(struct ath10k *ar, u32 vdev_id)
917 {
918         struct ath10k_vif *arvif;
919         struct cfg80211_chan_def def;
920
921         lockdep_assert_held(&ar->data_lock);
922
923         list_for_each_entry(arvif, &ar->arvifs, list) {
924                 if (arvif->vdev_id == vdev_id &&
925                     ath10k_mac_vif_chan(arvif->vif, &def) == 0)
926                         return def.chan;
927         }
928
929         return NULL;
930 }
931
932 static void
933 ath10k_htt_rx_h_any_chan_iter(struct ieee80211_hw *hw,
934                               struct ieee80211_chanctx_conf *conf,
935                               void *data)
936 {
937         struct cfg80211_chan_def *def = data;
938
939         *def = conf->def;
940 }
941
942 static struct ieee80211_channel *
943 ath10k_htt_rx_h_any_channel(struct ath10k *ar)
944 {
945         struct cfg80211_chan_def def = {};
946
947         ieee80211_iter_chan_contexts_atomic(ar->hw,
948                                             ath10k_htt_rx_h_any_chan_iter,
949                                             &def);
950
951         return def.chan;
952 }
953
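/* Fill in band and frequency using the best available source: scan channel,
 * rx channel, peer, vdev, any active channel context, or the target's
 * operating channel. Returns false if none of them is known.
 */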
954 static bool ath10k_htt_rx_h_channel(struct ath10k *ar,
955                                     struct ieee80211_rx_status *status,
956                                     struct htt_rx_desc *rxd,
957                                     u32 vdev_id)
958 {
959         struct ieee80211_channel *ch;
960
961         spin_lock_bh(&ar->data_lock);
962         ch = ar->scan_channel;
963         if (!ch)
964                 ch = ar->rx_channel;
965         if (!ch)
966                 ch = ath10k_htt_rx_h_peer_channel(ar, rxd);
967         if (!ch)
968                 ch = ath10k_htt_rx_h_vdev_channel(ar, vdev_id);
969         if (!ch)
970                 ch = ath10k_htt_rx_h_any_channel(ar);
971         if (!ch)
972                 ch = ar->tgt_oper_chan;
973         spin_unlock_bh(&ar->data_lock);
974
975         if (!ch)
976                 return false;
977
978         status->band = ch->band;
979         status->freq = ch->center_freq;
980
981         return true;
982 }
983
984 static void ath10k_htt_rx_h_signal(struct ath10k *ar,
985                                    struct ieee80211_rx_status *status,
986                                    struct htt_rx_desc *rxd)
987 {
988         int i;
989
990         for (i = 0; i < IEEE80211_MAX_CHAINS ; i++) {
991                 status->chains &= ~BIT(i);
992
993                 if (rxd->ppdu_start.rssi_chains[i].pri20_mhz != 0x80) {
994                         status->chain_signal[i] = ATH10K_DEFAULT_NOISE_FLOOR +
995                                 rxd->ppdu_start.rssi_chains[i].pri20_mhz;
996
997                         status->chains |= BIT(i);
998                 }
999         }
1000
1001         /* FIXME: Get real NF */
1002         status->signal = ATH10K_DEFAULT_NOISE_FLOOR +
1003                          rxd->ppdu_start.rssi_comb;
1004         status->flag &= ~RX_FLAG_NO_SIGNAL_VAL;
1005 }
1006
1007 static void ath10k_htt_rx_h_mactime(struct ath10k *ar,
1008                                     struct ieee80211_rx_status *status,
1009                                     struct htt_rx_desc *rxd)
1010 {
 1011         /* FIXME: TSF is known only at the end of the PPDU, in the last MPDU.
 1012          * This means all prior MSDUs in a PPDU are reported to mac80211 without
 1013          * the TSF. Is it worth holding frames until the end of the PPDU is known?
1014          *
1015          * FIXME: Can we get/compute 64bit TSF?
1016          */
1017         status->mactime = __le32_to_cpu(rxd->ppdu_end.common.tsf_timestamp);
1018         status->flag |= RX_FLAG_MACTIME_END;
1019 }
1020
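/* Update the per-PPDU rx status (channel, signal, rates, A-MPDU flags and
 * mactime) based on the first/last MPDU attention bits of the A-MSDU's rx
 * descriptor.
 */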
1021 static void ath10k_htt_rx_h_ppdu(struct ath10k *ar,
1022                                  struct sk_buff_head *amsdu,
1023                                  struct ieee80211_rx_status *status,
1024                                  u32 vdev_id)
1025 {
1026         struct sk_buff *first;
1027         struct htt_rx_desc *rxd;
1028         bool is_first_ppdu;
1029         bool is_last_ppdu;
1030
1031         if (skb_queue_empty(amsdu))
1032                 return;
1033
1034         first = skb_peek(amsdu);
1035         rxd = (void *)first->data - sizeof(*rxd);
1036
1037         is_first_ppdu = !!(rxd->attention.flags &
1038                            __cpu_to_le32(RX_ATTENTION_FLAGS_FIRST_MPDU));
1039         is_last_ppdu = !!(rxd->attention.flags &
1040                           __cpu_to_le32(RX_ATTENTION_FLAGS_LAST_MPDU));
1041
1042         if (is_first_ppdu) {
1043                 /* New PPDU starts so clear out the old per-PPDU status. */
1044                 status->freq = 0;
1045                 status->rate_idx = 0;
1046                 status->nss = 0;
1047                 status->encoding = RX_ENC_LEGACY;
1048                 status->bw = RATE_INFO_BW_20;
1049
1050                 status->flag &= ~RX_FLAG_MACTIME_END;
1051                 status->flag |= RX_FLAG_NO_SIGNAL_VAL;
1052
1053                 status->flag &= ~(RX_FLAG_AMPDU_IS_LAST);
1054                 status->flag |= RX_FLAG_AMPDU_DETAILS | RX_FLAG_AMPDU_LAST_KNOWN;
1055                 status->ampdu_reference = ar->ampdu_reference;
1056
1057                 ath10k_htt_rx_h_signal(ar, status, rxd);
1058                 ath10k_htt_rx_h_channel(ar, status, rxd, vdev_id);
1059                 ath10k_htt_rx_h_rates(ar, status, rxd);
1060         }
1061
1062         if (is_last_ppdu) {
1063                 ath10k_htt_rx_h_mactime(ar, status, rxd);
1064
1065                 /* set ampdu last segment flag */
1066                 status->flag |= RX_FLAG_AMPDU_IS_LAST;
1067                 ar->ampdu_reference++;
1068         }
1069 }
1070
1071 static const char * const tid_to_ac[] = {
1072         "BE",
1073         "BK",
1074         "BK",
1075         "BE",
1076         "VI",
1077         "VI",
1078         "VO",
1079         "VO",
1080 };
1081
1082 static char *ath10k_get_tid(struct ieee80211_hdr *hdr, char *out, size_t size)
1083 {
1084         u8 *qc;
1085         int tid;
1086
1087         if (!ieee80211_is_data_qos(hdr->frame_control))
1088                 return "";
1089
1090         qc = ieee80211_get_qos_ctl(hdr);
1091         tid = *qc & IEEE80211_QOS_CTL_TID_MASK;
1092         if (tid < 8)
1093                 snprintf(out, size, "tid %d (%s)", tid, tid_to_ac[tid]);
1094         else
1095                 snprintf(out, size, "tid %d", tid);
1096
1097         return out;
1098 }
1099
1100 static void ath10k_htt_rx_h_queue_msdu(struct ath10k *ar,
1101                                        struct ieee80211_rx_status *rx_status,
1102                                        struct sk_buff *skb)
1103 {
1104         struct ieee80211_rx_status *status;
1105
1106         status = IEEE80211_SKB_RXCB(skb);
1107         *status = *rx_status;
1108
1109         skb_queue_tail(&ar->htt.rx_msdus_q, skb);
1110 }
1111
1112 static void ath10k_process_rx(struct ath10k *ar, struct sk_buff *skb)
1113 {
1114         struct ieee80211_rx_status *status;
1115         struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
1116         char tid[32];
1117
1118         status = IEEE80211_SKB_RXCB(skb);
1119
1120         ath10k_dbg(ar, ATH10K_DBG_DATA,
1121                    "rx skb %pK len %u peer %pM %s %s sn %u %s%s%s%s%s%s %srate_idx %u vht_nss %u freq %u band %u flag 0x%x fcs-err %i mic-err %i amsdu-more %i\n",
1122                    skb,
1123                    skb->len,
1124                    ieee80211_get_SA(hdr),
1125                    ath10k_get_tid(hdr, tid, sizeof(tid)),
1126                    is_multicast_ether_addr(ieee80211_get_DA(hdr)) ?
1127                                                         "mcast" : "ucast",
1128                    (__le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_SEQ) >> 4,
1129                    (status->encoding == RX_ENC_LEGACY) ? "legacy" : "",
1130                    (status->encoding == RX_ENC_HT) ? "ht" : "",
1131                    (status->encoding == RX_ENC_VHT) ? "vht" : "",
1132                    (status->bw == RATE_INFO_BW_40) ? "40" : "",
1133                    (status->bw == RATE_INFO_BW_80) ? "80" : "",
1134                    (status->bw == RATE_INFO_BW_160) ? "160" : "",
1135                    status->enc_flags & RX_ENC_FLAG_SHORT_GI ? "sgi " : "",
1136                    status->rate_idx,
1137                    status->nss,
1138                    status->freq,
1139                    status->band, status->flag,
1140                    !!(status->flag & RX_FLAG_FAILED_FCS_CRC),
1141                    !!(status->flag & RX_FLAG_MMIC_ERROR),
1142                    !!(status->flag & RX_FLAG_AMSDU_MORE));
1143         ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "rx skb: ",
1144                         skb->data, skb->len);
1145         trace_ath10k_rx_hdr(ar, skb->data, skb->len);
1146         trace_ath10k_rx_payload(ar, skb->data, skb->len);
1147
1148         ieee80211_rx_napi(ar->hw, NULL, skb, &ar->napi);
1149 }
1150
1151 static int ath10k_htt_rx_nwifi_hdrlen(struct ath10k *ar,
1152                                       struct ieee80211_hdr *hdr)
1153 {
1154         int len = ieee80211_hdrlen(hdr->frame_control);
1155
1156         if (!test_bit(ATH10K_FW_FEATURE_NO_NWIFI_DECAP_4ADDR_PADDING,
1157                       ar->running_fw->fw_file.fw_features))
1158                 len = round_up(len, 4);
1159
1160         return len;
1161 }
1162
1163 static void ath10k_htt_rx_h_undecap_raw(struct ath10k *ar,
1164                                         struct sk_buff *msdu,
1165                                         struct ieee80211_rx_status *status,
1166                                         enum htt_rx_mpdu_encrypt_type enctype,
1167                                         bool is_decrypted)
1168 {
1169         struct ieee80211_hdr *hdr;
1170         struct htt_rx_desc *rxd;
1171         size_t hdr_len;
1172         size_t crypto_len;
1173         bool is_first;
1174         bool is_last;
1175
1176         rxd = (void *)msdu->data - sizeof(*rxd);
1177         is_first = !!(rxd->msdu_end.common.info0 &
1178                       __cpu_to_le32(RX_MSDU_END_INFO0_FIRST_MSDU));
1179         is_last = !!(rxd->msdu_end.common.info0 &
1180                      __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU));
1181
1182         /* Delivered decapped frame:
1183          * [802.11 header]
1184          * [crypto param] <-- can be trimmed if !fcs_err &&
1185          *                    !decrypt_err && !peer_idx_invalid
1186          * [amsdu header] <-- only if A-MSDU
1187          * [rfc1042/llc]
1188          * [payload]
1189          * [FCS] <-- at end, needs to be trimmed
1190          */
1191
1192         /* This probably shouldn't happen but warn just in case */
1193         if (unlikely(WARN_ON_ONCE(!is_first)))
1194                 return;
1195
1196         /* This probably shouldn't happen but warn just in case */
1197         if (unlikely(WARN_ON_ONCE(!(is_first && is_last))))
1198                 return;
1199
1200         skb_trim(msdu, msdu->len - FCS_LEN);
1201
1202         /* In most cases this will be true for sniffed frames. It makes sense
1203          * to deliver them as-is without stripping the crypto param. This is
 1204          * necessary for software-based decryption.
1205          *
1206          * If there's no error then the frame is decrypted. At least that is
1207          * the case for frames that come in via fragmented rx indication.
1208          */
1209         if (!is_decrypted)
1210                 return;
1211
1212         /* The payload is decrypted so strip crypto params. Start from tail
1213          * since hdr is used to compute some stuff.
1214          */
1215
1216         hdr = (void *)msdu->data;
1217
1218         /* Tail */
1219         if (status->flag & RX_FLAG_IV_STRIPPED) {
1220                 skb_trim(msdu, msdu->len -
1221                          ath10k_htt_rx_crypto_mic_len(ar, enctype));
1222
1223                 skb_trim(msdu, msdu->len -
1224                          ath10k_htt_rx_crypto_icv_len(ar, enctype));
1225         } else {
1226                 /* MIC */
1227                 if (status->flag & RX_FLAG_MIC_STRIPPED)
1228                         skb_trim(msdu, msdu->len -
1229                                  ath10k_htt_rx_crypto_mic_len(ar, enctype));
1230
1231                 /* ICV */
1232                 if (status->flag & RX_FLAG_ICV_STRIPPED)
1233                         skb_trim(msdu, msdu->len -
1234                                  ath10k_htt_rx_crypto_icv_len(ar, enctype));
1235         }
1236
1237         /* MMIC */
1238         if ((status->flag & RX_FLAG_MMIC_STRIPPED) &&
1239             !ieee80211_has_morefrags(hdr->frame_control) &&
1240             enctype == HTT_RX_MPDU_ENCRYPT_TKIP_WPA)
1241                 skb_trim(msdu, msdu->len - MICHAEL_MIC_LEN);
1242
1243         /* Head */
1244         if (status->flag & RX_FLAG_IV_STRIPPED) {
1245                 hdr_len = ieee80211_hdrlen(hdr->frame_control);
1246                 crypto_len = ath10k_htt_rx_crypto_param_len(ar, enctype);
1247
1248                 memmove((void *)msdu->data + crypto_len,
1249                         (void *)msdu->data, hdr_len);
1250                 skb_pull(msdu, crypto_len);
1251         }
1252 }
1253
1254 static void ath10k_htt_rx_h_undecap_nwifi(struct ath10k *ar,
1255                                           struct sk_buff *msdu,
1256                                           struct ieee80211_rx_status *status,
1257                                           const u8 first_hdr[64],
1258                                           enum htt_rx_mpdu_encrypt_type enctype)
1259 {
1260         struct ieee80211_hdr *hdr;
1261         struct htt_rx_desc *rxd;
1262         size_t hdr_len;
1263         u8 da[ETH_ALEN];
1264         u8 sa[ETH_ALEN];
1265         int l3_pad_bytes;
1266         int bytes_aligned = ar->hw_params.decap_align_bytes;
1267
1268         /* Delivered decapped frame:
1269          * [nwifi 802.11 header] <-- replaced with 802.11 hdr
1270          * [rfc1042/llc]
1271          *
1272          * Note: The nwifi header doesn't have QoS Control and is
1273          * (always?) a 3addr frame.
1274          *
 1275          * Note2: There's no A-MSDU subframe header, even if the frame is
 1276          * part of an A-MSDU.
1277          */
1278
1279         /* pull decapped header and copy SA & DA */
1280         rxd = (void *)msdu->data - sizeof(*rxd);
1281
1282         l3_pad_bytes = ath10k_rx_desc_get_l3_pad_bytes(&ar->hw_params, rxd);
1283         skb_put(msdu, l3_pad_bytes);
1284
1285         hdr = (struct ieee80211_hdr *)(msdu->data + l3_pad_bytes);
1286
1287         hdr_len = ath10k_htt_rx_nwifi_hdrlen(ar, hdr);
1288         ether_addr_copy(da, ieee80211_get_DA(hdr));
1289         ether_addr_copy(sa, ieee80211_get_SA(hdr));
1290         skb_pull(msdu, hdr_len);
1291
1292         /* push original 802.11 header */
1293         hdr = (struct ieee80211_hdr *)first_hdr;
1294         hdr_len = ieee80211_hdrlen(hdr->frame_control);
1295
1296         if (!(status->flag & RX_FLAG_IV_STRIPPED)) {
1297                 memcpy(skb_push(msdu,
1298                                 ath10k_htt_rx_crypto_param_len(ar, enctype)),
1299                        (void *)hdr + round_up(hdr_len, bytes_aligned),
1300                         ath10k_htt_rx_crypto_param_len(ar, enctype));
1301         }
1302
1303         memcpy(skb_push(msdu, hdr_len), hdr, hdr_len);
1304
 1305         /* The original 802.11 header has a different DA and, in the
 1306          * 4addr case, it may also have a different SA
1307          */
1308         hdr = (struct ieee80211_hdr *)msdu->data;
1309         ether_addr_copy(ieee80211_get_DA(hdr), da);
1310         ether_addr_copy(ieee80211_get_SA(hdr), sa);
1311 }
1312
1313 static void *ath10k_htt_rx_h_find_rfc1042(struct ath10k *ar,
1314                                           struct sk_buff *msdu,
1315                                           enum htt_rx_mpdu_encrypt_type enctype)
1316 {
1317         struct ieee80211_hdr *hdr;
1318         struct htt_rx_desc *rxd;
1319         size_t hdr_len, crypto_len;
1320         void *rfc1042;
1321         bool is_first, is_last, is_amsdu;
1322         int bytes_aligned = ar->hw_params.decap_align_bytes;
1323
1324         rxd = (void *)msdu->data - sizeof(*rxd);
1325         hdr = (void *)rxd->rx_hdr_status;
1326
1327         is_first = !!(rxd->msdu_end.common.info0 &
1328                       __cpu_to_le32(RX_MSDU_END_INFO0_FIRST_MSDU));
1329         is_last = !!(rxd->msdu_end.common.info0 &
1330                      __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU));
1331         is_amsdu = !(is_first && is_last);
1332
1333         rfc1042 = hdr;
1334
1335         if (is_first) {
1336                 hdr_len = ieee80211_hdrlen(hdr->frame_control);
1337                 crypto_len = ath10k_htt_rx_crypto_param_len(ar, enctype);
1338
1339                 rfc1042 += round_up(hdr_len, bytes_aligned) +
1340                            round_up(crypto_len, bytes_aligned);
1341         }
1342
1343         if (is_amsdu)
1344                 rfc1042 += sizeof(struct amsdu_subframe_hdr);
1345
1346         return rfc1042;
1347 }
1348
1349 static void ath10k_htt_rx_h_undecap_eth(struct ath10k *ar,
1350                                         struct sk_buff *msdu,
1351                                         struct ieee80211_rx_status *status,
1352                                         const u8 first_hdr[64],
1353                                         enum htt_rx_mpdu_encrypt_type enctype)
1354 {
1355         struct ieee80211_hdr *hdr;
1356         struct ethhdr *eth;
1357         size_t hdr_len;
1358         void *rfc1042;
1359         u8 da[ETH_ALEN];
1360         u8 sa[ETH_ALEN];
1361         int l3_pad_bytes;
1362         struct htt_rx_desc *rxd;
1363         int bytes_aligned = ar->hw_params.decap_align_bytes;
1364
1365         /* Delivered decapped frame:
1366          * [eth header] <-- replaced with 802.11 hdr & rfc1042/llc
1367          * [payload]
1368          */
1369
1370         rfc1042 = ath10k_htt_rx_h_find_rfc1042(ar, msdu, enctype);
1371         if (WARN_ON_ONCE(!rfc1042))
1372                 return;
1373
1374         rxd = (void *)msdu->data - sizeof(*rxd);
1375         l3_pad_bytes = ath10k_rx_desc_get_l3_pad_bytes(&ar->hw_params, rxd);
1376         skb_put(msdu, l3_pad_bytes);
1377         skb_pull(msdu, l3_pad_bytes);
1378
1379         /* pull decapped header and copy SA & DA */
1380         eth = (struct ethhdr *)msdu->data;
1381         ether_addr_copy(da, eth->h_dest);
1382         ether_addr_copy(sa, eth->h_source);
1383         skb_pull(msdu, sizeof(struct ethhdr));
1384
1385         /* push rfc1042/llc/snap */
1386         memcpy(skb_push(msdu, sizeof(struct rfc1042_hdr)), rfc1042,
1387                sizeof(struct rfc1042_hdr));
1388
1389         /* push original 802.11 header */
1390         hdr = (struct ieee80211_hdr *)first_hdr;
1391         hdr_len = ieee80211_hdrlen(hdr->frame_control);
1392
1393         if (!(status->flag & RX_FLAG_IV_STRIPPED)) {
1394                 memcpy(skb_push(msdu,
1395                                 ath10k_htt_rx_crypto_param_len(ar, enctype)),
1396                        (void *)hdr + round_up(hdr_len, bytes_aligned),
1397                         ath10k_htt_rx_crypto_param_len(ar, enctype));
1398         }
1399
1400         memcpy(skb_push(msdu, hdr_len), hdr, hdr_len);
1401
 1402         /* The original 802.11 header has a different DA and, in the
 1403          * 4addr case, it may also have a different SA
1404          */
1405         hdr = (struct ieee80211_hdr *)msdu->data;
1406         ether_addr_copy(ieee80211_get_DA(hdr), da);
1407         ether_addr_copy(ieee80211_get_SA(hdr), sa);
1408 }
1409
1410 static void ath10k_htt_rx_h_undecap_snap(struct ath10k *ar,
1411                                          struct sk_buff *msdu,
1412                                          struct ieee80211_rx_status *status,
1413                                          const u8 first_hdr[64],
1414                                          enum htt_rx_mpdu_encrypt_type enctype)
1415 {
1416         struct ieee80211_hdr *hdr;
1417         size_t hdr_len;
1418         int l3_pad_bytes;
1419         struct htt_rx_desc *rxd;
1420         int bytes_aligned = ar->hw_params.decap_align_bytes;
1421
1422         /* Delivered decapped frame:
1423          * [amsdu header] <-- replaced with 802.11 hdr
1424          * [rfc1042/llc]
1425          * [payload]
1426          */
1427
1428         rxd = (void *)msdu->data - sizeof(*rxd);
1429         l3_pad_bytes = ath10k_rx_desc_get_l3_pad_bytes(&ar->hw_params, rxd);
1430
1431         skb_put(msdu, l3_pad_bytes);
1432         skb_pull(msdu, sizeof(struct amsdu_subframe_hdr) + l3_pad_bytes);
1433
1434         hdr = (struct ieee80211_hdr *)first_hdr;
1435         hdr_len = ieee80211_hdrlen(hdr->frame_control);
1436
1437         if (!(status->flag & RX_FLAG_IV_STRIPPED)) {
1438                 memcpy(skb_push(msdu,
1439                                 ath10k_htt_rx_crypto_param_len(ar, enctype)),
1440                        (void *)hdr + round_up(hdr_len, bytes_aligned),
1441                         ath10k_htt_rx_crypto_param_len(ar, enctype));
1442         }
1443
1444         memcpy(skb_push(msdu, hdr_len), hdr, hdr_len);
1445 }
1446
1447 static void ath10k_htt_rx_h_undecap(struct ath10k *ar,
1448                                     struct sk_buff *msdu,
1449                                     struct ieee80211_rx_status *status,
1450                                     u8 first_hdr[64],
1451                                     enum htt_rx_mpdu_encrypt_type enctype,
1452                                     bool is_decrypted)
1453 {
1454         struct htt_rx_desc *rxd;
1455         enum rx_msdu_decap_format decap;
1456
1457         /* First msdu's decapped header:
1458          * [802.11 header] <-- padded to 4 bytes long
1459          * [crypto param] <-- padded to 4 bytes long
1460          * [amsdu header] <-- only if A-MSDU
1461          * [rfc1042/llc]
1462          *
1463          * Other (2nd, 3rd, ..) msdu's decapped header:
1464          * [amsdu header] <-- only if A-MSDU
1465          * [rfc1042/llc]
1466          */
1467
1468         rxd = (void *)msdu->data - sizeof(*rxd);
1469         decap = MS(__le32_to_cpu(rxd->msdu_start.common.info1),
1470                    RX_MSDU_START_INFO1_DECAP_FORMAT);
1471
1472         switch (decap) {
1473         case RX_MSDU_DECAP_RAW:
1474                 ath10k_htt_rx_h_undecap_raw(ar, msdu, status, enctype,
1475                                             is_decrypted);
1476                 break;
1477         case RX_MSDU_DECAP_NATIVE_WIFI:
1478                 ath10k_htt_rx_h_undecap_nwifi(ar, msdu, status, first_hdr,
1479                                               enctype);
1480                 break;
1481         case RX_MSDU_DECAP_ETHERNET2_DIX:
1482                 ath10k_htt_rx_h_undecap_eth(ar, msdu, status, first_hdr, enctype);
1483                 break;
1484         case RX_MSDU_DECAP_8023_SNAP_LLC:
1485                 ath10k_htt_rx_h_undecap_snap(ar, msdu, status, first_hdr,
1486                                              enctype);
1487                 break;
1488         }
1489 }
1490
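/* Translate the hardware checksum verification results (attention flags and
 * msdu_start info) into an sk_buff checksum state. CHECKSUM_UNNECESSARY is
 * reported only for IPv4/IPv6 TCP/UDP frames whose IP and TCP/UDP checksums
 * both passed; everything else is left as CHECKSUM_NONE for the stack to
 * verify in software.
 */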
1491 static int ath10k_htt_rx_get_csum_state(struct sk_buff *skb)
1492 {
1493         struct htt_rx_desc *rxd;
1494         u32 flags, info;
1495         bool is_ip4, is_ip6;
1496         bool is_tcp, is_udp;
1497         bool ip_csum_ok, tcpudp_csum_ok;
1498
1499         rxd = (void *)skb->data - sizeof(*rxd);
1500         flags = __le32_to_cpu(rxd->attention.flags);
1501         info = __le32_to_cpu(rxd->msdu_start.common.info1);
1502
1503         is_ip4 = !!(info & RX_MSDU_START_INFO1_IPV4_PROTO);
1504         is_ip6 = !!(info & RX_MSDU_START_INFO1_IPV6_PROTO);
1505         is_tcp = !!(info & RX_MSDU_START_INFO1_TCP_PROTO);
1506         is_udp = !!(info & RX_MSDU_START_INFO1_UDP_PROTO);
1507         ip_csum_ok = !(flags & RX_ATTENTION_FLAGS_IP_CHKSUM_FAIL);
1508         tcpudp_csum_ok = !(flags & RX_ATTENTION_FLAGS_TCP_UDP_CHKSUM_FAIL);
1509
1510         if (!is_ip4 && !is_ip6)
1511                 return CHECKSUM_NONE;
1512         if (!is_tcp && !is_udp)
1513                 return CHECKSUM_NONE;
1514         if (!ip_csum_ok)
1515                 return CHECKSUM_NONE;
1516         if (!tcpudp_csum_ok)
1517                 return CHECKSUM_NONE;
1518
1519         return CHECKSUM_UNNECESSARY;
1520 }
1521
1522 static void ath10k_htt_rx_h_csum_offload(struct sk_buff *msdu)
1523 {
1524         msdu->ip_summed = ath10k_htt_rx_get_csum_state(msdu);
1525 }
1526
1527 static void ath10k_htt_rx_h_mpdu(struct ath10k *ar,
1528                                  struct sk_buff_head *amsdu,
1529                                  struct ieee80211_rx_status *status,
1530                                  bool fill_crypt_header,
1531                                  u8 *rx_hdr,
1532                                  enum ath10k_pkt_rx_err *err)
1533 {
1534         struct sk_buff *first;
1535         struct sk_buff *last;
1536         struct sk_buff *msdu;
1537         struct htt_rx_desc *rxd;
1538         struct ieee80211_hdr *hdr;
1539         enum htt_rx_mpdu_encrypt_type enctype;
1540         u8 first_hdr[64];
1541         u8 *qos;
1542         bool has_fcs_err;
1543         bool has_crypto_err;
1544         bool has_tkip_err;
1545         bool has_peer_idx_invalid;
1546         bool is_decrypted;
1547         bool is_mgmt;
1548         u32 attention;
1549
1550         if (skb_queue_empty(amsdu))
1551                 return;
1552
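        /* The HTT rx descriptor is laid out immediately before the MSDU
         * payload in the same buffer, hence the negative offset.
         */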
1553         first = skb_peek(amsdu);
1554         rxd = (void *)first->data - sizeof(*rxd);
1555
1556         is_mgmt = !!(rxd->attention.flags &
1557                      __cpu_to_le32(RX_ATTENTION_FLAGS_MGMT_TYPE));
1558
1559         enctype = MS(__le32_to_cpu(rxd->mpdu_start.info0),
1560                      RX_MPDU_START_INFO0_ENCRYPT_TYPE);
1561
1562         /* First MSDU's Rx descriptor in an A-MSDU contains full 802.11
1563          * decapped header. It'll be used for undecapping of each MSDU.
1564          */
1565         hdr = (void *)rxd->rx_hdr_status;
1566         memcpy(first_hdr, hdr, RX_HTT_HDR_STATUS_LEN);
1567
1568         if (rx_hdr)
1569                 memcpy(rx_hdr, hdr, RX_HTT_HDR_STATUS_LEN);
1570
1571         /* Each A-MSDU subframe will use the original header as the base and be
1572          * reported as a separate MSDU so strip the A-MSDU bit from QoS Ctl.
1573          */
1574         hdr = (void *)first_hdr;
1575
1576         if (ieee80211_is_data_qos(hdr->frame_control)) {
1577                 qos = ieee80211_get_qos_ctl(hdr);
1578                 qos[0] &= ~IEEE80211_QOS_CTL_A_MSDU_PRESENT;
1579         }
1580
1581         /* Some attention flags are valid only in the last MSDU. */
1582         last = skb_peek_tail(amsdu);
1583         rxd = (void *)last->data - sizeof(*rxd);
1584         attention = __le32_to_cpu(rxd->attention.flags);
1585
1586         has_fcs_err = !!(attention & RX_ATTENTION_FLAGS_FCS_ERR);
1587         has_crypto_err = !!(attention & RX_ATTENTION_FLAGS_DECRYPT_ERR);
1588         has_tkip_err = !!(attention & RX_ATTENTION_FLAGS_TKIP_MIC_ERR);
1589         has_peer_idx_invalid = !!(attention & RX_ATTENTION_FLAGS_PEER_IDX_INVALID);
1590
1591         /* Note: If hardware captures an encrypted frame that it can't decrypt,
1592          * e.g. due to fcs error, missing peer or invalid key data it will
1593          * report the frame as raw.
1594          */
1595         is_decrypted = (enctype != HTT_RX_MPDU_ENCRYPT_NONE &&
1596                         !has_fcs_err &&
1597                         !has_crypto_err &&
1598                         !has_peer_idx_invalid);
1599
1600         /* Clear per-MPDU flags while leaving per-PPDU flags intact. */
1601         status->flag &= ~(RX_FLAG_FAILED_FCS_CRC |
1602                           RX_FLAG_MMIC_ERROR |
1603                           RX_FLAG_DECRYPTED |
1604                           RX_FLAG_IV_STRIPPED |
1605                           RX_FLAG_ONLY_MONITOR |
1606                           RX_FLAG_MMIC_STRIPPED);
1607
1608         if (has_fcs_err)
1609                 status->flag |= RX_FLAG_FAILED_FCS_CRC;
1610
1611         if (has_tkip_err)
1612                 status->flag |= RX_FLAG_MMIC_ERROR;
1613
1614         if (err) {
1615                 if (has_fcs_err)
1616                         *err = ATH10K_PKT_RX_ERR_FCS;
1617                 else if (has_tkip_err)
1618                         *err = ATH10K_PKT_RX_ERR_TKIP;
1619                 else if (has_crypto_err)
1620                         *err = ATH10K_PKT_RX_ERR_CRYPT;
1621                 else if (has_peer_idx_invalid)
1622                         *err = ATH10K_PKT_RX_ERR_PEER_IDX_INVAL;
1623         }
1624
1625         /* Firmware reports all necessary management frames via WMI already.
1626          * They are not reported to monitor interfaces at all so pass the ones
1627          * coming via HTT to monitor interfaces instead. This simplifies
1628          * matters a lot.
1629          */
1630         if (is_mgmt)
1631                 status->flag |= RX_FLAG_ONLY_MONITOR;
1632
1633         if (is_decrypted) {
1634                 status->flag |= RX_FLAG_DECRYPTED;
1635
1636                 if (likely(!is_mgmt))
1637                         status->flag |= RX_FLAG_MMIC_STRIPPED;
1638
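                /* With fill_crypt_header the crypto parameters (IV) are left
                 * in the frame for mac80211, so only MIC and ICV are marked
                 * as stripped; otherwise the IV has been stripped as well.
                 */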
1639                 if (fill_crypt_header)
1640                         status->flag |= RX_FLAG_MIC_STRIPPED |
1641                                         RX_FLAG_ICV_STRIPPED;
1642                 else
1643                         status->flag |= RX_FLAG_IV_STRIPPED;
1644         }
1645
1646         skb_queue_walk(amsdu, msdu) {
1647                 ath10k_htt_rx_h_csum_offload(msdu);
1648                 ath10k_htt_rx_h_undecap(ar, msdu, status, first_hdr, enctype,
1649                                         is_decrypted);
1650
1651                 /* Undecapping involves copying the original 802.11 header back
1652                  * to sk_buff. If frame is protected and hardware has decrypted
1653                  * it then remove the protected bit.
1654                  */
1655                 if (!is_decrypted)
1656                         continue;
1657                 if (is_mgmt)
1658                         continue;
1659
1660                 if (fill_crypt_header)
1661                         continue;
1662
1663                 hdr = (void *)msdu->data;
1664                 hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_PROTECTED);
1665         }
1666 }
1667
1668 static void ath10k_htt_rx_h_enqueue(struct ath10k *ar,
1669                                     struct sk_buff_head *amsdu,
1670                                     struct ieee80211_rx_status *status)
1671 {
1672         struct sk_buff *msdu;
1673         struct sk_buff *first_subframe;
1674
1675         first_subframe = skb_peek(amsdu);
1676
1677         while ((msdu = __skb_dequeue(amsdu))) {
1678                 /* Setup per-MSDU flags */
1679                 if (skb_queue_empty(amsdu))
1680                         status->flag &= ~RX_FLAG_AMSDU_MORE;
1681                 else
1682                         status->flag |= RX_FLAG_AMSDU_MORE;
1683
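                /* Only the first subframe of an A-MSDU goes through the
                 * regular PN replay check; the remaining subframes share the
                 * same PN, so mac80211 must be told to accept the repeat.
                 */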
1684                 if (msdu == first_subframe) {
1685                         first_subframe = NULL;
1686                         status->flag &= ~RX_FLAG_ALLOW_SAME_PN;
1687                 } else {
1688                         status->flag |= RX_FLAG_ALLOW_SAME_PN;
1689                 }
1690
1691                 ath10k_htt_rx_h_queue_msdu(ar, status, msdu);
1692         }
1693 }
1694
1695 static int ath10k_unchain_msdu(struct sk_buff_head *amsdu,
1696                                unsigned long int *unchain_cnt)
1697 {
1698         struct sk_buff *skb, *first;
1699         int space;
1700         int total_len = 0;
1701         int amsdu_len = skb_queue_len(amsdu);
1702
1703         /* TODO: We might be able to optimize this by using
1704          * skb_try_coalesce or a similar method to
1705          * reduce copying, or maybe get mac80211 to
1706          * provide a way to just receive a list of
1707          * skbs?
1708          */
1709
1710         first = __skb_dequeue(amsdu);
1711
1712         /* Allocate total length all at once. */
1713         skb_queue_walk(amsdu, skb)
1714                 total_len += skb->len;
1715
1716         space = total_len - skb_tailroom(first);
1717         if ((space > 0) &&
1718             (pskb_expand_head(first, 0, space, GFP_ATOMIC) < 0)) {
1719                 /* TODO:  bump some rx-oom error stat */
1720                 /* put it back together so we can free the
1721                  * whole list at once.
1722                  */
1723                 __skb_queue_head(amsdu, first);
1724                 return -1;
1725         }
1726
1727         /* Walk list again, copying contents into
1728          * msdu_head
1729          */
1730         while ((skb = __skb_dequeue(amsdu))) {
1731                 skb_copy_from_linear_data(skb, skb_put(first, skb->len),
1732                                           skb->len);
1733                 dev_kfree_skb_any(skb);
1734         }
1735
1736         __skb_queue_head(amsdu, first);
1737
1738         *unchain_cnt += amsdu_len - 1;
1739
1740         return 0;
1741 }
1742
1743 static void ath10k_htt_rx_h_unchain(struct ath10k *ar,
1744                                     struct sk_buff_head *amsdu,
1745                                     unsigned long int *drop_cnt,
1746                                     unsigned long int *unchain_cnt)
1747 {
1748         struct sk_buff *first;
1749         struct htt_rx_desc *rxd;
1750         enum rx_msdu_decap_format decap;
1751
1752         first = skb_peek(amsdu);
1753         rxd = (void *)first->data - sizeof(*rxd);
1754         decap = MS(__le32_to_cpu(rxd->msdu_start.common.info1),
1755                    RX_MSDU_START_INFO1_DECAP_FORMAT);
1756
1757         /* FIXME: Current unchaining logic can only handle simple case of raw
1758          * msdu chaining. If decapping is other than raw the chaining may be
1759          * more complex and this isn't handled by the current code. Don't even
1760          * try re-constructing such frames - it'll be pretty much garbage.
1761          */
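        /* Also require that the number of buffers matches the continuation
         * count reported by the rx descriptor before coalescing.
         */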
1762         if (decap != RX_MSDU_DECAP_RAW ||
1763             skb_queue_len(amsdu) != 1 + rxd->frag_info.ring2_more_count) {
1764                 *drop_cnt += skb_queue_len(amsdu);
1765                 __skb_queue_purge(amsdu);
1766                 return;
1767         }
1768
1769         ath10k_unchain_msdu(amsdu, unchain_cnt);
1770 }
1771
1772 static bool ath10k_htt_rx_validate_amsdu(struct ath10k *ar,
1773                                          struct sk_buff_head *amsdu)
1774 {
1775         u8 *subframe_hdr;
1776         struct sk_buff *first;
1777         bool is_first, is_last;
1778         struct htt_rx_desc *rxd;
1779         struct ieee80211_hdr *hdr;
1780         size_t hdr_len, crypto_len;
1781         enum htt_rx_mpdu_encrypt_type enctype;
1782         int bytes_aligned = ar->hw_params.decap_align_bytes;
1783
1784         first = skb_peek(amsdu);
1785
1786         rxd = (void *)first->data - sizeof(*rxd);
1787         hdr = (void *)rxd->rx_hdr_status;
1788
1789         is_first = !!(rxd->msdu_end.common.info0 &
1790                       __cpu_to_le32(RX_MSDU_END_INFO0_FIRST_MSDU));
1791         is_last = !!(rxd->msdu_end.common.info0 &
1792                      __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU));
1793
1794         /* Return in case of non-aggregated msdu */
1795         if (is_first && is_last)
1796                 return true;
1797
1798         /* First msdu flag is not set for the first msdu of the list */
1799         if (!is_first)
1800                 return false;
1801
1802         enctype = MS(__le32_to_cpu(rxd->mpdu_start.info0),
1803                      RX_MPDU_START_INFO0_ENCRYPT_TYPE);
1804
1805         hdr_len = ieee80211_hdrlen(hdr->frame_control);
1806         crypto_len = ath10k_htt_rx_crypto_param_len(ar, enctype);
1807
1808         subframe_hdr = (u8 *)hdr + round_up(hdr_len, bytes_aligned) +
1809                        crypto_len;
1810
1811         /* Validate that the A-MSDU has a proper first subframe.
1812          * A single MSDU can be received as an A-MSDU when the
1813          * unauthenticated A-MSDU flag of the QoS header gets flipped
1814          * in non-SPP A-MSDUs; in such cases the first subframe carries
1815          * an LLC/SNAP header in place of a valid DA.
1816          * Return false if the DA matches the rfc1042 pattern.
1817          */
1818         if (ether_addr_equal(subframe_hdr, rfc1042_header))
1819                 return false;
1820
1821         return true;
1822 }
1823
1824 static bool ath10k_htt_rx_amsdu_allowed(struct ath10k *ar,
1825                                         struct sk_buff_head *amsdu,
1826                                         struct ieee80211_rx_status *rx_status)
1827 {
1828         if (!rx_status->freq) {
1829                 ath10k_dbg(ar, ATH10K_DBG_HTT, "no channel configured; ignoring frame(s)!\n");
1830                 return false;
1831         }
1832
1833         if (test_bit(ATH10K_CAC_RUNNING, &ar->dev_flags)) {
1834                 ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx cac running\n");
1835                 return false;
1836         }
1837
1838         if (!ath10k_htt_rx_validate_amsdu(ar, amsdu)) {
1839                 ath10k_dbg(ar, ATH10K_DBG_HTT, "invalid amsdu received\n");
1840                 return false;
1841         }
1842
1843         return true;
1844 }
1845
1846 static void ath10k_htt_rx_h_filter(struct ath10k *ar,
1847                                    struct sk_buff_head *amsdu,
1848                                    struct ieee80211_rx_status *rx_status,
1849                                    unsigned long int *drop_cnt)
1850 {
1851         if (skb_queue_empty(amsdu))
1852                 return;
1853
1854         if (ath10k_htt_rx_amsdu_allowed(ar, amsdu, rx_status))
1855                 return;
1856
1857         if (drop_cnt)
1858                 *drop_cnt += skb_queue_len(amsdu);
1859
1860         __skb_queue_purge(amsdu);
1861 }
1862
1863 static int ath10k_htt_rx_handle_amsdu(struct ath10k_htt *htt)
1864 {
1865         struct ath10k *ar = htt->ar;
1866         struct ieee80211_rx_status *rx_status = &htt->rx_status;
1867         struct sk_buff_head amsdu;
1868         int ret;
1869         unsigned long int drop_cnt = 0;
1870         unsigned long int unchain_cnt = 0;
1871         unsigned long int drop_cnt_filter = 0;
1872         unsigned long int msdus_to_queue, num_msdus;
1873         enum ath10k_pkt_rx_err err = ATH10K_PKT_RX_ERR_MAX;
1874         u8 first_hdr[RX_HTT_HDR_STATUS_LEN];
1875
1876         __skb_queue_head_init(&amsdu);
1877
1878         spin_lock_bh(&htt->rx_ring.lock);
1879         if (htt->rx_confused) {
1880                 spin_unlock_bh(&htt->rx_ring.lock);
1881                 return -EIO;
1882         }
1883         ret = ath10k_htt_rx_amsdu_pop(htt, &amsdu);
1884         spin_unlock_bh(&htt->rx_ring.lock);
1885
1886         if (ret < 0) {
1887                 ath10k_warn(ar, "rx ring became corrupted: %d\n", ret);
1888                 __skb_queue_purge(&amsdu);
1889                 /* FIXME: It's probably a good idea to reboot the
1890                  * device instead of leaving it inoperable.
1891                  */
1892                 htt->rx_confused = true;
1893                 return ret;
1894         }
1895
1896         num_msdus = skb_queue_len(&amsdu);
1897
1898         ath10k_htt_rx_h_ppdu(ar, &amsdu, rx_status, 0xffff);
1899
1900         /* ret > 0 indicates chained msdus that need to be unchained */
1901         if (ret > 0)
1902                 ath10k_htt_rx_h_unchain(ar, &amsdu, &drop_cnt, &unchain_cnt);
1903
1904         ath10k_htt_rx_h_filter(ar, &amsdu, rx_status, &drop_cnt_filter);
1905         ath10k_htt_rx_h_mpdu(ar, &amsdu, rx_status, true, first_hdr, &err);
1906         msdus_to_queue = skb_queue_len(&amsdu);
1907         ath10k_htt_rx_h_enqueue(ar, &amsdu, rx_status);
1908
1909         ath10k_sta_update_rx_tid_stats(ar, first_hdr, num_msdus, err,
1910                                        unchain_cnt, drop_cnt, drop_cnt_filter,
1911                                        msdus_to_queue);
1912
1913         return 0;
1914 }
1915
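/* Rx indications are only counted here; the MPDUs themselves are popped from
 * the rx ring and processed later (see ath10k_htt_rx_handle_amsdu) as
 * num_mpdus_ready is drained.
 */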
1916 static void ath10k_htt_rx_proc_rx_ind(struct ath10k_htt *htt,
1917                                       struct htt_rx_indication *rx)
1918 {
1919         struct ath10k *ar = htt->ar;
1920         struct htt_rx_indication_mpdu_range *mpdu_ranges;
1921         int num_mpdu_ranges;
1922         int i, mpdu_count = 0;
1923         u16 peer_id;
1924         u8 tid;
1925
1926         num_mpdu_ranges = MS(__le32_to_cpu(rx->hdr.info1),
1927                              HTT_RX_INDICATION_INFO1_NUM_MPDU_RANGES);
1928         peer_id = __le16_to_cpu(rx->hdr.peer_id);
1929         tid = MS(rx->hdr.info0, HTT_RX_INDICATION_INFO0_EXT_TID);
1930
1931         mpdu_ranges = htt_rx_ind_get_mpdu_ranges(rx);
1932
1933         ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt rx ind: ",
1934                         rx, sizeof(*rx) +
1935                         (sizeof(struct htt_rx_indication_mpdu_range) *
1936                                 num_mpdu_ranges));
1937
1938         for (i = 0; i < num_mpdu_ranges; i++)
1939                 mpdu_count += mpdu_ranges[i].mpdu_count;
1940
1941         atomic_add(mpdu_count, &htt->num_mpdus_ready);
1942
1943         ath10k_sta_update_rx_tid_stats_ampdu(ar, peer_id, tid, mpdu_ranges,
1944                                              num_mpdu_ranges);
1945 }
1946
1947 static void ath10k_htt_rx_tx_compl_ind(struct ath10k *ar,
1948                                        struct sk_buff *skb)
1949 {
1950         struct ath10k_htt *htt = &ar->htt;
1951         struct htt_resp *resp = (struct htt_resp *)skb->data;
1952         struct htt_tx_done tx_done = {};
1953         int status = MS(resp->data_tx_completion.flags, HTT_DATA_TX_STATUS);
1954         __le16 msdu_id;
1955         int i;
1956
1957         switch (status) {
1958         case HTT_DATA_TX_STATUS_NO_ACK:
1959                 tx_done.status = HTT_TX_COMPL_STATE_NOACK;
1960                 break;
1961         case HTT_DATA_TX_STATUS_OK:
1962                 tx_done.status = HTT_TX_COMPL_STATE_ACK;
1963                 break;
1964         case HTT_DATA_TX_STATUS_DISCARD:
1965         case HTT_DATA_TX_STATUS_POSTPONE:
1966         case HTT_DATA_TX_STATUS_DOWNLOAD_FAIL:
1967                 tx_done.status = HTT_TX_COMPL_STATE_DISCARD;
1968                 break;
1969         default:
1970                 ath10k_warn(ar, "unhandled tx completion status %d\n", status);
1971                 tx_done.status = HTT_TX_COMPL_STATE_DISCARD;
1972                 break;
1973         }
1974
1975         ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx completion num_msdus %d\n",
1976                    resp->data_tx_completion.num_msdus);
1977
1978         for (i = 0; i < resp->data_tx_completion.num_msdus; i++) {
1979                 msdu_id = resp->data_tx_completion.msdus[i];
1980                 tx_done.msdu_id = __le16_to_cpu(msdu_id);
1981
1982                 /* kfifo_put: In practice firmware shouldn't fire off per-CE
1983                  * interrupt and main interrupt (MSI/-X range case) for the same
1984                  * HTC service so it should be safe to use kfifo_put w/o lock.
1985                  *
1986                  * From kfifo_put() documentation:
1987                  *  Note that with only one concurrent reader and one concurrent
1988                  *  writer, you don't need extra locking to use these macro.
1989                  */
1990                 if (!kfifo_put(&htt->txdone_fifo, tx_done)) {
1991                         ath10k_warn(ar, "txdone fifo overrun, msdu_id %d status %d\n",
1992                                     tx_done.msdu_id, tx_done.status);
1993                         ath10k_txrx_tx_unref(htt, &tx_done);
1994                 }
1995         }
1996 }
1997
1998 static void ath10k_htt_rx_addba(struct ath10k *ar, struct htt_resp *resp)
1999 {
2000         struct htt_rx_addba *ev = &resp->rx_addba;
2001         struct ath10k_peer *peer;
2002         struct ath10k_vif *arvif;
2003         u16 info0, tid, peer_id;
2004
2005         info0 = __le16_to_cpu(ev->info0);
2006         tid = MS(info0, HTT_RX_BA_INFO0_TID);
2007         peer_id = MS(info0, HTT_RX_BA_INFO0_PEER_ID);
2008
2009         ath10k_dbg(ar, ATH10K_DBG_HTT,
2010                    "htt rx addba tid %hu peer_id %hu size %hhu\n",
2011                    tid, peer_id, ev->window_size);
2012
2013         spin_lock_bh(&ar->data_lock);
2014         peer = ath10k_peer_find_by_id(ar, peer_id);
2015         if (!peer) {
2016                 ath10k_warn(ar, "received addba event for invalid peer_id: %hu\n",
2017                             peer_id);
2018                 spin_unlock_bh(&ar->data_lock);
2019                 return;
2020         }
2021
2022         arvif = ath10k_get_arvif(ar, peer->vdev_id);
2023         if (!arvif) {
2024                 ath10k_warn(ar, "received addba event for invalid vdev_id: %u\n",
2025                             peer->vdev_id);
2026                 spin_unlock_bh(&ar->data_lock);
2027                 return;
2028         }
2029
2030         ath10k_dbg(ar, ATH10K_DBG_HTT,
2031                    "htt rx start rx ba session sta %pM tid %hu size %hhu\n",
2032                    peer->addr, tid, ev->window_size);
2033
2034         ieee80211_start_rx_ba_session_offl(arvif->vif, peer->addr, tid);
2035         spin_unlock_bh(&ar->data_lock);
2036 }
2037
2038 static void ath10k_htt_rx_delba(struct ath10k *ar, struct htt_resp *resp)
2039 {
2040         struct htt_rx_delba *ev = &resp->rx_delba;
2041         struct ath10k_peer *peer;
2042         struct ath10k_vif *arvif;
2043         u16 info0, tid, peer_id;
2044
2045         info0 = __le16_to_cpu(ev->info0);
2046         tid = MS(info0, HTT_RX_BA_INFO0_TID);
2047         peer_id = MS(info0, HTT_RX_BA_INFO0_PEER_ID);
2048
2049         ath10k_dbg(ar, ATH10K_DBG_HTT,
2050                    "htt rx delba tid %hu peer_id %hu\n",
2051                    tid, peer_id);
2052
2053         spin_lock_bh(&ar->data_lock);
2054         peer = ath10k_peer_find_by_id(ar, peer_id);
2055         if (!peer) {
2056                 ath10k_warn(ar, "received delba event for invalid peer_id: %hu\n",
2057                             peer_id);
2058                 spin_unlock_bh(&ar->data_lock);
2059                 return;
2060         }
2061
2062         arvif = ath10k_get_arvif(ar, peer->vdev_id);
2063         if (!arvif) {
2064                 ath10k_warn(ar, "received delba event for invalid vdev_id: %u\n",
2065                             peer->vdev_id);
2066                 spin_unlock_bh(&ar->data_lock);
2067                 return;
2068         }
2069
2070         ath10k_dbg(ar, ATH10K_DBG_HTT,
2071                    "htt rx stop rx ba session sta %pM tid %hu\n",
2072                    peer->addr, tid);
2073
2074         ieee80211_stop_rx_ba_session_offl(arvif->vif, peer->addr, tid);
2075         spin_unlock_bh(&ar->data_lock);
2076 }
2077
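/* Move MSDUs from the head of @list into @amsdu up to and including the one
 * marked LAST_MSDU, i.e. one complete A-MSDU. If the list runs out before a
 * LAST_MSDU is found the MSDUs are spliced back and -EAGAIN is returned.
 */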
2078 static int ath10k_htt_rx_extract_amsdu(struct sk_buff_head *list,
2079                                        struct sk_buff_head *amsdu)
2080 {
2081         struct sk_buff *msdu;
2082         struct htt_rx_desc *rxd;
2083
2084         if (skb_queue_empty(list))
2085                 return -ENOBUFS;
2086
2087         if (WARN_ON(!skb_queue_empty(amsdu)))
2088                 return -EINVAL;
2089
2090         while ((msdu = __skb_dequeue(list))) {
2091                 __skb_queue_tail(amsdu, msdu);
2092
2093                 rxd = (void *)msdu->data - sizeof(*rxd);
2094                 if (rxd->msdu_end.common.info0 &
2095                     __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU))
2096                         break;
2097         }
2098
2099         msdu = skb_peek_tail(amsdu);
2100         rxd = (void *)msdu->data - sizeof(*rxd);
2101         if (!(rxd->msdu_end.common.info0 &
2102               __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU))) {
2103                 skb_queue_splice_init(amsdu, list);
2104                 return -EAGAIN;
2105         }
2106
2107         return 0;
2108 }
2109
2110 static void ath10k_htt_rx_h_rx_offload_prot(struct ieee80211_rx_status *status,
2111                                             struct sk_buff *skb)
2112 {
2113         struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
2114
2115         if (!ieee80211_has_protected(hdr->frame_control))
2116                 return;
2117
2118         /* Offloaded frames are already decrypted but firmware insists they are
2119          * protected in the 802.11 header. Strip the flag.  Otherwise mac80211
2120          * will drop the frame.
2121          */
2122
2123         hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_PROTECTED);
2124         status->flag |= RX_FLAG_DECRYPTED |
2125                         RX_FLAG_IV_STRIPPED |
2126                         RX_FLAG_MMIC_STRIPPED;
2127 }
2128
2129 static void ath10k_htt_rx_h_rx_offload(struct ath10k *ar,
2130                                        struct sk_buff_head *list)
2131 {
2132         struct ath10k_htt *htt = &ar->htt;
2133         struct ieee80211_rx_status *status = &htt->rx_status;
2134         struct htt_rx_offload_msdu *rx;
2135         struct sk_buff *msdu;
2136         size_t offset;
2137
2138         while ((msdu = __skb_dequeue(list))) {
2139                 /* Offloaded frames don't have Rx descriptor. Instead they have
2140                  * a short meta information header.
2141                  */
2142
2143                 rx = (void *)msdu->data;
2144
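                /* Advance msdu->data past the offload meta header: skb_put()
                 * grows the data area over it and skb_pull() then drops it
                 * from the front of the buffer.
                 */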
2145                 skb_put(msdu, sizeof(*rx));
2146                 skb_pull(msdu, sizeof(*rx));
2147
2148                 if (skb_tailroom(msdu) < __le16_to_cpu(rx->msdu_len)) {
2149                         ath10k_warn(ar, "dropping frame: offloaded rx msdu is too long!\n");
2150                         dev_kfree_skb_any(msdu);
2151                         continue;
2152                 }
2153
2154                 skb_put(msdu, __le16_to_cpu(rx->msdu_len));
2155
2156                 /* Offloaded rx header length isn't a multiple of 2 or 4 so the
2157                  * actual payload is unaligned. Align the frame.  Otherwise
2158                  * mac80211 complains.  This shouldn't reduce performance much
2159                  * because these offloaded frames are rare.
2160                  */
2161                 offset = 4 - ((unsigned long)msdu->data & 3);
2162                 skb_put(msdu, offset);
2163                 memmove(msdu->data + offset, msdu->data, msdu->len);
2164                 skb_pull(msdu, offset);
2165
2166                 /* FIXME: The frame is NWifi. Re-construct QoS Control
2167                  * if possible later.
2168                  */
2169
2170                 memset(status, 0, sizeof(*status));
2171                 status->flag |= RX_FLAG_NO_SIGNAL_VAL;
2172
2173                 ath10k_htt_rx_h_rx_offload_prot(status, msdu);
2174                 ath10k_htt_rx_h_channel(ar, status, NULL, rx->vdev_id);
2175                 ath10k_htt_rx_h_queue_msdu(ar, status, msdu);
2176         }
2177 }
2178
2179 static int ath10k_htt_rx_in_ord_ind(struct ath10k *ar, struct sk_buff *skb)
2180 {
2181         struct ath10k_htt *htt = &ar->htt;
2182         struct htt_resp *resp = (void *)skb->data;
2183         struct ieee80211_rx_status *status = &htt->rx_status;
2184         struct sk_buff_head list;
2185         struct sk_buff_head amsdu;
2186         u16 peer_id;
2187         u16 msdu_count;
2188         u8 vdev_id;
2189         u8 tid;
2190         bool offload;
2191         bool frag;
2192         int ret;
2193
2194         lockdep_assert_held(&htt->rx_ring.lock);
2195
2196         if (htt->rx_confused)
2197                 return -EIO;
2198
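        /* Strip the HTT response header and the in-order indication header;
         * what remains in the skb is the MSDU descriptor (paddr) list.
         */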
2199         skb_pull(skb, sizeof(resp->hdr));
2200         skb_pull(skb, sizeof(resp->rx_in_ord_ind));
2201
2202         peer_id = __le16_to_cpu(resp->rx_in_ord_ind.peer_id);
2203         msdu_count = __le16_to_cpu(resp->rx_in_ord_ind.msdu_count);
2204         vdev_id = resp->rx_in_ord_ind.vdev_id;
2205         tid = SM(resp->rx_in_ord_ind.info, HTT_RX_IN_ORD_IND_INFO_TID);
2206         offload = !!(resp->rx_in_ord_ind.info &
2207                         HTT_RX_IN_ORD_IND_INFO_OFFLOAD_MASK);
2208         frag = !!(resp->rx_in_ord_ind.info & HTT_RX_IN_ORD_IND_INFO_FRAG_MASK);
2209
2210         ath10k_dbg(ar, ATH10K_DBG_HTT,
2211                    "htt rx in ord vdev %i peer %i tid %i offload %i frag %i msdu count %i\n",
2212                    vdev_id, peer_id, tid, offload, frag, msdu_count);
2213
2214         if (skb->len < msdu_count * sizeof(*resp->rx_in_ord_ind.msdu_descs32)) {
2215                 ath10k_warn(ar, "dropping invalid in order rx indication\n");
2216                 return -EINVAL;
2217         }
2218
2219         /* The event can deliver more than 1 A-MSDU. Each A-MSDU is later
2220          * extracted and processed.
2221          */
2222         __skb_queue_head_init(&list);
2223         if (ar->hw_params.target_64bit)
2224                 ret = ath10k_htt_rx_pop_paddr64_list(htt, &resp->rx_in_ord_ind,
2225                                                      &list);
2226         else
2227                 ret = ath10k_htt_rx_pop_paddr32_list(htt, &resp->rx_in_ord_ind,
2228                                                      &list);
2229
2230         if (ret < 0) {
2231                 ath10k_warn(ar, "failed to pop paddr list: %d\n", ret);
2232                 htt->rx_confused = true;
2233                 return -EIO;
2234         }
2235
2236         /* Offloaded frames are very different and need to be handled
2237          * separately.
2238          */
2239         if (offload)
2240                 ath10k_htt_rx_h_rx_offload(ar, &list);
2241
2242         while (!skb_queue_empty(&list)) {
2243                 __skb_queue_head_init(&amsdu);
2244                 ret = ath10k_htt_rx_extract_amsdu(&list, &amsdu);
2245                 switch (ret) {
2246                 case 0:
2247                         /* Note: The in-order indication may report interleaved
2248                          * frames from different PPDUs meaning reported rx rate
2249                          * to mac80211 isn't accurate/reliable. It's still
2250                          * better to report something than nothing though. This
2251                          * should still give an idea about rx rate to the user.
2252                          */
2253                         ath10k_htt_rx_h_ppdu(ar, &amsdu, status, vdev_id);
2254                         ath10k_htt_rx_h_filter(ar, &amsdu, status, NULL);
2255                         ath10k_htt_rx_h_mpdu(ar, &amsdu, status, false, NULL,
2256                                              NULL);
2257                         ath10k_htt_rx_h_enqueue(ar, &amsdu, status);
2258                         break;
2259                 case -EAGAIN:
2260                         /* fall through */
2261                 default:
2262                         /* Should not happen. */
2263                         ath10k_warn(ar, "failed to extract amsdu: %d\n", ret);
2264                         htt->rx_confused = true;
2265                         __skb_queue_purge(&list);
2266                         return -EIO;
2267                 }
2268         }
2269         return ret;
2270 }
2271
2272 static void ath10k_htt_rx_tx_fetch_resp_id_confirm(struct ath10k *ar,
2273                                                    const __le32 *resp_ids,
2274                                                    int num_resp_ids)
2275 {
2276         int i;
2277         u32 resp_id;
2278
2279         ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch confirm num_resp_ids %d\n",
2280                    num_resp_ids);
2281
2282         for (i = 0; i < num_resp_ids; i++) {
2283                 resp_id = le32_to_cpu(resp_ids[i]);
2284
2285                 ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch confirm resp_id %u\n",
2286                            resp_id);
2287
2288                 /* TODO: free resp_id */
2289         }
2290 }
2291
2292 static void ath10k_htt_rx_tx_fetch_ind(struct ath10k *ar, struct sk_buff *skb)
2293 {
2294         struct ieee80211_hw *hw = ar->hw;
2295         struct ieee80211_txq *txq;
2296         struct htt_resp *resp = (struct htt_resp *)skb->data;
2297         struct htt_tx_fetch_record *record;
2298         size_t len;
2299         size_t max_num_bytes;
2300         size_t max_num_msdus;
2301         size_t num_bytes;
2302         size_t num_msdus;
2303         const __le32 *resp_ids;
2304         u16 num_records;
2305         u16 num_resp_ids;
2306         u16 peer_id;
2307         u8 tid;
2308         int ret;
2309         int i;
2310
2311         ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch ind\n");
2312
2313         len = sizeof(resp->hdr) + sizeof(resp->tx_fetch_ind);
2314         if (unlikely(skb->len < len)) {
2315                 ath10k_warn(ar, "received corrupted tx_fetch_ind event: buffer too short\n");
2316                 return;
2317         }
2318
2319         num_records = le16_to_cpu(resp->tx_fetch_ind.num_records);
2320         num_resp_ids = le16_to_cpu(resp->tx_fetch_ind.num_resp_ids);
2321
2322         len += sizeof(resp->tx_fetch_ind.records[0]) * num_records;
2323         len += sizeof(resp->tx_fetch_ind.resp_ids[0]) * num_resp_ids;
2324
2325         if (unlikely(skb->len < len)) {
2326                 ath10k_warn(ar, "received corrupted tx_fetch_ind event: too many records/resp_ids\n");
2327                 return;
2328         }
2329
2330         ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch ind num records %hu num resps %hu seq %hu\n",
2331                    num_records, num_resp_ids,
2332                    le16_to_cpu(resp->tx_fetch_ind.fetch_seq_num));
2333
2334         if (!ar->htt.tx_q_state.enabled) {
2335                 ath10k_warn(ar, "received unexpected tx_fetch_ind event: not enabled\n");
2336                 return;
2337         }
2338
2339         if (ar->htt.tx_q_state.mode == HTT_TX_MODE_SWITCH_PUSH) {
2340                 ath10k_warn(ar, "received unexpected tx_fetch_ind event: in push mode\n");
2341                 return;
2342         }
2343
2344         rcu_read_lock();
2345
2346         for (i = 0; i < num_records; i++) {
2347                 record = &resp->tx_fetch_ind.records[i];
2348                 peer_id = MS(le16_to_cpu(record->info),
2349                              HTT_TX_FETCH_RECORD_INFO_PEER_ID);
2350                 tid = MS(le16_to_cpu(record->info),
2351                          HTT_TX_FETCH_RECORD_INFO_TID);
2352                 max_num_msdus = le16_to_cpu(record->num_msdus);
2353                 max_num_bytes = le32_to_cpu(record->num_bytes);
2354
2355                 ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch record %i peer_id %hu tid %hhu msdus %zu bytes %zu\n",
2356                            i, peer_id, tid, max_num_msdus, max_num_bytes);
2357
2358                 if (unlikely(peer_id >= ar->htt.tx_q_state.num_peers) ||
2359                     unlikely(tid >= ar->htt.tx_q_state.num_tids)) {
2360                         ath10k_warn(ar, "received out of range peer_id %hu tid %hhu\n",
2361                                     peer_id, tid);
2362                         continue;
2363                 }
2364
2365                 spin_lock_bh(&ar->data_lock);
2366                 txq = ath10k_mac_txq_lookup(ar, peer_id, tid);
2367                 spin_unlock_bh(&ar->data_lock);
2368
2369                 /* It is okay to release the lock and use txq because RCU read
2370                  * lock is held.
2371                  */
2372
2373                 if (unlikely(!txq)) {
2374                         ath10k_warn(ar, "failed to lookup txq for peer_id %hu tid %hhu\n",
2375                                     peer_id, tid);
2376                         continue;
2377                 }
2378
2379                 num_msdus = 0;
2380                 num_bytes = 0;
2381
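                /* Push frames from the mac80211 txq until either the MSDU or
                 * byte budget advertised in this fetch record is exhausted,
                 * then report back how much was actually pushed.
                 */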
2382                 while (num_msdus < max_num_msdus &&
2383                        num_bytes < max_num_bytes) {
2384                         ret = ath10k_mac_tx_push_txq(hw, txq);
2385                         if (ret < 0)
2386                                 break;
2387
2388                         num_msdus++;
2389                         num_bytes += ret;
2390                 }
2391
2392                 record->num_msdus = cpu_to_le16(num_msdus);
2393                 record->num_bytes = cpu_to_le32(num_bytes);
2394
2395                 ath10k_htt_tx_txq_recalc(hw, txq);
2396         }
2397
2398         rcu_read_unlock();
2399
2400         resp_ids = ath10k_htt_get_tx_fetch_ind_resp_ids(&resp->tx_fetch_ind);
2401         ath10k_htt_rx_tx_fetch_resp_id_confirm(ar, resp_ids, num_resp_ids);
2402
2403         ret = ath10k_htt_tx_fetch_resp(ar,
2404                                        resp->tx_fetch_ind.token,
2405                                        resp->tx_fetch_ind.fetch_seq_num,
2406                                        resp->tx_fetch_ind.records,
2407                                        num_records);
2408         if (unlikely(ret)) {
2409                 ath10k_warn(ar, "failed to submit tx fetch resp for token 0x%08x: %d\n",
2410                             le32_to_cpu(resp->tx_fetch_ind.token), ret);
2411                 /* FIXME: request fw restart */
2412         }
2413
2414         ath10k_htt_tx_txq_sync(ar);
2415 }
2416
2417 static void ath10k_htt_rx_tx_fetch_confirm(struct ath10k *ar,
2418                                            struct sk_buff *skb)
2419 {
2420         const struct htt_resp *resp = (void *)skb->data;
2421         size_t len;
2422         int num_resp_ids;
2423
2424         ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch confirm\n");
2425
2426         len = sizeof(resp->hdr) + sizeof(resp->tx_fetch_confirm);
2427         if (unlikely(skb->len < len)) {
2428                 ath10k_warn(ar, "received corrupted tx_fetch_confirm event: buffer too short\n");
2429                 return;
2430         }
2431
2432         num_resp_ids = le16_to_cpu(resp->tx_fetch_confirm.num_resp_ids);
2433         len += sizeof(resp->tx_fetch_confirm.resp_ids[0]) * num_resp_ids;
2434
2435         if (unlikely(skb->len < len)) {
2436                 ath10k_warn(ar, "received corrupted tx_fetch_confirm event: resp_ids buffer overflow\n");
2437                 return;
2438         }
2439
2440         ath10k_htt_rx_tx_fetch_resp_id_confirm(ar,
2441                                                resp->tx_fetch_confirm.resp_ids,
2442                                                num_resp_ids);
2443 }
2444
2445 static void ath10k_htt_rx_tx_mode_switch_ind(struct ath10k *ar,
2446                                              struct sk_buff *skb)
2447 {
2448         const struct htt_resp *resp = (void *)skb->data;
2449         const struct htt_tx_mode_switch_record *record;
2450         struct ieee80211_txq *txq;
2451         struct ath10k_txq *artxq;
2452         size_t len;
2453         size_t num_records;
2454         enum htt_tx_mode_switch_mode mode;
2455         bool enable;
2456         u16 info0;
2457         u16 info1;
2458         u16 threshold;
2459         u16 peer_id;
2460         u8 tid;
2461         int i;
2462
2463         ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx mode switch ind\n");
2464
2465         len = sizeof(resp->hdr) + sizeof(resp->tx_mode_switch_ind);
2466         if (unlikely(skb->len < len)) {
2467                 ath10k_warn(ar, "received corrupted tx_mode_switch_ind event: buffer too short\n");
2468                 return;
2469         }
2470
2471         info0 = le16_to_cpu(resp->tx_mode_switch_ind.info0);
2472         info1 = le16_to_cpu(resp->tx_mode_switch_ind.info1);
2473
2474         enable = !!(info0 & HTT_TX_MODE_SWITCH_IND_INFO0_ENABLE);
2475         num_records = MS(info0, HTT_TX_MODE_SWITCH_IND_INFO0_NUM_RECORDS);
2476         mode = MS(info1, HTT_TX_MODE_SWITCH_IND_INFO1_MODE);
2477         threshold = MS(info1, HTT_TX_MODE_SWITCH_IND_INFO1_THRESHOLD);
2478
2479         ath10k_dbg(ar, ATH10K_DBG_HTT,
2480                    "htt rx tx mode switch ind info0 0x%04hx info1 0x%04hx enable %d num records %zd mode %d threshold %hu\n",
2481                    info0, info1, enable, num_records, mode, threshold);
2482
2483         len += sizeof(resp->tx_mode_switch_ind.records[0]) * num_records;
2484
2485         if (unlikely(skb->len < len)) {
2486                 ath10k_warn(ar, "received corrupted tx_mode_switch_ind event: too many records\n");
2487                 return;
2488         }
2489
2490         switch (mode) {
2491         case HTT_TX_MODE_SWITCH_PUSH:
2492         case HTT_TX_MODE_SWITCH_PUSH_PULL:
2493                 break;
2494         default:
2495                 ath10k_warn(ar, "received invalid tx_mode_switch_ind mode %d, ignoring\n",
2496                             mode);
2497                 return;
2498         }
2499
2500         if (!enable)
2501                 return;
2502
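        /* Record the new tx queuing mode; the advertised threshold becomes
         * the global push budget (num_push_allowed).
         */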
2503         ar->htt.tx_q_state.enabled = enable;
2504         ar->htt.tx_q_state.mode = mode;
2505         ar->htt.tx_q_state.num_push_allowed = threshold;
2506
2507         rcu_read_lock();
2508
2509         for (i = 0; i < num_records; i++) {
2510                 record = &resp->tx_mode_switch_ind.records[i];
2511                 info0 = le16_to_cpu(record->info0);
2512                 peer_id = MS(info0, HTT_TX_MODE_SWITCH_RECORD_INFO0_PEER_ID);
2513                 tid = MS(info0, HTT_TX_MODE_SWITCH_RECORD_INFO0_TID);
2514
2515                 if (unlikely(peer_id >= ar->htt.tx_q_state.num_peers) ||
2516                     unlikely(tid >= ar->htt.tx_q_state.num_tids)) {
2517                         ath10k_warn(ar, "received out of range peer_id %hu tid %hhu\n",
2518                                     peer_id, tid);
2519                         continue;
2520                 }
2521
2522                 spin_lock_bh(&ar->data_lock);
2523                 txq = ath10k_mac_txq_lookup(ar, peer_id, tid);
2524                 spin_unlock_bh(&ar->data_lock);
2525
2526                 /* It is okay to release the lock and use txq because RCU read
2527                  * lock is held.
2528                  */
2529
2530                 if (unlikely(!txq)) {
2531                         ath10k_warn(ar, "failed to lookup txq for peer_id %hu tid %hhu\n",
2532                                     peer_id, tid);
2533                         continue;
2534                 }
2535
2536                 spin_lock_bh(&ar->htt.tx_lock);
2537                 artxq = (void *)txq->drv_priv;
2538                 artxq->num_push_allowed = le16_to_cpu(record->num_max_msdus);
2539                 spin_unlock_bh(&ar->htt.tx_lock);
2540         }
2541
2542         rcu_read_unlock();
2543
2544         ath10k_mac_tx_push_pending(ar);
2545 }
2546
2547 void ath10k_htt_htc_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
2548 {
2549         bool release;
2550
2551         release = ath10k_htt_t2h_msg_handler(ar, skb);
2552
2553         /* Free the indication buffer */
2554         if (release)
2555                 dev_kfree_skb_any(skb);
2556 }
2557
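/* Legacy CCK/OFDM rates (in Mbps) that firmware may report in the ratecode. */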
2558 static inline bool is_valid_legacy_rate(u8 rate)
2559 {
2560         static const u8 legacy_rates[] = {1, 2, 5, 11, 6, 9, 12,
2561                                           18, 24, 36, 48, 54};
2562         int i;
2563
2564         for (i = 0; i < ARRAY_SIZE(legacy_rates); i++) {
2565                 if (rate == legacy_rates[i])
2566                         return true;
2567         }
2568
2569         return false;
2570 }
2571
2572 static void
2573 ath10k_update_per_peer_tx_stats(struct ath10k *ar,
2574                                 struct ieee80211_sta *sta,
2575                                 struct ath10k_per_peer_tx_stats *peer_stats)
2576 {
2577         struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
2578         u8 rate = 0, sgi;
2579         struct rate_info txrate;
2580
2581         lockdep_assert_held(&ar->data_lock);
2582
2583         txrate.flags = ATH10K_HW_PREAMBLE(peer_stats->ratecode);
2584         txrate.bw = ATH10K_HW_BW(peer_stats->flags);
2585         txrate.nss = ATH10K_HW_NSS(peer_stats->ratecode);
2586         txrate.mcs = ATH10K_HW_MCS_RATE(peer_stats->ratecode);
2587         sgi = ATH10K_HW_GI(peer_stats->flags);
2588
2589         if (txrate.flags == WMI_RATE_PREAMBLE_VHT && txrate.mcs > 9) {
2590                 ath10k_warn(ar, "Invalid VHT mcs %hhd peer stats", txrate.mcs);
2591                 return;
2592         }
2593
2594         if (txrate.flags == WMI_RATE_PREAMBLE_HT &&
2595             (txrate.mcs > 7 || txrate.nss < 1)) {
2596                 ath10k_warn(ar, "Invalid HT mcs %hhd nss %hhd peer stats",
2597                             txrate.mcs, txrate.nss);
2598                 return;
2599         }
2600
2601         memset(&arsta->txrate, 0, sizeof(arsta->txrate));
2602
2603         if (txrate.flags == WMI_RATE_PREAMBLE_CCK ||
2604             txrate.flags == WMI_RATE_PREAMBLE_OFDM) {
2605                 rate = ATH10K_HW_LEGACY_RATE(peer_stats->ratecode);
2606
2607                 if (!is_valid_legacy_rate(rate)) {
2608                         ath10k_warn(ar, "Invalid legacy rate %hhd peer stats",
2609                                     rate);
2610                         return;
2611                 }
2612
2613                 /* This is hacky, FW sends CCK rate 5.5Mbps as 6 */
2614                 rate *= 10;
2615                 if (rate == 60 && txrate.flags == WMI_RATE_PREAMBLE_CCK)
2616                         rate = rate - 5;
2617                 arsta->txrate.legacy = rate;
2618         } else if (txrate.flags == WMI_RATE_PREAMBLE_HT) {
2619                 arsta->txrate.flags = RATE_INFO_FLAGS_MCS;
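                /* HT MCS indices encode the spatial stream count: each
                 * additional stream adds 8 to the per-stream MCS value.
                 */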
2620                 arsta->txrate.mcs = txrate.mcs + 8 * (txrate.nss - 1);
2621         } else {
2622                 arsta->txrate.flags = RATE_INFO_FLAGS_VHT_MCS;
2623                 arsta->txrate.mcs = txrate.mcs;
2624         }
2625
2626         if (sgi)
2627                 arsta->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI;
2628
2629         arsta->txrate.nss = txrate.nss;
2630         arsta->txrate.bw = ath10k_bw_to_mac80211_bw(txrate.bw);
2631 }
2632
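/* Parse a per-peer tx stats indication: each PPDU record in the payload is
 * copied into ar->peer_tx_stats and passed to
 * ath10k_update_per_peer_tx_stats() for the corresponding station.
 */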
2633 static void ath10k_htt_fetch_peer_stats(struct ath10k *ar,
2634                                         struct sk_buff *skb)
2635 {
2636         struct htt_resp *resp = (struct htt_resp *)skb->data;
2637         struct ath10k_per_peer_tx_stats *p_tx_stats = &ar->peer_tx_stats;
2638         struct htt_per_peer_tx_stats_ind *tx_stats;
2639         struct ieee80211_sta *sta;
2640         struct ath10k_peer *peer;
2641         int peer_id, i;
2642         u8 ppdu_len, num_ppdu;
2643
2644         num_ppdu = resp->peer_tx_stats.num_ppdu;
2645         ppdu_len = resp->peer_tx_stats.ppdu_len * sizeof(__le32);
2646
2647         if (skb->len < sizeof(struct htt_resp_hdr) + num_ppdu * ppdu_len) {
2648                 ath10k_warn(ar, "Invalid peer stats buf length %d\n", skb->len);
2649                 return;
2650         }
2651
2652         tx_stats = (struct htt_per_peer_tx_stats_ind *)
2653                         (resp->peer_tx_stats.payload);
2654         peer_id = __le16_to_cpu(tx_stats->peer_id);
2655
2656         rcu_read_lock();
2657         spin_lock_bh(&ar->data_lock);
2658         peer = ath10k_peer_find_by_id(ar, peer_id);
2659         if (!peer || !peer->sta) {
2660                 ath10k_warn(ar, "Invalid peer id %d peer stats buffer\n",
2661                             peer_id);
2662                 goto out;
2663         }
2664
2665         sta = peer->sta;
2666         for (i = 0; i < num_ppdu; i++) {
2667                 tx_stats = (struct htt_per_peer_tx_stats_ind *)
2668                            (resp->peer_tx_stats.payload + i * ppdu_len);
2669
2670                 p_tx_stats->succ_bytes = __le32_to_cpu(tx_stats->succ_bytes);
2671                 p_tx_stats->retry_bytes = __le32_to_cpu(tx_stats->retry_bytes);
2672                 p_tx_stats->failed_bytes =
2673                                 __le32_to_cpu(tx_stats->failed_bytes);
2674                 p_tx_stats->ratecode = tx_stats->ratecode;
2675                 p_tx_stats->flags = tx_stats->flags;
2676                 p_tx_stats->succ_pkts = __le16_to_cpu(tx_stats->succ_pkts);
2677                 p_tx_stats->retry_pkts = __le16_to_cpu(tx_stats->retry_pkts);
2678                 p_tx_stats->failed_pkts = __le16_to_cpu(tx_stats->failed_pkts);
2679
2680                 ath10k_update_per_peer_tx_stats(ar, sta, p_tx_stats);
2681         }
2682
2683 out:
2684         spin_unlock_bh(&ar->data_lock);
2685         rcu_read_unlock();
2686 }
2687
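/* 10.2 firmware reports per-peer tx stats via pktlog; only
 * ATH_PKTLOG_TYPE_TX_STAT entries carry the stats parsed here.
 */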
2688 static void ath10k_fetch_10_2_tx_stats(struct ath10k *ar, u8 *data)
2689 {
2690         struct ath10k_pktlog_hdr *hdr = (struct ath10k_pktlog_hdr *)data;
2691         struct ath10k_per_peer_tx_stats *p_tx_stats = &ar->peer_tx_stats;
2692         struct ath10k_10_2_peer_tx_stats *tx_stats;
2693         struct ieee80211_sta *sta;
2694         struct ath10k_peer *peer;
2695         u16 log_type = __le16_to_cpu(hdr->log_type);
2696         u32 peer_id = 0, i;
2697
2698         if (log_type != ATH_PKTLOG_TYPE_TX_STAT)
2699                 return;
2700
2701         tx_stats = (struct ath10k_10_2_peer_tx_stats *)((hdr->payload) +
2702                     ATH10K_10_2_TX_STATS_OFFSET);
2703
2704         if (!tx_stats->tx_ppdu_cnt)
2705                 return;
2706
2707         peer_id = tx_stats->peer_id;
2708
2709         rcu_read_lock();
2710         spin_lock_bh(&ar->data_lock);
2711         peer = ath10k_peer_find_by_id(ar, peer_id);
2712         if (!peer || !peer->sta) {
2713                 ath10k_warn(ar, "Invalid peer id %d in peer stats buffer\n",
2714                             peer_id);
2715                 goto out;
2716         }
2717
2718         sta = peer->sta;
2719         for (i = 0; i < tx_stats->tx_ppdu_cnt; i++) {
2720                 p_tx_stats->succ_bytes =
2721                         __le16_to_cpu(tx_stats->success_bytes[i]);
2722                 p_tx_stats->retry_bytes =
2723                         __le16_to_cpu(tx_stats->retry_bytes[i]);
2724                 p_tx_stats->failed_bytes =
2725                         __le16_to_cpu(tx_stats->failed_bytes[i]);
2726                 p_tx_stats->ratecode = tx_stats->ratecode[i];
2727                 p_tx_stats->flags = tx_stats->flags[i];
2728                 p_tx_stats->succ_pkts = tx_stats->success_pkts[i];
2729                 p_tx_stats->retry_pkts = tx_stats->retry_pkts[i];
2730                 p_tx_stats->failed_pkts = tx_stats->failed_pkts[i];
2731
2732                 ath10k_update_per_peer_tx_stats(ar, sta, p_tx_stats);
2733         }
2734         spin_unlock_bh(&ar->data_lock);
2735         rcu_read_unlock();
2736
2737         return;
2738
2739 out:
2740         spin_unlock_bh(&ar->data_lock);
2741         rcu_read_unlock();
2742 }
2743
2744 bool ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
2745 {
2746         struct ath10k_htt *htt = &ar->htt;
2747         struct htt_resp *resp = (struct htt_resp *)skb->data;
2748         enum htt_t2h_msg_type type;
2749
2750         /* confirm alignment */
2751         if (!IS_ALIGNED((unsigned long)skb->data, 4))
2752                 ath10k_warn(ar, "unaligned htt message, expect trouble\n");
2753
2754         ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx, msg_type: 0x%0X\n",
2755                    resp->hdr.msg_type);
2756
2757         if (resp->hdr.msg_type >= ar->htt.t2h_msg_types_max) {
2758                 ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx, unsupported msg_type: 0x%0X max: 0x%0X\n",
2759                            resp->hdr.msg_type, ar->htt.t2h_msg_types_max);
2760                 return true;
2761         }
2762         type = ar->htt.t2h_msg_types[resp->hdr.msg_type];
2763
2764         switch (type) {
2765         case HTT_T2H_MSG_TYPE_VERSION_CONF: {
2766                 htt->target_version_major = resp->ver_resp.major;
2767                 htt->target_version_minor = resp->ver_resp.minor;
2768                 complete(&htt->target_version_received);
2769                 break;
2770         }
2771         case HTT_T2H_MSG_TYPE_RX_IND:
2772                 ath10k_htt_rx_proc_rx_ind(htt, &resp->rx_ind);
2773                 break;
2774         case HTT_T2H_MSG_TYPE_PEER_MAP: {
2775                 struct htt_peer_map_event ev = {
2776                         .vdev_id = resp->peer_map.vdev_id,
2777                         .peer_id = __le16_to_cpu(resp->peer_map.peer_id),
2778                 };
2779                 memcpy(ev.addr, resp->peer_map.addr, sizeof(ev.addr));
2780                 ath10k_peer_map_event(htt, &ev);
2781                 break;
2782         }
2783         case HTT_T2H_MSG_TYPE_PEER_UNMAP: {
2784                 struct htt_peer_unmap_event ev = {
2785                         .peer_id = __le16_to_cpu(resp->peer_unmap.peer_id),
2786                 };
2787                 ath10k_peer_unmap_event(htt, &ev);
2788                 break;
2789         }
2790         case HTT_T2H_MSG_TYPE_MGMT_TX_COMPLETION: {
2791                 struct htt_tx_done tx_done = {};
2792                 int status = __le32_to_cpu(resp->mgmt_tx_completion.status);
2793                 int info = __le32_to_cpu(resp->mgmt_tx_completion.info);
2794
2795                 tx_done.msdu_id = __le32_to_cpu(resp->mgmt_tx_completion.desc_id);
2796
2797                 switch (status) {
2798                 case HTT_MGMT_TX_STATUS_OK:
2799                         tx_done.status = HTT_TX_COMPL_STATE_ACK;
2800                         if (test_bit(WMI_SERVICE_HTT_MGMT_TX_COMP_VALID_FLAGS,
2801                                      ar->wmi.svc_map) &&
2802                             (resp->mgmt_tx_completion.flags &
2803                              HTT_MGMT_TX_CMPL_FLAG_ACK_RSSI)) {
2804                                 tx_done.ack_rssi =
2805                                 FIELD_GET(HTT_MGMT_TX_CMPL_INFO_ACK_RSSI_MASK,
2806                                           info);
2807                         }
2808                         break;
2809                 case HTT_MGMT_TX_STATUS_RETRY:
2810                         tx_done.status = HTT_TX_COMPL_STATE_NOACK;
2811                         break;
2812                 case HTT_MGMT_TX_STATUS_DROP:
2813                         tx_done.status = HTT_TX_COMPL_STATE_DISCARD;
2814                         break;
2815                 }
2816
2817                 status = ath10k_txrx_tx_unref(htt, &tx_done);
2818                 if (!status) {
2819                         spin_lock_bh(&htt->tx_lock);
2820                         ath10k_htt_tx_mgmt_dec_pending(htt);
2821                         spin_unlock_bh(&htt->tx_lock);
2822                 }
2823                 break;
2824         }
2825         case HTT_T2H_MSG_TYPE_TX_COMPL_IND:
2826                 ath10k_htt_rx_tx_compl_ind(htt->ar, skb);
2827                 break;
2828         case HTT_T2H_MSG_TYPE_SEC_IND: {
2829                 struct ath10k *ar = htt->ar;
2830                 struct htt_security_indication *ev = &resp->security_indication;
2831
2832                 ath10k_dbg(ar, ATH10K_DBG_HTT,
2833                            "sec ind peer_id %d unicast %d type %d\n",
2834                            __le16_to_cpu(ev->peer_id),
2835                            !!(ev->flags & HTT_SECURITY_IS_UNICAST),
2836                            MS(ev->flags, HTT_SECURITY_TYPE));
2837                 complete(&ar->install_key_done);
2838                 break;
2839         }
2840         case HTT_T2H_MSG_TYPE_RX_FRAG_IND: {
2841                 ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt event: ",
2842                                 skb->data, skb->len);
2843                 atomic_inc(&htt->num_mpdus_ready);
2844                 break;
2845         }
2846         case HTT_T2H_MSG_TYPE_TEST:
2847                 break;
2848         case HTT_T2H_MSG_TYPE_STATS_CONF:
2849                 trace_ath10k_htt_stats(ar, skb->data, skb->len);
2850                 break;
2851         case HTT_T2H_MSG_TYPE_TX_INSPECT_IND:
2852                 /* Firmware can return tx frames if it's unable to fully
2853                  * process them and suspects the host may be able to fix them.
2854                  * ath10k sends all tx frames as already inspected, so this
2855                  * shouldn't happen unless fw has a bug.
2856                  */
2857                 ath10k_warn(ar, "received an unexpected htt tx inspect event\n");
2858                 break;
2859         case HTT_T2H_MSG_TYPE_RX_ADDBA:
2860                 ath10k_htt_rx_addba(ar, resp);
2861                 break;
2862         case HTT_T2H_MSG_TYPE_RX_DELBA:
2863                 ath10k_htt_rx_delba(ar, resp);
2864                 break;
2865         case HTT_T2H_MSG_TYPE_PKTLOG: {
2866                 trace_ath10k_htt_pktlog(ar, resp->pktlog_msg.payload,
2867                                         skb->len -
2868                                         offsetof(struct htt_resp,
2869                                                  pktlog_msg.payload));
2870
2871                 if (ath10k_peer_stats_enabled(ar))
2872                         ath10k_fetch_10_2_tx_stats(ar,
2873                                                    resp->pktlog_msg.payload);
2874                 break;
2875         }
2876         case HTT_T2H_MSG_TYPE_RX_FLUSH: {
2877                 /* Ignore this event because mac80211 takes care of Rx
2878                  * aggregation reordering.
2879                  */
2880                 break;
2881         }
2882         case HTT_T2H_MSG_TYPE_RX_IN_ORD_PADDR_IND: {
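                      /* Defer in-order rx processing to the NAPI poll context
                       * (ath10k_htt_txrx_compl_task); returning false tells the
                       * caller that the skb has been queued and must not be freed.
                       */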
2883                 skb_queue_tail(&htt->rx_in_ord_compl_q, skb);
2884                 return false;
2885         }
2886         case HTT_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND:
2887                 break;
2888         case HTT_T2H_MSG_TYPE_CHAN_CHANGE: {
2889                 u32 phymode = __le32_to_cpu(resp->chan_change.phymode);
2890                 u32 freq = __le32_to_cpu(resp->chan_change.freq);
2891
2892                 ar->tgt_oper_chan = ieee80211_get_channel(ar->hw->wiphy, freq);
2893                 ath10k_dbg(ar, ATH10K_DBG_HTT,
2894                            "htt chan change freq %u phymode %s\n",
2895                            freq, ath10k_wmi_phymode_str(phymode));
2896                 break;
2897         }
2898         case HTT_T2H_MSG_TYPE_AGGR_CONF:
2899                 break;
2900         case HTT_T2H_MSG_TYPE_TX_FETCH_IND: {
2901                 struct sk_buff *tx_fetch_ind = skb_copy(skb, GFP_ATOMIC);
2902
2903                 if (!tx_fetch_ind) {
2904                         ath10k_warn(ar, "failed to copy htt tx fetch ind\n");
2905                         break;
2906                 }
2907                 skb_queue_tail(&htt->tx_fetch_ind_q, tx_fetch_ind);
2908                 break;
2909         }
2910         case HTT_T2H_MSG_TYPE_TX_FETCH_CONFIRM:
2911                 ath10k_htt_rx_tx_fetch_confirm(ar, skb);
2912                 break;
2913         case HTT_T2H_MSG_TYPE_TX_MODE_SWITCH_IND:
2914                 ath10k_htt_rx_tx_mode_switch_ind(ar, skb);
2915                 break;
2916         case HTT_T2H_MSG_TYPE_PEER_STATS:
2917                 ath10k_htt_fetch_peer_stats(ar, skb);
2918                 break;
2919         case HTT_T2H_MSG_TYPE_EN_STATS:
2920         default:
2921                 ath10k_warn(ar, "htt event (%d) not handled\n",
2922                             resp->hdr.msg_type);
2923                 ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt event: ",
2924                                 skb->data, skb->len);
2925                 break;
2926         }
2927         return true;
2928 }
2929 EXPORT_SYMBOL(ath10k_htt_t2h_msg_handler);
2930
2931 void ath10k_htt_rx_pktlog_completion_handler(struct ath10k *ar,
2932                                              struct sk_buff *skb)
2933 {
2934         trace_ath10k_htt_pktlog(ar, skb->data, skb->len);
2935         dev_kfree_skb_any(skb);
2936 }
2937 EXPORT_SYMBOL(ath10k_htt_rx_pktlog_completion_handler);
2938
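     /* Deliver up to (budget - quota) MSDUs from rx_msdus_q via
      * ath10k_process_rx() and return the updated quota.
      */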
2939 static int ath10k_htt_rx_deliver_msdu(struct ath10k *ar, int quota, int budget)
2940 {
2941         struct sk_buff *skb;
2942
2943         while (quota < budget) {
2944                 if (skb_queue_empty(&ar->htt.rx_msdus_q))
2945                         break;
2946
2947                 skb = skb_dequeue(&ar->htt.rx_msdus_q);
2948                 if (!skb)
2949                         break;
2950                 ath10k_process_rx(ar, skb);
2951                 quota++;
2952         }
2953
2954         return quota;
2955 }
2956
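     /* NAPI poll work for HTT: deliver queued rx MSDUs, process in-order rx
      * indications, pending A-MSDUs, tx completions and tx fetch indications,
      * then replenish the rx ring. Returns the amount of work done; reporting
      * the full budget causes the NAPI poll to be rescheduled.
      */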
2957 int ath10k_htt_txrx_compl_task(struct ath10k *ar, int budget)
2958 {
2959         struct ath10k_htt *htt = &ar->htt;
2960         struct htt_tx_done tx_done = {};
2961         struct sk_buff_head tx_ind_q;
2962         struct sk_buff *skb;
2963         unsigned long flags;
2964         int quota = 0, done, ret;
2965         bool resched_napi = false;
2966
2967         __skb_queue_head_init(&tx_ind_q);
2968
2969         /* Process pending frames before dequeuing more data
2970          * from hardware.
2971          */
2972         quota = ath10k_htt_rx_deliver_msdu(ar, quota, budget);
2973         if (quota == budget) {
2974                 resched_napi = true;
2975                 goto exit;
2976         }
2977
2978         while ((skb = skb_dequeue(&htt->rx_in_ord_compl_q))) {
2979                 spin_lock_bh(&htt->rx_ring.lock);
2980                 ret = ath10k_htt_rx_in_ord_ind(ar, skb);
2981                 spin_unlock_bh(&htt->rx_ring.lock);
2982
2983                 dev_kfree_skb_any(skb);
2984                 if (ret == -EIO) {
2985                         resched_napi = true;
2986                         goto exit;
2987                 }
2988         }
2989
2990         while (atomic_read(&htt->num_mpdus_ready)) {
2991                 ret = ath10k_htt_rx_handle_amsdu(htt);
2992                 if (ret == -EIO) {
2993                         resched_napi = true;
2994                         goto exit;
2995                 }
2996                 atomic_dec(&htt->num_mpdus_ready);
2997         }
2998
2999         /* Deliver received data after processing data from hardware */
3000         quota = ath10k_htt_rx_deliver_msdu(ar, quota, budget);
3001
3002         /* From NAPI documentation:
3003          *  The napi poll() function may also process TX completions, in which
3004          *  case if it processes the entire TX ring then it should count that
3005          *  work as the rest of the budget.
3006          */
3007         if ((quota < budget) && !kfifo_is_empty(&htt->txdone_fifo))
3008                 quota = budget;
3009
3010         /* kfifo_get: called only from this NAPI poll context, so it is
3011          * neatly serialized. From kfifo_get() documentation:
3012          *  Note that with only one concurrent reader and one concurrent writer,
3013          *  you don't need extra locking to use these macros.
3014          */
3015         while (kfifo_get(&htt->txdone_fifo, &tx_done))
3016                 ath10k_txrx_tx_unref(htt, &tx_done);
3017
3018         ath10k_mac_tx_push_pending(ar);
3019
3020         spin_lock_irqsave(&htt->tx_fetch_ind_q.lock, flags);
3021         skb_queue_splice_init(&htt->tx_fetch_ind_q, &tx_ind_q);
3022         spin_unlock_irqrestore(&htt->tx_fetch_ind_q.lock, flags);
3023
3024         while ((skb = __skb_dequeue(&tx_ind_q))) {
3025                 ath10k_htt_rx_tx_fetch_ind(ar, skb);
3026                 dev_kfree_skb_any(skb);
3027         }
3028
3029 exit:
3030         ath10k_htt_rx_msdu_buff_replenish(htt);
3031         /* In case of rx failure or more data to read, report the full
3032          * budget so that the NAPI poll is rescheduled.
3033          */
3034         done = resched_napi ? budget : quota;
3035
3036         return done;
3037 }
3038 EXPORT_SYMBOL(ath10k_htt_txrx_compl_task);
3039
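     /* Rx descriptor ring accessors for targets using 32-bit and 64-bit DMA
      * addresses in the rx paddrs ring.
      */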
3040 static const struct ath10k_htt_rx_ops htt_rx_ops_32 = {
3041         .htt_get_rx_ring_size = ath10k_htt_get_rx_ring_size_32,
3042         .htt_config_paddrs_ring = ath10k_htt_config_paddrs_ring_32,
3043         .htt_set_paddrs_ring = ath10k_htt_set_paddrs_ring_32,
3044         .htt_get_vaddr_ring = ath10k_htt_get_vaddr_ring_32,
3045         .htt_reset_paddrs_ring = ath10k_htt_reset_paddrs_ring_32,
3046 };
3047
3048 static const struct ath10k_htt_rx_ops htt_rx_ops_64 = {
3049         .htt_get_rx_ring_size = ath10k_htt_get_rx_ring_size_64,
3050         .htt_config_paddrs_ring = ath10k_htt_config_paddrs_ring_64,
3051         .htt_set_paddrs_ring = ath10k_htt_set_paddrs_ring_64,
3052         .htt_get_vaddr_ring = ath10k_htt_get_vaddr_ring_64,
3053         .htt_reset_paddrs_ring = ath10k_htt_reset_paddrs_ring_64,
3054 };
3055
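     /* Select the 32-bit or 64-bit rx ring ops based on the target's
      * addressing width.
      */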
3056 void ath10k_htt_set_rx_ops(struct ath10k_htt *htt)
3057 {
3058         struct ath10k *ar = htt->ar;
3059
3060         if (ar->hw_params.target_64bit)
3061                 htt->rx_ops = &htt_rx_ops_64;
3062         else
3063                 htt->rx_ops = &htt_rx_ops_32;
3064 }