/*
 * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
#include <linux/dma-mapping.h>
#include "mt76.h"
#include "dma.h"

#define DMA_DUMMY_TXWI	((void *) ~0)
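/*
 * Allocate the descriptor ring and per-entry bookkeeping for a queue, then
 * program the ring base, size and index registers.
 */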
static int
mt76_dma_alloc_queue(struct mt76_dev *dev, struct mt76_queue *q)
{
	int size;
	int i;

	spin_lock_init(&q->lock);
	INIT_LIST_HEAD(&q->swq);

	size = q->ndesc * sizeof(struct mt76_desc);
	q->desc = dmam_alloc_coherent(dev->dev, size, &q->desc_dma, GFP_KERNEL);
	if (!q->desc)
		return -ENOMEM;

	size = q->ndesc * sizeof(*q->entry);
	q->entry = devm_kzalloc(dev->dev, size, GFP_KERNEL);
	if (!q->entry)
		return -ENOMEM;

	/* clear descriptors */
	for (i = 0; i < q->ndesc; i++)
		q->desc[i].ctrl = cpu_to_le32(MT_DMA_CTL_DMA_DONE);

	iowrite32(q->desc_dma, &q->regs->desc_base);
	iowrite32(0, &q->regs->cpu_idx);
	iowrite32(0, &q->regs->dma_idx);
	iowrite32(q->ndesc, &q->regs->ring_size);

	return 0;
}
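/*
 * Fill hardware descriptors with up to two buffers each; the entry for the
 * last descriptor keeps the skb/txwi pointers so completion can free them.
 */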
static int
mt76_dma_add_buf(struct mt76_dev *dev, struct mt76_queue *q,
		 struct mt76_queue_buf *buf, int nbufs, u32 info,
		 struct sk_buff *skb, void *txwi)
{
	struct mt76_desc *desc;
	u32 ctrl;
	int i, idx = -1;

	if (txwi)
		q->entry[q->head].txwi = DMA_DUMMY_TXWI;

	for (i = 0; i < nbufs; i += 2, buf += 2) {
		u32 buf0 = buf[0].addr, buf1 = 0;

		ctrl = FIELD_PREP(MT_DMA_CTL_SD_LEN0, buf[0].len);
		if (i < nbufs - 1) {
			buf1 = buf[1].addr;
			ctrl |= FIELD_PREP(MT_DMA_CTL_SD_LEN1, buf[1].len);
		}

		if (i == nbufs - 1)
			ctrl |= MT_DMA_CTL_LAST_SEC0;
		else if (i == nbufs - 2)
			ctrl |= MT_DMA_CTL_LAST_SEC1;

		idx = q->head;
		q->head = (q->head + 1) % q->ndesc;

		desc = &q->desc[idx];
		WRITE_ONCE(desc->buf0, cpu_to_le32(buf0));
		WRITE_ONCE(desc->buf1, cpu_to_le32(buf1));
		WRITE_ONCE(desc->info, cpu_to_le32(info));
		WRITE_ONCE(desc->ctrl, cpu_to_le32(ctrl));

		q->queued++;
	}

	q->entry[idx].txwi = txwi;
	q->entry[idx].skb = skb;

	return idx;
}
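/*
 * Unmap the buffers referenced by one descriptor and hand the queue entry
 * back to the caller via @prev_e before clearing it.
 */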
static void
mt76_dma_tx_cleanup_idx(struct mt76_dev *dev, struct mt76_queue *q, int idx,
			struct mt76_queue_entry *prev_e)
{
	struct mt76_queue_entry *e = &q->entry[idx];
	__le32 __ctrl = READ_ONCE(q->desc[idx].ctrl);
	u32 ctrl = le32_to_cpu(__ctrl);

	if (!e->txwi || !e->skb) {
		__le32 addr = READ_ONCE(q->desc[idx].buf0);
		u32 len = FIELD_GET(MT_DMA_CTL_SD_LEN0, ctrl);

		dma_unmap_single(dev->dev, le32_to_cpu(addr), len,
				 DMA_TO_DEVICE);
	}

	if (!(ctrl & MT_DMA_CTL_LAST_SEC0)) {
		__le32 addr = READ_ONCE(q->desc[idx].buf1);
		u32 len = FIELD_GET(MT_DMA_CTL_SD_LEN1, ctrl);

		dma_unmap_single(dev->dev, le32_to_cpu(addr), len,
				 DMA_TO_DEVICE);
	}

	if (e->txwi == DMA_DUMMY_TXWI)
		e->txwi = NULL;

	*prev_e = *e;
	memset(e, 0, sizeof(*e));
}
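/*
 * Resynchronize the software ring indices with the current hardware DMA
 * index, leaving the ring empty from the hardware's point of view.
 */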
static void
mt76_dma_sync_idx(struct mt76_dev *dev, struct mt76_queue *q)
{
	q->head = ioread32(&q->regs->dma_idx);
	q->tail = q->head;
	iowrite32(q->head, &q->regs->cpu_idx);
}
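/*
 * Reap completed TX descriptors up to the hardware index, completing skbs
 * and recycling txwi blocks, then wake the mac80211 queue if space freed up.
 */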
static void
mt76_dma_tx_cleanup(struct mt76_dev *dev, enum mt76_txq_id qid, bool flush)
{
	struct mt76_queue *q = &dev->q_tx[qid];
	struct mt76_queue_entry entry;
	bool wake = false;
	int last;

	spin_lock_bh(&q->lock);

	if (flush)
		last = -1;
	else
		last = ioread32(&q->regs->dma_idx);

	while (q->queued && q->tail != last) {
		mt76_dma_tx_cleanup_idx(dev, q, q->tail, &entry);
		if (entry.schedule)
			q->swq_queued--;

		if (entry.skb)
			dev->drv->tx_complete_skb(dev, q, &entry, flush);

		if (entry.txwi) {
			mt76_put_txwi(dev, entry.txwi);
			wake = true;
		}

		q->tail = (q->tail + 1) % q->ndesc;
		q->queued--;

		if (!flush && q->tail == last)
			last = ioread32(&q->regs->dma_idx);
	}

	if (!flush)
		mt76_txq_schedule(dev, q);
	else
		mt76_dma_sync_idx(dev, q);

	wake = wake && qid < IEEE80211_NUM_ACS && q->queued < q->ndesc - 8;

	if (!q->queued)
		wake_up(&dev->tx_wait);

	spin_unlock_bh(&q->lock);

	if (wake)
		ieee80211_wake_queue(dev->hw, qid);
}
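/*
 * Detach and unmap the RX buffer owned by a descriptor, reporting its
 * length, info word and whether more fragments of the frame follow.
 */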
static void *
mt76_dma_get_buf(struct mt76_dev *dev, struct mt76_queue *q, int idx,
		 int *len, u32 *info, bool *more)
{
	struct mt76_queue_entry *e = &q->entry[idx];
	struct mt76_desc *desc = &q->desc[idx];
	dma_addr_t buf_addr;
	void *buf = e->buf;
	int buf_len = SKB_WITH_OVERHEAD(q->buf_size);

	buf_addr = le32_to_cpu(READ_ONCE(desc->buf0));
	if (len) {
		u32 ctl = le32_to_cpu(READ_ONCE(desc->ctrl));
		*len = FIELD_GET(MT_DMA_CTL_SD_LEN0, ctl);
		*more = !(ctl & MT_DMA_CTL_LAST_SEC0);
	}

	if (info)
		*info = le32_to_cpu(desc->info);

	dma_unmap_single(dev->dev, buf_addr, buf_len, DMA_FROM_DEVICE);
	e->buf = NULL;

	return buf;
}
static void *
mt76_dma_dequeue(struct mt76_dev *dev, struct mt76_queue *q, bool flush,
		 int *len, u32 *info, bool *more)
{
	int idx = q->tail;

	*more = false;
	if (!q->queued)
		return NULL;

	if (!flush && !(q->desc[idx].ctrl & cpu_to_le32(MT_DMA_CTL_DMA_DONE)))
		return NULL;

	q->tail = (q->tail + 1) % q->ndesc;
	q->queued--;

	return mt76_dma_get_buf(dev, q, idx, len, info, more);
}
static void
mt76_dma_kick_queue(struct mt76_dev *dev, struct mt76_queue *q)
{
	iowrite32(q->head, &q->regs->cpu_idx);
}
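/*
 * Map the txwi, the skb head and all fragments, then queue them as a chain
 * of DMA buffers; on error the mappings are undone and the skb is completed.
 */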
int mt76_dma_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q,
			  struct sk_buff *skb, struct mt76_wcid *wcid,
			  struct ieee80211_sta *sta)
{
	struct mt76_queue_entry e;
	struct mt76_txwi_cache *t;
	struct mt76_queue_buf buf[32];
	struct sk_buff *iter;
	dma_addr_t addr;
	int len, n = 0, ret;
	u32 tx_info = 0;

	t = mt76_get_txwi(dev);
	if (!t) {
		ieee80211_free_txskb(dev->hw, skb);
		return -ENOMEM;
	}

	dma_sync_single_for_cpu(dev->dev, t->dma_addr, sizeof(t->txwi),
				DMA_TO_DEVICE);
	ret = dev->drv->tx_prepare_skb(dev, &t->txwi, skb, q, wcid, sta,
				       &tx_info);
	dma_sync_single_for_device(dev->dev, t->dma_addr, sizeof(t->txwi),
				   DMA_TO_DEVICE);
	if (ret < 0)
		goto free;

	len = skb->len - skb->data_len;
	addr = dma_map_single(dev->dev, skb->data, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev->dev, addr)) {
		ret = -ENOMEM;
		goto free;
	}

	buf[n].addr = t->dma_addr;
	buf[n++].len = dev->drv->txwi_size;
	buf[n].addr = addr;
	buf[n++].len = len;

	skb_walk_frags(skb, iter) {
		if (n == ARRAY_SIZE(buf))
			goto unmap;

		addr = dma_map_single(dev->dev, iter->data, iter->len,
				      DMA_TO_DEVICE);
		if (dma_mapping_error(dev->dev, addr))
			goto unmap;

		buf[n].addr = addr;
		buf[n++].len = iter->len;
	}

	if (q->queued + (n + 1) / 2 >= q->ndesc - 1)
		goto unmap;

	return dev->queue_ops->add_buf(dev, q, buf, n, tx_info, skb, t);

unmap:
	ret = -ENOMEM;
	for (n--; n > 0; n--)
		dma_unmap_single(dev->dev, buf[n].addr, buf[n].len,
				 DMA_TO_DEVICE);

free:
	e.skb = skb;
	e.txwi = t;
	dev->drv->tx_complete_skb(dev, q, &e, true);
	mt76_put_txwi(dev, t);
	return ret;
}
EXPORT_SYMBOL_GPL(mt76_dma_tx_queue_skb);
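/*
 * Refill an RX ring with page-fragment buffers until it is full, then kick
 * the queue so the hardware sees the new entries.
 */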
static int
mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q, bool napi)
{
	dma_addr_t addr;
	void *buf;
	int frames = 0;
	int len = SKB_WITH_OVERHEAD(q->buf_size);
	int offset = q->buf_offset;
	int idx;
	void *(*alloc)(unsigned int fragsz);

	if (napi)
		alloc = napi_alloc_frag;
	else
		alloc = netdev_alloc_frag;

	spin_lock_bh(&q->lock);

	while (q->queued < q->ndesc - 1) {
		struct mt76_queue_buf qbuf;

		buf = alloc(q->buf_size);
		if (!buf)
			break;

		addr = dma_map_single(dev->dev, buf, len, DMA_FROM_DEVICE);
		if (dma_mapping_error(dev->dev, addr)) {
			skb_free_frag(buf);
			break;
		}

		qbuf.addr = addr + offset;
		qbuf.len = len - offset;
		idx = mt76_dma_add_buf(dev, q, &qbuf, 1, 0, buf, NULL);
		frames++;
	}

	if (frames)
		mt76_dma_kick_queue(dev, q);

	spin_unlock_bh(&q->lock);

	return frames;
}
static void
mt76_dma_rx_cleanup(struct mt76_dev *dev, struct mt76_queue *q)
{
	void *buf;
	bool more;

	spin_lock_bh(&q->lock);
	do {
		buf = mt76_dma_dequeue(dev, q, true, NULL, NULL, &more);
		if (!buf)
			break;

		skb_free_frag(buf);
	} while (1);
	spin_unlock_bh(&q->lock);
}
static void
mt76_dma_rx_reset(struct mt76_dev *dev, enum mt76_rxq_id qid)
{
	struct mt76_queue *q = &dev->q_rx[qid];
	int i;

	for (i = 0; i < q->ndesc; i++)
		q->desc[i].ctrl &= ~cpu_to_le32(MT_DMA_CTL_DMA_DONE);

	mt76_dma_rx_cleanup(dev, q);
	mt76_dma_sync_idx(dev, q);
	mt76_dma_rx_fill(dev, q, false);
}
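/*
 * Append a continuation buffer to the skb collected in q->rx_head and pass
 * the frame up once the last fragment has arrived.
 */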
static void
mt76_add_fragment(struct mt76_dev *dev, struct mt76_queue *q, void *data,
		  int len, bool more)
{
	struct sk_buff *skb = q->rx_head;
	struct skb_shared_info *shinfo = skb_shinfo(skb);
	int nr_frags = shinfo->nr_frags;

	if (nr_frags < ARRAY_SIZE(shinfo->frags)) {
		struct page *page = virt_to_head_page(data);
		int offset = data - page_address(page) + q->buf_offset;

		skb_add_rx_frag(skb, nr_frags, page, offset, len, q->buf_size);
	} else {
		skb_free_frag(data);
	}

	if (more)
		return;

	q->rx_head = NULL;
	if (nr_frags < ARRAY_SIZE(shinfo->frags))
		dev->drv->rx_skb(dev, q - dev->q_rx, skb);
	else
		dev_kfree_skb(skb);
}
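/*
 * NAPI RX processing: dequeue completed buffers, build skbs (gathering
 * multi-buffer frames via q->rx_head) and refill the ring afterwards.
 */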
static int
mt76_dma_rx_process(struct mt76_dev *dev, struct mt76_queue *q, int budget)
{
	struct sk_buff *skb;
	unsigned char *data;
	int len;
	int done = 0;
	bool more;

	while (done < budget) {
		u32 info;

		data = mt76_dma_dequeue(dev, q, false, &len, &info, &more);
		if (!data)
			break;

		if (q->rx_head) {
			mt76_add_fragment(dev, q, data, len, more);
			continue;
		}

		skb = build_skb(data, q->buf_size);
		if (!skb) {
			skb_free_frag(data);
			continue;
		}

		skb_reserve(skb, q->buf_offset);
		if (skb->tail + len > skb->end) {
			dev_kfree_skb(skb);
			continue;
		}

		if (q == &dev->q_rx[MT_RXQ_MCU]) {
			u32 *rxfce = (u32 *)skb->cb;
			*rxfce = info;
		}

		__skb_put(skb, len);
		done++;

		if (more) {
			q->rx_head = skb;
			continue;
		}

		dev->drv->rx_skb(dev, q - dev->q_rx, skb);
	}

	mt76_dma_rx_fill(dev, q, true);
	return done;
}
static int
mt76_dma_rx_poll(struct napi_struct *napi, int budget)
{
	struct mt76_dev *dev;
	int qid, done = 0, cur;

	dev = container_of(napi->dev, struct mt76_dev, napi_dev);
	qid = napi - dev->napi;

	rcu_read_lock();

	do {
		cur = mt76_dma_rx_process(dev, &dev->q_rx[qid], budget - done);
		mt76_rx_poll_complete(dev, qid, napi);
		done += cur;
	} while (cur && done < budget);

	rcu_read_unlock();

	if (done < budget) {
		napi_complete(napi);
		dev->drv->rx_poll_complete(dev, qid);
	}

	return done;
}
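/*
 * Register one NAPI context per RX queue on a dummy netdev, pre-fill the
 * RX rings and enable polling.
 */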
static int
mt76_dma_init(struct mt76_dev *dev)
{
	int i;

	init_dummy_netdev(&dev->napi_dev);

	for (i = 0; i < ARRAY_SIZE(dev->q_rx); i++) {
		netif_napi_add(&dev->napi_dev, &dev->napi[i], mt76_dma_rx_poll,
			       64);
		mt76_dma_rx_fill(dev, &dev->q_rx[i], false);
		skb_queue_head_init(&dev->rx_skb[i]);
		napi_enable(&dev->napi[i]);
	}

	return 0;
}
static const struct mt76_queue_ops mt76_dma_ops = {
	.init = mt76_dma_init,
	.alloc = mt76_dma_alloc_queue,
	.add_buf = mt76_dma_add_buf,
	.tx_queue_skb = mt76_dma_tx_queue_skb,
	.tx_cleanup = mt76_dma_tx_cleanup,
	.rx_reset = mt76_dma_rx_reset,
	.kick = mt76_dma_kick_queue,
};
int mt76_dma_attach(struct mt76_dev *dev)
{
	dev->queue_ops = &mt76_dma_ops;
	return 0;
}
EXPORT_SYMBOL_GPL(mt76_dma_attach);
void mt76_dma_cleanup(struct mt76_dev *dev)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(dev->q_tx); i++)
		mt76_dma_tx_cleanup(dev, i, true);

	for (i = 0; i < ARRAY_SIZE(dev->q_rx); i++) {
		netif_napi_del(&dev->napi[i]);
		mt76_dma_rx_cleanup(dev, &dev->q_rx[i]);
	}
}
EXPORT_SYMBOL_GPL(mt76_dma_cleanup);