/*
 * Copyright (c) 2013 Eugene Krasnikov <k.eugene.e@gmail.com>
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
/* DXE - DMA transfer engine
 * We have 2 channels (high prio and low prio) for TX and 2 channels for RX.
 * Data packets are transferred through the low-prio channels,
 * management packets through the high-prio channels.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/interrupt.h>
#include <linux/soc/qcom/smem_state.h>
#include "wcn36xx.h"
#include "txrx.h"
static void wcn36xx_ccu_write_register(struct wcn36xx *wcn, int addr, int data)
{
	wcn36xx_dbg(WCN36XX_DBG_DXE,
		    "wcn36xx_ccu_write_register: addr=%x, data=%x\n",
		    addr, data);

	writel(data, wcn->ccu_base + addr);
}
static void wcn36xx_dxe_write_register(struct wcn36xx *wcn, int addr, int data)
{
	wcn36xx_dbg(WCN36XX_DBG_DXE,
		    "wcn36xx_dxe_write_register: addr=%x, data=%x\n",
		    addr, data);

	writel(data, wcn->dxe_base + addr);
}
static void wcn36xx_dxe_read_register(struct wcn36xx *wcn, int addr, int *data)
{
	*data = readl(wcn->dxe_base + addr);

	wcn36xx_dbg(WCN36XX_DBG_DXE,
		    "wcn36xx_dxe_read_register: addr=%x, data=%x\n",
		    addr, *data);
}
static void wcn36xx_dxe_free_ctl_block(struct wcn36xx_dxe_ch *ch)
{
	struct wcn36xx_dxe_ctl *ctl = ch->head_blk_ctl, *next;
	int i;

	for (i = 0; i < ch->desc_num && ctl; i++) {
		next = ctl->next;
		kfree(ctl);
		ctl = next;
	}
}
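/* Allocate the control blocks for one channel and link them into a ring:
 * each of the desc_num blocks points at the next, and the last one points
 * back at the head, mirroring the circular descriptor ring the hardware
 * walks.
 */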
static int wcn36xx_dxe_allocate_ctl_block(struct wcn36xx_dxe_ch *ch)
{
	struct wcn36xx_dxe_ctl *prev_ctl = NULL;
	struct wcn36xx_dxe_ctl *cur_ctl = NULL;
	int i;

	spin_lock_init(&ch->lock);
	for (i = 0; i < ch->desc_num; i++) {
		cur_ctl = kzalloc(sizeof(*cur_ctl), GFP_KERNEL);
		if (!cur_ctl)
			goto out_fail;

		cur_ctl->ctl_blk_order = i;
		if (i == 0) {
			ch->head_blk_ctl = cur_ctl;
			ch->tail_blk_ctl = cur_ctl;
		} else if (ch->desc_num - 1 == i) {
			prev_ctl->next = cur_ctl;
			cur_ctl->next = ch->head_blk_ctl;
		} else {
			prev_ctl->next = cur_ctl;
		}
		prev_ctl = cur_ctl;
	}
	return 0;

out_fail:
	wcn36xx_dxe_free_ctl_block(ch);
	return -ENOMEM;
}
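/* Configure the four DXE channels (TX low/high, RX low/high), allocate
 * their control-block rings and publish the initial "TX rings empty" state
 * to the firmware over SMSM.
 */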
int wcn36xx_dxe_alloc_ctl_blks(struct wcn36xx *wcn)
{
	int ret;

	wcn->dxe_tx_l_ch.ch_type = WCN36XX_DXE_CH_TX_L;
	wcn->dxe_tx_h_ch.ch_type = WCN36XX_DXE_CH_TX_H;
	wcn->dxe_rx_l_ch.ch_type = WCN36XX_DXE_CH_RX_L;
	wcn->dxe_rx_h_ch.ch_type = WCN36XX_DXE_CH_RX_H;

	wcn->dxe_tx_l_ch.desc_num = WCN36XX_DXE_CH_DESC_NUMB_TX_L;
	wcn->dxe_tx_h_ch.desc_num = WCN36XX_DXE_CH_DESC_NUMB_TX_H;
	wcn->dxe_rx_l_ch.desc_num = WCN36XX_DXE_CH_DESC_NUMB_RX_L;
	wcn->dxe_rx_h_ch.desc_num = WCN36XX_DXE_CH_DESC_NUMB_RX_H;

	wcn->dxe_tx_l_ch.dxe_wq = WCN36XX_DXE_WQ_TX_L;
	wcn->dxe_tx_h_ch.dxe_wq = WCN36XX_DXE_WQ_TX_H;

	wcn->dxe_tx_l_ch.ctrl_bd = WCN36XX_DXE_CTRL_TX_L_BD;
	wcn->dxe_tx_h_ch.ctrl_bd = WCN36XX_DXE_CTRL_TX_H_BD;

	wcn->dxe_tx_l_ch.ctrl_skb = WCN36XX_DXE_CTRL_TX_L_SKB;
	wcn->dxe_tx_h_ch.ctrl_skb = WCN36XX_DXE_CTRL_TX_H_SKB;

	wcn->dxe_tx_l_ch.reg_ctrl = WCN36XX_DXE_REG_CTL_TX_L;
	wcn->dxe_tx_h_ch.reg_ctrl = WCN36XX_DXE_REG_CTL_TX_H;

	wcn->dxe_tx_l_ch.def_ctrl = WCN36XX_DXE_CH_DEFAULT_CTL_TX_L;
	wcn->dxe_tx_h_ch.def_ctrl = WCN36XX_DXE_CH_DEFAULT_CTL_TX_H;

	/* DXE control block allocation */
	ret = wcn36xx_dxe_allocate_ctl_block(&wcn->dxe_tx_l_ch);
	if (ret)
		goto out_err;
	ret = wcn36xx_dxe_allocate_ctl_block(&wcn->dxe_tx_h_ch);
	if (ret)
		goto out_err;
	ret = wcn36xx_dxe_allocate_ctl_block(&wcn->dxe_rx_l_ch);
	if (ret)
		goto out_err;
	ret = wcn36xx_dxe_allocate_ctl_block(&wcn->dxe_rx_h_ch);
	if (ret)
		goto out_err;

	/* Initialize SMSM state: clear TX Enable, set TX Rings Empty */
	ret = qcom_smem_state_update_bits(wcn->tx_enable_state,
					  WCN36XX_SMSM_WLAN_TX_ENABLE |
					  WCN36XX_SMSM_WLAN_TX_RINGS_EMPTY,
					  WCN36XX_SMSM_WLAN_TX_RINGS_EMPTY);
	if (ret)
		goto out_err;

	return 0;

out_err:
	wcn36xx_err("Failed to allocate DXE control blocks\n");
	wcn36xx_dxe_free_ctl_blks(wcn);
	return -ENOMEM;
}
void wcn36xx_dxe_free_ctl_blks(struct wcn36xx *wcn)
{
	wcn36xx_dxe_free_ctl_block(&wcn->dxe_tx_l_ch);
	wcn36xx_dxe_free_ctl_block(&wcn->dxe_tx_h_ch);
	wcn36xx_dxe_free_ctl_block(&wcn->dxe_rx_l_ch);
	wcn36xx_dxe_free_ctl_block(&wcn->dxe_rx_h_ch);
}
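/* Allocate the coherent descriptor ring for one channel and attach each
 * descriptor to its control block. Descriptors are chained through
 * phy_next_l, with the last entry pointing back at the first, so the
 * hardware sees a circular ring.
 */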
static int wcn36xx_dxe_init_descs(struct device *dev, struct wcn36xx_dxe_ch *wcn_ch)
{
	struct wcn36xx_dxe_desc *cur_dxe = NULL;
	struct wcn36xx_dxe_desc *prev_dxe = NULL;
	struct wcn36xx_dxe_ctl *cur_ctl = NULL;
	size_t size;
	int i;

	size = wcn_ch->desc_num * sizeof(struct wcn36xx_dxe_desc);
	wcn_ch->cpu_addr = dma_alloc_coherent(dev, size, &wcn_ch->dma_addr,
					      GFP_KERNEL);
	if (!wcn_ch->cpu_addr)
		return -ENOMEM;

	memset(wcn_ch->cpu_addr, 0, size);

	cur_dxe = (struct wcn36xx_dxe_desc *)wcn_ch->cpu_addr;
	cur_ctl = wcn_ch->head_blk_ctl;

	for (i = 0; i < wcn_ch->desc_num; i++) {
		cur_ctl->desc = cur_dxe;
		cur_ctl->desc_phy_addr = wcn_ch->dma_addr +
			i * sizeof(struct wcn36xx_dxe_desc);

		switch (wcn_ch->ch_type) {
		case WCN36XX_DXE_CH_TX_L:
			cur_dxe->ctrl = WCN36XX_DXE_CTRL_TX_L;
			cur_dxe->dst_addr_l = WCN36XX_DXE_WQ_TX_L;
			break;
		case WCN36XX_DXE_CH_TX_H:
			cur_dxe->ctrl = WCN36XX_DXE_CTRL_TX_H;
			cur_dxe->dst_addr_l = WCN36XX_DXE_WQ_TX_H;
			break;
		case WCN36XX_DXE_CH_RX_L:
			cur_dxe->ctrl = WCN36XX_DXE_CTRL_RX_L;
			cur_dxe->src_addr_l = WCN36XX_DXE_WQ_RX_L;
			break;
		case WCN36XX_DXE_CH_RX_H:
			cur_dxe->ctrl = WCN36XX_DXE_CTRL_RX_H;
			cur_dxe->src_addr_l = WCN36XX_DXE_WQ_RX_H;
			break;
		}

		if (0 == i) {
			cur_dxe->phy_next_l = 0;
		} else if ((0 < i) && (i < wcn_ch->desc_num - 1)) {
			prev_dxe->phy_next_l = cur_ctl->desc_phy_addr;
		} else if (i == (wcn_ch->desc_num - 1)) {
			prev_dxe->phy_next_l = cur_ctl->desc_phy_addr;
			cur_dxe->phy_next_l = wcn_ch->head_blk_ctl->desc_phy_addr;
		}

		cur_ctl = cur_ctl->next;
		prev_dxe = cur_dxe;
		cur_dxe++;
	}

	return 0;
}
static void wcn36xx_dxe_deinit_descs(struct device *dev, struct wcn36xx_dxe_ch *wcn_ch)
{
	size_t size;

	size = wcn_ch->desc_num * sizeof(struct wcn36xx_dxe_desc);
	dma_free_coherent(dev, size, wcn_ch->cpu_addr, wcn_ch->dma_addr);
}
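/* Carve the preallocated BD pool into chunks and hand one to every second
 * control block: TX descriptors are consumed in pairs, the first carrying
 * the BD header and the second the skb payload.
 */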
static void wcn36xx_dxe_init_tx_bd(struct wcn36xx_dxe_ch *ch,
				   struct wcn36xx_dxe_mem_pool *pool)
{
	int i, chunk_size = pool->chunk_size;
	dma_addr_t bd_phy_addr = pool->phy_addr;
	void *bd_cpu_addr = pool->virt_addr;
	struct wcn36xx_dxe_ctl *cur = ch->head_blk_ctl;

	for (i = 0; i < ch->desc_num; i++) {
		/* Only every second dxe needs a bd pointer,
		 * the other will point to the skb data.
		 */
		if (!(i & 1)) {
			cur->bd_phy_addr = bd_phy_addr;
			cur->bd_cpu_addr = bd_cpu_addr;
			bd_phy_addr += chunk_size;
			bd_cpu_addr += chunk_size;
		} else {
			cur->bd_phy_addr = 0;
			cur->bd_cpu_addr = NULL;
		}
		cur = cur->next;
	}
}
static int wcn36xx_dxe_enable_ch_int(struct wcn36xx *wcn, u16 wcn_ch)
{
	int reg_data = 0;

	wcn36xx_dxe_read_register(wcn, WCN36XX_DXE_INT_MASK_REG, &reg_data);
	reg_data |= wcn_ch;
	wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_INT_MASK_REG, reg_data);
	return 0;
}
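/* Allocate an RX skb and DMA-map its data area as the destination buffer
 * of the given descriptor; called with GFP_KERNEL at setup time and
 * GFP_ATOMIC from the RX path when re-arming a slot.
 */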
static int wcn36xx_dxe_fill_skb(struct device *dev,
				struct wcn36xx_dxe_ctl *ctl, gfp_t gfp)
{
	struct wcn36xx_dxe_desc *dxe = ctl->desc;
	struct sk_buff *skb;

	skb = alloc_skb(WCN36XX_PKT_SIZE, gfp);
	if (!skb)
		return -ENOMEM;
	dxe->dst_addr_l = dma_map_single(dev, skb_tail_pointer(skb),
					 WCN36XX_PKT_SIZE, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, dxe->dst_addr_l)) {
		dev_err(dev, "unable to map skb\n");
		kfree_skb(skb);
		return -ENOMEM;
	}
	ctl->skb = skb;
	return 0;
}
static int wcn36xx_dxe_ch_alloc_skb(struct wcn36xx *wcn,
				    struct wcn36xx_dxe_ch *wcn_ch)
{
	struct wcn36xx_dxe_ctl *cur_ctl = NULL;
	int i;

	cur_ctl = wcn_ch->head_blk_ctl;
	for (i = 0; i < wcn_ch->desc_num; i++) {
		wcn36xx_dxe_fill_skb(wcn->dev, cur_ctl, GFP_KERNEL);
		cur_ctl = cur_ctl->next;
	}
	return 0;
}
static void wcn36xx_dxe_ch_free_skbs(struct wcn36xx *wcn,
				     struct wcn36xx_dxe_ch *wcn_ch)
{
	struct wcn36xx_dxe_ctl *cur = wcn_ch->head_blk_ctl;
	int i;

	for (i = 0; i < wcn_ch->desc_num; i++) {
		kfree_skb(cur->skb);
		cur = cur->next;
	}
}
void wcn36xx_dxe_tx_ack_ind(struct wcn36xx *wcn, u32 status)
{
	struct ieee80211_tx_info *info;
	struct sk_buff *skb;
	unsigned long flags;

	spin_lock_irqsave(&wcn->dxe_lock, flags);
	skb = wcn->tx_ack_skb;
	wcn->tx_ack_skb = NULL;
	spin_unlock_irqrestore(&wcn->dxe_lock, flags);

	if (!skb) {
		wcn36xx_warn("Spurious TX complete indication\n");
		return;
	}

	info = IEEE80211_SKB_CB(skb);
	if (status == 1)
		info->flags |= IEEE80211_TX_STAT_ACK;

	wcn36xx_dbg(WCN36XX_DBG_DXE, "dxe tx ack status: %d\n", status);

	ieee80211_tx_status_irqsafe(wcn->hw, skb);
	ieee80211_wake_queues(wcn->hw);
}
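/* Walk a TX ring from tail to head, unmapping and releasing every
 * descriptor the hardware has handed back (VLD bit cleared), and restart
 * the mac80211 queues if we had stopped them on a full ring.
 */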
static void reap_tx_dxes(struct wcn36xx *wcn, struct wcn36xx_dxe_ch *ch)
{
	struct wcn36xx_dxe_ctl *ctl;
	struct ieee80211_tx_info *info;
	unsigned long flags;

	/*
	 * Make at least one pass of the do-while: when the ring is
	 * completely full, head and tail point at the same element, so a
	 * plain while loop would not run any iterations at all.
	 */
	spin_lock_irqsave(&ch->lock, flags);
	ctl = ch->tail_blk_ctl;
	do {
		if (READ_ONCE(ctl->desc->ctrl) & WCN36xx_DXE_CTRL_VLD)
			break;

		if (ctl->skb &&
		    READ_ONCE(ctl->desc->ctrl) & WCN36xx_DXE_CTRL_EOP) {
			dma_unmap_single(wcn->dev, ctl->desc->src_addr_l,
					 ctl->skb->len, DMA_TO_DEVICE);
			info = IEEE80211_SKB_CB(ctl->skb);
			if (!(info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS)) {
				/* Status not requested: free the frame now.
				 * Otherwise keep it until TX status comes.
				 */
				ieee80211_free_txskb(wcn->hw, ctl->skb);
			}

			if (wcn->queues_stopped) {
				wcn->queues_stopped = false;
				ieee80211_wake_queues(wcn->hw);
			}

			ctl->skb = NULL;
		}
		ctl = ctl->next;
	} while (ctl != ch->head_blk_ctl);

	ch->tail_blk_ctl = ctl;
	spin_unlock_irqrestore(&ch->lock, flags);
}
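/* TX-complete interrupt handler: for each TX channel that raised the
 * interrupt, read and acknowledge the channel status, report errors, and
 * reap finished descriptors on DONE or ED events.
 */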
static irqreturn_t wcn36xx_irq_tx_complete(int irq, void *dev)
{
	struct wcn36xx *wcn = (struct wcn36xx *)dev;
	int int_src, int_reason;

	wcn36xx_dxe_read_register(wcn, WCN36XX_DXE_INT_SRC_RAW_REG, &int_src);

	if (int_src & WCN36XX_INT_MASK_CHAN_TX_H) {
		wcn36xx_dxe_read_register(wcn,
					  WCN36XX_DXE_CH_STATUS_REG_ADDR_TX_H,
					  &int_reason);

		wcn36xx_dxe_write_register(wcn,
					   WCN36XX_DXE_0_INT_CLR,
					   WCN36XX_INT_MASK_CHAN_TX_H);

		if (int_reason & WCN36XX_CH_STAT_INT_ERR_MASK) {
			wcn36xx_dxe_write_register(wcn,
						   WCN36XX_DXE_0_INT_ERR_CLR,
						   WCN36XX_INT_MASK_CHAN_TX_H);

			wcn36xx_err("DXE IRQ reported error: 0x%x in high TX channel\n",
				    int_src);
		}

		if (int_reason & WCN36XX_CH_STAT_INT_DONE_MASK) {
			wcn36xx_dxe_write_register(wcn,
						   WCN36XX_DXE_0_INT_DONE_CLR,
						   WCN36XX_INT_MASK_CHAN_TX_H);
		}

		if (int_reason & WCN36XX_CH_STAT_INT_ED_MASK) {
			wcn36xx_dxe_write_register(wcn,
						   WCN36XX_DXE_0_INT_ED_CLR,
						   WCN36XX_INT_MASK_CHAN_TX_H);
		}

		wcn36xx_dbg(WCN36XX_DBG_DXE, "dxe tx ready high, reason %08x\n",
			    int_reason);

		if (int_reason & (WCN36XX_CH_STAT_INT_DONE_MASK |
				  WCN36XX_CH_STAT_INT_ED_MASK))
			reap_tx_dxes(wcn, &wcn->dxe_tx_h_ch);
	}

	if (int_src & WCN36XX_INT_MASK_CHAN_TX_L) {
		wcn36xx_dxe_read_register(wcn,
					  WCN36XX_DXE_CH_STATUS_REG_ADDR_TX_L,
					  &int_reason);

		wcn36xx_dxe_write_register(wcn,
					   WCN36XX_DXE_0_INT_CLR,
					   WCN36XX_INT_MASK_CHAN_TX_L);

		if (int_reason & WCN36XX_CH_STAT_INT_ERR_MASK) {
			wcn36xx_dxe_write_register(wcn,
						   WCN36XX_DXE_0_INT_ERR_CLR,
						   WCN36XX_INT_MASK_CHAN_TX_L);

			wcn36xx_err("DXE IRQ reported error: 0x%x in low TX channel\n",
				    int_src);
		}

		if (int_reason & WCN36XX_CH_STAT_INT_DONE_MASK) {
			wcn36xx_dxe_write_register(wcn,
						   WCN36XX_DXE_0_INT_DONE_CLR,
						   WCN36XX_INT_MASK_CHAN_TX_L);
		}

		if (int_reason & WCN36XX_CH_STAT_INT_ED_MASK) {
			wcn36xx_dxe_write_register(wcn,
						   WCN36XX_DXE_0_INT_ED_CLR,
						   WCN36XX_INT_MASK_CHAN_TX_L);
		}

		wcn36xx_dbg(WCN36XX_DBG_DXE, "dxe tx ready low, reason %08x\n",
			    int_reason);

		if (int_reason & (WCN36XX_CH_STAT_INT_DONE_MASK |
				  WCN36XX_CH_STAT_INT_ED_MASK))
			reap_tx_dxes(wcn, &wcn->dxe_tx_l_ch);
	}

	return IRQ_HANDLED;
}
static irqreturn_t wcn36xx_irq_rx_ready(int irq, void *dev)
{
	struct wcn36xx *wcn = (struct wcn36xx *)dev;

	wcn36xx_dxe_rx_frame(wcn);

	return IRQ_HANDLED;
}
static int wcn36xx_dxe_request_irqs(struct wcn36xx *wcn)
{
	int ret;

	ret = request_irq(wcn->tx_irq, wcn36xx_irq_tx_complete,
			  IRQF_TRIGGER_HIGH, "wcn36xx_tx", wcn);
	if (ret) {
		wcn36xx_err("failed to alloc tx irq\n");
		goto out_err;
	}

	ret = request_irq(wcn->rx_irq, wcn36xx_irq_rx_ready, IRQF_TRIGGER_HIGH,
			  "wcn36xx_rx", wcn);
	if (ret) {
		wcn36xx_err("failed to alloc rx irq\n");
		goto out_txirq;
	}

	enable_irq_wake(wcn->rx_irq);
	return 0;

out_txirq:
	free_irq(wcn->tx_irq, wcn);
out_err:
	return ret;
}
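/* Service one RX channel: acknowledge its status bits, then walk the ring
 * from the current head, passing each filled skb up to mac80211 and
 * re-arming the slot with a fresh buffer before marking the descriptor
 * valid for the hardware again.
 */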
static int wcn36xx_rx_handle_packets(struct wcn36xx *wcn,
				     struct wcn36xx_dxe_ch *ch,
				     u32 ctrl,
				     u32 en_mask,
				     u32 int_mask,
				     u32 status_reg)
{
	struct wcn36xx_dxe_desc *dxe;
	struct wcn36xx_dxe_ctl *ctl;
	dma_addr_t dma_addr;
	struct sk_buff *skb;
	int int_reason;
	int ret;

	wcn36xx_dxe_read_register(wcn, status_reg, &int_reason);
	wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_0_INT_CLR, int_mask);

	if (int_reason & WCN36XX_CH_STAT_INT_ERR_MASK) {
		wcn36xx_dxe_write_register(wcn,
					   WCN36XX_DXE_0_INT_ERR_CLR,
					   int_mask);

		wcn36xx_err("DXE IRQ reported error on RX channel\n");
	}

	if (int_reason & WCN36XX_CH_STAT_INT_DONE_MASK)
		wcn36xx_dxe_write_register(wcn,
					   WCN36XX_DXE_0_INT_DONE_CLR,
					   int_mask);

	if (int_reason & WCN36XX_CH_STAT_INT_ED_MASK)
		wcn36xx_dxe_write_register(wcn,
					   WCN36XX_DXE_0_INT_ED_CLR,
					   int_mask);

	if (!(int_reason & (WCN36XX_CH_STAT_INT_DONE_MASK |
			    WCN36XX_CH_STAT_INT_ED_MASK)))
		return 0;

	spin_lock(&ch->lock);

	ctl = ch->head_blk_ctl;
	dxe = ctl->desc;

	while (!(READ_ONCE(dxe->ctrl) & WCN36xx_DXE_CTRL_VLD)) {
		/* do not read until we own DMA descriptor */
		dma_rmb();

		/* read/modify DMA descriptor */
		skb = ctl->skb;
		dma_addr = dxe->dst_addr_l;
		ret = wcn36xx_dxe_fill_skb(wcn->dev, ctl, GFP_ATOMIC);
		if (0 == ret) {
			/* New skb allocation ok. Use the new one and queue
			 * the old one to the network system.
			 */
			dma_unmap_single(wcn->dev, dma_addr, WCN36XX_PKT_SIZE,
					 DMA_FROM_DEVICE);
			wcn36xx_rx_skb(wcn, skb);
		}
		/* else keep old skb not submitted and reuse it for rx DMA
		 * (dropping the packet that it contained)
		 */

		/* flush descriptor changes before re-marking as valid */
		dma_wmb();
		dxe->ctrl = ctrl;

		ctl = ctl->next;
		dxe = ctl->desc;
	}
	wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_ENCH_ADDR, en_mask);

	ch->head_blk_ctl = ctl;
	spin_unlock(&ch->lock);
	return 0;
}
void wcn36xx_dxe_rx_frame(struct wcn36xx *wcn)
{
	int int_src;

	wcn36xx_dxe_read_register(wcn, WCN36XX_DXE_INT_SRC_RAW_REG, &int_src);

	/* RX_LOW_PRI */
	if (int_src & WCN36XX_DXE_INT_CH1_MASK)
		wcn36xx_rx_handle_packets(wcn, &wcn->dxe_rx_l_ch,
					  WCN36XX_DXE_CTRL_RX_L,
					  WCN36XX_DXE_INT_CH1_MASK,
					  WCN36XX_INT_MASK_CHAN_RX_L,
					  WCN36XX_DXE_CH_STATUS_REG_ADDR_RX_L);

	/* RX_HIGH_PRI */
	if (int_src & WCN36XX_DXE_INT_CH3_MASK)
		wcn36xx_rx_handle_packets(wcn, &wcn->dxe_rx_h_ch,
					  WCN36XX_DXE_CTRL_RX_H,
					  WCN36XX_DXE_INT_CH3_MASK,
					  WCN36XX_INT_MASK_CHAN_RX_H,
					  WCN36XX_DXE_CH_STATUS_REG_ADDR_RX_H);

	if (!int_src)
		wcn36xx_warn("No DXE interrupt pending\n");
}
int wcn36xx_dxe_allocate_mem_pools(struct wcn36xx *wcn)
{
	size_t s;
	void *cpu_addr;

	/* Allocate BD headers for MGMT frames */

	/* Where this comes from, ask QC */
	wcn->mgmt_mem_pool.chunk_size = WCN36XX_BD_CHUNK_SIZE +
		16 - (WCN36XX_BD_CHUNK_SIZE % 8);
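	/* Worked example with an illustrative size, not the real macro value:
	 * if WCN36XX_BD_CHUNK_SIZE were 42, then 42 % 8 = 2 and chunk_size
	 * would be 42 + 16 - 2 = 56, i.e. rounded up to a multiple of 8 with
	 * at least 8 bytes of slack.
	 */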
	s = wcn->mgmt_mem_pool.chunk_size * WCN36XX_DXE_CH_DESC_NUMB_TX_H;
	cpu_addr = dma_alloc_coherent(wcn->dev, s, &wcn->mgmt_mem_pool.phy_addr,
				      GFP_KERNEL);
	if (!cpu_addr)
		goto out_err;
	wcn->mgmt_mem_pool.virt_addr = cpu_addr;
	memset(cpu_addr, 0, s);

	/* Allocate BD headers for DATA frames */

	/* Where this comes from, ask QC */
	wcn->data_mem_pool.chunk_size = WCN36XX_BD_CHUNK_SIZE +
		16 - (WCN36XX_BD_CHUNK_SIZE % 8);

	s = wcn->data_mem_pool.chunk_size * WCN36XX_DXE_CH_DESC_NUMB_TX_L;
	cpu_addr = dma_alloc_coherent(wcn->dev, s, &wcn->data_mem_pool.phy_addr,
				      GFP_KERNEL);
	if (!cpu_addr)
		goto out_err;
	wcn->data_mem_pool.virt_addr = cpu_addr;
	memset(cpu_addr, 0, s);

	return 0;

out_err:
	wcn36xx_dxe_free_mem_pools(wcn);
	wcn36xx_err("Failed to allocate BD mempool\n");
	return -ENOMEM;
}
void wcn36xx_dxe_free_mem_pools(struct wcn36xx *wcn)
{
	if (wcn->mgmt_mem_pool.virt_addr)
		dma_free_coherent(wcn->dev, wcn->mgmt_mem_pool.chunk_size *
				  WCN36XX_DXE_CH_DESC_NUMB_TX_H,
				  wcn->mgmt_mem_pool.virt_addr,
				  wcn->mgmt_mem_pool.phy_addr);

	if (wcn->data_mem_pool.virt_addr)
		dma_free_coherent(wcn->dev, wcn->data_mem_pool.chunk_size *
				  WCN36XX_DXE_CH_DESC_NUMB_TX_L,
				  wcn->data_mem_pool.virt_addr,
				  wcn->data_mem_pool.phy_addr);
}
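/* Queue one frame on a TX channel. Each transmission takes a descriptor
 * pair: the first carries the BD header from the mem pool, the second the
 * skb payload. The skb descriptor is marked valid before the BD
 * descriptor, presumably so the engine never starts on a half-initialized
 * pair.
 */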
int wcn36xx_dxe_tx_frame(struct wcn36xx *wcn,
			 struct wcn36xx_vif *vif_priv,
			 struct wcn36xx_tx_bd *bd,
			 struct sk_buff *skb,
			 bool is_low)
{
	struct wcn36xx_dxe_desc *desc_bd, *desc_skb;
	struct wcn36xx_dxe_ctl *ctl_bd, *ctl_skb;
	struct wcn36xx_dxe_ch *ch = NULL;
	unsigned long flags;
	int ret;

	ch = is_low ? &wcn->dxe_tx_l_ch : &wcn->dxe_tx_h_ch;

	spin_lock_irqsave(&ch->lock, flags);
	ctl_bd = ch->head_blk_ctl;
	ctl_skb = ctl_bd->next;

	/*
	 * If the skb is not NULL, we have caught up with the tail of the
	 * ring, hence the ring is full. Stop the queues to let mac80211 back
	 * off until the ring has an empty slot again.
	 */
	if (ctl_skb->skb) {
		ieee80211_stop_queues(wcn->hw);
		wcn->queues_stopped = true;
		spin_unlock_irqrestore(&ch->lock, flags);
		return -EBUSY;
	}

	if (unlikely(ctl_skb->bd_cpu_addr)) {
		wcn36xx_err("bd_cpu_addr must be NULL for skb DXE\n");
		ret = -EINVAL;
		goto unlock;
	}

	desc_bd = ctl_bd->desc;
	desc_skb = ctl_skb->desc;

	ctl_bd->skb = NULL;

	/* write buffer descriptor */
	memcpy(ctl_bd->bd_cpu_addr, bd, sizeof(*bd));

	/* Set source address of the BD we send */
	desc_bd->src_addr_l = ctl_bd->bd_phy_addr;
	desc_bd->dst_addr_l = ch->dxe_wq;
	desc_bd->fr_len = sizeof(struct wcn36xx_tx_bd);

	wcn36xx_dbg(WCN36XX_DBG_DXE, "DXE TX\n");

	wcn36xx_dbg_dump(WCN36XX_DBG_DXE_DUMP, "DESC1 >>> ",
			 (char *)desc_bd, sizeof(*desc_bd));
	wcn36xx_dbg_dump(WCN36XX_DBG_DXE_DUMP,
			 "BD   >>> ", (char *)ctl_bd->bd_cpu_addr,
			 sizeof(struct wcn36xx_tx_bd));

	desc_skb->src_addr_l = dma_map_single(wcn->dev,
					      skb->data,
					      skb->len,
					      DMA_TO_DEVICE);
	if (dma_mapping_error(wcn->dev, desc_skb->src_addr_l)) {
		dev_err(wcn->dev, "unable to DMA map src_addr_l\n");
		ret = -ENOMEM;
		goto unlock;
	}

	ctl_skb->skb = skb;
	desc_skb->dst_addr_l = ch->dxe_wq;
	desc_skb->fr_len = ctl_skb->skb->len;

	wcn36xx_dbg_dump(WCN36XX_DBG_DXE_DUMP, "DESC2 >>> ",
			 (char *)desc_skb, sizeof(*desc_skb));
	wcn36xx_dbg_dump(WCN36XX_DBG_DXE_DUMP, "SKB   >>> ",
			 (char *)ctl_skb->skb->data, ctl_skb->skb->len);

	/* Move the head of the ring to the next empty descriptor */
	ch->head_blk_ctl = ctl_skb->next;

	/* Commit all previous writes and set descriptors to VALID */
	wmb();
	desc_skb->ctrl = ch->ctrl_skb;
	wmb();
	desc_bd->ctrl = ch->ctrl_bd;

	/*
	 * When connected and trying to send a data frame, the chip can be in
	 * sleep mode and writing to the register will not wake it up.
	 * Instead, notify the chip about the new frame through the SMSM bus.
	 */
	if (is_low && vif_priv->pw_state == WCN36XX_BMPS) {
		qcom_smem_state_update_bits(wcn->tx_rings_empty_state,
					    WCN36XX_SMSM_WLAN_TX_ENABLE,
					    WCN36XX_SMSM_WLAN_TX_ENABLE);
	} else {
		/* indicate End Of Packet and generate interrupt on descriptor
		 * done
		 */
		wcn36xx_dxe_write_register(wcn,
					   ch->reg_ctrl, ch->def_ctrl);
	}

	ret = 0;
unlock:
	spin_unlock_irqrestore(&ch->lock, flags);
	return ret;
}
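/* Bring the DXE engine up: reset the block, route the RX-available and
 * transfer-done interrupts, then program each channel's ring head,
 * work-queue address and interrupt mask. RX channels also get their
 * buffers preallocated here.
 */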
int wcn36xx_dxe_init(struct wcn36xx *wcn)
{
	int reg_data = 0, ret;

	reg_data = WCN36XX_DXE_REG_RESET;
	wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_REG_CSR_RESET, reg_data);

	/* Select channels for rx avail and xfer done interrupts... */
	reg_data = (WCN36XX_DXE_INT_CH3_MASK | WCN36XX_DXE_INT_CH1_MASK) << 16 |
		    WCN36XX_DXE_INT_CH0_MASK | WCN36XX_DXE_INT_CH4_MASK;
	if (wcn->is_pronto)
		wcn36xx_ccu_write_register(wcn, WCN36XX_CCU_DXE_INT_SELECT_PRONTO, reg_data);
	else
		wcn36xx_ccu_write_register(wcn, WCN36XX_CCU_DXE_INT_SELECT_RIVA, reg_data);

	/***************************************/
	/* Init descriptors for TX LOW channel */
	/***************************************/
	ret = wcn36xx_dxe_init_descs(wcn->dev, &wcn->dxe_tx_l_ch);
	if (ret) {
		dev_err(wcn->dev, "Error allocating descriptor\n");
		return ret;
	}
	wcn36xx_dxe_init_tx_bd(&wcn->dxe_tx_l_ch, &wcn->data_mem_pool);

	/* Write channel head to the NEXT register */
	wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_CH_NEXT_DESC_ADDR_TX_L,
				   wcn->dxe_tx_l_ch.head_blk_ctl->desc_phy_addr);

	/* Program DMA destination addr for TX LOW */
	wcn36xx_dxe_write_register(wcn,
				   WCN36XX_DXE_CH_DEST_ADDR_TX_L,
				   WCN36XX_DXE_WQ_TX_L);

	wcn36xx_dxe_read_register(wcn, WCN36XX_DXE_REG_CH_EN, &reg_data);
	wcn36xx_dxe_enable_ch_int(wcn, WCN36XX_INT_MASK_CHAN_TX_L);

	/****************************************/
	/* Init descriptors for TX HIGH channel */
	/****************************************/
	ret = wcn36xx_dxe_init_descs(wcn->dev, &wcn->dxe_tx_h_ch);
	if (ret) {
		dev_err(wcn->dev, "Error allocating descriptor\n");
		goto out_err_txh_ch;
	}

	wcn36xx_dxe_init_tx_bd(&wcn->dxe_tx_h_ch, &wcn->mgmt_mem_pool);

	/* Write channel head to the NEXT register */
	wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_CH_NEXT_DESC_ADDR_TX_H,
				   wcn->dxe_tx_h_ch.head_blk_ctl->desc_phy_addr);

	/* Program DMA destination addr for TX HIGH */
	wcn36xx_dxe_write_register(wcn,
				   WCN36XX_DXE_CH_DEST_ADDR_TX_H,
				   WCN36XX_DXE_WQ_TX_H);

	wcn36xx_dxe_read_register(wcn, WCN36XX_DXE_REG_CH_EN, &reg_data);

	/* Enable channel interrupts */
	wcn36xx_dxe_enable_ch_int(wcn, WCN36XX_INT_MASK_CHAN_TX_H);

	/***************************************/
	/* Init descriptors for RX LOW channel */
	/***************************************/
	ret = wcn36xx_dxe_init_descs(wcn->dev, &wcn->dxe_rx_l_ch);
	if (ret) {
		dev_err(wcn->dev, "Error allocating descriptor\n");
		goto out_err_rxl_ch;
	}

	/* For RX we need to preallocate buffers */
	wcn36xx_dxe_ch_alloc_skb(wcn, &wcn->dxe_rx_l_ch);

	/* Write channel head to the NEXT register */
	wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_CH_NEXT_DESC_ADDR_RX_L,
				   wcn->dxe_rx_l_ch.head_blk_ctl->desc_phy_addr);

	/* Write DMA source address */
	wcn36xx_dxe_write_register(wcn,
				   WCN36XX_DXE_CH_SRC_ADDR_RX_L,
				   WCN36XX_DXE_WQ_RX_L);

	/* Program preallocated destination address */
	wcn36xx_dxe_write_register(wcn,
				   WCN36XX_DXE_CH_DEST_ADDR_RX_L,
				   wcn->dxe_rx_l_ch.head_blk_ctl->desc->phy_next_l);

	/* Enable default control registers */
	wcn36xx_dxe_write_register(wcn,
				   WCN36XX_DXE_REG_CTL_RX_L,
				   WCN36XX_DXE_CH_DEFAULT_CTL_RX_L);

	/* Enable channel interrupts */
	wcn36xx_dxe_enable_ch_int(wcn, WCN36XX_INT_MASK_CHAN_RX_L);

	/****************************************/
	/* Init descriptors for RX HIGH channel */
	/****************************************/
	ret = wcn36xx_dxe_init_descs(wcn->dev, &wcn->dxe_rx_h_ch);
	if (ret) {
		dev_err(wcn->dev, "Error allocating descriptor\n");
		goto out_err_rxh_ch;
	}

	/* For RX we need to preallocate buffers */
	wcn36xx_dxe_ch_alloc_skb(wcn, &wcn->dxe_rx_h_ch);

	/* Write channel head to the NEXT register */
	wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_CH_NEXT_DESC_ADDR_RX_H,
				   wcn->dxe_rx_h_ch.head_blk_ctl->desc_phy_addr);

	/* Write DMA source address */
	wcn36xx_dxe_write_register(wcn,
				   WCN36XX_DXE_CH_SRC_ADDR_RX_H,
				   WCN36XX_DXE_WQ_RX_H);

	/* Program preallocated destination address */
	wcn36xx_dxe_write_register(wcn,
				   WCN36XX_DXE_CH_DEST_ADDR_RX_H,
				   wcn->dxe_rx_h_ch.head_blk_ctl->desc->phy_next_l);

	/* Enable default control registers */
	wcn36xx_dxe_write_register(wcn,
				   WCN36XX_DXE_REG_CTL_RX_H,
				   WCN36XX_DXE_CH_DEFAULT_CTL_RX_H);

	/* Enable channel interrupts */
	wcn36xx_dxe_enable_ch_int(wcn, WCN36XX_INT_MASK_CHAN_RX_H);

	ret = wcn36xx_dxe_request_irqs(wcn);
	if (ret < 0)
		goto out_err_irq;

	return 0;

out_err_irq:
	wcn36xx_dxe_deinit_descs(wcn->dev, &wcn->dxe_rx_h_ch);
out_err_rxh_ch:
	wcn36xx_dxe_deinit_descs(wcn->dev, &wcn->dxe_rx_l_ch);
out_err_rxl_ch:
	wcn36xx_dxe_deinit_descs(wcn->dev, &wcn->dxe_tx_h_ch);
out_err_txh_ch:
	wcn36xx_dxe_deinit_descs(wcn->dev, &wcn->dxe_tx_l_ch);

	return ret;
}
void wcn36xx_dxe_deinit(struct wcn36xx *wcn)
{
	free_irq(wcn->tx_irq, wcn);
	free_irq(wcn->rx_irq, wcn);

	if (wcn->tx_ack_skb) {
		ieee80211_tx_status_irqsafe(wcn->hw, wcn->tx_ack_skb);
		wcn->tx_ack_skb = NULL;
	}

	wcn36xx_dxe_ch_free_skbs(wcn, &wcn->dxe_rx_l_ch);
	wcn36xx_dxe_ch_free_skbs(wcn, &wcn->dxe_rx_h_ch);

	wcn36xx_dxe_deinit_descs(wcn->dev, &wcn->dxe_tx_l_ch);
	wcn36xx_dxe_deinit_descs(wcn->dev, &wcn->dxe_tx_h_ch);
	wcn36xx_dxe_deinit_descs(wcn->dev, &wcn->dxe_rx_l_ch);
	wcn36xx_dxe_deinit_descs(wcn->dev, &wcn->dxe_rx_h_ch);
}