/*
 * Copyright (c) 2018 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
17 #include <linux/clk.h>
18 #include <linux/kernel.h>
19 #include <linux/module.h>
21 #include <linux/of_device.h>
22 #include <linux/platform_device.h>
23 #include <linux/regulator/consumer.h>
/* Delay (ms) before retrying a failed rx buffer post via rx_post_retry timer */
#define ATH10K_SNOC_RX_POST_RETRY_MS 50
/* CE pipe serviced by polling whenever an HTT rx pipe is processed */
#define CE_POLL_PIPE 4
/* Per-CE irq names handed to request_irq(); indexed by CE id */
static char *const ce_name[] = {
	"WLAN_CE_0",
	"WLAN_CE_1",
	"WLAN_CE_2",
	"WLAN_CE_3",
	"WLAN_CE_4",
	"WLAN_CE_5",
	"WLAN_CE_6",
	"WLAN_CE_7",
	"WLAN_CE_8",
	"WLAN_CE_9",
	"WLAN_CE_10",
	"WLAN_CE_11",
};
49 static struct ath10k_wcn3990_vreg_info vreg_cfg[] = {
50 {NULL, "vdd-0.8-cx-mx", 800000, 800000, 0, 0, false},
51 {NULL, "vdd-1.8-xo", 1800000, 1800000, 0, 0, false},
52 {NULL, "vdd-1.3-rfa", 1304000, 1304000, 0, 0, false},
53 {NULL, "vdd-3.3-ch0", 3312000, 3312000, 0, 0, false},
56 static struct ath10k_wcn3990_clk_info clk_cfg[] = {
57 {NULL, "cxo_ref_clk_pin", 0, false},
/* CE send/recv completion callbacks (defined below); referenced from
 * host_ce_config_wlan[] before their definitions.
 */
static void ath10k_snoc_htc_tx_cb(struct ath10k_ce_pipe *ce_state);
static void ath10k_snoc_htt_tx_cb(struct ath10k_ce_pipe *ce_state);
static void ath10k_snoc_htc_rx_cb(struct ath10k_ce_pipe *ce_state);
static void ath10k_snoc_htt_rx_cb(struct ath10k_ce_pipe *ce_state);
static void ath10k_snoc_htt_htc_rx_cb(struct ath10k_ce_pipe *ce_state);
66 static const struct ath10k_snoc_drv_priv drv_priv = {
67 .hw_rev = ATH10K_HW_WCN3990,
68 .dma_mask = DMA_BIT_MASK(37),
71 static struct ce_attr host_ce_config_wlan[] = {
72 /* CE0: host->target HTC control streams */
74 .flags = CE_ATTR_FLAGS,
78 .send_cb = ath10k_snoc_htc_tx_cb,
81 /* CE1: target->host HTT + HTC control */
83 .flags = CE_ATTR_FLAGS,
87 .recv_cb = ath10k_snoc_htt_htc_rx_cb,
90 /* CE2: target->host WMI */
92 .flags = CE_ATTR_FLAGS,
96 .recv_cb = ath10k_snoc_htc_rx_cb,
99 /* CE3: host->target WMI */
101 .flags = CE_ATTR_FLAGS,
105 .send_cb = ath10k_snoc_htc_tx_cb,
108 /* CE4: host->target HTT */
110 .flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
114 .send_cb = ath10k_snoc_htt_tx_cb,
117 /* CE5: target->host HTT (ipa_uc->target ) */
119 .flags = CE_ATTR_FLAGS,
122 .dest_nentries = 512,
123 .recv_cb = ath10k_snoc_htt_rx_cb,
126 /* CE6: target autonomous hif_memcpy */
128 .flags = CE_ATTR_FLAGS,
134 /* CE7: ce_diag, the Diagnostic Window */
136 .flags = CE_ATTR_FLAGS,
142 /* CE8: Target to uMC */
144 .flags = CE_ATTR_FLAGS,
147 .dest_nentries = 128,
150 /* CE9 target->host HTT */
152 .flags = CE_ATTR_FLAGS,
155 .dest_nentries = 512,
156 .recv_cb = ath10k_snoc_htt_htc_rx_cb,
159 /* CE10: target->host HTT */
161 .flags = CE_ATTR_FLAGS,
164 .dest_nentries = 512,
165 .recv_cb = ath10k_snoc_htt_htc_rx_cb,
168 /* CE11: target -> host PKTLOG */
170 .flags = CE_ATTR_FLAGS,
173 .dest_nentries = 512,
174 .recv_cb = ath10k_snoc_htt_htc_rx_cb,
178 static struct service_to_pipe target_service_to_ce_map_wlan[] = {
180 __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VO),
181 __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
185 __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VO),
186 __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
190 __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BK),
191 __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
195 __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BK),
196 __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
200 __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BE),
201 __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
205 __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BE),
206 __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
210 __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VI),
211 __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
215 __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VI),
216 __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
220 __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_CONTROL),
221 __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
225 __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_CONTROL),
226 __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
230 __cpu_to_le32(ATH10K_HTC_SVC_ID_RSVD_CTRL),
231 __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
235 __cpu_to_le32(ATH10K_HTC_SVC_ID_RSVD_CTRL),
236 __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
240 __cpu_to_le32(ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS),
241 __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
245 __cpu_to_le32(ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS),
246 __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
250 __cpu_to_le32(ATH10K_HTC_SVC_ID_HTT_DATA_MSG),
251 __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
255 __cpu_to_le32(ATH10K_HTC_SVC_ID_HTT_DATA_MSG),
256 __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
260 __cpu_to_le32(ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS),
261 __cpu_to_le32(PIPEDIR_OUT),
264 { /* in = DL = target -> host */
265 __cpu_to_le32(ATH10K_HTC_SVC_ID_HTT_DATA2_MSG),
266 __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
269 { /* in = DL = target -> host */
270 __cpu_to_le32(ATH10K_HTC_SVC_ID_HTT_DATA3_MSG),
271 __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
274 { /* in = DL = target -> host pktlog */
275 __cpu_to_le32(ATH10K_HTC_SVC_ID_HTT_LOG_MSG),
276 __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
279 /* (Additions here) */
288 void ath10k_snoc_write32(struct ath10k *ar, u32 offset, u32 value)
290 struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
292 iowrite32(value, ar_snoc->mem + offset);
295 u32 ath10k_snoc_read32(struct ath10k *ar, u32 offset)
297 struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
300 val = ioread32(ar_snoc->mem + offset);
305 static int __ath10k_snoc_rx_post_buf(struct ath10k_snoc_pipe *pipe)
307 struct ath10k_ce_pipe *ce_pipe = pipe->ce_hdl;
308 struct ath10k *ar = pipe->hif_ce_state;
309 struct ath10k_ce *ce = ath10k_ce_priv(ar);
314 skb = dev_alloc_skb(pipe->buf_sz);
318 WARN_ONCE((unsigned long)skb->data & 3, "unaligned skb");
320 paddr = dma_map_single(ar->dev, skb->data,
321 skb->len + skb_tailroom(skb),
323 if (unlikely(dma_mapping_error(ar->dev, paddr))) {
324 ath10k_warn(ar, "failed to dma map snoc rx buf\n");
325 dev_kfree_skb_any(skb);
329 ATH10K_SKB_RXCB(skb)->paddr = paddr;
331 spin_lock_bh(&ce->ce_lock);
332 ret = ce_pipe->ops->ce_rx_post_buf(ce_pipe, skb, paddr);
333 spin_unlock_bh(&ce->ce_lock);
335 dma_unmap_single(ar->dev, paddr, skb->len + skb_tailroom(skb),
337 dev_kfree_skb_any(skb);
344 static void ath10k_snoc_rx_post_pipe(struct ath10k_snoc_pipe *pipe)
346 struct ath10k *ar = pipe->hif_ce_state;
347 struct ath10k_ce *ce = ath10k_ce_priv(ar);
348 struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
349 struct ath10k_ce_pipe *ce_pipe = pipe->ce_hdl;
352 if (pipe->buf_sz == 0)
355 if (!ce_pipe->dest_ring)
358 spin_lock_bh(&ce->ce_lock);
359 num = __ath10k_ce_rx_num_free_bufs(ce_pipe);
360 spin_unlock_bh(&ce->ce_lock);
362 ret = __ath10k_snoc_rx_post_buf(pipe);
366 ath10k_warn(ar, "failed to post rx buf: %d\n", ret);
367 mod_timer(&ar_snoc->rx_post_retry, jiffies +
368 ATH10K_SNOC_RX_POST_RETRY_MS);
374 static void ath10k_snoc_rx_post(struct ath10k *ar)
376 struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
379 for (i = 0; i < CE_COUNT; i++)
380 ath10k_snoc_rx_post_pipe(&ar_snoc->pipe_info[i]);
383 static void ath10k_snoc_process_rx_cb(struct ath10k_ce_pipe *ce_state,
384 void (*callback)(struct ath10k *ar,
385 struct sk_buff *skb))
387 struct ath10k *ar = ce_state->ar;
388 struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
389 struct ath10k_snoc_pipe *pipe_info = &ar_snoc->pipe_info[ce_state->id];
391 struct sk_buff_head list;
392 void *transfer_context;
393 unsigned int nbytes, max_nbytes;
395 __skb_queue_head_init(&list);
396 while (ath10k_ce_completed_recv_next(ce_state, &transfer_context,
398 skb = transfer_context;
399 max_nbytes = skb->len + skb_tailroom(skb);
400 dma_unmap_single(ar->dev, ATH10K_SKB_RXCB(skb)->paddr,
401 max_nbytes, DMA_FROM_DEVICE);
403 if (unlikely(max_nbytes < nbytes)) {
404 ath10k_warn(ar, "rxed more than expected (nbytes %d, max %d)",
406 dev_kfree_skb_any(skb);
410 skb_put(skb, nbytes);
411 __skb_queue_tail(&list, skb);
414 while ((skb = __skb_dequeue(&list))) {
415 ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc rx ce pipe %d len %d\n",
416 ce_state->id, skb->len);
421 ath10k_snoc_rx_post_pipe(pipe_info);
424 static void ath10k_snoc_htc_rx_cb(struct ath10k_ce_pipe *ce_state)
426 ath10k_snoc_process_rx_cb(ce_state, ath10k_htc_rx_completion_handler);
429 static void ath10k_snoc_htt_htc_rx_cb(struct ath10k_ce_pipe *ce_state)
431 /* CE4 polling needs to be done whenever CE pipe which transports
432 * HTT Rx (target->host) is processed.
434 ath10k_ce_per_engine_service(ce_state->ar, CE_POLL_PIPE);
436 ath10k_snoc_process_rx_cb(ce_state, ath10k_htc_rx_completion_handler);
439 static void ath10k_snoc_htt_rx_deliver(struct ath10k *ar, struct sk_buff *skb)
441 skb_pull(skb, sizeof(struct ath10k_htc_hdr));
442 ath10k_htt_t2h_msg_handler(ar, skb);
445 static void ath10k_snoc_htt_rx_cb(struct ath10k_ce_pipe *ce_state)
447 ath10k_ce_per_engine_service(ce_state->ar, CE_POLL_PIPE);
448 ath10k_snoc_process_rx_cb(ce_state, ath10k_snoc_htt_rx_deliver);
451 static void ath10k_snoc_rx_replenish_retry(struct timer_list *t)
453 struct ath10k_snoc *ar_snoc = from_timer(ar_snoc, t, rx_post_retry);
454 struct ath10k *ar = ar_snoc->ar;
456 ath10k_snoc_rx_post(ar);
459 static void ath10k_snoc_htc_tx_cb(struct ath10k_ce_pipe *ce_state)
461 struct ath10k *ar = ce_state->ar;
462 struct sk_buff_head list;
465 __skb_queue_head_init(&list);
466 while (ath10k_ce_completed_send_next(ce_state, (void **)&skb) == 0) {
470 __skb_queue_tail(&list, skb);
473 while ((skb = __skb_dequeue(&list)))
474 ath10k_htc_tx_completion_handler(ar, skb);
477 static void ath10k_snoc_htt_tx_cb(struct ath10k_ce_pipe *ce_state)
479 struct ath10k *ar = ce_state->ar;
482 while (ath10k_ce_completed_send_next(ce_state, (void **)&skb) == 0) {
486 dma_unmap_single(ar->dev, ATH10K_SKB_CB(skb)->paddr,
487 skb->len, DMA_TO_DEVICE);
488 ath10k_htt_hif_tx_complete(ar, skb);
492 static int ath10k_snoc_hif_tx_sg(struct ath10k *ar, u8 pipe_id,
493 struct ath10k_hif_sg_item *items, int n_items)
495 struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
496 struct ath10k_ce *ce = ath10k_ce_priv(ar);
497 struct ath10k_snoc_pipe *snoc_pipe;
498 struct ath10k_ce_pipe *ce_pipe;
501 snoc_pipe = &ar_snoc->pipe_info[pipe_id];
502 ce_pipe = snoc_pipe->ce_hdl;
503 spin_lock_bh(&ce->ce_lock);
505 for (i = 0; i < n_items - 1; i++) {
506 ath10k_dbg(ar, ATH10K_DBG_SNOC,
507 "snoc tx item %d paddr %pad len %d n_items %d\n",
508 i, &items[i].paddr, items[i].len, n_items);
510 err = ath10k_ce_send_nolock(ce_pipe,
511 items[i].transfer_context,
514 items[i].transfer_id,
515 CE_SEND_FLAG_GATHER);
520 ath10k_dbg(ar, ATH10K_DBG_SNOC,
521 "snoc tx item %d paddr %pad len %d n_items %d\n",
522 i, &items[i].paddr, items[i].len, n_items);
524 err = ath10k_ce_send_nolock(ce_pipe,
525 items[i].transfer_context,
528 items[i].transfer_id,
533 spin_unlock_bh(&ce->ce_lock);
539 __ath10k_ce_send_revert(ce_pipe);
541 spin_unlock_bh(&ce->ce_lock);
545 static int ath10k_snoc_hif_get_target_info(struct ath10k *ar,
546 struct bmi_target_info *target_info)
548 target_info->version = ATH10K_HW_WCN3990;
549 target_info->type = ATH10K_HW_WCN3990;
554 static u16 ath10k_snoc_hif_get_free_queue_number(struct ath10k *ar, u8 pipe)
556 struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
558 ath10k_dbg(ar, ATH10K_DBG_SNOC, "hif get free queue number\n");
560 return ath10k_ce_num_free_src_entries(ar_snoc->pipe_info[pipe].ce_hdl);
563 static void ath10k_snoc_hif_send_complete_check(struct ath10k *ar, u8 pipe,
568 ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc hif send complete check\n");
571 resources = ath10k_snoc_hif_get_free_queue_number(ar, pipe);
573 if (resources > (host_ce_config_wlan[pipe].src_nentries >> 1))
576 ath10k_ce_per_engine_service(ar, pipe);
579 static int ath10k_snoc_hif_map_service_to_pipe(struct ath10k *ar,
581 u8 *ul_pipe, u8 *dl_pipe)
583 const struct service_to_pipe *entry;
584 bool ul_set = false, dl_set = false;
587 ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc hif map service\n");
589 for (i = 0; i < ARRAY_SIZE(target_service_to_ce_map_wlan); i++) {
590 entry = &target_service_to_ce_map_wlan[i];
592 if (__le32_to_cpu(entry->service_id) != service_id)
595 switch (__le32_to_cpu(entry->pipedir)) {
600 *dl_pipe = __le32_to_cpu(entry->pipenum);
605 *ul_pipe = __le32_to_cpu(entry->pipenum);
611 *dl_pipe = __le32_to_cpu(entry->pipenum);
612 *ul_pipe = __le32_to_cpu(entry->pipenum);
619 if (WARN_ON(!ul_set || !dl_set))
625 static void ath10k_snoc_hif_get_default_pipe(struct ath10k *ar,
626 u8 *ul_pipe, u8 *dl_pipe)
628 ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc hif get default pipe\n");
630 (void)ath10k_snoc_hif_map_service_to_pipe(ar,
631 ATH10K_HTC_SVC_ID_RSVD_CTRL,
/* Mask all copy engine interrupts */
static inline void ath10k_snoc_irq_disable(struct ath10k *ar)
{
	ath10k_ce_disable_interrupts(ar);
}
/* Unmask all copy engine interrupts */
static inline void ath10k_snoc_irq_enable(struct ath10k *ar)
{
	ath10k_ce_enable_interrupts(ar);
}
645 static void ath10k_snoc_rx_pipe_cleanup(struct ath10k_snoc_pipe *snoc_pipe)
647 struct ath10k_ce_pipe *ce_pipe;
648 struct ath10k_ce_ring *ce_ring;
653 ar = snoc_pipe->hif_ce_state;
654 ce_pipe = snoc_pipe->ce_hdl;
655 ce_ring = ce_pipe->dest_ring;
660 if (!snoc_pipe->buf_sz)
663 for (i = 0; i < ce_ring->nentries; i++) {
664 skb = ce_ring->per_transfer_context[i];
668 ce_ring->per_transfer_context[i] = NULL;
670 dma_unmap_single(ar->dev, ATH10K_SKB_RXCB(skb)->paddr,
671 skb->len + skb_tailroom(skb),
673 dev_kfree_skb_any(skb);
677 static void ath10k_snoc_tx_pipe_cleanup(struct ath10k_snoc_pipe *snoc_pipe)
679 struct ath10k_ce_pipe *ce_pipe;
680 struct ath10k_ce_ring *ce_ring;
681 struct ath10k_snoc *ar_snoc;
686 ar = snoc_pipe->hif_ce_state;
687 ar_snoc = ath10k_snoc_priv(ar);
688 ce_pipe = snoc_pipe->ce_hdl;
689 ce_ring = ce_pipe->src_ring;
694 if (!snoc_pipe->buf_sz)
697 for (i = 0; i < ce_ring->nentries; i++) {
698 skb = ce_ring->per_transfer_context[i];
702 ce_ring->per_transfer_context[i] = NULL;
704 ath10k_htc_tx_completion_handler(ar, skb);
708 static void ath10k_snoc_buffer_cleanup(struct ath10k *ar)
710 struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
711 struct ath10k_snoc_pipe *pipe_info;
714 del_timer_sync(&ar_snoc->rx_post_retry);
715 for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
716 pipe_info = &ar_snoc->pipe_info[pipe_num];
717 ath10k_snoc_rx_pipe_cleanup(pipe_info);
718 ath10k_snoc_tx_pipe_cleanup(pipe_info);
722 static void ath10k_snoc_hif_stop(struct ath10k *ar)
724 ath10k_snoc_irq_disable(ar);
725 ath10k_snoc_buffer_cleanup(ar);
726 napi_synchronize(&ar->napi);
727 napi_disable(&ar->napi);
728 ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif stop\n");
731 static int ath10k_snoc_hif_start(struct ath10k *ar)
733 ath10k_snoc_irq_enable(ar);
734 ath10k_snoc_rx_post(ar);
736 ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif start\n");
741 static int ath10k_snoc_init_pipes(struct ath10k *ar)
745 for (i = 0; i < CE_COUNT; i++) {
746 ret = ath10k_ce_init_pipe(ar, i, &host_ce_config_wlan[i]);
748 ath10k_err(ar, "failed to initialize copy engine pipe %d: %d\n",
/* Placeholder: WLAN enable handshake not implemented yet on this bus */
static int ath10k_snoc_wlan_enable(struct ath10k *ar)
{
	return 0;
}
/* Placeholder: WLAN disable handshake not implemented yet on this bus */
static void ath10k_snoc_wlan_disable(struct ath10k *ar)
{
}
766 static void ath10k_snoc_hif_power_down(struct ath10k *ar)
768 ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif power down\n");
770 ath10k_snoc_wlan_disable(ar);
771 ath10k_ce_free_rri(ar);
774 static int ath10k_snoc_hif_power_up(struct ath10k *ar)
778 ath10k_dbg(ar, ATH10K_DBG_SNOC, "%s:WCN3990 driver state = %d\n",
779 __func__, ar->state);
781 ret = ath10k_snoc_wlan_enable(ar);
783 ath10k_err(ar, "failed to enable wcn3990: %d\n", ret);
787 ath10k_ce_alloc_rri(ar);
789 ret = ath10k_snoc_init_pipes(ar);
791 ath10k_err(ar, "failed to initialize CE: %d\n", ret);
795 napi_enable(&ar->napi);
799 ath10k_ce_free_rri(ar);
800 ath10k_snoc_wlan_disable(ar);
805 static const struct ath10k_hif_ops ath10k_snoc_hif_ops = {
806 .read32 = ath10k_snoc_read32,
807 .write32 = ath10k_snoc_write32,
808 .start = ath10k_snoc_hif_start,
809 .stop = ath10k_snoc_hif_stop,
810 .map_service_to_pipe = ath10k_snoc_hif_map_service_to_pipe,
811 .get_default_pipe = ath10k_snoc_hif_get_default_pipe,
812 .power_up = ath10k_snoc_hif_power_up,
813 .power_down = ath10k_snoc_hif_power_down,
814 .tx_sg = ath10k_snoc_hif_tx_sg,
815 .send_complete_check = ath10k_snoc_hif_send_complete_check,
816 .get_free_queue_number = ath10k_snoc_hif_get_free_queue_number,
817 .get_target_info = ath10k_snoc_hif_get_target_info,
820 static const struct ath10k_bus_ops ath10k_snoc_bus_ops = {
821 .read32 = ath10k_snoc_read32,
822 .write32 = ath10k_snoc_write32,
825 static int ath10k_snoc_get_ce_id_from_irq(struct ath10k *ar, int irq)
827 struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
830 for (i = 0; i < CE_COUNT_MAX; i++) {
831 if (ar_snoc->ce_irqs[i].irq_line == irq)
834 ath10k_err(ar, "No matching CE id for irq %d\n", irq);
839 static irqreturn_t ath10k_snoc_per_engine_handler(int irq, void *arg)
841 struct ath10k *ar = arg;
842 struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
843 int ce_id = ath10k_snoc_get_ce_id_from_irq(ar, irq);
845 if (ce_id < 0 || ce_id >= ARRAY_SIZE(ar_snoc->pipe_info)) {
846 ath10k_warn(ar, "unexpected/invalid irq %d ce_id %d\n", irq,
851 ath10k_snoc_irq_disable(ar);
852 napi_schedule(&ar->napi);
857 static int ath10k_snoc_napi_poll(struct napi_struct *ctx, int budget)
859 struct ath10k *ar = container_of(ctx, struct ath10k, napi);
862 ath10k_ce_per_engine_service_any(ar);
863 done = ath10k_htt_txrx_compl_task(ar, budget);
867 ath10k_snoc_irq_enable(ar);
873 static void ath10k_snoc_init_napi(struct ath10k *ar)
875 netif_napi_add(&ar->napi_dev, &ar->napi, ath10k_snoc_napi_poll,
879 static int ath10k_snoc_request_irq(struct ath10k *ar)
881 struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
884 for (id = 0; id < CE_COUNT_MAX; id++) {
885 ret = request_irq(ar_snoc->ce_irqs[id].irq_line,
886 ath10k_snoc_per_engine_handler, 0,
890 "failed to register IRQ handler for CE %d: %d",
899 for (id -= 1; id >= 0; id--)
900 free_irq(ar_snoc->ce_irqs[id].irq_line, ar);
905 static void ath10k_snoc_free_irq(struct ath10k *ar)
907 struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
910 for (id = 0; id < CE_COUNT_MAX; id++)
911 free_irq(ar_snoc->ce_irqs[id].irq_line, ar);
914 static int ath10k_snoc_resource_init(struct ath10k *ar)
916 struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
917 struct platform_device *pdev;
918 struct resource *res;
922 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "membase");
924 ath10k_err(ar, "Memory base not found in DT\n");
928 ar_snoc->mem_pa = res->start;
929 ar_snoc->mem = devm_ioremap(&pdev->dev, ar_snoc->mem_pa,
932 ath10k_err(ar, "Memory base ioremap failed with physical address %pa\n",
937 for (i = 0; i < CE_COUNT; i++) {
938 res = platform_get_resource(ar_snoc->dev, IORESOURCE_IRQ, i);
940 ath10k_err(ar, "failed to get IRQ%d\n", i);
944 ar_snoc->ce_irqs[i].irq_line = res->start;
951 static int ath10k_snoc_setup_resource(struct ath10k *ar)
953 struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
954 struct ath10k_ce *ce = ath10k_ce_priv(ar);
955 struct ath10k_snoc_pipe *pipe;
958 timer_setup(&ar_snoc->rx_post_retry, ath10k_snoc_rx_replenish_retry, 0);
959 spin_lock_init(&ce->ce_lock);
960 for (i = 0; i < CE_COUNT; i++) {
961 pipe = &ar_snoc->pipe_info[i];
962 pipe->ce_hdl = &ce->ce_states[i];
964 pipe->hif_ce_state = ar;
966 ret = ath10k_ce_alloc_pipe(ar, i, &host_ce_config_wlan[i]);
968 ath10k_err(ar, "failed to allocate copy engine pipe %d: %d\n",
973 pipe->buf_sz = host_ce_config_wlan[i].src_sz_max;
975 ath10k_snoc_init_napi(ar);
980 static void ath10k_snoc_release_resource(struct ath10k *ar)
984 netif_napi_del(&ar->napi);
985 for (i = 0; i < CE_COUNT; i++)
986 ath10k_ce_free_pipe(ar, i);
989 static int ath10k_get_vreg_info(struct ath10k *ar, struct device *dev,
990 struct ath10k_wcn3990_vreg_info *vreg_info)
992 struct regulator *reg;
995 reg = devm_regulator_get_optional(dev, vreg_info->name);
1000 if (ret == -EPROBE_DEFER) {
1001 ath10k_err(ar, "EPROBE_DEFER for regulator: %s\n",
1005 if (vreg_info->required) {
1006 ath10k_err(ar, "Regulator %s doesn't exist: %d\n",
1007 vreg_info->name, ret);
1010 ath10k_dbg(ar, ATH10K_DBG_SNOC,
1011 "Optional regulator %s doesn't exist: %d\n",
1012 vreg_info->name, ret);
1016 vreg_info->reg = reg;
1019 ath10k_dbg(ar, ATH10K_DBG_SNOC,
1020 "snog vreg %s min_v %u max_v %u load_ua %u settle_delay %lu\n",
1021 vreg_info->name, vreg_info->min_v, vreg_info->max_v,
1022 vreg_info->load_ua, vreg_info->settle_delay);
1027 static int ath10k_get_clk_info(struct ath10k *ar, struct device *dev,
1028 struct ath10k_wcn3990_clk_info *clk_info)
1033 handle = devm_clk_get(dev, clk_info->name);
1034 if (IS_ERR(handle)) {
1035 ret = PTR_ERR(handle);
1036 if (clk_info->required) {
1037 ath10k_err(ar, "snoc clock %s isn't available: %d\n",
1038 clk_info->name, ret);
1041 ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc ignoring clock %s: %d\n",
1047 ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc clock %s freq %u\n",
1048 clk_info->name, clk_info->freq);
1050 clk_info->handle = handle;
1055 static int ath10k_wcn3990_vreg_on(struct ath10k *ar)
1057 struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
1058 struct ath10k_wcn3990_vreg_info *vreg_info;
1062 for (i = 0; i < ARRAY_SIZE(vreg_cfg); i++) {
1063 vreg_info = &ar_snoc->vreg[i];
1065 if (!vreg_info->reg)
1068 ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc regulator %s being enabled\n",
1071 ret = regulator_set_voltage(vreg_info->reg, vreg_info->min_v,
1075 "failed to set regulator %s voltage-min: %d voltage-max: %d\n",
1076 vreg_info->name, vreg_info->min_v, vreg_info->max_v);
1077 goto err_reg_config;
1080 if (vreg_info->load_ua) {
1081 ret = regulator_set_load(vreg_info->reg,
1082 vreg_info->load_ua);
1085 "failed to set regulator %s load: %d\n",
1087 vreg_info->load_ua);
1088 goto err_reg_config;
1092 ret = regulator_enable(vreg_info->reg);
1094 ath10k_err(ar, "failed to enable regulator %s\n",
1096 goto err_reg_config;
1099 if (vreg_info->settle_delay)
1100 udelay(vreg_info->settle_delay);
1106 for (; i >= 0; i--) {
1107 vreg_info = &ar_snoc->vreg[i];
1109 if (!vreg_info->reg)
1112 regulator_disable(vreg_info->reg);
1113 regulator_set_load(vreg_info->reg, 0);
1114 regulator_set_voltage(vreg_info->reg, 0, vreg_info->max_v);
1120 static int ath10k_wcn3990_vreg_off(struct ath10k *ar)
1122 struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
1123 struct ath10k_wcn3990_vreg_info *vreg_info;
1127 for (i = ARRAY_SIZE(vreg_cfg) - 1; i >= 0; i--) {
1128 vreg_info = &ar_snoc->vreg[i];
1130 if (!vreg_info->reg)
1133 ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc regulator %s being disabled\n",
1136 ret = regulator_disable(vreg_info->reg);
1138 ath10k_err(ar, "failed to disable regulator %s\n",
1141 ret = regulator_set_load(vreg_info->reg, 0);
1143 ath10k_err(ar, "failed to set load %s\n",
1146 ret = regulator_set_voltage(vreg_info->reg, 0,
1149 ath10k_err(ar, "failed to set voltage %s\n",
1156 static int ath10k_wcn3990_clk_init(struct ath10k *ar)
1158 struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
1159 struct ath10k_wcn3990_clk_info *clk_info;
1163 for (i = 0; i < ARRAY_SIZE(clk_cfg); i++) {
1164 clk_info = &ar_snoc->clk[i];
1166 if (!clk_info->handle)
1169 ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc clock %s being enabled\n",
1172 if (clk_info->freq) {
1173 ret = clk_set_rate(clk_info->handle, clk_info->freq);
1176 ath10k_err(ar, "failed to set clock %s freq %u\n",
1177 clk_info->name, clk_info->freq);
1178 goto err_clock_config;
1182 ret = clk_prepare_enable(clk_info->handle);
1184 ath10k_err(ar, "failed to enable clock %s\n",
1186 goto err_clock_config;
1193 for (i = i - 1; i >= 0; i--) {
1194 clk_info = &ar_snoc->clk[i];
1196 if (!clk_info->handle)
1199 clk_disable_unprepare(clk_info->handle);
1205 static int ath10k_wcn3990_clk_deinit(struct ath10k *ar)
1207 struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
1208 struct ath10k_wcn3990_clk_info *clk_info;
1211 for (i = 0; i < ARRAY_SIZE(clk_cfg); i++) {
1212 clk_info = &ar_snoc->clk[i];
1214 if (!clk_info->handle)
1217 ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc clock %s being disabled\n",
1220 clk_disable_unprepare(clk_info->handle);
1226 static int ath10k_hw_power_on(struct ath10k *ar)
1230 ath10k_dbg(ar, ATH10K_DBG_SNOC, "soc power on\n");
1232 ret = ath10k_wcn3990_vreg_on(ar);
1236 ret = ath10k_wcn3990_clk_init(ar);
1243 ath10k_wcn3990_vreg_off(ar);
1247 static int ath10k_hw_power_off(struct ath10k *ar)
1251 ath10k_dbg(ar, ATH10K_DBG_SNOC, "soc power off\n");
1253 ath10k_wcn3990_clk_deinit(ar);
1255 ret = ath10k_wcn3990_vreg_off(ar);
1260 static const struct of_device_id ath10k_snoc_dt_match[] = {
1261 { .compatible = "qcom,wcn3990-wifi",
1266 MODULE_DEVICE_TABLE(of, ath10k_snoc_dt_match);
1268 static int ath10k_snoc_probe(struct platform_device *pdev)
1270 const struct ath10k_snoc_drv_priv *drv_data;
1271 const struct of_device_id *of_id;
1272 struct ath10k_snoc *ar_snoc;
1278 of_id = of_match_device(ath10k_snoc_dt_match, &pdev->dev);
1280 dev_err(&pdev->dev, "failed to find matching device tree id\n");
1284 drv_data = of_id->data;
1287 ret = dma_set_mask_and_coherent(dev, drv_data->dma_mask);
1289 dev_err(dev, "failed to set dma mask: %d", ret);
1293 ar = ath10k_core_create(sizeof(*ar_snoc), dev, ATH10K_BUS_SNOC,
1294 drv_data->hw_rev, &ath10k_snoc_hif_ops);
1296 dev_err(dev, "failed to allocate core\n");
1300 ar_snoc = ath10k_snoc_priv(ar);
1301 ar_snoc->dev = pdev;
1302 platform_set_drvdata(pdev, ar);
1304 ar_snoc->ce.bus_ops = &ath10k_snoc_bus_ops;
1305 ar->ce_priv = &ar_snoc->ce;
1307 ret = ath10k_snoc_resource_init(ar);
1309 ath10k_warn(ar, "failed to initialize resource: %d\n", ret);
1310 goto err_core_destroy;
1313 ret = ath10k_snoc_setup_resource(ar);
1315 ath10k_warn(ar, "failed to setup resource: %d\n", ret);
1316 goto err_core_destroy;
1318 ret = ath10k_snoc_request_irq(ar);
1320 ath10k_warn(ar, "failed to request irqs: %d\n", ret);
1321 goto err_release_resource;
1324 ar_snoc->vreg = vreg_cfg;
1325 for (i = 0; i < ARRAY_SIZE(vreg_cfg); i++) {
1326 ret = ath10k_get_vreg_info(ar, dev, &ar_snoc->vreg[i]);
1331 ar_snoc->clk = clk_cfg;
1332 for (i = 0; i < ARRAY_SIZE(clk_cfg); i++) {
1333 ret = ath10k_get_clk_info(ar, dev, &ar_snoc->clk[i]);
1338 ret = ath10k_hw_power_on(ar);
1340 ath10k_err(ar, "failed to power on device: %d\n", ret);
1344 ret = ath10k_core_register(ar, drv_data->hw_rev);
1346 ath10k_err(ar, "failed to register driver core: %d\n", ret);
1347 goto err_hw_power_off;
1350 ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc probe\n");
1351 ath10k_warn(ar, "Warning: SNOC support is still work-in-progress, it will not work properly!");
1356 ath10k_hw_power_off(ar);
1359 ath10k_snoc_free_irq(ar);
1361 err_release_resource:
1362 ath10k_snoc_release_resource(ar);
1365 ath10k_core_destroy(ar);
1370 static int ath10k_snoc_remove(struct platform_device *pdev)
1372 struct ath10k *ar = platform_get_drvdata(pdev);
1374 ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc remove\n");
1375 ath10k_core_unregister(ar);
1376 ath10k_hw_power_off(ar);
1377 ath10k_snoc_free_irq(ar);
1378 ath10k_snoc_release_resource(ar);
1379 ath10k_core_destroy(ar);
1384 static struct platform_driver ath10k_snoc_driver = {
1385 .probe = ath10k_snoc_probe,
1386 .remove = ath10k_snoc_remove,
1388 .name = "ath10k_snoc",
1389 .of_match_table = ath10k_snoc_dt_match,
1392 module_platform_driver(ath10k_snoc_driver);
1394 MODULE_AUTHOR("Qualcomm");
1395 MODULE_LICENSE("Dual BSD/GPL");
1396 MODULE_DESCRIPTION("Driver support for Atheros WCN3990 SNOC devices");