1 /******************************************************************************
3 * Copyright(c) 2003 - 2015 Intel Corporation. All rights reserved.
4 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
5 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
6 * Copyright(c) 2018 Intel Corporation
8 * Portions of this file are derived from the ipw3945 project, as well
9 * as portions of the ieee80211 subsystem header files.
11 * This program is free software; you can redistribute it and/or modify it
12 * under the terms of version 2 of the GNU General Public License as
13 * published by the Free Software Foundation.
15 * This program is distributed in the hope that it will be useful, but WITHOUT
16 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.
23 * The full GNU General Public License is included in this distribution in the
24 * file called LICENSE.
26 * Contact Information:
27 * Intel Linux Wireless <linuxwifi@intel.com>
28 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30 *****************************************************************************/
31 #ifndef __iwl_trans_int_pcie_h__
32 #define __iwl_trans_int_pcie_h__
34 #include <linux/spinlock.h>
35 #include <linux/interrupt.h>
36 #include <linux/skbuff.h>
37 #include <linux/wait.h>
38 #include <linux/pci.h>
39 #include <linux/timer.h>
40 #include <linux/cpu.h>
44 #include "iwl-trans.h"
45 #include "iwl-debug.h"
47 #include "iwl-op-mode.h"
/* We need 2 entries for the TX command and header, and another one might
 * be needed for potential data in the SKB's head. The remaining ones can
 * be used for frags.
 */
54 #define IWL_PCIE_MAX_FRAGS(x) (x->max_tbs - 3)
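/*
 * Worked example (illustrative value, not a statement about any specific
 * device): if a TFD held max_tbs == 20 buffer pointers, IWL_PCIE_MAX_FRAGS()
 * would leave 20 - 3 == 17 TBs for scatter-gather fragments of the SKB.
 */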
/*
 * RX related structures and functions
 */
59 #define RX_NUM_QUEUES 1
60 #define RX_POST_REQ_ALLOC 2
61 #define RX_CLAIM_REQ_ALLOC 8
62 #define RX_PENDING_WATERMARK 16
63 #define FIRST_RX_QUEUE 512
/* This file includes the declarations that are internal to the
 * trans_pcie layer.
 */
71 * struct iwl_rx_mem_buffer
72 * @page_dma: bus address of rxb page
73 * @page: driver's pointer to the rxb page
74 * @invalid: rxb is in driver ownership - not owned by HW
75 * @vid: index of this rxb in the global table
76 * @size: size used from the buffer
78 struct iwl_rx_mem_buffer {
83 struct list_head list;
88 * struct isr_statistics - interrupt statistics
91 struct isr_statistics {
105 #define IWL_RX_TD_TYPE_MSK 0xff000000
106 #define IWL_RX_TD_SIZE_MSK 0x00ffffff
107 #define IWL_RX_TD_SIZE_2K BIT(11)
108 #define IWL_RX_TD_TYPE 0
111 * struct iwl_rx_transfer_desc - transfer descriptor
112 * @type_n_size: buffer type (bit 0: external buff valid,
113 * bit 1: optional footer valid, bit 2-7: reserved)
115 * @addr: ptr to free buffer start address
116 * @rbid: unique tag of the buffer
117 * @reserved: reserved
119 struct iwl_rx_transfer_desc {
126 #define IWL_RX_CD_SIZE 0xffffff00
129 * struct iwl_rx_completion_desc - completion descriptor
130 * @type: buffer type (bit 0: external buff valid,
131 * bit 1: optional footer valid, bit 2-7: reserved)
132 * @status: status of the completion
133 * @reserved1: reserved
134 * @rbid: unique tag of the received buffer
135 * @size: buffer size, masked by IWL_RX_CD_SIZE
136 * @reserved2: reserved
138 struct iwl_rx_completion_desc {
148 * struct iwl_rxq - Rx queue
150 * @bd: driver's pointer to buffer of receive buffer descriptors (rbd).
151 * Address size is 32 bit in pre-9000 devices and 64 bit in 9000 devices.
152 * In 22560 devices it is a pointer to a list of iwl_rx_transfer_desc's
153 * @bd_dma: bus address of buffer of receive buffer descriptors (rbd)
154 * @ubd: driver's pointer to buffer of used receive buffer descriptors (rbd)
155 * @ubd_dma: physical address of buffer of used receive buffer descriptors (rbd)
156 * @tr_tail: driver's pointer to the transmission ring tail buffer
157 * @tr_tail_dma: physical address of the buffer for the transmission ring tail
158 * @cr_tail: driver's pointer to the completion ring tail buffer
159 * @cr_tail_dma: physical address of the buffer for the completion ring tail
160 * @read: Shared index to newest available Rx buffer
161 * @write: Shared index to oldest written Rx packet
162 * @free_count: Number of pre-allocated buffers in rx_free
 * @used_count: Number of RBDs handed to the allocator to use for allocation
165 * @rx_free: list of RBDs with allocated RB ready for use
166 * @rx_used: list of RBDs with no RB attached
167 * @need_update: flag to indicate we need to update read/write index
168 * @rb_stts: driver's pointer to receive buffer status
169 * @rb_stts_dma: bus address of receive buffer status
171 * @queue: actual rx queue. Not used for multi-rx queue.
173 * NOTE: rx_free and rx_used are used as a FIFO for iwl_rx_mem_buffers
182 struct iwl_rx_completion_desc *cd;
184 dma_addr_t used_bd_dma;
186 dma_addr_t tr_tail_dma;
188 dma_addr_t cr_tail_dma;
195 struct list_head rx_free;
196 struct list_head rx_used;
199 dma_addr_t rb_stts_dma;
201 struct napi_struct napi;
202 struct iwl_rx_mem_buffer *queue[RX_QUEUE_SIZE];
206 * struct iwl_rb_allocator - Rx allocator
 * @req_pending: number of requests the allocator has not yet processed
208 * @req_ready: number of requests honored and ready for claiming
 * @rbd_allocated: RBDs with pages allocated and ready to be handed to
 *	the queue. This is a list of &struct iwl_rx_mem_buffer
211 * @rbd_empty: RBDs with no page attached for allocator use. This is a list
212 * of &struct iwl_rx_mem_buffer
213 * @lock: protects the rbd_allocated and rbd_empty lists
214 * @alloc_wq: work queue for background calls
215 * @rx_alloc: work struct for background calls
217 struct iwl_rb_allocator {
218 atomic_t req_pending;
220 struct list_head rbd_allocated;
221 struct list_head rbd_empty;
223 struct workqueue_struct *alloc_wq;
224 struct work_struct rx_alloc;
234 * iwl_queue_inc_wrap - increment queue index, wrap back to beginning
 * @index: current index
237 static inline int iwl_queue_inc_wrap(struct iwl_trans *trans, int index)
239 return ++index & (trans->cfg->base_params->max_tfd_queue_size - 1);
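/*
 * Note: the mask arithmetic above assumes max_tfd_queue_size is a power of
 * two. Worked example with a queue size of 256: index 255 increments to 256,
 * and 256 & 255 == 0, so the index wraps back to the start of the queue.
 */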
243 * iwl_get_closed_rb_stts - get closed rb stts from different structs
 * @rxq: the rxq to get the rb stts from
246 static inline __le16 iwl_get_closed_rb_stts(struct iwl_trans *trans,
249 if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) {
250 __le16 *rb_stts = rxq->rb_stts;
252 return READ_ONCE(*rb_stts);
254 struct iwl_rb_status *rb_stts = rxq->rb_stts;
256 return READ_ONCE(rb_stts->closed_rb_num);
261 * iwl_queue_dec_wrap - decrement queue index, wrap back to end
 * @index: current index
264 static inline int iwl_queue_dec_wrap(struct iwl_trans *trans, int index)
266 return --index & (trans->cfg->base_params->max_tfd_queue_size - 1);
269 struct iwl_cmd_meta {
270 /* only for SYNC commands, iff the reply skb is wanted */
271 struct iwl_host_cmd *source;
277 #define TFD_TX_CMD_SLOTS 256
278 #define TFD_CMD_SLOTS 32
 * The FH will write back to the first TB only, so we need to copy some data
 * into the buffer regardless of whether it should be mapped or not.
 * This indicates how big the first TB must be to include the scratch buffer
 * and the assigned PN.
 * Since the PN is 8 bytes long and sits at offset 12, the size works out to 20.
 * If we make it bigger, allocations will be bigger and copying will be slower,
 * so that's probably not useful.
289 #define IWL_FIRST_TB_SIZE 20
290 #define IWL_FIRST_TB_SIZE_ALIGN ALIGN(IWL_FIRST_TB_SIZE, 64)
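/*
 * With the values above, ALIGN(20, 64) == 64, so each first-TB scratch
 * buffer (struct iwl_pcie_first_tb_buf below) occupies one 64-byte slot.
 */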
292 struct iwl_pcie_txq_entry {
293 struct iwl_device_cmd *cmd;
295 /* buffer to free after command completes */
296 const void *free_buf;
297 struct iwl_cmd_meta meta;
300 struct iwl_pcie_first_tb_buf {
301 u8 buf[IWL_FIRST_TB_SIZE_ALIGN];
305 * struct iwl_txq - Tx Queue for DMA
306 * @q: generic Rx/Tx queue descriptor
307 * @tfds: transmit frame descriptors (DMA memory)
308 * @first_tb_bufs: start of command headers, including scratch buffers, for
309 * the writeback -- this is DMA memory and an array holding one buffer
310 * for each command on the queue
311 * @first_tb_dma: DMA address for the first_tb_bufs start
312 * @entries: transmit entries (driver state)
314 * @stuck_timer: timer that fires if queue gets stuck
315 * @trans_pcie: pointer back to transport (for timer)
316 * @need_update: indicates need to update read/write index
 * @ampdu: true if this queue is an ampdu queue for a specific RA/TID
318 * @wd_timeout: queue watchdog timeout (jiffies) - per queue
319 * @frozen: tx stuck queue timer is frozen
320 * @frozen_expiry_remainder: remember how long until the timer fires
321 * @bc_tbl: byte count table of the queue (relevant only for gen2 transport)
 * @write_ptr: first empty entry (index); the host write pointer (host_w)
 * @read_ptr: last used entry (index); the host read pointer (host_r)
324 * @dma_addr: physical addr for BD's
325 * @n_window: safe queue window
327 * @low_mark: low watermark, resume queue if free space more than this
328 * @high_mark: high watermark, stop queue if free space less than this
 * A Tx queue consists of a circular buffer of BDs (a.k.a. TFDs, transmit frame
 * descriptors) and the required locking structures.
 *
 * Note the difference between TFD_QUEUE_SIZE_MAX and n_window: the hardware
 * always assumes 256 descriptors, so TFD_QUEUE_SIZE_MAX is always 256 (unless
 * HW changes require otherwise in the future). For the normal TX queues,
 * n_window, which is the size of the software queue data, is also 256;
 * however, for the command queue, n_window is only 32 since we don't need so
 * many commands pending. Since the HW still uses 256 BDs for DMA though,
 * TFD_QUEUE_SIZE_MAX stays 256.
 * This means that we end up with the following:
 *  HW entries: | 0 | ... | N * 32 | ... | N * 32 + 31 | ... | 255 |
 *  SW entries: | 0      | ... | 31          |
 * where N is a number between 0 and 7. This means that the SW
 * data is a window overlaid over the HW queue.
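 *
 * Worked example: on the command queue n_window == TFD_CMD_SLOTS == 32, so
 * HW TFD index 100 maps to SW entry 100 & (32 - 1) == 4; this is exactly
 * the mapping iwl_pcie_get_cmd_index() below performs.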
348 struct iwl_pcie_first_tb_buf *first_tb_bufs;
349 dma_addr_t first_tb_dma;
350 struct iwl_pcie_txq_entry *entries;
352 unsigned long frozen_expiry_remainder;
353 struct timer_list stuck_timer;
354 struct iwl_trans_pcie *trans_pcie;
359 unsigned long wd_timeout;
360 struct sk_buff_head overflow_q;
361 struct iwl_dma_ptr bc_tbl;
372 static inline dma_addr_t
373 iwl_pcie_get_first_tb_dma(struct iwl_txq *txq, int idx)
375 return txq->first_tb_dma +
376 sizeof(struct iwl_pcie_first_tb_buf) * idx;
379 struct iwl_tso_hdr_page {
385 * enum iwl_shared_irq_flags - level of sharing for irq
386 * @IWL_SHARED_IRQ_NON_RX: interrupt vector serves non rx causes.
387 * @IWL_SHARED_IRQ_FIRST_RSS: interrupt vector serves first RSS queue.
389 enum iwl_shared_irq_flags {
390 IWL_SHARED_IRQ_NON_RX = BIT(0),
391 IWL_SHARED_IRQ_FIRST_RSS = BIT(1),
395 * enum iwl_image_response_code - image response values
396 * @IWL_IMAGE_RESP_DEF: the default value of the register
397 * @IWL_IMAGE_RESP_SUCCESS: iml was read successfully
398 * @IWL_IMAGE_RESP_FAIL: iml reading failed
400 enum iwl_image_response_code {
401 IWL_IMAGE_RESP_DEF = 0,
402 IWL_IMAGE_RESP_SUCCESS = 1,
403 IWL_IMAGE_RESP_FAIL = 2,
407 * struct iwl_dram_data
408 * @physical: page phy pointer
409 * @block: pointer to the allocated block/page
410 * @size: size of the block/page
412 struct iwl_dram_data {
419 * struct iwl_self_init_dram - dram data used by self init process
420 * @fw: lmac and umac dram data
421 * @fw_cnt: total number of items in array
422 * @paging: paging dram data
423 * @paging_cnt: total number of items in array
425 struct iwl_self_init_dram {
426 struct iwl_dram_data *fw;
428 struct iwl_dram_data *paging;
433 * struct iwl_trans_pcie - PCIe transport specific data
434 * @rxq: all the RX queue data
435 * @rx_pool: initial pool of iwl_rx_mem_buffer for all the queues
436 * @global_table: table mapping received VID from hw to rxb
437 * @rba: allocator for RX replenishing
438 * @ctxt_info: context information for FW self init
439 * @ctxt_info_gen3: context information for gen3 devices
440 * @prph_info: prph info for self init
441 * @prph_scratch: prph scratch for self init
442 * @ctxt_info_dma_addr: dma addr of context information
443 * @prph_info_dma_addr: dma addr of prph info
444 * @prph_scratch_dma_addr: dma addr of prph scratch
446 * @init_dram: DRAM data of firmware image (including paging).
447 * Context information addresses will be taken from here.
448 * This is driver's local copy for keeping track of size and
449 * count for allocating and freeing the memory.
450 * @iml: image loader image virtual address
451 * @iml_dma_addr: image loader image DMA address
452 * @trans: pointer to the generic transport area
453 * @scd_base_addr: scheduler sram base address in SRAM
454 * @scd_bc_tbls: pointer to the byte count table of the scheduler
455 * @kw: keep warm address
456 * @pci_dev: basic pci-network driver stuff
457 * @hw_base: pci hardware address support
458 * @ucode_write_complete: indicates that the ucode has been copied.
459 * @ucode_write_waitq: wait queue for uCode load
 * @cmd_queue: command queue number
461 * @rx_buf_size: Rx buffer size
462 * @bc_table_dword: true if the BC table expects DWORD (as opposed to bytes)
463 * @scd_set_active: should the transport configure the SCD for HCMD queue
464 * @sw_csum_tx: if true, then the transport will compute the csum of the TXed
466 * @rx_page_order: page order for receive buffer size
467 * @reg_lock: protect hw register access
468 * @mutex: to protect stop_device / start_fw / start_hw
469 * @cmd_in_flight: true when we have a host command in flight
470 * @fw_mon_phys: physical address of the buffer for the firmware monitor
471 * @fw_mon_page: points to the first page of the buffer for the firmware monitor
472 * @fw_mon_size: size of the buffer for the firmware monitor
473 * @msix_entries: array of MSI-X entries
474 * @msix_enabled: true if managed to enable MSI-X
475 * @shared_vec_mask: the type of causes the shared vector handles
476 * (see iwl_shared_irq_flags).
477 * @alloc_vecs: the number of interrupt vectors allocated by the OS
478 * @def_irq: default irq for non rx causes
479 * @fh_init_mask: initial unmasked fh causes
480 * @hw_init_mask: initial unmasked hw causes
481 * @fh_mask: current unmasked fh causes
482 * @hw_mask: current unmasked hw causes
483 * @in_rescan: true if we have triggered a device rescan
484 * @scheduled_for_removal: true if we have scheduled a device removal
486 struct iwl_trans_pcie {
488 struct iwl_rx_mem_buffer rx_pool[RX_POOL_SIZE];
489 struct iwl_rx_mem_buffer *global_table[RX_POOL_SIZE];
490 struct iwl_rb_allocator rba;
492 struct iwl_context_info *ctxt_info;
493 struct iwl_context_info_gen3 *ctxt_info_gen3;
495 struct iwl_prph_info *prph_info;
496 struct iwl_prph_scratch *prph_scratch;
498 dma_addr_t ctxt_info_dma_addr;
499 dma_addr_t prph_info_dma_addr;
500 dma_addr_t prph_scratch_dma_addr;
501 dma_addr_t iml_dma_addr;
502 struct iwl_self_init_dram init_dram;
503 struct iwl_trans *trans;
505 struct net_device napi_dev;
	struct iwl_tso_hdr_page __percpu *tso_hdr_page;
511 dma_addr_t ict_tbl_dma;
514 bool is_down, opmode_down;
516 struct isr_statistics isr_stats;
522 struct iwl_dma_ptr scd_bc_tbls;
523 struct iwl_dma_ptr kw;
525 struct iwl_txq *txq_memory;
526 struct iwl_txq *txq[IWL_MAX_TVQM_QUEUES];
527 unsigned long queue_used[BITS_TO_LONGS(IWL_MAX_TVQM_QUEUES)];
528 unsigned long queue_stopped[BITS_TO_LONGS(IWL_MAX_TVQM_QUEUES)];
530 /* PCI bus related data */
531 struct pci_dev *pci_dev;
532 void __iomem *hw_base;
534 bool ucode_write_complete;
535 wait_queue_head_t ucode_write_waitq;
536 wait_queue_head_t wait_command_queue;
537 wait_queue_head_t d0i3_waitq;
539 u8 page_offs, dev_cmd_offs;
543 unsigned int cmd_q_wdg_timeout;
544 u8 n_no_reclaim_cmds;
545 u8 no_reclaim_cmds[MAX_NO_RECLAIM_CMDS];
549 enum iwl_amsdu_size rx_buf_size;
553 bool pcie_dbg_dumped_once;
	/* protect hw register access */
558 bool cmd_hold_nic_awake;
559 bool ref_cmd_in_flight;
561 dma_addr_t fw_mon_phys;
562 struct page *fw_mon_page;
565 struct msix_entry msix_entries[IWL_MAX_RX_HW_QUEUES];
574 cpumask_t affinity_mask[IWL_MAX_RX_HW_QUEUES];
575 u16 tx_cmd_queue_size;
577 bool scheduled_for_removal;
580 static inline struct iwl_trans_pcie *
581 IWL_TRANS_GET_PCIE_TRANS(struct iwl_trans *trans)
583 return (void *)trans->trans_specific;
586 static inline void iwl_pcie_clear_irq(struct iwl_trans *trans,
587 struct msix_entry *entry)
590 * Before sending the interrupt the HW disables it to prevent
591 * a nested interrupt. This is done by writing 1 to the corresponding
592 * bit in the mask register. After handling the interrupt, it should be
	 * re-enabled by clearing this bit. This register is defined as a
	 * write-1-to-clear (W1C) register, meaning that the bit is cleared
	 * by writing 1 to it.
597 iwl_write32(trans, CSR_MSIX_AUTOMASK_ST_AD, BIT(entry->entry));
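/*
 * Usage note (sketch, not exhaustive): the MSI-X interrupt handlers are
 * expected to call iwl_pcie_clear_irq() once a cause has been serviced, so
 * that the auto-masked vector is re-armed and can fire again.
 */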
600 static inline struct iwl_trans *
601 iwl_trans_pcie_get_trans(struct iwl_trans_pcie *trans_pcie)
603 return container_of((void *)trans_pcie, struct iwl_trans,
608 * Convention: trans API functions: iwl_trans_pcie_XXX
609 * Other functions: iwl_pcie_XXX
611 struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
612 const struct pci_device_id *ent,
613 const struct iwl_cfg *cfg);
614 void iwl_trans_pcie_free(struct iwl_trans *trans);
616 /*****************************************************
618 ******************************************************/
619 int iwl_pcie_rx_init(struct iwl_trans *trans);
620 int iwl_pcie_gen2_rx_init(struct iwl_trans *trans);
621 irqreturn_t iwl_pcie_msix_isr(int irq, void *data);
622 irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id);
623 irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id);
624 irqreturn_t iwl_pcie_irq_rx_msix_handler(int irq, void *dev_id);
625 int iwl_pcie_rx_stop(struct iwl_trans *trans);
626 void iwl_pcie_rx_free(struct iwl_trans *trans);
627 void iwl_pcie_free_rbs_pool(struct iwl_trans *trans);
628 void iwl_pcie_rx_init_rxb_lists(struct iwl_rxq *rxq);
629 int iwl_pcie_dummy_napi_poll(struct napi_struct *napi, int budget);
630 void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority,
631 struct iwl_rxq *rxq);
633 /*****************************************************
634 * ICT - interrupt handling
635 ******************************************************/
636 irqreturn_t iwl_pcie_isr(int irq, void *data);
637 int iwl_pcie_alloc_ict(struct iwl_trans *trans);
638 void iwl_pcie_free_ict(struct iwl_trans *trans);
639 void iwl_pcie_reset_ict(struct iwl_trans *trans);
640 void iwl_pcie_disable_ict(struct iwl_trans *trans);
642 /*****************************************************
644 ******************************************************/
645 int iwl_pcie_tx_init(struct iwl_trans *trans);
646 int iwl_pcie_gen2_tx_init(struct iwl_trans *trans);
647 void iwl_pcie_tx_start(struct iwl_trans *trans, u32 scd_base_addr);
648 int iwl_pcie_tx_stop(struct iwl_trans *trans);
649 void iwl_pcie_tx_free(struct iwl_trans *trans);
650 bool iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int queue, u16 ssn,
651 const struct iwl_trans_txq_scd_cfg *cfg,
652 unsigned int wdg_timeout);
653 void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int queue,
655 void iwl_trans_pcie_txq_set_shared_mode(struct iwl_trans *trans, u32 txq_id,
657 void iwl_trans_pcie_log_scd_error(struct iwl_trans *trans,
658 struct iwl_txq *txq);
659 int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
660 struct iwl_device_cmd *dev_cmd, int txq_id);
661 void iwl_pcie_txq_check_wrptrs(struct iwl_trans *trans);
662 int iwl_trans_pcie_send_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd);
663 void iwl_pcie_hcmd_complete(struct iwl_trans *trans,
664 struct iwl_rx_cmd_buffer *rxb);
665 void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
666 struct sk_buff_head *skbs);
667 void iwl_trans_pcie_tx_reset(struct iwl_trans *trans);
669 static inline u16 iwl_pcie_tfd_tb_get_len(struct iwl_trans *trans, void *_tfd,
672 if (trans->cfg->use_tfh) {
673 struct iwl_tfh_tfd *tfd = _tfd;
674 struct iwl_tfh_tb *tb = &tfd->tbs[idx];
676 return le16_to_cpu(tb->tb_len);
678 struct iwl_tfd *tfd = _tfd;
679 struct iwl_tfd_tb *tb = &tfd->tbs[idx];
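
		/*
		 * In the legacy TFD layout the low 4 bits of hi_n_len carry
		 * address bits 32-35 and the upper 12 bits carry the TB
		 * length, hence the shift by 4 below.
		 */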
681 return le16_to_cpu(tb->hi_n_len) >> 4;
685 /*****************************************************
687 ******************************************************/
688 void iwl_pcie_dump_csr(struct iwl_trans *trans);
690 /*****************************************************
692 ******************************************************/
693 static inline void _iwl_disable_interrupts(struct iwl_trans *trans)
695 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
697 clear_bit(STATUS_INT_ENABLED, &trans->status);
698 if (!trans_pcie->msix_enabled) {
699 /* disable interrupts from uCode/NIC to host */
700 iwl_write32(trans, CSR_INT_MASK, 0x00000000);
702 /* acknowledge/clear/reset any interrupts still pending
703 * from uCode or flow handler (Rx/Tx DMA) */
704 iwl_write32(trans, CSR_INT, 0xffffffff);
705 iwl_write32(trans, CSR_FH_INT_STATUS, 0xffffffff);
		/* disable all the interrupts we might use */
708 iwl_write32(trans, CSR_MSIX_FH_INT_MASK_AD,
709 trans_pcie->fh_init_mask);
710 iwl_write32(trans, CSR_MSIX_HW_INT_MASK_AD,
711 trans_pcie->hw_init_mask);
713 IWL_DEBUG_ISR(trans, "Disabled interrupts\n");
716 #define IWL_NUM_OF_COMPLETION_RINGS 31
717 #define IWL_NUM_OF_TRANSFER_RINGS 527
719 static inline int iwl_pcie_get_num_sections(const struct fw_img *fw,
724 while (start < fw->num_sec &&
725 fw->sec[start].offset != CPU1_CPU2_SEPARATOR_SECTION &&
726 fw->sec[start].offset != PAGING_SEPARATOR_SECTION) {
734 static inline int iwl_pcie_ctxt_info_alloc_dma(struct iwl_trans *trans,
735 const struct fw_desc *sec,
736 struct iwl_dram_data *dram)
738 dram->block = dma_alloc_coherent(trans->dev, sec->len,
744 dram->size = sec->len;
745 memcpy(dram->block, sec->data, sec->len);
750 static inline void iwl_pcie_ctxt_info_free_fw_img(struct iwl_trans *trans)
752 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
753 struct iwl_self_init_dram *dram = &trans_pcie->init_dram;
757 WARN_ON(dram->fw_cnt);
761 for (i = 0; i < dram->fw_cnt; i++)
762 dma_free_coherent(trans->dev, dram->fw[i].size,
763 dram->fw[i].block, dram->fw[i].physical);
770 static inline void iwl_disable_interrupts(struct iwl_trans *trans)
772 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
774 spin_lock(&trans_pcie->irq_lock);
775 _iwl_disable_interrupts(trans);
776 spin_unlock(&trans_pcie->irq_lock);
779 static inline void _iwl_enable_interrupts(struct iwl_trans *trans)
781 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
783 IWL_DEBUG_ISR(trans, "Enabling interrupts\n");
784 set_bit(STATUS_INT_ENABLED, &trans->status);
785 if (!trans_pcie->msix_enabled) {
786 trans_pcie->inta_mask = CSR_INI_SET_MASK;
787 iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask);
		 * fh/hw_mask keep all the unmasked causes.
		 * Unlike MSI, with MSI-X a cause is enabled when its mask
		 * bit is unset.
793 trans_pcie->hw_mask = trans_pcie->hw_init_mask;
794 trans_pcie->fh_mask = trans_pcie->fh_init_mask;
795 iwl_write32(trans, CSR_MSIX_FH_INT_MASK_AD,
796 ~trans_pcie->fh_mask);
797 iwl_write32(trans, CSR_MSIX_HW_INT_MASK_AD,
798 ~trans_pcie->hw_mask);
802 static inline void iwl_enable_interrupts(struct iwl_trans *trans)
804 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
806 spin_lock(&trans_pcie->irq_lock);
807 _iwl_enable_interrupts(trans);
808 spin_unlock(&trans_pcie->irq_lock);
810 static inline void iwl_enable_hw_int_msk_msix(struct iwl_trans *trans, u32 msk)
812 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
814 iwl_write32(trans, CSR_MSIX_HW_INT_MASK_AD, ~msk);
815 trans_pcie->hw_mask = msk;
818 static inline void iwl_enable_fh_int_msk_msix(struct iwl_trans *trans, u32 msk)
820 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
822 iwl_write32(trans, CSR_MSIX_FH_INT_MASK_AD, ~msk);
823 trans_pcie->fh_mask = msk;
826 static inline void iwl_enable_fw_load_int(struct iwl_trans *trans)
828 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
830 IWL_DEBUG_ISR(trans, "Enabling FW load interrupt\n");
831 if (!trans_pcie->msix_enabled) {
832 trans_pcie->inta_mask = CSR_INT_BIT_FH_TX;
833 iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask);
835 iwl_write32(trans, CSR_MSIX_HW_INT_MASK_AD,
836 trans_pcie->hw_init_mask);
837 iwl_enable_fh_int_msk_msix(trans,
838 MSIX_FH_INT_CAUSES_D2S_CH0_NUM);
842 static inline void iwl_enable_fw_load_int_ctx_info(struct iwl_trans *trans)
844 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
846 IWL_DEBUG_ISR(trans, "Enabling ALIVE interrupt only\n");
848 if (!trans_pcie->msix_enabled) {
		 * When we receive the ALIVE interrupt, the ISR will call
		 * iwl_enable_fw_load_int_ctx_info again to set the ALIVE
		 * interrupt (which is not really needed anymore), but also the
		 * RX interrupt, which will allow us to receive the ALIVE
		 * notification (which is Rx) and continue the flow.
856 trans_pcie->inta_mask = CSR_INT_BIT_ALIVE | CSR_INT_BIT_FH_RX;
857 iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask);
859 iwl_enable_hw_int_msk_msix(trans,
860 MSIX_HW_INT_CAUSES_REG_ALIVE);
		 * Leave all the FH causes enabled to get the ALIVE
		 * notification.
		 */
865 iwl_enable_fh_int_msk_msix(trans, trans_pcie->fh_init_mask);
869 static inline u16 iwl_pcie_get_cmd_index(const struct iwl_txq *q, u32 index)
871 return index & (q->n_window - 1);
874 static inline void *iwl_pcie_get_tfd(struct iwl_trans *trans,
875 struct iwl_txq *txq, int idx)
877 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
879 if (trans->cfg->use_tfh)
880 idx = iwl_pcie_get_cmd_index(txq, idx);
882 return txq->tfds + trans_pcie->tfd_size * idx;
885 static inline const char *queue_name(struct device *dev,
886 struct iwl_trans_pcie *trans_p, int i)
888 if (trans_p->shared_vec_mask) {
889 int vec = trans_p->shared_vec_mask &
890 IWL_SHARED_IRQ_FIRST_RSS ? 1 : 0;
893 return DRV_NAME ": shared IRQ";
895 return devm_kasprintf(dev, GFP_KERNEL,
896 DRV_NAME ": queue %d", i + vec);
899 return DRV_NAME ": default queue";
901 if (i == trans_p->alloc_vecs - 1)
902 return DRV_NAME ": exception";
904 return devm_kasprintf(dev, GFP_KERNEL,
905 DRV_NAME ": queue %d", i);
908 static inline void iwl_enable_rfkill_int(struct iwl_trans *trans)
910 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
912 IWL_DEBUG_ISR(trans, "Enabling rfkill interrupt\n");
913 if (!trans_pcie->msix_enabled) {
914 trans_pcie->inta_mask = CSR_INT_BIT_RF_KILL;
915 iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask);
917 iwl_write32(trans, CSR_MSIX_FH_INT_MASK_AD,
918 trans_pcie->fh_init_mask);
919 iwl_enable_hw_int_msk_msix(trans,
920 MSIX_HW_INT_CAUSES_REG_RF_KILL);
923 if (trans->cfg->device_family == IWL_DEVICE_FAMILY_9000) {
925 * On 9000-series devices this bit isn't enabled by default, so
		 * when we power down the device we need to set the bit to allow it
927 * to wake up the PCI-E bus for RF-kill interrupts.
929 iwl_set_bit(trans, CSR_GP_CNTRL,
930 CSR_GP_CNTRL_REG_FLAG_RFKILL_WAKE_L1A_EN);
934 void iwl_pcie_handle_rfkill_irq(struct iwl_trans *trans);
936 static inline void iwl_wake_queue(struct iwl_trans *trans,
939 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
941 if (test_and_clear_bit(txq->id, trans_pcie->queue_stopped)) {
942 IWL_DEBUG_TX_QUEUES(trans, "Wake hwq %d\n", txq->id);
943 iwl_op_mode_queue_not_full(trans->op_mode, txq->id);
947 static inline void iwl_stop_queue(struct iwl_trans *trans,
950 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
952 if (!test_and_set_bit(txq->id, trans_pcie->queue_stopped)) {
953 iwl_op_mode_queue_full(trans->op_mode, txq->id);
954 IWL_DEBUG_TX_QUEUES(trans, "Stop hwq %d\n", txq->id);
		IWL_DEBUG_TX_QUEUES(trans, "hwq %d already stopped\n",
				    txq->id);
960 static inline bool iwl_queue_used(const struct iwl_txq *q, int i)
962 int index = iwl_pcie_get_cmd_index(q, i);
963 int r = iwl_pcie_get_cmd_index(q, q->read_ptr);
964 int w = iwl_pcie_get_cmd_index(q, q->write_ptr);
	return w >= r ?
	       (index >= r && index < w) :
	       !(index < r && index >= w);
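/*
 * In other words: while the queue has not wrapped (w >= r) an entry is in
 * use iff r <= index < w; once it has wrapped, everything outside the free
 * region [w, r) is in use.
 */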
971 static inline bool iwl_is_rfkill_set(struct iwl_trans *trans)
973 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
975 lockdep_assert_held(&trans_pcie->mutex);
977 if (trans_pcie->debug_rfkill)
980 return !(iwl_read32(trans, CSR_GP_CNTRL) &
981 CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW);
984 static inline void __iwl_trans_pcie_set_bits_mask(struct iwl_trans *trans,
985 u32 reg, u32 mask, u32 value)
989 #ifdef CONFIG_IWLWIFI_DEBUG
990 WARN_ON_ONCE(value & ~mask);
	v = iwl_read32(trans, reg);
	v &= ~mask;
	v |= value;
	iwl_write32(trans, reg, v);
999 static inline void __iwl_trans_pcie_clear_bit(struct iwl_trans *trans,
1002 __iwl_trans_pcie_set_bits_mask(trans, reg, mask, 0);
1005 static inline void __iwl_trans_pcie_set_bit(struct iwl_trans *trans,
1008 __iwl_trans_pcie_set_bits_mask(trans, reg, mask, mask);
1011 void iwl_trans_pcie_rf_kill(struct iwl_trans *trans, bool state);
1013 #ifdef CONFIG_IWLWIFI_DEBUGFS
1014 int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans);
1016 static inline int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans)
1022 int iwl_pci_fw_exit_d0i3(struct iwl_trans *trans);
1023 int iwl_pci_fw_enter_d0i3(struct iwl_trans *trans);
1025 void iwl_pcie_enable_rx_wake(struct iwl_trans *trans, bool enable);
1027 void iwl_pcie_rx_allocator_work(struct work_struct *data);
1029 /* common functions that are used by gen2 transport */
1030 void iwl_pcie_apm_config(struct iwl_trans *trans);
1031 int iwl_pcie_prepare_card_hw(struct iwl_trans *trans);
1032 void iwl_pcie_synchronize_irqs(struct iwl_trans *trans);
1033 bool iwl_pcie_check_hw_rf_kill(struct iwl_trans *trans);
1034 void iwl_trans_pcie_handle_stop_rfkill(struct iwl_trans *trans,
1035 bool was_in_rfkill);
1036 void iwl_pcie_txq_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq);
1037 int iwl_queue_space(struct iwl_trans *trans, const struct iwl_txq *q);
1038 void iwl_pcie_apm_stop_master(struct iwl_trans *trans);
1039 void iwl_pcie_conf_msix_hw(struct iwl_trans_pcie *trans_pcie);
1040 int iwl_pcie_txq_init(struct iwl_trans *trans, struct iwl_txq *txq,
1041 int slots_num, bool cmd_queue);
1042 int iwl_pcie_txq_alloc(struct iwl_trans *trans,
1043 struct iwl_txq *txq, int slots_num, bool cmd_queue);
1044 int iwl_pcie_alloc_dma_ptr(struct iwl_trans *trans,
1045 struct iwl_dma_ptr *ptr, size_t size);
1046 void iwl_pcie_free_dma_ptr(struct iwl_trans *trans, struct iwl_dma_ptr *ptr);
1047 void iwl_pcie_apply_destination(struct iwl_trans *trans);
1048 void iwl_pcie_free_tso_page(struct iwl_trans_pcie *trans_pcie,
1049 struct sk_buff *skb);
1051 struct iwl_tso_hdr_page *get_page_hdr(struct iwl_trans *trans, size_t len);
1054 /* common functions that are used by gen3 transport */
1055 void iwl_pcie_alloc_fw_monitor(struct iwl_trans *trans, u8 max_power);
1057 /* transport gen 2 exported functions */
1058 int iwl_trans_pcie_gen2_start_fw(struct iwl_trans *trans,
1059 const struct fw_img *fw, bool run_in_rfkill);
1060 void iwl_trans_pcie_gen2_fw_alive(struct iwl_trans *trans, u32 scd_addr);
1061 int iwl_trans_pcie_dyn_txq_alloc(struct iwl_trans *trans,
1062 struct iwl_tx_queue_cfg_cmd *cmd,
1063 int cmd_id, int size,
1064 unsigned int timeout);
1065 void iwl_trans_pcie_dyn_txq_free(struct iwl_trans *trans, int queue);
1066 int iwl_trans_pcie_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb,
1067 struct iwl_device_cmd *dev_cmd, int txq_id);
1068 int iwl_trans_pcie_gen2_send_hcmd(struct iwl_trans *trans,
1069 struct iwl_host_cmd *cmd);
1070 void iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans,
1072 void _iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans, bool low_power);
1073 void iwl_pcie_gen2_txq_unmap(struct iwl_trans *trans, int txq_id);
1074 void iwl_pcie_gen2_tx_free(struct iwl_trans *trans);
1075 void iwl_pcie_gen2_tx_stop(struct iwl_trans *trans);
1076 #endif /* __iwl_trans_int_pcie_h__ */