/******************************************************************************
 *
 * Copyright(c) 2003 - 2015 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016 Intel Deutschland GmbH
 *
 * Portions of this file are derived from the ipw3945 project, as well
 * as portions of the ieee80211 subsystem header files.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
 *
 * The full GNU General Public License is included in this distribution in the
 * file called LICENSE.
 *
 * Contact Information:
 *  Intel Linux Wireless <linuxwifi@intel.com>
 *  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 *****************************************************************************/
#ifndef __iwl_trans_int_pcie_h__
#define __iwl_trans_int_pcie_h__

#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/skbuff.h>
#include <linux/wait.h>
#include <linux/pci.h>
#include <linux/timer.h>

#include "iwl-fh.h"
#include "iwl-csr.h"
#include "iwl-trans.h"
#include "iwl-debug.h"
#include "iwl-io.h"
#include "iwl-op-mode.h"

/* We need 2 entries for the TX command and header, and another one might
 * be needed for potential data in the SKB's head. The remaining ones can
 * be used for frags.
 */
#define IWL_PCIE_MAX_FRAGS (IWL_NUM_OF_TBS - 3)
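
/* Illustration (assuming IWL_NUM_OF_TBS is 20, as defined in iwl-fh.h):
 * a TFD then carries 20 TBs, of which 2 are reserved for the TX command
 * and header and 1 for the SKB head, leaving IWL_PCIE_MAX_FRAGS == 17 TBs
 * available for page fragments. */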

/*
 * RX related structures and functions
 */
#define RX_NUM_QUEUES 1
#define RX_POST_REQ_ALLOC 2
#define RX_CLAIM_REQ_ALLOC 8
#define RX_PENDING_WATERMARK 16
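
/* How these thresholds interact (a sketch; rx.c has the authoritative
 * logic): used RBDs are handed to the background allocator and claimed
 * back in batches of RX_CLAIM_REQ_ALLOC; an allocation request is posted
 * RX_POST_REQ_ALLOC RBDs into each batch so the worker gets a head start,
 * and RX_PENDING_WATERMARK distinguishes a small backlog of pending
 * requests from one large enough to be treated as urgent. */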

struct iwl_host_cmd;

/* This file includes the declarations that are internal to the
 * trans_pcie layer */

/**
 * struct iwl_rx_mem_buffer
 * @page_dma: bus address of rxb page
 * @page: driver's pointer to the rxb page
 * @vid: index of this rxb in the global table
 */
struct iwl_rx_mem_buffer {
	dma_addr_t page_dma;
	struct page *page;
	u16 vid;
	struct list_head list;
};

/**
 * struct isr_statistics - interrupt statistics
 *
 */
struct isr_statistics {
	u32 hw;
	u32 sw;
	u32 err_code;
	u32 sch;
	u32 alive;
	u32 rfkill;
	u32 ctkill;
	u32 wakeup;
	u32 rx;
	u32 tx;
	u32 unhandled;
};

/**
 * struct iwl_rxq - Rx queue
 * @id: queue index
 * @bd: driver's pointer to buffer of receive buffer descriptors (rbd).
 *	Address size is 32 bit in pre-9000 devices and 64 bit in 9000 devices.
 * @bd_dma: bus address of buffer of receive buffer descriptors (rbd)
 * @used_bd: driver's pointer to buffer of used receive buffer descriptors (rbd)
 * @used_bd_dma: physical address of buffer of used receive buffer descriptors (rbd)
 * @read: Shared index to newest available Rx buffer
 * @write: Shared index to oldest written Rx packet
 * @free_count: Number of pre-allocated buffers in rx_free
 * @used_count: Number of RBDs handed to the allocator for allocation
 * @write_actual: last write pointer value actually written to the device
 * @rx_free: list of RBDs with allocated RB ready for use
 * @rx_used: list of RBDs with no RB attached
 * @need_update: flag to indicate we need to update read/write index
 * @rb_stts: driver's pointer to receive buffer status
 * @rb_stts_dma: bus address of receive buffer status
 * @lock: protects the queue's lists and indices
 * @queue: actual rx queue. Not used for multi-rx queue.
 *
 * NOTE: rx_free and rx_used are used as a FIFO for iwl_rx_mem_buffers
 */
struct iwl_rxq {
	int id;
	void *bd;
	dma_addr_t bd_dma;
	__le32 *used_bd;
	dma_addr_t used_bd_dma;
	u32 read;
	u32 write;
	u32 free_count;
	u32 used_count;
	u32 write_actual;
	u32 queue_size;
	struct list_head rx_free;
	struct list_head rx_used;
	bool need_update;
	struct iwl_rb_status *rb_stts;
	dma_addr_t rb_stts_dma;
	spinlock_t lock;
	struct napi_struct napi;
	struct iwl_rx_mem_buffer *queue[RX_QUEUE_SIZE];
};

/**
 * struct iwl_rb_allocator - Rx allocator
 * @req_pending: number of requests the allocator has not processed yet
 * @req_ready: number of requests honored and ready for claiming
 * @rbd_allocated: RBDs with pages allocated and ready to be handled to
 *	the queue. This is a list of &struct iwl_rx_mem_buffer
 * @rbd_empty: RBDs with no page attached for allocator use. This is a list
 *	of &struct iwl_rx_mem_buffer
 * @lock: protects the rbd_allocated and rbd_empty lists
 * @alloc_wq: work queue for background calls
 * @rx_alloc: work struct for background calls
 */
struct iwl_rb_allocator {
	atomic_t req_pending;
	atomic_t req_ready;
	struct list_head rbd_allocated;
	struct list_head rbd_empty;
	spinlock_t lock;
	struct workqueue_struct *alloc_wq;
	struct work_struct rx_alloc;
};

struct iwl_dma_ptr {
	dma_addr_t dma;
	void *addr;
	size_t size;
};

/**
 * iwl_queue_inc_wrap - increment queue index, wrap back to beginning
 * @index: current index
 */
static inline int iwl_queue_inc_wrap(int index)
{
	return ++index & (TFD_QUEUE_SIZE_MAX - 1);
}

/**
 * iwl_queue_dec_wrap - decrement queue index, wrap back to end
 * @index: current index
 */
static inline int iwl_queue_dec_wrap(int index)
{
	return --index & (TFD_QUEUE_SIZE_MAX - 1);
}
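
/* The masking relies on TFD_QUEUE_SIZE_MAX being a power of two (256), so
 * e.g. iwl_queue_inc_wrap(255) == 0 and iwl_queue_dec_wrap(0) == 255,
 * without any branches. */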

struct iwl_cmd_meta {
	/* only for SYNC commands, iff the reply skb is wanted */
	struct iwl_host_cmd *source;
	u32 flags;
};

/*
 * Generic queue structure
 *
 * Contains common data for Rx and Tx queues.
 *
 * Note the difference between TFD_QUEUE_SIZE_MAX and n_window: the hardware
 * always assumes 256 descriptors, so TFD_QUEUE_SIZE_MAX is always 256
 * (unless there are HW changes in the future). For the normal TX
 * queues, n_window, which is the size of the software queue data,
 * is also 256; however, for the command queue, n_window is only
 * 32 since we don't need so many commands pending. Since the HW
 * still uses 256 BDs for DMA though, TFD_QUEUE_SIZE_MAX stays 256. As a
 * result, the software buffers (in the variables @meta, @txb in struct
 * iwl_txq) only have 32 entries, while the HW buffers (@tfds in the same
 * struct) have 256.
 * This means that we end up with the following:
 *	HW entries: | 0 | ... | N * 32 | ... | N * 32 + 31 | ... | 255 |
 *	SW entries: | 0      | ... | 31          |
 * where N is a number between 0 and 7. This means that the SW
 * data is a window overlaid over the HW queue.
 */
struct iwl_queue {
	int write_ptr;	/* 1-st empty entry (index) host_w */
	int read_ptr;	/* last used entry (index) host_r */
	/* use for monitoring and recovering the stuck queue */
	dma_addr_t dma_addr;	/* physical addr for BD's */
	int n_window;	/* safe queue window */
	u32 id;
	int low_mark;	/* low watermark, resume queue if free
			 * space more than this */
	int high_mark;	/* high watermark, stop queue if free
			 * space less than this */
};
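
/* Worked example of the n_window overlay described above: on the command
 * queue (n_window == 32), HW index 200 maps to SW entry 200 & 31 == 8,
 * i.e. entry 8 of the 32-entry software window backs TFD slot 200 (this is
 * what get_cmd_index() below computes). */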

#define TFD_TX_CMD_SLOTS 256
#define TFD_CMD_SLOTS 32

/*
 * The FH will write back to the first TB only, so we need
 * to copy some data into the buffer regardless of whether
 * it should be mapped or not. This indicates how big the
 * first TB must be to include the scratch buffer. Since
 * the scratch is 4 bytes at offset 12, it's 16 now. If we
 * make it bigger then allocations will be bigger and copy
 * slower, so that's probably not useful.
 */
#define IWL_HCMD_SCRATCHBUF_SIZE	16

struct iwl_pcie_txq_entry {
	struct iwl_device_cmd *cmd;
	struct sk_buff *skb;
	/* buffer to free after command completes */
	const void *free_buf;
	struct iwl_cmd_meta meta;
};

struct iwl_pcie_txq_scratch_buf {
	struct iwl_cmd_header hdr;
	u8 buf[8];
	__le32 scratch;
};
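
/* Layout sanity check (illustrative): struct iwl_cmd_header is 4 bytes and
 * @buf is 8, so @scratch lands at offset 12 and the struct is exactly
 * IWL_HCMD_SCRATCHBUF_SIZE (16) bytes, matching the comment above. */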

/**
 * struct iwl_txq - Tx Queue for DMA
 * @q: generic Rx/Tx queue descriptor
 * @tfds: transmit frame descriptors (DMA memory)
 * @scratchbufs: start of command headers, including scratch buffers, for
 *	the writeback -- this is DMA memory and an array holding one buffer
 *	for each command on the queue
 * @scratchbufs_dma: DMA address for the scratchbufs start
 * @entries: transmit entries (driver state)
 * @lock: queue lock
 * @stuck_timer: timer that fires if queue gets stuck
 * @trans_pcie: pointer back to transport (for timer)
 * @need_update: indicates need to update read/write index
 * @active: stores if queue is active
 * @ampdu: true if this queue is an ampdu queue for a specific RA/TID
 * @wd_timeout: queue watchdog timeout (jiffies) - per queue
 * @frozen: tx stuck queue timer is frozen
 * @frozen_expiry_remainder: remember how long until the timer fires
 *
 * A Tx queue consists of a circular buffer of BDs (a.k.a. TFDs, transmit
 * frame descriptors) and required locking structures.
 */
struct iwl_txq {
	struct iwl_queue q;
	struct iwl_tfd *tfds;
	struct iwl_pcie_txq_scratch_buf *scratchbufs;
	dma_addr_t scratchbufs_dma;
	struct iwl_pcie_txq_entry *entries;
	spinlock_t lock;
	unsigned long frozen_expiry_remainder;
	struct timer_list stuck_timer;
	struct iwl_trans_pcie *trans_pcie;
	bool need_update;
	bool frozen;
	u8 active;
	bool ampdu;
	bool block;
	unsigned long wd_timeout;
	struct sk_buff_head overflow_q;
};

static inline dma_addr_t
iwl_pcie_get_scratchbuf_dma(struct iwl_txq *txq, int idx)
{
	return txq->scratchbufs_dma +
	       sizeof(struct iwl_pcie_txq_scratch_buf) * idx;
}
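
/* Note on usage (see tx.c for the authoritative code): the TX path points
 * the first TB of the command at slot @idx to this address, so the FH
 * writeback described above lands in that slot's scratch buffer. */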

struct iwl_tso_hdr_page {
	struct page *page;
	u8 *pos;
};

/**
 * struct iwl_trans_pcie - PCIe transport specific data
 * @rxq: all the RX queue data
 * @rx_pool: initial pool of iwl_rx_mem_buffer for all the queues
 * @global_table: table mapping received VID from hw to rxb
 * @rba: allocator for RX replenishing
 * @drv: pointer to iwl_drv
 * @trans: pointer to the generic transport area
 * @scd_base_addr: scheduler base address in SRAM
 * @scd_bc_tbls: pointer to the byte count table of the scheduler
 * @kw: keep warm address
 * @pci_dev: basic pci-network driver stuff
 * @hw_base: pci hardware address support
 * @ucode_write_complete: indicates that the ucode has been copied.
 * @ucode_write_waitq: wait queue for uCode load
 * @cmd_queue: command queue number
 * @rx_buf_size: Rx buffer size
 * @bc_table_dword: true if the BC table expects DWORD (as opposed to bytes)
 * @scd_set_active: should the transport configure the SCD for HCMD queue
 * @wide_cmd_header: true when ucode supports wide command header format
 * @sw_csum_tx: if true, then the transport will compute the csum of the TXed
 *	frame.
 * @rx_page_order: page order for receive buffer size
 * @reg_lock: protect hw register access
 * @mutex: to protect stop_device / start_fw / start_hw
 * @ref_cmd_in_flight: true when we have a host command in flight
 * @fw_mon_phys: physical address of the buffer for the firmware monitor
 * @fw_mon_page: points to the first page of the buffer for the firmware monitor
 * @fw_mon_size: size of the buffer for the firmware monitor
 */
struct iwl_trans_pcie {
	struct iwl_rxq *rxq;
	struct iwl_rx_mem_buffer rx_pool[MQ_RX_POOL_SIZE];
	struct iwl_rx_mem_buffer *global_table[MQ_RX_TABLE_SIZE];
	struct iwl_rb_allocator rba;
	struct iwl_trans *trans;
	struct iwl_drv *drv;

	struct net_device napi_dev;

	struct __percpu iwl_tso_hdr_page *tso_hdr_page;

	/* INT ICT Table */
	__le32 *ict_tbl;
	dma_addr_t ict_tbl_dma;
	int ict_index;
	bool use_ict;
	bool is_down;
	struct isr_statistics isr_stats;

	spinlock_t irq_lock;
	struct mutex mutex;
	u32 inta_mask;
	u32 scd_base_addr;
	struct iwl_dma_ptr scd_bc_tbls;
	struct iwl_dma_ptr kw;

	struct iwl_txq *txq;
	unsigned long queue_used[BITS_TO_LONGS(IWL_MAX_HW_QUEUES)];
	unsigned long queue_stopped[BITS_TO_LONGS(IWL_MAX_HW_QUEUES)];

	/* PCI bus related data */
	struct pci_dev *pci_dev;
	void __iomem *hw_base;

	bool ucode_write_complete;
	wait_queue_head_t ucode_write_waitq;
	wait_queue_head_t wait_command_queue;
	wait_queue_head_t d0i3_waitq;

	u8 cmd_queue;
	u8 cmd_fifo;
	unsigned int cmd_q_wdg_timeout;
	u8 n_no_reclaim_cmds;
	u8 no_reclaim_cmds[MAX_NO_RECLAIM_CMDS];

	enum iwl_amsdu_size rx_buf_size;
	bool bc_table_dword;
	bool scd_set_active;
	bool wide_cmd_header;
	bool sw_csum_tx;
	u32 rx_page_order;

	/* protect hw register */
	spinlock_t reg_lock;
	bool cmd_hold_nic_awake;
	bool ref_cmd_in_flight;

	/* protect ref counter */
	spinlock_t ref_lock;
	u32 ref_count;

	dma_addr_t fw_mon_phys;
	struct page *fw_mon_page;
	u32 fw_mon_size;
};

static inline struct iwl_trans_pcie *
IWL_TRANS_GET_PCIE_TRANS(struct iwl_trans *trans)
{
	return (void *)trans->trans_specific;
}

static inline struct iwl_trans *
iwl_trans_pcie_get_trans(struct iwl_trans_pcie *trans_pcie)
{
	return container_of((void *)trans_pcie, struct iwl_trans,
			    trans_specific);
}

/*
 * Convention: trans API functions: iwl_trans_pcie_XXX
 *	Other functions: iwl_pcie_XXX
 */
struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
				       const struct pci_device_id *ent,
				       const struct iwl_cfg *cfg);
void iwl_trans_pcie_free(struct iwl_trans *trans);

/*****************************************************
* RX
******************************************************/
int iwl_pcie_rx_init(struct iwl_trans *trans);
irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id);
int iwl_pcie_rx_stop(struct iwl_trans *trans);
void iwl_pcie_rx_free(struct iwl_trans *trans);

/*****************************************************
* ICT - interrupt handling
******************************************************/
irqreturn_t iwl_pcie_isr(int irq, void *data);
int iwl_pcie_alloc_ict(struct iwl_trans *trans);
void iwl_pcie_free_ict(struct iwl_trans *trans);
void iwl_pcie_reset_ict(struct iwl_trans *trans);
void iwl_pcie_disable_ict(struct iwl_trans *trans);

/*****************************************************
* TX / HCMD
******************************************************/
int iwl_pcie_tx_init(struct iwl_trans *trans);
void iwl_pcie_tx_start(struct iwl_trans *trans, u32 scd_base_addr);
int iwl_pcie_tx_stop(struct iwl_trans *trans);
void iwl_pcie_tx_free(struct iwl_trans *trans);
void iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int queue, u16 ssn,
			       const struct iwl_trans_txq_scd_cfg *cfg,
			       unsigned int wdg_timeout);
void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int queue,
				bool configure_scd);
int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
		      struct iwl_device_cmd *dev_cmd, int txq_id);
void iwl_pcie_txq_check_wrptrs(struct iwl_trans *trans);
int iwl_trans_pcie_send_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd);
void iwl_pcie_hcmd_complete(struct iwl_trans *trans,
			    struct iwl_rx_cmd_buffer *rxb);
void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
			    struct sk_buff_head *skbs);
void iwl_trans_pcie_tx_reset(struct iwl_trans *trans);

void iwl_trans_pcie_ref(struct iwl_trans *trans);
void iwl_trans_pcie_unref(struct iwl_trans *trans);

static inline u16 iwl_pcie_tfd_tb_get_len(struct iwl_tfd *tfd, u8 idx)
{
	struct iwl_tfd_tb *tb = &tfd->tbs[idx];

	return le16_to_cpu(tb->hi_n_len) >> 4;
}
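
/* hi_n_len packs the high 4 bits of the 36-bit DMA address in bits 0..3
 * and the TB length in bits 4..15 (see struct iwl_tfd_tb in iwl-fh.h),
 * hence the shift by 4 above. */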

/*****************************************************
* Error handling
******************************************************/
void iwl_pcie_dump_csr(struct iwl_trans *trans);

/*****************************************************
* Helpers
******************************************************/
static inline void iwl_disable_interrupts(struct iwl_trans *trans)
{
	clear_bit(STATUS_INT_ENABLED, &trans->status);

	/* disable interrupts from uCode/NIC to host */
	iwl_write32(trans, CSR_INT_MASK, 0x00000000);

	/* acknowledge/clear/reset any interrupts still pending
	 * from uCode or flow handler (Rx/Tx DMA) */
	iwl_write32(trans, CSR_INT, 0xffffffff);
	iwl_write32(trans, CSR_FH_INT_STATUS, 0xffffffff);
	IWL_DEBUG_ISR(trans, "Disabled interrupts\n");
}

static inline void iwl_enable_interrupts(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	IWL_DEBUG_ISR(trans, "Enabling interrupts\n");
	set_bit(STATUS_INT_ENABLED, &trans->status);
	trans_pcie->inta_mask = CSR_INI_SET_MASK;
	iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask);
}

static inline void iwl_enable_fw_load_int(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	IWL_DEBUG_ISR(trans, "Enabling FW load interrupt\n");
	trans_pcie->inta_mask = CSR_INT_BIT_FH_TX;
	iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask);
}

static inline void iwl_enable_rfkill_int(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	IWL_DEBUG_ISR(trans, "Enabling rfkill interrupt\n");
	trans_pcie->inta_mask = CSR_INT_BIT_RF_KILL;
	iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask);
}

static inline void iwl_wake_queue(struct iwl_trans *trans,
				  struct iwl_txq *txq)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	if (test_and_clear_bit(txq->q.id, trans_pcie->queue_stopped)) {
		IWL_DEBUG_TX_QUEUES(trans, "Wake hwq %d\n", txq->q.id);
		iwl_op_mode_queue_not_full(trans->op_mode, txq->q.id);
	}
}

static inline void iwl_stop_queue(struct iwl_trans *trans,
				  struct iwl_txq *txq)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	if (!test_and_set_bit(txq->q.id, trans_pcie->queue_stopped)) {
		iwl_op_mode_queue_full(trans->op_mode, txq->q.id);
		IWL_DEBUG_TX_QUEUES(trans, "Stop hwq %d\n", txq->q.id);
	} else
		IWL_DEBUG_TX_QUEUES(trans, "hwq %d already stopped\n",
				    txq->q.id);
}

static inline bool iwl_queue_used(const struct iwl_queue *q, int i)
{
	return q->write_ptr >= q->read_ptr ?
		(i >= q->read_ptr && i < q->write_ptr) :
		!(i < q->read_ptr && i >= q->write_ptr);
}
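
/* Example of the wrapped case: with read_ptr == 250 and write_ptr == 5 the
 * in-use range wraps past the end of the ring, so indices 250..255 and
 * 0..4 are "used" while 5..249 are not; the negated test above covers
 * exactly that split. */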

static inline u8 get_cmd_index(struct iwl_queue *q, u32 index)
{
	return index & (q->n_window - 1);
}

static inline bool iwl_is_rfkill_set(struct iwl_trans *trans)
{
	return !(iwl_read32(trans, CSR_GP_CNTRL) &
		CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW);
}

static inline void __iwl_trans_pcie_set_bits_mask(struct iwl_trans *trans,
						  u32 reg, u32 mask, u32 value)
{
	u32 v;

#ifdef CONFIG_IWLWIFI_DEBUG
	WARN_ON_ONCE(value & ~mask);
#endif

	v = iwl_read32(trans, reg);
	v &= ~mask;
	v |= value;
	iwl_write32(trans, reg, v);
}

static inline void __iwl_trans_pcie_clear_bit(struct iwl_trans *trans,
					      u32 reg, u32 mask)
{
	__iwl_trans_pcie_set_bits_mask(trans, reg, mask, 0);
}

static inline void __iwl_trans_pcie_set_bit(struct iwl_trans *trans,
					    u32 reg, u32 mask)
{
	__iwl_trans_pcie_set_bits_mask(trans, reg, mask, mask);
}
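
/* Usage sketch (CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ, from iwl-csr.h, is
 * one real example of such a mask):
 *	__iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL,
 *				 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
 * sets the masked bits, while __iwl_trans_pcie_clear_bit() clears them by
 * passing value == 0 to __iwl_trans_pcie_set_bits_mask(). */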

void iwl_trans_pcie_rf_kill(struct iwl_trans *trans, bool state);

#ifdef CONFIG_IWLWIFI_DEBUGFS
int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans);
#else
static inline int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans)
{
	return 0;
}
#endif

int iwl_pci_fw_exit_d0i3(struct iwl_trans *trans);
int iwl_pci_fw_enter_d0i3(struct iwl_trans *trans);

#endif /* __iwl_trans_int_pcie_h__ */