/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
 * Copyright (C) 2005-2014, 2018-2020 Intel Corporation
 * Copyright (C) 2013-2015 Intel Mobile Communications GmbH
 * Copyright (C) 2016-2017 Intel Deutschland GmbH
 */
#ifndef __iwl_trans_h__
#define __iwl_trans_h__
#include <linux/ieee80211.h>
#include <linux/mm.h> /* for page_address */
#include <linux/lockdep.h>
#include <linux/kernel.h>

#include "iwl-debug.h"
#include "iwl-config.h"
#include "fw/img.h"
#include "iwl-op-mode.h"
#include <linux/firmware.h>
#include "fw/api/cmdhdr.h"
#include "fw/api/txq.h"
#include "fw/api/dbg-tlv.h"
#include "iwl-dbg-tlv.h"
/**
 * DOC: Transport layer - what is it ?
 *
 * The transport layer is the layer that deals with the HW directly. It
 * provides an abstraction of the underlying HW to the upper layer. The
 * transport layer doesn't provide any policy, algorithm or anything of this
 * kind, but only mechanisms to make the HW do something. It is not completely
 * stateless but close to it.
 *
 * We will have an implementation for each different supported bus.
 */
/**
 * DOC: Life cycle of the transport layer
 *
 * The transport layer has a very precise life cycle.
 *
 *	1) A helper function is called during the module initialization and
 *	   registers the bus driver's ops with the transport's alloc function.
 *	2) Bus's probe calls to the transport layer's allocation functions.
 *	   Of course this function is bus specific.
 *	3) These allocation functions will spawn the upper layer which will
 *	   register mac80211.
 *	4) At some point (i.e. mac80211's start call), the op_mode will call
 *	   the following sequence: start_hw, start_fw.
 *	5) Then when finished (or reset): stop_device.
 *	6) Eventually, the free function will be called.
 */
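
/*
 * Illustrative call sequence (not part of the original header) showing how
 * an op_mode typically drives this life cycle with the wrappers declared
 * further down in this file; error handling is omitted:
 *
 *	iwl_trans_configure(trans, &trans_cfg);
 *	iwl_trans_start_hw(trans);
 *	iwl_trans_start_fw(trans, fw, run_in_rfkill);
 *	... wait for the ALIVE notification, then iwl_trans_fw_alive() ...
 *	iwl_trans_stop_device(trans);
 */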
#define IWL_TRANS_FW_DBG_DOMAIN(trans)	IWL_FW_INI_DOMAIN_ALWAYS_ON

#define FH_RSCSR_FRAME_SIZE_MSK	0x00003FFF	/* bits 0-13 */
#define FH_RSCSR_FRAME_INVALID	0x55550000
#define FH_RSCSR_FRAME_ALIGN	0x40
#define FH_RSCSR_RPA_EN		BIT(25)
#define FH_RSCSR_RADA_EN	BIT(26)
#define FH_RSCSR_RXQ_POS	16
#define FH_RSCSR_RXQ_MASK	0x3F0000
struct iwl_rx_packet {
	/*
	 * The first 4 bytes of the RX frame header contain both the RX frame
	 * size and some flags.
	 * Bit fields:
	 * 31:    flag flush RB request
	 * 30:    flag ignore TC (terminal counter) request
	 * 29:    flag fast IRQ request
	 * 22:    Checksum enabled
	 * 13-00: RX frame size
	 */
	__le32 len_n_flags;
	struct iwl_cmd_header hdr;
	u8 data[];
} __packed;
static inline u32 iwl_rx_packet_len(const struct iwl_rx_packet *pkt)
{
	return le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
}

static inline u32 iwl_rx_packet_payload_len(const struct iwl_rx_packet *pkt)
{
	return iwl_rx_packet_len(pkt) - sizeof(pkt->hdr);
}
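
/*
 * Illustrative helper (not part of the original header): check that a
 * received packet is large enough to hold a notification of a given size.
 * The helper name is made up for this example.
 */
static inline bool iwl_rx_packet_payload_fits(const struct iwl_rx_packet *pkt,
					      size_t notif_size)
{
	return iwl_rx_packet_payload_len(pkt) >= notif_size;
}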
/**
 * enum CMD_MODE - how to send the host commands ?
 *
 * @CMD_ASYNC: Return right away and don't wait for the response
 * @CMD_WANT_SKB: Not valid with CMD_ASYNC. The caller needs the buffer of
 *	the response. The caller needs to call iwl_free_resp when done.
 * @CMD_WANT_ASYNC_CALLBACK: the op_mode's async callback function must be
 *	called after this command completes. Valid only with CMD_ASYNC.
 */
enum CMD_MODE {
	CMD_ASYNC		= BIT(0),
	CMD_WANT_SKB		= BIT(1),
	CMD_SEND_IN_RFKILL	= BIT(2),
	CMD_WANT_ASYNC_CALLBACK	= BIT(3),
};
#define DEF_CMD_PAYLOAD_SIZE 320

/**
 * struct iwl_device_cmd
 *
 * For allocation of the command and tx queues, this establishes the overall
 * size of the largest command we send to uCode, except for commands that
 * aren't fully copied and use other TFD space.
 */
struct iwl_device_cmd {
	union {
		struct {
			struct iwl_cmd_header hdr;	/* uCode API */
			u8 payload[DEF_CMD_PAYLOAD_SIZE];
		};
		struct {
			struct iwl_cmd_header_wide hdr_wide;
			u8 payload_wide[DEF_CMD_PAYLOAD_SIZE -
					sizeof(struct iwl_cmd_header_wide) +
					sizeof(struct iwl_cmd_header)];
		};
	};
} __packed;
/**
 * struct iwl_device_tx_cmd - buffer for TX command
 * @hdr: the header
 * @payload: the payload placeholder
 *
 * The actual structure is sized dynamically according to need.
 */
struct iwl_device_tx_cmd {
	struct iwl_cmd_header hdr;
	u8 payload[];
} __packed;

#define TFD_MAX_PAYLOAD_SIZE (sizeof(struct iwl_device_cmd))
/*
 * number of transfer buffers (fragments) per transmit frame descriptor;
 * this is just the driver's idea, the hardware supports 20
 */
#define IWL_MAX_CMD_TBS_PER_TFD	2

/* We need 2 entries for the TX command and header, and another one might
 * be needed for potential data in the SKB's head. The remaining ones can
 * be used for frags.
 */
#define IWL_TRANS_MAX_FRAGS(trans) ((trans)->txqs.tfd.max_tbs - 3)
/**
 * enum iwl_hcmd_dataflag - flag for each one of the chunks of the command
 *
 * @IWL_HCMD_DFL_NOCOPY: By default, the command is copied to the host command's
 *	ring. The transport layer doesn't map the command's buffer to DMA, but
 *	rather copies it to a previously allocated DMA buffer. This flag tells
 *	the transport layer not to copy the command, but to map the existing
 *	buffer (that is passed in) instead. This saves the memcpy and allows
 *	commands that are bigger than the fixed buffer to be submitted.
 *	Note that a TFD entry after a NOCOPY one cannot be a normal copied one.
 * @IWL_HCMD_DFL_DUP: Only valid without NOCOPY, duplicate the memory for this
 *	chunk internally and free it again after the command completes. This
 *	can (currently) be used only once per command.
 *	Note that a TFD entry after a DUP one cannot be a normal copied one.
 */
enum iwl_hcmd_dataflag {
	IWL_HCMD_DFL_NOCOPY	= BIT(0),
	IWL_HCMD_DFL_DUP	= BIT(1),
};
enum iwl_error_event_table_status {
	IWL_ERROR_EVENT_TABLE_LMAC1 = BIT(0),
	IWL_ERROR_EVENT_TABLE_LMAC2 = BIT(1),
	IWL_ERROR_EVENT_TABLE_UMAC = BIT(2),
};
/**
 * struct iwl_host_cmd - Host command to the uCode
 *
 * @data: array of chunks that composes the data of the host command
 * @resp_pkt: response packet, if %CMD_WANT_SKB was set
 * @_rx_page_order: (internally used to free response packet)
 * @_rx_page_addr: (internally used to free response packet)
 * @flags: can be CMD_*
 * @len: array of the lengths of the chunks in data
 * @dataflags: IWL_HCMD_DFL_*
 * @id: command id of the host command, for wide commands encoding the
 *	version and group as well
 */
struct iwl_host_cmd {
	const void *data[IWL_MAX_CMD_TBS_PER_TFD];
	struct iwl_rx_packet *resp_pkt;
	unsigned long _rx_page_addr;
	u16 _rx_page_order;

	u32 flags;
	u32 id;
	u16 len[IWL_MAX_CMD_TBS_PER_TFD];
	u8 dataflags[IWL_MAX_CMD_TBS_PER_TFD];
};

static inline void iwl_free_resp(struct iwl_host_cmd *cmd)
{
	free_pages(cmd->_rx_page_addr, cmd->_rx_page_order);
}
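
/*
 * Illustrative sketch (not part of the original header): set up a synchronous
 * command whose second chunk is too big to be copied into the pre-allocated
 * command buffer, so it is mapped in place with %IWL_HCMD_DFL_NOCOPY. The
 * caller would then pass the command to iwl_trans_send_cmd() and, because
 * %CMD_WANT_SKB is set, release the response with iwl_free_resp(). All
 * parameter names here are invented for the example.
 */
static inline void iwl_host_cmd_example_init(struct iwl_host_cmd *cmd, u32 id,
					     const void *hdr, u16 hdr_len,
					     const void *tbl, u16 tbl_len)
{
	*cmd = (struct iwl_host_cmd) {
		.id = id,
		.flags = CMD_WANT_SKB,	/* caller wants the response packet */
	};

	cmd->data[0] = hdr;	/* small chunk, copied by the transport */
	cmd->len[0] = hdr_len;
	cmd->data[1] = tbl;	/* big chunk, DMA-mapped in place */
	cmd->len[1] = tbl_len;
	cmd->dataflags[1] = IWL_HCMD_DFL_NOCOPY;
}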
struct iwl_rx_cmd_buffer {
	struct page *_page;
	int _offset;
	bool _page_stolen;
	u32 _rx_page_order;
	unsigned int truesize;
};

static inline void *rxb_addr(struct iwl_rx_cmd_buffer *r)
{
	return (void *)((unsigned long)page_address(r->_page) + r->_offset);
}

static inline int rxb_offset(struct iwl_rx_cmd_buffer *r)
{
	return r->_offset;
}

static inline struct page *rxb_steal_page(struct iwl_rx_cmd_buffer *r)
{
	r->_page_stolen = true;
	return r->_page;
}

static inline void iwl_free_rxb(struct iwl_rx_cmd_buffer *r)
{
	__free_pages(r->_page, r->_rx_page_order);
}
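
/*
 * Illustrative sketch (not part of the original header): the typical start of
 * an op_mode RX handler - get the iwl_rx_packet out of the buffer and look at
 * its payload. The helper name is made up for this example.
 */
static inline void *iwl_rx_cmd_buffer_example_payload(struct iwl_rx_cmd_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);

	return pkt->data;
}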
#define MAX_NO_RECLAIM_CMDS	6

#define IWL_MASK(lo, hi) ((1 << (hi)) | ((1 << (hi)) - (1 << (lo))))
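
/*
 * Worked example (not from the original file): IWL_MASK(2, 5) expands to
 * (1 << 5) | ((1 << 5) - (1 << 2)) = 0x20 | 0x1c = 0x3c, i.e. bits 2..5 set.
 */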
/*
 * Maximum number of HW queues the transport layer
 * currently supports
 */
#define IWL_MAX_HW_QUEUES		32
#define IWL_MAX_TVQM_QUEUES		512

#define IWL_MAX_TID_COUNT	8
#define IWL_MGMT_TID		15
#define IWL_FRAME_LIMIT		64
#define IWL_MAX_RX_HW_QUEUES	16
#define IWL_9000_MAX_RX_HW_QUEUES	6
/**
 * enum iwl_d3_status - WoWLAN image/device status
 * @IWL_D3_STATUS_ALIVE: firmware is still running after resume
 * @IWL_D3_STATUS_RESET: device was reset while suspended
 */
enum iwl_d3_status {
	IWL_D3_STATUS_ALIVE,
	IWL_D3_STATUS_RESET,
};
/**
 * enum iwl_trans_status: transport status flags
 * @STATUS_SYNC_HCMD_ACTIVE: a SYNC command is being processed
 * @STATUS_DEVICE_ENABLED: APM is enabled
 * @STATUS_TPOWER_PMI: the device might be asleep (need to wake it up)
 * @STATUS_INT_ENABLED: interrupts are enabled
 * @STATUS_RFKILL_HW: the actual HW state of the RF-kill switch
 * @STATUS_RFKILL_OPMODE: RF-kill state reported to opmode
 * @STATUS_FW_ERROR: the fw is in error state
 * @STATUS_TRANS_GOING_IDLE: shutting down the trans, only special commands
 *	allowed
 * @STATUS_TRANS_IDLE: the trans is idle - general commands are not to be sent
 * @STATUS_TRANS_DEAD: trans is dead - avoid any read/write operation
 */
enum iwl_trans_status {
	STATUS_SYNC_HCMD_ACTIVE,
	STATUS_DEVICE_ENABLED,
	STATUS_TPOWER_PMI,
	STATUS_INT_ENABLED,
	STATUS_RFKILL_HW,
	STATUS_RFKILL_OPMODE,
	STATUS_FW_ERROR,
	STATUS_TRANS_GOING_IDLE,
	STATUS_TRANS_IDLE,
	STATUS_TRANS_DEAD,
};
309 iwl_trans_get_rb_size_order(enum iwl_amsdu_size rb_size)
313 return get_order(2 * 1024);
315 return get_order(4 * 1024);
317 return get_order(8 * 1024);
319 return get_order(16 * 1024);
327 iwl_trans_get_rb_size(enum iwl_amsdu_size rb_size)
struct iwl_hcmd_names {
	u8 cmd_id;
	const char *const cmd_name;
};

#define HCMD_NAME(x)	\
	{ .cmd_id = x, .cmd_name = #x }

struct iwl_hcmd_arr {
	const struct iwl_hcmd_names *arr;
	int size;
};

#define HCMD_ARR(x)	\
	{ .arr = x, .size = ARRAY_SIZE(x) }
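
/*
 * Illustrative example (not part of the original header): how an op_mode
 * typically declares the name tables used by iwl_get_cmd_string(). The enum
 * and command names below are placeholders invented for this sketch.
 */
enum iwl_example_cmds {
	EXAMPLE_CMD_FOO	= 0x1,
	EXAMPLE_CMD_BAR	= 0x2,
};

static const struct iwl_hcmd_names iwl_example_cmd_names[] __maybe_unused = {
	HCMD_NAME(EXAMPLE_CMD_FOO),
	HCMD_NAME(EXAMPLE_CMD_BAR),
};

static const struct iwl_hcmd_arr iwl_example_command_groups[] __maybe_unused = {
	HCMD_ARR(iwl_example_cmd_names),
};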
/**
 * struct iwl_trans_config - transport configuration
 *
 * @op_mode: pointer to the upper layer.
 * @cmd_queue: the index of the command queue.
 *	Must be set before start_fw.
 * @cmd_fifo: the fifo for host commands
 * @cmd_q_wdg_timeout: the timeout of the watchdog timer for the command queue.
 * @no_reclaim_cmds: Some devices erroneously don't set the
 *	SEQ_RX_FRAME bit on some notifications, this is the
 *	list of such notifications to filter. Max length is
 *	%MAX_NO_RECLAIM_CMDS.
 * @n_no_reclaim_cmds: # of commands in list
 * @rx_buf_size: RX buffer size needed for A-MSDUs;
 *	if unset 4k will be the RX buffer size
 * @bc_table_dword: set to true if the BC table expects the byte count to be
 *	in DWORD (as opposed to bytes)
 * @scd_set_active: should the transport configure the SCD for HCMD queue
 * @command_groups: array of command groups, each member is an array of the
 *	commands in the group; for debugging only
 * @command_groups_size: number of command groups, to avoid illegal access
 * @cb_data_offs: offset inside skb->cb to store transport data at, must have
 *	space for at least two pointers
 * @fw_reset_handshake: firmware supports reset flow handshake
 */
struct iwl_trans_config {
	struct iwl_op_mode *op_mode;

	u8 cmd_queue;
	u8 cmd_fifo;
	unsigned int cmd_q_wdg_timeout;
	const u8 *no_reclaim_cmds;
	unsigned int n_no_reclaim_cmds;

	enum iwl_amsdu_size rx_buf_size;
	bool bc_table_dword;
	bool scd_set_active;
	const struct iwl_hcmd_arr *command_groups;
	int command_groups_size;

	u8 cb_data_offs;
	bool fw_reset_handshake;
};
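
/*
 * Illustrative sketch (not part of the original header): the minimum an
 * op_mode might fill in before handing the configuration to
 * iwl_trans_configure(). The queue index and FIFO number are placeholders,
 * not values taken from a real op_mode.
 */
static inline void iwl_trans_config_example_init(struct iwl_trans_config *cfg,
						 struct iwl_op_mode *op_mode)
{
	*cfg = (struct iwl_trans_config) {
		.op_mode = op_mode,
		.cmd_queue = 0,		/* placeholder queue index */
		.cmd_fifo = 7,		/* placeholder FIFO number */
		.rx_buf_size = IWL_AMSDU_4K,
		.bc_table_dword = true,
	};
}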
404 struct iwl_trans_dump_data {
411 struct iwl_trans_txq_scd_cfg {
/**
 * struct iwl_trans_rxq_dma_data - RX queue DMA data
 * @fr_bd_cb: DMA address of free BD cyclic buffer
 * @fr_bd_wid: Initial write index of the free BD cyclic buffer
 * @urbd_stts_wrptr: DMA address of urbd_stts_wrptr
 * @ur_bd_cb: DMA address of used BD cyclic buffer
 */
struct iwl_trans_rxq_dma_data {
	u64 fr_bd_cb;
	u32 fr_bd_wid;
	u64 urbd_stts_wrptr;
	u64 ur_bd_cb;
};
/**
 * struct iwl_trans_ops - transport specific operations
436 * All the handlers MUST be implemented
438 * @start_hw: starts the HW. From that point on, the HW can send interrupts.
440 * @op_mode_leave: Turn off the HW RF kill indication if on
442 * @start_fw: allocates and inits all the resources for the transport
443 * layer. Also kick a fw image.
445 * @fw_alive: called when the fw sends alive notification. If the fw provides
446 * the SCD base address in SRAM, then provide it here, or 0 otherwise.
448 * @stop_device: stops the whole device (embedded CPU put to reset) and stops
449 * the HW. From that point on, the HW will be stopped but will still issue
450 * an interrupt if the HW RF kill switch is triggered.
451 * This callback must do the right thing and not crash even if %start_hw()
452 * was called but not &start_fw(). May sleep.
453 * @d3_suspend: put the device into the correct mode for WoWLAN during
454 * suspend. This is optional, if not implemented WoWLAN will not be
455 * supported. This callback may sleep.
456 * @d3_resume: resume the device after WoWLAN, enabling the opmode to
457 * talk to the WoWLAN image to get its status. This is optional, if not
458 * implemented WoWLAN will not be supported. This callback may sleep.
 * @send_cmd: send a host command. Must return -ERFKILL if RFkill is asserted.
460 * If RFkill is asserted in the middle of a SYNC host command, it must
461 * return -ERFKILL straight away.
462 * May sleep only if CMD_ASYNC is not set
 * @tx: send an skb. The transport relies on the op_mode to zero the
 *	ieee80211_tx_info->driver_data. If the MPDU is an A-MSDU, all
 *	the CSUM will be taken care of (TCP CSUM and IP header in case of
 *	IPv4). If the MPDU is a single MSDU, the op_mode must compute the IP
 *	header checksum if it is IPv4.
469 * @reclaim: free packet until ssn. Returns a list of freed packets.
471 * @txq_enable: setup a queue. To setup an AC queue, use the
472 * iwl_trans_ac_txq_enable wrapper. fw_alive must have been called before
473 * this one. The op_mode must not configure the HCMD queue. The scheduler
474 * configuration may be %NULL, in which case the hardware will not be
475 * configured. If true is returned, the operation mode needs to increment
476 * the sequence number of the packets routed to this queue because of a
477 * hardware scheduler bug. May sleep.
478 * @txq_disable: de-configure a Tx queue to send AMPDUs
480 * @txq_set_shared_mode: change Tx queue shared/unshared marking
481 * @wait_tx_queues_empty: wait until tx queues are empty. May sleep.
482 * @wait_txq_empty: wait until specific tx queue is empty. May sleep.
483 * @freeze_txq_timer: prevents the timer of the queue from firing until the
484 * queue is set to awake. Must be atomic.
485 * @block_txq_ptrs: stop updating the write pointers of the Tx queues. Note
486 * that the transport needs to refcount the calls since this function
487 * will be called several times with block = true, and then the queues
 *	need to be unblocked only after the same number of calls with
 *	block = false.
490 * @write8: write a u8 to a register at offset ofs from the BAR
491 * @write32: write a u32 to a register at offset ofs from the BAR
492 * @read32: read a u32 register at offset ofs from the BAR
493 * @read_prph: read a DWORD from a periphery register
494 * @write_prph: write a DWORD to a periphery register
495 * @read_mem: read device's SRAM in DWORD
 * @write_mem: write device's SRAM in DWORD. If %buf is %NULL, then the memory
 *	will be zeroed.
 * @read_config32: read a u32 value from the device's config space at
 *	the given offset.
500 * @configure: configure parameters required by the transport layer from
 *	the op_mode. May be called several times before start_fw, can't be
 *	called after that.
503 * @set_pmi: set the power pmi state
504 * @grab_nic_access: wake the NIC to be able to access non-HBUS regs.
505 * Sleeping is not allowed between grab_nic_access and
506 * release_nic_access.
507 * @release_nic_access: let the NIC go to sleep. The "flags" parameter
508 * must be the same one that was sent before to the grab_nic_access.
509 * @set_bits_mask - set SRAM register according to value and mask.
510 * @dump_data: return a vmalloc'ed buffer with debug data, maybe containing last
511 * TX'ed commands and similar. The buffer will be vfree'd by the caller.
512 * Note that the transport must fill in the proper file headers.
513 * @debugfs_cleanup: used in the driver unload flow to make a proper cleanup
514 * of the trans debugfs
 * @set_pnvm: set the pnvm data in the prph scratch buffer, inside the
 *	context info.
517 * @interrupts: disable/enable interrupts to transport
 */
struct iwl_trans_ops {
521 int (*start_hw)(struct iwl_trans *iwl_trans);
522 void (*op_mode_leave)(struct iwl_trans *iwl_trans);
	int (*start_fw)(struct iwl_trans *trans, const struct fw_img *fw,
			bool run_in_rfkill);
525 void (*fw_alive)(struct iwl_trans *trans, u32 scd_addr);
526 void (*stop_device)(struct iwl_trans *trans);
528 int (*d3_suspend)(struct iwl_trans *trans, bool test, bool reset);
529 int (*d3_resume)(struct iwl_trans *trans, enum iwl_d3_status *status,
530 bool test, bool reset);
532 int (*send_cmd)(struct iwl_trans *trans, struct iwl_host_cmd *cmd);
534 int (*tx)(struct iwl_trans *trans, struct sk_buff *skb,
535 struct iwl_device_tx_cmd *dev_cmd, int queue);
536 void (*reclaim)(struct iwl_trans *trans, int queue, int ssn,
537 struct sk_buff_head *skbs);
539 void (*set_q_ptrs)(struct iwl_trans *trans, int queue, int ptr);
541 bool (*txq_enable)(struct iwl_trans *trans, int queue, u16 ssn,
542 const struct iwl_trans_txq_scd_cfg *cfg,
543 unsigned int queue_wdg_timeout);
	void (*txq_disable)(struct iwl_trans *trans, int queue,
			    bool configure_scd);
546 /* 22000 functions */
547 int (*txq_alloc)(struct iwl_trans *trans,
548 __le16 flags, u8 sta_id, u8 tid,
549 int cmd_id, int size,
550 unsigned int queue_wdg_timeout);
551 void (*txq_free)(struct iwl_trans *trans, int queue);
552 int (*rxq_dma_data)(struct iwl_trans *trans, int queue,
553 struct iwl_trans_rxq_dma_data *data);
	void (*txq_set_shared_mode)(struct iwl_trans *trans, u32 txq_id,
				    bool shared_mode);
558 int (*wait_tx_queues_empty)(struct iwl_trans *trans, u32 txq_bm);
559 int (*wait_txq_empty)(struct iwl_trans *trans, int queue);
	void (*freeze_txq_timer)(struct iwl_trans *trans, unsigned long txqs,
				 bool freeze);
562 void (*block_txq_ptrs)(struct iwl_trans *trans, bool block);
564 void (*write8)(struct iwl_trans *trans, u32 ofs, u8 val);
565 void (*write32)(struct iwl_trans *trans, u32 ofs, u32 val);
566 u32 (*read32)(struct iwl_trans *trans, u32 ofs);
567 u32 (*read_prph)(struct iwl_trans *trans, u32 ofs);
568 void (*write_prph)(struct iwl_trans *trans, u32 ofs, u32 val);
569 int (*read_mem)(struct iwl_trans *trans, u32 addr,
570 void *buf, int dwords);
571 int (*write_mem)(struct iwl_trans *trans, u32 addr,
572 const void *buf, int dwords);
573 int (*read_config32)(struct iwl_trans *trans, u32 ofs, u32 *val);
574 void (*configure)(struct iwl_trans *trans,
575 const struct iwl_trans_config *trans_cfg);
576 void (*set_pmi)(struct iwl_trans *trans, bool state);
577 void (*sw_reset)(struct iwl_trans *trans);
578 bool (*grab_nic_access)(struct iwl_trans *trans, unsigned long *flags);
579 void (*release_nic_access)(struct iwl_trans *trans,
580 unsigned long *flags);
	void (*set_bits_mask)(struct iwl_trans *trans, u32 reg, u32 mask,
			      u32 value);
583 int (*suspend)(struct iwl_trans *trans);
584 void (*resume)(struct iwl_trans *trans);
	struct iwl_trans_dump_data *(*dump_data)(struct iwl_trans *trans,
						 u32 dump_mask);
588 void (*debugfs_cleanup)(struct iwl_trans *trans);
589 void (*sync_nmi)(struct iwl_trans *trans);
590 int (*set_pnvm)(struct iwl_trans *trans, const void *data, u32 len);
	void (*interrupts)(struct iwl_trans *trans, bool enable);
};

/**
 * enum iwl_trans_state - state of the transport layer
 * @IWL_TRANS_NO_FW: firmware wasn't started yet, or crashed
 * @IWL_TRANS_FW_STARTED: FW was started, but not alive yet
 * @IWL_TRANS_FW_ALIVE: FW has sent an alive response
 */
enum iwl_trans_state {
	IWL_TRANS_NO_FW,
	IWL_TRANS_FW_STARTED,
	IWL_TRANS_FW_ALIVE,
};
/**
 * DOC: Platform power management
610 * In system-wide power management the entire platform goes into a low
611 * power state (e.g. idle or suspend to RAM) at the same time and the
612 * device is configured as a wakeup source for the entire platform.
613 * This is usually triggered by userspace activity (e.g. the user
614 * presses the suspend button or a power management daemon decides to
615 * put the platform in low power mode). The device's behavior in this
616 * mode is dictated by the wake-on-WLAN configuration.
618 * The terms used for the device's behavior are as follows:
620 * - D0: the device is fully powered and the host is awake;
 * - D3: the device is in low power mode and only reacts to
 *	specific events (e.g. magic-packet received or scan
 *	results found);
625 * These terms reflect the power modes in the firmware and are not to
 * be confused with the physical device power state.
 */

/**
 * enum iwl_plat_pm_mode - platform power management mode
632 * This enumeration describes the device's platform power management
633 * behavior when in system-wide suspend (i.e WoWLAN).
635 * @IWL_PLAT_PM_MODE_DISABLED: power management is disabled for this
636 * device. In system-wide suspend mode, it means that the all
637 * connections will be closed automatically by mac80211 before
638 * the platform is suspended.
 * @IWL_PLAT_PM_MODE_D3: the device goes into D3 mode (i.e. WoWLAN).
 */
enum iwl_plat_pm_mode {
	IWL_PLAT_PM_MODE_DISABLED,
	IWL_PLAT_PM_MODE_D3,
};
/**
 * enum iwl_ini_cfg_state
 * @IWL_INI_CFG_STATE_NOT_LOADED: no debug cfg was given
 * @IWL_INI_CFG_STATE_LOADED: debug cfg was found and loaded
 * @IWL_INI_CFG_STATE_CORRUPTED: debug cfg was found and some of the TLVs
 *	are corrupted. The rest of the debug TLVs will still be used
 */
enum iwl_ini_cfg_state {
	IWL_INI_CFG_STATE_NOT_LOADED,
	IWL_INI_CFG_STATE_LOADED,
	IWL_INI_CFG_STATE_CORRUPTED,
};
/* Max time to wait for nmi interrupt */
#define IWL_TRANS_NMI_TIMEOUT (HZ / 4)

/**
 * struct iwl_dram_data
 * @physical: page phy pointer
 * @block: pointer to the allocated block/page
 * @size: size of the block/page
 */
struct iwl_dram_data {
	dma_addr_t physical;
	void *block;
	int size;
};
/**
 * struct iwl_fw_mon - fw monitor per allocation id
 * @num_frags: number of fragments
 * @frags: an array of DRAM buffer fragments
 */
struct iwl_fw_mon {
	u32 num_frags;
	struct iwl_dram_data *frags;
};
/**
 * struct iwl_self_init_dram - dram data used by self init process
 * @fw: lmac and umac dram data
 * @fw_cnt: total number of items in array
 * @paging: paging dram data
 * @paging_cnt: total number of items in array
 */
struct iwl_self_init_dram {
	struct iwl_dram_data *fw;
	int fw_cnt;
	struct iwl_dram_data *paging;
	int paging_cnt;
};
/**
 * struct iwl_trans_debug - transport debug related data
701 * @n_dest_reg: num of reg_ops in %dbg_dest_tlv
702 * @rec_on: true iff there is a fw debug recording currently active
703 * @dest_tlv: points to the destination TLV for debug
704 * @conf_tlv: array of pointers to configuration TLVs for debug
705 * @trigger_tlv: array of pointers to triggers TLVs for debug
706 * @lmac_error_event_table: addrs of lmacs error tables
707 * @umac_error_event_table: addr of umac error table
 * @error_event_table_tlv_status: bitmap that indicates what error table
 *	pointers were received via TLV. Uses &enum iwl_error_event_table_status
710 * @internal_ini_cfg: internal debug cfg state. Uses &enum iwl_ini_cfg_state
711 * @external_ini_cfg: external debug cfg state. Uses &enum iwl_ini_cfg_state
712 * @fw_mon_cfg: debug buffer allocation configuration
713 * @fw_mon_ini: DRAM buffer fragments per allocation id
714 * @fw_mon: DRAM buffer for firmware monitor
715 * @hw_error: equals true if hw error interrupt was received from the FW
716 * @ini_dest: debug monitor destination uses &enum iwl_fw_ini_buffer_location
717 * @active_regions: active regions
718 * @debug_info_tlv_list: list of debug info TLVs
719 * @time_point: array of debug time points
720 * @periodic_trig_list: periodic triggers list
721 * @domains_bitmap: bitmap of active domains other than
722 * &IWL_FW_INI_DOMAIN_ALWAYS_ON
 */
struct iwl_trans_debug {
728 const struct iwl_fw_dbg_dest_tlv_v1 *dest_tlv;
729 const struct iwl_fw_dbg_conf_tlv *conf_tlv[FW_DBG_CONF_MAX];
730 struct iwl_fw_dbg_trigger_tlv * const *trigger_tlv;
732 u32 lmac_error_event_table[2];
733 u32 umac_error_event_table;
734 unsigned int error_event_table_tlv_status;
736 enum iwl_ini_cfg_state internal_ini_cfg;
737 enum iwl_ini_cfg_state external_ini_cfg;
739 struct iwl_fw_ini_allocation_tlv fw_mon_cfg[IWL_FW_INI_ALLOCATION_NUM];
740 struct iwl_fw_mon fw_mon_ini[IWL_FW_INI_ALLOCATION_NUM];
742 struct iwl_dram_data fw_mon;
745 enum iwl_fw_ini_buffer_location ini_dest;
747 struct iwl_ucode_tlv *active_regions[IWL_FW_INI_MAX_REGION_ID];
748 struct list_head debug_info_tlv_list;
749 struct iwl_dbg_tlv_time_point_data
750 time_point[IWL_FW_INI_TIME_POINT_NUM];
	struct list_head periodic_trig_list;

	u32 domains_bitmap;
};

struct iwl_cmd_meta {
	/* only for SYNC commands, iff the reply skb is wanted */
	struct iwl_host_cmd *source;
	u32 flags;
	u32 tbs;
};
/*
 * The FH will write back to the first TB only, so we need to copy some data
 * into the buffer regardless of whether it should be mapped or not.
 * This indicates how big the first TB must be to include the scratch buffer
 * and the assigned PN.
 * Since PN location is 8 bytes at offset 12, it's 20 now.
 * If we make it bigger then allocations will be bigger and copy slower, so
 * that's probably not useful.
 */
#define IWL_FIRST_TB_SIZE	20
#define IWL_FIRST_TB_SIZE_ALIGN ALIGN(IWL_FIRST_TB_SIZE, 64)
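
/*
 * Worked example (not from the original file): ALIGN(IWL_FIRST_TB_SIZE, 64)
 * rounds the 20-byte scratch area up to the next 64-byte boundary, so
 * IWL_FIRST_TB_SIZE_ALIGN evaluates to 64.
 */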
struct iwl_pcie_txq_entry {
	void *cmd;
	struct sk_buff *skb;
	/* buffer to free after command completes */
	const void *free_buf;
	struct iwl_cmd_meta meta;
};

struct iwl_pcie_first_tb_buf {
	u8 buf[IWL_FIRST_TB_SIZE_ALIGN];
};
/**
 * struct iwl_txq - Tx Queue for DMA
795 * @q: generic Rx/Tx queue descriptor
796 * @tfds: transmit frame descriptors (DMA memory)
797 * @first_tb_bufs: start of command headers, including scratch buffers, for
798 * the writeback -- this is DMA memory and an array holding one buffer
799 * for each command on the queue
800 * @first_tb_dma: DMA address for the first_tb_bufs start
801 * @entries: transmit entries (driver state)
803 * @stuck_timer: timer that fires if queue gets stuck
804 * @trans: pointer back to transport (for timer)
805 * @need_update: indicates need to update read/write index
 * @ampdu: true if this queue is an ampdu queue for a specific RA/TID
807 * @wd_timeout: queue watchdog timeout (jiffies) - per queue
808 * @frozen: tx stuck queue timer is frozen
809 * @frozen_expiry_remainder: remember how long until the timer fires
810 * @bc_tbl: byte count table of the queue (relevant only for gen2 transport)
811 * @write_ptr: 1-st empty entry (index) host_w
812 * @read_ptr: last used entry (index) host_r
813 * @dma_addr: physical addr for BD's
814 * @n_window: safe queue window
816 * @low_mark: low watermark, resume queue if free space more than this
817 * @high_mark: high watermark, stop queue if free space less than this
819 * A Tx queue consists of circular buffer of BDs (a.k.a. TFDs, transmit frame
820 * descriptors) and required locking structures.
822 * Note the difference between TFD_QUEUE_SIZE_MAX and n_window: the hardware
823 * always assumes 256 descriptors, so TFD_QUEUE_SIZE_MAX is always 256 (unless
824 * there might be HW changes in the future). For the normal TX
825 * queues, n_window, which is the size of the software queue data
826 * is also 256; however, for the command queue, n_window is only
827 * 32 since we don't need so many commands pending. Since the HW
828 * still uses 256 BDs for DMA though, TFD_QUEUE_SIZE_MAX stays 256.
829 * This means that we end up with the following:
830 * HW entries: | 0 | ... | N * 32 | ... | N * 32 + 31 | ... | 255 |
831 * SW entries: | 0 | ... | 31 |
 *	where N is a number between 0 and 7. This means that the SW
 *	data is a window overlaid over the HW queue.
 */
struct iwl_txq {
	void *tfds;
	struct iwl_pcie_first_tb_buf *first_tb_bufs;
838 dma_addr_t first_tb_dma;
839 struct iwl_pcie_txq_entry *entries;
	/* lock for syncing changes on the queue */
	spinlock_t lock;
	unsigned long frozen_expiry_remainder;
843 struct timer_list stuck_timer;
844 struct iwl_trans *trans;
849 unsigned long wd_timeout;
850 struct sk_buff_head overflow_q;
	struct iwl_dma_ptr bc_tbl;
};
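
/*
 * Worked example (not from the original file): with the command queue's
 * n_window of 32 (a power of two), HW descriptor index 0x47 maps to SW entry
 * 0x07 - the SW array is a small window overlaid on the 256-entry HW ring.
 * The helper name is made up for this example.
 */
static inline int iwl_txq_example_sw_index(u32 hw_index, int n_window)
{
	return hw_index & (n_window - 1);
}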
/**
 * struct iwl_trans_txqs - transport tx queues data
867 * @bc_table_dword: true if the BC table expects DWORD (as opposed to bytes)
868 * @page_offs: offset from skb->cb to mac header page pointer
869 * @dev_cmd_offs: offset from skb->cb to iwl_device_tx_cmd pointer
870 * @queue_used - bit mask of used queues
871 * @queue_stopped - bit mask of stopped queues
872 * @scd_bc_tbls: gen1 pointer to the byte count table of the scheduler
 */
struct iwl_trans_txqs {
875 unsigned long queue_used[BITS_TO_LONGS(IWL_MAX_TVQM_QUEUES)];
876 unsigned long queue_stopped[BITS_TO_LONGS(IWL_MAX_TVQM_QUEUES)];
877 struct iwl_txq *txq[IWL_MAX_TVQM_QUEUES];
878 struct dma_pool *bc_pool;
883 struct __percpu iwl_tso_hdr_page * tso_hdr_page;
888 unsigned int wdg_timeout;
	struct iwl_dma_ptr scd_bc_tbls;
};

/**
 * struct iwl_trans - transport common data
903 * @ops - pointer to iwl_trans_ops
904 * @op_mode - pointer to the op_mode
905 * @trans_cfg: the trans-specific configuration part
906 * @cfg - pointer to the configuration
907 * @drv - pointer to iwl_drv
908 * @status: a bit-mask of transport status flags
909 * @dev - pointer to struct device * that represents the device
910 * @max_skb_frags: maximum number of fragments an SKB can have when transmitted.
911 * 0 indicates that frag SKBs (NETIF_F_SG) aren't supported.
 * @hw_rf_id: a u32 with the device RF ID
913 * @hw_id: a u32 with the ID of the device / sub-device.
914 * Set during transport allocation.
915 * @hw_id_str: a string with info about HW ID. Set during transport allocation.
916 * @pm_support: set to true in start_hw if link pm is supported
917 * @ltr_enabled: set to true if the LTR is enabled
918 * @wide_cmd_header: true when ucode supports wide command header format
919 * @num_rx_queues: number of RX queues allocated by the transport;
920 * the transport must set this before calling iwl_drv_start()
921 * @iml_len: the length of the image loader
922 * @iml: a pointer to the image loader itself
923 * @dev_cmd_pool: pool for Tx cmd allocation - for internal use only.
924 * The user should use iwl_trans_{alloc,free}_tx_cmd.
925 * @rx_mpdu_cmd: MPDU RX command ID, must be assigned by opmode before
926 * starting the firmware, used for tracing
927 * @rx_mpdu_cmd_hdr_size: used for tracing, amount of data before the
928 * start of the 802.11 header in the @rx_mpdu_cmd
929 * @dflt_pwr_limit: default power limit fetched from the platform (ACPI)
930 * @system_pm_mode: the system-wide power management mode in use.
931 * This mode is set dynamically, depending on the WoWLAN values
932 * configured from the userspace at runtime.
933 * @iwl_trans_txqs: transport tx queues data.
 */
struct iwl_trans {
	const struct iwl_trans_ops *ops;
937 struct iwl_op_mode *op_mode;
938 const struct iwl_cfg_trans_params *trans_cfg;
939 const struct iwl_cfg *cfg;
941 enum iwl_trans_state state;
942 unsigned long status;
952 u8 rx_mpdu_cmd, rx_mpdu_cmd_hdr_size;
958 const struct iwl_hcmd_arr *command_groups;
959 int command_groups_size;
960 bool wide_cmd_header;
967 /* The following fields are internal only */
968 struct kmem_cache *dev_cmd_pool;
969 char dev_cmd_pool_name[50];
971 struct dentry *dbgfs_dir;
973 #ifdef CONFIG_LOCKDEP
974 struct lockdep_map sync_cmd_lockdep_map;
977 struct iwl_trans_debug dbg;
978 struct iwl_self_init_dram init_dram;
980 enum iwl_plat_pm_mode system_pm_mode;
983 struct iwl_trans_txqs txqs;
985 /* pointer to trans specific struct */
	/* Ensure that this pointer will always be aligned to sizeof pointer */
	char trans_specific[] __aligned(sizeof(void *));
};

const char *iwl_get_cmd_string(struct iwl_trans *trans, u32 id);
991 int iwl_cmd_groups_verify_sorted(const struct iwl_trans_config *trans);
static inline void iwl_trans_configure(struct iwl_trans *trans,
				       const struct iwl_trans_config *trans_cfg)
{
	trans->op_mode = trans_cfg->op_mode;

	trans->ops->configure(trans, trans_cfg);
	WARN_ON(iwl_cmd_groups_verify_sorted(trans_cfg));
}
1002 static inline int iwl_trans_start_hw(struct iwl_trans *trans)
1006 return trans->ops->start_hw(trans);
1009 static inline void iwl_trans_op_mode_leave(struct iwl_trans *trans)
1013 if (trans->ops->op_mode_leave)
1014 trans->ops->op_mode_leave(trans);
1016 trans->op_mode = NULL;
1018 trans->state = IWL_TRANS_NO_FW;
1021 static inline void iwl_trans_fw_alive(struct iwl_trans *trans, u32 scd_addr)
1025 trans->state = IWL_TRANS_FW_ALIVE;
1027 trans->ops->fw_alive(trans, scd_addr);
1030 static inline int iwl_trans_start_fw(struct iwl_trans *trans,
1031 const struct fw_img *fw,
1038 WARN_ON_ONCE(!trans->rx_mpdu_cmd);
1040 clear_bit(STATUS_FW_ERROR, &trans->status);
1041 ret = trans->ops->start_fw(trans, fw, run_in_rfkill);
1043 trans->state = IWL_TRANS_FW_STARTED;
1048 static inline void iwl_trans_stop_device(struct iwl_trans *trans)
1052 trans->ops->stop_device(trans);
1054 trans->state = IWL_TRANS_NO_FW;
1057 static inline int iwl_trans_d3_suspend(struct iwl_trans *trans, bool test,
1061 if (!trans->ops->d3_suspend)
1064 return trans->ops->d3_suspend(trans, test, reset);
1067 static inline int iwl_trans_d3_resume(struct iwl_trans *trans,
1068 enum iwl_d3_status *status,
1069 bool test, bool reset)
1072 if (!trans->ops->d3_resume)
1075 return trans->ops->d3_resume(trans, status, test, reset);
1078 static inline int iwl_trans_suspend(struct iwl_trans *trans)
1080 if (!trans->ops->suspend)
1083 return trans->ops->suspend(trans);
1086 static inline void iwl_trans_resume(struct iwl_trans *trans)
1088 if (trans->ops->resume)
1089 trans->ops->resume(trans);
1092 static inline struct iwl_trans_dump_data *
1093 iwl_trans_dump_data(struct iwl_trans *trans, u32 dump_mask)
1095 if (!trans->ops->dump_data)
1097 return trans->ops->dump_data(trans, dump_mask);
static inline struct iwl_device_tx_cmd *
iwl_trans_alloc_tx_cmd(struct iwl_trans *trans)
{
	return kmem_cache_zalloc(trans->dev_cmd_pool, GFP_ATOMIC);
}

int iwl_trans_send_cmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd);

static inline void iwl_trans_free_tx_cmd(struct iwl_trans *trans,
					 struct iwl_device_tx_cmd *dev_cmd)
{
	kmem_cache_free(trans->dev_cmd_pool, dev_cmd);
}
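
/*
 * Illustrative usage (not part of the original header): allocate a TX command,
 * bail out on failure and release it again. A real op_mode would fill in
 * dev_cmd->hdr and the payload and hand the command to iwl_trans_tx() instead
 * of freeing it immediately.
 */
static inline int iwl_trans_tx_cmd_example(struct iwl_trans *trans)
{
	struct iwl_device_tx_cmd *dev_cmd = iwl_trans_alloc_tx_cmd(trans);

	if (!dev_cmd)
		return -ENOMEM;

	/* ... fill dev_cmd->hdr and dev_cmd->payload here ... */

	iwl_trans_free_tx_cmd(trans, dev_cmd);
	return 0;
}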
1114 static inline int iwl_trans_tx(struct iwl_trans *trans, struct sk_buff *skb,
1115 struct iwl_device_tx_cmd *dev_cmd, int queue)
1117 if (unlikely(test_bit(STATUS_FW_ERROR, &trans->status)))
1120 if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
1121 IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
1125 return trans->ops->tx(trans, skb, dev_cmd, queue);
1128 static inline void iwl_trans_reclaim(struct iwl_trans *trans, int queue,
1129 int ssn, struct sk_buff_head *skbs)
1131 if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
1132 IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
1136 trans->ops->reclaim(trans, queue, ssn, skbs);
1139 static inline void iwl_trans_set_q_ptrs(struct iwl_trans *trans, int queue,
1142 if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
1143 IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
1147 trans->ops->set_q_ptrs(trans, queue, ptr);
1150 static inline void iwl_trans_txq_disable(struct iwl_trans *trans, int queue,
1153 trans->ops->txq_disable(trans, queue, configure_scd);
1157 iwl_trans_txq_enable_cfg(struct iwl_trans *trans, int queue, u16 ssn,
1158 const struct iwl_trans_txq_scd_cfg *cfg,
1159 unsigned int queue_wdg_timeout)
1163 if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
1164 IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
1168 return trans->ops->txq_enable(trans, queue, ssn,
1169 cfg, queue_wdg_timeout);
1173 iwl_trans_get_rxq_dma_data(struct iwl_trans *trans, int queue,
1174 struct iwl_trans_rxq_dma_data *data)
1176 if (WARN_ON_ONCE(!trans->ops->rxq_dma_data))
1179 return trans->ops->rxq_dma_data(trans, queue, data);
1183 iwl_trans_txq_free(struct iwl_trans *trans, int queue)
1185 if (WARN_ON_ONCE(!trans->ops->txq_free))
1188 trans->ops->txq_free(trans, queue);
1192 iwl_trans_txq_alloc(struct iwl_trans *trans,
1193 __le16 flags, u8 sta_id, u8 tid,
1194 int cmd_id, int size,
1195 unsigned int wdg_timeout)
1199 if (WARN_ON_ONCE(!trans->ops->txq_alloc))
1202 if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
1203 IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
1207 return trans->ops->txq_alloc(trans, flags, sta_id, tid,
1208 cmd_id, size, wdg_timeout);
1211 static inline void iwl_trans_txq_set_shared_mode(struct iwl_trans *trans,
1212 int queue, bool shared_mode)
1214 if (trans->ops->txq_set_shared_mode)
1215 trans->ops->txq_set_shared_mode(trans, queue, shared_mode);
1218 static inline void iwl_trans_txq_enable(struct iwl_trans *trans, int queue,
1219 int fifo, int sta_id, int tid,
1220 int frame_limit, u16 ssn,
1221 unsigned int queue_wdg_timeout)
1223 struct iwl_trans_txq_scd_cfg cfg = {
1227 .frame_limit = frame_limit,
1228 .aggregate = sta_id >= 0,
1231 iwl_trans_txq_enable_cfg(trans, queue, ssn, &cfg, queue_wdg_timeout);
static inline
void iwl_trans_ac_txq_enable(struct iwl_trans *trans, int queue, int fifo,
			     unsigned int queue_wdg_timeout)
{
	struct iwl_trans_txq_scd_cfg cfg = {
		.fifo = fifo,
		.sta_id = -1,
		.tid = IWL_MAX_TID_COUNT,
		.frame_limit = IWL_FRAME_LIMIT,
		.aggregate = false,
	};

	iwl_trans_txq_enable_cfg(trans, queue, 0, &cfg, queue_wdg_timeout);
}
1249 static inline void iwl_trans_freeze_txq_timer(struct iwl_trans *trans,
1253 if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
1254 IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
1258 if (trans->ops->freeze_txq_timer)
1259 trans->ops->freeze_txq_timer(trans, txqs, freeze);
1262 static inline void iwl_trans_block_txq_ptrs(struct iwl_trans *trans,
1265 if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
1266 IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
1270 if (trans->ops->block_txq_ptrs)
1271 trans->ops->block_txq_ptrs(trans, block);
1274 static inline int iwl_trans_wait_tx_queues_empty(struct iwl_trans *trans,
1277 if (WARN_ON_ONCE(!trans->ops->wait_tx_queues_empty))
1280 if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
1281 IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
1285 return trans->ops->wait_tx_queues_empty(trans, txqs);
1288 static inline int iwl_trans_wait_txq_empty(struct iwl_trans *trans, int queue)
1290 if (WARN_ON_ONCE(!trans->ops->wait_txq_empty))
1293 if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
1294 IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
1298 return trans->ops->wait_txq_empty(trans, queue);
1301 static inline void iwl_trans_write8(struct iwl_trans *trans, u32 ofs, u8 val)
1303 trans->ops->write8(trans, ofs, val);
1306 static inline void iwl_trans_write32(struct iwl_trans *trans, u32 ofs, u32 val)
1308 trans->ops->write32(trans, ofs, val);
1311 static inline u32 iwl_trans_read32(struct iwl_trans *trans, u32 ofs)
1313 return trans->ops->read32(trans, ofs);
1316 static inline u32 iwl_trans_read_prph(struct iwl_trans *trans, u32 ofs)
1318 return trans->ops->read_prph(trans, ofs);
1321 static inline void iwl_trans_write_prph(struct iwl_trans *trans, u32 ofs,
1324 return trans->ops->write_prph(trans, ofs, val);
1327 static inline int iwl_trans_read_mem(struct iwl_trans *trans, u32 addr,
1328 void *buf, int dwords)
1330 return trans->ops->read_mem(trans, addr, buf, dwords);
#define iwl_trans_read_mem_bytes(trans, addr, buf, bufsize)		      \
	do {								      \
		if (__builtin_constant_p(bufsize))			      \
			BUILD_BUG_ON((bufsize) % sizeof(u32));		      \
		iwl_trans_read_mem(trans, addr, buf, (bufsize) / sizeof(u32));\
	} while (0)
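
/*
 * Illustrative usage (not part of the original header): read a small,
 * DWORD-aligned structure out of the device's SRAM; the struct layout and
 * address are made up for this example. Because sizeof(data) is a
 * compile-time constant, the BUILD_BUG_ON above checks that it is a multiple
 * of sizeof(u32).
 */
static inline void iwl_trans_read_mem_bytes_example(struct iwl_trans *trans,
						    u32 sram_addr)
{
	struct {
		u32 magic;
		u32 version;
	} data;

	iwl_trans_read_mem_bytes(trans, sram_addr, &data, sizeof(data));
}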
1340 static inline u32 iwl_trans_read_mem32(struct iwl_trans *trans, u32 addr)
1344 if (WARN_ON(iwl_trans_read_mem(trans, addr, &value, 1)))
1350 static inline int iwl_trans_write_mem(struct iwl_trans *trans, u32 addr,
1351 const void *buf, int dwords)
1353 return trans->ops->write_mem(trans, addr, buf, dwords);
1356 static inline u32 iwl_trans_write_mem32(struct iwl_trans *trans, u32 addr,
1359 return iwl_trans_write_mem(trans, addr, &val, 1);
1362 static inline void iwl_trans_set_pmi(struct iwl_trans *trans, bool state)
1364 if (trans->ops->set_pmi)
1365 trans->ops->set_pmi(trans, state);
1368 static inline void iwl_trans_sw_reset(struct iwl_trans *trans)
1370 if (trans->ops->sw_reset)
1371 trans->ops->sw_reset(trans);
1375 iwl_trans_set_bits_mask(struct iwl_trans *trans, u32 reg, u32 mask, u32 value)
1377 trans->ops->set_bits_mask(trans, reg, mask, value);
#define iwl_trans_grab_nic_access(trans, flags)	\
	__cond_lock(nic_access,			\
		    likely((trans)->ops->grab_nic_access(trans, flags)))

static inline void __releases(nic_access)
iwl_trans_release_nic_access(struct iwl_trans *trans, unsigned long *flags)
{
	trans->ops->release_nic_access(trans, flags);
	__release(nic_access);
}
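
/*
 * Illustrative usage (not part of the original header): the canonical
 * grab/release pattern around direct register access; sleeping is not allowed
 * between the two calls. The helper name is made up for this example.
 */
static inline u32 iwl_trans_example_locked_read32(struct iwl_trans *trans,
						  u32 ofs)
{
	unsigned long flags;
	u32 val = 0;

	if (iwl_trans_grab_nic_access(trans, &flags)) {
		val = iwl_trans_read32(trans, ofs);
		iwl_trans_release_nic_access(trans, &flags);
	}

	return val;
}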
1391 static inline void iwl_trans_fw_error(struct iwl_trans *trans)
1393 if (WARN_ON_ONCE(!trans->op_mode))
1396 /* prevent double restarts due to the same erroneous FW */
1397 if (!test_and_set_bit(STATUS_FW_ERROR, &trans->status)) {
1398 iwl_op_mode_nic_error(trans->op_mode);
1399 trans->state = IWL_TRANS_NO_FW;
1403 static inline bool iwl_trans_fw_running(struct iwl_trans *trans)
1405 return trans->state == IWL_TRANS_FW_ALIVE;
1408 static inline void iwl_trans_sync_nmi(struct iwl_trans *trans)
1410 if (trans->ops->sync_nmi)
1411 trans->ops->sync_nmi(trans);
1414 void iwl_trans_sync_nmi_with_addr(struct iwl_trans *trans, u32 inta_addr,
1417 static inline int iwl_trans_set_pnvm(struct iwl_trans *trans,
1418 const void *data, u32 len)
1420 if (trans->ops->set_pnvm) {
1421 int ret = trans->ops->set_pnvm(trans, data, len);
1427 trans->pnvm_loaded = true;
1432 static inline bool iwl_trans_dbg_ini_valid(struct iwl_trans *trans)
1434 return trans->dbg.internal_ini_cfg != IWL_INI_CFG_STATE_NOT_LOADED ||
1435 trans->dbg.external_ini_cfg != IWL_INI_CFG_STATE_NOT_LOADED;
1438 static inline void iwl_trans_interrupts(struct iwl_trans *trans, bool enable)
1440 if (trans->ops->interrupts)
1441 trans->ops->interrupts(trans, enable);
1444 /*****************************************************
1445 * transport helper functions
1446 *****************************************************/
struct iwl_trans *iwl_trans_alloc(unsigned int priv_size,
				  struct device *dev,
				  const struct iwl_trans_ops *ops,
				  const struct iwl_cfg_trans_params *cfg_trans);
1451 void iwl_trans_free(struct iwl_trans *trans);
1453 /*****************************************************
1454 * driver (transport) register/unregister functions
1455 ******************************************************/
1456 int __must_check iwl_pci_register_driver(void);
1457 void iwl_pci_unregister_driver(void);
1459 #endif /* __iwl_trans_h__ */