Commit | Line | Data |
---|---|---|
fe56b9e6 YM |
1 | /* QLogic qed NIC Driver |
2 | * | |
3 | * Copyright (c) 2015 QLogic Corporation | |
4 | * | |
5 | * This software is available under the terms of the GNU General Public License | |
6 | * (GPL) Version 2, available from the file COPYING in the main directory of | |
7 | * this source tree. | |
8 | */ | |
9 | ||
10 | #ifndef _QED_IF_H | |
11 | #define _QED_IF_H | |
12 | ||
13 | #include <linux/types.h> | |
14 | #include <linux/interrupt.h> | |
15 | #include <linux/netdevice.h> | |
16 | #include <linux/pci.h> | |
17 | #include <linux/skbuff.h> | |
18 | #include <linux/types.h> | |
19 | #include <asm/byteorder.h> | |
20 | #include <linux/io.h> | |
21 | #include <linux/compiler.h> | |
22 | #include <linux/kernel.h> | |
23 | #include <linux/list.h> | |
24 | #include <linux/slab.h> | |
25 | #include <linux/qed/common_hsi.h> | |
26 | #include <linux/qed/qed_chain.h> | |
27 | ||
/* Raw MMIO accessors. @reg_addr is an already-ioremap()ed device address
 * carried as a generic pointer/cookie; the cast restores the __iomem type.
 * @val is fully parenthesized so compound expressions are converted as a
 * whole (previously `(u32)val' bound the cast to only the first operand).
 */
#define DIRECT_REG_WR(reg_addr, val) writel((u32)(val), \
					    (void __iomem *)(reg_addr))

#define DIRECT_REG_RD(reg_addr) readl((void __iomem *)(reg_addr))

/* Upper bound accepted for interrupt-coalescing parameters */
#define QED_COALESCE_MAX 0xFF
34 | ||
35 | /* forward */ | |
36 | struct qed_dev; | |
37 | ||
/* Ethernet-protocol PF resource requirements, consumed at HW init time. */
struct qed_eth_pf_params {
	/* The following parameters are used during HW-init
	 * and these parameters need to be passed as arguments
	 * to update_pf_params routine invoked before slowpath start
	 */
	u16 num_cons;	/* number of connections to provision for this PF */
};
45 | ||
/* Per-protocol PF parameters handed to qed_common_ops->update_pf_params()
 * before slowpath start; only the Ethernet personality exists here.
 */
struct qed_pf_params {
	struct qed_eth_pf_params eth_pf_params;
};
49 | ||
/* Interrupt delivery modes selectable via qed_slowpath_params.int_mode */
enum qed_int_mode {
	QED_INT_MODE_INTA,	/* legacy line interrupts */
	QED_INT_MODE_MSIX,
	QED_INT_MODE_MSI,
	QED_INT_MODE_POLL,	/* no interrupts; caller polls */
};
56 | ||
/* Per-status-block bookkeeping shared between qed and protocol drivers.
 * Instances are initialized through qed_common_ops->sb_init().
 */
struct qed_sb_info {
	struct status_block *sb_virt;	/* CPU-visible status block */
	dma_addr_t sb_phys;		/* DMA address of the same block */
	u32 sb_ack;			/* Last given ack */
	u16 igu_sb_id;			/* status block id within the IGU */
	void __iomem *igu_addr;		/* MMIO address acks are written to
					 * (see qed_sb_ack())
					 */
	u8 flags;
#define QED_SB_INFO_INIT	0x1
#define QED_SB_INFO_SETUP	0x2

	struct qed_dev *cdev;		/* owning device */
};
69 | ||
/* Static device capabilities/identity reported to protocol drivers. */
struct qed_dev_info {
	unsigned long	pci_mem_start;	/* PCI BAR range */
	unsigned long	pci_mem_end;
	unsigned int	pci_irq;
	u8		num_hwfns;	/* HW functions on this device */

	u8		hw_mac[ETH_ALEN];	/* permanent MAC address */
	bool		is_mf;			/* multi-function mode */

	/* FW version */
	u16		fw_major;
	u16		fw_minor;
	u16		fw_rev;
	u16		fw_eng;

	/* MFW version */
	u32		mfw_rev;

	u32		flash_size;	/* NVRAM size, in bytes presumably --
					 * units not visible here
					 */
	u8		mf_mode;
};
91 | ||
/* Status block flavors accepted by qed_common_ops->sb_init() */
enum qed_sb_type {
	QED_SB_TYPE_L2_QUEUE,
};
95 | ||
/* Protocol personalities a qed device can be probed as */
enum qed_protocol {
	QED_PROTOCOL_ETH,
};
99 | ||
/* Requested link configuration, passed to qed_common_ops->set_link().
 * Only the aspects flagged in override_flags deviate from defaults.
 */
struct qed_link_params {
	bool	link_up;	/* desired administrative link state */

#define QED_LINK_OVERRIDE_SPEED_AUTONEG		BIT(0)
#define QED_LINK_OVERRIDE_SPEED_ADV_SPEEDS	BIT(1)
#define QED_LINK_OVERRIDE_SPEED_FORCED_SPEED	BIT(2)
#define QED_LINK_OVERRIDE_PAUSE_CONFIG		BIT(3)
	u32	override_flags;	/* which fields below are meaningful */
	bool	autoneg;
	u32	adv_speeds;	/* speeds to advertise during autoneg */
	u32	forced_speed;	/* fixed speed when autoneg is overridden */
#define QED_LINK_PAUSE_AUTONEG_ENABLE		BIT(0)
#define QED_LINK_PAUSE_RX_ENABLE		BIT(1)
#define QED_LINK_PAUSE_TX_ENABLE		BIT(2)
	u32	pause_config;	/* QED_LINK_PAUSE_* bitmask */
};
116 | ||
/* Current link state as reported by qed_common_ops->get_link().
 * Capability fields use the ethtool SUPPORTED_*/ADVERTISED_* encodings.
 */
struct qed_link_output {
	bool	link_up;

	u32	supported_caps;		/* In SUPPORTED defs */
	u32	advertised_caps;	/* In ADVERTISED defs */
	u32	lp_caps;		/* Link-partner caps, in ADVERTISED defs */
	u32	speed;			/* In Mb/s */
	u8	duplex;			/* In DUPLEX defs */
	u8	port;			/* In PORT defs */
	bool	autoneg;
	u32	pause_config;		/* QED_LINK_PAUSE_* bitmask */
};
129 | ||
#define QED_DRV_VER_STR_SIZE	12
/* Arguments for qed_common_ops->slowpath_start(): interrupt mode plus the
 * client driver's version, which is reported to management FW.
 */
struct qed_slowpath_params {
	u32	int_mode;	/* an enum qed_int_mode value, presumably --
				 * declared u32 here; confirm against callers
				 */
	u8	drv_major;
	u8	drv_minor;
	u8	drv_rev;
	u8	drv_eng;
	u8	name[QED_DRV_VER_STR_SIZE];
};
139 | ||
#define ILT_PAGE_SIZE_TCFC 0x8000 /* 32KB */

/* Fastpath interrupt resources returned by qed_common_ops->get_fp_int() */
struct qed_int_info {
	struct msix_entry	*msix;		/* MSI-X vector table */
	u8			msix_cnt;	/* entries available in @msix */

	/* This should be updated by the protocol driver */
	u8			used_cnt;
};
149 | ||
/* Callbacks the protocol driver registers with qed core; invoked with the
 * opaque @dev cookie the driver supplied.
 */
struct qed_common_cb_ops {
	/* called on link state/configuration change */
	void (*link_update)(void *dev,
			    struct qed_link_output *link);
};
154 | ||
/* Operations exported by the qed core module to protocol drivers.
 * Typical lifecycle: probe -> update_pf_params -> slowpath_start ->
 * set_fp_int/get_fp_int/sb_init -> ... -> slowpath_stop -> remove.
 */
struct qed_common_ops {
	/* Probe the PCI device as @protocol; @dp_module/@dp_level set the
	 * initial debug verbosity. Returns the qed device handle.
	 */
	struct qed_dev*	(*probe)(struct pci_dev *dev,
				 enum qed_protocol protocol,
				 u32 dp_module,
				 u8 dp_level);

	/* Tear down a device previously returned by probe */
	void		(*remove)(struct qed_dev *cdev);

	int		(*set_power_state)(struct qed_dev *cdev,
					   pci_power_t state);

	/* Record the client's name and version string for this device */
	void		(*set_id)(struct qed_dev *cdev,
				  char name[],
				  char ver_str[]);

	/* Client drivers need to make this call before slowpath_start.
	 * PF params required for the call before slowpath_start is
	 * documented within the qed_pf_params structure definition.
	 */
	void		(*update_pf_params)(struct qed_dev *cdev,
					    struct qed_pf_params *params);
	int		(*slowpath_start)(struct qed_dev *cdev,
					  struct qed_slowpath_params *params);

	int		(*slowpath_stop)(struct qed_dev *cdev);

	/* Requests to use `cnt' interrupts for fastpath.
	 * upon success, returns number of interrupts allocated for fastpath.
	 */
	int		(*set_fp_int)(struct qed_dev *cdev,
				      u16 cnt);

	/* Fills `info' with pointers required for utilizing interrupts */
	int		(*get_fp_int)(struct qed_dev *cdev,
				      struct qed_int_info *info);

	/* Bind a caller-allocated status block (virtual + DMA address) to
	 * status-block slot @sb_id and initialize @sb_info for it.
	 */
	u32		(*sb_init)(struct qed_dev *cdev,
				   struct qed_sb_info *sb_info,
				   void *sb_virt_addr,
				   dma_addr_t sb_phy_addr,
				   u16 sb_id,
				   enum qed_sb_type type);

	/* Release a status block set up via sb_init */
	u32		(*sb_release)(struct qed_dev *cdev,
				      struct qed_sb_info *sb_info,
				      u16 sb_id);

	/* Install a simulated-interrupt handler for vector @index */
	void		(*simd_handler_config)(struct qed_dev *cdev,
					       void *token,
					       int index,
					       void (*handler)(void *));

	/* Remove the handler installed for vector @index */
	void		(*simd_handler_clean)(struct qed_dev *cdev,
					      int index);
/**
 * @brief set_link - set links according to params
 *
 * @param cdev
 * @param params - values used to override the default link configuration
 *
 * @return 0 on success, error otherwise.
 */
	int		(*set_link)(struct qed_dev *cdev,
				    struct qed_link_params *params);

/**
 * @brief get_link - returns the current link state.
 *
 * @param cdev
 * @param if_link - structure to be filled with current link configuration.
 */
	void		(*get_link)(struct qed_dev *cdev,
				    struct qed_link_output *if_link);

/**
 * @brief - drains chip in case Tx completions fail to arrive due to pause.
 *
 * @param cdev
 */
	int		(*drain)(struct qed_dev *cdev);

/**
 * @brief update_msglvl - update module debug level
 *
 * @param cdev
 * @param dp_module
 * @param dp_level
 */
	void		(*update_msglvl)(struct qed_dev *cdev,
					 u32 dp_module,
					 u8 dp_level);

	/* Allocate a DMA-able chain of @num_elems elements of @elem_size
	 * bytes each, initializing @p_chain. Pair with chain_free.
	 */
	int		(*chain_alloc)(struct qed_dev *cdev,
				       enum qed_chain_use_mode intended_use,
				       enum qed_chain_mode mode,
				       u16 num_elems,
				       size_t elem_size,
				       struct qed_chain *p_chain);

	void		(*chain_free)(struct qed_dev *cdev,
				      struct qed_chain *p_chain);
};
256 | ||
/**
 * @brief qed_get_protocol_version - interface version for a protocol
 *
 * @param protocol - protocol personality to query
 *
 * @return version supported by qed for given protocol driver
 */
u32 qed_get_protocol_version(enum qed_protocol protocol);
265 | ||
/* Bitfield helpers. Each takes a field name @name/@_name for which the
 * caller defines <name>_MASK (unshifted mask) and <name>_SHIFT macros.
 * All arguments are fully parenthesized: previously FIELD_VALUE's @_value
 * and SET_FIELD's @flag were expanded bare, so compound arguments such as
 * `a | b' bound incorrectly against the mask/cast.
 */

/* Truncate @_value in place to the width of field @_name */
#define MASK_FIELD(_name, _value) \
	((_value) &= (_name ## _MASK))

/* Return @_value masked and shifted into the position of field @_name */
#define FIELD_VALUE(_name, _value) \
	(((_value) & (_name ## _MASK)) << (_name ## _SHIFT))

/* Overwrite field @name inside @value with @flag */
#define SET_FIELD(value, name, flag)				       \
	do {							       \
		(value) &= ~((name ## _MASK) << (name ## _SHIFT));     \
		(value) |= (((u64)(flag)) << (name ## _SHIFT));	       \
	} while (0)

/* Extract field @name from @value */
#define GET_FIELD(value, name) \
	(((value) >> (name ## _SHIFT)) & (name ## _MASK))
280 | ||
/* Debug print definitions.
 *
 * @cdev must expose dp_level/dp_module fields and be accepted by DP_NAME()
 * (both provided by the qed core headers). DP_ERR prints unconditionally;
 * the others compare against the device's dp_level (and, for DP_VERBOSE,
 * dp_module). Fixes vs. previous revision: DP_ERR no longer ends with a
 * stray line-continuation backslash (which silently appended the following
 * source line to the macro), DP_NOTICE drops a continued blank line, and
 * DP_VERBOSE parenthesizes @module.
 */
#define DP_ERR(cdev, fmt, ...)					     \
	pr_err("[%s:%d(%s)]" fmt,				     \
	       __func__, __LINE__,				     \
	       DP_NAME(cdev) ? DP_NAME(cdev) : "",		     \
	       ## __VA_ARGS__)

#define DP_NOTICE(cdev, fmt, ...)				     \
	do {							     \
		if (unlikely((cdev)->dp_level <= QED_LEVEL_NOTICE)) { \
			pr_notice("[%s:%d(%s)]" fmt,		     \
				  __func__, __LINE__,		     \
				  DP_NAME(cdev) ? DP_NAME(cdev) : "", \
				  ## __VA_ARGS__);		     \
		}						     \
	} while (0)

#define DP_INFO(cdev, fmt, ...)					     \
	do {							     \
		if (unlikely((cdev)->dp_level <= QED_LEVEL_INFO)) {  \
			pr_notice("[%s:%d(%s)]" fmt,		     \
				  __func__, __LINE__,		     \
				  DP_NAME(cdev) ? DP_NAME(cdev) : "", \
				  ## __VA_ARGS__);		     \
		}						     \
	} while (0)

#define DP_VERBOSE(cdev, module, fmt, ...)			     \
	do {							     \
		if (unlikely(((cdev)->dp_level <= QED_LEVEL_VERBOSE) && \
			     ((cdev)->dp_module & (module)))) {	     \
			pr_notice("[%s:%d(%s)]" fmt,		     \
				  __func__, __LINE__,		     \
				  DP_NAME(cdev) ? DP_NAME(cdev) : "", \
				  ## __VA_ARGS__);		     \
		}						     \
	} while (0)
319 | ||
/* Verbosity levels compared against cdev->dp_level by the DP_* macros;
 * lower value means more verbose.
 */
enum DP_LEVEL {
	QED_LEVEL_VERBOSE	= 0x0,
	QED_LEVEL_INFO		= 0x1,
	QED_LEVEL_NOTICE	= 0x2,
	QED_LEVEL_ERR		= 0x3,
};

/* Packing of a single 32-bit debug word: low 30 bits carry module flags,
 * bits above QED_LOG_LEVEL_SHIFT carry the level.
 */
#define QED_LOG_LEVEL_SHIFT	(30)
#define QED_LOG_VERBOSE_MASK	(0x3fffffff)
#define QED_LOG_INFO_MASK	(0x40000000)
#define QED_LOG_NOTICE_MASK	(0x80000000)
331 | ||
/* Per-module flags tested against cdev->dp_module by DP_VERBOSE().
 * Must stay within QED_LOG_VERBOSE_MASK (low 30 bits).
 */
enum DP_MODULE {
	QED_MSG_SPQ	= 0x10000,
	QED_MSG_STATS	= 0x20000,
	QED_MSG_DCB	= 0x40000,
	QED_MSG_IOV	= 0x80000,
	QED_MSG_SP	= 0x100000,
	QED_MSG_STORAGE = 0x200000,
	QED_MSG_CXT	= 0x800000,
	QED_MSG_ILT	= 0x2000000,
	QED_MSG_ROCE	= 0x4000000,
	QED_MSG_DEBUG	= 0x8000000,
	/* more bits may be added, up to QED_LOG_VERBOSE_MASK */
};
345 | ||
/* Aggregated Ethernet statistics retrieved from the device. Field names
 * follow the HW counter names; all counters are cumulative u64 values.
 */
struct qed_eth_stats {
	/* L2 function counters */
	u64	no_buff_discards;
	u64	packet_too_big_discard;
	u64	ttl0_discard;
	u64	rx_ucast_bytes;
	u64	rx_mcast_bytes;
	u64	rx_bcast_bytes;
	u64	rx_ucast_pkts;
	u64	rx_mcast_pkts;
	u64	rx_bcast_pkts;
	u64	mftag_filter_discards;
	u64	mac_filter_discards;
	u64	tx_ucast_bytes;
	u64	tx_mcast_bytes;
	u64	tx_bcast_bytes;
	u64	tx_ucast_pkts;
	u64	tx_mcast_pkts;
	u64	tx_bcast_pkts;
	u64	tx_err_drop_pkts;
	/* TPA (LRO aggregation) counters */
	u64	tpa_coalesced_pkts;
	u64	tpa_coalesced_events;
	u64	tpa_aborts_num;
	u64	tpa_not_coalesced_pkts;
	u64	tpa_coalesced_bytes;

	/* port */
	u64	rx_64_byte_packets;
	u64	rx_127_byte_packets;
	u64	rx_255_byte_packets;
	u64	rx_511_byte_packets;
	u64	rx_1023_byte_packets;
	u64	rx_1518_byte_packets;
	u64	rx_1522_byte_packets;
	u64	rx_2047_byte_packets;
	u64	rx_4095_byte_packets;
	u64	rx_9216_byte_packets;
	u64	rx_16383_byte_packets;
	u64	rx_crc_errors;
	u64	rx_mac_crtl_frames;
	u64	rx_pause_frames;
	u64	rx_pfc_frames;
	u64	rx_align_errors;
	u64	rx_carrier_errors;
	u64	rx_oversize_packets;
	u64	rx_jabbers;
	u64	rx_undersize_packets;
	u64	rx_fragments;
	u64	tx_64_byte_packets;
	u64	tx_65_to_127_byte_packets;
	u64	tx_128_to_255_byte_packets;
	u64	tx_256_to_511_byte_packets;
	u64	tx_512_to_1023_byte_packets;
	u64	tx_1024_to_1518_byte_packets;
	u64	tx_1519_to_2047_byte_packets;
	u64	tx_2048_to_4095_byte_packets;
	u64	tx_4096_to_9216_byte_packets;
	u64	tx_9217_to_16383_byte_packets;
	u64	tx_pause_frames;
	u64	tx_pfc_frames;
	u64	tx_lpi_entry_count;
	u64	tx_total_collisions;
	u64	brb_truncates;
	u64	brb_discards;
	u64	rx_mac_bytes;
	u64	rx_mac_uc_packets;
	u64	rx_mac_mc_packets;
	u64	rx_mac_bc_packets;
	u64	rx_mac_frames_ok;
	u64	tx_mac_bytes;
	u64	tx_mac_uc_packets;
	u64	tx_mac_mc_packets;
	u64	tx_mac_bc_packets;
	u64	tx_mac_ctrl_frames;
};
420 | ||
/* Bit OR-ed into qed_sb_update_sb_idx()'s return value when the status
 * block producer index has advanced since the last ack.
 */
#define QED_SB_IDX	0x0002

/* Protocol-index slots inside a status block: RX first, then one TX slot
 * per traffic class. @tc is parenthesized so compound arguments (e.g. a
 * conditional expression) associate correctly.
 */
#define RX_PI		0
#define TX_PI(tc)	(RX_PI + 1 + (tc))
425 | ||
/* Sample the status block's producer index and latch it into
 * sb_info->sb_ack if it advanced since the last call.
 *
 * Return: QED_SB_IDX if the index changed (new events are pending),
 * 0 otherwise.
 */
static inline u16 qed_sb_update_sb_idx(struct qed_sb_info *sb_info)
{
	u32 prod = 0;
	u16 rc = 0;

	/* prod_index is device-written, little-endian */
	prod = le32_to_cpu(sb_info->sb_virt->prod_index) &
	       STATUS_BLOCK_PROD_INDEX_MASK;
	if (sb_info->sb_ack != prod) {
		sb_info->sb_ack = prod;
		rc |= QED_SB_IDX;
	}

	/* Let SB update */
	mmiowb();
	return rc;
}
442 | ||
/**
 *
 * @brief This function creates an update command for interrupts that is
 *        written to the IGU.
 *
 * @param sb_info - This is the structure allocated and
 *                  initialized per status block. Assumption is
 *                  that it was initialized using qed_sb_init
 * @param int_cmd - Enable/Disable/Nop
 * @param upd_flg - whether igu consumer should be
 *                  updated.
 *
 * @return inline void
 */
static inline void qed_sb_ack(struct qed_sb_info *sb_info,
			      enum igu_int_cmd int_cmd,
			      u8 upd_flg)
{
	struct igu_prod_cons_update igu_ack = { 0 };

	/* Pack ack index, update flag, interrupt command and segment into
	 * the single command word the IGU expects.
	 */
	igu_ack.sb_id_and_flags =
		((sb_info->sb_ack << IGU_PROD_CONS_UPDATE_SB_INDEX_SHIFT) |
		 (upd_flg << IGU_PROD_CONS_UPDATE_UPDATE_FLAG_SHIFT) |
		 (int_cmd << IGU_PROD_CONS_UPDATE_ENABLE_INT_SHIFT) |
		 (IGU_SEG_ACCESS_REG <<
		  IGU_PROD_CONS_UPDATE_SEGMENT_ACCESS_SHIFT));

	DIRECT_REG_WR(sb_info->igu_addr, igu_ack.sb_id_and_flags);

	/* Both segments (interrupts & acks) are written to same place address;
	 * Need to guarantee all commands will be received (in-order) by HW.
	 */
	mmiowb();
	barrier();
}
478 | ||
479 | static inline void __internal_ram_wr(void *p_hwfn, | |
480 | void __iomem *addr, | |
481 | int size, | |
482 | u32 *data) | |
483 | ||
484 | { | |
485 | unsigned int i; | |
486 | ||
487 | for (i = 0; i < size / sizeof(*data); i++) | |
488 | DIRECT_REG_WR(&((u32 __iomem *)addr)[i], data[i]); | |
489 | } | |
490 | ||
491 | static inline void internal_ram_wr(void __iomem *addr, | |
492 | int size, | |
493 | u32 *data) | |
494 | { | |
495 | __internal_ram_wr(NULL, addr, size, data); | |
496 | } | |
497 | ||
498 | #endif |