Commit | Line | Data |
---|---|---|
71ee6730 DS |
1 | /* This file is part of the Emulex RoCE Device Driver for |
2 | * RoCE (RDMA over Converged Ethernet) adapters. | |
3 | * Copyright (C) 2012-2015 Emulex. All rights reserved. | |
4 | * EMULEX and SLI are trademarks of Emulex. | |
5 | * www.emulex.com | |
6 | * | |
7 | * This software is available to you under a choice of one of two licenses. | |
8 | * You may choose to be licensed under the terms of the GNU General Public | |
9 | * License (GPL) Version 2, available from the file COPYING in the main | |
10 | * directory of this source tree, or the BSD license below: | |
11 | * | |
12 | * Redistribution and use in source and binary forms, with or without | |
13 | * modification, are permitted provided that the following conditions | |
14 | * are met: | |
15 | * | |
16 | * - Redistributions of source code must retain the above copyright notice, | |
17 | * this list of conditions and the following disclaimer. | |
18 | * | |
19 | * - Redistributions in binary form must reproduce the above copyright | |
20 | * notice, this list of conditions and the following disclaimer in | |
21 | * the documentation and/or other materials provided with the distribution. | |
22 | * | |
23 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" | |
24 | * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE | |
25 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE | |
26 | * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE | |
27 | * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR | |
28 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF | |
29 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR | |
30 | * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, | |
31 | * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR | |
32 | * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF | |
33 | * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | |
fe2caefc PP |
34 | * |
35 | * Contact Information: | |
36 | * linux-drivers@emulex.com | |
37 | * | |
38 | * Emulex | |
39 | * 3333 Susan Street | |
40 | * Costa Mesa, CA 92626 | |
71ee6730 | 41 | */ |
fe2caefc PP |
42 | |
43 | #ifndef __OCRDMA_H__ | |
44 | #define __OCRDMA_H__ | |
45 | ||
46 | #include <linux/mutex.h> | |
47 | #include <linux/list.h> | |
48 | #include <linux/spinlock.h> | |
49 | #include <linux/pci.h> | |
50 | ||
51 | #include <rdma/ib_verbs.h> | |
52 | #include <rdma/ib_user_verbs.h> | |
fad51b7d | 53 | #include <rdma/ib_addr.h> |
fe2caefc PP |
54 | |
55 | #include <be_roce.h> | |
56 | #include "ocrdma_sli.h" | |
57 | ||
235dfcd4 | 58 | #define OCRDMA_ROCE_DRV_VERSION "10.6.0.0" |
0154410b DS |
59 | |
60 | #define OCRDMA_ROCE_DRV_DESC "Emulex OneConnect RoCE Driver" | |
fe2caefc PP |
61 | #define OCRDMA_NODE_DESC "Emulex OneConnect RoCE HCA" |
62 | ||
a51f06e1 SX |
63 | #define OC_NAME_SH OCRDMA_NODE_DESC "(Skyhawk)" |
64 | #define OC_NAME_UNKNOWN OCRDMA_NODE_DESC "(Unknown)" | |
65 | ||
66 | #define OC_SKH_DEVICE_PF 0x720 | |
67 | #define OC_SKH_DEVICE_VF 0x728 | |
fe2caefc PP |
68 | #define OCRDMA_MAX_AH 512 |
69 | ||
70 | #define OCRDMA_UVERBS(CMD_NAME) (1ull << IB_USER_VERBS_CMD_##CMD_NAME) | |
71 | ||
a51f06e1 | 72 | #define convert_to_64bit(lo, hi) ((u64)hi << 32 | (u64)lo) |
b4dbe8d5 MA |
73 | #define EQ_INTR_PER_SEC_THRSH_HI 150000 |
74 | #define EQ_INTR_PER_SEC_THRSH_LOW 100000 | |
75 | #define EQ_AIC_MAX_EQD 20 | |
76 | #define EQ_AIC_MIN_EQD 0 | |
77 | ||
78 | void ocrdma_eqd_set_task(struct work_struct *work); | |
a51f06e1 | 79 | |
fe2caefc PP |
80 | struct ocrdma_dev_attr { |
81 | u8 fw_ver[32]; | |
82 | u32 vendor_id; | |
83 | u32 device_id; | |
84 | u16 max_pd; | |
9ba1377d | 85 | u16 max_dpp_pds; |
fe2caefc PP |
86 | u16 max_cq; |
87 | u16 max_cqe; | |
88 | u16 max_qp; | |
89 | u16 max_wqe; | |
90 | u16 max_rqe; | |
7c33880c | 91 | u16 max_srq; |
fe2caefc PP |
92 | u32 max_inline_data; |
93 | int max_send_sge; | |
94 | int max_recv_sge; | |
634c5796 | 95 | int max_srq_sge; |
45e86b33 | 96 | int max_rdma_sge; |
fe2caefc PP |
97 | int max_mr; |
98 | u64 max_mr_size; | |
99 | u32 max_num_mr_pbl; | |
ac578aef | 100 | int max_mw; |
fe2caefc PP |
101 | int max_fmr; |
102 | int max_map_per_fmr; | |
103 | int max_pages_per_frmr; | |
104 | u16 max_ord_per_qp; | |
105 | u16 max_ird_per_qp; | |
106 | ||
107 | int device_cap_flags; | |
108 | u8 cq_overflow_detect; | |
109 | u8 srq_supported; | |
110 | ||
111 | u32 wqe_size; | |
112 | u32 rqe_size; | |
113 | u32 ird_page_size; | |
114 | u8 local_ca_ack_delay; | |
115 | u8 ird; | |
116 | u8 num_ird_pages; | |
117 | }; | |
118 | ||
a51f06e1 SX |
119 | struct ocrdma_dma_mem { |
120 | void *va; | |
121 | dma_addr_t pa; | |
122 | u32 size; | |
123 | }; | |
124 | ||
fe2caefc PP |
125 | struct ocrdma_pbl { |
126 | void *va; | |
127 | dma_addr_t pa; | |
128 | }; | |
129 | ||
130 | struct ocrdma_queue_info { | |
131 | void *va; | |
132 | dma_addr_t dma; | |
133 | u32 size; | |
134 | u16 len; | |
135 | u16 entry_size; /* Size of an element in the queue */ | |
136 | u16 id; /* qid, where to ring the doorbell. */ | |
137 | u16 head, tail; | |
138 | bool created; | |
fe2caefc PP |
139 | }; |
140 | ||
b4dbe8d5 MA |
141 | struct ocrdma_aic_obj { /* Adaptive interrupt coalescing (AIC) info */ |
142 | u32 prev_eqd; | |
143 | u64 eq_intr_cnt; | |
144 | u64 prev_eq_intr_cnt; | |
145 | }; | |
146 | ||
fe2caefc PP |
147 | struct ocrdma_eq { |
148 | struct ocrdma_queue_info q; | |
149 | u32 vector; | |
150 | int cq_cnt; | |
151 | struct ocrdma_dev *dev; | |
152 | char irq_name[32]; | |
b4dbe8d5 | 153 | struct ocrdma_aic_obj aic_obj; |
fe2caefc PP |
154 | }; |
155 | ||
156 | struct ocrdma_mq { | |
157 | struct ocrdma_queue_info sq; | |
158 | struct ocrdma_queue_info cq; | |
159 | bool rearm_cq; | |
160 | }; | |
161 | ||
162 | struct mqe_ctx { | |
163 | struct mutex lock; /* for serializing mailbox commands on MQ */ | |
164 | wait_queue_head_t cmd_wait; | |
165 | u32 tag; | |
166 | u16 cqe_status; | |
167 | u16 ext_status; | |
168 | bool cmd_done; | |
6dab0264 | 169 | bool fw_error_state; |
fe2caefc PP |
170 | }; |
171 | ||
1852d1da NG |
172 | struct ocrdma_hw_mr { |
173 | u32 lkey; | |
174 | u8 fr_mr; | |
175 | u8 remote_atomic; | |
176 | u8 remote_rd; | |
177 | u8 remote_wr; | |
178 | u8 local_rd; | |
179 | u8 local_wr; | |
180 | u8 mw_bind; | |
181 | u8 rsvd; | |
182 | u64 len; | |
183 | struct ocrdma_pbl *pbl_table; | |
184 | u32 num_pbls; | |
185 | u32 num_pbes; | |
186 | u32 pbl_size; | |
187 | u32 pbe_size; | |
188 | u64 fbo; | |
189 | u64 va; | |
190 | }; | |
191 | ||
192 | struct ocrdma_mr { | |
193 | struct ib_mr ibmr; | |
194 | struct ib_umem *umem; | |
195 | struct ocrdma_hw_mr hwmr; | |
196 | }; | |
197 | ||
a51f06e1 SX |
198 | struct ocrdma_stats { |
199 | u8 type; | |
200 | struct ocrdma_dev *dev; | |
201 | }; | |
202 | ||
9ba1377d MA |
203 | struct ocrdma_pd_resource_mgr { |
204 | u32 pd_norm_start; | |
205 | u16 pd_norm_count; | |
206 | u16 pd_norm_thrsh; | |
207 | u16 max_normal_pd; | |
208 | u32 pd_dpp_start; | |
209 | u16 pd_dpp_count; | |
210 | u16 pd_dpp_thrsh; | |
211 | u16 max_dpp_pd; | |
212 | u16 dpp_page_index; | |
213 | unsigned long *pd_norm_bitmap; | |
214 | unsigned long *pd_dpp_bitmap; | |
215 | bool pd_prealloc_valid; | |
216 | }; | |
217 | ||
a51f06e1 SX |
218 | struct stats_mem { |
219 | struct ocrdma_mqe mqe; | |
220 | void *va; | |
221 | dma_addr_t pa; | |
222 | u32 size; | |
223 | char *debugfs_mem; | |
224 | }; | |
225 | ||
226 | struct phy_info { | |
227 | u16 auto_speeds_supported; | |
228 | u16 fixed_speeds_supported; | |
229 | u16 phy_type; | |
230 | u16 interface_type; | |
231 | }; | |
232 | ||
fe2caefc PP |
233 | struct ocrdma_dev { |
234 | struct ib_device ibdev; | |
235 | struct ocrdma_dev_attr attr; | |
236 | ||
237 | struct mutex dev_lock; /* provides syncronise access to device data */ | |
238 | spinlock_t flush_q_lock ____cacheline_aligned; | |
239 | ||
240 | struct ocrdma_cq **cq_tbl; | |
241 | struct ocrdma_qp **qp_tbl; | |
242 | ||
c88bd03f | 243 | struct ocrdma_eq *eq_tbl; |
fe2caefc | 244 | int eq_cnt; |
b4dbe8d5 | 245 | struct delayed_work eqd_work; |
fe2caefc PP |
246 | u16 base_eqid; |
247 | u16 max_eq; | |
248 | ||
249 | union ib_gid *sgid_tbl; | |
250 | /* provided synchronization to sgid table for | |
251 | * updating gid entries triggered by notifier. | |
252 | */ | |
253 | spinlock_t sgid_lock; | |
254 | ||
255 | int gsi_qp_created; | |
256 | struct ocrdma_cq *gsi_sqcq; | |
257 | struct ocrdma_cq *gsi_rqcq; | |
258 | ||
259 | struct { | |
260 | struct ocrdma_av *va; | |
261 | dma_addr_t pa; | |
262 | u32 size; | |
263 | u32 num_ah; | |
264 | /* provide synchronization for av | |
265 | * entry allocations. | |
266 | */ | |
267 | spinlock_t lock; | |
268 | u32 ahid; | |
269 | struct ocrdma_pbl pbl; | |
270 | } av_tbl; | |
271 | ||
272 | void *mbx_cmd; | |
273 | struct ocrdma_mq mq; | |
274 | struct mqe_ctx mqe_ctx; | |
275 | ||
276 | struct be_dev_info nic_info; | |
a51f06e1 SX |
277 | struct phy_info phy; |
278 | char model_number[32]; | |
279 | u32 hba_port_num; | |
fe2caefc PP |
280 | |
281 | struct list_head entry; | |
3e4d60a8 | 282 | struct rcu_head rcu; |
fe2caefc | 283 | int id; |
4f1df844 | 284 | u64 *stag_arr; |
31dbdd9a SX |
285 | u8 sl; /* service level */ |
286 | bool pfc_state; | |
287 | atomic_t update_sl; | |
84b105db | 288 | u16 pvid; |
21c3391a | 289 | u32 asic_id; |
a51f06e1 SX |
290 | |
291 | ulong last_stats_time; | |
292 | struct mutex stats_lock; /* provide synch for debugfs operations */ | |
293 | struct stats_mem stats_mem; | |
294 | struct ocrdma_stats rsrc_stats; | |
295 | struct ocrdma_stats rx_stats; | |
296 | struct ocrdma_stats wqe_stats; | |
297 | struct ocrdma_stats tx_stats; | |
298 | struct ocrdma_stats db_err_stats; | |
299 | struct ocrdma_stats tx_qp_err_stats; | |
300 | struct ocrdma_stats rx_qp_err_stats; | |
301 | struct ocrdma_stats tx_dbg_stats; | |
302 | struct ocrdma_stats rx_dbg_stats; | |
ad56ebb4 SX |
303 | struct ocrdma_stats driver_stats; |
304 | struct ocrdma_stats reset_stats; | |
a51f06e1 | 305 | struct dentry *dir; |
ad56ebb4 SX |
306 | atomic_t async_err_stats[OCRDMA_MAX_ASYNC_ERRORS]; |
307 | atomic_t cqe_err_stats[OCRDMA_MAX_CQE_ERR]; | |
9ba1377d | 308 | struct ocrdma_pd_resource_mgr *pd_mgr; |
fe2caefc PP |
309 | }; |
310 | ||
311 | struct ocrdma_cq { | |
312 | struct ib_cq ibcq; | |
fe2caefc PP |
313 | struct ocrdma_cqe *va; |
314 | u32 phase; | |
315 | u32 getp; /* pointer to pending wrs to | |
316 | * return to stack, wrap arounds | |
317 | * at max_hw_cqe | |
318 | */ | |
319 | u32 max_hw_cqe; | |
320 | bool phase_change; | |
ea617626 DS |
321 | bool deferred_arm, deferred_sol; |
322 | bool first_arm; | |
fe2caefc PP |
323 | |
324 | spinlock_t cq_lock ____cacheline_aligned; /* provide synchronization | |
325 | * to cq polling | |
326 | */ | |
327 | /* syncronizes cq completion handler invoked from multiple context */ | |
328 | spinlock_t comp_handler_lock ____cacheline_aligned; | |
329 | u16 id; | |
330 | u16 eqn; | |
331 | ||
332 | struct ocrdma_ucontext *ucontext; | |
333 | dma_addr_t pa; | |
334 | u32 len; | |
ea617626 | 335 | u32 cqe_cnt; |
fe2caefc PP |
336 | |
337 | /* head of all qp's sq and rq for which cqes need to be flushed | |
338 | * by the software. | |
339 | */ | |
340 | struct list_head sq_head, rq_head; | |
341 | }; | |
342 | ||
343 | struct ocrdma_pd { | |
344 | struct ib_pd ibpd; | |
fe2caefc | 345 | struct ocrdma_ucontext *uctx; |
fe2caefc PP |
346 | u32 id; |
347 | int num_dpp_qp; | |
348 | u32 dpp_page; | |
349 | bool dpp_enabled; | |
350 | }; | |
351 | ||
352 | struct ocrdma_ah { | |
353 | struct ib_ah ibah; | |
fe2caefc PP |
354 | struct ocrdma_av *av; |
355 | u16 sgid_index; | |
356 | u32 id; | |
357 | }; | |
358 | ||
359 | struct ocrdma_qp_hwq_info { | |
360 | u8 *va; /* virtual address */ | |
361 | u32 max_sges; | |
362 | u32 head, tail; | |
363 | u32 entry_size; | |
364 | u32 max_cnt; | |
365 | u32 max_wqe_idx; | |
fe2caefc PP |
366 | u16 dbid; /* qid, where to ring the doorbell. */ |
367 | u32 len; | |
368 | dma_addr_t pa; | |
369 | }; | |
370 | ||
371 | struct ocrdma_srq { | |
372 | struct ib_srq ibsrq; | |
fe2caefc | 373 | u8 __iomem *db; |
9884bcdc NG |
374 | struct ocrdma_qp_hwq_info rq; |
375 | u64 *rqe_wr_id_tbl; | |
376 | u32 *idx_bit_fields; | |
377 | u32 bit_fields_len; | |
378 | ||
fe2caefc PP |
379 | /* provide synchronization to multiple context(s) posting rqe */ |
380 | spinlock_t q_lock ____cacheline_aligned; | |
381 | ||
fe2caefc | 382 | struct ocrdma_pd *pd; |
fe2caefc | 383 | u32 id; |
fe2caefc PP |
384 | }; |
385 | ||
386 | struct ocrdma_qp { | |
387 | struct ib_qp ibqp; | |
fe2caefc PP |
388 | |
389 | u8 __iomem *sq_db; | |
fe2caefc PP |
390 | struct ocrdma_qp_hwq_info sq; |
391 | struct { | |
392 | uint64_t wrid; | |
393 | uint16_t dpp_wqe_idx; | |
394 | uint16_t dpp_wqe; | |
395 | uint8_t signaled; | |
396 | uint8_t rsvd[3]; | |
397 | } *wqe_wr_id_tbl; | |
398 | u32 max_inline_data; | |
9884bcdc NG |
399 | |
400 | /* provide synchronization to multiple context(s) posting wqe, rqe */ | |
401 | spinlock_t q_lock ____cacheline_aligned; | |
fe2caefc PP |
402 | struct ocrdma_cq *sq_cq; |
403 | /* list maintained per CQ to flush SQ errors */ | |
404 | struct list_head sq_entry; | |
405 | ||
406 | u8 __iomem *rq_db; | |
407 | struct ocrdma_qp_hwq_info rq; | |
408 | u64 *rqe_wr_id_tbl; | |
409 | struct ocrdma_cq *rq_cq; | |
410 | struct ocrdma_srq *srq; | |
411 | /* list maintained per CQ to flush RQ errors */ | |
412 | struct list_head rq_entry; | |
413 | ||
414 | enum ocrdma_qp_state state; /* QP state */ | |
415 | int cap_flags; | |
416 | u32 max_ord, max_ird; | |
417 | ||
418 | u32 id; | |
419 | struct ocrdma_pd *pd; | |
420 | ||
421 | enum ib_qp_type qp_type; | |
422 | ||
423 | int sgid_idx; | |
424 | u32 qkey; | |
425 | bool dpp_enabled; | |
426 | u8 *ird_q_va; | |
2b51a9b9 | 427 | bool signaled; |
fe2caefc PP |
428 | }; |
429 | ||
fe2caefc PP |
430 | struct ocrdma_ucontext { |
431 | struct ib_ucontext ibucontext; | |
fe2caefc PP |
432 | |
433 | struct list_head mm_head; | |
434 | struct mutex mm_list_lock; /* protects list entries of mm type */ | |
cffce990 NG |
435 | struct ocrdma_pd *cntxt_pd; |
436 | int pd_in_use; | |
437 | ||
fe2caefc PP |
438 | struct { |
439 | u32 *va; | |
440 | dma_addr_t pa; | |
441 | u32 len; | |
442 | } ah_tbl; | |
443 | }; | |
444 | ||
445 | struct ocrdma_mm { | |
446 | struct { | |
447 | u64 phy_addr; | |
448 | unsigned long len; | |
449 | } key; | |
450 | struct list_head entry; | |
451 | }; | |
452 | ||
453 | static inline struct ocrdma_dev *get_ocrdma_dev(struct ib_device *ibdev) | |
454 | { | |
455 | return container_of(ibdev, struct ocrdma_dev, ibdev); | |
456 | } | |
457 | ||
458 | static inline struct ocrdma_ucontext *get_ocrdma_ucontext(struct ib_ucontext | |
459 | *ibucontext) | |
460 | { | |
461 | return container_of(ibucontext, struct ocrdma_ucontext, ibucontext); | |
462 | } | |
463 | ||
464 | static inline struct ocrdma_pd *get_ocrdma_pd(struct ib_pd *ibpd) | |
465 | { | |
466 | return container_of(ibpd, struct ocrdma_pd, ibpd); | |
467 | } | |
468 | ||
469 | static inline struct ocrdma_cq *get_ocrdma_cq(struct ib_cq *ibcq) | |
470 | { | |
471 | return container_of(ibcq, struct ocrdma_cq, ibcq); | |
472 | } | |
473 | ||
474 | static inline struct ocrdma_qp *get_ocrdma_qp(struct ib_qp *ibqp) | |
475 | { | |
476 | return container_of(ibqp, struct ocrdma_qp, ibqp); | |
477 | } | |
478 | ||
479 | static inline struct ocrdma_mr *get_ocrdma_mr(struct ib_mr *ibmr) | |
480 | { | |
481 | return container_of(ibmr, struct ocrdma_mr, ibmr); | |
482 | } | |
483 | ||
484 | static inline struct ocrdma_ah *get_ocrdma_ah(struct ib_ah *ibah) | |
485 | { | |
486 | return container_of(ibah, struct ocrdma_ah, ibah); | |
487 | } | |
488 | ||
489 | static inline struct ocrdma_srq *get_ocrdma_srq(struct ib_srq *ibsrq) | |
490 | { | |
491 | return container_of(ibsrq, struct ocrdma_srq, ibsrq); | |
492 | } | |
493 | ||
df176ea0 NG |
494 | static inline int is_cqe_valid(struct ocrdma_cq *cq, struct ocrdma_cqe *cqe) |
495 | { | |
496 | int cqe_valid; | |
497 | cqe_valid = le32_to_cpu(cqe->flags_status_srcqpn) & OCRDMA_CQE_VALID; | |
f99b1649 | 498 | return (cqe_valid == cq->phase); |
df176ea0 NG |
499 | } |
500 | ||
501 | static inline int is_cqe_for_sq(struct ocrdma_cqe *cqe) | |
502 | { | |
503 | return (le32_to_cpu(cqe->flags_status_srcqpn) & | |
504 | OCRDMA_CQE_QTYPE) ? 0 : 1; | |
505 | } | |
506 | ||
507 | static inline int is_cqe_invalidated(struct ocrdma_cqe *cqe) | |
508 | { | |
509 | return (le32_to_cpu(cqe->flags_status_srcqpn) & | |
510 | OCRDMA_CQE_INVALIDATE) ? 1 : 0; | |
511 | } | |
512 | ||
513 | static inline int is_cqe_imm(struct ocrdma_cqe *cqe) | |
514 | { | |
515 | return (le32_to_cpu(cqe->flags_status_srcqpn) & | |
516 | OCRDMA_CQE_IMM) ? 1 : 0; | |
517 | } | |
518 | ||
519 | static inline int is_cqe_wr_imm(struct ocrdma_cqe *cqe) | |
520 | { | |
521 | return (le32_to_cpu(cqe->flags_status_srcqpn) & | |
522 | OCRDMA_CQE_WRITE_IMM) ? 1 : 0; | |
523 | } | |
524 | ||
40aca6ff MS |
525 | static inline int ocrdma_resolve_dmac(struct ocrdma_dev *dev, |
526 | struct ib_ah_attr *ah_attr, u8 *mac_addr) | |
527 | { | |
528 | struct in6_addr in6; | |
529 | ||
530 | memcpy(&in6, ah_attr->grh.dgid.raw, sizeof(in6)); | |
531 | if (rdma_is_multicast_addr(&in6)) | |
532 | rdma_get_mcast_mac(&in6, mac_addr); | |
d27b2f15 MA |
533 | else if (rdma_link_local_addr(&in6)) |
534 | rdma_get_ll_mac(&in6, mac_addr); | |
40aca6ff MS |
535 | else |
536 | memcpy(mac_addr, ah_attr->dmac, ETH_ALEN); | |
537 | return 0; | |
538 | } | |
df176ea0 | 539 | |
a51f06e1 SX |
540 | static inline char *hca_name(struct ocrdma_dev *dev) |
541 | { | |
542 | switch (dev->nic_info.pdev->device) { | |
543 | case OC_SKH_DEVICE_PF: | |
544 | case OC_SKH_DEVICE_VF: | |
545 | return OC_NAME_SH; | |
546 | default: | |
547 | return OC_NAME_UNKNOWN; | |
548 | } | |
549 | } | |
550 | ||
ea617626 DS |
551 | static inline int ocrdma_get_eq_table_index(struct ocrdma_dev *dev, |
552 | int eqid) | |
553 | { | |
554 | int indx; | |
555 | ||
556 | for (indx = 0; indx < dev->eq_cnt; indx++) { | |
557 | if (dev->eq_tbl[indx].q.id == eqid) | |
558 | return indx; | |
559 | } | |
560 | ||
561 | return -EINVAL; | |
562 | } | |
563 | ||
21c3391a DS |
564 | static inline u8 ocrdma_get_asic_type(struct ocrdma_dev *dev) |
565 | { | |
566 | if (dev->nic_info.dev_family == 0xF && !dev->asic_id) { | |
567 | pci_read_config_dword( | |
568 | dev->nic_info.pdev, | |
569 | OCRDMA_SLI_ASIC_ID_OFFSET, &dev->asic_id); | |
570 | } | |
571 | ||
572 | return (dev->asic_id & OCRDMA_SLI_ASIC_GEN_NUM_MASK) >> | |
573 | OCRDMA_SLI_ASIC_GEN_NUM_SHIFT; | |
574 | } | |
575 | ||
31dbdd9a SX |
576 | static inline u8 ocrdma_get_pfc_prio(u8 *pfc, u8 prio) |
577 | { | |
578 | return *(pfc + prio); | |
579 | } | |
580 | ||
581 | static inline u8 ocrdma_get_app_prio(u8 *app_prio, u8 prio) | |
582 | { | |
583 | return *(app_prio + prio); | |
584 | } | |
585 | ||
586 | static inline u8 ocrdma_is_enabled_and_synced(u32 state) | |
587 | { /* May also be used to interpret TC-state, QCN-state | |
588 | * Appl-state and Logical-link-state in future. | |
589 | */ | |
590 | return (state & OCRDMA_STATE_FLAG_ENABLED) && | |
591 | (state & OCRDMA_STATE_FLAG_SYNC); | |
592 | } | |
593 | ||
fe2caefc | 594 | #endif |