Commit | Line | Data |
---|---|---|
853e2bd2 BG |
1 | /* bnx2fc_hwi.c: Broadcom NetXtreme II Linux FCoE offload driver. |
2 | * This file contains the low level functions that interact | |
3 | * with 57712 FCoE firmware. | |
4 | * | |
9b35baae | 5 | * Copyright (c) 2008 - 2011 Broadcom Corporation |
853e2bd2 BG |
6 | * |
7 | * This program is free software; you can redistribute it and/or modify | |
8 | * it under the terms of the GNU General Public License as published by | |
9 | * the Free Software Foundation. | |
10 | * | |
11 | * Written by: Bhanu Prakash Gollapudi (bprakash@broadcom.com) | |
12 | */ | |
13 | ||
14 | #include "bnx2fc.h" | |
15 | ||
16 | DECLARE_PER_CPU(struct bnx2fc_percpu_s, bnx2fc_percpu); | |
17 | ||
18 | static void bnx2fc_fastpath_notification(struct bnx2fc_hba *hba, | |
19 | struct fcoe_kcqe *new_cqe_kcqe); | |
20 | static void bnx2fc_process_ofld_cmpl(struct bnx2fc_hba *hba, | |
21 | struct fcoe_kcqe *ofld_kcqe); | |
22 | static void bnx2fc_process_enable_conn_cmpl(struct bnx2fc_hba *hba, | |
23 | struct fcoe_kcqe *ofld_kcqe); | |
24 | static void bnx2fc_init_failure(struct bnx2fc_hba *hba, u32 err_code); | |
25 | static void bnx2fc_process_conn_destroy_cmpl(struct bnx2fc_hba *hba, | |
aea71a02 | 26 | struct fcoe_kcqe *destroy_kcqe); |
853e2bd2 BG |
27 | |
28 | int bnx2fc_send_stat_req(struct bnx2fc_hba *hba) | |
29 | { | |
30 | struct fcoe_kwqe_stat stat_req; | |
31 | struct kwqe *kwqe_arr[2]; | |
32 | int num_kwqes = 1; | |
33 | int rc = 0; | |
34 | ||
35 | memset(&stat_req, 0x00, sizeof(struct fcoe_kwqe_stat)); | |
36 | stat_req.hdr.op_code = FCOE_KWQE_OPCODE_STAT; | |
37 | stat_req.hdr.flags = | |
38 | (FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT); | |
39 | ||
40 | stat_req.stat_params_addr_lo = (u32) hba->stats_buf_dma; | |
41 | stat_req.stat_params_addr_hi = (u32) ((u64)hba->stats_buf_dma >> 32); | |
42 | ||
43 | kwqe_arr[0] = (struct kwqe *) &stat_req; | |
44 | ||
45 | if (hba->cnic && hba->cnic->submit_kwqes) | |
46 | rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes); | |
47 | ||
48 | return rc; | |
49 | } | |
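
The stat_params_addr lo/hi assignments above show a pattern repeated throughout this file: a 64-bit DMA address is handed to the firmware as two 32-bit words. A minimal standalone sketch of the split (the helper name and test value are illustrative only, not part of the driver):

```c
#include <stdint.h>
#include <stdio.h>

/* Split a 64-bit bus address into the lo/hi 32-bit words the
 * firmware KWQE fields expect. Hypothetical helper for illustration. */
static void split_dma_addr(uint64_t addr, uint32_t *lo, uint32_t *hi)
{
    *lo = (uint32_t)addr;         /* low  32 bits */
    *hi = (uint32_t)(addr >> 32); /* high 32 bits */
}

int main(void)
{
    uint32_t lo, hi;

    split_dma_addr(0x0000001234abcd00ULL, &lo, &hi);
    /* prints lo=0x34abcd00 hi=0x00000012 */
    printf("lo=0x%08x hi=0x%08x\n", (unsigned)lo, (unsigned)hi);
    return 0;
}
```
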
50 | ||
51 | /** | |
52 | * bnx2fc_send_fw_fcoe_init_msg - initiates initial handshake with FCoE f/w | |
53 | * | |
54 | * @hba: adapter structure pointer | |
55 | * | |
56 | * Send down FCoE firmware init KWQEs, which initiate the initial handshake | |
57 | * with the f/w. | |
58 | * | |
59 | */ | |
60 | int bnx2fc_send_fw_fcoe_init_msg(struct bnx2fc_hba *hba) | |
61 | { | |
62 | struct fcoe_kwqe_init1 fcoe_init1; | |
63 | struct fcoe_kwqe_init2 fcoe_init2; | |
64 | struct fcoe_kwqe_init3 fcoe_init3; | |
65 | struct kwqe *kwqe_arr[3]; | |
66 | int num_kwqes = 3; | |
67 | int rc = 0; | |
68 | ||
69 | if (!hba->cnic) { | |
aea71a02 | 70 | printk(KERN_ERR PFX "hba->cnic NULL during fcoe fw init\n"); |
853e2bd2 BG |
71 | return -ENODEV; |
72 | } | |
73 | ||
74 | /* fill init1 KWQE */ | |
75 | memset(&fcoe_init1, 0x00, sizeof(struct fcoe_kwqe_init1)); | |
76 | fcoe_init1.hdr.op_code = FCOE_KWQE_OPCODE_INIT1; | |
77 | fcoe_init1.hdr.flags = (FCOE_KWQE_LAYER_CODE << | |
78 | FCOE_KWQE_HEADER_LAYER_CODE_SHIFT); | |
79 | ||
80 | fcoe_init1.num_tasks = BNX2FC_MAX_TASKS; | |
81 | fcoe_init1.sq_num_wqes = BNX2FC_SQ_WQES_MAX; | |
82 | fcoe_init1.rq_num_wqes = BNX2FC_RQ_WQES_MAX; | |
83 | fcoe_init1.rq_buffer_log_size = BNX2FC_RQ_BUF_LOG_SZ; | |
84 | fcoe_init1.cq_num_wqes = BNX2FC_CQ_WQES_MAX; | |
85 | fcoe_init1.dummy_buffer_addr_lo = (u32) hba->dummy_buf_dma; | |
86 | fcoe_init1.dummy_buffer_addr_hi = (u32) ((u64)hba->dummy_buf_dma >> 32); | |
87 | fcoe_init1.task_list_pbl_addr_lo = (u32) hba->task_ctx_bd_dma; | |
88 | fcoe_init1.task_list_pbl_addr_hi = | |
89 | (u32) ((u64) hba->task_ctx_bd_dma >> 32); | |
1294bfe6 | 90 | fcoe_init1.mtu = BNX2FC_MINI_JUMBO_MTU; |
853e2bd2 BG |
91 | |
92 | fcoe_init1.flags = (PAGE_SHIFT << | |
93 | FCOE_KWQE_INIT1_LOG_PAGE_SIZE_SHIFT); | |
94 | ||
95 | fcoe_init1.num_sessions_log = BNX2FC_NUM_MAX_SESS_LOG; | |
96 | ||
97 | /* fill init2 KWQE */ | |
98 | memset(&fcoe_init2, 0x00, sizeof(struct fcoe_kwqe_init2)); | |
99 | fcoe_init2.hdr.op_code = FCOE_KWQE_OPCODE_INIT2; | |
100 | fcoe_init2.hdr.flags = (FCOE_KWQE_LAYER_CODE << | |
101 | FCOE_KWQE_HEADER_LAYER_CODE_SHIFT); | |
102 | ||
619c5cb6 VZ |
103 | fcoe_init2.hsi_major_version = FCOE_HSI_MAJOR_VERSION; |
104 | fcoe_init2.hsi_minor_version = FCOE_HSI_MINOR_VERSION; | |
105 | ||
aea71a02 | 106 | |
853e2bd2 BG |
107 | fcoe_init2.hash_tbl_pbl_addr_lo = (u32) hba->hash_tbl_pbl_dma; |
108 | fcoe_init2.hash_tbl_pbl_addr_hi = (u32) | |
109 | ((u64) hba->hash_tbl_pbl_dma >> 32); | |
110 | ||
111 | fcoe_init2.t2_hash_tbl_addr_lo = (u32) hba->t2_hash_tbl_dma; | |
112 | fcoe_init2.t2_hash_tbl_addr_hi = (u32) | |
113 | ((u64) hba->t2_hash_tbl_dma >> 32); | |
114 | ||
115 | fcoe_init2.t2_ptr_hash_tbl_addr_lo = (u32) hba->t2_hash_tbl_ptr_dma; | |
116 | fcoe_init2.t2_ptr_hash_tbl_addr_hi = (u32) | |
117 | ((u64) hba->t2_hash_tbl_ptr_dma >> 32); | |
118 | ||
119 | fcoe_init2.free_list_count = BNX2FC_NUM_MAX_SESS; | |
120 | ||
121 | /* fill init3 KWQE */ | |
122 | memset(&fcoe_init3, 0x00, sizeof(struct fcoe_kwqe_init3)); | |
123 | fcoe_init3.hdr.op_code = FCOE_KWQE_OPCODE_INIT3; | |
124 | fcoe_init3.hdr.flags = (FCOE_KWQE_LAYER_CODE << | |
125 | FCOE_KWQE_HEADER_LAYER_CODE_SHIFT); | |
126 | fcoe_init3.error_bit_map_lo = 0xffffffff; | |
127 | fcoe_init3.error_bit_map_hi = 0xffffffff; | |
128 | ||
619c5cb6 | 129 | fcoe_init3.perf_config = 1; |
853e2bd2 BG |
130 | |
131 | kwqe_arr[0] = (struct kwqe *) &fcoe_init1; | |
132 | kwqe_arr[1] = (struct kwqe *) &fcoe_init2; | |
133 | kwqe_arr[2] = (struct kwqe *) &fcoe_init3; | |
134 | ||
135 | if (hba->cnic && hba->cnic->submit_kwqes) | |
136 | rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes); | |
137 | ||
138 | return rc; | |
139 | } | |
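
Every KWQE built in this file begins with the same header recipe: an opcode byte plus a layer code shifted into the flags byte. A hedged sketch of that encoding; the constant values below are placeholders, since the real ones come from the Broadcom HSI headers:

```c
#include <stdint.h>
#include <stdio.h>

/* Placeholder values -- the real ones live in the bnx2x/cnic HSI headers. */
#define FCOE_KWQE_LAYER_CODE              7
#define FCOE_KWQE_HEADER_LAYER_CODE_SHIFT 4
#define FCOE_KWQE_OPCODE_INIT1            0

struct fcoe_kwqe_header {
    uint8_t op_code;
    uint8_t flags;
};

/* Build the two-byte header every KWQE in this file starts with. */
static struct fcoe_kwqe_header build_kwqe_hdr(uint8_t op_code)
{
    struct fcoe_kwqe_header hdr = {
        .op_code = op_code,
        .flags   = FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT,
    };
    return hdr;
}

int main(void)
{
    struct fcoe_kwqe_header hdr = build_kwqe_hdr(FCOE_KWQE_OPCODE_INIT1);

    printf("op=0x%02x flags=0x%02x\n", hdr.op_code, hdr.flags);
    return 0;
}
```
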
140 | int bnx2fc_send_fw_fcoe_destroy_msg(struct bnx2fc_hba *hba) | |
141 | { | |
142 | struct fcoe_kwqe_destroy fcoe_destroy; | |
143 | struct kwqe *kwqe_arr[2]; | |
144 | int num_kwqes = 1; | |
145 | int rc = -1; | |
146 | ||
147 | /* fill destroy KWQE */ | |
148 | memset(&fcoe_destroy, 0x00, sizeof(struct fcoe_kwqe_destroy)); | |
149 | fcoe_destroy.hdr.op_code = FCOE_KWQE_OPCODE_DESTROY; | |
150 | fcoe_destroy.hdr.flags = (FCOE_KWQE_LAYER_CODE << | |
151 | FCOE_KWQE_HEADER_LAYER_CODE_SHIFT); | |
152 | kwqe_arr[0] = (struct kwqe *) &fcoe_destroy; | |
153 | ||
154 | if (hba->cnic && hba->cnic->submit_kwqes) | |
155 | rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes); | |
156 | return rc; | |
157 | } | |
158 | ||
159 | /** | |
160 | * bnx2fc_send_session_ofld_req - initiates FCoE Session offload process | |
161 | * | |
162 | * @port: port structure pointer | |
163 | * @tgt: bnx2fc_rport structure pointer | |
164 | */ | |
165 | int bnx2fc_send_session_ofld_req(struct fcoe_port *port, | |
166 | struct bnx2fc_rport *tgt) | |
167 | { | |
168 | struct fc_lport *lport = port->lport; | |
aea71a02 BPG |
169 | struct bnx2fc_interface *interface = port->priv; |
170 | struct bnx2fc_hba *hba = interface->hba; | |
853e2bd2 BG |
171 | struct kwqe *kwqe_arr[4]; |
172 | struct fcoe_kwqe_conn_offload1 ofld_req1; | |
173 | struct fcoe_kwqe_conn_offload2 ofld_req2; | |
174 | struct fcoe_kwqe_conn_offload3 ofld_req3; | |
175 | struct fcoe_kwqe_conn_offload4 ofld_req4; | |
176 | struct fc_rport_priv *rdata = tgt->rdata; | |
177 | struct fc_rport *rport = tgt->rport; | |
178 | int num_kwqes = 4; | |
179 | u32 port_id; | |
180 | int rc = 0; | |
181 | u16 conn_id; | |
182 | ||
183 | /* Initialize offload request 1 structure */ | |
184 | memset(&ofld_req1, 0x00, sizeof(struct fcoe_kwqe_conn_offload1)); | |
185 | ||
186 | ofld_req1.hdr.op_code = FCOE_KWQE_OPCODE_OFFLOAD_CONN1; | |
187 | ofld_req1.hdr.flags = | |
188 | (FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT); | |
189 | ||
190 | ||
191 | conn_id = (u16)tgt->fcoe_conn_id; | |
192 | ofld_req1.fcoe_conn_id = conn_id; | |
193 | ||
194 | ||
195 | ofld_req1.sq_addr_lo = (u32) tgt->sq_dma; | |
196 | ofld_req1.sq_addr_hi = (u32)((u64) tgt->sq_dma >> 32); | |
197 | ||
198 | ofld_req1.rq_pbl_addr_lo = (u32) tgt->rq_pbl_dma; | |
199 | ofld_req1.rq_pbl_addr_hi = (u32)((u64) tgt->rq_pbl_dma >> 32); | |
200 | ||
201 | ofld_req1.rq_first_pbe_addr_lo = (u32) tgt->rq_dma; | |
202 | ofld_req1.rq_first_pbe_addr_hi = | |
203 | (u32)((u64) tgt->rq_dma >> 32); | |
204 | ||
205 | ofld_req1.rq_prod = 0x8000; | |
206 | ||
207 | /* Initialize offload request 2 structure */ | |
208 | memset(&ofld_req2, 0x00, sizeof(struct fcoe_kwqe_conn_offload2)); | |
209 | ||
210 | ofld_req2.hdr.op_code = FCOE_KWQE_OPCODE_OFFLOAD_CONN2; | |
211 | ofld_req2.hdr.flags = | |
212 | (FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT); | |
213 | ||
214 | ofld_req2.tx_max_fc_pay_len = rdata->maxframe_size; | |
215 | ||
216 | ofld_req2.cq_addr_lo = (u32) tgt->cq_dma; | |
217 | ofld_req2.cq_addr_hi = (u32)((u64)tgt->cq_dma >> 32); | |
218 | ||
219 | ofld_req2.xferq_addr_lo = (u32) tgt->xferq_dma; | |
220 | ofld_req2.xferq_addr_hi = (u32)((u64)tgt->xferq_dma >> 32); | |
221 | ||
222 | ofld_req2.conn_db_addr_lo = (u32)tgt->conn_db_dma; | |
223 | ofld_req2.conn_db_addr_hi = (u32)((u64)tgt->conn_db_dma >> 32); | |
224 | ||
225 | /* Initialize offload request 3 structure */ | |
226 | memset(&ofld_req3, 0x00, sizeof(struct fcoe_kwqe_conn_offload3)); | |
227 | ||
228 | ofld_req3.hdr.op_code = FCOE_KWQE_OPCODE_OFFLOAD_CONN3; | |
229 | ofld_req3.hdr.flags = | |
230 | (FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT); | |
231 | ||
aea71a02 | 232 | ofld_req3.vlan_tag = interface->vlan_id << |
853e2bd2 BG |
233 | FCOE_KWQE_CONN_OFFLOAD3_VLAN_ID_SHIFT; |
234 | ofld_req3.vlan_tag |= 3 << FCOE_KWQE_CONN_OFFLOAD3_PRIORITY_SHIFT; | |
235 | ||
236 | port_id = fc_host_port_id(lport->host); | |
237 | if (port_id == 0) { | |
238 | BNX2FC_HBA_DBG(lport, "ofld_req: port_id = 0, link down?\n"); | |
239 | return -EINVAL; | |
240 | } | |
241 | ||
242 | /* | |
243 | * Store s_id of the initiator for further reference. This will | |
244 | * be used during disable/destroy and during linkdown processing, | |
245 | * since the port_id is also reset to 0 when the lport is reset | |
246 | */ | |
247 | tgt->sid = port_id; | |
248 | ofld_req3.s_id[0] = (port_id & 0x000000FF); | |
249 | ofld_req3.s_id[1] = (port_id & 0x0000FF00) >> 8; | |
250 | ofld_req3.s_id[2] = (port_id & 0x00FF0000) >> 16; | |
251 | ||
252 | port_id = rport->port_id; | |
253 | ofld_req3.d_id[0] = (port_id & 0x000000FF); | |
254 | ofld_req3.d_id[1] = (port_id & 0x0000FF00) >> 8; | |
255 | ofld_req3.d_id[2] = (port_id & 0x00FF0000) >> 16; | |
256 | ||
257 | ofld_req3.tx_total_conc_seqs = rdata->max_seq; | |
258 | ||
259 | ofld_req3.tx_max_conc_seqs_c3 = rdata->max_seq; | |
260 | ofld_req3.rx_max_fc_pay_len = lport->mfs; | |
261 | ||
262 | ofld_req3.rx_total_conc_seqs = BNX2FC_MAX_SEQS; | |
263 | ofld_req3.rx_max_conc_seqs_c3 = BNX2FC_MAX_SEQS; | |
264 | ofld_req3.rx_open_seqs_exch_c3 = 1; | |
265 | ||
266 | ofld_req3.confq_first_pbe_addr_lo = tgt->confq_dma; | |
267 | ofld_req3.confq_first_pbe_addr_hi = (u32)((u64) tgt->confq_dma >> 32); | |
268 | ||
269 | /* set mul_n_port_ids supported flag to 0, until it is supported */ | |
270 | ofld_req3.flags = 0; | |
271 | /* | |
272 | ofld_req3.flags |= (((lport->send_sp_features & FC_SP_FT_MNA) ? 1:0) << | |
273 | FCOE_KWQE_CONN_OFFLOAD3_B_MUL_N_PORT_IDS_SHIFT); | |
274 | */ | |
275 | /* Info from PLOGI response */ | |
276 | ofld_req3.flags |= (((rdata->sp_features & FC_SP_FT_EDTR) ? 1 : 0) << | |
277 | FCOE_KWQE_CONN_OFFLOAD3_B_E_D_TOV_RES_SHIFT); | |
278 | ||
279 | ofld_req3.flags |= (((rdata->sp_features & FC_SP_FT_SEQC) ? 1 : 0) << | |
280 | FCOE_KWQE_CONN_OFFLOAD3_B_CONT_INCR_SEQ_CNT_SHIFT); | |
281 | ||
b252f4c7 BPG |
282 | /* |
283 | * Info from PRLI response, this info is used for sequence level error | |
284 | * recovery support | |
285 | */ | |
286 | if (tgt->dev_type == TYPE_TAPE) { | |
287 | ofld_req3.flags |= 1 << | |
288 | FCOE_KWQE_CONN_OFFLOAD3_B_CONF_REQ_SHIFT; | |
289 | ofld_req3.flags |= (((rdata->flags & FC_RP_FLAGS_REC_SUPPORTED) | |
290 | ? 1 : 0) << | |
291 | FCOE_KWQE_CONN_OFFLOAD3_B_REC_VALID_SHIFT); | |
292 | } | |
293 | ||
853e2bd2 | 294 | /* vlan flag */ |
aea71a02 | 295 | ofld_req3.flags |= (interface->vlan_enabled << |
853e2bd2 BG |
296 | FCOE_KWQE_CONN_OFFLOAD3_B_VLAN_FLAG_SHIFT); |
297 | ||
298 | /* C2_VALID and ACK flags are not set as they are not supported */ | |
299 | ||
300 | ||
301 | /* Initialize offload request 4 structure */ | |
302 | memset(&ofld_req4, 0x00, sizeof(struct fcoe_kwqe_conn_offload4)); | |
303 | ofld_req4.hdr.op_code = FCOE_KWQE_OPCODE_OFFLOAD_CONN4; | |
304 | ofld_req4.hdr.flags = | |
305 | (FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT); | |
306 | ||
307 | ofld_req4.e_d_tov_timer_val = lport->e_d_tov / 20; | |
308 | ||
309 | ||
619c5cb6 | 310 | ofld_req4.src_mac_addr_lo[0] = port->data_src_addr[5]; |
853e2bd2 | 311 | /* local mac */ |
619c5cb6 VZ |
312 | ofld_req4.src_mac_addr_lo[1] = port->data_src_addr[4]; |
313 | ofld_req4.src_mac_addr_mid[0] = port->data_src_addr[3]; | |
314 | ofld_req4.src_mac_addr_mid[1] = port->data_src_addr[2]; | |
315 | ofld_req4.src_mac_addr_hi[0] = port->data_src_addr[1]; | |
316 | ofld_req4.src_mac_addr_hi[1] = port->data_src_addr[0]; | |
aea71a02 BPG |
317 | ofld_req4.dst_mac_addr_lo[0] = interface->ctlr.dest_addr[5]; |
318 | /* fcf mac */ | |
319 | ofld_req4.dst_mac_addr_lo[1] = interface->ctlr.dest_addr[4]; | |
320 | ofld_req4.dst_mac_addr_mid[0] = interface->ctlr.dest_addr[3]; | |
321 | ofld_req4.dst_mac_addr_mid[1] = interface->ctlr.dest_addr[2]; | |
322 | ofld_req4.dst_mac_addr_hi[0] = interface->ctlr.dest_addr[1]; | |
323 | ofld_req4.dst_mac_addr_hi[1] = interface->ctlr.dest_addr[0]; | |
853e2bd2 BG |
324 | |
325 | ofld_req4.lcq_addr_lo = (u32) tgt->lcq_dma; | |
326 | ofld_req4.lcq_addr_hi = (u32)((u64) tgt->lcq_dma >> 32); | |
327 | ||
328 | ofld_req4.confq_pbl_base_addr_lo = (u32) tgt->confq_pbl_dma; | |
329 | ofld_req4.confq_pbl_base_addr_hi = | |
330 | (u32)((u64) tgt->confq_pbl_dma >> 32); | |
331 | ||
332 | kwqe_arr[0] = (struct kwqe *) &ofld_req1; | |
333 | kwqe_arr[1] = (struct kwqe *) &ofld_req2; | |
334 | kwqe_arr[2] = (struct kwqe *) &ofld_req3; | |
335 | kwqe_arr[3] = (struct kwqe *) &ofld_req4; | |
336 | ||
337 | if (hba->cnic && hba->cnic->submit_kwqes) | |
338 | rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes); | |
339 | ||
340 | return rc; | |
341 | } | |
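
The s_id/d_id handling above converts a 24-bit Fibre Channel port ID held in a u32 into three bytes, least significant byte first. A self-contained round trip of that conversion (helper names are illustrative):

```c
#include <stdint.h>
#include <assert.h>
#include <stdio.h>

/* Pack a 24-bit FC port ID into the 3-byte array layout used by the
 * offload/enable/disable KWQEs: byte 0 holds the least significant bits. */
static void fc_pid_pack(uint32_t port_id, uint8_t id[3])
{
    id[0] = port_id & 0x000000FF;
    id[1] = (port_id & 0x0000FF00) >> 8;
    id[2] = (port_id & 0x00FF0000) >> 16;
}

static uint32_t fc_pid_unpack(const uint8_t id[3])
{
    return id[0] | (id[1] << 8) | ((uint32_t)id[2] << 16);
}

int main(void)
{
    uint8_t id[3];

    fc_pid_pack(0x010203, id); /* domain 01, area 02, port 03 */
    assert(fc_pid_unpack(id) == 0x010203);
    printf("s_id bytes: %02x %02x %02x\n", id[0], id[1], id[2]);
    return 0;
}
```
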
342 | ||
343 | /** | |
344 | * bnx2fc_send_session_enable_req - initiates FCoE Session enablement | |
345 | * | |
346 | * @port: port structure pointer | |
347 | * @tgt: bnx2fc_rport structure pointer | |
348 | */ | |
349 | static int bnx2fc_send_session_enable_req(struct fcoe_port *port, | |
350 | struct bnx2fc_rport *tgt) | |
351 | { | |
352 | struct kwqe *kwqe_arr[2]; | |
aea71a02 BPG |
353 | struct bnx2fc_interface *interface = port->priv; |
354 | struct bnx2fc_hba *hba = interface->hba; | |
853e2bd2 BG |
355 | struct fcoe_kwqe_conn_enable_disable enbl_req; |
356 | struct fc_lport *lport = port->lport; | |
357 | struct fc_rport *rport = tgt->rport; | |
358 | int num_kwqes = 1; | |
359 | int rc = 0; | |
360 | u32 port_id; | |
361 | ||
362 | memset(&enbl_req, 0x00, | |
363 | sizeof(struct fcoe_kwqe_conn_enable_disable)); | |
364 | enbl_req.hdr.op_code = FCOE_KWQE_OPCODE_ENABLE_CONN; | |
365 | enbl_req.hdr.flags = | |
366 | (FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT); | |
367 | ||
619c5cb6 | 368 | enbl_req.src_mac_addr_lo[0] = port->data_src_addr[5]; |
853e2bd2 | 369 | /* local mac */ |
619c5cb6 VZ |
370 | enbl_req.src_mac_addr_lo[1] = port->data_src_addr[4]; |
371 | enbl_req.src_mac_addr_mid[0] = port->data_src_addr[3]; | |
372 | enbl_req.src_mac_addr_mid[1] = port->data_src_addr[2]; | |
373 | enbl_req.src_mac_addr_hi[0] = port->data_src_addr[1]; | |
374 | enbl_req.src_mac_addr_hi[1] = port->data_src_addr[0]; | |
375 | memcpy(tgt->src_addr, port->data_src_addr, ETH_ALEN); | |
376 | ||
aea71a02 BPG |
377 | enbl_req.dst_mac_addr_lo[0] = interface->ctlr.dest_addr[5]; |
378 | enbl_req.dst_mac_addr_lo[1] = interface->ctlr.dest_addr[4]; | |
379 | enbl_req.dst_mac_addr_mid[0] = interface->ctlr.dest_addr[3]; | |
380 | enbl_req.dst_mac_addr_mid[1] = interface->ctlr.dest_addr[2]; | |
381 | enbl_req.dst_mac_addr_hi[0] = interface->ctlr.dest_addr[1]; | |
382 | enbl_req.dst_mac_addr_hi[1] = interface->ctlr.dest_addr[0]; | |
853e2bd2 BG |
383 | |
384 | port_id = fc_host_port_id(lport->host); | |
385 | if (port_id != tgt->sid) { | |
386 | printk(KERN_ERR PFX "WARN: enable_req port_id = 0x%x, " | |
387 | "sid = 0x%x\n", port_id, tgt->sid); | |
388 | port_id = tgt->sid; | |
389 | } | |
390 | enbl_req.s_id[0] = (port_id & 0x000000FF); | |
391 | enbl_req.s_id[1] = (port_id & 0x0000FF00) >> 8; | |
392 | enbl_req.s_id[2] = (port_id & 0x00FF0000) >> 16; | |
393 | ||
394 | port_id = rport->port_id; | |
395 | enbl_req.d_id[0] = (port_id & 0x000000FF); | |
396 | enbl_req.d_id[1] = (port_id & 0x0000FF00) >> 8; | |
397 | enbl_req.d_id[2] = (port_id & 0x00FF0000) >> 16; | |
aea71a02 | 398 | enbl_req.vlan_tag = interface->vlan_id << |
853e2bd2 BG |
399 | FCOE_KWQE_CONN_ENABLE_DISABLE_VLAN_ID_SHIFT; |
400 | enbl_req.vlan_tag |= 3 << FCOE_KWQE_CONN_ENABLE_DISABLE_PRIORITY_SHIFT; | |
aea71a02 | 401 | enbl_req.vlan_flag = interface->vlan_enabled; |
853e2bd2 BG |
402 | enbl_req.context_id = tgt->context_id; |
403 | enbl_req.conn_id = tgt->fcoe_conn_id; | |
404 | ||
405 | kwqe_arr[0] = (struct kwqe *) &enbl_req; | |
406 | ||
407 | if (hba->cnic && hba->cnic->submit_kwqes) | |
408 | rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes); | |
409 | return rc; | |
410 | } | |
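
The MAC programming above, repeated in the offload and disable requests, stores a 6-byte Ethernet address into three 16-bit lo/mid/hi fields in reversed byte order, so addr[5] lands in lo[0]. A standalone sketch of that transform (struct and helper names are made up for illustration):

```c
#include <stdint.h>
#include <stdio.h>

/* Firmware-style MAC layout: three 2-byte groups, bytes reversed.
 * Field and helper names here are illustrative, not the HSI names. */
struct fw_mac {
    uint8_t lo[2];
    uint8_t mid[2];
    uint8_t hi[2];
};

static void mac_to_fw(const uint8_t addr[6], struct fw_mac *m)
{
    m->lo[0]  = addr[5];
    m->lo[1]  = addr[4];
    m->mid[0] = addr[3];
    m->mid[1] = addr[2];
    m->hi[0]  = addr[1];
    m->hi[1]  = addr[0];
}

int main(void)
{
    const uint8_t mac[6] = { 0x00, 0x10, 0x18, 0xaa, 0xbb, 0xcc };
    struct fw_mac m;

    mac_to_fw(mac, &m);
    printf("lo=%02x%02x mid=%02x%02x hi=%02x%02x\n",
           m.lo[0], m.lo[1], m.mid[0], m.mid[1], m.hi[0], m.hi[1]);
    return 0;
}
```
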
411 | ||
412 | /** | |
413 | * bnx2fc_send_session_disable_req - initiates FCoE Session disable | |
414 | * | |
415 | * @port: port structure pointer | |
416 | * @tgt: bnx2fc_rport structure pointer | |
417 | */ | |
418 | int bnx2fc_send_session_disable_req(struct fcoe_port *port, | |
419 | struct bnx2fc_rport *tgt) | |
420 | { | |
aea71a02 BPG |
421 | struct bnx2fc_interface *interface = port->priv; |
422 | struct bnx2fc_hba *hba = interface->hba; | |
853e2bd2 BG |
423 | struct fcoe_kwqe_conn_enable_disable disable_req; |
424 | struct kwqe *kwqe_arr[2]; | |
425 | struct fc_rport *rport = tgt->rport; | |
426 | int num_kwqes = 1; | |
427 | int rc = 0; | |
428 | u32 port_id; | |
429 | ||
430 | memset(&disable_req, 0x00, | |
431 | sizeof(struct fcoe_kwqe_conn_enable_disable)); | |
432 | disable_req.hdr.op_code = FCOE_KWQE_OPCODE_DISABLE_CONN; | |
433 | disable_req.hdr.flags = | |
434 | (FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT); | |
435 | ||
619c5cb6 VZ |
436 | disable_req.src_mac_addr_lo[0] = tgt->src_addr[5]; |
437 | disable_req.src_mac_addr_lo[1] = tgt->src_addr[4]; | |
438 | disable_req.src_mac_addr_mid[0] = tgt->src_addr[3]; | |
439 | disable_req.src_mac_addr_mid[1] = tgt->src_addr[2]; | |
440 | disable_req.src_mac_addr_hi[0] = tgt->src_addr[1]; | |
441 | disable_req.src_mac_addr_hi[1] = tgt->src_addr[0]; | |
853e2bd2 | 442 | |
aea71a02 BPG |
443 | disable_req.dst_mac_addr_lo[0] = interface->ctlr.dest_addr[5]; |
444 | disable_req.dst_mac_addr_lo[1] = interface->ctlr.dest_addr[4]; | |
445 | disable_req.dst_mac_addr_mid[0] = interface->ctlr.dest_addr[3]; | |
446 | disable_req.dst_mac_addr_mid[1] = interface->ctlr.dest_addr[2]; | |
447 | disable_req.dst_mac_addr_hi[0] = interface->ctlr.dest_addr[1]; | |
448 | disable_req.dst_mac_addr_hi[1] = interface->ctlr.dest_addr[0]; | |
853e2bd2 BG |
449 | |
450 | port_id = tgt->sid; | |
451 | disable_req.s_id[0] = (port_id & 0x000000FF); | |
452 | disable_req.s_id[1] = (port_id & 0x0000FF00) >> 8; | |
453 | disable_req.s_id[2] = (port_id & 0x00FF0000) >> 16; | |
454 | ||
455 | ||
456 | port_id = rport->port_id; | |
457 | disable_req.d_id[0] = (port_id & 0x000000FF); | |
458 | disable_req.d_id[1] = (port_id & 0x0000FF00) >> 8; | |
459 | disable_req.d_id[2] = (port_id & 0x00FF0000) >> 16; | |
460 | disable_req.context_id = tgt->context_id; | |
461 | disable_req.conn_id = tgt->fcoe_conn_id; | |
aea71a02 | 462 | disable_req.vlan_tag = interface->vlan_id << |
853e2bd2 BG |
463 | FCOE_KWQE_CONN_ENABLE_DISABLE_VLAN_ID_SHIFT; |
464 | disable_req.vlan_tag |= | |
465 | 3 << FCOE_KWQE_CONN_ENABLE_DISABLE_PRIORITY_SHIFT; | |
aea71a02 | 466 | disable_req.vlan_flag = interface->vlan_enabled; |
853e2bd2 BG |
467 | |
468 | kwqe_arr[0] = (struct kwqe *) &disable_req; | |
469 | ||
470 | if (hba->cnic && hba->cnic->submit_kwqes) | |
471 | rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes); | |
472 | ||
473 | return rc; | |
474 | } | |
475 | ||
476 | /** | |
477 | * bnx2fc_send_session_destroy_req - initiates FCoE Session destroy | |
478 | * | |
479 | * @hba: adapter structure pointer | |
480 | * @tgt: bnx2fc_rport structure pointer | |
481 | */ | |
482 | int bnx2fc_send_session_destroy_req(struct bnx2fc_hba *hba, | |
483 | struct bnx2fc_rport *tgt) | |
484 | { | |
485 | struct fcoe_kwqe_conn_destroy destroy_req; | |
486 | struct kwqe *kwqe_arr[2]; | |
487 | int num_kwqes = 1; | |
488 | int rc = 0; | |
489 | ||
490 | memset(&destroy_req, 0x00, sizeof(struct fcoe_kwqe_conn_destroy)); | |
491 | destroy_req.hdr.op_code = FCOE_KWQE_OPCODE_DESTROY_CONN; | |
492 | destroy_req.hdr.flags = | |
493 | (FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT); | |
494 | ||
495 | destroy_req.context_id = tgt->context_id; | |
496 | destroy_req.conn_id = tgt->fcoe_conn_id; | |
497 | ||
498 | kwqe_arr[0] = (struct kwqe *) &destroy_req; | |
499 | ||
500 | if (hba->cnic && hba->cnic->submit_kwqes) | |
501 | rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes); | |
502 | ||
503 | return rc; | |
504 | } | |
505 | ||
d36b3279 BPG |
506 | static bool is_valid_lport(struct bnx2fc_hba *hba, struct fc_lport *lport) |
507 | { | |
508 | struct bnx2fc_lport *blport; | |
509 | ||
510 | spin_lock_bh(&hba->hba_lock); | |
511 | list_for_each_entry(blport, &hba->vports, list) { | |
512 | if (blport->lport == lport) { | |
513 | spin_unlock_bh(&hba->hba_lock); | |
514 | return true; | |
515 | } | |
516 | } | |
517 | spin_unlock_bh(&hba->hba_lock); | |
518 | return false; | |
519 | ||
520 | } | |
521 | ||
522 | ||
853e2bd2 BG |
523 | static void bnx2fc_unsol_els_work(struct work_struct *work) |
524 | { | |
525 | struct bnx2fc_unsol_els *unsol_els; | |
526 | struct fc_lport *lport; | |
d36b3279 | 527 | struct bnx2fc_hba *hba; |
853e2bd2 BG |
528 | struct fc_frame *fp; |
529 | ||
530 | unsol_els = container_of(work, struct bnx2fc_unsol_els, unsol_els_work); | |
531 | lport = unsol_els->lport; | |
532 | fp = unsol_els->fp; | |
d36b3279 BPG |
533 | hba = unsol_els->hba; |
534 | if (is_valid_lport(hba, lport)) | |
535 | fc_exch_recv(lport, fp); | |
853e2bd2 BG |
536 | kfree(unsol_els); |
537 | } | |
538 | ||
539 | void bnx2fc_process_l2_frame_compl(struct bnx2fc_rport *tgt, | |
540 | unsigned char *buf, | |
541 | u32 frame_len, u16 l2_oxid) | |
542 | { | |
543 | struct fcoe_port *port = tgt->port; | |
544 | struct fc_lport *lport = port->lport; | |
aea71a02 | 545 | struct bnx2fc_interface *interface = port->priv; |
853e2bd2 BG |
546 | struct bnx2fc_unsol_els *unsol_els; |
547 | struct fc_frame_header *fh; | |
548 | struct fc_frame *fp; | |
549 | struct sk_buff *skb; | |
550 | u32 payload_len; | |
551 | u32 crc; | |
552 | u8 op; | |
553 | ||
554 | ||
555 | unsol_els = kzalloc(sizeof(*unsol_els), GFP_ATOMIC); | |
556 | if (!unsol_els) { | |
557 | BNX2FC_TGT_DBG(tgt, "Unable to allocate unsol_work\n"); | |
558 | return; | |
559 | } | |
560 | ||
561 | BNX2FC_TGT_DBG(tgt, "l2_frame_compl l2_oxid = 0x%x, frame_len = %d\n", | |
562 | l2_oxid, frame_len); | |
563 | ||
564 | payload_len = frame_len - sizeof(struct fc_frame_header); | |
565 | ||
566 | fp = fc_frame_alloc(lport, payload_len); | |
567 | if (!fp) { | |
568 | printk(KERN_ERR PFX "fc_frame_alloc failure\n"); | |
5c2dce26 | 569 | kfree(unsol_els); |
853e2bd2 BG |
570 | return; |
571 | } | |
572 | ||
573 | fh = (struct fc_frame_header *) fc_frame_header_get(fp); | |
574 | /* Copy FC Frame header and payload into the frame */ | |
575 | memcpy(fh, buf, frame_len); | |
576 | ||
577 | if (l2_oxid != FC_XID_UNKNOWN) | |
578 | fh->fh_ox_id = htons(l2_oxid); | |
579 | ||
580 | skb = fp_skb(fp); | |
581 | ||
582 | if ((fh->fh_r_ctl == FC_RCTL_ELS_REQ) || | |
583 | (fh->fh_r_ctl == FC_RCTL_ELS_REP)) { | |
584 | ||
585 | if (fh->fh_type == FC_TYPE_ELS) { | |
586 | op = fc_frame_payload_op(fp); | |
587 | if ((op == ELS_TEST) || (op == ELS_ESTC) || | |
588 | (op == ELS_FAN) || (op == ELS_CSU)) { | |
589 | /* | |
590 | * No need to reply for these | |
591 | * ELS requests | |
592 | */ | |
593 | printk(KERN_ERR PFX "dropping ELS 0x%x\n", op); | |
594 | kfree_skb(skb); | |
5c2dce26 | 595 | kfree(unsol_els); |
853e2bd2 BG |
596 | return; |
597 | } | |
598 | } | |
599 | crc = fcoe_fc_crc(fp); | |
600 | fc_frame_init(fp); | |
601 | fr_dev(fp) = lport; | |
602 | fr_sof(fp) = FC_SOF_I3; | |
603 | fr_eof(fp) = FC_EOF_T; | |
604 | fr_crc(fp) = cpu_to_le32(~crc); | |
605 | unsol_els->lport = lport; | |
aea71a02 | 606 | unsol_els->hba = interface->hba; |
853e2bd2 BG |
607 | unsol_els->fp = fp; |
608 | INIT_WORK(&unsol_els->unsol_els_work, bnx2fc_unsol_els_work); | |
609 | queue_work(bnx2fc_wq, &unsol_els->unsol_els_work); | |
610 | } else { | |
611 | BNX2FC_HBA_DBG(lport, "fh_r_ctl = 0x%x\n", fh->fh_r_ctl); | |
612 | kfree_skb(skb); | |
5c2dce26 | 613 | kfree(unsol_els); |
853e2bd2 BG |
614 | } |
615 | } | |
616 | ||
617 | static void bnx2fc_process_unsol_compl(struct bnx2fc_rport *tgt, u16 wqe) | |
618 | { | |
619 | u8 num_rq; | |
620 | struct fcoe_err_report_entry *err_entry; | |
621 | unsigned char *rq_data; | |
622 | unsigned char *buf = NULL, *buf1; | |
623 | int i; | |
624 | u16 xid; | |
625 | u32 frame_len, len; | |
626 | struct bnx2fc_cmd *io_req = NULL; | |
627 | struct fcoe_task_ctx_entry *task, *task_page; | |
aea71a02 BPG |
628 | struct bnx2fc_interface *interface = tgt->port->priv; |
629 | struct bnx2fc_hba *hba = interface->hba; | |
853e2bd2 BG |
630 | int task_idx, index; |
631 | int rc = 0; | |
7b594769 BPG |
632 | u64 err_warn_bit_map; |
633 | u8 err_warn = 0xff; | |
853e2bd2 BG |
634 | |
635 | ||
636 | BNX2FC_TGT_DBG(tgt, "Entered UNSOL COMPLETION wqe = 0x%x\n", wqe); | |
637 | switch (wqe & FCOE_UNSOLICITED_CQE_SUBTYPE) { | |
638 | case FCOE_UNSOLICITED_FRAME_CQE_TYPE: | |
639 | frame_len = (wqe & FCOE_UNSOLICITED_CQE_PKT_LEN) >> | |
640 | FCOE_UNSOLICITED_CQE_PKT_LEN_SHIFT; | |
641 | ||
642 | num_rq = (frame_len + BNX2FC_RQ_BUF_SZ - 1) / BNX2FC_RQ_BUF_SZ; | |
643 | ||
68695973 | 644 | spin_lock_bh(&tgt->tgt_lock); |
853e2bd2 | 645 | rq_data = (unsigned char *)bnx2fc_get_next_rqe(tgt, num_rq); |
68695973 NS |
646 | spin_unlock_bh(&tgt->tgt_lock); |
647 | ||
853e2bd2 BG |
648 | if (rq_data) { |
649 | buf = rq_data; | |
650 | } else { | |
651 | buf1 = buf = kmalloc((num_rq * BNX2FC_RQ_BUF_SZ), | |
652 | GFP_ATOMIC); | |
653 | ||
654 | if (!buf1) { | |
655 | BNX2FC_TGT_DBG(tgt, "Memory alloc failure\n"); | |
656 | break; | |
657 | } | |
658 | ||
659 | for (i = 0; i < num_rq; i++) { | |
68695973 | 660 | spin_lock_bh(&tgt->tgt_lock); |
853e2bd2 BG |
661 | rq_data = (unsigned char *) |
662 | bnx2fc_get_next_rqe(tgt, 1); | |
68695973 | 663 | spin_unlock_bh(&tgt->tgt_lock); |
853e2bd2 BG |
664 | len = BNX2FC_RQ_BUF_SZ; |
665 | memcpy(buf1, rq_data, len); | |
666 | buf1 += len; | |
667 | } | |
668 | } | |
669 | bnx2fc_process_l2_frame_compl(tgt, buf, frame_len, | |
670 | FC_XID_UNKNOWN); | |
671 | ||
672 | if (buf != rq_data) | |
673 | kfree(buf); | |
68695973 | 674 | spin_lock_bh(&tgt->tgt_lock); |
853e2bd2 | 675 | bnx2fc_return_rqe(tgt, num_rq); |
68695973 | 676 | spin_unlock_bh(&tgt->tgt_lock); |
853e2bd2 BG |
677 | break; |
678 | ||
679 | case FCOE_ERROR_DETECTION_CQE_TYPE: | |
680 | /* | |
68695973 NS |
681 | * In case of error reporting CQE a single RQ entry |
682 | * is consumed. | |
853e2bd2 BG |
683 | */ |
684 | spin_lock_bh(&tgt->tgt_lock); | |
685 | num_rq = 1; | |
686 | err_entry = (struct fcoe_err_report_entry *) | |
687 | bnx2fc_get_next_rqe(tgt, 1); | |
688 | xid = err_entry->fc_hdr.ox_id; | |
689 | BNX2FC_TGT_DBG(tgt, "Unsol Error Frame OX_ID = 0x%x\n", xid); | |
690 | BNX2FC_TGT_DBG(tgt, "err_warn_bitmap = %08x:%08x\n", | |
619c5cb6 VZ |
691 | err_entry->data.err_warn_bitmap_hi, |
692 | err_entry->data.err_warn_bitmap_lo); | |
853e2bd2 | 693 | BNX2FC_TGT_DBG(tgt, "buf_offsets - tx = 0x%x, rx = 0x%x\n", |
619c5cb6 | 694 | err_entry->data.tx_buf_off, err_entry->data.rx_buf_off); |
853e2bd2 | 695 | |
853e2bd2 BG |
696 | |
697 | if (xid > BNX2FC_MAX_XID) { | |
698 | BNX2FC_TGT_DBG(tgt, "xid(0x%x) out of FW range\n", | |
699 | xid); | |
7b594769 | 700 | goto ret_err_rqe; |
853e2bd2 BG |
701 | } |
702 | ||
703 | task_idx = xid / BNX2FC_TASKS_PER_PAGE; | |
704 | index = xid % BNX2FC_TASKS_PER_PAGE; | |
705 | task_page = (struct fcoe_task_ctx_entry *) | |
aea71a02 | 706 | hba->task_ctx[task_idx]; |
853e2bd2 BG |
707 | task = &(task_page[index]); |
708 | ||
709 | io_req = (struct bnx2fc_cmd *)hba->cmd_mgr->cmds[xid]; | |
7b594769 BPG |
710 | if (!io_req) |
711 | goto ret_err_rqe; | |
853e2bd2 BG |
712 | |
713 | if (io_req->cmd_type != BNX2FC_SCSI_CMD) { | |
714 | printk(KERN_ERR PFX "err_warn: Not a SCSI cmd\n"); | |
7b594769 | 715 | goto ret_err_rqe; |
853e2bd2 BG |
716 | } |
717 | ||
718 | if (test_and_clear_bit(BNX2FC_FLAG_IO_CLEANUP, | |
719 | &io_req->req_flags)) { | |
720 | BNX2FC_IO_DBG(io_req, "unsol_err: cleanup in " | |
721 | "progress.. ignore unsol err\n"); | |
7b594769 BPG |
722 | goto ret_err_rqe; |
723 | } | |
724 | ||
725 | err_warn_bit_map = (u64) | |
726 | ((u64)err_entry->data.err_warn_bitmap_hi << 32) | | |
727 | (u64)err_entry->data.err_warn_bitmap_lo; | |
728 | for (i = 0; i < BNX2FC_NUM_ERR_BITS; i++) { | |
729 | if (err_warn_bit_map & (u64)((u64)1 << i)) { | |
730 | err_warn = i; | |
731 | break; | |
732 | } | |
853e2bd2 BG |
733 | } |
734 | ||
735 | /* | |
736 | * If ABTS is already in progress, and FW error is | |
737 | * received after that, do not cancel the timeout_work | |
738 | * and let the error recovery continue by explicitly | |
739 | * logging out the target, when the ABTS eventually | |
740 | * times out. | |
741 | */ | |
7b594769 | 742 | if (test_bit(BNX2FC_FLAG_ISSUE_ABTS, &io_req->req_flags)) { |
853e2bd2 BG |
743 | printk(KERN_ERR PFX "err_warn: io_req (0x%x) already " |
744 | "in ABTS processing\n", xid); | |
7b594769 BPG |
745 | goto ret_err_rqe; |
746 | } | |
747 | BNX2FC_TGT_DBG(tgt, "err = 0x%x\n", err_warn); | |
748 | if (tgt->dev_type != TYPE_TAPE) | |
749 | goto skip_rec; | |
750 | switch (err_warn) { | |
751 | case FCOE_ERROR_CODE_REC_TOV_TIMER_EXPIRATION: | |
752 | case FCOE_ERROR_CODE_DATA_OOO_RO: | |
753 | case FCOE_ERROR_CODE_COMMON_INCORRECT_SEQ_CNT: | |
754 | case FCOE_ERROR_CODE_DATA_SOFI3_SEQ_ACTIVE_SET: | |
755 | case FCOE_ERROR_CODE_FCP_RSP_OPENED_SEQ: | |
756 | case FCOE_ERROR_CODE_DATA_SOFN_SEQ_ACTIVE_RESET: | |
757 | BNX2FC_TGT_DBG(tgt, "REC TOV popped for xid - 0x%x\n", | |
758 | xid); | |
759 | memset(&io_req->err_entry, 0, | |
760 | sizeof(struct fcoe_err_report_entry)); | |
761 | memcpy(&io_req->err_entry, err_entry, | |
762 | sizeof(struct fcoe_err_report_entry)); | |
763 | if (!test_bit(BNX2FC_FLAG_SRR_SENT, | |
764 | &io_req->req_flags)) { | |
765 | spin_unlock_bh(&tgt->tgt_lock); | |
766 | rc = bnx2fc_send_rec(io_req); | |
767 | spin_lock_bh(&tgt->tgt_lock); | |
768 | ||
769 | if (rc) | |
770 | goto skip_rec; | |
771 | } else | |
772 | printk(KERN_ERR PFX "SRR in progress\n"); | |
773 | goto ret_err_rqe; | |
774 | break; | |
775 | default: | |
776 | break; | |
777 | } | |
778 | ||
779 | skip_rec: | |
780 | set_bit(BNX2FC_FLAG_ISSUE_ABTS, &io_req->req_flags); | |
781 | /* | |
782 | * Cancel the timeout_work, as we received IO | |
783 | * completion with FW error. | |
784 | */ | |
785 | if (cancel_delayed_work(&io_req->timeout_work)) | |
786 | kref_put(&io_req->refcount, bnx2fc_cmd_release); | |
787 | ||
788 | rc = bnx2fc_initiate_abts(io_req); | |
789 | if (rc != SUCCESS) { | |
790 | printk(KERN_ERR PFX "err_warn: initiate_abts " | |
791 | "failed xid = 0x%x. issue cleanup\n", | |
792 | io_req->xid); | |
793 | bnx2fc_initiate_cleanup(io_req); | |
794 | } | |
795 | ret_err_rqe: | |
796 | bnx2fc_return_rqe(tgt, 1); | |
853e2bd2 BG |
797 | spin_unlock_bh(&tgt->tgt_lock); |
798 | break; | |
799 | ||
800 | case FCOE_WARNING_DETECTION_CQE_TYPE: | |
801 | /* | |
802 | * In case of warning reporting CQE a single RQ entry | |
803 | * is consumed. | |
804 | */ | |
68695973 | 805 | spin_lock_bh(&tgt->tgt_lock); |
853e2bd2 BG |
806 | num_rq = 1; |
807 | err_entry = (struct fcoe_err_report_entry *) | |
808 | bnx2fc_get_next_rqe(tgt, 1); | |
809 | xid = cpu_to_be16(err_entry->fc_hdr.ox_id); | |
810 | BNX2FC_TGT_DBG(tgt, "Unsol Warning Frame OX_ID = 0x%x\n", xid); | |
811 | BNX2FC_TGT_DBG(tgt, "err_warn_bitmap = %08x:%08x", | |
619c5cb6 VZ |
812 | err_entry->data.err_warn_bitmap_hi, |
813 | err_entry->data.err_warn_bitmap_lo); | |
853e2bd2 | 814 | BNX2FC_TGT_DBG(tgt, "buf_offsets - tx = 0x%x, rx = 0x%x", |
619c5cb6 | 815 | err_entry->data.tx_buf_off, err_entry->data.rx_buf_off); |
853e2bd2 | 816 | |
7b594769 BPG |
817 | if (xid > BNX2FC_MAX_XID) { |
818 | BNX2FC_TGT_DBG(tgt, "xid(0x%x) out of FW range\n", xid); | |
819 | goto ret_warn_rqe; | |
820 | } | |
821 | ||
822 | err_warn_bit_map = (u64) | |
823 | ((u64)err_entry->data.err_warn_bitmap_hi << 32) | | |
824 | (u64)err_entry->data.err_warn_bitmap_lo; | |
825 | for (i = 0; i < BNX2FC_NUM_ERR_BITS; i++) { | |
826 | if (err_warn_bit_map & (u64) (1 << i)) { | |
827 | err_warn = i; | |
828 | break; | |
829 | } | |
830 | } | |
831 | BNX2FC_TGT_DBG(tgt, "warn = 0x%x\n", err_warn); | |
832 | ||
833 | task_idx = xid / BNX2FC_TASKS_PER_PAGE; | |
834 | index = xid % BNX2FC_TASKS_PER_PAGE; | |
835 | task_page = (struct fcoe_task_ctx_entry *) | |
836 | interface->hba->task_ctx[task_idx]; | |
837 | task = &(task_page[index]); | |
838 | io_req = (struct bnx2fc_cmd *)hba->cmd_mgr->cmds[xid]; | |
839 | if (!io_req) | |
840 | goto ret_warn_rqe; | |
841 | ||
842 | if (io_req->cmd_type != BNX2FC_SCSI_CMD) { | |
843 | printk(KERN_ERR PFX "err_warn: Not a SCSI cmd\n"); | |
844 | goto ret_warn_rqe; | |
845 | } | |
846 | ||
847 | memset(&io_req->err_entry, 0, | |
848 | sizeof(struct fcoe_err_report_entry)); | |
849 | memcpy(&io_req->err_entry, err_entry, | |
850 | sizeof(struct fcoe_err_report_entry)); | |
851 | ||
852 | if (err_warn == FCOE_ERROR_CODE_REC_TOV_TIMER_EXPIRATION) | |
853 | /* REC_TOV is not a warning code */ | |
854 | BUG_ON(1); | |
855 | else | |
856 | BNX2FC_TGT_DBG(tgt, "Unsolicited warning\n"); | |
857 | ret_warn_rqe: | |
853e2bd2 | 858 | bnx2fc_return_rqe(tgt, 1); |
68695973 | 859 | spin_unlock_bh(&tgt->tgt_lock); |
853e2bd2 BG |
860 | break; |
861 | ||
862 | default: | |
863 | printk(KERN_ERR PFX "Unsol Compl: Invalid CQE Subtype\n"); | |
864 | break; | |
865 | } | |
866 | } | |
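
Both the error and the warning branches above rebuild a 64-bit err_warn bitmap from the hi/lo halves reported in the RQ entry, then scan linearly for the first set bit. A compact model of that logic (the bit-count constant is a stand-in for the driver's):

```c
#include <stdint.h>
#include <stdio.h>

#define BNX2FC_NUM_ERR_BITS 63 /* stand-in for the driver's constant */

/* Combine the two 32-bit halves carried in the RQ error entry. */
static uint64_t err_warn_bitmap(uint32_t hi, uint32_t lo)
{
    return ((uint64_t)hi << 32) | lo;
}

/* First set bit, mirroring the driver's linear scan (0xff = none set). */
static uint8_t first_err_bit(uint64_t map)
{
    for (int i = 0; i < BNX2FC_NUM_ERR_BITS; i++)
        if (map & ((uint64_t)1 << i))
            return (uint8_t)i;
    return 0xff;
}

int main(void)
{
    uint64_t map = err_warn_bitmap(0x00000001, 0x00000000);

    printf("first err/warn bit = %u\n", first_err_bit(map)); /* 32 */
    return 0;
}
```
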
867 | ||
868 | void bnx2fc_process_cq_compl(struct bnx2fc_rport *tgt, u16 wqe) | |
869 | { | |
870 | struct fcoe_task_ctx_entry *task; | |
871 | struct fcoe_task_ctx_entry *task_page; | |
872 | struct fcoe_port *port = tgt->port; | |
aea71a02 BPG |
873 | struct bnx2fc_interface *interface = port->priv; |
874 | struct bnx2fc_hba *hba = interface->hba; | |
853e2bd2 BG |
875 | struct bnx2fc_cmd *io_req; |
876 | int task_idx, index; | |
877 | u16 xid; | |
878 | u8 cmd_type; | |
879 | u8 rx_state = 0; | |
880 | u8 num_rq; | |
881 | ||
882 | spin_lock_bh(&tgt->tgt_lock); | |
883 | xid = wqe & FCOE_PEND_WQ_CQE_TASK_ID; | |
884 | if (xid >= BNX2FC_MAX_TASKS) { | |
b2a554ff | 885 | printk(KERN_ERR PFX "ERROR:xid out of range\n"); |
853e2bd2 BG |
886 | spin_unlock_bh(&tgt->tgt_lock); |
887 | return; | |
888 | } | |
889 | task_idx = xid / BNX2FC_TASKS_PER_PAGE; | |
890 | index = xid % BNX2FC_TASKS_PER_PAGE; | |
891 | task_page = (struct fcoe_task_ctx_entry *)hba->task_ctx[task_idx]; | |
892 | task = &(task_page[index]); | |
893 | ||
619c5cb6 VZ |
894 | num_rq = ((task->rxwr_txrd.var_ctx.rx_flags & |
895 | FCOE_TCE_RX_WR_TX_RD_VAR_NUM_RQ_WQE) >> | |
896 | FCOE_TCE_RX_WR_TX_RD_VAR_NUM_RQ_WQE_SHIFT); | |
853e2bd2 BG |
897 | |
898 | io_req = (struct bnx2fc_cmd *)hba->cmd_mgr->cmds[xid]; | |
899 | ||
900 | if (io_req == NULL) { | |
901 | printk(KERN_ERR PFX "ERROR? cq_compl - io_req is NULL\n"); | |
902 | spin_unlock_bh(&tgt->tgt_lock); | |
903 | return; | |
904 | } | |
905 | ||
906 | /* Cache the command type of the completed IO */ | |
907 | cmd_type = io_req->cmd_type; | |
908 | ||
619c5cb6 VZ |
909 | rx_state = ((task->rxwr_txrd.var_ctx.rx_flags & |
910 | FCOE_TCE_RX_WR_TX_RD_VAR_RX_STATE) >> | |
911 | FCOE_TCE_RX_WR_TX_RD_VAR_RX_STATE_SHIFT); | |
853e2bd2 | 912 | |
619c5cb6 VZ |
913 | /* Process other IO completion types */ |
914 | switch (cmd_type) { | |
915 | case BNX2FC_SCSI_CMD: | |
853e2bd2 BG |
916 | if (rx_state == FCOE_TASK_RX_STATE_COMPLETED) { |
917 | bnx2fc_process_scsi_cmd_compl(io_req, task, num_rq); | |
918 | spin_unlock_bh(&tgt->tgt_lock); | |
919 | return; | |
920 | } | |
853e2bd2 | 921 | |
853e2bd2 BG |
922 | if (rx_state == FCOE_TASK_RX_STATE_ABTS_COMPLETED) |
923 | bnx2fc_process_abts_compl(io_req, task, num_rq); | |
924 | else if (rx_state == | |
925 | FCOE_TASK_RX_STATE_EXCHANGE_CLEANUP_COMPLETED) | |
926 | bnx2fc_process_cleanup_compl(io_req, task, num_rq); | |
927 | else | |
928 | printk(KERN_ERR PFX "Invalid rx state - %d\n", | |
929 | rx_state); | |
930 | break; | |
931 | ||
932 | case BNX2FC_TASK_MGMT_CMD: | |
933 | BNX2FC_IO_DBG(io_req, "Processing TM complete\n"); | |
934 | bnx2fc_process_tm_compl(io_req, task, num_rq); | |
935 | break; | |
936 | ||
937 | case BNX2FC_ABTS: | |
938 | /* | |
939 | * ABTS request received by firmware. ABTS response | |
940 | * will be delivered to the task belonging to the IO | |
941 | * that was aborted | |
942 | */ | |
943 | BNX2FC_IO_DBG(io_req, "cq_compl- ABTS sent out by fw\n"); | |
944 | kref_put(&io_req->refcount, bnx2fc_cmd_release); | |
945 | break; | |
946 | ||
947 | case BNX2FC_ELS: | |
619c5cb6 VZ |
948 | if (rx_state == FCOE_TASK_RX_STATE_COMPLETED) |
949 | bnx2fc_process_els_compl(io_req, task, num_rq); | |
950 | else if (rx_state == FCOE_TASK_RX_STATE_ABTS_COMPLETED) | |
951 | bnx2fc_process_abts_compl(io_req, task, num_rq); | |
952 | else if (rx_state == | |
953 | FCOE_TASK_RX_STATE_EXCHANGE_CLEANUP_COMPLETED) | |
954 | bnx2fc_process_cleanup_compl(io_req, task, num_rq); | |
955 | else | |
956 | printk(KERN_ERR PFX "Invalid rx state = %d\n", | |
957 | rx_state); | |
853e2bd2 BG |
958 | break; |
959 | ||
960 | case BNX2FC_CLEANUP: | |
961 | BNX2FC_IO_DBG(io_req, "cq_compl- cleanup resp rcvd\n"); | |
962 | kref_put(&io_req->refcount, bnx2fc_cmd_release); | |
963 | break; | |
964 | ||
6c5a7ce4 BPG |
965 | case BNX2FC_SEQ_CLEANUP: |
966 | BNX2FC_IO_DBG(io_req, "cq_compl(0x%x) - seq cleanup resp\n", | |
967 | io_req->xid); | |
968 | bnx2fc_process_seq_cleanup_compl(io_req, task, rx_state); | |
969 | kref_put(&io_req->refcount, bnx2fc_cmd_release); | |
970 | break; | |
971 | ||
853e2bd2 BG |
972 | default: |
973 | printk(KERN_ERR PFX "Invalid cmd_type %d\n", cmd_type); | |
974 | break; | |
975 | } | |
976 | spin_unlock_bh(&tgt->tgt_lock); | |
977 | } | |
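
Task contexts live in an array of DMA pages, so this file keeps mapping an exchange ID (xid) to a (page, slot) pair with a divide and a modulo. A tiny sketch of that mapping; the per-page capacity below is a placeholder, since the real value depends on the page size and the task entry size:

```c
#include <stdint.h>
#include <stdio.h>

#define BNX2FC_TASKS_PER_PAGE 256 /* placeholder: page size divided by
                                   * sizeof(struct fcoe_task_ctx_entry) */

static void xid_to_task_slot(uint16_t xid, int *task_idx, int *index)
{
    *task_idx = xid / BNX2FC_TASKS_PER_PAGE; /* which context page */
    *index    = xid % BNX2FC_TASKS_PER_PAGE; /* entry within the page */
}

int main(void)
{
    int page, slot;

    xid_to_task_slot(0x123, &page, &slot);
    printf("xid 0x123 -> page %d, slot %d\n", page, slot);
    return 0;
}
```
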
978 | ||
619c5cb6 VZ |
979 | void bnx2fc_arm_cq(struct bnx2fc_rport *tgt) |
980 | { | |
981 | struct b577xx_fcoe_rx_doorbell *rx_db = &tgt->rx_db; | |
982 | u32 msg; | |
983 | ||
984 | wmb(); | |
985 | rx_db->doorbell_cq_cons = tgt->cq_cons_idx | (tgt->cq_curr_toggle_bit << | |
986 | FCOE_CQE_TOGGLE_BIT_SHIFT); | |
987 | msg = *((u32 *)rx_db); | |
988 | writel(cpu_to_le32(msg), tgt->ctx_base); | |
989 | mmiowb(); | |
990 | ||
991 | } | |
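
Arming the CQ packs the consumer index and the current toggle bit into one doorbell word, then writes the doorbell structure to the adapter as a single 32-bit MMIO store; wmb() orders the preceding memory updates ahead of it, and mmiowb() orders it against a later unlock on architectures that need it. A sketch of just the index-plus-toggle encoding (the shift value is a placeholder):

```c
#include <stdint.h>
#include <stdio.h>

#define FCOE_CQE_TOGGLE_BIT_SHIFT 15 /* placeholder for the HSI value */

/* Encode consumer index and toggle bit as in bnx2fc_arm_cq(). */
static uint16_t cq_doorbell_cons(uint16_t cons_idx, unsigned int toggle)
{
    return cons_idx | (toggle << FCOE_CQE_TOGGLE_BIT_SHIFT);
}

int main(void)
{
    printf("cons=0x%04x\n", cq_doorbell_cons(0x0010, 1)); /* 0x8010 */
    return 0;
}
```
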
992 | ||
853e2bd2 BG |
993 | struct bnx2fc_work *bnx2fc_alloc_work(struct bnx2fc_rport *tgt, u16 wqe) |
994 | { | |
995 | struct bnx2fc_work *work; | |
996 | work = kzalloc(sizeof(struct bnx2fc_work), GFP_ATOMIC); | |
997 | if (!work) | |
998 | return NULL; | |
999 | ||
1000 | INIT_LIST_HEAD(&work->list); | |
1001 | work->tgt = tgt; | |
1002 | work->wqe = wqe; | |
1003 | return work; | |
1004 | } | |
1005 | ||
1006 | int bnx2fc_process_new_cqes(struct bnx2fc_rport *tgt) | |
1007 | { | |
1008 | struct fcoe_cqe *cq; | |
1009 | u32 cq_cons; | |
1010 | struct fcoe_cqe *cqe; | |
619c5cb6 | 1011 | u32 num_free_sqes = 0; |
b338c785 | 1012 | u32 num_cqes = 0; |
853e2bd2 | 1013 | u16 wqe; |
853e2bd2 BG |
1014 | |
1015 | /* | |
1016 | * cq_lock is a low contention lock used to protect | |
1017 | * the CQ data structure from being freed up during | |
1018 | * the upload operation | |
1019 | */ | |
1020 | spin_lock_bh(&tgt->cq_lock); | |
1021 | ||
1022 | if (!tgt->cq) { | |
1023 | printk(KERN_ERR PFX "process_new_cqes: cq is NULL\n"); | |
1024 | spin_unlock_bh(&tgt->cq_lock); | |
1025 | return 0; | |
1026 | } | |
1027 | cq = tgt->cq; | |
1028 | cq_cons = tgt->cq_cons_idx; | |
1029 | cqe = &cq[cq_cons]; | |
1030 | ||
619c5cb6 VZ |
1031 | while (((wqe = cqe->wqe) & FCOE_CQE_TOGGLE_BIT) == |
1032 | (tgt->cq_curr_toggle_bit << | |
1033 | FCOE_CQE_TOGGLE_BIT_SHIFT)) { | |
853e2bd2 | 1034 | |
619c5cb6 VZ |
1035 | /* new entry on the cq */ |
1036 | if (wqe & FCOE_CQE_CQE_TYPE) { | |
1037 | /* Unsolicited event notification */ | |
1038 | bnx2fc_process_unsol_compl(tgt, wqe); | |
1039 | } else { | |
1040 | /* Pending work request completion */ | |
1041 | struct bnx2fc_work *work = NULL; | |
1042 | struct bnx2fc_percpu_s *fps = NULL; | |
1043 | unsigned int cpu = wqe % num_possible_cpus(); | |
1044 | ||
1045 | fps = &per_cpu(bnx2fc_percpu, cpu); | |
1046 | spin_lock_bh(&fps->fp_work_lock); | |
1047 | if (unlikely(!fps->iothread)) | |
1048 | goto unlock; | |
1049 | ||
1050 | work = bnx2fc_alloc_work(tgt, wqe); | |
1051 | if (work) | |
1052 | list_add_tail(&work->list, | |
1053 | &fps->work_list); | |
853e2bd2 | 1054 | unlock: |
619c5cb6 | 1055 | spin_unlock_bh(&fps->fp_work_lock); |
853e2bd2 | 1056 | |
619c5cb6 VZ |
1057 | /* Pending work request completion */ |
1058 | if (fps->iothread && work) | |
1059 | wake_up_process(fps->iothread); | |
1060 | else | |
1061 | bnx2fc_process_cq_compl(tgt, wqe); | |
b338c785 | 1062 | num_free_sqes++; |
853e2bd2 | 1063 | } |
619c5cb6 VZ |
1064 | cqe++; |
1065 | tgt->cq_cons_idx++; | |
b338c785 | 1066 | num_cqes++; |
619c5cb6 VZ |
1067 | |
1068 | if (tgt->cq_cons_idx == BNX2FC_CQ_WQES_MAX) { | |
1069 | tgt->cq_cons_idx = 0; | |
1070 | cqe = cq; | |
1071 | tgt->cq_curr_toggle_bit = | |
1072 | 1 - tgt->cq_curr_toggle_bit; | |
853e2bd2 | 1073 | } |
619c5cb6 | 1074 | } |
b338c785 BPG |
1075 | if (num_cqes) { |
1076 | /* Arm CQ only if doorbell is mapped */ | |
1077 | if (tgt->ctx_base) | |
1078 | bnx2fc_arm_cq(tgt); | |
fd08bd62 BPG |
1079 | atomic_add(num_free_sqes, &tgt->free_sqes); |
1080 | } | |
853e2bd2 BG |
1081 | spin_unlock_bh(&tgt->cq_lock); |
1082 | return 0; | |
1083 | } | |
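
The while loop above is a toggle-bit ownership test, common for rings that hardware produces into: a CQE belongs to the driver only while its toggle bit matches the driver's expected value, and the expectation flips each time the consumer index wraps. A self-contained model of the protocol (ring size and bit position are arbitrary demo values):

```c
#include <stdint.h>
#include <stdio.h>

#define RING_SIZE  8      /* arbitrary demo size */
#define TOGGLE_BIT 0x8000 /* arbitrary demo bit position */

/* Model: the consumer owns cq[cons] while (cq[cons] & TOGGLE_BIT)
 * matches the expected toggle value for the current pass. */
int main(void)
{
    uint16_t cq[RING_SIZE];
    unsigned int cons = 0, toggle = 1, consumed = 0;

    /* Producer fills the whole first pass with the toggle bit set. */
    for (int i = 0; i < RING_SIZE; i++)
        cq[i] = i | TOGGLE_BIT;

    while ((cq[cons] & TOGGLE_BIT) == (toggle ? TOGGLE_BIT : 0)) {
        consumed++;
        if (++cons == RING_SIZE) {
            cons = 0;
            toggle = 1 - toggle; /* next pass expects the flipped bit */
        }
    }
    printf("consumed %u entries\n", consumed); /* 8 */
    return 0;
}
```
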
1084 | ||
1085 | /** | |
1086 | * bnx2fc_fastpath_notification - process global event queue (KCQ) | |
1087 | * | |
1088 | * @hba: adapter structure pointer | |
1089 | * @new_cqe_kcqe: pointer to newly DMA'd KCQ entry | |
1090 | * | |
1091 | * Fast path event notification handler | |
1092 | */ | |
1093 | static void bnx2fc_fastpath_notification(struct bnx2fc_hba *hba, | |
1094 | struct fcoe_kcqe *new_cqe_kcqe) | |
1095 | { | |
1096 | u32 conn_id = new_cqe_kcqe->fcoe_conn_id; | |
1097 | struct bnx2fc_rport *tgt = hba->tgt_ofld_list[conn_id]; | |
1098 | ||
1099 | if (!tgt) { | |
b2a554ff | 1100 | printk(KERN_ERR PFX "conn_id 0x%x not valid\n", conn_id); |
853e2bd2 BG |
1101 | return; |
1102 | } | |
1103 | ||
1104 | bnx2fc_process_new_cqes(tgt); | |
1105 | } | |
1106 | ||
1107 | /** | |
1108 | * bnx2fc_process_ofld_cmpl - process FCoE session offload completion | |
1109 | * | |
1110 | * @hba: adapter structure pointer | |
1111 | * @ofld_kcqe: connection offload kcqe pointer | |
1112 | * | |
1113 | * handle session offload completion, enable the session if offload is | |
1114 | * successful. | |
1115 | */ | |
1116 | static void bnx2fc_process_ofld_cmpl(struct bnx2fc_hba *hba, | |
1117 | struct fcoe_kcqe *ofld_kcqe) | |
1118 | { | |
1119 | struct bnx2fc_rport *tgt; | |
1120 | struct fcoe_port *port; | |
aea71a02 | 1121 | struct bnx2fc_interface *interface; |
853e2bd2 BG |
1122 | u32 conn_id; |
1123 | u32 context_id; | |
1124 | int rc; | |
1125 | ||
1126 | conn_id = ofld_kcqe->fcoe_conn_id; | |
1127 | context_id = ofld_kcqe->fcoe_conn_context_id; | |
1128 | tgt = hba->tgt_ofld_list[conn_id]; | |
1129 | if (!tgt) { | |
aea71a02 | 1130 | printk(KERN_ALERT PFX "ERROR:ofld_cmpl: No pending ofld req\n"); |
853e2bd2 BG |
1131 | return; |
1132 | } | |
1133 | BNX2FC_TGT_DBG(tgt, "Entered ofld compl - context_id = 0x%x\n", | |
1134 | ofld_kcqe->fcoe_conn_context_id); | |
1135 | port = tgt->port; | |
aea71a02 BPG |
1136 | interface = tgt->port->priv; |
1137 | if (hba != interface->hba) { | |
1138 | printk(KERN_ERR PFX "ERROR:ofld_cmpl: HBA mis-match\n"); | |
853e2bd2 BG |
1139 | goto ofld_cmpl_err; |
1140 | } | |
1141 | /* | |
1142 | * cnic has allocated a context_id for this session; use this | |
1143 | * while enabling the session. | |
1144 | */ | |
1145 | tgt->context_id = context_id; | |
1146 | if (ofld_kcqe->completion_status) { | |
1147 | if (ofld_kcqe->completion_status == | |
1148 | FCOE_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAILURE) { | |
1149 | printk(KERN_ERR PFX "unable to allocate FCoE context " | |
1150 | "resources\n"); | |
1151 | set_bit(BNX2FC_FLAG_CTX_ALLOC_FAILURE, &tgt->flags); | |
1152 | } | |
1153 | goto ofld_cmpl_err; | |
1154 | } else { | |
1155 | ||
1156 | /* now enable the session */ | |
1157 | rc = bnx2fc_send_session_enable_req(port, tgt); | |
1158 | if (rc) { | |
b2a554ff | 1159 | printk(KERN_ERR PFX "enable session failed\n"); |
853e2bd2 BG |
1160 | goto ofld_cmpl_err; |
1161 | } | |
1162 | } | |
1163 | return; | |
1164 | ofld_cmpl_err: | |
1165 | set_bit(BNX2FC_FLAG_OFLD_REQ_CMPL, &tgt->flags); | |
1166 | wake_up_interruptible(&tgt->ofld_wait); | |
1167 | } | |
1168 | ||
1169 | /** | |
1170 | * bnx2fc_process_enable_conn_cmpl - process FCoE session enable completion | |
1171 | * | |
1172 | * @hba: adapter structure pointer | |
1173 | * @ofld_kcqe: connection offload kcqe pointer | |
1174 | * | |
1175 | * handle session enable completion, mark the rport as ready | |
1176 | */ | |
1177 | ||
1178 | static void bnx2fc_process_enable_conn_cmpl(struct bnx2fc_hba *hba, | |
1179 | struct fcoe_kcqe *ofld_kcqe) | |
1180 | { | |
1181 | struct bnx2fc_rport *tgt; | |
aea71a02 | 1182 | struct bnx2fc_interface *interface; |
853e2bd2 BG |
1183 | u32 conn_id; |
1184 | u32 context_id; | |
1185 | ||
1186 | context_id = ofld_kcqe->fcoe_conn_context_id; | |
1187 | conn_id = ofld_kcqe->fcoe_conn_id; | |
1188 | tgt = hba->tgt_ofld_list[conn_id]; | |
1189 | if (!tgt) { | |
b2a554ff | 1190 | printk(KERN_ERR PFX "ERROR:enbl_cmpl: No pending ofld req\n"); |
853e2bd2 BG |
1191 | return; |
1192 | } | |
1193 | ||
1194 | BNX2FC_TGT_DBG(tgt, "Enable compl - context_id = 0x%x\n", | |
1195 | ofld_kcqe->fcoe_conn_context_id); | |
1196 | ||
1197 | /* | |
1198 | * context_id should be the same for this target during offload | |
1199 | * and enable | |
1200 | */ | |
1201 | if (tgt->context_id != context_id) { | |
b2a554ff | 1202 | printk(KERN_ERR PFX "context id mis-match\n"); |
853e2bd2 BG |
1203 | return; |
1204 | } | |
aea71a02 BPG |
1205 | interface = tgt->port->priv; |
1206 | if (hba != interface->hba) { | |
1207 | printk(KERN_ERR PFX "bnx2fc-enbl_cmpl: HBA mis-match\n"); | |
853e2bd2 BG |
1208 | goto enbl_cmpl_err; |
1209 | } | |
aea71a02 | 1210 | if (ofld_kcqe->completion_status) |
853e2bd2 | 1211 | goto enbl_cmpl_err; |
aea71a02 | 1212 | else { |
853e2bd2 BG |
1213 | /* enable successful - rport ready for issuing IOs */ |
1214 | set_bit(BNX2FC_FLAG_OFFLOADED, &tgt->flags); | |
1215 | set_bit(BNX2FC_FLAG_OFLD_REQ_CMPL, &tgt->flags); | |
1216 | wake_up_interruptible(&tgt->ofld_wait); | |
1217 | } | |
1218 | return; | |
1219 | ||
1220 | enbl_cmpl_err: | |
1221 | set_bit(BNX2FC_FLAG_OFLD_REQ_CMPL, &tgt->flags); | |
1222 | wake_up_interruptible(&tgt->ofld_wait); | |
1223 | } | |
1224 | ||
1225 | static void bnx2fc_process_conn_disable_cmpl(struct bnx2fc_hba *hba, | |
1226 | struct fcoe_kcqe *disable_kcqe) | |
1227 | { | |
1228 | ||
1229 | struct bnx2fc_rport *tgt; | |
1230 | u32 conn_id; | |
1231 | ||
1232 | conn_id = disable_kcqe->fcoe_conn_id; | |
1233 | tgt = hba->tgt_ofld_list[conn_id]; | |
1234 | if (!tgt) { | |
b2a554ff | 1235 | printk(KERN_ERR PFX "ERROR: disable_cmpl: No disable req\n"); |
853e2bd2 BG |
1236 | return; |
1237 | } | |
1238 | ||
1239 | BNX2FC_TGT_DBG(tgt, PFX "disable_cmpl: conn_id %d\n", conn_id); | |
1240 | ||
1241 | if (disable_kcqe->completion_status) { | |
b2a554ff | 1242 | printk(KERN_ERR PFX "Disable failed with cmpl status %d\n", |
853e2bd2 BG |
1243 | disable_kcqe->completion_status); |
1244 | return; | |
1245 | } else { | |
1246 | /* disable successful */ | |
1247 | BNX2FC_TGT_DBG(tgt, "disable successful\n"); | |
1248 | clear_bit(BNX2FC_FLAG_OFFLOADED, &tgt->flags); | |
1249 | set_bit(BNX2FC_FLAG_DISABLED, &tgt->flags); | |
1250 | set_bit(BNX2FC_FLAG_UPLD_REQ_COMPL, &tgt->flags); | |
1251 | wake_up_interruptible(&tgt->upld_wait); | |
1252 | } | |
1253 | } | |
1254 | ||
1255 | static void bnx2fc_process_conn_destroy_cmpl(struct bnx2fc_hba *hba, | |
1256 | struct fcoe_kcqe *destroy_kcqe) | |
1257 | { | |
1258 | struct bnx2fc_rport *tgt; | |
1259 | u32 conn_id; | |
1260 | ||
1261 | conn_id = destroy_kcqe->fcoe_conn_id; | |
1262 | tgt = hba->tgt_ofld_list[conn_id]; | |
1263 | if (!tgt) { | |
b2a554ff | 1264 | printk(KERN_ERR PFX "destroy_cmpl: No destroy req\n"); |
853e2bd2 BG |
1265 | return; |
1266 | } | |
1267 | ||
1268 | BNX2FC_TGT_DBG(tgt, "destroy_cmpl: conn_id %d\n", conn_id); | |
1269 | ||
1270 | if (destroy_kcqe->completion_status) { | |
b2a554ff | 1271 | printk(KERN_ERR PFX "Destroy conn failed, cmpl status %d\n", |
853e2bd2 BG |
1272 | destroy_kcqe->completion_status); |
1273 | return; | |
1274 | } else { | |
1275 | /* destroy successful */ | |
1276 | BNX2FC_TGT_DBG(tgt, "upload successful\n"); | |
1277 | clear_bit(BNX2FC_FLAG_DISABLED, &tgt->flags); | |
1278 | set_bit(BNX2FC_FLAG_DESTROYED, &tgt->flags); | |
1279 | set_bit(BNX2FC_FLAG_UPLD_REQ_COMPL, &tgt->flags); | |
1280 | wake_up_interruptible(&tgt->upld_wait); | |
1281 | } | |
1282 | } | |
1283 | ||
1284 | static void bnx2fc_init_failure(struct bnx2fc_hba *hba, u32 err_code) | |
1285 | { | |
1286 | switch (err_code) { | |
1287 | case FCOE_KCQE_COMPLETION_STATUS_INVALID_OPCODE: | |
1288 | printk(KERN_ERR PFX "init_failure due to invalid opcode\n"); | |
1289 | break; | |
1290 | ||
1291 | case FCOE_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAILURE: | |
1292 | printk(KERN_ERR PFX "init failed due to ctx alloc failure\n"); | |
1293 | break; | |
1294 | ||
1295 | case FCOE_KCQE_COMPLETION_STATUS_NIC_ERROR: | |
1296 | printk(KERN_ERR PFX "init_failure due to NIC error\n"); | |
1297 | break; | |
619c5cb6 VZ |
1298 | case FCOE_KCQE_COMPLETION_STATUS_ERROR: |
1299 | printk(KERN_ERR PFX "init failure due to compl status err\n"); | |
1300 | break; | |
1301 | case FCOE_KCQE_COMPLETION_STATUS_WRONG_HSI_VERSION: | |
1302 | printk(KERN_ERR PFX "init failure due to HSI mismatch\n"); | |
b2a554ff | 1303 | break; |
853e2bd2 BG |
1304 | default: |
1305 | printk(KERN_ERR PFX "Unknown Error code %d\n", err_code); | |
1306 | } | |
1307 | } | |
1308 | ||
1309 | /** | |
1310 | * bnx2fc_indicate_kcqe - process KCQE | |
1311 | * | |
1312 | * @context: adapter structure pointer | |
1313 | * @kcq: kcqe pointer array | |
1314 | * @num_cqe: Number of completion queue elements | |
1315 | * | |
1316 | * Generic KCQ event handler | |
1317 | */ | |
1318 | void bnx2fc_indicate_kcqe(void *context, struct kcqe *kcq[], | |
1319 | u32 num_cqe) | |
1320 | { | |
1321 | struct bnx2fc_hba *hba = (struct bnx2fc_hba *)context; | |
1322 | int i = 0; | |
1323 | struct fcoe_kcqe *kcqe = NULL; | |
1324 | ||
1325 | while (i < num_cqe) { | |
1326 | kcqe = (struct fcoe_kcqe *) kcq[i++]; | |
1327 | ||
1328 | switch (kcqe->op_code) { | |
1329 | case FCOE_KCQE_OPCODE_CQ_EVENT_NOTIFICATION: | |
1330 | bnx2fc_fastpath_notification(hba, kcqe); | |
1331 | break; | |
1332 | ||
1333 | case FCOE_KCQE_OPCODE_OFFLOAD_CONN: | |
1334 | bnx2fc_process_ofld_cmpl(hba, kcqe); | |
1335 | break; | |
1336 | ||
1337 | case FCOE_KCQE_OPCODE_ENABLE_CONN: | |
1338 | bnx2fc_process_enable_conn_cmpl(hba, kcqe); | |
1339 | break; | |
1340 | ||
1341 | case FCOE_KCQE_OPCODE_INIT_FUNC: | |
1342 | if (kcqe->completion_status != | |
1343 | FCOE_KCQE_COMPLETION_STATUS_SUCCESS) { | |
1344 | bnx2fc_init_failure(hba, | |
1345 | kcqe->completion_status); | |
1346 | } else { | |
1347 | set_bit(ADAPTER_STATE_UP, &hba->adapter_state); | |
1348 | bnx2fc_get_link_state(hba); | |
1349 | printk(KERN_INFO PFX "[%.2x]: FCOE_INIT passed\n", | |
1350 | (u8)hba->pcidev->bus->number); | |
1351 | } | |
1352 | break; | |
1353 | ||
1354 | case FCOE_KCQE_OPCODE_DESTROY_FUNC: | |
1355 | if (kcqe->completion_status != | |
1356 | FCOE_KCQE_COMPLETION_STATUS_SUCCESS) { | |
1357 | ||
1358 | printk(KERN_ERR PFX "DESTROY failed\n"); | |
1359 | } else { | |
1360 | printk(KERN_ERR PFX "DESTROY success\n"); | |
1361 | } | |
aea71a02 | 1362 | set_bit(BNX2FC_FLAG_DESTROY_CMPL, &hba->flags); |
853e2bd2 BG |
1363 | wake_up_interruptible(&hba->destroy_wait); |
1364 | break; | |
1365 | ||
1366 | case FCOE_KCQE_OPCODE_DISABLE_CONN: | |
1367 | bnx2fc_process_conn_disable_cmpl(hba, kcqe); | |
1368 | break; | |
1369 | ||
1370 | case FCOE_KCQE_OPCODE_DESTROY_CONN: | |
1371 | bnx2fc_process_conn_destroy_cmpl(hba, kcqe); | |
1372 | break; | |
1373 | ||
1374 | case FCOE_KCQE_OPCODE_STAT_FUNC: | |
1375 | if (kcqe->completion_status != | |
1376 | FCOE_KCQE_COMPLETION_STATUS_SUCCESS) | |
1377 | printk(KERN_ERR PFX "STAT failed\n"); | |
1378 | complete(&hba->stat_req_done); | |
1379 | break; | |
1380 | ||
1381 | case FCOE_KCQE_OPCODE_FCOE_ERROR: | |
1382 | /* fall thru */ | |
1383 | default: | |
b2a554ff | 1384 | printk(KERN_ERR PFX "unknown opcode 0x%x\n", |
853e2bd2 BG |
1385 | kcqe->op_code); |
1386 | } | |
1387 | } | |
1388 | } | |
1389 | ||
1390 | void bnx2fc_add_2_sq(struct bnx2fc_rport *tgt, u16 xid) | |
1391 | { | |
1392 | struct fcoe_sqe *sqe; | |
1393 | ||
1394 | sqe = &tgt->sq[tgt->sq_prod_idx]; | |
1395 | ||
1396 | /* Fill SQ WQE */ | |
1397 | sqe->wqe = xid << FCOE_SQE_TASK_ID_SHIFT; | |
1398 | sqe->wqe |= tgt->sq_curr_toggle_bit << FCOE_SQE_TOGGLE_BIT_SHIFT; | |
1399 | ||
1400 | /* Advance SQ Prod Idx */ | |
1401 | if (++tgt->sq_prod_idx == BNX2FC_SQ_WQES_MAX) { | |
1402 | tgt->sq_prod_idx = 0; | |
1403 | tgt->sq_curr_toggle_bit = 1 - tgt->sq_curr_toggle_bit; | |
1404 | } | |
1405 | } | |
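
bnx2fc_add_2_sq() encodes the task XID into the SQE and stamps it with the producer-side toggle bit, which flips whenever sq_prod_idx wraps; this mirrors the CQ consumer logic modeled earlier. A sketch under placeholder shift values and a tiny ring:

```c
#include <stdint.h>
#include <stdio.h>

#define FCOE_SQE_TASK_ID_SHIFT    0  /* placeholders for the HSI values */
#define FCOE_SQE_TOGGLE_BIT_SHIFT 15
#define SQ_WQES_MAX               4  /* tiny ring for the demo */

static uint16_t sq_prod_idx;
static unsigned int sq_toggle = 1;

/* Build one SQE and advance the producer index, flipping the toggle
 * bit on wrap, as the driver does. */
static uint16_t make_sqe(uint16_t xid)
{
    uint16_t wqe = xid << FCOE_SQE_TASK_ID_SHIFT;

    wqe |= sq_toggle << FCOE_SQE_TOGGLE_BIT_SHIFT;
    if (++sq_prod_idx == SQ_WQES_MAX) {
        sq_prod_idx = 0;
        sq_toggle = 1 - sq_toggle;
    }
    return wqe;
}

int main(void)
{
    for (uint16_t xid = 0; xid < 6; xid++)
        printf("xid %u -> wqe 0x%04x\n", xid, make_sqe(xid));
    return 0;
}
```
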
1406 | ||
1407 | void bnx2fc_ring_doorbell(struct bnx2fc_rport *tgt) | |
1408 | { | |
619c5cb6 | 1409 | struct b577xx_doorbell_set_prod *sq_db = &tgt->sq_db; |
853e2bd2 BG |
1410 | u32 msg; |
1411 | ||
1412 | wmb(); | |
619c5cb6 | 1413 | sq_db->prod = tgt->sq_prod_idx | |
853e2bd2 | 1414 | (tgt->sq_curr_toggle_bit << 15); |
619c5cb6 | 1415 | msg = *((u32 *)sq_db); |
853e2bd2 | 1416 | writel(cpu_to_le32(msg), tgt->ctx_base); |
853e2bd2 BG |
1417 | mmiowb(); |
1418 | ||
1419 | } | |
1420 | ||
1421 | int bnx2fc_map_doorbell(struct bnx2fc_rport *tgt) | |
1422 | { | |
1423 | u32 context_id = tgt->context_id; | |
1424 | struct fcoe_port *port = tgt->port; | |
1425 | u32 reg_off; | |
1426 | resource_size_t reg_base; | |
aea71a02 BPG |
1427 | struct bnx2fc_interface *interface = port->priv; |
1428 | struct bnx2fc_hba *hba = interface->hba; | |
853e2bd2 BG |
1429 | |
1430 | reg_base = pci_resource_start(hba->pcidev, | |
1431 | BNX2X_DOORBELL_PCI_BAR); | |
1432 | reg_off = BNX2FC_5771X_DB_PAGE_SIZE * | |
1433 | (context_id & 0x1FFFF) + DPM_TRIGER_TYPE; | |
1434 | tgt->ctx_base = ioremap_nocache(reg_base + reg_off, 4); | |
1435 | if (!tgt->ctx_base) | |
1436 | return -ENOMEM; | |
1437 | return 0; | |
1438 | } | |
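
The doorbell mapping computes a per-connection MMIO offset: one doorbell page per connection inside the doorbell BAR, indexed by the low 17 bits of the cnic-assigned context ID, plus a fixed trigger offset. A sketch of the arithmetic with placeholder constants:

```c
#include <stdint.h>
#include <stdio.h>

#define DB_PAGE_SIZE 0x1000 /* placeholder for BNX2FC_5771X_DB_PAGE_SIZE */
#define DPM_TRIGGER  0x40   /* placeholder for DPM_TRIGER_TYPE */

/* Offset of a connection's doorbell within the doorbell BAR. */
static uint64_t doorbell_offset(uint32_t context_id)
{
    return (uint64_t)DB_PAGE_SIZE * (context_id & 0x1FFFF) + DPM_TRIGGER;
}

int main(void)
{
    printf("ctx 0x12 -> BAR offset 0x%llx\n",
           (unsigned long long)doorbell_offset(0x12));
    return 0;
}
```
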
1439 | ||
1440 | char *bnx2fc_get_next_rqe(struct bnx2fc_rport *tgt, u8 num_items) | |
1441 | { | |
1442 | char *buf = (char *)tgt->rq + (tgt->rq_cons_idx * BNX2FC_RQ_BUF_SZ); | |
1443 | ||
1444 | if (tgt->rq_cons_idx + num_items > BNX2FC_RQ_WQES_MAX) | |
1445 | return NULL; | |
1446 | ||
1447 | tgt->rq_cons_idx += num_items; | |
1448 | ||
1449 | if (tgt->rq_cons_idx >= BNX2FC_RQ_WQES_MAX) | |
1450 | tgt->rq_cons_idx -= BNX2FC_RQ_WQES_MAX; | |
1451 | ||
1452 | return buf; | |
1453 | } | |
1454 | ||
1455 | void bnx2fc_return_rqe(struct bnx2fc_rport *tgt, u8 num_items) | |
1456 | { | |
1457 | /* return the rq buffer */ | |
1458 | u32 next_prod_idx = tgt->rq_prod_idx + num_items; | |
1459 | if ((next_prod_idx & 0x7fff) == BNX2FC_RQ_WQES_MAX) { | |
1460 | /* Wrap around RQ */ | |
1461 | next_prod_idx += 0x8000 - BNX2FC_RQ_WQES_MAX; | |
1462 | } | |
1463 | tgt->rq_prod_idx = next_prod_idx; | |
1464 | tgt->conn_db->rq_prod = tgt->rq_prod_idx; | |
1465 | } | |
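
rq_prod starts at 0x8000 (see the offload request above), and bnx2fc_return_rqe() advances it so that the low 15 bits count entries modulo BNX2FC_RQ_WQES_MAX while bit 15 flips on each wrap; this looks like a wrap marker for the firmware, though the HSI headers would be authoritative. A short simulation under that reading, with a shrunken ring:

```c
#include <stdint.h>
#include <stdio.h>

#define RQ_WQES_MAX 4 /* tiny stand-in for BNX2FC_RQ_WQES_MAX */

static uint16_t rq_prod = 0x8000; /* initial value from the offload req */

/* Advance the RQ producer as bnx2fc_return_rqe() does: when the low
 * 15 bits reach the ring size, wrap them to 0 and flip bit 15. */
static void return_rqe(uint16_t num_items)
{
    uint32_t next = rq_prod + num_items;

    if ((next & 0x7fff) == RQ_WQES_MAX)
        next += 0x8000 - RQ_WQES_MAX;
    rq_prod = (uint16_t)next;
}

int main(void)
{
    for (int i = 0; i < 10; i++) {
        return_rqe(1);
        printf("rq_prod = 0x%04x\n", rq_prod);
    }
    return 0;
}
```
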
1466 | ||
void bnx2fc_init_seq_cleanup_task(struct bnx2fc_cmd *seq_clnp_req,
				  struct fcoe_task_ctx_entry *task,
				  struct bnx2fc_cmd *orig_io_req,
				  u32 offset)
{
	struct scsi_cmnd *sc_cmd = orig_io_req->sc_cmd;
	struct bnx2fc_rport *tgt = seq_clnp_req->tgt;
	struct bnx2fc_interface *interface = tgt->port->priv;
	struct fcoe_bd_ctx *bd = orig_io_req->bd_tbl->bd_tbl;
	struct fcoe_task_ctx_entry *orig_task;
	struct fcoe_task_ctx_entry *task_page;
	struct fcoe_ext_mul_sges_ctx *sgl;
	u8 task_type = FCOE_TASK_TYPE_SEQUENCE_CLEANUP;
	u8 orig_task_type;
	u16 orig_xid = orig_io_req->xid;
	u32 context_id = tgt->context_id;
	u64 phys_addr = (u64)orig_io_req->bd_tbl->bd_tbl_dma;
	u32 orig_offset = offset;
	int bd_count;
	int orig_task_idx, index;
	int i;

	memset(task, 0, sizeof(struct fcoe_task_ctx_entry));

	if (sc_cmd->sc_data_direction == DMA_TO_DEVICE)
		orig_task_type = FCOE_TASK_TYPE_WRITE;
	else
		orig_task_type = FCOE_TASK_TYPE_READ;

	/* Tx flags */
	task->txwr_rxrd.const_ctx.tx_flags =
				FCOE_TASK_TX_STATE_SEQUENCE_CLEANUP <<
				FCOE_TCE_TX_WR_RX_RD_CONST_TX_STATE_SHIFT;
	/* init flags */
	task->txwr_rxrd.const_ctx.init_flags = task_type <<
				FCOE_TCE_TX_WR_RX_RD_CONST_TASK_TYPE_SHIFT;
	task->txwr_rxrd.const_ctx.init_flags |= FCOE_TASK_CLASS_TYPE_3 <<
				FCOE_TCE_TX_WR_RX_RD_CONST_CLASS_TYPE_SHIFT;
	task->rxwr_txrd.const_ctx.init_flags = context_id <<
				FCOE_TCE_RX_WR_TX_RD_CONST_CID_SHIFT;

	task->txwr_rxrd.union_ctx.cleanup.ctx.cleaned_task_id = orig_xid;

	task->txwr_rxrd.union_ctx.cleanup.ctx.rolled_tx_seq_cnt = 0;
	task->txwr_rxrd.union_ctx.cleanup.ctx.rolled_tx_data_offset = offset;

	bd_count = orig_io_req->bd_tbl->bd_valid;

	/* obtain the appropriate bd entry from relative offset */
	for (i = 0; i < bd_count; i++) {
		if (offset < bd[i].buf_len)
			break;
		offset -= bd[i].buf_len;
	}
	phys_addr += (i * sizeof(struct fcoe_bd_ctx));

	if (orig_task_type == FCOE_TASK_TYPE_WRITE) {
		task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_addr.lo =
				(u32)phys_addr;
		task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_addr.hi =
				(u32)((u64)phys_addr >> 32);
		task->txwr_only.sgl_ctx.sgl.mul_sgl.sgl_size =
				bd_count;
		task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_off =
				offset; /* adjusted offset */
		task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_idx = i;
	} else {
		orig_task_idx = orig_xid / BNX2FC_TASKS_PER_PAGE;
		index = orig_xid % BNX2FC_TASKS_PER_PAGE;

		task_page = (struct fcoe_task_ctx_entry *)
			     interface->hba->task_ctx[orig_task_idx];
		orig_task = &(task_page[index]);

		/* Multiple SGEs were used for this IO */
		sgl = &task->rxwr_only.union_ctx.read_info.sgl_ctx.sgl;
		sgl->mul_sgl.cur_sge_addr.lo = (u32)phys_addr;
		sgl->mul_sgl.cur_sge_addr.hi = (u32)((u64)phys_addr >> 32);
		sgl->mul_sgl.sgl_size = bd_count;
		sgl->mul_sgl.cur_sge_off = offset; /* adjusted offset */
		sgl->mul_sgl.cur_sge_idx = i;

		memset(&task->rxwr_only.rx_seq_ctx, 0,
		       sizeof(struct fcoe_rx_seq_ctx));
		task->rxwr_only.rx_seq_ctx.low_exp_ro = orig_offset;
		task->rxwr_only.rx_seq_ctx.high_exp_ro = orig_offset;
	}
}

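/*
 * BD-walk example for the offset loop above (buffer lengths invented for
 * illustration): with three BDs of 4096 bytes each and offset == 9000,
 * the loop stops at i == 2 with offset == 808, i.e. the sequence restarts
 * 808 bytes into the third BD; phys_addr is then advanced by two
 * fcoe_bd_ctx entries so cur_sge_addr points at that BD.
 */
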
void bnx2fc_init_cleanup_task(struct bnx2fc_cmd *io_req,
			      struct fcoe_task_ctx_entry *task,
			      u16 orig_xid)
{
	u8 task_type = FCOE_TASK_TYPE_EXCHANGE_CLEANUP;
	struct bnx2fc_rport *tgt = io_req->tgt;
	u32 context_id = tgt->context_id;

	memset(task, 0, sizeof(struct fcoe_task_ctx_entry));

	/* Tx Write Rx Read */
	/* init flags */
	task->txwr_rxrd.const_ctx.init_flags = task_type <<
				FCOE_TCE_TX_WR_RX_RD_CONST_TASK_TYPE_SHIFT;
	task->txwr_rxrd.const_ctx.init_flags |= FCOE_TASK_CLASS_TYPE_3 <<
				FCOE_TCE_TX_WR_RX_RD_CONST_CLASS_TYPE_SHIFT;
	if (tgt->dev_type == TYPE_TAPE)
		task->txwr_rxrd.const_ctx.init_flags |=
				FCOE_TASK_DEV_TYPE_TAPE <<
				FCOE_TCE_TX_WR_RX_RD_CONST_DEV_TYPE_SHIFT;
	else
		task->txwr_rxrd.const_ctx.init_flags |=
				FCOE_TASK_DEV_TYPE_DISK <<
				FCOE_TCE_TX_WR_RX_RD_CONST_DEV_TYPE_SHIFT;
	task->txwr_rxrd.union_ctx.cleanup.ctx.cleaned_task_id = orig_xid;

	/* Tx flags */
	task->txwr_rxrd.const_ctx.tx_flags =
				FCOE_TASK_TX_STATE_EXCHANGE_CLEANUP <<
				FCOE_TCE_TX_WR_RX_RD_CONST_TX_STATE_SHIFT;

	/* Rx Read Tx Write */
	task->rxwr_txrd.const_ctx.init_flags = context_id <<
				FCOE_TCE_RX_WR_TX_RD_CONST_CID_SHIFT;
	task->rxwr_txrd.var_ctx.rx_flags |= 1 <<
				FCOE_TCE_RX_WR_TX_RD_VAR_EXP_FIRST_FRAME_SHIFT;
}

void bnx2fc_init_mp_task(struct bnx2fc_cmd *io_req,
			 struct fcoe_task_ctx_entry *task)
{
	struct bnx2fc_mp_req *mp_req = &(io_req->mp_req);
	struct bnx2fc_rport *tgt = io_req->tgt;
	struct fc_frame_header *fc_hdr;
	struct fcoe_ext_mul_sges_ctx *sgl;
	u8 task_type = 0;
	u64 *hdr;
	u64 temp_hdr[3];
	u32 context_id;

	/* Obtain task_type */
	if ((io_req->cmd_type == BNX2FC_TASK_MGMT_CMD) ||
	    (io_req->cmd_type == BNX2FC_ELS)) {
		task_type = FCOE_TASK_TYPE_MIDPATH;
	} else if (io_req->cmd_type == BNX2FC_ABTS) {
		task_type = FCOE_TASK_TYPE_ABTS;
	}

	memset(task, 0, sizeof(struct fcoe_task_ctx_entry));

	/* Setup the task from io_req for easy reference */
	io_req->task = task;

	BNX2FC_IO_DBG(io_req, "Init MP task for cmd_type = %d task_type = %d\n",
		io_req->cmd_type, task_type);

	/* Tx only */
	if ((task_type == FCOE_TASK_TYPE_MIDPATH) ||
	    (task_type == FCOE_TASK_TYPE_UNSOLICITED)) {
		task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_addr.lo =
				(u32)mp_req->mp_req_bd_dma;
		task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_addr.hi =
				(u32)((u64)mp_req->mp_req_bd_dma >> 32);
		task->txwr_only.sgl_ctx.sgl.mul_sgl.sgl_size = 1;
	}

	/* Tx Write Rx Read */
	/* init flags */
	task->txwr_rxrd.const_ctx.init_flags = task_type <<
				FCOE_TCE_TX_WR_RX_RD_CONST_TASK_TYPE_SHIFT;
	if (tgt->dev_type == TYPE_TAPE)
		task->txwr_rxrd.const_ctx.init_flags |=
				FCOE_TASK_DEV_TYPE_TAPE <<
				FCOE_TCE_TX_WR_RX_RD_CONST_DEV_TYPE_SHIFT;
	else
		task->txwr_rxrd.const_ctx.init_flags |=
				FCOE_TASK_DEV_TYPE_DISK <<
				FCOE_TCE_TX_WR_RX_RD_CONST_DEV_TYPE_SHIFT;
	task->txwr_rxrd.const_ctx.init_flags |= FCOE_TASK_CLASS_TYPE_3 <<
				FCOE_TCE_TX_WR_RX_RD_CONST_CLASS_TYPE_SHIFT;

	/* tx flags */
	task->txwr_rxrd.const_ctx.tx_flags = FCOE_TASK_TX_STATE_INIT <<
				FCOE_TCE_TX_WR_RX_RD_CONST_TX_STATE_SHIFT;

	/* Rx Write Tx Read */
	task->rxwr_txrd.const_ctx.data_2_trns = io_req->data_xfer_len;

	/* rx flags */
	task->rxwr_txrd.var_ctx.rx_flags |= 1 <<
				FCOE_TCE_RX_WR_TX_RD_VAR_EXP_FIRST_FRAME_SHIFT;

	context_id = tgt->context_id;
	task->rxwr_txrd.const_ctx.init_flags = context_id <<
				FCOE_TCE_RX_WR_TX_RD_CONST_CID_SHIFT;

	fc_hdr = &(mp_req->req_fc_hdr);
	if (task_type == FCOE_TASK_TYPE_MIDPATH) {
		fc_hdr->fh_ox_id = cpu_to_be16(io_req->xid);
		fc_hdr->fh_rx_id = cpu_to_be16(0xffff);
		task->rxwr_txrd.var_ctx.rx_id = 0xffff;
	} else if (task_type == FCOE_TASK_TYPE_UNSOLICITED) {
		fc_hdr->fh_rx_id = cpu_to_be16(io_req->xid);
	}

	/* Fill FC Header into middle path buffer */
	hdr = (u64 *) &task->txwr_rxrd.union_ctx.tx_frame.fc_hdr;
	memcpy(temp_hdr, fc_hdr, sizeof(temp_hdr));
	hdr[0] = cpu_to_be64(temp_hdr[0]);
	hdr[1] = cpu_to_be64(temp_hdr[1]);
	hdr[2] = cpu_to_be64(temp_hdr[2]);

	/* Rx Only */
	if (task_type == FCOE_TASK_TYPE_MIDPATH) {
		sgl = &task->rxwr_only.union_ctx.read_info.sgl_ctx.sgl;

		sgl->mul_sgl.cur_sge_addr.lo = (u32)mp_req->mp_resp_bd_dma;
		sgl->mul_sgl.cur_sge_addr.hi =
				(u32)((u64)mp_req->mp_resp_bd_dma >> 32);
		sgl->mul_sgl.sgl_size = 1;
	}
}

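/*
 * Note on the header copy above: the 24-byte FC frame header is stored in
 * the task as three big-endian 64-bit words. The memcpy() into temp_hdr[]
 * avoids unaligned 64-bit loads from the wire-format struct, and
 * cpu_to_be64() yields the same on-wire byte order regardless of host
 * endianness (sizeof(struct fc_frame_header) == 24 == sizeof(temp_hdr)).
 */
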
void bnx2fc_init_task(struct bnx2fc_cmd *io_req,
		      struct fcoe_task_ctx_entry *task)
{
	u8 task_type;
	struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
	struct io_bdt *bd_tbl = io_req->bd_tbl;
	struct bnx2fc_rport *tgt = io_req->tgt;
	struct fcoe_cached_sge_ctx *cached_sge;
	struct fcoe_ext_mul_sges_ctx *sgl;
	int dev_type = tgt->dev_type;
	u64 *fcp_cmnd;
	u64 tmp_fcp_cmnd[4];
	u32 context_id;
	int cnt, i;
	int bd_count;

	memset(task, 0, sizeof(struct fcoe_task_ctx_entry));

	/* Setup the task from io_req for easy reference */
	io_req->task = task;

	if (sc_cmd->sc_data_direction == DMA_TO_DEVICE)
		task_type = FCOE_TASK_TYPE_WRITE;
	else
		task_type = FCOE_TASK_TYPE_READ;

	/* Tx only */
	bd_count = bd_tbl->bd_valid;
	if (task_type == FCOE_TASK_TYPE_WRITE) {
		if ((dev_type == TYPE_DISK) && (bd_count == 1)) {
			struct fcoe_bd_ctx *fcoe_bd_tbl = bd_tbl->bd_tbl;

			task->txwr_only.sgl_ctx.cached_sge.cur_buf_addr.lo =
					fcoe_bd_tbl->buf_addr_lo;
			task->txwr_only.sgl_ctx.cached_sge.cur_buf_addr.hi =
					fcoe_bd_tbl->buf_addr_hi;
			task->txwr_only.sgl_ctx.cached_sge.cur_buf_rem =
					fcoe_bd_tbl->buf_len;

			task->txwr_rxrd.const_ctx.init_flags |= 1 <<
				FCOE_TCE_TX_WR_RX_RD_CONST_CACHED_SGE_SHIFT;
		} else {
			task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_addr.lo =
					(u32)bd_tbl->bd_tbl_dma;
			task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_addr.hi =
					(u32)((u64)bd_tbl->bd_tbl_dma >> 32);
			task->txwr_only.sgl_ctx.sgl.mul_sgl.sgl_size =
					bd_tbl->bd_valid;
		}
	}

	/* Tx Write Rx Read */
	/* Init state to NORMAL */
	task->txwr_rxrd.const_ctx.init_flags |= task_type <<
				FCOE_TCE_TX_WR_RX_RD_CONST_TASK_TYPE_SHIFT;
	if (dev_type == TYPE_TAPE) {
		task->txwr_rxrd.const_ctx.init_flags |=
				FCOE_TASK_DEV_TYPE_TAPE <<
				FCOE_TCE_TX_WR_RX_RD_CONST_DEV_TYPE_SHIFT;
		io_req->rec_retry = 0;
	} else {
		task->txwr_rxrd.const_ctx.init_flags |=
				FCOE_TASK_DEV_TYPE_DISK <<
				FCOE_TCE_TX_WR_RX_RD_CONST_DEV_TYPE_SHIFT;
	}
	task->txwr_rxrd.const_ctx.init_flags |= FCOE_TASK_CLASS_TYPE_3 <<
				FCOE_TCE_TX_WR_RX_RD_CONST_CLASS_TYPE_SHIFT;
	/* tx flags */
	task->txwr_rxrd.const_ctx.tx_flags = FCOE_TASK_TX_STATE_NORMAL <<
				FCOE_TCE_TX_WR_RX_RD_CONST_TX_STATE_SHIFT;

	/* Set initial seq counter */
	task->txwr_rxrd.union_ctx.tx_seq.ctx.seq_cnt = 1;

	/* Fill FCP_CMND IU */
	fcp_cmnd = (u64 *)task->txwr_rxrd.union_ctx.fcp_cmd.opaque;
	bnx2fc_build_fcp_cmnd(io_req, (struct fcp_cmnd *)&tmp_fcp_cmnd);

	/* swap fcp_cmnd */
	cnt = sizeof(struct fcp_cmnd) / sizeof(u64);

	for (i = 0; i < cnt; i++) {
		*fcp_cmnd = cpu_to_be64(tmp_fcp_cmnd[i]);
		fcp_cmnd++;
	}

	/* Rx Write Tx Read */
	task->rxwr_txrd.const_ctx.data_2_trns = io_req->data_xfer_len;

	context_id = tgt->context_id;
	task->rxwr_txrd.const_ctx.init_flags = context_id <<
				FCOE_TCE_RX_WR_TX_RD_CONST_CID_SHIFT;

	/* rx flags */
	/* Set state to "waiting for the first packet" */
	task->rxwr_txrd.var_ctx.rx_flags |= 1 <<
				FCOE_TCE_RX_WR_TX_RD_VAR_EXP_FIRST_FRAME_SHIFT;

	task->rxwr_txrd.var_ctx.rx_id = 0xffff;

	/* Rx Only */
	cached_sge = &task->rxwr_only.union_ctx.read_info.sgl_ctx.cached_sge;
	sgl = &task->rxwr_only.union_ctx.read_info.sgl_ctx.sgl;
	bd_count = bd_tbl->bd_valid;
	if (task_type == FCOE_TASK_TYPE_READ &&
	    dev_type == TYPE_DISK) {
		if (bd_count == 1) {
			struct fcoe_bd_ctx *fcoe_bd_tbl = bd_tbl->bd_tbl;

			cached_sge->cur_buf_addr.lo = fcoe_bd_tbl->buf_addr_lo;
			cached_sge->cur_buf_addr.hi = fcoe_bd_tbl->buf_addr_hi;
			cached_sge->cur_buf_rem = fcoe_bd_tbl->buf_len;
			task->txwr_rxrd.const_ctx.init_flags |= 1 <<
				FCOE_TCE_TX_WR_RX_RD_CONST_CACHED_SGE_SHIFT;
		} else if (bd_count == 2) {
			struct fcoe_bd_ctx *fcoe_bd_tbl = bd_tbl->bd_tbl;

			cached_sge->cur_buf_addr.lo = fcoe_bd_tbl->buf_addr_lo;
			cached_sge->cur_buf_addr.hi = fcoe_bd_tbl->buf_addr_hi;
			cached_sge->cur_buf_rem = fcoe_bd_tbl->buf_len;

			fcoe_bd_tbl++;
			cached_sge->second_buf_addr.lo =
						fcoe_bd_tbl->buf_addr_lo;
			cached_sge->second_buf_addr.hi =
						fcoe_bd_tbl->buf_addr_hi;
			cached_sge->second_buf_rem = fcoe_bd_tbl->buf_len;
			task->txwr_rxrd.const_ctx.init_flags |= 1 <<
				FCOE_TCE_TX_WR_RX_RD_CONST_CACHED_SGE_SHIFT;
		} else {
			sgl->mul_sgl.cur_sge_addr.lo = (u32)bd_tbl->bd_tbl_dma;
			sgl->mul_sgl.cur_sge_addr.hi =
					(u32)((u64)bd_tbl->bd_tbl_dma >> 32);
			sgl->mul_sgl.sgl_size = bd_count;
		}
	} else {
		sgl->mul_sgl.cur_sge_addr.lo = (u32)bd_tbl->bd_tbl_dma;
		sgl->mul_sgl.cur_sge_addr.hi =
				(u32)((u64)bd_tbl->bd_tbl_dma >> 32);
		sgl->mul_sgl.sgl_size = bd_count;
	}
}

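/*
 * SGE fast-path selection in bnx2fc_init_task(), summarised:
 *
 *	disk write, 1 BD            -> cached single SGE
 *	disk read,  1 BD            -> cached single SGE
 *	disk read,  2 BDs           -> cached double SGE
 *	anything else (incl. tape)  -> external SGL via bd_tbl_dma
 *
 * The cached variants embed the buffer address directly in the task
 * context so the chip can skip the extra DMA read of the BD table; the
 * thresholds above are read from the code, while the performance
 * rationale is the usual one assumed for such fast paths.
 */
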
/**
 * bnx2fc_setup_task_ctx - allocate and map task context
 *
 * @hba: pointer to adapter structure
 *
 * Allocate memory for the task context array and the associated BD
 * table used by the firmware.
 *
 */
int bnx2fc_setup_task_ctx(struct bnx2fc_hba *hba)
{
	int rc = 0;
	struct regpair *task_ctx_bdt;
	dma_addr_t addr;
	int i;

	/*
	 * Allocate task context bd table. One page of the bd table
	 * can map 256 buffers. Each buffer contains 32 task context
	 * entries. Hence the limit with one page is 8192 task context
	 * entries.
	 */
	hba->task_ctx_bd_tbl = dma_alloc_coherent(&hba->pcidev->dev,
						  PAGE_SIZE,
						  &hba->task_ctx_bd_dma,
						  GFP_KERNEL);
	if (!hba->task_ctx_bd_tbl) {
		printk(KERN_ERR PFX "unable to allocate task context BDT\n");
		rc = -ENOMEM;
		goto out;
	}
	memset(hba->task_ctx_bd_tbl, 0, PAGE_SIZE);

	/*
	 * Allocate task_ctx which is an array of pointers pointing to
	 * a page containing 32 task contexts
	 */
	hba->task_ctx = kzalloc((BNX2FC_TASK_CTX_ARR_SZ * sizeof(void *)),
				 GFP_KERNEL);
	if (!hba->task_ctx) {
		printk(KERN_ERR PFX "unable to allocate task context array\n");
		rc = -ENOMEM;
		goto out1;
	}

	/*
	 * Allocate task_ctx_dma which is an array of dma addresses
	 */
	hba->task_ctx_dma = kmalloc((BNX2FC_TASK_CTX_ARR_SZ *
					sizeof(dma_addr_t)), GFP_KERNEL);
	if (!hba->task_ctx_dma) {
		printk(KERN_ERR PFX "unable to alloc context mapping array\n");
		rc = -ENOMEM;
		goto out2;
	}

	task_ctx_bdt = (struct regpair *)hba->task_ctx_bd_tbl;
	for (i = 0; i < BNX2FC_TASK_CTX_ARR_SZ; i++) {

		hba->task_ctx[i] = dma_alloc_coherent(&hba->pcidev->dev,
						      PAGE_SIZE,
						      &hba->task_ctx_dma[i],
						      GFP_KERNEL);
		if (!hba->task_ctx[i]) {
			printk(KERN_ERR PFX "unable to alloc task context\n");
			rc = -ENOMEM;
			goto out3;
		}
		memset(hba->task_ctx[i], 0, PAGE_SIZE);
		addr = (u64)hba->task_ctx_dma[i];
		task_ctx_bdt->hi = cpu_to_le32((u64)addr >> 32);
		task_ctx_bdt->lo = cpu_to_le32((u32)addr);
		task_ctx_bdt++;
	}
	return 0;

out3:
	for (i = 0; i < BNX2FC_TASK_CTX_ARR_SZ; i++) {
		if (hba->task_ctx[i]) {
			dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
				hba->task_ctx[i], hba->task_ctx_dma[i]);
			hba->task_ctx[i] = NULL;
		}
	}

	kfree(hba->task_ctx_dma);
	hba->task_ctx_dma = NULL;
out2:
	kfree(hba->task_ctx);
	hba->task_ctx = NULL;
out1:
	dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
			hba->task_ctx_bd_tbl, hba->task_ctx_bd_dma);
	hba->task_ctx_bd_tbl = NULL;
out:
	return rc;
}

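/*
 * Sizing example (entry size assumed for illustration): with 4K pages and
 * a 128-byte struct fcoe_task_ctx_entry, each task-context page holds 32
 * entries, so BNX2FC_TASK_CTX_ARR_SZ pages cover
 * BNX2FC_TASK_CTX_ARR_SZ * 32 tasks; one regpair (8 bytes) per page in
 * the BD table is all the firmware needs to locate them.
 */
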
void bnx2fc_free_task_ctx(struct bnx2fc_hba *hba)
{
	int i;

	if (hba->task_ctx_bd_tbl) {
		dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
				    hba->task_ctx_bd_tbl,
				    hba->task_ctx_bd_dma);
		hba->task_ctx_bd_tbl = NULL;
	}

	if (hba->task_ctx) {
		for (i = 0; i < BNX2FC_TASK_CTX_ARR_SZ; i++) {
			if (hba->task_ctx[i]) {
				dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
						    hba->task_ctx[i],
						    hba->task_ctx_dma[i]);
				hba->task_ctx[i] = NULL;
			}
		}
		kfree(hba->task_ctx);
		hba->task_ctx = NULL;
	}

	kfree(hba->task_ctx_dma);
	hba->task_ctx_dma = NULL;
}

static void bnx2fc_free_hash_table(struct bnx2fc_hba *hba)
{
	int i;
	int segment_count;
	u32 *pbl;

	if (hba->hash_tbl_segments) {

		pbl = hba->hash_tbl_pbl;
		if (pbl) {
			segment_count = hba->hash_tbl_segment_count;
			for (i = 0; i < segment_count; ++i) {
				dma_addr_t dma_address;

				dma_address = le32_to_cpu(*pbl);
				++pbl;
				dma_address += ((u64)le32_to_cpu(*pbl)) << 32;
				++pbl;
				dma_free_coherent(&hba->pcidev->dev,
						  BNX2FC_HASH_TBL_CHUNK_SIZE,
						  hba->hash_tbl_segments[i],
						  dma_address);
			}
		}

		kfree(hba->hash_tbl_segments);
		hba->hash_tbl_segments = NULL;
	}

	if (hba->hash_tbl_pbl) {
		dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
				    hba->hash_tbl_pbl,
				    hba->hash_tbl_pbl_dma);
		hba->hash_tbl_pbl = NULL;
	}
}

static int bnx2fc_allocate_hash_table(struct bnx2fc_hba *hba)
{
	int i;
	int hash_table_size;
	int segment_count;
	int segment_array_size;
	int dma_segment_array_size;
	dma_addr_t *dma_segment_array;
	u32 *pbl;

	hash_table_size = BNX2FC_NUM_MAX_SESS * BNX2FC_MAX_ROWS_IN_HASH_TBL *
		sizeof(struct fcoe_hash_table_entry);

	segment_count = hash_table_size + BNX2FC_HASH_TBL_CHUNK_SIZE - 1;
	segment_count /= BNX2FC_HASH_TBL_CHUNK_SIZE;
	hba->hash_tbl_segment_count = segment_count;

	segment_array_size = segment_count * sizeof(*hba->hash_tbl_segments);
	hba->hash_tbl_segments = kzalloc(segment_array_size, GFP_KERNEL);
	if (!hba->hash_tbl_segments) {
		printk(KERN_ERR PFX "hash table pointers alloc failed\n");
		return -ENOMEM;
	}
	dma_segment_array_size = segment_count * sizeof(*dma_segment_array);
	dma_segment_array = kzalloc(dma_segment_array_size, GFP_KERNEL);
	if (!dma_segment_array) {
		printk(KERN_ERR PFX "hash table pointers (dma) alloc failed\n");
		goto cleanup_ht;
	}

	for (i = 0; i < segment_count; ++i) {
		hba->hash_tbl_segments[i] =
			dma_alloc_coherent(&hba->pcidev->dev,
					   BNX2FC_HASH_TBL_CHUNK_SIZE,
					   &dma_segment_array[i],
					   GFP_KERNEL);
		if (!hba->hash_tbl_segments[i]) {
			printk(KERN_ERR PFX "hash segment alloc failed\n");
			goto cleanup_dma;
		}
		memset(hba->hash_tbl_segments[i], 0,
		       BNX2FC_HASH_TBL_CHUNK_SIZE);
	}

	hba->hash_tbl_pbl = dma_alloc_coherent(&hba->pcidev->dev,
					       PAGE_SIZE,
					       &hba->hash_tbl_pbl_dma,
					       GFP_KERNEL);
	if (!hba->hash_tbl_pbl) {
		printk(KERN_ERR PFX "hash table pbl alloc failed\n");
		goto cleanup_dma;
	}
	memset(hba->hash_tbl_pbl, 0, PAGE_SIZE);

	pbl = hba->hash_tbl_pbl;
	for (i = 0; i < segment_count; ++i) {
		u64 paddr = dma_segment_array[i];
		*pbl = cpu_to_le32((u32) paddr);
		++pbl;
		*pbl = cpu_to_le32((u32) (paddr >> 32));
		++pbl;
	}
	kfree(dma_segment_array);
	return 0;

cleanup_dma:
	for (i = 0; i < segment_count; ++i) {
		if (hba->hash_tbl_segments[i])
			dma_free_coherent(&hba->pcidev->dev,
					  BNX2FC_HASH_TBL_CHUNK_SIZE,
					  hba->hash_tbl_segments[i],
					  dma_segment_array[i]);
	}
	kfree(dma_segment_array);

cleanup_ht:
	kfree(hba->hash_tbl_segments);
	hba->hash_tbl_segments = NULL;
	return -ENOMEM;
}

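/*
 * PBL layout produced above, for illustration: each segment occupies two
 * consecutive little-endian u32 slots, low DMA word first:
 *
 *	pbl[0] = lower 32 bits of segment 0
 *	pbl[1] = upper 32 bits of segment 0
 *	pbl[2] = lower 32 bits of segment 1
 *	...
 *
 * the same low/high regpair convention used for the task context BD
 * table earlier in this file.
 */
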
/**
 * bnx2fc_setup_fw_resc - Allocate and map hash table and dummy buffer
 *
 * @hba: Pointer to adapter structure
 *
 */
int bnx2fc_setup_fw_resc(struct bnx2fc_hba *hba)
{
	u64 addr;
	u32 mem_size;
	int i;

	if (bnx2fc_allocate_hash_table(hba))
		return -ENOMEM;

	mem_size = BNX2FC_NUM_MAX_SESS * sizeof(struct regpair);
	hba->t2_hash_tbl_ptr = dma_alloc_coherent(&hba->pcidev->dev, mem_size,
						  &hba->t2_hash_tbl_ptr_dma,
						  GFP_KERNEL);
	if (!hba->t2_hash_tbl_ptr) {
		printk(KERN_ERR PFX "unable to allocate t2 hash table ptr\n");
		bnx2fc_free_fw_resc(hba);
		return -ENOMEM;
	}
	memset(hba->t2_hash_tbl_ptr, 0x00, mem_size);

	mem_size = BNX2FC_NUM_MAX_SESS *
		sizeof(struct fcoe_t2_hash_table_entry);
	hba->t2_hash_tbl = dma_alloc_coherent(&hba->pcidev->dev, mem_size,
					      &hba->t2_hash_tbl_dma,
					      GFP_KERNEL);
	if (!hba->t2_hash_tbl) {
		printk(KERN_ERR PFX "unable to allocate t2 hash table\n");
		bnx2fc_free_fw_resc(hba);
		return -ENOMEM;
	}
	memset(hba->t2_hash_tbl, 0x00, mem_size);
	for (i = 0; i < BNX2FC_NUM_MAX_SESS; i++) {
		addr = (u64) hba->t2_hash_tbl_dma +
			 ((i+1) * sizeof(struct fcoe_t2_hash_table_entry));
		hba->t2_hash_tbl[i].next.lo = addr & 0xffffffff;
		hba->t2_hash_tbl[i].next.hi = addr >> 32;
	}

	hba->dummy_buffer = dma_alloc_coherent(&hba->pcidev->dev,
					       PAGE_SIZE, &hba->dummy_buf_dma,
					       GFP_KERNEL);
	if (!hba->dummy_buffer) {
		printk(KERN_ERR PFX "unable to alloc MP Dummy Buffer\n");
		bnx2fc_free_fw_resc(hba);
		return -ENOMEM;
	}

	hba->stats_buffer = dma_alloc_coherent(&hba->pcidev->dev,
					       PAGE_SIZE,
					       &hba->stats_buf_dma,
					       GFP_KERNEL);
	if (!hba->stats_buffer) {
		printk(KERN_ERR PFX "unable to alloc Stats Buffer\n");
		bnx2fc_free_fw_resc(hba);
		return -ENOMEM;
	}
	memset(hba->stats_buffer, 0x00, PAGE_SIZE);

	return 0;
}

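/*
 * T2 hash chaining, worked example (addresses and entry size invented for
 * illustration): the loop above links each entry to its successor by
 * physical address, so with t2_hash_tbl_dma == 0x1000 and a 64-byte entry
 * size, entry 0's next pointer is 0x1040, entry 1's is 0x1080, and the
 * last entry points one past the end of the table -- the firmware is
 * assumed to terminate chains itself rather than rely on a NULL link.
 */
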
void bnx2fc_free_fw_resc(struct bnx2fc_hba *hba)
{
	u32 mem_size;

	if (hba->stats_buffer) {
		dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
				  hba->stats_buffer, hba->stats_buf_dma);
		hba->stats_buffer = NULL;
	}

	if (hba->dummy_buffer) {
		dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
				  hba->dummy_buffer, hba->dummy_buf_dma);
		hba->dummy_buffer = NULL;
	}

	if (hba->t2_hash_tbl_ptr) {
		mem_size = BNX2FC_NUM_MAX_SESS * sizeof(struct regpair);
		dma_free_coherent(&hba->pcidev->dev, mem_size,
				  hba->t2_hash_tbl_ptr,
				  hba->t2_hash_tbl_ptr_dma);
		hba->t2_hash_tbl_ptr = NULL;
	}

	if (hba->t2_hash_tbl) {
		mem_size = BNX2FC_NUM_MAX_SESS *
			    sizeof(struct fcoe_t2_hash_table_entry);
		dma_free_coherent(&hba->pcidev->dev, mem_size,
				  hba->t2_hash_tbl, hba->t2_hash_tbl_dma);
		hba->t2_hash_tbl = NULL;
	}
	bnx2fc_free_hash_table(hba);
}