/* bnx2fc_hwi.c: Broadcom NetXtreme II Linux FCoE offload driver.
 * This file contains the low level functions that interact
 * with 57712 FCoE firmware.
 *
 * Copyright (c) 2008 - 2010 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Written by: Bhanu Prakash Gollapudi (bprakash@broadcom.com)
 */

#include "bnx2fc.h"

DECLARE_PER_CPU(struct bnx2fc_percpu_s, bnx2fc_percpu);

static void bnx2fc_fastpath_notification(struct bnx2fc_hba *hba,
					struct fcoe_kcqe *new_cqe_kcqe);
static void bnx2fc_process_ofld_cmpl(struct bnx2fc_hba *hba,
					struct fcoe_kcqe *ofld_kcqe);
static void bnx2fc_process_enable_conn_cmpl(struct bnx2fc_hba *hba,
						struct fcoe_kcqe *ofld_kcqe);
static void bnx2fc_init_failure(struct bnx2fc_hba *hba, u32 err_code);
static void bnx2fc_process_conn_destroy_cmpl(struct bnx2fc_hba *hba,
					struct fcoe_kcqe *destroy_kcqe);

int bnx2fc_send_stat_req(struct bnx2fc_hba *hba)
{
	struct fcoe_kwqe_stat stat_req;
	struct kwqe *kwqe_arr[2];
	int num_kwqes = 1;
	int rc = 0;

	memset(&stat_req, 0x00, sizeof(struct fcoe_kwqe_stat));
	stat_req.hdr.op_code = FCOE_KWQE_OPCODE_STAT;
	stat_req.hdr.flags =
		(FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);

	stat_req.stat_params_addr_lo = (u32) hba->stats_buf_dma;
	stat_req.stat_params_addr_hi = (u32) ((u64)hba->stats_buf_dma >> 32);

	kwqe_arr[0] = (struct kwqe *) &stat_req;

	if (hba->cnic && hba->cnic->submit_kwqes)
		rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes);

	return rc;
}

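/*
 * Note: every KWQE in this file carries DMA addresses split into
 * 32-bit lo/hi halves, as in bnx2fc_send_stat_req() above.  A minimal
 * sketch of the recurring pattern (illustrative only, not a helper
 * the driver actually defines):
 *
 *	dma_addr_t dma = hba->stats_buf_dma;
 *	req.addr_lo = (u32) dma;		// low 32 bits
 *	req.addr_hi = (u32) ((u64) dma >> 32);	// high 32 bits
 *
 * The (u64) cast keeps the shift well-defined even when dma_addr_t
 * is only 32 bits wide.
 */
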
/**
 * bnx2fc_send_fw_fcoe_init_msg - initiates initial handshake with FCoE f/w
 *
 * @hba: adapter structure pointer
 *
 * Send down FCoE firmware init KWQEs which initiate the initial handshake
 * with the f/w.
 *
 */
int bnx2fc_send_fw_fcoe_init_msg(struct bnx2fc_hba *hba)
{
	struct fcoe_kwqe_init1 fcoe_init1;
	struct fcoe_kwqe_init2 fcoe_init2;
	struct fcoe_kwqe_init3 fcoe_init3;
	struct kwqe *kwqe_arr[3];
	int num_kwqes = 3;
	int rc = 0;

	if (!hba->cnic) {
		printk(KERN_ERR PFX "hba->cnic NULL during fcoe fw init\n");
		return -ENODEV;
	}

	/* fill init1 KWQE */
	memset(&fcoe_init1, 0x00, sizeof(struct fcoe_kwqe_init1));
	fcoe_init1.hdr.op_code = FCOE_KWQE_OPCODE_INIT1;
	fcoe_init1.hdr.flags = (FCOE_KWQE_LAYER_CODE <<
					FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);

	fcoe_init1.num_tasks = BNX2FC_MAX_TASKS;
	fcoe_init1.sq_num_wqes = BNX2FC_SQ_WQES_MAX;
	fcoe_init1.rq_num_wqes = BNX2FC_RQ_WQES_MAX;
	fcoe_init1.rq_buffer_log_size = BNX2FC_RQ_BUF_LOG_SZ;
	fcoe_init1.cq_num_wqes = BNX2FC_CQ_WQES_MAX;
	fcoe_init1.dummy_buffer_addr_lo = (u32) hba->dummy_buf_dma;
	fcoe_init1.dummy_buffer_addr_hi = (u32) ((u64)hba->dummy_buf_dma >> 32);
	fcoe_init1.task_list_pbl_addr_lo = (u32) hba->task_ctx_bd_dma;
	fcoe_init1.task_list_pbl_addr_hi =
		(u32) ((u64) hba->task_ctx_bd_dma >> 32);
	fcoe_init1.mtu = BNX2FC_MINI_JUMBO_MTU;

	fcoe_init1.flags = (PAGE_SHIFT <<
				FCOE_KWQE_INIT1_LOG_PAGE_SIZE_SHIFT);

	fcoe_init1.num_sessions_log = BNX2FC_NUM_MAX_SESS_LOG;

	/* fill init2 KWQE */
	memset(&fcoe_init2, 0x00, sizeof(struct fcoe_kwqe_init2));
	fcoe_init2.hdr.op_code = FCOE_KWQE_OPCODE_INIT2;
	fcoe_init2.hdr.flags = (FCOE_KWQE_LAYER_CODE <<
					FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);

	fcoe_init2.hsi_major_version = FCOE_HSI_MAJOR_VERSION;
	fcoe_init2.hsi_minor_version = FCOE_HSI_MINOR_VERSION;

	fcoe_init2.hash_tbl_pbl_addr_lo = (u32) hba->hash_tbl_pbl_dma;
	fcoe_init2.hash_tbl_pbl_addr_hi = (u32)
					   ((u64) hba->hash_tbl_pbl_dma >> 32);

	fcoe_init2.t2_hash_tbl_addr_lo = (u32) hba->t2_hash_tbl_dma;
	fcoe_init2.t2_hash_tbl_addr_hi = (u32)
					  ((u64) hba->t2_hash_tbl_dma >> 32);

	fcoe_init2.t2_ptr_hash_tbl_addr_lo = (u32) hba->t2_hash_tbl_ptr_dma;
	fcoe_init2.t2_ptr_hash_tbl_addr_hi = (u32)
					((u64) hba->t2_hash_tbl_ptr_dma >> 32);

	fcoe_init2.free_list_count = BNX2FC_NUM_MAX_SESS;

	/* fill init3 KWQE */
	memset(&fcoe_init3, 0x00, sizeof(struct fcoe_kwqe_init3));
	fcoe_init3.hdr.op_code = FCOE_KWQE_OPCODE_INIT3;
	fcoe_init3.hdr.flags = (FCOE_KWQE_LAYER_CODE <<
					FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);
	fcoe_init3.error_bit_map_lo = 0xffffffff;
	fcoe_init3.error_bit_map_hi = 0xffffffff;

	fcoe_init3.perf_config = 1;

	kwqe_arr[0] = (struct kwqe *) &fcoe_init1;
	kwqe_arr[1] = (struct kwqe *) &fcoe_init2;
	kwqe_arr[2] = (struct kwqe *) &fcoe_init3;

	if (hba->cnic && hba->cnic->submit_kwqes)
		rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes);

	return rc;
}
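
/*
 * The three INIT KWQEs above divide the firmware handshake cleanly:
 * init1 sizes the task list and the SQ/RQ/CQ rings, init2 hands over
 * the hash-table DMA addresses plus the HSI version the driver was
 * built against, and init3 sets the error/warning bitmaps that select
 * which exceptions the firmware reports.  If the HSI version in init2
 * does not match the firmware, the INIT_FUNC KCQE comes back with
 * FCOE_KCQE_COMPLETION_STATUS_WRONG_HSI_VERSION (see
 * bnx2fc_init_failure() below).
 */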

int bnx2fc_send_fw_fcoe_destroy_msg(struct bnx2fc_hba *hba)
{
	struct fcoe_kwqe_destroy fcoe_destroy;
	struct kwqe *kwqe_arr[2];
	int num_kwqes = 1;
	int rc = -1;

	/* fill destroy KWQE */
	memset(&fcoe_destroy, 0x00, sizeof(struct fcoe_kwqe_destroy));
	fcoe_destroy.hdr.op_code = FCOE_KWQE_OPCODE_DESTROY;
	fcoe_destroy.hdr.flags = (FCOE_KWQE_LAYER_CODE <<
					FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);
	kwqe_arr[0] = (struct kwqe *) &fcoe_destroy;

	if (hba->cnic && hba->cnic->submit_kwqes)
		rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes);
	return rc;
}

/**
 * bnx2fc_send_session_ofld_req - initiates FCoE Session offload process
 *
 * @port: port structure pointer
 * @tgt: bnx2fc_rport structure pointer
 */
int bnx2fc_send_session_ofld_req(struct fcoe_port *port,
					struct bnx2fc_rport *tgt)
{
	struct fc_lport *lport = port->lport;
	struct bnx2fc_interface *interface = port->priv;
	struct bnx2fc_hba *hba = interface->hba;
	struct kwqe *kwqe_arr[4];
	struct fcoe_kwqe_conn_offload1 ofld_req1;
	struct fcoe_kwqe_conn_offload2 ofld_req2;
	struct fcoe_kwqe_conn_offload3 ofld_req3;
	struct fcoe_kwqe_conn_offload4 ofld_req4;
	struct fc_rport_priv *rdata = tgt->rdata;
	struct fc_rport *rport = tgt->rport;
	int num_kwqes = 4;
	u32 port_id;
	int rc = 0;
	u16 conn_id;

	/* Initialize offload request 1 structure */
	memset(&ofld_req1, 0x00, sizeof(struct fcoe_kwqe_conn_offload1));

	ofld_req1.hdr.op_code = FCOE_KWQE_OPCODE_OFFLOAD_CONN1;
	ofld_req1.hdr.flags =
		(FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);

	conn_id = (u16)tgt->fcoe_conn_id;
	ofld_req1.fcoe_conn_id = conn_id;

	ofld_req1.sq_addr_lo = (u32) tgt->sq_dma;
	ofld_req1.sq_addr_hi = (u32)((u64) tgt->sq_dma >> 32);

	ofld_req1.rq_pbl_addr_lo = (u32) tgt->rq_pbl_dma;
	ofld_req1.rq_pbl_addr_hi = (u32)((u64) tgt->rq_pbl_dma >> 32);

	ofld_req1.rq_first_pbe_addr_lo = (u32) tgt->rq_dma;
	ofld_req1.rq_first_pbe_addr_hi =
		(u32)((u64) tgt->rq_dma >> 32);

	ofld_req1.rq_prod = 0x8000;

	/* Initialize offload request 2 structure */
	memset(&ofld_req2, 0x00, sizeof(struct fcoe_kwqe_conn_offload2));

	ofld_req2.hdr.op_code = FCOE_KWQE_OPCODE_OFFLOAD_CONN2;
	ofld_req2.hdr.flags =
		(FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);

	ofld_req2.tx_max_fc_pay_len = rdata->maxframe_size;

	ofld_req2.cq_addr_lo = (u32) tgt->cq_dma;
	ofld_req2.cq_addr_hi = (u32)((u64)tgt->cq_dma >> 32);

	ofld_req2.xferq_addr_lo = (u32) tgt->xferq_dma;
	ofld_req2.xferq_addr_hi = (u32)((u64)tgt->xferq_dma >> 32);

	ofld_req2.conn_db_addr_lo = (u32)tgt->conn_db_dma;
	ofld_req2.conn_db_addr_hi = (u32)((u64)tgt->conn_db_dma >> 32);

	/* Initialize offload request 3 structure */
	memset(&ofld_req3, 0x00, sizeof(struct fcoe_kwqe_conn_offload3));

	ofld_req3.hdr.op_code = FCOE_KWQE_OPCODE_OFFLOAD_CONN3;
	ofld_req3.hdr.flags =
		(FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);

	ofld_req3.vlan_tag = interface->vlan_id <<
				FCOE_KWQE_CONN_OFFLOAD3_VLAN_ID_SHIFT;
	ofld_req3.vlan_tag |= 3 << FCOE_KWQE_CONN_OFFLOAD3_PRIORITY_SHIFT;

	port_id = fc_host_port_id(lport->host);
	if (port_id == 0) {
		BNX2FC_HBA_DBG(lport, "ofld_req: port_id = 0, link down?\n");
		return -EINVAL;
	}

	/*
	 * Store s_id of the initiator for further reference. This will
	 * be used during disable/destroy during linkdown processing as
	 * when the lport is reset, the port_id also is reset to 0
	 */
	tgt->sid = port_id;
	ofld_req3.s_id[0] = (port_id & 0x000000FF);
	ofld_req3.s_id[1] = (port_id & 0x0000FF00) >> 8;
	ofld_req3.s_id[2] = (port_id & 0x00FF0000) >> 16;

	port_id = rport->port_id;
	ofld_req3.d_id[0] = (port_id & 0x000000FF);
	ofld_req3.d_id[1] = (port_id & 0x0000FF00) >> 8;
	ofld_req3.d_id[2] = (port_id & 0x00FF0000) >> 16;

	ofld_req3.tx_total_conc_seqs = rdata->max_seq;

	ofld_req3.tx_max_conc_seqs_c3 = rdata->max_seq;
	ofld_req3.rx_max_fc_pay_len = lport->mfs;

	ofld_req3.rx_total_conc_seqs = BNX2FC_MAX_SEQS;
	ofld_req3.rx_max_conc_seqs_c3 = BNX2FC_MAX_SEQS;
	ofld_req3.rx_open_seqs_exch_c3 = 1;

	ofld_req3.confq_first_pbe_addr_lo = tgt->confq_dma;
	ofld_req3.confq_first_pbe_addr_hi = (u32)((u64) tgt->confq_dma >> 32);

	/* set mul_n_port_ids supported flag to 0, until it is supported */
	ofld_req3.flags = 0;
	/*
	ofld_req3.flags |= (((lport->send_sp_features & FC_SP_FT_MNA) ? 1:0) <<
			    FCOE_KWQE_CONN_OFFLOAD3_B_MUL_N_PORT_IDS_SHIFT);
	*/
	/* Info from PLOGI response */
	ofld_req3.flags |= (((rdata->sp_features & FC_SP_FT_EDTR) ? 1 : 0) <<
			     FCOE_KWQE_CONN_OFFLOAD3_B_E_D_TOV_RES_SHIFT);

	ofld_req3.flags |= (((rdata->sp_features & FC_SP_FT_SEQC) ? 1 : 0) <<
			     FCOE_KWQE_CONN_OFFLOAD3_B_CONT_INCR_SEQ_CNT_SHIFT);

	/*
	 * Info from PRLI response, this info is used for sequence level error
	 * recovery support
	 */
	if (tgt->dev_type == TYPE_TAPE) {
		ofld_req3.flags |= 1 <<
				    FCOE_KWQE_CONN_OFFLOAD3_B_CONF_REQ_SHIFT;
		ofld_req3.flags |= (((rdata->flags & FC_RP_FLAGS_REC_SUPPORTED)
				    ? 1 : 0) <<
				   FCOE_KWQE_CONN_OFFLOAD3_B_REC_VALID_SHIFT);
	}

	/* vlan flag */
	ofld_req3.flags |= (interface->vlan_enabled <<
			    FCOE_KWQE_CONN_OFFLOAD3_B_VLAN_FLAG_SHIFT);

	/* C2_VALID and ACK flags are not set as they are not supported */

	/* Initialize offload request 4 structure */
	memset(&ofld_req4, 0x00, sizeof(struct fcoe_kwqe_conn_offload4));
	ofld_req4.hdr.op_code = FCOE_KWQE_OPCODE_OFFLOAD_CONN4;
	ofld_req4.hdr.flags =
		(FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);

	ofld_req4.e_d_tov_timer_val = lport->e_d_tov / 20;

	ofld_req4.src_mac_addr_lo[0] = port->data_src_addr[5];
							/* local mac */
	ofld_req4.src_mac_addr_lo[1] = port->data_src_addr[4];
	ofld_req4.src_mac_addr_mid[0] = port->data_src_addr[3];
	ofld_req4.src_mac_addr_mid[1] = port->data_src_addr[2];
	ofld_req4.src_mac_addr_hi[0] = port->data_src_addr[1];
	ofld_req4.src_mac_addr_hi[1] = port->data_src_addr[0];
	ofld_req4.dst_mac_addr_lo[0] = interface->ctlr.dest_addr[5];
							/* fcf mac */
	ofld_req4.dst_mac_addr_lo[1] = interface->ctlr.dest_addr[4];
	ofld_req4.dst_mac_addr_mid[0] = interface->ctlr.dest_addr[3];
	ofld_req4.dst_mac_addr_mid[1] = interface->ctlr.dest_addr[2];
	ofld_req4.dst_mac_addr_hi[0] = interface->ctlr.dest_addr[1];
	ofld_req4.dst_mac_addr_hi[1] = interface->ctlr.dest_addr[0];

	ofld_req4.lcq_addr_lo = (u32) tgt->lcq_dma;
	ofld_req4.lcq_addr_hi = (u32)((u64) tgt->lcq_dma >> 32);

	ofld_req4.confq_pbl_base_addr_lo = (u32) tgt->confq_pbl_dma;
	ofld_req4.confq_pbl_base_addr_hi =
					(u32)((u64) tgt->confq_pbl_dma >> 32);

	kwqe_arr[0] = (struct kwqe *) &ofld_req1;
	kwqe_arr[1] = (struct kwqe *) &ofld_req2;
	kwqe_arr[2] = (struct kwqe *) &ofld_req3;
	kwqe_arr[3] = (struct kwqe *) &ofld_req4;

	if (hba->cnic && hba->cnic->submit_kwqes)
		rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes);

	return rc;
}
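
/*
 * Worked example of the s_id/d_id packing above (illustrative values):
 * a 24-bit FC address port_id = 0xAABBCC is stored least significant
 * byte first, so s_id[0] = 0xCC, s_id[1] = 0xBB, s_id[2] = 0xAA.  The
 * same layout is used for d_id and by the enable/disable requests
 * below.
 */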

/**
 * bnx2fc_send_session_enable_req - initiates FCoE Session enablement
 *
 * @port: port structure pointer
 * @tgt: bnx2fc_rport structure pointer
 */
static int bnx2fc_send_session_enable_req(struct fcoe_port *port,
					struct bnx2fc_rport *tgt)
{
	struct kwqe *kwqe_arr[2];
	struct bnx2fc_interface *interface = port->priv;
	struct bnx2fc_hba *hba = interface->hba;
	struct fcoe_kwqe_conn_enable_disable enbl_req;
	struct fc_lport *lport = port->lport;
	struct fc_rport *rport = tgt->rport;
	int num_kwqes = 1;
	int rc = 0;
	u32 port_id;

	memset(&enbl_req, 0x00,
	       sizeof(struct fcoe_kwqe_conn_enable_disable));
	enbl_req.hdr.op_code = FCOE_KWQE_OPCODE_ENABLE_CONN;
	enbl_req.hdr.flags =
		(FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);

	enbl_req.src_mac_addr_lo[0] = port->data_src_addr[5];
							/* local mac */
	enbl_req.src_mac_addr_lo[1] = port->data_src_addr[4];
	enbl_req.src_mac_addr_mid[0] = port->data_src_addr[3];
	enbl_req.src_mac_addr_mid[1] = port->data_src_addr[2];
	enbl_req.src_mac_addr_hi[0] = port->data_src_addr[1];
	enbl_req.src_mac_addr_hi[1] = port->data_src_addr[0];
	memcpy(tgt->src_addr, port->data_src_addr, ETH_ALEN);

	enbl_req.dst_mac_addr_lo[0] = interface->ctlr.dest_addr[5];
	enbl_req.dst_mac_addr_lo[1] = interface->ctlr.dest_addr[4];
	enbl_req.dst_mac_addr_mid[0] = interface->ctlr.dest_addr[3];
	enbl_req.dst_mac_addr_mid[1] = interface->ctlr.dest_addr[2];
	enbl_req.dst_mac_addr_hi[0] = interface->ctlr.dest_addr[1];
	enbl_req.dst_mac_addr_hi[1] = interface->ctlr.dest_addr[0];

	port_id = fc_host_port_id(lport->host);
	if (port_id != tgt->sid) {
		printk(KERN_ERR PFX "WARN: enable_req port_id = 0x%x, "
				"sid = 0x%x\n", port_id, tgt->sid);
		port_id = tgt->sid;
	}
	enbl_req.s_id[0] = (port_id & 0x000000FF);
	enbl_req.s_id[1] = (port_id & 0x0000FF00) >> 8;
	enbl_req.s_id[2] = (port_id & 0x00FF0000) >> 16;

	port_id = rport->port_id;
	enbl_req.d_id[0] = (port_id & 0x000000FF);
	enbl_req.d_id[1] = (port_id & 0x0000FF00) >> 8;
	enbl_req.d_id[2] = (port_id & 0x00FF0000) >> 16;
	enbl_req.vlan_tag = interface->vlan_id <<
				FCOE_KWQE_CONN_ENABLE_DISABLE_VLAN_ID_SHIFT;
	enbl_req.vlan_tag |= 3 << FCOE_KWQE_CONN_ENABLE_DISABLE_PRIORITY_SHIFT;
	enbl_req.vlan_flag = interface->vlan_enabled;
	enbl_req.context_id = tgt->context_id;
	enbl_req.conn_id = tgt->fcoe_conn_id;

	kwqe_arr[0] = (struct kwqe *) &enbl_req;

	if (hba->cnic && hba->cnic->submit_kwqes)
		rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes);
	return rc;
}
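
/*
 * Byte-order note on the MAC assignments above (illustrative values):
 * for a MAC of 00:11:22:33:44:55, src_mac_addr_lo holds {0x55, 0x44},
 * _mid holds {0x33, 0x22} and _hi holds {0x11, 0x00} -- the address is
 * reversed in lo/mid/hi pairs, presumably because the firmware keeps
 * it as three little-endian 16-bit words.  The offload and disable
 * requests use the same swizzle.
 */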

/**
 * bnx2fc_send_session_disable_req - initiates FCoE Session disable
 *
 * @port: port structure pointer
 * @tgt: bnx2fc_rport structure pointer
 */
int bnx2fc_send_session_disable_req(struct fcoe_port *port,
				    struct bnx2fc_rport *tgt)
{
	struct bnx2fc_interface *interface = port->priv;
	struct bnx2fc_hba *hba = interface->hba;
	struct fcoe_kwqe_conn_enable_disable disable_req;
	struct kwqe *kwqe_arr[2];
	struct fc_rport *rport = tgt->rport;
	int num_kwqes = 1;
	int rc = 0;
	u32 port_id;

	memset(&disable_req, 0x00,
	       sizeof(struct fcoe_kwqe_conn_enable_disable));
	disable_req.hdr.op_code = FCOE_KWQE_OPCODE_DISABLE_CONN;
	disable_req.hdr.flags =
		(FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);

	disable_req.src_mac_addr_lo[0] = tgt->src_addr[5];
	disable_req.src_mac_addr_lo[1] = tgt->src_addr[4];
	disable_req.src_mac_addr_mid[0] = tgt->src_addr[3];
	disable_req.src_mac_addr_mid[1] = tgt->src_addr[2];
	disable_req.src_mac_addr_hi[0] = tgt->src_addr[1];
	disable_req.src_mac_addr_hi[1] = tgt->src_addr[0];

	disable_req.dst_mac_addr_lo[0] = interface->ctlr.dest_addr[5];
	disable_req.dst_mac_addr_lo[1] = interface->ctlr.dest_addr[4];
	disable_req.dst_mac_addr_mid[0] = interface->ctlr.dest_addr[3];
	disable_req.dst_mac_addr_mid[1] = interface->ctlr.dest_addr[2];
	disable_req.dst_mac_addr_hi[0] = interface->ctlr.dest_addr[1];
	disable_req.dst_mac_addr_hi[1] = interface->ctlr.dest_addr[0];

	port_id = tgt->sid;
	disable_req.s_id[0] = (port_id & 0x000000FF);
	disable_req.s_id[1] = (port_id & 0x0000FF00) >> 8;
	disable_req.s_id[2] = (port_id & 0x00FF0000) >> 16;

	port_id = rport->port_id;
	disable_req.d_id[0] = (port_id & 0x000000FF);
	disable_req.d_id[1] = (port_id & 0x0000FF00) >> 8;
	disable_req.d_id[2] = (port_id & 0x00FF0000) >> 16;
	disable_req.context_id = tgt->context_id;
	disable_req.conn_id = tgt->fcoe_conn_id;
	disable_req.vlan_tag = interface->vlan_id <<
				FCOE_KWQE_CONN_ENABLE_DISABLE_VLAN_ID_SHIFT;
	disable_req.vlan_tag |=
			3 << FCOE_KWQE_CONN_ENABLE_DISABLE_PRIORITY_SHIFT;
	disable_req.vlan_flag = interface->vlan_enabled;

	kwqe_arr[0] = (struct kwqe *) &disable_req;

	if (hba->cnic && hba->cnic->submit_kwqes)
		rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes);

	return rc;
}

/**
 * bnx2fc_send_session_destroy_req - initiates FCoE Session destroy
 *
 * @hba: adapter structure pointer
 * @tgt: bnx2fc_rport structure pointer
 */
int bnx2fc_send_session_destroy_req(struct bnx2fc_hba *hba,
					struct bnx2fc_rport *tgt)
{
	struct fcoe_kwqe_conn_destroy destroy_req;
	struct kwqe *kwqe_arr[2];
	int num_kwqes = 1;
	int rc = 0;

	memset(&destroy_req, 0x00, sizeof(struct fcoe_kwqe_conn_destroy));
	destroy_req.hdr.op_code = FCOE_KWQE_OPCODE_DESTROY_CONN;
	destroy_req.hdr.flags =
		(FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);

	destroy_req.context_id = tgt->context_id;
	destroy_req.conn_id = tgt->fcoe_conn_id;

	kwqe_arr[0] = (struct kwqe *) &destroy_req;

	if (hba->cnic && hba->cnic->submit_kwqes)
		rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes);

	return rc;
}

static bool is_valid_lport(struct bnx2fc_hba *hba, struct fc_lport *lport)
{
	struct bnx2fc_lport *blport;

	spin_lock_bh(&hba->hba_lock);
	list_for_each_entry(blport, &hba->vports, list) {
		if (blport->lport == lport) {
			spin_unlock_bh(&hba->hba_lock);
			return true;
		}
	}
	spin_unlock_bh(&hba->hba_lock);
	return false;
}
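
/*
 * is_valid_lport() guards the deferred unsolicited-ELS path below: by
 * the time the work item runs, the lport may already have gone away,
 * so the frame is handed to libfc only if the lport is still on the
 * hba's vports list.
 */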

static void bnx2fc_unsol_els_work(struct work_struct *work)
{
	struct bnx2fc_unsol_els *unsol_els;
	struct fc_lport *lport;
	struct bnx2fc_hba *hba;
	struct fc_frame *fp;

	unsol_els = container_of(work, struct bnx2fc_unsol_els, unsol_els_work);
	lport = unsol_els->lport;
	fp = unsol_els->fp;
	hba = unsol_els->hba;
	if (is_valid_lport(hba, lport))
		fc_exch_recv(lport, fp);
	kfree(unsol_els);
}

void bnx2fc_process_l2_frame_compl(struct bnx2fc_rport *tgt,
				   unsigned char *buf,
				   u32 frame_len, u16 l2_oxid)
{
	struct fcoe_port *port = tgt->port;
	struct fc_lport *lport = port->lport;
	struct bnx2fc_interface *interface = port->priv;
	struct bnx2fc_unsol_els *unsol_els;
	struct fc_frame_header *fh;
	struct fc_frame *fp;
	struct sk_buff *skb;
	u32 payload_len;
	u32 crc;
	u8 op;

	unsol_els = kzalloc(sizeof(*unsol_els), GFP_ATOMIC);
	if (!unsol_els) {
		BNX2FC_TGT_DBG(tgt, "Unable to allocate unsol_work\n");
		return;
	}

	BNX2FC_TGT_DBG(tgt, "l2_frame_compl l2_oxid = 0x%x, frame_len = %d\n",
		l2_oxid, frame_len);

	payload_len = frame_len - sizeof(struct fc_frame_header);

	fp = fc_frame_alloc(lport, payload_len);
	if (!fp) {
		printk(KERN_ERR PFX "fc_frame_alloc failure\n");
		kfree(unsol_els);
		return;
	}

	fh = (struct fc_frame_header *) fc_frame_header_get(fp);
	/* Copy FC Frame header and payload into the frame */
	memcpy(fh, buf, frame_len);

	if (l2_oxid != FC_XID_UNKNOWN)
		fh->fh_ox_id = htons(l2_oxid);

	skb = fp_skb(fp);

	if ((fh->fh_r_ctl == FC_RCTL_ELS_REQ) ||
	    (fh->fh_r_ctl == FC_RCTL_ELS_REP)) {

		if (fh->fh_type == FC_TYPE_ELS) {
			op = fc_frame_payload_op(fp);
			if ((op == ELS_TEST) || (op == ELS_ESTC) ||
			    (op == ELS_FAN) || (op == ELS_CSU)) {
				/*
				 * No need to reply for these
				 * ELS requests
				 */
				printk(KERN_ERR PFX "dropping ELS 0x%x\n", op);
				kfree_skb(skb);
				kfree(unsol_els);
				return;
			}
		}
		crc = fcoe_fc_crc(fp);
		fc_frame_init(fp);
		fr_dev(fp) = lport;
		fr_sof(fp) = FC_SOF_I3;
		fr_eof(fp) = FC_EOF_T;
		fr_crc(fp) = cpu_to_le32(~crc);
		unsol_els->lport = lport;
		unsol_els->hba = interface->hba;
		unsol_els->fp = fp;
		INIT_WORK(&unsol_els->unsol_els_work, bnx2fc_unsol_els_work);
		queue_work(bnx2fc_wq, &unsol_els->unsol_els_work);
	} else {
		BNX2FC_HBA_DBG(lport, "fh_r_ctl = 0x%x\n", fh->fh_r_ctl);
		kfree_skb(skb);
		kfree(unsol_els);
	}
}
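
/*
 * Delivery is deferred through bnx2fc_wq rather than calling
 * fc_exch_recv() directly: this completion path runs in bottom-half
 * context, while the work item runs later in process context and
 * re-validates the lport (see is_valid_lport()) before handing the
 * frame to libfc.
 */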

static void bnx2fc_process_unsol_compl(struct bnx2fc_rport *tgt, u16 wqe)
{
	u8 num_rq;
	struct fcoe_err_report_entry *err_entry;
	unsigned char *rq_data;
	unsigned char *buf = NULL, *buf1;
	int i;
	u16 xid;
	u32 frame_len, len;
	struct bnx2fc_cmd *io_req = NULL;
	struct fcoe_task_ctx_entry *task, *task_page;
	struct bnx2fc_interface *interface = tgt->port->priv;
	struct bnx2fc_hba *hba = interface->hba;
	int task_idx, index;
	int rc = 0;
	u64 err_warn_bit_map;
	u8 err_warn = 0xff;

	BNX2FC_TGT_DBG(tgt, "Entered UNSOL COMPLETION wqe = 0x%x\n", wqe);
	switch (wqe & FCOE_UNSOLICITED_CQE_SUBTYPE) {
	case FCOE_UNSOLICITED_FRAME_CQE_TYPE:
		frame_len = (wqe & FCOE_UNSOLICITED_CQE_PKT_LEN) >>
			     FCOE_UNSOLICITED_CQE_PKT_LEN_SHIFT;

		num_rq = (frame_len + BNX2FC_RQ_BUF_SZ - 1) / BNX2FC_RQ_BUF_SZ;

		spin_lock_bh(&tgt->tgt_lock);
		rq_data = (unsigned char *)bnx2fc_get_next_rqe(tgt, num_rq);
		spin_unlock_bh(&tgt->tgt_lock);

		if (rq_data) {
			buf = rq_data;
		} else {
			buf1 = buf = kmalloc((num_rq * BNX2FC_RQ_BUF_SZ),
					      GFP_ATOMIC);

			if (!buf1) {
				BNX2FC_TGT_DBG(tgt, "Memory alloc failure\n");
				break;
			}

			for (i = 0; i < num_rq; i++) {
				spin_lock_bh(&tgt->tgt_lock);
				rq_data = (unsigned char *)
					   bnx2fc_get_next_rqe(tgt, 1);
				spin_unlock_bh(&tgt->tgt_lock);
				len = BNX2FC_RQ_BUF_SZ;
				memcpy(buf1, rq_data, len);
				buf1 += len;
			}
		}
		bnx2fc_process_l2_frame_compl(tgt, buf, frame_len,
					      FC_XID_UNKNOWN);

		if (buf != rq_data)
			kfree(buf);
		spin_lock_bh(&tgt->tgt_lock);
		bnx2fc_return_rqe(tgt, num_rq);
		spin_unlock_bh(&tgt->tgt_lock);
		break;

	case FCOE_ERROR_DETECTION_CQE_TYPE:
		/*
		 * In case of error reporting CQE a single RQ entry
		 * is consumed.
		 */
		spin_lock_bh(&tgt->tgt_lock);
		num_rq = 1;
		err_entry = (struct fcoe_err_report_entry *)
			     bnx2fc_get_next_rqe(tgt, 1);
		xid = err_entry->fc_hdr.ox_id;
		BNX2FC_TGT_DBG(tgt, "Unsol Error Frame OX_ID = 0x%x\n", xid);
		BNX2FC_TGT_DBG(tgt, "err_warn_bitmap = %08x:%08x\n",
			err_entry->data.err_warn_bitmap_hi,
			err_entry->data.err_warn_bitmap_lo);
		BNX2FC_TGT_DBG(tgt, "buf_offsets - tx = 0x%x, rx = 0x%x\n",
			err_entry->data.tx_buf_off, err_entry->data.rx_buf_off);

		if (xid > BNX2FC_MAX_XID) {
			BNX2FC_TGT_DBG(tgt, "xid(0x%x) out of FW range\n",
				   xid);
			goto ret_err_rqe;
		}

		task_idx = xid / BNX2FC_TASKS_PER_PAGE;
		index = xid % BNX2FC_TASKS_PER_PAGE;
		task_page = (struct fcoe_task_ctx_entry *)
					hba->task_ctx[task_idx];
		task = &(task_page[index]);

		io_req = (struct bnx2fc_cmd *)hba->cmd_mgr->cmds[xid];
		if (!io_req)
			goto ret_err_rqe;

		if (io_req->cmd_type != BNX2FC_SCSI_CMD) {
			printk(KERN_ERR PFX "err_warn: Not a SCSI cmd\n");
			goto ret_err_rqe;
		}

		if (test_and_clear_bit(BNX2FC_FLAG_IO_CLEANUP,
				       &io_req->req_flags)) {
			BNX2FC_IO_DBG(io_req, "unsol_err: cleanup in "
					    "progress.. ignore unsol err\n");
			goto ret_err_rqe;
		}

		err_warn_bit_map = (u64)
			((u64)err_entry->data.err_warn_bitmap_hi << 32) |
			(u64)err_entry->data.err_warn_bitmap_lo;
		for (i = 0; i < BNX2FC_NUM_ERR_BITS; i++) {
			if (err_warn_bit_map & (u64)((u64)1 << i)) {
				err_warn = i;
				break;
			}
		}

		/*
		 * If ABTS is already in progress, and FW error is
		 * received after that, do not cancel the timeout_work
		 * and let the error recovery continue by explicitly
		 * logging out the target, when the ABTS eventually
		 * times out.
		 */
		if (test_bit(BNX2FC_FLAG_ISSUE_ABTS, &io_req->req_flags)) {
			printk(KERN_ERR PFX "err_warn: io_req (0x%x) already "
					    "in ABTS processing\n", xid);
			goto ret_err_rqe;
		}
		BNX2FC_TGT_DBG(tgt, "err = 0x%x\n", err_warn);
		if (tgt->dev_type != TYPE_TAPE)
			goto skip_rec;
		switch (err_warn) {
		case FCOE_ERROR_CODE_REC_TOV_TIMER_EXPIRATION:
		case FCOE_ERROR_CODE_DATA_OOO_RO:
		case FCOE_ERROR_CODE_COMMON_INCORRECT_SEQ_CNT:
		case FCOE_ERROR_CODE_DATA_SOFI3_SEQ_ACTIVE_SET:
		case FCOE_ERROR_CODE_FCP_RSP_OPENED_SEQ:
		case FCOE_ERROR_CODE_DATA_SOFN_SEQ_ACTIVE_RESET:
			BNX2FC_TGT_DBG(tgt, "REC TOV popped for xid - 0x%x\n",
				   xid);
			memset(&io_req->err_entry, 0,
			       sizeof(struct fcoe_err_report_entry));
			memcpy(&io_req->err_entry, err_entry,
			       sizeof(struct fcoe_err_report_entry));
			if (!test_bit(BNX2FC_FLAG_SRR_SENT,
				      &io_req->req_flags)) {
				spin_unlock_bh(&tgt->tgt_lock);
				rc = bnx2fc_send_rec(io_req);
				spin_lock_bh(&tgt->tgt_lock);

				if (rc)
					goto skip_rec;
			} else
				printk(KERN_ERR PFX "SRR in progress\n");
			goto ret_err_rqe;
			break;
		default:
			break;
		}

skip_rec:
		set_bit(BNX2FC_FLAG_ISSUE_ABTS, &io_req->req_flags);
		/*
		 * Cancel the timeout_work, as we received IO
		 * completion with FW error.
		 */
		if (cancel_delayed_work(&io_req->timeout_work))
			kref_put(&io_req->refcount, bnx2fc_cmd_release);

		rc = bnx2fc_initiate_abts(io_req);
		if (rc != SUCCESS) {
			printk(KERN_ERR PFX "err_warn: initiate_abts "
				"failed xid = 0x%x. issue cleanup\n",
				io_req->xid);
			bnx2fc_initiate_cleanup(io_req);
		}
ret_err_rqe:
		bnx2fc_return_rqe(tgt, 1);
		spin_unlock_bh(&tgt->tgt_lock);
		break;

	case FCOE_WARNING_DETECTION_CQE_TYPE:
		/*
		 * In case of warning reporting CQE a single RQ entry
		 * is consumed.
		 */
		spin_lock_bh(&tgt->tgt_lock);
		num_rq = 1;
		err_entry = (struct fcoe_err_report_entry *)
			     bnx2fc_get_next_rqe(tgt, 1);
		xid = cpu_to_be16(err_entry->fc_hdr.ox_id);
		BNX2FC_TGT_DBG(tgt, "Unsol Warning Frame OX_ID = 0x%x\n", xid);
		BNX2FC_TGT_DBG(tgt, "err_warn_bitmap = %08x:%08x\n",
			err_entry->data.err_warn_bitmap_hi,
			err_entry->data.err_warn_bitmap_lo);
		BNX2FC_TGT_DBG(tgt, "buf_offsets - tx = 0x%x, rx = 0x%x\n",
			err_entry->data.tx_buf_off, err_entry->data.rx_buf_off);

		if (xid > BNX2FC_MAX_XID) {
			BNX2FC_TGT_DBG(tgt, "xid(0x%x) out of FW range\n", xid);
			goto ret_warn_rqe;
		}

		err_warn_bit_map = (u64)
			((u64)err_entry->data.err_warn_bitmap_hi << 32) |
			(u64)err_entry->data.err_warn_bitmap_lo;
		for (i = 0; i < BNX2FC_NUM_ERR_BITS; i++) {
			if (err_warn_bit_map & (u64) (1 << i)) {
				err_warn = i;
				break;
			}
		}
		BNX2FC_TGT_DBG(tgt, "warn = 0x%x\n", err_warn);

		task_idx = xid / BNX2FC_TASKS_PER_PAGE;
		index = xid % BNX2FC_TASKS_PER_PAGE;
		task_page = (struct fcoe_task_ctx_entry *)
			     interface->hba->task_ctx[task_idx];
		task = &(task_page[index]);
		io_req = (struct bnx2fc_cmd *)hba->cmd_mgr->cmds[xid];
		if (!io_req)
			goto ret_warn_rqe;

		if (io_req->cmd_type != BNX2FC_SCSI_CMD) {
			printk(KERN_ERR PFX "err_warn: Not a SCSI cmd\n");
			goto ret_warn_rqe;
		}

		memset(&io_req->err_entry, 0,
		       sizeof(struct fcoe_err_report_entry));
		memcpy(&io_req->err_entry, err_entry,
		       sizeof(struct fcoe_err_report_entry));

		if (err_warn == FCOE_ERROR_CODE_REC_TOV_TIMER_EXPIRATION)
			/* REC_TOV is not a warning code */
			BUG_ON(1);
		else
			BNX2FC_TGT_DBG(tgt, "Unsolicited warning\n");
ret_warn_rqe:
		bnx2fc_return_rqe(tgt, 1);
		spin_unlock_bh(&tgt->tgt_lock);
		break;

	default:
		printk(KERN_ERR PFX "Unsol Compl: Invalid CQE Subtype\n");
		break;
	}
}
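
/*
 * The err_warn scans above pick the lowest set bit of a 64-bit bitmap
 * assembled from two 32-bit halves.  Worked example (illustrative):
 * bitmap_hi = 0x0, bitmap_lo = 0x28 gives err_warn_bit_map = 0x28,
 * and the loop stops at i = 3 -- when several bits are set, the first
 * (least significant) error code reported wins.
 */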

void bnx2fc_process_cq_compl(struct bnx2fc_rport *tgt, u16 wqe)
{
	struct fcoe_task_ctx_entry *task;
	struct fcoe_task_ctx_entry *task_page;
	struct fcoe_port *port = tgt->port;
	struct bnx2fc_interface *interface = port->priv;
	struct bnx2fc_hba *hba = interface->hba;
	struct bnx2fc_cmd *io_req;
	int task_idx, index;
	u16 xid;
	u8  cmd_type;
	u8 rx_state = 0;
	u8 num_rq;

	spin_lock_bh(&tgt->tgt_lock);
	xid = wqe & FCOE_PEND_WQ_CQE_TASK_ID;
	if (xid >= BNX2FC_MAX_TASKS) {
		printk(KERN_ERR PFX "ERROR:xid out of range\n");
		spin_unlock_bh(&tgt->tgt_lock);
		return;
	}
	task_idx = xid / BNX2FC_TASKS_PER_PAGE;
	index = xid % BNX2FC_TASKS_PER_PAGE;
	task_page = (struct fcoe_task_ctx_entry *)hba->task_ctx[task_idx];
	task = &(task_page[index]);

	num_rq = ((task->rxwr_txrd.var_ctx.rx_flags &
		   FCOE_TCE_RX_WR_TX_RD_VAR_NUM_RQ_WQE) >>
		   FCOE_TCE_RX_WR_TX_RD_VAR_NUM_RQ_WQE_SHIFT);

	io_req = (struct bnx2fc_cmd *)hba->cmd_mgr->cmds[xid];

	if (io_req == NULL) {
		printk(KERN_ERR PFX "ERROR? cq_compl - io_req is NULL\n");
		spin_unlock_bh(&tgt->tgt_lock);
		return;
	}

	/* Timestamp IO completion time */
	cmd_type = io_req->cmd_type;

	rx_state = ((task->rxwr_txrd.var_ctx.rx_flags &
		    FCOE_TCE_RX_WR_TX_RD_VAR_RX_STATE) >>
		    FCOE_TCE_RX_WR_TX_RD_VAR_RX_STATE_SHIFT);

	/* Process other IO completion types */
	switch (cmd_type) {
	case BNX2FC_SCSI_CMD:
		if (rx_state == FCOE_TASK_RX_STATE_COMPLETED) {
			bnx2fc_process_scsi_cmd_compl(io_req, task, num_rq);
			spin_unlock_bh(&tgt->tgt_lock);
			return;
		}

		if (rx_state == FCOE_TASK_RX_STATE_ABTS_COMPLETED)
			bnx2fc_process_abts_compl(io_req, task, num_rq);
		else if (rx_state ==
			 FCOE_TASK_RX_STATE_EXCHANGE_CLEANUP_COMPLETED)
			bnx2fc_process_cleanup_compl(io_req, task, num_rq);
		else
			printk(KERN_ERR PFX "Invalid rx state - %d\n",
				rx_state);
		break;

	case BNX2FC_TASK_MGMT_CMD:
		BNX2FC_IO_DBG(io_req, "Processing TM complete\n");
		bnx2fc_process_tm_compl(io_req, task, num_rq);
		break;

	case BNX2FC_ABTS:
		/*
		 * ABTS request received by firmware. ABTS response
		 * will be delivered to the task belonging to the IO
		 * that was aborted
		 */
		BNX2FC_IO_DBG(io_req, "cq_compl- ABTS sent out by fw\n");
		kref_put(&io_req->refcount, bnx2fc_cmd_release);
		break;

	case BNX2FC_ELS:
		if (rx_state == FCOE_TASK_RX_STATE_COMPLETED)
			bnx2fc_process_els_compl(io_req, task, num_rq);
		else if (rx_state == FCOE_TASK_RX_STATE_ABTS_COMPLETED)
			bnx2fc_process_abts_compl(io_req, task, num_rq);
		else if (rx_state ==
			 FCOE_TASK_RX_STATE_EXCHANGE_CLEANUP_COMPLETED)
			bnx2fc_process_cleanup_compl(io_req, task, num_rq);
		else
			printk(KERN_ERR PFX "Invalid rx state = %d\n",
				rx_state);
		break;

	case BNX2FC_CLEANUP:
		BNX2FC_IO_DBG(io_req, "cq_compl- cleanup resp rcvd\n");
		kref_put(&io_req->refcount, bnx2fc_cmd_release);
		break;

	case BNX2FC_SEQ_CLEANUP:
		BNX2FC_IO_DBG(io_req, "cq_compl(0x%x) - seq cleanup resp\n",
			      io_req->xid);
		bnx2fc_process_seq_cleanup_compl(io_req, task, rx_state);
		kref_put(&io_req->refcount, bnx2fc_cmd_release);
		break;

	default:
		printk(KERN_ERR PFX "Invalid cmd_type %d\n", cmd_type);
		break;
	}
	spin_unlock_bh(&tgt->tgt_lock);
}

void bnx2fc_arm_cq(struct bnx2fc_rport *tgt)
{
	struct b577xx_fcoe_rx_doorbell *rx_db = &tgt->rx_db;
	u32 msg;

	wmb();
	rx_db->doorbell_cq_cons = tgt->cq_cons_idx | (tgt->cq_curr_toggle_bit <<
			FCOE_CQE_TOGGLE_BIT_SHIFT);
	msg = *((u32 *)rx_db);
	writel(cpu_to_le32(msg), tgt->ctx_base);
	mmiowb();

}

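/*
 * Doorbell ordering note: the wmb() makes all prior stores to the
 * doorbell structure and rings visible before the single 32-bit
 * doorbell write is posted via writel(); mmiowb() then orders that
 * MMIO write against MMIO issued from other CPUs before a lock is
 * released.  bnx2fc_ring_doorbell() below follows the same
 * wmb()/writel()/mmiowb() pattern for the send queue.
 */
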
struct bnx2fc_work *bnx2fc_alloc_work(struct bnx2fc_rport *tgt, u16 wqe)
{
	struct bnx2fc_work *work;
	work = kzalloc(sizeof(struct bnx2fc_work), GFP_ATOMIC);
	if (!work)
		return NULL;

	INIT_LIST_HEAD(&work->list);
	work->tgt = tgt;
	work->wqe = wqe;
	return work;
}

int bnx2fc_process_new_cqes(struct bnx2fc_rport *tgt)
{
	struct fcoe_cqe *cq;
	u32 cq_cons;
	struct fcoe_cqe *cqe;
	u32 num_free_sqes = 0;
	u16 wqe;

	/*
	 * cq_lock is a low contention lock used to protect
	 * the CQ data structure from being freed up during
	 * the upload operation
	 */
	spin_lock_bh(&tgt->cq_lock);

	if (!tgt->cq) {
		printk(KERN_ERR PFX "process_new_cqes: cq is NULL\n");
		spin_unlock_bh(&tgt->cq_lock);
		return 0;
	}
	cq = tgt->cq;
	cq_cons = tgt->cq_cons_idx;
	cqe = &cq[cq_cons];

	while (((wqe = cqe->wqe) & FCOE_CQE_TOGGLE_BIT) ==
	       (tgt->cq_curr_toggle_bit <<
	       FCOE_CQE_TOGGLE_BIT_SHIFT)) {

		/* new entry on the cq */
		if (wqe & FCOE_CQE_CQE_TYPE) {
			/* Unsolicited event notification */
			bnx2fc_process_unsol_compl(tgt, wqe);
		} else {
			/* Pending work request completion */
			struct bnx2fc_work *work = NULL;
			struct bnx2fc_percpu_s *fps = NULL;
			unsigned int cpu = wqe % num_possible_cpus();

			fps = &per_cpu(bnx2fc_percpu, cpu);
			spin_lock_bh(&fps->fp_work_lock);
			if (unlikely(!fps->iothread))
				goto unlock;

			work = bnx2fc_alloc_work(tgt, wqe);
			if (work)
				list_add_tail(&work->list,
					      &fps->work_list);
unlock:
			spin_unlock_bh(&fps->fp_work_lock);

			/* Pending work request completion */
			if (fps->iothread && work)
				wake_up_process(fps->iothread);
			else
				bnx2fc_process_cq_compl(tgt, wqe);
		}
		cqe++;
		tgt->cq_cons_idx++;
		num_free_sqes++;

		if (tgt->cq_cons_idx == BNX2FC_CQ_WQES_MAX) {
			tgt->cq_cons_idx = 0;
			cqe = cq;
			tgt->cq_curr_toggle_bit =
				1 - tgt->cq_curr_toggle_bit;
		}
	}
	if (num_free_sqes) {
		bnx2fc_arm_cq(tgt);
		atomic_add(num_free_sqes, &tgt->free_sqes);
	}
	spin_unlock_bh(&tgt->cq_lock);
	return 0;
}
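
/*
 * CQ toggle-bit sketch: the consumer owns a CQE only while the CQE's
 * toggle bit matches cq_curr_toggle_bit.  On each full wrap of the
 * ring the expected bit is flipped (1 - cq_curr_toggle_bit), so stale
 * entries from the previous lap -- whose bit still carries the old
 * value -- terminate the while loop above without any explicit
 * producer index exchange with the firmware.
 */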

/**
 * bnx2fc_fastpath_notification - process global event queue (KCQ)
 *
 * @hba:		adapter structure pointer
 * @new_cqe_kcqe:	pointer to newly DMA'd KCQ entry
 *
 * Fast path event notification handler
 */
static void bnx2fc_fastpath_notification(struct bnx2fc_hba *hba,
					struct fcoe_kcqe *new_cqe_kcqe)
{
	u32 conn_id = new_cqe_kcqe->fcoe_conn_id;
	struct bnx2fc_rport *tgt = hba->tgt_ofld_list[conn_id];

	if (!tgt) {
		printk(KERN_ERR PFX "conn_id 0x%x not valid\n", conn_id);
		return;
	}

	bnx2fc_process_new_cqes(tgt);
}

/**
 * bnx2fc_process_ofld_cmpl - process FCoE session offload completion
 *
 * @hba:	adapter structure pointer
 * @ofld_kcqe:	connection offload kcqe pointer
 *
 * handle session offload completion, enable the session if offload is
 * successful.
 */
static void bnx2fc_process_ofld_cmpl(struct bnx2fc_hba *hba,
					struct fcoe_kcqe *ofld_kcqe)
{
	struct bnx2fc_rport		*tgt;
	struct fcoe_port		*port;
	struct bnx2fc_interface		*interface;
	u32 conn_id;
	u32 context_id;
	int rc;

	conn_id = ofld_kcqe->fcoe_conn_id;
	context_id = ofld_kcqe->fcoe_conn_context_id;
	tgt = hba->tgt_ofld_list[conn_id];
	if (!tgt) {
		printk(KERN_ALERT PFX "ERROR:ofld_cmpl: No pending ofld req\n");
		return;
	}
	BNX2FC_TGT_DBG(tgt, "Entered ofld compl - context_id = 0x%x\n",
		ofld_kcqe->fcoe_conn_context_id);
	port = tgt->port;
	interface = tgt->port->priv;
	if (hba != interface->hba) {
		printk(KERN_ERR PFX "ERROR:ofld_cmpl: HBA mis-match\n");
		goto ofld_cmpl_err;
	}
	/*
	 * cnic has allocated a context_id for this session; use this
	 * while enabling the session.
	 */
	tgt->context_id = context_id;
	if (ofld_kcqe->completion_status) {
		if (ofld_kcqe->completion_status ==
		    FCOE_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAILURE) {
			printk(KERN_ERR PFX "unable to allocate FCoE context "
				"resources\n");
			set_bit(BNX2FC_FLAG_CTX_ALLOC_FAILURE, &tgt->flags);
		}
		goto ofld_cmpl_err;
	} else {

		/* now enable the session */
		rc = bnx2fc_send_session_enable_req(port, tgt);
		if (rc) {
			printk(KERN_ERR PFX "enable session failed\n");
			goto ofld_cmpl_err;
		}
	}
	return;
ofld_cmpl_err:
	set_bit(BNX2FC_FLAG_OFLD_REQ_CMPL, &tgt->flags);
	wake_up_interruptible(&tgt->ofld_wait);
}

/**
 * bnx2fc_process_enable_conn_cmpl - process FCoE session enable completion
 *
 * @hba:	adapter structure pointer
 * @ofld_kcqe:	connection enable kcqe pointer
 *
 * handle session enable completion, mark the rport as ready
 */
static void bnx2fc_process_enable_conn_cmpl(struct bnx2fc_hba *hba,
						struct fcoe_kcqe *ofld_kcqe)
{
	struct bnx2fc_rport		*tgt;
	struct bnx2fc_interface		*interface;
	u32 conn_id;
	u32 context_id;

	context_id = ofld_kcqe->fcoe_conn_context_id;
	conn_id = ofld_kcqe->fcoe_conn_id;
	tgt = hba->tgt_ofld_list[conn_id];
	if (!tgt) {
		printk(KERN_ERR PFX "ERROR:enbl_cmpl: No pending ofld req\n");
		return;
	}

	BNX2FC_TGT_DBG(tgt, "Enable compl - context_id = 0x%x\n",
		ofld_kcqe->fcoe_conn_context_id);

	/*
	 * context_id should be the same for this target during offload
	 * and enable
	 */
	if (tgt->context_id != context_id) {
		printk(KERN_ERR PFX "context id mis-match\n");
		return;
	}
	interface = tgt->port->priv;
	if (hba != interface->hba) {
		printk(KERN_ERR PFX "bnx2fc-enbl_cmpl: HBA mis-match\n");
		goto enbl_cmpl_err;
	}
	if (ofld_kcqe->completion_status)
		goto enbl_cmpl_err;
	else {
		/* enable successful - rport ready for issuing IOs */
		set_bit(BNX2FC_FLAG_OFFLOADED, &tgt->flags);
		set_bit(BNX2FC_FLAG_OFLD_REQ_CMPL, &tgt->flags);
		wake_up_interruptible(&tgt->ofld_wait);
	}
	return;

enbl_cmpl_err:
	set_bit(BNX2FC_FLAG_OFLD_REQ_CMPL, &tgt->flags);
	wake_up_interruptible(&tgt->ofld_wait);
}

static void bnx2fc_process_conn_disable_cmpl(struct bnx2fc_hba *hba,
					struct fcoe_kcqe *disable_kcqe)
{

	struct bnx2fc_rport		*tgt;
	u32 conn_id;

	conn_id = disable_kcqe->fcoe_conn_id;
	tgt = hba->tgt_ofld_list[conn_id];
	if (!tgt) {
		printk(KERN_ERR PFX "ERROR: disable_cmpl: No disable req\n");
		return;
	}

	BNX2FC_TGT_DBG(tgt, PFX "disable_cmpl: conn_id %d\n", conn_id);

	if (disable_kcqe->completion_status) {
		printk(KERN_ERR PFX "Disable failed with cmpl status %d\n",
			disable_kcqe->completion_status);
		return;
	} else {
		/* disable successful */
		BNX2FC_TGT_DBG(tgt, "disable successful\n");
		clear_bit(BNX2FC_FLAG_OFFLOADED, &tgt->flags);
		set_bit(BNX2FC_FLAG_DISABLED, &tgt->flags);
		set_bit(BNX2FC_FLAG_UPLD_REQ_COMPL, &tgt->flags);
		wake_up_interruptible(&tgt->upld_wait);
	}
}

static void bnx2fc_process_conn_destroy_cmpl(struct bnx2fc_hba *hba,
					struct fcoe_kcqe *destroy_kcqe)
{
	struct bnx2fc_rport		*tgt;
	u32 conn_id;

	conn_id = destroy_kcqe->fcoe_conn_id;
	tgt = hba->tgt_ofld_list[conn_id];
	if (!tgt) {
		printk(KERN_ERR PFX "destroy_cmpl: No destroy req\n");
		return;
	}

	BNX2FC_TGT_DBG(tgt, "destroy_cmpl: conn_id %d\n", conn_id);

	if (destroy_kcqe->completion_status) {
		printk(KERN_ERR PFX "Destroy conn failed, cmpl status %d\n",
			destroy_kcqe->completion_status);
		return;
	} else {
		/* destroy successful */
		BNX2FC_TGT_DBG(tgt, "upload successful\n");
		clear_bit(BNX2FC_FLAG_DISABLED, &tgt->flags);
		set_bit(BNX2FC_FLAG_DESTROYED, &tgt->flags);
		set_bit(BNX2FC_FLAG_UPLD_REQ_COMPL, &tgt->flags);
		wake_up_interruptible(&tgt->upld_wait);
	}
}

static void bnx2fc_init_failure(struct bnx2fc_hba *hba, u32 err_code)
{
	switch (err_code) {
	case FCOE_KCQE_COMPLETION_STATUS_INVALID_OPCODE:
		printk(KERN_ERR PFX "init_failure due to invalid opcode\n");
		break;

	case FCOE_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAILURE:
		printk(KERN_ERR PFX "init failed due to ctx alloc failure\n");
		break;

	case FCOE_KCQE_COMPLETION_STATUS_NIC_ERROR:
		printk(KERN_ERR PFX "init_failure due to NIC error\n");
		break;
	case FCOE_KCQE_COMPLETION_STATUS_ERROR:
		printk(KERN_ERR PFX "init failure due to compl status err\n");
		break;
	case FCOE_KCQE_COMPLETION_STATUS_WRONG_HSI_VERSION:
		printk(KERN_ERR PFX "init failure due to HSI mismatch\n");
		break;
	default:
		printk(KERN_ERR PFX "Unknown Error code %d\n", err_code);
	}
}

/**
 * bnx2fc_indicate_kcqe - process KCQE
 *
 * @context:	adapter structure pointer
 * @kcq:	kcqe pointer
 * @num_cqe:	Number of completion queue elements
 *
 * Generic KCQ event handler
 */
void bnx2fc_indicate_kcqe(void *context, struct kcqe *kcq[],
					u32 num_cqe)
{
	struct bnx2fc_hba *hba = (struct bnx2fc_hba *)context;
	int i = 0;
	struct fcoe_kcqe *kcqe = NULL;

	while (i < num_cqe) {
		kcqe = (struct fcoe_kcqe *) kcq[i++];

		switch (kcqe->op_code) {
		case FCOE_KCQE_OPCODE_CQ_EVENT_NOTIFICATION:
			bnx2fc_fastpath_notification(hba, kcqe);
			break;

		case FCOE_KCQE_OPCODE_OFFLOAD_CONN:
			bnx2fc_process_ofld_cmpl(hba, kcqe);
			break;

		case FCOE_KCQE_OPCODE_ENABLE_CONN:
			bnx2fc_process_enable_conn_cmpl(hba, kcqe);
			break;

		case FCOE_KCQE_OPCODE_INIT_FUNC:
			if (kcqe->completion_status !=
			    FCOE_KCQE_COMPLETION_STATUS_SUCCESS) {
				bnx2fc_init_failure(hba,
						kcqe->completion_status);
			} else {
				set_bit(ADAPTER_STATE_UP, &hba->adapter_state);
				bnx2fc_get_link_state(hba);
				printk(KERN_INFO PFX "[%.2x]: FCOE_INIT passed\n",
					(u8)hba->pcidev->bus->number);
			}
			break;

		case FCOE_KCQE_OPCODE_DESTROY_FUNC:
			if (kcqe->completion_status !=
			    FCOE_KCQE_COMPLETION_STATUS_SUCCESS) {

				printk(KERN_ERR PFX "DESTROY failed\n");
			} else {
				printk(KERN_ERR PFX "DESTROY success\n");
			}
			set_bit(BNX2FC_FLAG_DESTROY_CMPL, &hba->flags);
			wake_up_interruptible(&hba->destroy_wait);
			break;

		case FCOE_KCQE_OPCODE_DISABLE_CONN:
			bnx2fc_process_conn_disable_cmpl(hba, kcqe);
			break;

		case FCOE_KCQE_OPCODE_DESTROY_CONN:
			bnx2fc_process_conn_destroy_cmpl(hba, kcqe);
			break;

		case FCOE_KCQE_OPCODE_STAT_FUNC:
			if (kcqe->completion_status !=
			    FCOE_KCQE_COMPLETION_STATUS_SUCCESS)
				printk(KERN_ERR PFX "STAT failed\n");
			complete(&hba->stat_req_done);
			break;

		case FCOE_KCQE_OPCODE_FCOE_ERROR:
			/* fall thru */
		default:
			printk(KERN_ERR PFX "unknown opcode 0x%x\n",
				kcqe->op_code);
		}
	}
}

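/*
 * Note the split between queues: per-connection I/O completions arrive
 * on the connection's own CQ (bnx2fc_process_new_cqes()), while the
 * KCQ handler above only sees slow-path control events -- function
 * init/destroy, session offload/enable/disable/destroy and statistics
 * -- plus the CQ_EVENT_NOTIFICATION that kicks the fast path.
 */
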
void bnx2fc_add_2_sq(struct bnx2fc_rport *tgt, u16 xid)
{
	struct fcoe_sqe *sqe;

	sqe = &tgt->sq[tgt->sq_prod_idx];

	/* Fill SQ WQE */
	sqe->wqe = xid << FCOE_SQE_TASK_ID_SHIFT;
	sqe->wqe |= tgt->sq_curr_toggle_bit << FCOE_SQE_TOGGLE_BIT_SHIFT;

	/* Advance SQ Prod Idx */
	if (++tgt->sq_prod_idx == BNX2FC_SQ_WQES_MAX) {
		tgt->sq_prod_idx = 0;
		tgt->sq_curr_toggle_bit = 1 - tgt->sq_curr_toggle_bit;
	}
}
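
/*
 * The SQ producer mirrors the CQ consumer scheme: each posted WQE
 * carries the current toggle bit, and the bit flips whenever
 * sq_prod_idx wraps from BNX2FC_SQ_WQES_MAX back to 0, letting the
 * firmware distinguish fresh WQEs from ones left over from the
 * previous lap of the ring.
 */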

void bnx2fc_ring_doorbell(struct bnx2fc_rport *tgt)
{
	struct b577xx_doorbell_set_prod *sq_db = &tgt->sq_db;
	u32 msg;

	wmb();
	sq_db->prod = tgt->sq_prod_idx |
				(tgt->sq_curr_toggle_bit << 15);
	msg = *((u32 *)sq_db);
	writel(cpu_to_le32(msg), tgt->ctx_base);
	mmiowb();

}

int bnx2fc_map_doorbell(struct bnx2fc_rport *tgt)
{
	u32 context_id = tgt->context_id;
	struct fcoe_port *port = tgt->port;
	u32 reg_off;
	resource_size_t reg_base;
	struct bnx2fc_interface *interface = port->priv;
	struct bnx2fc_hba *hba = interface->hba;

	reg_base = pci_resource_start(hba->pcidev,
					BNX2X_DOORBELL_PCI_BAR);
	reg_off = BNX2FC_5771X_DB_PAGE_SIZE *
			(context_id & 0x1FFFF) + DPM_TRIGER_TYPE;
	tgt->ctx_base = ioremap_nocache(reg_base + reg_off, 4);
	if (!tgt->ctx_base)
		return -ENOMEM;
	return 0;
}

char *bnx2fc_get_next_rqe(struct bnx2fc_rport *tgt, u8 num_items)
{
	char *buf = (char *)tgt->rq + (tgt->rq_cons_idx * BNX2FC_RQ_BUF_SZ);

	if (tgt->rq_cons_idx + num_items > BNX2FC_RQ_WQES_MAX)
		return NULL;

	tgt->rq_cons_idx += num_items;

	if (tgt->rq_cons_idx >= BNX2FC_RQ_WQES_MAX)
		tgt->rq_cons_idx -= BNX2FC_RQ_WQES_MAX;

	return buf;
}

void bnx2fc_return_rqe(struct bnx2fc_rport *tgt, u8 num_items)
{
	/* return the rq buffer */
	u32 next_prod_idx = tgt->rq_prod_idx + num_items;
	if ((next_prod_idx & 0x7fff) == BNX2FC_RQ_WQES_MAX) {
		/* Wrap around RQ */
		next_prod_idx += 0x8000 - BNX2FC_RQ_WQES_MAX;
	}
	tgt->rq_prod_idx = next_prod_idx;
	tgt->conn_db->rq_prod = tgt->rq_prod_idx;
}
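
/*
 * The RQ producer lives in a 15-bit index space with bit 15 acting as
 * a toggle, which is also why the offload request seeds rq_prod with
 * 0x8000.  Worked example (with an illustrative BNX2FC_RQ_WQES_MAX of
 * 16): from rq_prod_idx = 0x800e, returning 2 entries gives
 * next_prod_idx = 0x8010; its low 15 bits equal BNX2FC_RQ_WQES_MAX, so
 * 0x8000 - 16 is added, bringing the low 15 bits back to zero while
 * flipping bit 15 -- the toggle the firmware watches.
 */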

void bnx2fc_init_seq_cleanup_task(struct bnx2fc_cmd *seq_clnp_req,
				  struct fcoe_task_ctx_entry *task,
				  struct bnx2fc_cmd *orig_io_req,
				  u32 offset)
{
	struct scsi_cmnd *sc_cmd = orig_io_req->sc_cmd;
	struct bnx2fc_rport *tgt = seq_clnp_req->tgt;
	struct bnx2fc_interface *interface = tgt->port->priv;
	struct fcoe_bd_ctx *bd = orig_io_req->bd_tbl->bd_tbl;
	struct fcoe_task_ctx_entry *orig_task;
	struct fcoe_task_ctx_entry *task_page;
	struct fcoe_ext_mul_sges_ctx *sgl;
	u8 task_type = FCOE_TASK_TYPE_SEQUENCE_CLEANUP;
	u8 orig_task_type;
	u16 orig_xid = orig_io_req->xid;
	u32 context_id = tgt->context_id;
	u64 phys_addr = (u64)orig_io_req->bd_tbl->bd_tbl_dma;
	u32 orig_offset = offset;
	int bd_count;
	int orig_task_idx, index;
	int i;

	memset(task, 0, sizeof(struct fcoe_task_ctx_entry));

	if (sc_cmd->sc_data_direction == DMA_TO_DEVICE)
		orig_task_type = FCOE_TASK_TYPE_WRITE;
	else
		orig_task_type = FCOE_TASK_TYPE_READ;

	/* Tx flags */
	task->txwr_rxrd.const_ctx.tx_flags =
				FCOE_TASK_TX_STATE_SEQUENCE_CLEANUP <<
				FCOE_TCE_TX_WR_RX_RD_CONST_TX_STATE_SHIFT;
	/* init flags */
	task->txwr_rxrd.const_ctx.init_flags = task_type <<
				FCOE_TCE_TX_WR_RX_RD_CONST_TASK_TYPE_SHIFT;
	task->txwr_rxrd.const_ctx.init_flags |= FCOE_TASK_CLASS_TYPE_3 <<
				FCOE_TCE_TX_WR_RX_RD_CONST_CLASS_TYPE_SHIFT;
	task->rxwr_txrd.const_ctx.init_flags = context_id <<
				FCOE_TCE_RX_WR_TX_RD_CONST_CID_SHIFT;

	task->txwr_rxrd.union_ctx.cleanup.ctx.cleaned_task_id = orig_xid;

	task->txwr_rxrd.union_ctx.cleanup.ctx.rolled_tx_seq_cnt = 0;
	task->txwr_rxrd.union_ctx.cleanup.ctx.rolled_tx_data_offset = offset;

	bd_count = orig_io_req->bd_tbl->bd_valid;

	/* obtain the appropriate bd entry from relative offset */
	for (i = 0; i < bd_count; i++) {
		if (offset < bd[i].buf_len)
			break;
		offset -= bd[i].buf_len;
	}
	phys_addr += (i * sizeof(struct fcoe_bd_ctx));

	if (orig_task_type == FCOE_TASK_TYPE_WRITE) {
		task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_addr.lo =
				(u32)phys_addr;
		task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_addr.hi =
				(u32)((u64)phys_addr >> 32);
		task->txwr_only.sgl_ctx.sgl.mul_sgl.sgl_size =
				bd_count;
		task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_off =
				offset; /* adjusted offset */
		task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_idx = i;
	} else {
		orig_task_idx = orig_xid / BNX2FC_TASKS_PER_PAGE;
		index = orig_xid % BNX2FC_TASKS_PER_PAGE;

		task_page = (struct fcoe_task_ctx_entry *)
			     interface->hba->task_ctx[orig_task_idx];
		orig_task = &(task_page[index]);

		/* Multiple SGEs were used for this IO */
		sgl = &task->rxwr_only.union_ctx.read_info.sgl_ctx.sgl;
		sgl->mul_sgl.cur_sge_addr.lo = (u32)phys_addr;
		sgl->mul_sgl.cur_sge_addr.hi = (u32)((u64)phys_addr >> 32);
		sgl->mul_sgl.sgl_size = bd_count;
		sgl->mul_sgl.cur_sge_off = offset; /* adjusted offset */
		sgl->mul_sgl.cur_sge_idx = i;

		memset(&task->rxwr_only.rx_seq_ctx, 0,
		       sizeof(struct fcoe_rx_seq_ctx));
		task->rxwr_only.rx_seq_ctx.low_exp_ro = orig_offset;
		task->rxwr_only.rx_seq_ctx.high_exp_ro = orig_offset;
	}
}

void bnx2fc_init_cleanup_task(struct bnx2fc_cmd *io_req,
			      struct fcoe_task_ctx_entry *task,
			      u16 orig_xid)
{
	u8 task_type = FCOE_TASK_TYPE_EXCHANGE_CLEANUP;
	struct bnx2fc_rport *tgt = io_req->tgt;
	u32 context_id = tgt->context_id;

	memset(task, 0, sizeof(struct fcoe_task_ctx_entry));

	/* Tx Write Rx Read */
	/* init flags */
	task->txwr_rxrd.const_ctx.init_flags = task_type <<
				FCOE_TCE_TX_WR_RX_RD_CONST_TASK_TYPE_SHIFT;
	task->txwr_rxrd.const_ctx.init_flags |= FCOE_TASK_CLASS_TYPE_3 <<
				FCOE_TCE_TX_WR_RX_RD_CONST_CLASS_TYPE_SHIFT;
	if (tgt->dev_type == TYPE_TAPE)
		task->txwr_rxrd.const_ctx.init_flags |=
				FCOE_TASK_DEV_TYPE_TAPE <<
				FCOE_TCE_TX_WR_RX_RD_CONST_DEV_TYPE_SHIFT;
	else
		task->txwr_rxrd.const_ctx.init_flags |=
				FCOE_TASK_DEV_TYPE_DISK <<
				FCOE_TCE_TX_WR_RX_RD_CONST_DEV_TYPE_SHIFT;
	task->txwr_rxrd.union_ctx.cleanup.ctx.cleaned_task_id = orig_xid;

	/* Tx flags */
	task->txwr_rxrd.const_ctx.tx_flags =
				FCOE_TASK_TX_STATE_EXCHANGE_CLEANUP <<
				FCOE_TCE_TX_WR_RX_RD_CONST_TX_STATE_SHIFT;

	/* Rx Read Tx Write */
	task->rxwr_txrd.const_ctx.init_flags = context_id <<
				FCOE_TCE_RX_WR_TX_RD_CONST_CID_SHIFT;
	task->rxwr_txrd.var_ctx.rx_flags |= 1 <<
				FCOE_TCE_RX_WR_TX_RD_VAR_EXP_FIRST_FRAME_SHIFT;
}

void bnx2fc_init_mp_task(struct bnx2fc_cmd *io_req,
				struct fcoe_task_ctx_entry *task)
{
	struct bnx2fc_mp_req *mp_req = &(io_req->mp_req);
	struct bnx2fc_rport *tgt = io_req->tgt;
	struct fc_frame_header *fc_hdr;
	struct fcoe_ext_mul_sges_ctx *sgl;
	u8 task_type = 0;
	u64 *hdr;
	u64 temp_hdr[3];
	u32 context_id;

	/* Obtain task_type */
	if ((io_req->cmd_type == BNX2FC_TASK_MGMT_CMD) ||
	    (io_req->cmd_type == BNX2FC_ELS)) {
		task_type = FCOE_TASK_TYPE_MIDPATH;
	} else if (io_req->cmd_type == BNX2FC_ABTS) {
		task_type = FCOE_TASK_TYPE_ABTS;
	}

	memset(task, 0, sizeof(struct fcoe_task_ctx_entry));

	/* Setup the task from io_req for easy reference */
	io_req->task = task;

	BNX2FC_IO_DBG(io_req, "Init MP task for cmd_type = %d task_type = %d\n",
		io_req->cmd_type, task_type);

	/* Tx only */
	if ((task_type == FCOE_TASK_TYPE_MIDPATH) ||
	    (task_type == FCOE_TASK_TYPE_UNSOLICITED)) {
		task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_addr.lo =
				(u32)mp_req->mp_req_bd_dma;
		task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_addr.hi =
				(u32)((u64)mp_req->mp_req_bd_dma >> 32);
		task->txwr_only.sgl_ctx.sgl.mul_sgl.sgl_size = 1;
	}

	/* Tx Write Rx Read */
	/* init flags */
	task->txwr_rxrd.const_ctx.init_flags = task_type <<
				FCOE_TCE_TX_WR_RX_RD_CONST_TASK_TYPE_SHIFT;
	if (tgt->dev_type == TYPE_TAPE)
		task->txwr_rxrd.const_ctx.init_flags |=
				FCOE_TASK_DEV_TYPE_TAPE <<
				FCOE_TCE_TX_WR_RX_RD_CONST_DEV_TYPE_SHIFT;
	else
		task->txwr_rxrd.const_ctx.init_flags |=
				FCOE_TASK_DEV_TYPE_DISK <<
				FCOE_TCE_TX_WR_RX_RD_CONST_DEV_TYPE_SHIFT;
	task->txwr_rxrd.const_ctx.init_flags |= FCOE_TASK_CLASS_TYPE_3 <<
				FCOE_TCE_TX_WR_RX_RD_CONST_CLASS_TYPE_SHIFT;

	/* tx flags */
	task->txwr_rxrd.const_ctx.tx_flags = FCOE_TASK_TX_STATE_INIT <<
				FCOE_TCE_TX_WR_RX_RD_CONST_TX_STATE_SHIFT;

	/* Rx Write Tx Read */
	task->rxwr_txrd.const_ctx.data_2_trns = io_req->data_xfer_len;

	/* rx flags */
	task->rxwr_txrd.var_ctx.rx_flags |= 1 <<
				FCOE_TCE_RX_WR_TX_RD_VAR_EXP_FIRST_FRAME_SHIFT;

	context_id = tgt->context_id;
	task->rxwr_txrd.const_ctx.init_flags = context_id <<
				FCOE_TCE_RX_WR_TX_RD_CONST_CID_SHIFT;

	fc_hdr = &(mp_req->req_fc_hdr);
	if (task_type == FCOE_TASK_TYPE_MIDPATH) {
		fc_hdr->fh_ox_id = cpu_to_be16(io_req->xid);
		fc_hdr->fh_rx_id = htons(0xffff);
		task->rxwr_txrd.var_ctx.rx_id = 0xffff;
	} else if (task_type == FCOE_TASK_TYPE_UNSOLICITED) {
		fc_hdr->fh_rx_id = cpu_to_be16(io_req->xid);
	}

	/* Fill FC Header into middle path buffer */
	hdr = (u64 *) &task->txwr_rxrd.union_ctx.tx_frame.fc_hdr;
	memcpy(temp_hdr, fc_hdr, sizeof(temp_hdr));
	hdr[0] = cpu_to_be64(temp_hdr[0]);
	hdr[1] = cpu_to_be64(temp_hdr[1]);
	hdr[2] = cpu_to_be64(temp_hdr[2]);

	/* Rx Only */
	if (task_type == FCOE_TASK_TYPE_MIDPATH) {
		sgl = &task->rxwr_only.union_ctx.read_info.sgl_ctx.sgl;

		sgl->mul_sgl.cur_sge_addr.lo = (u32)mp_req->mp_resp_bd_dma;
		sgl->mul_sgl.cur_sge_addr.hi =
				(u32)((u64)mp_req->mp_resp_bd_dma >> 32);
		sgl->mul_sgl.sgl_size = 1;
	}
}
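
/*
 * The FC header copy above treats the 24-byte fc_frame_header as three
 * 64-bit words and byte-swaps each with cpu_to_be64(), since the
 * firmware expects the header big-endian while temp_hdr holds it in
 * CPU byte order.  A minimal sketch of the same idea in isolation:
 *
 *	u64 tmp[3];
 *	memcpy(tmp, fc_hdr, sizeof(tmp));	// 24-byte FC header
 *	for (i = 0; i < 3; i++)
 *		hdr[i] = cpu_to_be64(tmp[i]);	// firmware wants BE
 */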

void bnx2fc_init_task(struct bnx2fc_cmd *io_req,
		      struct fcoe_task_ctx_entry *task)
{
	u8 task_type;
	struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
	struct io_bdt *bd_tbl = io_req->bd_tbl;
	struct bnx2fc_rport *tgt = io_req->tgt;
	struct fcoe_cached_sge_ctx *cached_sge;
	struct fcoe_ext_mul_sges_ctx *sgl;
	int dev_type = tgt->dev_type;
	u64 *fcp_cmnd;
	u64 tmp_fcp_cmnd[4];
	u32 context_id;
	int cnt, i;
	int bd_count;

	memset(task, 0, sizeof(struct fcoe_task_ctx_entry));

	/* Set up the task from io_req for easy reference */
	io_req->task = task;

	if (sc_cmd->sc_data_direction == DMA_TO_DEVICE)
		task_type = FCOE_TASK_TYPE_WRITE;
	else
		task_type = FCOE_TASK_TYPE_READ;

	/* Tx only */
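	/*
	 * A single-BD write to a disk device caches the lone buffer
	 * descriptor directly in the task context ("cached SGE"),
	 * presumably saving the chip a separate SGL fetch from host
	 * memory; everything else points the task at the external BD
	 * table.
	 */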
	bd_count = bd_tbl->bd_valid;
	if (task_type == FCOE_TASK_TYPE_WRITE) {
		if ((dev_type == TYPE_DISK) && (bd_count == 1)) {
			struct fcoe_bd_ctx *fcoe_bd_tbl = bd_tbl->bd_tbl;

			task->txwr_only.sgl_ctx.cached_sge.cur_buf_addr.lo =
					fcoe_bd_tbl->buf_addr_lo;
			task->txwr_only.sgl_ctx.cached_sge.cur_buf_addr.hi =
					fcoe_bd_tbl->buf_addr_hi;
			task->txwr_only.sgl_ctx.cached_sge.cur_buf_rem =
					fcoe_bd_tbl->buf_len;

			task->txwr_rxrd.const_ctx.init_flags |= 1 <<
				FCOE_TCE_TX_WR_RX_RD_CONST_CACHED_SGE_SHIFT;
		} else {
			task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_addr.lo =
					(u32)bd_tbl->bd_tbl_dma;
			task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_addr.hi =
					(u32)((u64)bd_tbl->bd_tbl_dma >> 32);
			task->txwr_only.sgl_ctx.sgl.mul_sgl.sgl_size =
					bd_tbl->bd_valid;
		}
	}

	/* Tx Write Rx Read */
	/* Init state to NORMAL */
	task->txwr_rxrd.const_ctx.init_flags |= task_type <<
				FCOE_TCE_TX_WR_RX_RD_CONST_TASK_TYPE_SHIFT;
	if (dev_type == TYPE_TAPE)
		task->txwr_rxrd.const_ctx.init_flags |=
				FCOE_TASK_DEV_TYPE_TAPE <<
				FCOE_TCE_TX_WR_RX_RD_CONST_DEV_TYPE_SHIFT;
	else
		task->txwr_rxrd.const_ctx.init_flags |=
				FCOE_TASK_DEV_TYPE_DISK <<
				FCOE_TCE_TX_WR_RX_RD_CONST_DEV_TYPE_SHIFT;
	task->txwr_rxrd.const_ctx.init_flags |= FCOE_TASK_CLASS_TYPE_3 <<
				FCOE_TCE_TX_WR_RX_RD_CONST_CLASS_TYPE_SHIFT;
	/* tx flags */
	task->txwr_rxrd.const_ctx.tx_flags = FCOE_TASK_TX_STATE_NORMAL <<
				FCOE_TCE_TX_WR_RX_RD_CONST_TX_STATE_SHIFT;

	/* Set initial seq counter */
	task->txwr_rxrd.union_ctx.tx_seq.ctx.seq_cnt = 1;

	/* Fill FCP_CMND IU */
	fcp_cmnd = (u64 *)
		task->txwr_rxrd.union_ctx.fcp_cmd.opaque;
	bnx2fc_build_fcp_cmnd(io_req, (struct fcp_cmnd *)&tmp_fcp_cmnd);

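	/*
	 * sizeof(struct fcp_cmnd) is 32 bytes, so cnt below is 4: the IU
	 * is copied into the task context as four u64 words converted to
	 * big-endian (wire) order.
	 */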
	/* swap fcp_cmnd */
	cnt = sizeof(struct fcp_cmnd) / sizeof(u64);

	for (i = 0; i < cnt; i++) {
		*fcp_cmnd = cpu_to_be64(tmp_fcp_cmnd[i]);
		fcp_cmnd++;
	}

	/* Rx Write Tx Read */
	task->rxwr_txrd.const_ctx.data_2_trns = io_req->data_xfer_len;

	context_id = tgt->context_id;
	task->rxwr_txrd.const_ctx.init_flags = context_id <<
				FCOE_TCE_RX_WR_TX_RD_CONST_CID_SHIFT;

	/* rx flags */
	/* Set state to "waiting for the first packet" */
	task->rxwr_txrd.var_ctx.rx_flags |= 1 <<
				FCOE_TCE_RX_WR_TX_RD_VAR_EXP_FIRST_FRAME_SHIFT;

	task->rxwr_txrd.var_ctx.rx_id = 0xffff;

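	/*
	 * Disk reads with one or two BDs also fit in the cached SGE
	 * (second_buf_* holds the second descriptor); everything else
	 * uses the external BD table as a multiple SGL.
	 */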
	/* Rx Only */
	cached_sge = &task->rxwr_only.union_ctx.read_info.sgl_ctx.cached_sge;
	sgl = &task->rxwr_only.union_ctx.read_info.sgl_ctx.sgl;
	bd_count = bd_tbl->bd_valid;
	if (task_type == FCOE_TASK_TYPE_READ &&
	    dev_type == TYPE_DISK) {
		if (bd_count == 1) {
			struct fcoe_bd_ctx *fcoe_bd_tbl = bd_tbl->bd_tbl;

			cached_sge->cur_buf_addr.lo = fcoe_bd_tbl->buf_addr_lo;
			cached_sge->cur_buf_addr.hi = fcoe_bd_tbl->buf_addr_hi;
			cached_sge->cur_buf_rem = fcoe_bd_tbl->buf_len;
			task->txwr_rxrd.const_ctx.init_flags |= 1 <<
				FCOE_TCE_TX_WR_RX_RD_CONST_CACHED_SGE_SHIFT;
		} else if (bd_count == 2) {
			struct fcoe_bd_ctx *fcoe_bd_tbl = bd_tbl->bd_tbl;

			cached_sge->cur_buf_addr.lo = fcoe_bd_tbl->buf_addr_lo;
			cached_sge->cur_buf_addr.hi = fcoe_bd_tbl->buf_addr_hi;
			cached_sge->cur_buf_rem = fcoe_bd_tbl->buf_len;

			fcoe_bd_tbl++;
			cached_sge->second_buf_addr.lo =
					fcoe_bd_tbl->buf_addr_lo;
			cached_sge->second_buf_addr.hi =
					fcoe_bd_tbl->buf_addr_hi;
			cached_sge->second_buf_rem = fcoe_bd_tbl->buf_len;
			task->txwr_rxrd.const_ctx.init_flags |= 1 <<
				FCOE_TCE_TX_WR_RX_RD_CONST_CACHED_SGE_SHIFT;
		} else {
			sgl->mul_sgl.cur_sge_addr.lo = (u32)bd_tbl->bd_tbl_dma;
			sgl->mul_sgl.cur_sge_addr.hi =
					(u32)((u64)bd_tbl->bd_tbl_dma >> 32);
			sgl->mul_sgl.sgl_size = bd_count;
		}
	} else {
		sgl->mul_sgl.cur_sge_addr.lo = (u32)bd_tbl->bd_tbl_dma;
		sgl->mul_sgl.cur_sge_addr.hi =
				(u32)((u64)bd_tbl->bd_tbl_dma >> 32);
		sgl->mul_sgl.sgl_size = bd_count;
	}
}

/**
 * bnx2fc_setup_task_ctx - allocate and map task context
 *
 * @hba: pointer to adapter structure
 *
 * Allocate memory for the task context and the associated BD table to be
 * used by the firmware.
 */
int bnx2fc_setup_task_ctx(struct bnx2fc_hba *hba)
{
	int rc = 0;
	struct regpair *task_ctx_bdt;
	dma_addr_t addr;
	int i;

	/*
	 * Allocate task context bd table. A page size of bd table
	 * can map 256 buffers. Each buffer contains 32 task context
	 * entries. Hence the limit with one page is 8192 task context
	 * entries.
	 */
	hba->task_ctx_bd_tbl = dma_alloc_coherent(&hba->pcidev->dev,
						  PAGE_SIZE,
						  &hba->task_ctx_bd_dma,
						  GFP_KERNEL);
	if (!hba->task_ctx_bd_tbl) {
		printk(KERN_ERR PFX "unable to allocate task context BDT\n");
		rc = -ENOMEM;
		goto out;
	}
	memset(hba->task_ctx_bd_tbl, 0, PAGE_SIZE);

	/*
	 * Allocate task_ctx which is an array of pointers pointing to
	 * a page containing 32 task contexts
	 */
	hba->task_ctx = kzalloc((BNX2FC_TASK_CTX_ARR_SZ * sizeof(void *)),
				 GFP_KERNEL);
	if (!hba->task_ctx) {
		printk(KERN_ERR PFX "unable to allocate task context array\n");
		rc = -ENOMEM;
		goto out1;
	}

	/*
	 * Allocate task_ctx_dma which is an array of dma addresses
	 */
	hba->task_ctx_dma = kmalloc((BNX2FC_TASK_CTX_ARR_SZ *
					sizeof(dma_addr_t)), GFP_KERNEL);
	if (!hba->task_ctx_dma) {
		printk(KERN_ERR PFX "unable to alloc context mapping array\n");
		rc = -ENOMEM;
		goto out2;
	}

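	/*
	 * Fill the BD table: each regpair carries the 64-bit DMA address
	 * (lo/hi, little-endian) of one PAGE_SIZE block holding 32 task
	 * context entries.
	 */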
	task_ctx_bdt = (struct regpair *)hba->task_ctx_bd_tbl;
	for (i = 0; i < BNX2FC_TASK_CTX_ARR_SZ; i++) {
		hba->task_ctx[i] = dma_alloc_coherent(&hba->pcidev->dev,
						      PAGE_SIZE,
						      &hba->task_ctx_dma[i],
						      GFP_KERNEL);
		if (!hba->task_ctx[i]) {
			printk(KERN_ERR PFX "unable to alloc task context\n");
			rc = -ENOMEM;
			goto out3;
		}
		memset(hba->task_ctx[i], 0, PAGE_SIZE);
		addr = (u64)hba->task_ctx_dma[i];
		task_ctx_bdt->hi = cpu_to_le32((u32)(addr >> 32));
		task_ctx_bdt->lo = cpu_to_le32((u32)addr);
		task_ctx_bdt++;
	}
	return 0;

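/*
 * Error unwinding: each label below frees what was allocated before the
 * corresponding failure and falls through to undo the earlier steps.
 */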
out3:
	for (i = 0; i < BNX2FC_TASK_CTX_ARR_SZ; i++) {
		if (hba->task_ctx[i]) {
			dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
					  hba->task_ctx[i],
					  hba->task_ctx_dma[i]);
			hba->task_ctx[i] = NULL;
		}
	}

	kfree(hba->task_ctx_dma);
	hba->task_ctx_dma = NULL;
out2:
	kfree(hba->task_ctx);
	hba->task_ctx = NULL;
out1:
	dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
			  hba->task_ctx_bd_tbl, hba->task_ctx_bd_dma);
	hba->task_ctx_bd_tbl = NULL;
out:
	return rc;
}

void bnx2fc_free_task_ctx(struct bnx2fc_hba *hba)
{
	int i;

	if (hba->task_ctx_bd_tbl) {
		dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
				  hba->task_ctx_bd_tbl,
				  hba->task_ctx_bd_dma);
		hba->task_ctx_bd_tbl = NULL;
	}

	if (hba->task_ctx) {
		for (i = 0; i < BNX2FC_TASK_CTX_ARR_SZ; i++) {
			if (hba->task_ctx[i]) {
				dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
						  hba->task_ctx[i],
						  hba->task_ctx_dma[i]);
				hba->task_ctx[i] = NULL;
			}
		}
		kfree(hba->task_ctx);
		hba->task_ctx = NULL;
	}

	kfree(hba->task_ctx_dma);
	hba->task_ctx_dma = NULL;
}

static void bnx2fc_free_hash_table(struct bnx2fc_hba *hba)
{
	int i;
	int segment_count;
	u32 *pbl;

	if (hba->hash_tbl_segments) {
		pbl = hba->hash_tbl_pbl;
		if (pbl) {
			segment_count = hba->hash_tbl_segment_count;
			/* Each PBL entry is a lo/hi pair of le32 words
			 * holding a segment's DMA address.
			 */
			for (i = 0; i < segment_count; ++i) {
				dma_addr_t dma_address;

				dma_address = le32_to_cpu(*pbl);
				++pbl;
				dma_address += ((u64)le32_to_cpu(*pbl)) << 32;
				++pbl;
				dma_free_coherent(&hba->pcidev->dev,
						  BNX2FC_HASH_TBL_CHUNK_SIZE,
						  hba->hash_tbl_segments[i],
						  dma_address);
			}
		}
		kfree(hba->hash_tbl_segments);
		hba->hash_tbl_segments = NULL;
	}

	if (hba->hash_tbl_pbl) {
		dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
				  hba->hash_tbl_pbl,
				  hba->hash_tbl_pbl_dma);
		hba->hash_tbl_pbl = NULL;
	}
}

static int bnx2fc_allocate_hash_table(struct bnx2fc_hba *hba)
{
	int i;
	int hash_table_size;
	int segment_count;
	int segment_array_size;
	int dma_segment_array_size;
	dma_addr_t *dma_segment_array;
	u32 *pbl;

	hash_table_size = BNX2FC_NUM_MAX_SESS * BNX2FC_MAX_ROWS_IN_HASH_TBL *
		sizeof(struct fcoe_hash_table_entry);

	segment_count = DIV_ROUND_UP(hash_table_size,
				     BNX2FC_HASH_TBL_CHUNK_SIZE);
	hba->hash_tbl_segment_count = segment_count;

	segment_array_size = segment_count * sizeof(*hba->hash_tbl_segments);
	hba->hash_tbl_segments = kzalloc(segment_array_size, GFP_KERNEL);
	if (!hba->hash_tbl_segments) {
		printk(KERN_ERR PFX "hash table pointers alloc failed\n");
		return -ENOMEM;
	}
	dma_segment_array_size = segment_count * sizeof(*dma_segment_array);
	dma_segment_array = kzalloc(dma_segment_array_size, GFP_KERNEL);
	if (!dma_segment_array) {
		printk(KERN_ERR PFX "hash table pointers (dma) alloc failed\n");
		goto cleanup_ht;
	}

	for (i = 0; i < segment_count; ++i) {
		hba->hash_tbl_segments[i] =
			dma_alloc_coherent(&hba->pcidev->dev,
					   BNX2FC_HASH_TBL_CHUNK_SIZE,
					   &dma_segment_array[i],
					   GFP_KERNEL);
		if (!hba->hash_tbl_segments[i]) {
			printk(KERN_ERR PFX "hash segment alloc failed\n");
			goto cleanup_dma;
		}
		memset(hba->hash_tbl_segments[i], 0,
		       BNX2FC_HASH_TBL_CHUNK_SIZE);
	}

	hba->hash_tbl_pbl = dma_alloc_coherent(&hba->pcidev->dev,
					       PAGE_SIZE,
					       &hba->hash_tbl_pbl_dma,
					       GFP_KERNEL);
	if (!hba->hash_tbl_pbl) {
		printk(KERN_ERR PFX "hash table pbl alloc failed\n");
		goto cleanup_dma;
	}
	memset(hba->hash_tbl_pbl, 0, PAGE_SIZE);

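	/*
	 * Publish each segment's DMA address in the PBL as a pair of
	 * little-endian 32-bit words, low half first.
	 */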
	pbl = hba->hash_tbl_pbl;
	for (i = 0; i < segment_count; ++i) {
		u64 paddr = dma_segment_array[i];
		*pbl = cpu_to_le32((u32) paddr);
		++pbl;
		*pbl = cpu_to_le32((u32) (paddr >> 32));
		++pbl;
	}
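	/*
	 * Walk the populated PBL pairs back; this loop has no side
	 * effects and is apparently a debugging leftover.
	 */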
	pbl = hba->hash_tbl_pbl;
	i = 0;
	while (*pbl && *(pbl + 1)) {
		u32 lo;
		u32 hi;
		lo = *pbl;
		++pbl;
		hi = *pbl;
		++pbl;
		++i;
	}
	kfree(dma_segment_array);
	return 0;

cleanup_dma:
	for (i = 0; i < segment_count; ++i) {
		if (hba->hash_tbl_segments[i])
			dma_free_coherent(&hba->pcidev->dev,
					  BNX2FC_HASH_TBL_CHUNK_SIZE,
					  hba->hash_tbl_segments[i],
					  dma_segment_array[i]);
	}
	kfree(dma_segment_array);
cleanup_ht:
	kfree(hba->hash_tbl_segments);
	hba->hash_tbl_segments = NULL;
	return -ENOMEM;
}

/**
 * bnx2fc_setup_fw_resc - Allocate and map firmware resources
 *
 * @hba: Pointer to adapter structure
 *
 * Allocate and map the connection hash table, T2 hash table, MP dummy
 * buffer and statistics buffer.
 */
int bnx2fc_setup_fw_resc(struct bnx2fc_hba *hba)
{
	u64 addr;
	u32 mem_size;
	int i;

	if (bnx2fc_allocate_hash_table(hba))
		return -ENOMEM;

	mem_size = BNX2FC_NUM_MAX_SESS * sizeof(struct regpair);
	hba->t2_hash_tbl_ptr = dma_alloc_coherent(&hba->pcidev->dev, mem_size,
						  &hba->t2_hash_tbl_ptr_dma,
						  GFP_KERNEL);
	if (!hba->t2_hash_tbl_ptr) {
		printk(KERN_ERR PFX "unable to allocate t2 hash table ptr\n");
		bnx2fc_free_fw_resc(hba);
		return -ENOMEM;
	}
	memset(hba->t2_hash_tbl_ptr, 0x00, mem_size);

	mem_size = BNX2FC_NUM_MAX_SESS *
		sizeof(struct fcoe_t2_hash_table_entry);
	hba->t2_hash_tbl = dma_alloc_coherent(&hba->pcidev->dev, mem_size,
					      &hba->t2_hash_tbl_dma,
					      GFP_KERNEL);
	if (!hba->t2_hash_tbl) {
		printk(KERN_ERR PFX "unable to allocate t2 hash table\n");
		bnx2fc_free_fw_resc(hba);
		return -ENOMEM;
	}
	memset(hba->t2_hash_tbl, 0x00, mem_size);
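	/*
	 * Chain the T2 hash entries: each entry's next pointer holds the
	 * DMA address of the entry after it; note the last entry's next
	 * points one entry past the table, which the firmware presumably
	 * treats as the end of the list.
	 */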
	for (i = 0; i < BNX2FC_NUM_MAX_SESS; i++) {
		addr = (u64)hba->t2_hash_tbl_dma +
			((i+1) * sizeof(struct fcoe_t2_hash_table_entry));
		hba->t2_hash_tbl[i].next.lo = addr & 0xffffffff;
		hba->t2_hash_tbl[i].next.hi = addr >> 32;
	}

	hba->dummy_buffer = dma_alloc_coherent(&hba->pcidev->dev,
					       PAGE_SIZE, &hba->dummy_buf_dma,
					       GFP_KERNEL);
	if (!hba->dummy_buffer) {
		printk(KERN_ERR PFX "unable to alloc MP Dummy Buffer\n");
		bnx2fc_free_fw_resc(hba);
		return -ENOMEM;
	}

	hba->stats_buffer = dma_alloc_coherent(&hba->pcidev->dev,
					       PAGE_SIZE,
					       &hba->stats_buf_dma,
					       GFP_KERNEL);
	if (!hba->stats_buffer) {
		printk(KERN_ERR PFX "unable to alloc Stats Buffer\n");
		bnx2fc_free_fw_resc(hba);
		return -ENOMEM;
	}
	memset(hba->stats_buffer, 0x00, PAGE_SIZE);

	return 0;
}

void bnx2fc_free_fw_resc(struct bnx2fc_hba *hba)
{
	u32 mem_size;

	if (hba->stats_buffer) {
		dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
				  hba->stats_buffer, hba->stats_buf_dma);
		hba->stats_buffer = NULL;
	}

	if (hba->dummy_buffer) {
		dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
				  hba->dummy_buffer, hba->dummy_buf_dma);
		hba->dummy_buffer = NULL;
	}

	if (hba->t2_hash_tbl_ptr) {
		mem_size = BNX2FC_NUM_MAX_SESS * sizeof(struct regpair);
		dma_free_coherent(&hba->pcidev->dev, mem_size,
				  hba->t2_hash_tbl_ptr,
				  hba->t2_hash_tbl_ptr_dma);
		hba->t2_hash_tbl_ptr = NULL;
	}

	if (hba->t2_hash_tbl) {
		mem_size = BNX2FC_NUM_MAX_SESS *
			sizeof(struct fcoe_t2_hash_table_entry);
		dma_free_coherent(&hba->pcidev->dev, mem_size,
				  hba->t2_hash_tbl, hba->t2_hash_tbl_dma);
		hba->t2_hash_tbl = NULL;
	}
	bnx2fc_free_hash_table(hba);
}