/*
 * cxgb3i_offload.c: Chelsio S3xx iscsi offloaded tcp connection management
 *
 * Copyright (C) 2003-2008 Chelsio Communications. All rights reserved.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the LICENSE file included in this
 * release for licensing terms and conditions.
 *
 * Written by: Dimitris Michailidis (dm@chelsio.com)
 *             Karen Xie (kxie@chelsio.com)
 */

#define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <scsi/scsi_host.h>

#include "common.h"
#include "t3_cpl.h"
#include "t3cdev.h"
#include "cxgb3_defs.h"
#include "cxgb3_ctl_defs.h"
#include "cxgb3_offload.h"
#include "firmware_exports.h"
#include "cxgb3i.h"

static unsigned int dbg_level;
#include "../libcxgbi.h"

#define DRV_MODULE_NAME		"cxgb3i"
#define DRV_MODULE_DESC		"Chelsio T3 iSCSI Driver"
#define DRV_MODULE_VERSION	"2.0.0"
#define DRV_MODULE_RELDATE	"Jun. 2010"

static char version[] =
	DRV_MODULE_DESC " " DRV_MODULE_NAME
	" v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Chelsio Communications, Inc.");
MODULE_DESCRIPTION(DRV_MODULE_DESC);
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_LICENSE("GPL");

module_param(dbg_level, uint, 0644);
MODULE_PARM_DESC(dbg_level, "debug flag (default=0)");

static int cxgb3i_rcv_win = 256 * 1024;
module_param(cxgb3i_rcv_win, int, 0644);
MODULE_PARM_DESC(cxgb3i_rcv_win, "TCP receive window in bytes (default=256KB)");

static int cxgb3i_snd_win = 128 * 1024;
module_param(cxgb3i_snd_win, int, 0644);
MODULE_PARM_DESC(cxgb3i_snd_win, "TCP send window in bytes (default=128KB)");

static int cxgb3i_rx_credit_thres = 10 * 1024;
module_param(cxgb3i_rx_credit_thres, int, 0644);
MODULE_PARM_DESC(cxgb3i_rx_credit_thres,
		 "RX credits return threshold in bytes (default=10KB)");

static unsigned int cxgb3i_max_connect = 8 * 1024;
module_param(cxgb3i_max_connect, uint, 0644);
MODULE_PARM_DESC(cxgb3i_max_connect, "Max. # of connections (default=8192)");

static unsigned int cxgb3i_sport_base = 20000;
module_param(cxgb3i_sport_base, uint, 0644);
MODULE_PARM_DESC(cxgb3i_sport_base, "starting port number (default=20000)");

static void cxgb3i_dev_open(struct t3cdev *);
static void cxgb3i_dev_close(struct t3cdev *);
static void cxgb3i_dev_event_handler(struct t3cdev *, u32, u32);

static struct cxgb3_client t3_client = {
	.name = DRV_MODULE_NAME,
	.handlers = cxgb3i_cpl_handlers,
	.add = cxgb3i_dev_open,
	.remove = cxgb3i_dev_close,
	.event_handler = cxgb3i_dev_event_handler,
};

static struct scsi_host_template cxgb3i_host_template = {
	.module = THIS_MODULE,
	.name = DRV_MODULE_NAME,
	.proc_name = DRV_MODULE_NAME,
	.can_queue = CXGB3I_SCSI_HOST_QDEPTH,
	.queuecommand = iscsi_queuecommand,
	.change_queue_depth = iscsi_change_queue_depth,
	.sg_tablesize = SG_ALL,
	.max_sectors = 0xFFFF,
	.cmd_per_lun = ISCSI_DEF_CMD_PER_LUN,
	.eh_abort_handler = iscsi_eh_abort,
	.eh_device_reset_handler = iscsi_eh_device_reset,
	.eh_target_reset_handler = iscsi_eh_recover_target,
	.target_alloc = iscsi_target_alloc,
	.use_clustering = DISABLE_CLUSTERING,
	.this_id = -1,
};

static struct iscsi_transport cxgb3i_iscsi_transport = {
	.owner = THIS_MODULE,
	.name = DRV_MODULE_NAME,
	/* owner and name should be set already */
	.caps = CAP_RECOVERY_L0 | CAP_MULTI_R2T | CAP_HDRDGST
		| CAP_DATADGST | CAP_DIGEST_OFFLOAD |
		CAP_PADDING_OFFLOAD | CAP_TEXT_NEGO,
	.attr_is_visible = cxgbi_attr_is_visible,
	.get_host_param = cxgbi_get_host_param,
	.set_host_param = cxgbi_set_host_param,
	/* session management */
	.create_session = cxgbi_create_session,
	.destroy_session = cxgbi_destroy_session,
	.get_session_param = iscsi_session_get_param,
	/* connection management */
	.create_conn = cxgbi_create_conn,
	.bind_conn = cxgbi_bind_conn,
	.destroy_conn = iscsi_tcp_conn_teardown,
	.start_conn = iscsi_conn_start,
	.stop_conn = iscsi_conn_stop,
	.get_conn_param = iscsi_conn_get_param,
	.set_param = cxgbi_set_conn_param,
	.get_stats = cxgbi_get_conn_stats,
	/* pdu xmit req from user space */
	.send_pdu = iscsi_conn_send_pdu,
	/* task */
	.init_task = iscsi_tcp_task_init,
	.xmit_task = iscsi_tcp_task_xmit,
	.cleanup_task = cxgbi_cleanup_task,
	/* pdu */
	.alloc_pdu = cxgbi_conn_alloc_pdu,
	.init_pdu = cxgbi_conn_init_pdu,
	.xmit_pdu = cxgbi_conn_xmit_pdu,
	.parse_pdu_itt = cxgbi_parse_pdu_itt,
	/* TCP connect/disconnect */
	.get_ep_param = cxgbi_get_ep_param,
	.ep_connect = cxgbi_ep_connect,
	.ep_poll = cxgbi_ep_poll,
	.ep_disconnect = cxgbi_ep_disconnect,
	/* Error recovery timeout call */
	.session_recovery_timedout = iscsi_session_recovery_timedout,
};

static struct scsi_transport_template *cxgb3i_stt;

/*
 * CPL (Chelsio Protocol Language) defines a message passing interface between
 * the host driver and the Chelsio ASIC.
 * The section below implements the CPLs related to iscsi tcp connection
 * open/close/abort and data send/receive.
 */

static int push_tx_frames(struct cxgbi_sock *csk, int req_completion);

static void send_act_open_req(struct cxgbi_sock *csk, struct sk_buff *skb,
			      const struct l2t_entry *e)
{
	unsigned int wscale = cxgbi_sock_compute_wscale(cxgb3i_rcv_win);
	struct cpl_act_open_req *req = (struct cpl_act_open_req *)skb->head;

	skb->priority = CPL_PRIORITY_SETUP;

	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_ACT_OPEN_REQ, csk->atid));
	req->local_port = csk->saddr.sin_port;
	req->peer_port = csk->daddr.sin_port;
	req->local_ip = csk->saddr.sin_addr.s_addr;
	req->peer_ip = csk->daddr.sin_addr.s_addr;

	req->opt0h = htonl(V_KEEP_ALIVE(1) | F_TCAM_BYPASS |
			V_WND_SCALE(wscale) | V_MSS_IDX(csk->mss_idx) |
			V_L2T_IDX(e->idx) | V_TX_CHANNEL(e->smt_idx));
	req->opt0l = htonl(V_ULP_MODE(ULP2_MODE_ISCSI) |
			V_RCV_BUFSIZ(cxgb3i_rcv_win >> 10));

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		"csk 0x%p,%u,0x%lx,%u, %pI4:%u-%pI4:%u, %u,%u,%u.\n",
		csk, csk->state, csk->flags, csk->atid,
		&req->local_ip, ntohs(req->local_port),
		&req->peer_ip, ntohs(req->peer_port),
		csk->mss_idx, e->idx, e->smt_idx);

	l2t_send(csk->cdev->lldev, skb, csk->l2t);
}

static inline void act_open_arp_failure(struct t3cdev *dev, struct sk_buff *skb)
{
	cxgbi_sock_act_open_req_arp_failure(NULL, skb);
}

/*
 * CPL connection close request: host ->
 *
 * Close a connection by sending a CPL_CLOSE_CON_REQ message and queue it to
 * the write queue (i.e., after any unsent tx data).
 */
static void send_close_req(struct cxgbi_sock *csk)
{
	struct sk_buff *skb = csk->cpl_close;
	struct cpl_close_con_req *req = (struct cpl_close_con_req *)skb->head;
	unsigned int tid = csk->tid;

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		"csk 0x%p,%u,0x%lx,%u.\n",
		csk, csk->state, csk->flags, csk->tid);

	csk->cpl_close = NULL;
	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_CLOSE_CON));
	req->wr.wr_lo = htonl(V_WR_TID(tid));
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_CLOSE_CON_REQ, tid));
	req->rsvd = htonl(csk->write_seq);

	cxgbi_sock_skb_entail(csk, skb);
	if (csk->state >= CTP_ESTABLISHED)
		push_tx_frames(csk, 1);
}

/*
 * CPL connection abort request: host ->
 *
 * Send an ABORT_REQ message. Makes sure we do not send multiple ABORT_REQs
 * for the same connection and also that we do not try to send a message
 * after the connection has closed.
 */
static void abort_arp_failure(struct t3cdev *tdev, struct sk_buff *skb)
{
	struct cpl_abort_req *req = cplhdr(skb);

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		"t3dev 0x%p, tid %u, skb 0x%p.\n",
		tdev, GET_TID(req), skb);
	req->cmd = CPL_ABORT_NO_RST;
	cxgb3_ofld_send(tdev, skb);
}

static void send_abort_req(struct cxgbi_sock *csk)
{
	struct sk_buff *skb = csk->cpl_abort_req;
	struct cpl_abort_req *req;

	if (unlikely(csk->state == CTP_ABORTING || !skb))
		return;
	cxgbi_sock_set_state(csk, CTP_ABORTING);
	cxgbi_sock_set_flag(csk, CTPF_ABORT_RPL_PENDING);
	/* Purge the send queue so we don't send anything after an abort. */
	cxgbi_sock_purge_write_queue(csk);

	csk->cpl_abort_req = NULL;
	req = (struct cpl_abort_req *)skb->head;
	skb->priority = CPL_PRIORITY_DATA;
	set_arp_failure_handler(skb, abort_arp_failure);
	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_HOST_ABORT_CON_REQ));
	req->wr.wr_lo = htonl(V_WR_TID(csk->tid));
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_ABORT_REQ, csk->tid));
	req->rsvd0 = htonl(csk->snd_nxt);
	req->rsvd1 = !cxgbi_sock_flag(csk, CTPF_TX_DATA_SENT);
	req->cmd = CPL_ABORT_SEND_RST;

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		"csk 0x%p,%u,0x%lx,%u, snd_nxt %u, 0x%x.\n",
		csk, csk->state, csk->flags, csk->tid, csk->snd_nxt,
		req->rsvd1);

	l2t_send(csk->cdev->lldev, skb, csk->l2t);
}

/*
 * CPL connection abort reply: host ->
 *
 * Send an ABORT_RPL message in response to the ABORT_REQ received.
 */
static void send_abort_rpl(struct cxgbi_sock *csk, int rst_status)
{
	struct sk_buff *skb = csk->cpl_abort_rpl;
	struct cpl_abort_rpl *rpl = (struct cpl_abort_rpl *)skb->head;

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		"csk 0x%p,%u,0x%lx,%u, status %d.\n",
		csk, csk->state, csk->flags, csk->tid, rst_status);

	csk->cpl_abort_rpl = NULL;
	skb->priority = CPL_PRIORITY_DATA;
	rpl->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_HOST_ABORT_CON_RPL));
	rpl->wr.wr_lo = htonl(V_WR_TID(csk->tid));
	OPCODE_TID(rpl) = htonl(MK_OPCODE_TID(CPL_ABORT_RPL, csk->tid));
	rpl->cmd = rst_status;
	cxgb3_ofld_send(csk->cdev->lldev, skb);
}

/*
 * CPL connection rx data ack: host ->
 * Send RX credits through an RX_DATA_ACK CPL message. Returns the number of
 * credits sent.
 */
static u32 send_rx_credits(struct cxgbi_sock *csk, u32 credits)
{
	struct sk_buff *skb;
	struct cpl_rx_data_ack *req;
	u32 dack = F_RX_DACK_CHANGE | V_RX_DACK_MODE(1);

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX,
		"csk 0x%p,%u,0x%lx,%u, credit %u, dack %u.\n",
		csk, csk->state, csk->flags, csk->tid, credits, dack);

	skb = alloc_wr(sizeof(*req), 0, GFP_ATOMIC);
	if (!skb) {
		pr_info("csk 0x%p, credit %u, OOM.\n", csk, credits);
		return 0;
	}
	req = (struct cpl_rx_data_ack *)skb->head;
	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_RX_DATA_ACK, csk->tid));
	req->credit_dack = htonl(F_RX_DACK_CHANGE | V_RX_DACK_MODE(1) |
				V_RX_CREDITS(credits));
	skb->priority = CPL_PRIORITY_ACK;
	cxgb3_ofld_send(csk->cdev->lldev, skb);
	return credits;
}

/*
 * CPL connection tx data: host ->
 *
 * Send iscsi PDU via TX_DATA CPL message. Returns the number of
 * credits sent.
 * Each TX_DATA consumes work request credits (wrs), so we need to keep track
 * of how many we've used so far and how many are pending (i.e., not yet acked
 * by T3).
 */

static unsigned int wrlen __read_mostly;
static unsigned int skb_wrs[SKB_WR_LIST_SIZE] __read_mostly;

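/*
 * skb_wrs[i] holds the number of work requests needed to transmit an skb
 * carrying i fragments: the SGL size for i fragments plus header overhead,
 * split across work requests of at most wr_len units each. init_wr_tab()
 * fills the table in once at offload-init time.
 */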
static void init_wr_tab(unsigned int wr_len)
{
	int i;

	if (skb_wrs[1])		/* already initialized */
		return;
	for (i = 1; i < SKB_WR_LIST_SIZE; i++) {
		int sgl_len = (3 * i) / 2 + (i & 1);

		sgl_len += 3;
		skb_wrs[i] = (sgl_len <= wr_len
			      ? 1 : 1 + (sgl_len - 2) / (wr_len - 1));
	}
	wrlen = wr_len * 8;
}

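/*
 * Prepend a TX_DATA work request header to an skb that is about to be sent.
 * The first WR on a connection also carries the send buffer size and the RSS
 * queue index, and marks the connection as having sent tx data.
 */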
static inline void make_tx_data_wr(struct cxgbi_sock *csk, struct sk_buff *skb,
				int len, int req_completion)
{
	struct tx_data_wr *req;
	struct l2t_entry *l2t = csk->l2t;

	skb_reset_transport_header(skb);
	req = (struct tx_data_wr *)__skb_push(skb, sizeof(*req));
	req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_TX_DATA) |
			(req_completion ? F_WR_COMPL : 0));
	req->wr_lo = htonl(V_WR_TID(csk->tid));
	/* len includes the length of any HW ULP additions */
	req->len = htonl(len);
	/* V_TX_ULP_SUBMODE sets both the mode and submode */
	req->flags = htonl(V_TX_ULP_SUBMODE(cxgbi_skcb_ulp_mode(skb)) |
			V_TX_SHOVE((skb_peek(&csk->write_queue) ? 0 : 1)));
	req->sndseq = htonl(csk->snd_nxt);
	req->param = htonl(V_TX_PORT(l2t->smt_idx));

	if (!cxgbi_sock_flag(csk, CTPF_TX_DATA_SENT)) {
		req->flags |= htonl(V_TX_ACK_PAGES(2) | F_TX_INIT |
				V_TX_CPU_IDX(csk->rss_qid));
		/* sendbuffer is in units of 32KB. */
		req->param |= htonl(V_TX_SNDBUF(cxgb3i_snd_win >> 15));
		cxgbi_sock_set_flag(csk, CTPF_TX_DATA_SENT);
	}
}

static void arp_failure_skb_discard(struct t3cdev *dev, struct sk_buff *skb)
{
	kfree_skb(skb);
}

/**
 * push_tx_frames -- start transmit
 * @csk: the offloaded connection
 * @req_completion: request wr_ack or not
 *
 * Prepends TX_DATA_WR or CPL_CLOSE_CON_REQ headers to buffers waiting in a
 * connection's send queue and sends them on to T3. Must be called with the
 * connection's lock held. Returns the amount of send buffer space that was
 * freed as a result of sending queued data to T3.
 */
static int push_tx_frames(struct cxgbi_sock *csk, int req_completion)
{
	int total_size = 0;
	struct sk_buff *skb;

	if (unlikely(csk->state < CTP_ESTABLISHED ||
		csk->state == CTP_CLOSE_WAIT_1 || csk->state >= CTP_ABORTING)) {
		log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_TX,
			"csk 0x%p,%u,0x%lx,%u, in closing state.\n",
			csk, csk->state, csk->flags, csk->tid);
		return 0;
	}

	while (csk->wr_cred && (skb = skb_peek(&csk->write_queue)) != NULL) {
		int len = skb->len;	/* length before skb_push */
		int frags = skb_shinfo(skb)->nr_frags + (len != skb->data_len);
		int wrs_needed = skb_wrs[frags];

		if (wrs_needed > 1 && len + sizeof(struct tx_data_wr) <= wrlen)
			wrs_needed = 1;

		WARN_ON(frags >= SKB_WR_LIST_SIZE || wrs_needed < 1);

		if (csk->wr_cred < wrs_needed) {
			log_debug(1 << CXGBI_DBG_PDU_TX,
				"csk 0x%p, skb len %u/%u, frag %u, wr %d<%u.\n",
				csk, skb->len, skb->data_len, frags,
				wrs_needed, csk->wr_cred);
			break;
		}

		__skb_unlink(skb, &csk->write_queue);
		skb->priority = CPL_PRIORITY_DATA;
		skb->csum = wrs_needed;	/* remember this until the WR_ACK */
		csk->wr_cred -= wrs_needed;
		csk->wr_una_cred += wrs_needed;
		cxgbi_sock_enqueue_wr(csk, skb);

		log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_TX,
			"csk 0x%p, enqueue, skb len %u/%u, frag %u, wr %d, "
			"left %u, unack %u.\n",
			csk, skb->len, skb->data_len, frags, skb->csum,
			csk->wr_cred, csk->wr_una_cred);

		if (likely(cxgbi_skcb_test_flag(skb, SKCBF_TX_NEED_HDR))) {
			if ((req_completion &&
				csk->wr_una_cred == wrs_needed) ||
			    csk->wr_una_cred >= csk->wr_max_cred / 2) {
				req_completion = 1;
				csk->wr_una_cred = 0;
			}
			len += cxgbi_ulp_extra_len(cxgbi_skcb_ulp_mode(skb));
			make_tx_data_wr(csk, skb, len, req_completion);
			csk->snd_nxt += len;
			cxgbi_skcb_clear_flag(skb, SKCBF_TX_NEED_HDR);
		}
		total_size += skb->truesize;
		log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_TX,
			"csk 0x%p, tid 0x%x, send skb 0x%p.\n",
			csk, csk->tid, skb);
		set_arp_failure_handler(skb, arp_failure_skb_discard);
		l2t_send(csk->cdev->lldev, skb, csk->l2t);
	}
	return total_size;
}

/*
 * Process a CPL_ACT_ESTABLISH message: -> host
 * Updates connection state from an active establish CPL message. Runs with
 * the connection lock held.
 */

static inline void free_atid(struct cxgbi_sock *csk)
{
	if (cxgbi_sock_flag(csk, CTPF_HAS_ATID)) {
		cxgb3_free_atid(csk->cdev->lldev, csk->atid);
		cxgbi_sock_clear_flag(csk, CTPF_HAS_ATID);
		cxgbi_sock_put(csk);
	}
}

static int do_act_establish(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
{
	struct cxgbi_sock *csk = ctx;
	struct cpl_act_establish *req = cplhdr(skb);
	unsigned int tid = GET_TID(req);
	unsigned int atid = G_PASS_OPEN_TID(ntohl(req->tos_tid));
	u32 rcv_isn = ntohl(req->rcv_isn);	/* real RCV_ISN + 1 */

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		"atid 0x%x,tid 0x%x, csk 0x%p,%u,0x%lx, isn %u.\n",
		atid, tid, csk, csk->state, csk->flags, rcv_isn);

	cxgbi_sock_get(csk);
	cxgbi_sock_set_flag(csk, CTPF_HAS_TID);
	csk->tid = tid;
	cxgb3_insert_tid(csk->cdev->lldev, &t3_client, csk, tid);

	free_atid(csk);

	csk->rss_qid = G_QNUM(ntohs(skb->csum));

	spin_lock_bh(&csk->lock);
	if (csk->retry_timer.function) {
		del_timer(&csk->retry_timer);
		csk->retry_timer.function = NULL;
	}

	if (unlikely(csk->state != CTP_ACTIVE_OPEN))
		pr_info("csk 0x%p,%u,0x%lx,%u, got EST.\n",
			csk, csk->state, csk->flags, csk->tid);

	csk->copied_seq = csk->rcv_wup = csk->rcv_nxt = rcv_isn;
	if (cxgb3i_rcv_win > (M_RCV_BUFSIZ << 10))
		csk->rcv_wup -= cxgb3i_rcv_win - (M_RCV_BUFSIZ << 10);

	cxgbi_sock_established(csk, ntohl(req->snd_isn), ntohs(req->tcp_opt));

	if (unlikely(cxgbi_sock_flag(csk, CTPF_ACTIVE_CLOSE_NEEDED)))
		/* upper layer has requested closing */
		send_abort_req(csk);
	else {
		if (skb_queue_len(&csk->write_queue))
			push_tx_frames(csk, 1);
		cxgbi_conn_tx_open(csk);
	}

	spin_unlock_bh(&csk->lock);
	__kfree_skb(skb);
	return 0;
}

/*
 * Process a CPL_ACT_OPEN_RPL message: -> host
 * Handle active open failures.
 */
static int act_open_rpl_status_to_errno(int status)
{
	switch (status) {
	case CPL_ERR_CONN_RESET:
		return -ECONNREFUSED;
	case CPL_ERR_ARP_MISS:
		return -EHOSTUNREACH;
	case CPL_ERR_CONN_TIMEDOUT:
		return -ETIMEDOUT;
	case CPL_ERR_TCAM_FULL:
		return -ENOMEM;
	case CPL_ERR_CONN_EXIST:
		return -EADDRINUSE;
	default:
		return -EIO;
	}
}

static void act_open_retry_timer(unsigned long data)
{
	struct sk_buff *skb;
	struct cxgbi_sock *csk = (struct cxgbi_sock *)data;

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		"csk 0x%p,%u,0x%lx,%u.\n",
		csk, csk->state, csk->flags, csk->tid);

	cxgbi_sock_get(csk);
	spin_lock_bh(&csk->lock);
	skb = alloc_wr(sizeof(struct cpl_act_open_req), 0, GFP_ATOMIC);
	if (!skb)
		cxgbi_sock_fail_act_open(csk, -ENOMEM);
	else {
		skb->sk = (struct sock *)csk;
		set_arp_failure_handler(skb, act_open_arp_failure);
		send_act_open_req(csk, skb, csk->l2t);
	}
	spin_unlock_bh(&csk->lock);
	cxgbi_sock_put(csk);
}

static int do_act_open_rpl(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
{
	struct cxgbi_sock *csk = ctx;
	struct cpl_act_open_rpl *rpl = cplhdr(skb);

	pr_info("csk 0x%p,%u,0x%lx,%u, status %u, %pI4:%u-%pI4:%u.\n",
		csk, csk->state, csk->flags, csk->atid, rpl->status,
		&csk->saddr.sin_addr.s_addr, ntohs(csk->saddr.sin_port),
		&csk->daddr.sin_addr.s_addr, ntohs(csk->daddr.sin_port));

	if (rpl->status != CPL_ERR_TCAM_FULL &&
	    rpl->status != CPL_ERR_CONN_EXIST &&
	    rpl->status != CPL_ERR_ARP_MISS)
		cxgb3_queue_tid_release(tdev, GET_TID(rpl));

	cxgbi_sock_get(csk);
	spin_lock_bh(&csk->lock);
	if (rpl->status == CPL_ERR_CONN_EXIST &&
	    csk->retry_timer.function != act_open_retry_timer) {
		csk->retry_timer.function = act_open_retry_timer;
		mod_timer(&csk->retry_timer, jiffies + HZ / 2);
	} else
		cxgbi_sock_fail_act_open(csk,
				act_open_rpl_status_to_errno(rpl->status));

	spin_unlock_bh(&csk->lock);
	cxgbi_sock_put(csk);
	__kfree_skb(skb);
	return 0;
}

/*
 * Process PEER_CLOSE CPL messages: -> host
 * Handle peer FIN.
 */
static int do_peer_close(struct t3cdev *cdev, struct sk_buff *skb, void *ctx)
{
	struct cxgbi_sock *csk = ctx;

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		"csk 0x%p,%u,0x%lx,%u.\n",
		csk, csk->state, csk->flags, csk->tid);

	cxgbi_sock_rcv_peer_close(csk);
	__kfree_skb(skb);
	return 0;
}

/*
 * Process CLOSE_CONN_RPL CPL message: -> host
 * Process a peer ACK to our FIN.
 */
static int do_close_con_rpl(struct t3cdev *cdev, struct sk_buff *skb,
			    void *ctx)
{
	struct cxgbi_sock *csk = ctx;
	struct cpl_close_con_rpl *rpl = cplhdr(skb);

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		"csk 0x%p,%u,0x%lx,%u, snxt %u.\n",
		csk, csk->state, csk->flags, csk->tid, ntohl(rpl->snd_nxt));

	cxgbi_sock_rcv_close_conn_rpl(csk, ntohl(rpl->snd_nxt));
	__kfree_skb(skb);
	return 0;
}

/*
 * Process ABORT_REQ_RSS CPL message: -> host
 * Process abort requests. If we are waiting for an ABORT_RPL we ignore this
 * request except that we need to reply to it.
 */

static int abort_status_to_errno(struct cxgbi_sock *csk, int abort_reason,
				 int *need_rst)
{
	switch (abort_reason) {
	case CPL_ERR_BAD_SYN: /* fall through */
	case CPL_ERR_CONN_RESET:
		return csk->state > CTP_ESTABLISHED ? -EPIPE : -ECONNRESET;
	case CPL_ERR_XMIT_TIMEDOUT:
	case CPL_ERR_PERSIST_TIMEDOUT:
	case CPL_ERR_FINWAIT2_TIMEDOUT:
	case CPL_ERR_KEEPALIVE_TIMEDOUT:
		return -ETIMEDOUT;
	default:
		return -EIO;
	}
}

static int do_abort_req(struct t3cdev *cdev, struct sk_buff *skb, void *ctx)
{
	const struct cpl_abort_req_rss *req = cplhdr(skb);
	struct cxgbi_sock *csk = ctx;
	int rst_status = CPL_ABORT_NO_RST;

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		"csk 0x%p,%u,0x%lx,%u.\n",
		csk, csk->state, csk->flags, csk->tid);

	if (req->status == CPL_ERR_RTX_NEG_ADVICE ||
	    req->status == CPL_ERR_PERSIST_NEG_ADVICE) {
		goto done;
	}

	cxgbi_sock_get(csk);
	spin_lock_bh(&csk->lock);

	if (!cxgbi_sock_flag(csk, CTPF_ABORT_REQ_RCVD)) {
		cxgbi_sock_set_flag(csk, CTPF_ABORT_REQ_RCVD);
		cxgbi_sock_set_state(csk, CTP_ABORTING);
		goto out;
	}

	cxgbi_sock_clear_flag(csk, CTPF_ABORT_REQ_RCVD);
	send_abort_rpl(csk, rst_status);

	if (!cxgbi_sock_flag(csk, CTPF_ABORT_RPL_PENDING)) {
		csk->err = abort_status_to_errno(csk, req->status, &rst_status);
		cxgbi_sock_closed(csk);
	}

out:
	spin_unlock_bh(&csk->lock);
	cxgbi_sock_put(csk);
done:
	__kfree_skb(skb);
	return 0;
}

/*
 * Process ABORT_RPL_RSS CPL message: -> host
 * Process abort replies. We only process these messages if we anticipate
 * them as the coordination between SW and HW in this area is somewhat lacking
 * and sometimes we get ABORT_RPLs after we are done with the connection that
 * originated the ABORT_REQ.
 */
static int do_abort_rpl(struct t3cdev *cdev, struct sk_buff *skb, void *ctx)
{
	struct cpl_abort_rpl_rss *rpl = cplhdr(skb);
	struct cxgbi_sock *csk = ctx;

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		"status 0x%x, csk 0x%p, s %u, 0x%lx.\n",
		rpl->status, csk, csk ? csk->state : 0,
		csk ? csk->flags : 0UL);
	/*
	 * Ignore replies to post-close aborts indicating that the abort was
	 * requested too late. These connections are terminated when we get
	 * PEER_CLOSE or CLOSE_CON_RPL and by the time the abort_rpl_rss
	 * arrives the TID is either no longer used or it has been recycled.
	 */
	if (rpl->status == CPL_ERR_ABORT_FAILED)
		goto rel_skb;
	/*
	 * Sometimes we've already closed the connection, e.g., a post-close
	 * abort races with ABORT_REQ_RSS, the latter frees the connection
	 * expecting the ABORT_REQ will fail with CPL_ERR_ABORT_FAILED,
	 * but FW turns the ABORT_REQ into a regular one and so we get
	 * ABORT_RPL_RSS with status 0 and no connection.
	 */
	if (csk)
		cxgbi_sock_rcv_abort_rpl(csk);
rel_skb:
	__kfree_skb(skb);
	return 0;
}

/*
 * Process RX_ISCSI_HDR CPL message: -> host
 * Handle received PDUs, the payload could be DDP'ed. If not, the payload
 * follows after the bhs.
 */
static int do_iscsi_hdr(struct t3cdev *t3dev, struct sk_buff *skb, void *ctx)
{
	struct cxgbi_sock *csk = ctx;
	struct cpl_iscsi_hdr *hdr_cpl = cplhdr(skb);
	struct cpl_iscsi_hdr_norss data_cpl;
	struct cpl_rx_data_ddp_norss ddp_cpl;
	unsigned int hdr_len, data_len, status;
	unsigned int len;
	int err;

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX,
		"csk 0x%p,%u,0x%lx,%u, skb 0x%p,%u.\n",
		csk, csk->state, csk->flags, csk->tid, skb, skb->len);

	spin_lock_bh(&csk->lock);

	if (unlikely(csk->state >= CTP_PASSIVE_CLOSE)) {
		log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
			"csk 0x%p,%u,0x%lx,%u, bad state.\n",
			csk, csk->state, csk->flags, csk->tid);
		if (csk->state != CTP_ABORTING)
			goto abort_conn;
		else
			goto discard;
	}

	cxgbi_skcb_tcp_seq(skb) = ntohl(hdr_cpl->seq);
	cxgbi_skcb_flags(skb) = 0;

	skb_reset_transport_header(skb);
	__skb_pull(skb, sizeof(struct cpl_iscsi_hdr));

	len = hdr_len = ntohs(hdr_cpl->len);
	/* msg coalesce is off or not enough data received */
	if (skb->len <= hdr_len) {
		pr_err("%s: tid %u, CPL_ISCSI_HDR, skb len %u < %u.\n",
			csk->cdev->ports[csk->port_id]->name, csk->tid,
			skb->len, hdr_len);
		goto abort_conn;
	}
	cxgbi_skcb_set_flag(skb, SKCBF_RX_COALESCED);

	err = skb_copy_bits(skb, skb->len - sizeof(ddp_cpl), &ddp_cpl,
			    sizeof(ddp_cpl));
	if (err < 0) {
		pr_err("%s: tid %u, copy cpl_ddp %u-%zu failed %d.\n",
			csk->cdev->ports[csk->port_id]->name, csk->tid,
			skb->len, sizeof(ddp_cpl), err);
		goto abort_conn;
	}

	cxgbi_skcb_set_flag(skb, SKCBF_RX_STATUS);
	cxgbi_skcb_rx_pdulen(skb) = ntohs(ddp_cpl.len);
	cxgbi_skcb_rx_ddigest(skb) = ntohl(ddp_cpl.ulp_crc);
	status = ntohl(ddp_cpl.ddp_status);

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX,
		"csk 0x%p, skb 0x%p,%u, pdulen %u, status 0x%x.\n",
		csk, skb, skb->len, cxgbi_skcb_rx_pdulen(skb), status);

	if (status & (1 << CPL_RX_DDP_STATUS_HCRC_SHIFT))
		cxgbi_skcb_set_flag(skb, SKCBF_RX_HCRC_ERR);
	if (status & (1 << CPL_RX_DDP_STATUS_DCRC_SHIFT))
		cxgbi_skcb_set_flag(skb, SKCBF_RX_DCRC_ERR);
	if (status & (1 << CPL_RX_DDP_STATUS_PAD_SHIFT))
		cxgbi_skcb_set_flag(skb, SKCBF_RX_PAD_ERR);

	if (skb->len > (hdr_len + sizeof(ddp_cpl))) {
		err = skb_copy_bits(skb, hdr_len, &data_cpl, sizeof(data_cpl));
		if (err < 0) {
			pr_err("%s: tid %u, cp %zu/%u failed %d.\n",
				csk->cdev->ports[csk->port_id]->name,
				csk->tid, sizeof(data_cpl), skb->len, err);
			goto abort_conn;
		}
		data_len = ntohs(data_cpl.len);
		log_debug(1 << CXGBI_DBG_DDP | 1 << CXGBI_DBG_PDU_RX,
			"skb 0x%p, pdu not ddp'ed %u/%u, status 0x%x.\n",
			skb, data_len, cxgbi_skcb_rx_pdulen(skb), status);
		len += sizeof(data_cpl) + data_len;
	} else if (status & (1 << CPL_RX_DDP_STATUS_DDP_SHIFT))
		cxgbi_skcb_set_flag(skb, SKCBF_RX_DATA_DDPD);

	csk->rcv_nxt = ntohl(ddp_cpl.seq) + cxgbi_skcb_rx_pdulen(skb);
	__pskb_trim(skb, len);
	__skb_queue_tail(&csk->receive_queue, skb);
	cxgbi_conn_pdu_ready(csk);

	spin_unlock_bh(&csk->lock);
	return 0;

abort_conn:
	send_abort_req(csk);
discard:
	spin_unlock_bh(&csk->lock);
	__kfree_skb(skb);
	return 0;
}

/*
 * Process TX_DATA_ACK CPL messages: -> host
 * Process an acknowledgment of WR completion. Advance snd_una and send the
 * next batch of work requests from the write queue.
 */
static int do_wr_ack(struct t3cdev *cdev, struct sk_buff *skb, void *ctx)
{
	struct cxgbi_sock *csk = ctx;
	struct cpl_wr_ack *hdr = cplhdr(skb);

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX,
		"csk 0x%p,%u,0x%lx,%u, cr %u.\n",
		csk, csk->state, csk->flags, csk->tid, ntohs(hdr->credits));

	cxgbi_sock_rcv_wr_ack(csk, ntohs(hdr->credits), ntohl(hdr->snd_una), 1);
	__kfree_skb(skb);
	return 0;
}

/*
 * For each connection, pre-allocate the skbs needed for close/abort requests,
 * so that we can service those requests right away.
 */
static int alloc_cpls(struct cxgbi_sock *csk)
{
	csk->cpl_close = alloc_wr(sizeof(struct cpl_close_con_req), 0,
				GFP_KERNEL);
	if (!csk->cpl_close)
		return -ENOMEM;
	csk->cpl_abort_req = alloc_wr(sizeof(struct cpl_abort_req), 0,
				GFP_KERNEL);
	if (!csk->cpl_abort_req)
		goto free_cpl_skbs;

	csk->cpl_abort_rpl = alloc_wr(sizeof(struct cpl_abort_rpl), 0,
				GFP_KERNEL);
	if (!csk->cpl_abort_rpl)
		goto free_cpl_skbs;

	return 0;

free_cpl_skbs:
	cxgbi_sock_free_cpl_skbs(csk);
	return -ENOMEM;
}

static void l2t_put(struct cxgbi_sock *csk)
{
	struct t3cdev *t3dev = (struct t3cdev *)csk->cdev->lldev;

	if (csk->l2t) {
		l2t_release(t3dev, csk->l2t);
		csk->l2t = NULL;
		cxgbi_sock_put(csk);
	}
}

/**
 * release_offload_resources - release offload resources
 * @csk: the offloaded iscsi tcp connection.
 * Release resources held by an offload connection (TID, L2T entry, etc.)
 */
static void release_offload_resources(struct cxgbi_sock *csk)
{
	struct t3cdev *t3dev = (struct t3cdev *)csk->cdev->lldev;

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		"csk 0x%p,%u,0x%lx,%u.\n",
		csk, csk->state, csk->flags, csk->tid);

	csk->rss_qid = 0;
	cxgbi_sock_free_cpl_skbs(csk);

	if (csk->wr_cred != csk->wr_max_cred) {
		cxgbi_sock_purge_wr_queue(csk);
		cxgbi_sock_reset_wr_list(csk);
	}
	l2t_put(csk);
	if (cxgbi_sock_flag(csk, CTPF_HAS_ATID))
		free_atid(csk);
	else if (cxgbi_sock_flag(csk, CTPF_HAS_TID)) {
		cxgb3_remove_tid(t3dev, (void *)csk, csk->tid);
		cxgbi_sock_clear_flag(csk, CTPF_HAS_TID);
		cxgbi_sock_put(csk);
	}
	csk->dst = NULL;
	csk->cdev = NULL;
}

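/*
 * Propagate the hba's iSCSI IPv4 address into the private address stored on
 * the net_device (or its vlan device) it belongs to, clearing any stale copy,
 * so that active opens pick up the current source address.
 */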
static void update_address(struct cxgbi_hba *chba)
{
	if (chba->ipv4addr) {
		if (chba->vdev &&
		    chba->ipv4addr != cxgb3i_get_private_ipv4addr(chba->vdev)) {
			cxgb3i_set_private_ipv4addr(chba->vdev, chba->ipv4addr);
			cxgb3i_set_private_ipv4addr(chba->ndev, 0);
			pr_info("%s set %pI4.\n",
				chba->vdev->name, &chba->ipv4addr);
		} else if (chba->ipv4addr !=
				cxgb3i_get_private_ipv4addr(chba->ndev)) {
			cxgb3i_set_private_ipv4addr(chba->ndev, chba->ipv4addr);
			pr_info("%s set %pI4.\n",
				chba->ndev->name, &chba->ipv4addr);
		}
	} else if (cxgb3i_get_private_ipv4addr(chba->ndev)) {
		if (chba->vdev)
			cxgb3i_set_private_ipv4addr(chba->vdev, 0);
		cxgb3i_set_private_ipv4addr(chba->ndev, 0);
	}
}

static int init_act_open(struct cxgbi_sock *csk)
{
	struct dst_entry *dst = csk->dst;
	struct cxgbi_device *cdev = csk->cdev;
	struct t3cdev *t3dev = (struct t3cdev *)cdev->lldev;
	struct net_device *ndev = cdev->ports[csk->port_id];
	struct cxgbi_hba *chba = cdev->hbas[csk->port_id];
	struct sk_buff *skb = NULL;

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		"csk 0x%p,%u,0x%lx.\n", csk, csk->state, csk->flags);

	update_address(chba);
	if (chba->ipv4addr)
		csk->saddr.sin_addr.s_addr = chba->ipv4addr;

	csk->rss_qid = 0;
	csk->l2t = t3_l2t_get(t3dev, dst, ndev,
			      &csk->daddr.sin_addr.s_addr);
	if (!csk->l2t) {
		pr_err("NO l2t available.\n");
		return -EINVAL;
	}
	cxgbi_sock_get(csk);

	csk->atid = cxgb3_alloc_atid(t3dev, &t3_client, csk);
	if (csk->atid < 0) {
		pr_err("NO atid available.\n");
		goto rel_resource;
	}
	cxgbi_sock_set_flag(csk, CTPF_HAS_ATID);
	cxgbi_sock_get(csk);

	skb = alloc_wr(sizeof(struct cpl_act_open_req), 0, GFP_KERNEL);
	if (!skb)
		goto rel_resource;
	skb->sk = (struct sock *)csk;
	set_arp_failure_handler(skb, act_open_arp_failure);

	csk->wr_max_cred = csk->wr_cred = T3C_DATA(t3dev)->max_wrs - 1;
	csk->wr_una_cred = 0;
	csk->mss_idx = cxgbi_sock_select_mss(csk, dst_mtu(dst));
	cxgbi_sock_reset_wr_list(csk);
	csk->err = 0;

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		"csk 0x%p,%u,0x%lx, %pI4:%u-%pI4:%u.\n",
		csk, csk->state, csk->flags,
		&csk->saddr.sin_addr.s_addr, ntohs(csk->saddr.sin_port),
		&csk->daddr.sin_addr.s_addr, ntohs(csk->daddr.sin_port));

	cxgbi_sock_set_state(csk, CTP_ACTIVE_OPEN);
	send_act_open_req(csk, skb, csk->l2t);
	return 0;

rel_resource:
	if (skb)
		__kfree_skb(skb);
	return -EINVAL;
}

cxgb3_cpl_handler_func cxgb3i_cpl_handlers[NUM_CPL_CMDS] = {
	[CPL_ACT_ESTABLISH] = do_act_establish,
	[CPL_ACT_OPEN_RPL] = do_act_open_rpl,
	[CPL_PEER_CLOSE] = do_peer_close,
	[CPL_ABORT_REQ_RSS] = do_abort_req,
	[CPL_ABORT_RPL_RSS] = do_abort_rpl,
	[CPL_CLOSE_CON_RPL] = do_close_con_rpl,
	[CPL_TX_DMA_ACK] = do_wr_ack,
	[CPL_ISCSI_HDR] = do_iscsi_hdr,
};

/**
 * cxgb3i_ofld_init - allocate and initialize resources for each adapter found
 * @cdev: cxgbi adapter
 */
int cxgb3i_ofld_init(struct cxgbi_device *cdev)
{
	struct t3cdev *t3dev = (struct t3cdev *)cdev->lldev;
	struct adap_ports port;
	struct ofld_page_info rx_page_info;
	unsigned int wr_len;
	int rc;

	if (t3dev->ctl(t3dev, GET_WR_LEN, &wr_len) < 0 ||
	    t3dev->ctl(t3dev, GET_PORTS, &port) < 0 ||
	    t3dev->ctl(t3dev, GET_RX_PAGE_INFO, &rx_page_info) < 0) {
		pr_warn("t3 0x%p, offload up, ioctl failed.\n", t3dev);
		return -EINVAL;
	}

	if (cxgb3i_max_connect > CXGBI_MAX_CONN)
		cxgb3i_max_connect = CXGBI_MAX_CONN;

	rc = cxgbi_device_portmap_create(cdev, cxgb3i_sport_base,
					cxgb3i_max_connect);
	if (rc < 0)
		return rc;

	init_wr_tab(wr_len);
	cdev->csk_release_offload_resources = release_offload_resources;
	cdev->csk_push_tx_frames = push_tx_frames;
	cdev->csk_send_abort_req = send_abort_req;
	cdev->csk_send_close_req = send_close_req;
	cdev->csk_send_rx_credits = send_rx_credits;
	cdev->csk_alloc_cpls = alloc_cpls;
	cdev->csk_init_act_open = init_act_open;

	pr_info("cdev 0x%p, offload up, added.\n", cdev);
	return 0;
}
1063 | ||
1064 | /* | |
1065 | * functions to program the pagepod in h/w | |
1066 | */ | |
1067 | static inline void ulp_mem_io_set_hdr(struct sk_buff *skb, unsigned int addr) | |
1068 | { | |
1069 | struct ulp_mem_io *req = (struct ulp_mem_io *)skb->head; | |
1070 | ||
1071 | memset(req, 0, sizeof(*req)); | |
1072 | ||
1073 | req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_BYPASS)); | |
1074 | req->cmd_lock_addr = htonl(V_ULP_MEMIO_ADDR(addr >> 5) | | |
1075 | V_ULPTX_CMD(ULP_MEM_WRITE)); | |
1076 | req->len = htonl(V_ULP_MEMIO_DATA_LEN(PPOD_SIZE >> 5) | | |
1077 | V_ULPTX_NFLITS((PPOD_SIZE >> 3) + 1)); | |
1078 | } | |
1079 | ||
1080 | static int ddp_set_map(struct cxgbi_sock *csk, struct cxgbi_pagepod_hdr *hdr, | |
1081 | unsigned int idx, unsigned int npods, | |
1082 | struct cxgbi_gather_list *gl) | |
1083 | { | |
1084 | struct cxgbi_device *cdev = csk->cdev; | |
1085 | struct cxgbi_ddp_info *ddp = cdev->ddp; | |
1086 | unsigned int pm_addr = (idx << PPOD_SIZE_SHIFT) + ddp->llimit; | |
1087 | int i; | |
1088 | ||
1089 | log_debug(1 << CXGBI_DBG_DDP, | |
1090 | "csk 0x%p, idx %u, npods %u, gl 0x%p.\n", | |
1091 | csk, idx, npods, gl); | |
1092 | ||
1093 | for (i = 0; i < npods; i++, idx++, pm_addr += PPOD_SIZE) { | |
b8ce8b59 | 1094 | struct sk_buff *skb = alloc_wr(sizeof(struct ulp_mem_io) + |
1095 | PPOD_SIZE, 0, GFP_ATOMIC); | |
6f7efaab | 1096 | |
b8ce8b59 | 1097 | if (!skb) |
1098 | return -ENOMEM; | |
6f7efaab | 1099 | |
1100 | ulp_mem_io_set_hdr(skb, pm_addr); | |
1101 | cxgbi_ddp_ppod_set((struct cxgbi_pagepod *)(skb->head + | |
1102 | sizeof(struct ulp_mem_io)), | |
1103 | hdr, gl, i * PPOD_PAGES_MAX); | |
1104 | skb->priority = CPL_PRIORITY_CONTROL; | |
1105 | cxgb3_ofld_send(cdev->lldev, skb); | |
1106 | } | |
1107 | return 0; | |
1108 | } | |

static void ddp_clear_map(struct cxgbi_hba *chba, unsigned int tag,
			  unsigned int idx, unsigned int npods)
{
	struct cxgbi_device *cdev = chba->cdev;
	struct cxgbi_ddp_info *ddp = cdev->ddp;
	unsigned int pm_addr = (idx << PPOD_SIZE_SHIFT) + ddp->llimit;
	int i;

	log_debug(1 << CXGBI_DBG_DDP,
		"cdev 0x%p, idx %u, npods %u, tag 0x%x.\n",
		cdev, idx, npods, tag);

	for (i = 0; i < npods; i++, idx++, pm_addr += PPOD_SIZE) {
		struct sk_buff *skb = alloc_wr(sizeof(struct ulp_mem_io) +
						PPOD_SIZE, 0, GFP_ATOMIC);

		if (!skb) {
			pr_err("tag 0x%x, 0x%x, %d/%u, skb OOM.\n",
				tag, idx, i, npods);
			continue;
		}
		ulp_mem_io_set_hdr(skb, pm_addr);
		skb->priority = CPL_PRIORITY_CONTROL;
		cxgb3_ofld_send(cdev->lldev, skb);
	}
}

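/*
 * Program the DDP page-size index for a connection: rewrite bits 31:28 of
 * TCB word 31 via a CPL_SET_TCB_FIELD message (mask 0xF0000000, value pg_idx).
 */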
static int ddp_setup_conn_pgidx(struct cxgbi_sock *csk,
				unsigned int tid, int pg_idx, bool reply)
{
	struct sk_buff *skb = alloc_wr(sizeof(struct cpl_set_tcb_field), 0,
					GFP_KERNEL);
	struct cpl_set_tcb_field *req;
	u64 val = pg_idx < DDP_PGIDX_MAX ? pg_idx : 0;

	log_debug(1 << CXGBI_DBG_DDP,
		"csk 0x%p, tid %u, pg_idx %d.\n", csk, tid, pg_idx);
	if (!skb)
		return -ENOMEM;

	/* set up ulp submode and page size */
	req = (struct cpl_set_tcb_field *)skb->head;
	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid));
	req->reply = V_NO_REPLY(reply ? 0 : 1);
	req->cpu_idx = 0;
	req->word = htons(31);
	req->mask = cpu_to_be64(0xF0000000);
	req->val = cpu_to_be64(val << 28);
	skb->priority = CPL_PRIORITY_CONTROL;

	cxgb3_ofld_send(csk->cdev->lldev, skb);
	return 0;
}
1164 | ||
1165 | /** | |
1166 | * cxgb3i_setup_conn_digest - setup conn. digest setting | |
1167 | * @csk: cxgb tcp socket | |
1168 | * @tid: connection id | |
1169 | * @hcrc: header digest enabled | |
1170 | * @dcrc: data digest enabled | |
1171 | * @reply: request reply from h/w | |
1172 | * set up the iscsi digest settings for a connection identified by tid | |
1173 | */ | |
static int ddp_setup_conn_digest(struct cxgbi_sock *csk, unsigned int tid,
				 int hcrc, int dcrc, int reply)
{
	struct sk_buff *skb = alloc_wr(sizeof(struct cpl_set_tcb_field), 0,
					GFP_KERNEL);
	struct cpl_set_tcb_field *req;
	u64 val = (hcrc ? 1 : 0) | (dcrc ? 2 : 0);

	log_debug(1 << CXGBI_DBG_DDP,
		"csk 0x%p, tid %u, crc %d,%d.\n", csk, tid, hcrc, dcrc);
	if (!skb)
		return -ENOMEM;

	/* set up ulp submode and page size */
	req = (struct cpl_set_tcb_field *)skb->head;
	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid));
	req->reply = V_NO_REPLY(reply ? 0 : 1);
	req->cpu_idx = 0;
	req->word = htons(31);
	req->mask = cpu_to_be64(0x0F000000);
	req->val = cpu_to_be64(val << 24);
	skb->priority = CPL_PRIORITY_CONTROL;

	cxgb3_ofld_send(csk->cdev->lldev, skb);
	return 0;
}

/**
 * t3_ddp_cleanup - release the cxgb3 adapter's ddp resources
 * @cdev: cxgb3i adapter
 * release all the resources held by the ddp pagepod manager for a given
 * adapter if needed
 */
static void t3_ddp_cleanup(struct cxgbi_device *cdev)
{
	struct t3cdev *tdev = (struct t3cdev *)cdev->lldev;

	if (cxgbi_ddp_cleanup(cdev)) {
		pr_info("t3dev 0x%p, ulp_iscsi no more user.\n", tdev);
		tdev->ulp_iscsi = NULL;
	}
}

/**
 * cxgb3i_ddp_init - initialize the cxgb3 adapter's ddp resource
 * @cdev: cxgb3i adapter
 * initialize the ddp pagepod manager for a given adapter
 */
1225 | { | |
1226 | struct t3cdev *tdev = (struct t3cdev *)cdev->lldev; | |
1227 | struct cxgbi_ddp_info *ddp = tdev->ulp_iscsi; | |
1228 | struct ulp_iscsi_info uinfo; | |
1229 | unsigned int pgsz_factor[4]; | |
c682d602 | 1230 | int i, err; |
6f7efaab | 1231 | |
1232 | if (ddp) { | |
1233 | kref_get(&ddp->refcnt); | |
1234 | pr_warn("t3dev 0x%p, ddp 0x%p already set up.\n", | |
1235 | tdev, tdev->ulp_iscsi); | |
1236 | cdev->ddp = ddp; | |
1237 | return -EALREADY; | |
1238 | } | |
1239 | ||
1240 | err = tdev->ctl(tdev, ULP_ISCSI_GET_PARAMS, &uinfo); | |
1241 | if (err < 0) { | |
1242 | pr_err("%s, failed to get iscsi param err=%d.\n", | |
1243 | tdev->name, err); | |
1244 | return err; | |
1245 | } | |
1246 | ||
1247 | err = cxgbi_ddp_init(cdev, uinfo.llimit, uinfo.ulimit, | |
1248 | uinfo.max_txsz, uinfo.max_rxsz); | |
1249 | if (err < 0) | |
1250 | return err; | |
1251 | ||
1252 | ddp = cdev->ddp; | |
1253 | ||
1254 | uinfo.tagmask = ddp->idx_mask << PPOD_IDX_SHIFT; | |
1255 | cxgbi_ddp_page_size_factor(pgsz_factor); | |
c682d602 KX |
1256 | for (i = 0; i < 4; i++) |
1257 | uinfo.pgsz_factor[i] = pgsz_factor[i]; | |
6f7efaab | 1258 | uinfo.ulimit = uinfo.llimit + (ddp->nppods << PPOD_SIZE_SHIFT); |
1259 | ||
1260 | err = tdev->ctl(tdev, ULP_ISCSI_SET_PARAMS, &uinfo); | |
1261 | if (err < 0) { | |
1262 | pr_warn("%s unable to set iscsi param err=%d, ddp disabled.\n", | |
1263 | tdev->name, err); | |
1264 | cxgbi_ddp_cleanup(cdev); | |
1265 | return err; | |
1266 | } | |
1267 | tdev->ulp_iscsi = ddp; | |
1268 | ||
6f7efaab | 1269 | cdev->csk_ddp_setup_digest = ddp_setup_conn_digest; |
1270 | cdev->csk_ddp_setup_pgidx = ddp_setup_conn_pgidx; | |
1271 | cdev->csk_ddp_set = ddp_set_map; | |
1272 | cdev->csk_ddp_clear = ddp_clear_map; | |
1273 | ||
1274 | pr_info("tdev 0x%p, nppods %u, bits %u, mask 0x%x,0x%x pkt %u/%u, " | |
1275 | "%u/%u.\n", | |
1276 | tdev, ddp->nppods, ddp->idx_bits, ddp->idx_mask, | |
1277 | ddp->rsvd_tag_mask, ddp->max_txsz, uinfo.max_txsz, | |
1278 | ddp->max_rxsz, uinfo.max_rxsz); | |
1279 | return 0; | |
1280 | } | |
1281 | ||
1282 | static void cxgb3i_dev_close(struct t3cdev *t3dev) | |
1283 | { | |
1284 | struct cxgbi_device *cdev = cxgbi_device_find_by_lldev(t3dev); | |
1285 | ||
1286 | if (!cdev || cdev->flags & CXGBI_FLAG_ADAPTER_RESET) { | |
1287 | pr_info("0x%p close, f 0x%x.\n", cdev, cdev ? cdev->flags : 0); | |
1288 | return; | |
1289 | } | |
1290 | ||
1291 | cxgbi_device_unregister(cdev); | |
1292 | } | |

/**
 * cxgb3i_dev_open - init a t3 adapter structure and any h/w settings
 * @t3dev: t3cdev adapter
 */
static void cxgb3i_dev_open(struct t3cdev *t3dev)
{
	struct cxgbi_device *cdev = cxgbi_device_find_by_lldev(t3dev);
	struct adapter *adapter = tdev2adap(t3dev);
	int i, err;

	if (cdev) {
		pr_info("0x%p, updating.\n", cdev);
		return;
	}

	cdev = cxgbi_device_register(0, adapter->params.nports);
	if (!cdev) {
		pr_warn("device 0x%p register failed.\n", t3dev);
		return;
	}

	cdev->flags = CXGBI_FLAG_DEV_T3 | CXGBI_FLAG_IPV4_SET;
	cdev->lldev = t3dev;
	cdev->pdev = adapter->pdev;
	cdev->ports = adapter->port;
	cdev->nports = adapter->params.nports;
	cdev->mtus = adapter->params.mtus;
	cdev->nmtus = NMTUS;
	cdev->snd_win = cxgb3i_snd_win;
	cdev->rcv_win = cxgb3i_rcv_win;
	cdev->rx_credit_thres = cxgb3i_rx_credit_thres;
	cdev->skb_tx_rsvd = CXGB3I_TX_HEADER_LEN;
	cdev->skb_rx_extra = sizeof(struct cpl_iscsi_hdr_norss);
	cdev->dev_ddp_cleanup = t3_ddp_cleanup;
	cdev->itp = &cxgb3i_iscsi_transport;

	err = cxgb3i_ddp_init(cdev);
	if (err) {
		pr_info("0x%p ddp init failed\n", cdev);
		goto err_out;
	}

	err = cxgb3i_ofld_init(cdev);
	if (err) {
		pr_info("0x%p offload init failed\n", cdev);
		goto err_out;
	}

	err = cxgbi_hbas_add(cdev, CXGB3I_MAX_LUN, CXGBI_MAX_CONN,
				&cxgb3i_host_template, cxgb3i_stt);
	if (err)
		goto err_out;

	for (i = 0; i < cdev->nports; i++)
		cdev->hbas[i]->ipv4addr =
			cxgb3i_get_private_ipv4addr(cdev->ports[i]);

	pr_info("cdev 0x%p, f 0x%x, t3dev 0x%p open, err %d.\n",
		cdev, cdev ? cdev->flags : 0, t3dev, err);
	return;

err_out:
	cxgbi_device_unregister(cdev);
}

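/**
 * cxgb3i_dev_event_handler - offload status event handler
 * @t3dev: t3cdev adapter
 * @event: offload status event (OFFLOAD_STATUS_UP/DOWN)
 * @port: port id
 * Track adapter resets by setting or clearing CXGBI_FLAG_ADAPTER_RESET on the
 * corresponding cxgbi device.
 */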
static void cxgb3i_dev_event_handler(struct t3cdev *t3dev, u32 event, u32 port)
{
	struct cxgbi_device *cdev = cxgbi_device_find_by_lldev(t3dev);

	log_debug(1 << CXGBI_DBG_TOE,
		"0x%p, cdev 0x%p, event 0x%x, port 0x%x.\n",
		t3dev, cdev, event, port);
	if (!cdev)
		return;

	switch (event) {
	case OFFLOAD_STATUS_DOWN:
		cdev->flags |= CXGBI_FLAG_ADAPTER_RESET;
		break;
	case OFFLOAD_STATUS_UP:
		cdev->flags &= ~CXGBI_FLAG_ADAPTER_RESET;
		break;
	}
}

/**
 * cxgb3i_init_module - module init entry point
 *
 * initialize any driver wide global data structures and register itself
 * with the cxgb3 module
 */
static int __init cxgb3i_init_module(void)
{
	int rc;

	printk(KERN_INFO "%s", version);

	rc = cxgbi_iscsi_init(&cxgb3i_iscsi_transport, &cxgb3i_stt);
	if (rc < 0)
		return rc;

	cxgb3_register_client(&t3_client);
	return 0;
}

/**
 * cxgb3i_exit_module - module cleanup/exit entry point
 *
 * go through the driver's hba list and, for each hba, release any resources
 * held; then unregister the iscsi transport and the cxgb3 client.
 */
static void __exit cxgb3i_exit_module(void)
{
	cxgb3_unregister_client(&t3_client);
	cxgbi_device_unregister_all(CXGBI_FLAG_DEV_T3);
	cxgbi_iscsi_cleanup(&cxgb3i_iscsi_transport, &cxgb3i_stt);
}

module_init(cxgb3i_init_module);
module_exit(cxgb3i_exit_module);