Commit | Line | Data |
---|---|---|
85b4aa49 RL |
1 | /* |
2 | * Copyright(c) 2007 - 2008 Intel Corporation. All rights reserved. | |
3 | * | |
4 | * This program is free software; you can redistribute it and/or modify it | |
5 | * under the terms and conditions of the GNU General Public License, | |
6 | * version 2, as published by the Free Software Foundation. | |
7 | * | |
8 | * This program is distributed in the hope it will be useful, but WITHOUT | |
9 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | |
10 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | |
11 | * more details. | |
12 | * | |
13 | * You should have received a copy of the GNU General Public License along with | |
14 | * this program; if not, write to the Free Software Foundation, Inc., | |
15 | * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. | |
16 | * | |
17 | * Maintained at www.Open-FCoE.org | |
18 | */ | |
19 | ||
20 | #include <linux/module.h> | |
21 | #include <linux/version.h> | |
22 | #include <linux/kernel.h> | |
23 | #include <linux/spinlock.h> | |
24 | #include <linux/skbuff.h> | |
25 | #include <linux/netdevice.h> | |
26 | #include <linux/etherdevice.h> | |
27 | #include <linux/ethtool.h> | |
28 | #include <linux/if_ether.h> | |
29 | #include <linux/if_vlan.h> | |
30 | #include <linux/kthread.h> | |
31 | #include <linux/crc32.h> | |
32 | #include <linux/cpu.h> | |
33 | #include <linux/fs.h> | |
34 | #include <linux/sysfs.h> | |
35 | #include <linux/ctype.h> | |
36 | #include <scsi/scsi_tcq.h> | |
37 | #include <scsi/scsicam.h> | |
38 | #include <scsi/scsi_transport.h> | |
39 | #include <scsi/scsi_transport_fc.h> | |
40 | #include <net/rtnetlink.h> | |
41 | ||
42 | #include <scsi/fc/fc_encaps.h> | |
43 | ||
44 | #include <scsi/libfc.h> | |
45 | #include <scsi/fc_frame.h> | |
46 | #include <scsi/libfcoe.h> | |
47 | #include <scsi/fc_transport_fcoe.h> | |
48 | ||
static int debug_fcoe;	/* module-wide debug flag; enables verbose skb dumps */

/* max frames held in a lport's pending transmit queue before qfull is set */
#define FCOE_MAX_QUEUE_DEPTH  256

/* destination address mode */
#define FCOE_GW_ADDR_MODE           0x00	/* DA is the FCoE gateway MAC */
#define FCOE_FCOUI_ADDR_MODE        0x01	/* DA is FC_OUI-mapped from the FC D_ID */

#define FCOE_WORD_TO_BYTE  4	/* an FC word is 4 bytes */

MODULE_AUTHOR("Open-FCoE.org");
MODULE_DESCRIPTION("FCoE");
MODULE_LICENSE("GPL");

/* fcoe host list */
LIST_HEAD(fcoe_hostlist);
DEFINE_RWLOCK(fcoe_hostlist_lock);
DEFINE_TIMER(fcoe_timer, NULL, 0, 0);
/* per-cpu receive context; entries presumably filled when rx threads are
 * created — TODO confirm against the thread setup code (not in this chunk) */
struct fcoe_percpu_s *fcoe_percpu[NR_CPUS];


/* Function Prototypes */
static int fcoe_check_wait_queue(struct fc_lport *);
static void fcoe_insert_wait_queue_head(struct fc_lport *, struct sk_buff *);
static void fcoe_insert_wait_queue(struct fc_lport *, struct sk_buff *);
static void fcoe_recv_flogi(struct fcoe_softc *, struct fc_frame *, u8 *);
#ifdef CONFIG_HOTPLUG_CPU
static int fcoe_cpu_callback(struct notifier_block *, ulong, void *);
#endif /* CONFIG_HOTPLUG_CPU */
static int fcoe_device_notification(struct notifier_block *, ulong, void *);
static void fcoe_dev_setup(void);
static void fcoe_dev_cleanup(void);

/* notification function from net device */
static struct notifier_block fcoe_notifier = {
	.notifier_call = fcoe_device_notification,
};


#ifdef CONFIG_HOTPLUG_CPU
/* cpu hotplug notifications: allocate/free per-cpu stats blocks */
static struct notifier_block fcoe_cpu_notifier = {
	.notifier_call = fcoe_cpu_callback,
};
92 | ||
93 | /** | |
34f42a07 | 94 | * fcoe_create_percpu_data() - creates the associated cpu data |
85b4aa49 RL |
95 | * @cpu: index for the cpu where fcoe cpu data will be created |
96 | * | |
97 | * create percpu stats block, from cpu add notifier | |
98 | * | |
99 | * Returns: none | |
34f42a07 | 100 | */ |
85b4aa49 RL |
101 | static void fcoe_create_percpu_data(int cpu) |
102 | { | |
103 | struct fc_lport *lp; | |
104 | struct fcoe_softc *fc; | |
105 | ||
106 | write_lock_bh(&fcoe_hostlist_lock); | |
107 | list_for_each_entry(fc, &fcoe_hostlist, list) { | |
108 | lp = fc->lp; | |
109 | if (lp->dev_stats[cpu] == NULL) | |
110 | lp->dev_stats[cpu] = | |
111 | kzalloc(sizeof(struct fcoe_dev_stats), | |
112 | GFP_KERNEL); | |
113 | } | |
114 | write_unlock_bh(&fcoe_hostlist_lock); | |
115 | } | |
116 | ||
117 | /** | |
34f42a07 | 118 | * fcoe_destroy_percpu_data() - destroys the associated cpu data |
85b4aa49 RL |
119 | * @cpu: index for the cpu where fcoe cpu data will destroyed |
120 | * | |
121 | * destroy percpu stats block called by cpu add/remove notifier | |
122 | * | |
123 | * Retuns: none | |
34f42a07 | 124 | */ |
85b4aa49 RL |
125 | static void fcoe_destroy_percpu_data(int cpu) |
126 | { | |
127 | struct fc_lport *lp; | |
128 | struct fcoe_softc *fc; | |
129 | ||
130 | write_lock_bh(&fcoe_hostlist_lock); | |
131 | list_for_each_entry(fc, &fcoe_hostlist, list) { | |
132 | lp = fc->lp; | |
133 | kfree(lp->dev_stats[cpu]); | |
134 | lp->dev_stats[cpu] = NULL; | |
135 | } | |
136 | write_unlock_bh(&fcoe_hostlist_lock); | |
137 | } | |
138 | ||
/**
 * fcoe_cpu_callback() - fcoe cpu hotplug event callback
 * @nfb: callback data block
 * @action: event triggering the callback
 * @hcpu: index for the cpu of this event
 *
 * this creates or destroys per cpu data for fcoe
 *
 * Returns NOTIFY_OK always.
 */
static int fcoe_cpu_callback(struct notifier_block *nfb, unsigned long action,
			     void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;

	switch (action) {
	case CPU_ONLINE:
		/* a cpu came up: give every lport a stats block for it */
		fcoe_create_percpu_data(cpu);
		break;
	case CPU_DEAD:
		/* a cpu went away: release its stats blocks */
		fcoe_destroy_percpu_data(cpu);
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}
166 | #endif /* CONFIG_HOTPLUG_CPU */ | |
167 | ||
/**
 * fcoe_rcv() - this is the fcoe receive function called by NET_RX_SOFTIRQ
 * @skb: the receive skb
 * @dev: associated net device
 * @ptype: context
 * @olddev: last device
 *
 * this function will receive the packet and build fc frame and queue it
 * to a per-cpu receive thread for further processing
 *
 * Returns: 0 for success
 */
int fcoe_rcv(struct sk_buff *skb, struct net_device *dev,
	     struct packet_type *ptype, struct net_device *olddev)
{
	struct fc_lport *lp;
	struct fcoe_rcv_info *fr;
	struct fcoe_softc *fc;
	struct fcoe_dev_stats *stats;
	struct fc_frame_header *fh;
	unsigned short oxid;
	int cpu_idx;
	struct fcoe_percpu_s *fps;

	/* the packet_type is embedded in the softc; recover the owner */
	fc = container_of(ptype, struct fcoe_softc, fcoe_packet_type);
	lp = fc->lp;
	if (unlikely(lp == NULL)) {
		FC_DBG("cannot find hba structure");
		goto err2;
	}

	if (unlikely(debug_fcoe)) {
		FC_DBG("skb_info: len:%d data_len:%d head:%p data:%p tail:%p "
		       "end:%p sum:%d dev:%s", skb->len, skb->data_len,
		       skb->head, skb->data, skb_tail_pointer(skb),
		       skb_end_pointer(skb), skb->csum,
		       skb->dev ? skb->dev->name : "<NULL>");

	}

	/* check for FCOE packet type */
	if (unlikely(eth_hdr(skb)->h_proto != htons(ETH_P_FCOE))) {
		FC_DBG("wrong FC type frame");
		goto err;
	}

	/*
	 * Check for minimum frame length, and make sure required FCoE
	 * and FC headers are pulled into the linear data area.
	 */
	if (unlikely((skb->len < FCOE_MIN_FRAME) ||
	    !pskb_may_pull(skb, FCOE_HEADER_LEN)))
		goto err;

	skb_set_transport_header(skb, sizeof(struct fcoe_hdr));
	fh = (struct fc_frame_header *) skb_transport_header(skb);

	oxid = ntohs(fh->fh_ox_id);

	fr = fcoe_dev_from_skb(skb);
	fr->fr_dev = lp;
	fr->ptype = ptype;
	cpu_idx = 0;
#ifdef CONFIG_SMP
	/*
	 * The incoming frame exchange id(oxid) is ANDed with num of online
	 * cpu bits to get cpu_idx and then this cpu_idx is used for selecting
	 * a per cpu kernel thread from fcoe_percpu. In case the cpu is
	 * offline or no kernel thread for derived cpu_idx then cpu_idx is
	 * initialize to first online cpu index.
	 *
	 * NOTE(review): the AND only distributes evenly when the online cpu
	 * count is a power of two; for other counts the high indexes are
	 * never selected — confirm this is acceptable.
	 */
	cpu_idx = oxid & (num_online_cpus() - 1);
	if (!fcoe_percpu[cpu_idx] || !cpu_online(cpu_idx))
		cpu_idx = first_cpu(cpu_online_map);
#endif
	/* NOTE(review): assumes fcoe_percpu[] has an entry for the fallback
	 * cpu — verify against the rx thread creation path */
	fps = fcoe_percpu[cpu_idx];

	spin_lock_bh(&fps->fcoe_rx_list.lock);
	__skb_queue_tail(&fps->fcoe_rx_list, skb);
	/* wake the thread only on the empty->non-empty transition */
	if (fps->fcoe_rx_list.qlen == 1)
		wake_up_process(fps->thread);

	spin_unlock_bh(&fps->fcoe_rx_list.lock);

	return 0;
err:
#ifdef CONFIG_SMP
	stats = lp->dev_stats[smp_processor_id()];
#else
	stats = lp->dev_stats[0];
#endif
	if (stats)
		stats->ErrorFrames++;

err2:
	kfree_skb(skb);
	return -1;
}
EXPORT_SYMBOL_GPL(fcoe_rcv);
266 | ||
/**
 * fcoe_start_io() - pass to netdev to start xmit for fcoe
 * @skb: the skb to be xmitted
 *
 * Returns: 0 for success, otherwise the dev_queue_xmit() error code
 */
static inline int fcoe_start_io(struct sk_buff *skb)
{
	int err;

	/* Hold an extra reference so a failed xmit leaves the caller a
	 * valid skb to requeue. */
	skb_get(skb);
	err = dev_queue_xmit(skb);
	if (err)
		return err;
	/* Transmit accepted: drop the extra reference we took above. */
	kfree_skb(skb);
	return 0;
}
284 | ||
/**
 * fcoe_get_paged_crc_eof() - in case we need to alloc a page for crc_eof
 * @skb: the skb to be xmitted
 * @tlen: total length of the crc/eof trailer to append
 *
 * Appends @tlen bytes of the shared per-cpu crc_eof page to @skb as a page
 * fragment.  The page is reference-counted per use; once its offset reaches
 * the end of the page it is dropped and a fresh page is allocated on the
 * next call.
 *
 * Returns: 0 for success, -ENOMEM if the page allocation fails
 */
static int fcoe_get_paged_crc_eof(struct sk_buff *skb, int tlen)
{
	struct fcoe_percpu_s *fps;
	struct page *page;
	int cpu_idx;

	cpu_idx = get_cpu();	/* pin to this cpu while using its state */
	fps = fcoe_percpu[cpu_idx];
	page = fps->crc_eof_page;
	if (!page) {
		page = alloc_page(GFP_ATOMIC);
		if (!page) {
			put_cpu();
			return -ENOMEM;
		}
		fps->crc_eof_page = page;
		WARN_ON(fps->crc_eof_offset != 0);
	}

	/* each skb fragment holds its own reference to the page */
	get_page(page);
	skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags, page,
			   fps->crc_eof_offset, tlen);
	skb->len += tlen;
	skb->data_len += tlen;
	skb->truesize += tlen;
	fps->crc_eof_offset += sizeof(struct fcoe_crc_eof);

	/* page exhausted: drop the cached reference; outstanding fragments
	 * keep the page alive through their own references */
	if (fps->crc_eof_offset >= PAGE_SIZE) {
		fps->crc_eof_page = NULL;
		fps->crc_eof_offset = 0;
		put_page(page);
	}
	put_cpu();
	return 0;
}
327 | ||
/**
 * fcoe_fc_crc() - calculates FC CRC in this fcoe skb
 * @fp: the fc_frame containing data to be checksummed
 *
 * This uses crc32() over the linear skb data and every page fragment,
 * mapping each fragment one page window at a time.
 * Return : 32 bit crc
 */
u32 fcoe_fc_crc(struct fc_frame *fp)
{
	struct sk_buff *skb = fp_skb(fp);
	struct skb_frag_struct *frag;
	unsigned char *data;
	unsigned long off, len, clen;
	u32 crc;
	unsigned i;

	/* CRC of the linear portion first */
	crc = crc32(~0, skb->data, skb_headlen(skb));

	/* fold in each page fragment, clamped to page boundaries */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		frag = &skb_shinfo(skb)->frags[i];
		off = frag->page_offset;
		len = frag->size;
		while (len > 0) {
			/* bytes remaining in the current page */
			clen = min(len, PAGE_SIZE - (off & ~PAGE_MASK));
			data = kmap_atomic(frag->page + (off >> PAGE_SHIFT),
					   KM_SKB_DATA_SOFTIRQ);
			crc = crc32(crc, data + (off & ~PAGE_MASK), clen);
			kunmap_atomic(data, KM_SKB_DATA_SOFTIRQ);
			off += clen;
			len -= clen;
		}
	}
	return crc;
}
EXPORT_SYMBOL_GPL(fcoe_fc_crc);
363 | ||
/**
 * fcoe_xmit() - FCoE frame transmit function
 * @lp: the associated local port
 * @fp: the fc_frame to be transmitted
 *
 * Encapsulates the FC frame in an Ethernet + FCoE header, appends the
 * CRC/EOF trailer, and hands the skb to the network device.  On transmit
 * failure the skb is parked on the lport's pending queue.
 *
 * Return : 0 for success
 */
int fcoe_xmit(struct fc_lport *lp, struct fc_frame *fp)
{
	int wlen, rc = 0;
	u32 crc;
	struct ethhdr *eh;
	struct fcoe_crc_eof *cp;
	struct sk_buff *skb;
	struct fcoe_dev_stats *stats;
	struct fc_frame_header *fh;
	unsigned int hlen;		/* header length implies the version */
	unsigned int tlen;		/* trailer length */
	unsigned int elen;		/* eth header, may include vlan */
	int flogi_in_progress = 0;
	struct fcoe_softc *fc;
	u8 sof, eof;
	struct fcoe_hdr *hp;

	/* FC frames are always a whole number of 32-bit words */
	WARN_ON((fr_len(fp) % sizeof(u32)) != 0);

	fc = lport_priv(lp);
	/*
	 * if it is a flogi then we need to learn gw-addr
	 * and my own fcid
	 */
	fh = fc_frame_header_get(fp);
	if (unlikely(fh->fh_r_ctl == FC_RCTL_ELS_REQ)) {
		if (fc_frame_payload_op(fp) == ELS_FLOGI) {
			fc->flogi_oxid = ntohs(fh->fh_ox_id);
			fc->address_mode = FCOE_FCOUI_ADDR_MODE;
			fc->flogi_progress = 1;
			flogi_in_progress = 1;
		} else if (fc->flogi_progress && ntoh24(fh->fh_s_id) != 0) {
			/*
			 * Here we must've gotten an SID by accepting an FLOGI
			 * from a point-to-point connection. Switch to using
			 * the source mac based on the SID. The destination
			 * MAC in this case would have been set by receiving
			 * the FLOGI.
			 */
			fc_fcoe_set_mac(fc->data_src_addr, fh->fh_s_id);
			fc->flogi_progress = 0;
		}
	}

	skb = fp_skb(fp);
	sof = fr_sof(fp);
	eof = fr_eof(fp);

	/* account for a VLAN tag in the Ethernet header if needed */
	elen = (fc->real_dev->priv_flags & IFF_802_1Q_VLAN) ?
		sizeof(struct vlan_ethhdr) : sizeof(struct ethhdr);
	hlen = sizeof(struct fcoe_hdr);
	tlen = sizeof(struct fcoe_crc_eof);
	/* payload word count for the Tx stats below */
	wlen = (skb->len - tlen + sizeof(crc)) / FCOE_WORD_TO_BYTE;

	/* crc offload */
	if (likely(lp->crc_offload)) {
		/*
		 * NOTE(review): CHECKSUM_COMPLETE is normally an RX-side
		 * value; TX checksum offload typically uses
		 * CHECKSUM_PARTIAL — confirm against the LLDs this driver
		 * targets.
		 */
		skb->ip_summed = CHECKSUM_COMPLETE;
		skb->csum_start = skb_headroom(skb);
		skb->csum_offset = skb->len;
		crc = 0;
	} else {
		skb->ip_summed = CHECKSUM_NONE;
		crc = fcoe_fc_crc(fp);
	}

	/* copy fc crc and eof to the skb buff */
	if (skb_is_nonlinear(skb)) {
		skb_frag_t *frag;
		if (fcoe_get_paged_crc_eof(skb, tlen)) {
			kfree_skb(skb);
			return -ENOMEM;
		}
		/* trailer lives in the fragment just appended */
		frag = &skb_shinfo(skb)->frags[skb_shinfo(skb)->nr_frags - 1];
		cp = kmap_atomic(frag->page, KM_SKB_DATA_SOFTIRQ)
			+ frag->page_offset;
	} else {
		cp = (struct fcoe_crc_eof *)skb_put(skb, tlen);
	}

	memset(cp, 0, sizeof(*cp));
	cp->fcoe_eof = eof;
	cp->fcoe_crc32 = cpu_to_le32(~crc);

	if (skb_is_nonlinear(skb)) {
		kunmap_atomic(cp, KM_SKB_DATA_SOFTIRQ);
		cp = NULL;
	}

	/* adjust skb network/transport offsets to match mac/fcoe/fc */
	skb_push(skb, elen + hlen);
	skb_reset_mac_header(skb);
	skb_reset_network_header(skb);
	skb->mac_len = elen;
	skb->protocol = htons(ETH_P_802_3);
	skb->dev = fc->real_dev;

	/* fill up mac and fcoe headers */
	eh = eth_hdr(skb);
	eh->h_proto = htons(ETH_P_FCOE);
	if (fc->address_mode == FCOE_FCOUI_ADDR_MODE)
		fc_fcoe_set_mac(eh->h_dest, fh->fh_d_id);
	else
		/* insert GW address */
		memcpy(eh->h_dest, fc->dest_addr, ETH_ALEN);

	/* use the control MAC until FLOGI completes */
	if (unlikely(flogi_in_progress))
		memcpy(eh->h_source, fc->ctl_src_addr, ETH_ALEN);
	else
		memcpy(eh->h_source, fc->data_src_addr, ETH_ALEN);

	hp = (struct fcoe_hdr *)(eh + 1);
	memset(hp, 0, sizeof(*hp));
	if (FC_FCOE_VER)
		FC_FCOE_ENCAPS_VER(hp, FC_FCOE_VER);
	hp->fcoe_sof = sof;

	/* update tx stats: regardless if LLD fails */
	stats = lp->dev_stats[smp_processor_id()];
	if (stats) {
		stats->TxFrames++;
		stats->TxWords += wlen;
	}

	/* send down to lld */
	fr_dev(fp) = lp;
	/* drain any previously queued frames first to preserve ordering */
	if (fc->fcoe_pending_queue.qlen)
		rc = fcoe_check_wait_queue(lp);

	if (rc == 0)
		rc = fcoe_start_io(skb);

	if (rc) {
		/* xmit failed: park the skb and possibly flag congestion */
		fcoe_insert_wait_queue(lp, skb);
		if (fc->fcoe_pending_queue.qlen > FCOE_MAX_QUEUE_DEPTH)
			lp->qfull = 1;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(fcoe_xmit);
511 | ||
34f42a07 RL |
512 | /** |
513 | * fcoe_percpu_receive_thread() - recv thread per cpu | |
85b4aa49 RL |
514 | * @arg: ptr to the fcoe per cpu struct |
515 | * | |
516 | * Return: 0 for success | |
85b4aa49 RL |
517 | */ |
518 | int fcoe_percpu_receive_thread(void *arg) | |
519 | { | |
520 | struct fcoe_percpu_s *p = arg; | |
521 | u32 fr_len; | |
522 | struct fc_lport *lp; | |
523 | struct fcoe_rcv_info *fr; | |
524 | struct fcoe_dev_stats *stats; | |
525 | struct fc_frame_header *fh; | |
526 | struct sk_buff *skb; | |
527 | struct fcoe_crc_eof crc_eof; | |
528 | struct fc_frame *fp; | |
529 | u8 *mac = NULL; | |
530 | struct fcoe_softc *fc; | |
531 | struct fcoe_hdr *hp; | |
532 | ||
533 | set_user_nice(current, 19); | |
534 | ||
535 | while (!kthread_should_stop()) { | |
536 | ||
537 | spin_lock_bh(&p->fcoe_rx_list.lock); | |
538 | while ((skb = __skb_dequeue(&p->fcoe_rx_list)) == NULL) { | |
539 | set_current_state(TASK_INTERRUPTIBLE); | |
540 | spin_unlock_bh(&p->fcoe_rx_list.lock); | |
541 | schedule(); | |
542 | set_current_state(TASK_RUNNING); | |
543 | if (kthread_should_stop()) | |
544 | return 0; | |
545 | spin_lock_bh(&p->fcoe_rx_list.lock); | |
546 | } | |
547 | spin_unlock_bh(&p->fcoe_rx_list.lock); | |
548 | fr = fcoe_dev_from_skb(skb); | |
549 | lp = fr->fr_dev; | |
550 | if (unlikely(lp == NULL)) { | |
551 | FC_DBG("invalid HBA Structure"); | |
552 | kfree_skb(skb); | |
553 | continue; | |
554 | } | |
555 | ||
556 | stats = lp->dev_stats[smp_processor_id()]; | |
557 | ||
558 | if (unlikely(debug_fcoe)) { | |
559 | FC_DBG("skb_info: len:%d data_len:%d head:%p data:%p " | |
560 | "tail:%p end:%p sum:%d dev:%s", | |
561 | skb->len, skb->data_len, | |
562 | skb->head, skb->data, skb_tail_pointer(skb), | |
563 | skb_end_pointer(skb), skb->csum, | |
564 | skb->dev ? skb->dev->name : "<NULL>"); | |
565 | } | |
566 | ||
567 | /* | |
568 | * Save source MAC address before discarding header. | |
569 | */ | |
570 | fc = lport_priv(lp); | |
571 | if (unlikely(fc->flogi_progress)) | |
572 | mac = eth_hdr(skb)->h_source; | |
573 | ||
574 | if (skb_is_nonlinear(skb)) | |
575 | skb_linearize(skb); /* not ideal */ | |
576 | ||
577 | /* | |
578 | * Frame length checks and setting up the header pointers | |
579 | * was done in fcoe_rcv already. | |
580 | */ | |
581 | hp = (struct fcoe_hdr *) skb_network_header(skb); | |
582 | fh = (struct fc_frame_header *) skb_transport_header(skb); | |
583 | ||
584 | if (unlikely(FC_FCOE_DECAPS_VER(hp) != FC_FCOE_VER)) { | |
585 | if (stats) { | |
586 | if (stats->ErrorFrames < 5) | |
587 | FC_DBG("unknown FCoE version %x", | |
588 | FC_FCOE_DECAPS_VER(hp)); | |
589 | stats->ErrorFrames++; | |
590 | } | |
591 | kfree_skb(skb); | |
592 | continue; | |
593 | } | |
594 | ||
595 | skb_pull(skb, sizeof(struct fcoe_hdr)); | |
596 | fr_len = skb->len - sizeof(struct fcoe_crc_eof); | |
597 | ||
598 | if (stats) { | |
599 | stats->RxFrames++; | |
600 | stats->RxWords += fr_len / FCOE_WORD_TO_BYTE; | |
601 | } | |
602 | ||
603 | fp = (struct fc_frame *)skb; | |
604 | fc_frame_init(fp); | |
605 | fr_dev(fp) = lp; | |
606 | fr_sof(fp) = hp->fcoe_sof; | |
607 | ||
608 | /* Copy out the CRC and EOF trailer for access */ | |
609 | if (skb_copy_bits(skb, fr_len, &crc_eof, sizeof(crc_eof))) { | |
610 | kfree_skb(skb); | |
611 | continue; | |
612 | } | |
613 | fr_eof(fp) = crc_eof.fcoe_eof; | |
614 | fr_crc(fp) = crc_eof.fcoe_crc32; | |
615 | if (pskb_trim(skb, fr_len)) { | |
616 | kfree_skb(skb); | |
617 | continue; | |
618 | } | |
619 | ||
620 | /* | |
621 | * We only check CRC if no offload is available and if it is | |
622 | * it's solicited data, in which case, the FCP layer would | |
623 | * check it during the copy. | |
624 | */ | |
625 | if (lp->crc_offload) | |
626 | fr_flags(fp) &= ~FCPHF_CRC_UNCHECKED; | |
627 | else | |
628 | fr_flags(fp) |= FCPHF_CRC_UNCHECKED; | |
629 | ||
630 | fh = fc_frame_header_get(fp); | |
631 | if (fh->fh_r_ctl == FC_RCTL_DD_SOL_DATA && | |
632 | fh->fh_type == FC_TYPE_FCP) { | |
633 | fc_exch_recv(lp, lp->emp, fp); | |
634 | continue; | |
635 | } | |
636 | if (fr_flags(fp) & FCPHF_CRC_UNCHECKED) { | |
637 | if (le32_to_cpu(fr_crc(fp)) != | |
638 | ~crc32(~0, skb->data, fr_len)) { | |
639 | if (debug_fcoe || stats->InvalidCRCCount < 5) | |
640 | printk(KERN_WARNING "fcoe: dropping " | |
641 | "frame with CRC error\n"); | |
642 | stats->InvalidCRCCount++; | |
643 | stats->ErrorFrames++; | |
644 | fc_frame_free(fp); | |
645 | continue; | |
646 | } | |
647 | fr_flags(fp) &= ~FCPHF_CRC_UNCHECKED; | |
648 | } | |
649 | /* non flogi and non data exchanges are handled here */ | |
650 | if (unlikely(fc->flogi_progress)) | |
651 | fcoe_recv_flogi(fc, fp, mac); | |
652 | fc_exch_recv(lp, lp->emp, fp); | |
653 | } | |
654 | return 0; | |
655 | } | |
656 | ||
/**
 * fcoe_recv_flogi() - flogi receive function
 * @fc: associated fcoe_softc
 * @fp: the received frame
 * @sa: the source address of this flogi
 *
 * This is responsible to parse the flogi response and sets the corresponding
 * mac address for the initiator, either OUI based or GW based.
 *
 * Returns: none
 */
static void fcoe_recv_flogi(struct fcoe_softc *fc, struct fc_frame *fp, u8 *sa)
{
	struct fc_frame_header *fh;
	u8 op;

	fh = fc_frame_header_get(fp);
	if (fh->fh_type != FC_TYPE_ELS)
		return;
	op = fc_frame_payload_op(fp);
	if (op == ELS_LS_ACC && fh->fh_r_ctl == FC_RCTL_ELS_REP &&
	    fc->flogi_oxid == ntohs(fh->fh_ox_id)) {
		/*
		 * FLOGI accepted.
		 * If the src mac addr is FC_OUI-based, then we mark the
		 * address_mode flag to use FC_OUI-based Ethernet DA.
		 * Otherwise we use the FCoE gateway addr
		 */
		if (!compare_ether_addr(sa, (u8[6]) FC_FCOE_FLOGI_MAC)) {
			fc->address_mode = FCOE_FCOUI_ADDR_MODE;
		} else {
			memcpy(fc->dest_addr, sa, ETH_ALEN);
			fc->address_mode = FCOE_GW_ADDR_MODE;
		}

		/*
		 * Remove any previously-set unicast MAC filter.
		 * Add secondary FCoE MAC address filter for our OUI.
		 */
		rtnl_lock();
		/* only delete if a non-zero filter address was set before */
		if (compare_ether_addr(fc->data_src_addr, (u8[6]) { 0 }))
			dev_unicast_delete(fc->real_dev, fc->data_src_addr,
					   ETH_ALEN);
		fc_fcoe_set_mac(fc->data_src_addr, fh->fh_d_id);
		dev_unicast_add(fc->real_dev, fc->data_src_addr, ETH_ALEN);
		rtnl_unlock();

		fc->flogi_progress = 0;
	} else if (op == ELS_FLOGI && fh->fh_r_ctl == FC_RCTL_ELS_REQ && sa) {
		/*
		 * Save source MAC for point-to-point responses.
		 */
		memcpy(fc->dest_addr, sa, ETH_ALEN);
		fc->address_mode = FCOE_GW_ADDR_MODE;
	}
}
713 | ||
/**
 * fcoe_watchdog() - fcoe timer callback
 * @vp: unused timer argument
 *
 * This checks the pending queue length for fcoe and clears the lport
 * qfull flag once the queue has drained below FCOE_MAX_QUEUE_DEPTH.
 * This is done for all fc_lport on the fcoe_hostlist.  The timer
 * re-arms itself to fire again in one second.
 *
 * Returns: none
 */
void fcoe_watchdog(ulong vp)
{
	struct fc_lport *lp;
	struct fcoe_softc *fc;
	int qfilled = 0;

	read_lock(&fcoe_hostlist_lock);
	list_for_each_entry(fc, &fcoe_hostlist, list) {
		lp = fc->lp;
		if (lp) {
			if (fc->fcoe_pending_queue.qlen > FCOE_MAX_QUEUE_DEPTH)
				qfilled = 1;
			/*
			 * NOTE(review): qfilled is never reset inside the
			 * loop, so once any lport is congested the clear
			 * path runs for every subsequent lport as well —
			 * confirm this is intended.
			 */
			if (fcoe_check_wait_queue(lp) < FCOE_MAX_QUEUE_DEPTH) {
				if (qfilled)
					lp->qfull = 0;
			}
		}
	}
	read_unlock(&fcoe_hostlist_lock);

	/* re-arm for one second from now */
	fcoe_timer.expires = jiffies + (1 * HZ);
	add_timer(&fcoe_timer);
}
747 | ||
748 | ||
/**
 * fcoe_check_wait_queue() - attempt to drain the fcoe pending xmit queue
 * @lp: the fc_lport whose pending queue is drained
 *
 * This dequeues the head of the pending queue and calls fcoe_start_io()
 * for each skb; if a transmit fails the skb is put back at the head of
 * the queue and the error is returned so the caller can retry later.
 *
 * The wait_queue is used when the skb transmit fails. skb will go
 * in the wait_queue which will be emptied by the timer function OR
 * by the next skb transmit.
 *
 * Returns: the fcoe_start_io() error on transmit failure, otherwise the
 * remaining queue length (0 when fully drained)
 */
static int fcoe_check_wait_queue(struct fc_lport *lp)
{
	int rc;
	struct sk_buff *skb;
	struct fcoe_softc *fc;

	fc = lport_priv(lp);
	spin_lock_bh(&fc->fcoe_pending_queue.lock);

	/*
	 * if interface pending queue full then set qfull in lport.
	 */
	if (fc->fcoe_pending_queue.qlen > FCOE_MAX_QUEUE_DEPTH)
		lp->qfull = 1;
	if (fc->fcoe_pending_queue.qlen) {
		while ((skb = __skb_dequeue(&fc->fcoe_pending_queue)) != NULL) {
			/* drop the lock across the actual transmit */
			spin_unlock_bh(&fc->fcoe_pending_queue.lock);
			rc = fcoe_start_io(skb);
			if (rc) {
				/* requeue at the head to preserve ordering */
				fcoe_insert_wait_queue_head(lp, skb);
				return rc;
			}
			spin_lock_bh(&fc->fcoe_pending_queue.lock);
		}
		if (fc->fcoe_pending_queue.qlen < FCOE_MAX_QUEUE_DEPTH)
			lp->qfull = 0;
	}
	spin_unlock_bh(&fc->fcoe_pending_queue.lock);
	return fc->fcoe_pending_queue.qlen;
}
795 | ||
796 | /** | |
34f42a07 | 797 | * fcoe_insert_wait_queue_head() - puts skb to fcoe pending queue head |
85b4aa49 RL |
798 | * @lp: the fc_port for this skb |
799 | * @skb: the associated skb to be xmitted | |
800 | * | |
801 | * Returns: none | |
34f42a07 | 802 | */ |
85b4aa49 RL |
803 | static void fcoe_insert_wait_queue_head(struct fc_lport *lp, |
804 | struct sk_buff *skb) | |
805 | { | |
806 | struct fcoe_softc *fc; | |
807 | ||
fc47ff6b | 808 | fc = lport_priv(lp); |
85b4aa49 RL |
809 | spin_lock_bh(&fc->fcoe_pending_queue.lock); |
810 | __skb_queue_head(&fc->fcoe_pending_queue, skb); | |
811 | spin_unlock_bh(&fc->fcoe_pending_queue.lock); | |
812 | } | |
813 | ||
814 | /** | |
34f42a07 | 815 | * fcoe_insert_wait_queue() - put the skb into fcoe pending queue tail |
85b4aa49 RL |
816 | * @lp: the fc_port for this skb |
817 | * @skb: the associated skb to be xmitted | |
818 | * | |
819 | * Returns: none | |
34f42a07 | 820 | */ |
85b4aa49 RL |
821 | static void fcoe_insert_wait_queue(struct fc_lport *lp, |
822 | struct sk_buff *skb) | |
823 | { | |
824 | struct fcoe_softc *fc; | |
825 | ||
fc47ff6b | 826 | fc = lport_priv(lp); |
85b4aa49 RL |
827 | spin_lock_bh(&fc->fcoe_pending_queue.lock); |
828 | __skb_queue_tail(&fc->fcoe_pending_queue, skb); | |
829 | spin_unlock_bh(&fc->fcoe_pending_queue.lock); | |
830 | } | |
831 | ||
832 | /** | |
34f42a07 RL |
833 | * fcoe_dev_setup() - setup link change notification interface |
834 | */ | |
835 | static void fcoe_dev_setup() | |
85b4aa49 RL |
836 | { |
837 | /* | |
838 | * here setup a interface specific wd time to | |
839 | * monitor the link state | |
840 | */ | |
841 | register_netdevice_notifier(&fcoe_notifier); | |
842 | } | |
843 | ||
/**
 * fcoe_dev_cleanup() - cleanup link change notification interface
 */
static void fcoe_dev_cleanup(void)
{
	unregister_netdevice_notifier(&fcoe_notifier);
}
851 | ||
/**
 * fcoe_device_notification() - netdev event notification callback
 * @notifier: context of the notification
 * @event: type of event
 * @ptr: the net_device the event applies to
 *
 * This function is called by the ethernet driver in case of link change event
 *
 * Returns: NOTIFY_OK if the device belongs to an fcoe instance,
 * NOTIFY_DONE otherwise
 */
static int fcoe_device_notification(struct notifier_block *notifier,
				    ulong event, void *ptr)
{
	struct fc_lport *lp = NULL;
	struct net_device *real_dev = ptr;
	struct fcoe_softc *fc;
	struct fcoe_dev_stats *stats;
	u32 new_link_up;
	u32 mfs;
	int rc = NOTIFY_OK;

	/* find the fcoe instance bound to this netdev, if any */
	read_lock(&fcoe_hostlist_lock);
	list_for_each_entry(fc, &fcoe_hostlist, list) {
		if (fc->real_dev == real_dev) {
			lp = fc->lp;
			break;
		}
	}
	read_unlock(&fcoe_hostlist_lock);
	if (lp == NULL) {
		rc = NOTIFY_DONE;
		goto out;
	}

	new_link_up = lp->link_up;
	switch (event) {
	case NETDEV_DOWN:
	case NETDEV_GOING_DOWN:
		new_link_up = 0;
		break;
	case NETDEV_UP:
	case NETDEV_CHANGE:
		new_link_up = !fcoe_link_ok(lp);
		break;
	case NETDEV_CHANGEMTU:
		/* re-derive the FC max frame size from the new MTU */
		mfs = fc->real_dev->mtu -
			(sizeof(struct fcoe_hdr) +
			 sizeof(struct fcoe_crc_eof));
		if (mfs >= FC_MIN_MAX_FRAME)
			fc_set_mfs(lp, mfs);
		new_link_up = !fcoe_link_ok(lp);
		break;
	case NETDEV_REGISTER:
		break;
	default:
		FC_DBG("unknown event %ld call", event);
	}
	/* propagate a link state transition to libfc */
	if (lp->link_up != new_link_up) {
		if (new_link_up)
			fc_linkup(lp);
		else {
			stats = lp->dev_stats[smp_processor_id()];
			if (stats)
				stats->LinkFailureCount++;
			fc_linkdown(lp);
			fcoe_clean_pending_queue(lp);
		}
	}
out:
	return rc;
}
923 | ||
924 | /** | |
34f42a07 | 925 | * fcoe_if_to_netdev() - parse a name buffer to get netdev |
85b4aa49 RL |
926 | * @ifname: fixed array for output parsed ifname |
927 | * @buffer: incoming buffer to be copied | |
928 | * | |
929 | * Returns: NULL or ptr to netdeive | |
34f42a07 | 930 | */ |
85b4aa49 RL |
931 | static struct net_device *fcoe_if_to_netdev(const char *buffer) |
932 | { | |
933 | char *cp; | |
934 | char ifname[IFNAMSIZ + 2]; | |
935 | ||
936 | if (buffer) { | |
937 | strlcpy(ifname, buffer, IFNAMSIZ); | |
938 | cp = ifname + strlen(ifname); | |
939 | while (--cp >= ifname && *cp == '\n') | |
940 | *cp = '\0'; | |
941 | return dev_get_by_name(&init_net, ifname); | |
942 | } | |
943 | return NULL; | |
944 | } | |
945 | ||
946 | /** | |
34f42a07 | 947 | * fcoe_netdev_to_module_owner() - finds out the nic drive moddule of the netdev |
85b4aa49 RL |
948 | * @netdev: the target netdev |
949 | * | |
950 | * Returns: ptr to the struct module, NULL for failure | |
34f42a07 | 951 | */ |
b2ab99c9 RL |
952 | static struct module * |
953 | fcoe_netdev_to_module_owner(const struct net_device *netdev) | |
85b4aa49 RL |
954 | { |
955 | struct device *dev; | |
956 | ||
957 | if (!netdev) | |
958 | return NULL; | |
959 | ||
960 | dev = netdev->dev.parent; | |
961 | if (!dev) | |
962 | return NULL; | |
963 | ||
964 | if (!dev->driver) | |
965 | return NULL; | |
966 | ||
967 | return dev->driver->owner; | |
968 | } | |
969 | ||
970 | /** | |
34f42a07 | 971 | * fcoe_ethdrv_get() - Hold the Ethernet driver |
85b4aa49 RL |
972 | * @netdev: the target netdev |
973 | * | |
34f42a07 RL |
974 | * Holds the Ethernet driver module by try_module_get() for |
975 | * the corresponding netdev. | |
976 | * | |
85b4aa49 | 977 | * Returns: 0 for succsss |
34f42a07 | 978 | */ |
85b4aa49 RL |
979 | static int fcoe_ethdrv_get(const struct net_device *netdev) |
980 | { | |
981 | struct module *owner; | |
982 | ||
983 | owner = fcoe_netdev_to_module_owner(netdev); | |
984 | if (owner) { | |
56b854bb JB |
985 | printk(KERN_DEBUG "fcoe:hold driver module %s for %s\n", |
986 | module_name(owner), netdev->name); | |
85b4aa49 RL |
987 | return try_module_get(owner); |
988 | } | |
989 | return -ENODEV; | |
990 | } | |
991 | ||
992 | /** | |
34f42a07 | 993 | * fcoe_ethdrv_put() - Release the Ethernet driver |
85b4aa49 RL |
994 | * @netdev: the target netdev |
995 | * | |
34f42a07 RL |
996 | * Releases the Ethernet driver module by module_put for |
997 | * the corresponding netdev. | |
998 | * | |
85b4aa49 | 999 | * Returns: 0 for succsss |
34f42a07 | 1000 | */ |
85b4aa49 RL |
1001 | static int fcoe_ethdrv_put(const struct net_device *netdev) |
1002 | { | |
1003 | struct module *owner; | |
1004 | ||
1005 | owner = fcoe_netdev_to_module_owner(netdev); | |
1006 | if (owner) { | |
56b854bb JB |
1007 | printk(KERN_DEBUG "fcoe:release driver module %s for %s\n", |
1008 | module_name(owner), netdev->name); | |
85b4aa49 RL |
1009 | module_put(owner); |
1010 | return 0; | |
1011 | } | |
1012 | return -ENODEV; | |
1013 | } | |
1014 | ||
1015 | /** | |
34f42a07 | 1016 | * fcoe_destroy() - handles the destroy from sysfs |
85b4aa49 RL |
1017 | * @buffer: expcted to be a eth if name |
1018 | * @kp: associated kernel param | |
1019 | * | |
1020 | * Returns: 0 for success | |
34f42a07 | 1021 | */ |
85b4aa49 RL |
1022 | static int fcoe_destroy(const char *buffer, struct kernel_param *kp) |
1023 | { | |
1024 | int rc; | |
1025 | struct net_device *netdev; | |
1026 | ||
1027 | netdev = fcoe_if_to_netdev(buffer); | |
1028 | if (!netdev) { | |
1029 | rc = -ENODEV; | |
1030 | goto out_nodev; | |
1031 | } | |
1032 | /* look for existing lport */ | |
1033 | if (!fcoe_hostlist_lookup(netdev)) { | |
1034 | rc = -ENODEV; | |
1035 | goto out_putdev; | |
1036 | } | |
1037 | /* pass to transport */ | |
1038 | rc = fcoe_transport_release(netdev); | |
1039 | if (rc) { | |
1040 | printk(KERN_ERR "fcoe: fcoe_transport_release(%s) failed\n", | |
1041 | netdev->name); | |
1042 | rc = -EIO; | |
1043 | goto out_putdev; | |
1044 | } | |
1045 | fcoe_ethdrv_put(netdev); | |
1046 | rc = 0; | |
1047 | out_putdev: | |
1048 | dev_put(netdev); | |
1049 | out_nodev: | |
1050 | return rc; | |
1051 | } | |
1052 | ||
1053 | /** | |
34f42a07 | 1054 | * fcoe_create() - Handles the create call from sysfs |
85b4aa49 RL |
1055 | * @buffer: expcted to be a eth if name |
1056 | * @kp: associated kernel param | |
1057 | * | |
1058 | * Returns: 0 for success | |
34f42a07 | 1059 | */ |
85b4aa49 RL |
1060 | static int fcoe_create(const char *buffer, struct kernel_param *kp) |
1061 | { | |
1062 | int rc; | |
1063 | struct net_device *netdev; | |
1064 | ||
1065 | netdev = fcoe_if_to_netdev(buffer); | |
1066 | if (!netdev) { | |
1067 | rc = -ENODEV; | |
1068 | goto out_nodev; | |
1069 | } | |
1070 | /* look for existing lport */ | |
1071 | if (fcoe_hostlist_lookup(netdev)) { | |
1072 | rc = -EEXIST; | |
1073 | goto out_putdev; | |
1074 | } | |
1075 | fcoe_ethdrv_get(netdev); | |
1076 | ||
1077 | /* pass to transport */ | |
1078 | rc = fcoe_transport_attach(netdev); | |
1079 | if (rc) { | |
1080 | printk(KERN_ERR "fcoe: fcoe_transport_attach(%s) failed\n", | |
1081 | netdev->name); | |
1082 | fcoe_ethdrv_put(netdev); | |
1083 | rc = -EIO; | |
1084 | goto out_putdev; | |
1085 | } | |
1086 | rc = 0; | |
1087 | out_putdev: | |
1088 | dev_put(netdev); | |
1089 | out_nodev: | |
1090 | return rc; | |
1091 | } | |
1092 | ||
/*
 * Writable module parameters: echoing an interface name into
 * /sys/module/fcoe/parameters/{create,destroy} creates or destroys
 * an FCoE instance on that net device (root-only, S_IWUSR).
 */
module_param_call(create, fcoe_create, NULL, NULL, S_IWUSR);
__MODULE_PARM_TYPE(create, "string");
MODULE_PARM_DESC(create, "Create fcoe port using net device passed in.");
module_param_call(destroy, fcoe_destroy, NULL, NULL, S_IWUSR);
__MODULE_PARM_TYPE(destroy, "string");
MODULE_PARM_DESC(destroy, "Destroy fcoe port");
1099 | ||
34f42a07 RL |
1100 | /** |
1101 | * fcoe_link_ok() - Check if link is ok for the fc_lport | |
85b4aa49 RL |
1102 | * @lp: ptr to the fc_lport |
1103 | * | |
1104 | * Any permanently-disqualifying conditions have been previously checked. | |
1105 | * This also updates the speed setting, which may change with link for 100/1000. | |
1106 | * | |
1107 | * This function should probably be checking for PAUSE support at some point | |
1108 | * in the future. Currently Per-priority-pause is not determinable using | |
1109 | * ethtool, so we shouldn't be restrictive until that problem is resolved. | |
1110 | * | |
1111 | * Returns: 0 if link is OK for use by FCoE. | |
1112 | * | |
1113 | */ | |
1114 | int fcoe_link_ok(struct fc_lport *lp) | |
1115 | { | |
fc47ff6b | 1116 | struct fcoe_softc *fc = lport_priv(lp); |
85b4aa49 RL |
1117 | struct net_device *dev = fc->real_dev; |
1118 | struct ethtool_cmd ecmd = { ETHTOOL_GSET }; | |
1119 | int rc = 0; | |
1120 | ||
1121 | if ((dev->flags & IFF_UP) && netif_carrier_ok(dev)) { | |
1122 | dev = fc->phys_dev; | |
1123 | if (dev->ethtool_ops->get_settings) { | |
1124 | dev->ethtool_ops->get_settings(dev, &ecmd); | |
1125 | lp->link_supported_speeds &= | |
1126 | ~(FC_PORTSPEED_1GBIT | FC_PORTSPEED_10GBIT); | |
1127 | if (ecmd.supported & (SUPPORTED_1000baseT_Half | | |
1128 | SUPPORTED_1000baseT_Full)) | |
1129 | lp->link_supported_speeds |= FC_PORTSPEED_1GBIT; | |
1130 | if (ecmd.supported & SUPPORTED_10000baseT_Full) | |
1131 | lp->link_supported_speeds |= | |
1132 | FC_PORTSPEED_10GBIT; | |
1133 | if (ecmd.speed == SPEED_1000) | |
1134 | lp->link_speed = FC_PORTSPEED_1GBIT; | |
1135 | if (ecmd.speed == SPEED_10000) | |
1136 | lp->link_speed = FC_PORTSPEED_10GBIT; | |
1137 | } | |
1138 | } else | |
1139 | rc = -1; | |
1140 | ||
1141 | return rc; | |
1142 | } | |
1143 | EXPORT_SYMBOL_GPL(fcoe_link_ok); | |
1144 | ||
34f42a07 RL |
1145 | /** |
1146 | * fcoe_percpu_clean() - Clear the pending skbs for an lport | |
85b4aa49 RL |
1147 | * @lp: the fc_lport |
1148 | */ | |
1149 | void fcoe_percpu_clean(struct fc_lport *lp) | |
1150 | { | |
1151 | int idx; | |
1152 | struct fcoe_percpu_s *pp; | |
1153 | struct fcoe_rcv_info *fr; | |
1154 | struct sk_buff_head *list; | |
1155 | struct sk_buff *skb, *next; | |
1156 | struct sk_buff *head; | |
1157 | ||
1158 | for (idx = 0; idx < NR_CPUS; idx++) { | |
1159 | if (fcoe_percpu[idx]) { | |
1160 | pp = fcoe_percpu[idx]; | |
1161 | spin_lock_bh(&pp->fcoe_rx_list.lock); | |
1162 | list = &pp->fcoe_rx_list; | |
1163 | head = list->next; | |
1164 | for (skb = head; skb != (struct sk_buff *)list; | |
1165 | skb = next) { | |
1166 | next = skb->next; | |
1167 | fr = fcoe_dev_from_skb(skb); | |
1168 | if (fr->fr_dev == lp) { | |
1169 | __skb_unlink(skb, list); | |
1170 | kfree_skb(skb); | |
1171 | } | |
1172 | } | |
1173 | spin_unlock_bh(&pp->fcoe_rx_list.lock); | |
1174 | } | |
1175 | } | |
1176 | } | |
1177 | EXPORT_SYMBOL_GPL(fcoe_percpu_clean); | |
1178 | ||
/**
 * fcoe_clean_pending_queue() - Free every skb on the pending transmit queue
 * @lp: the corresponding fc_lport
 *
 * Drains the softc's fcoe_pending_queue, freeing each skb. The queue lock
 * is deliberately dropped around each kfree_skb() so the skb is never freed
 * while the lock is held; the loop re-takes the lock before the next
 * dequeue, so concurrent enqueuers are still drained correctly.
 *
 * Returns: none
 */
void fcoe_clean_pending_queue(struct fc_lport *lp)
{
	struct fcoe_softc *fc = lport_priv(lp);
	struct sk_buff *skb;

	spin_lock_bh(&fc->fcoe_pending_queue.lock);
	while ((skb = __skb_dequeue(&fc->fcoe_pending_queue)) != NULL) {
		spin_unlock_bh(&fc->fcoe_pending_queue.lock);
		kfree_skb(skb);
		spin_lock_bh(&fc->fcoe_pending_queue.lock);
	}
	spin_unlock_bh(&fc->fcoe_pending_queue.lock);
}
EXPORT_SYMBOL_GPL(fcoe_clean_pending_queue);
1199 | ||
1200 | /** | |
34f42a07 | 1201 | * libfc_host_alloc() - Allocate a Scsi_Host with room for the fc_lport |
85b4aa49 RL |
1202 | * @sht: ptr to the scsi host templ |
1203 | * @priv_size: size of private data after fc_lport | |
1204 | * | |
1205 | * Returns: ptr to Scsi_Host | |
34f42a07 | 1206 | * TODO: to libfc? |
85b4aa49 | 1207 | */ |
b2ab99c9 RL |
1208 | static inline struct Scsi_Host * |
1209 | libfc_host_alloc(struct scsi_host_template *sht, int priv_size) | |
85b4aa49 RL |
1210 | { |
1211 | return scsi_host_alloc(sht, sizeof(struct fc_lport) + priv_size); | |
1212 | } | |
1213 | ||
1214 | /** | |
34f42a07 | 1215 | * fcoe_host_alloc() - Allocate a Scsi_Host with room for the fcoe_softc |
85b4aa49 RL |
1216 | * @sht: ptr to the scsi host templ |
1217 | * @priv_size: size of private data after fc_lport | |
1218 | * | |
1219 | * Returns: ptr to Scsi_Host | |
1220 | */ | |
1221 | struct Scsi_Host *fcoe_host_alloc(struct scsi_host_template *sht, int priv_size) | |
1222 | { | |
1223 | return libfc_host_alloc(sht, sizeof(struct fcoe_softc) + priv_size); | |
1224 | } | |
1225 | EXPORT_SYMBOL_GPL(fcoe_host_alloc); | |
1226 | ||
34f42a07 RL |
1227 | /** |
1228 | * fcoe_reset() - Resets the fcoe | |
85b4aa49 RL |
1229 | * @shost: shost the reset is from |
1230 | * | |
1231 | * Returns: always 0 | |
1232 | */ | |
int fcoe_reset(struct Scsi_Host *shost)
{
	/* the lport lives in the shost private area */
	struct fc_lport *lport = shost_priv(shost);

	fc_lport_reset(lport);

	return 0;
}
EXPORT_SYMBOL_GPL(fcoe_reset);
1240 | ||
34f42a07 RL |
1241 | /** |
1242 | * fcoe_wwn_from_mac() - Converts 48-bit IEEE MAC address to 64-bit FC WWN. | |
85b4aa49 RL |
1243 | * @mac: mac address |
1244 | * @scheme: check port | |
1245 | * @port: port indicator for converting | |
1246 | * | |
1247 | * Returns: u64 fc world wide name | |
1248 | */ | |
1249 | u64 fcoe_wwn_from_mac(unsigned char mac[MAX_ADDR_LEN], | |
1250 | unsigned int scheme, unsigned int port) | |
1251 | { | |
1252 | u64 wwn; | |
1253 | u64 host_mac; | |
1254 | ||
1255 | /* The MAC is in NO, so flip only the low 48 bits */ | |
1256 | host_mac = ((u64) mac[0] << 40) | | |
1257 | ((u64) mac[1] << 32) | | |
1258 | ((u64) mac[2] << 24) | | |
1259 | ((u64) mac[3] << 16) | | |
1260 | ((u64) mac[4] << 8) | | |
1261 | (u64) mac[5]; | |
1262 | ||
1263 | WARN_ON(host_mac >= (1ULL << 48)); | |
1264 | wwn = host_mac | ((u64) scheme << 60); | |
1265 | switch (scheme) { | |
1266 | case 1: | |
1267 | WARN_ON(port != 0); | |
1268 | break; | |
1269 | case 2: | |
1270 | WARN_ON(port >= 0xfff); | |
1271 | wwn |= (u64) port << 48; | |
1272 | break; | |
1273 | default: | |
1274 | WARN_ON(1); | |
1275 | break; | |
1276 | } | |
1277 | ||
1278 | return wwn; | |
1279 | } | |
1280 | EXPORT_SYMBOL_GPL(fcoe_wwn_from_mac); | |
34f42a07 RL |
1281 | |
1282 | /** | |
1283 | * fcoe_hostlist_lookup_softc() - find the corresponding lport by a given device | |
85b4aa49 RL |
1284 | * @device: this is currently ptr to net_device |
1285 | * | |
1286 | * Returns: NULL or the located fcoe_softc | |
1287 | */ | |
b2ab99c9 RL |
1288 | static struct fcoe_softc * |
1289 | fcoe_hostlist_lookup_softc(const struct net_device *dev) | |
85b4aa49 RL |
1290 | { |
1291 | struct fcoe_softc *fc; | |
1292 | ||
1293 | read_lock(&fcoe_hostlist_lock); | |
1294 | list_for_each_entry(fc, &fcoe_hostlist, list) { | |
1295 | if (fc->real_dev == dev) { | |
1296 | read_unlock(&fcoe_hostlist_lock); | |
1297 | return fc; | |
1298 | } | |
1299 | } | |
1300 | read_unlock(&fcoe_hostlist_lock); | |
1301 | return NULL; | |
1302 | } | |
1303 | ||
34f42a07 RL |
1304 | /** |
1305 | * fcoe_hostlist_lookup() - Find the corresponding lport by netdev | |
85b4aa49 RL |
1306 | * @netdev: ptr to net_device |
1307 | * | |
1308 | * Returns: 0 for success | |
1309 | */ | |
1310 | struct fc_lport *fcoe_hostlist_lookup(const struct net_device *netdev) | |
1311 | { | |
1312 | struct fcoe_softc *fc; | |
1313 | ||
1314 | fc = fcoe_hostlist_lookup_softc(netdev); | |
1315 | ||
1316 | return (fc) ? fc->lp : NULL; | |
1317 | } | |
1318 | EXPORT_SYMBOL_GPL(fcoe_hostlist_lookup); | |
1319 | ||
34f42a07 RL |
1320 | /** |
1321 | * fcoe_hostlist_add() - Add a lport to lports list | |
85b4aa49 RL |
1322 | * @lp: ptr to the fc_lport to badded |
1323 | * | |
1324 | * Returns: 0 for success | |
1325 | */ | |
int fcoe_hostlist_add(const struct fc_lport *lp)
{
	struct fcoe_softc *fc;

	/*
	 * Only insert if this netdev is not already on the list.
	 * NOTE(review): the lookup runs outside the write lock, so two
	 * concurrent adds for the same netdev could both see "not found"
	 * and insert twice -- confirm callers serialize creation.
	 */
	fc = fcoe_hostlist_lookup_softc(fcoe_netdev(lp));
	if (!fc) {
		/* the fcoe_softc is embedded in the lport's private area */
		fc = lport_priv(lp);
		write_lock_bh(&fcoe_hostlist_lock);
		list_add_tail(&fc->list, &fcoe_hostlist);
		write_unlock_bh(&fcoe_hostlist_lock);
	}
	return 0;
}
EXPORT_SYMBOL_GPL(fcoe_hostlist_add);
1340 | ||
34f42a07 RL |
1341 | /** |
1342 | * fcoe_hostlist_remove() - remove a lport from lports list | |
85b4aa49 RL |
1343 | * @lp: ptr to the fc_lport to badded |
1344 | * | |
1345 | * Returns: 0 for success | |
1346 | */ | |
1347 | int fcoe_hostlist_remove(const struct fc_lport *lp) | |
1348 | { | |
1349 | struct fcoe_softc *fc; | |
1350 | ||
1351 | fc = fcoe_hostlist_lookup_softc(fcoe_netdev(lp)); | |
1352 | BUG_ON(!fc); | |
1353 | write_lock_bh(&fcoe_hostlist_lock); | |
1354 | list_del(&fc->list); | |
1355 | write_unlock_bh(&fcoe_hostlist_lock); | |
1356 | ||
1357 | return 0; | |
1358 | } | |
1359 | EXPORT_SYMBOL_GPL(fcoe_hostlist_remove); | |
1360 | ||
/**
 * fcoe_libfc_config() - sets up libfc related properties for lport
 * @lp: ptr to the fc_lport
 * @tt: libfc function template supplied by the low-level driver
 *
 * Copies the caller's function template into the lport, then runs the
 * libfc layer initializers: FCP, exchange manager, ELS/CT, lport,
 * rport and discovery.
 *
 * Returns : 0 for success, -ENOMEM if FCP initialization fails
 */
int fcoe_libfc_config(struct fc_lport *lp, struct libfc_function_template *tt)
{
	/* Set the function pointers set by the LLDD */
	memcpy(&lp->tt, tt, sizeof(*tt));
	if (fc_fcp_init(lp))
		return -ENOMEM;
	fc_exch_init(lp);
	fc_elsct_init(lp);
	fc_lport_init(lp);
	fc_rport_init(lp);
	fc_disc_init(lp);

	return 0;
}
EXPORT_SYMBOL_GPL(fcoe_libfc_config);
1383 | ||
1384 | /** | |
34f42a07 | 1385 | * fcoe_init() - fcoe module loading initialization |
85b4aa49 RL |
1386 | * |
1387 | * Initialization routine | |
1388 | * 1. Will create fc transport software structure | |
1389 | * 2. initialize the link list of port information structure | |
1390 | * | |
1391 | * Returns 0 on success, negative on failure | |
34f42a07 | 1392 | */ |
85b4aa49 RL |
static int __init fcoe_init(void)
{
	int cpu;
	struct fcoe_percpu_s *p;

	/* global list of active FCoE instances and its lock */
	INIT_LIST_HEAD(&fcoe_hostlist);
	rwlock_init(&fcoe_hostlist_lock);

#ifdef CONFIG_HOTPLUG_CPU
	register_cpu_notifier(&fcoe_cpu_notifier);
#endif /* CONFIG_HOTPLUG_CPU */

	/*
	 * initialize per CPU interrupt thread
	 */
	for_each_online_cpu(cpu) {
		p = kzalloc(sizeof(struct fcoe_percpu_s), GFP_KERNEL);
		if (p) {
			p->thread = kthread_create(fcoe_percpu_receive_thread,
						   (void *)p,
						   "fcoethread/%d", cpu);

			/*
			 * if there is no error then bind the thread to the cpu
			 * initialize the semaphore and skb queue head
			 */
			if (likely(!IS_ERR(p->thread))) {
				p->cpu = cpu;
				fcoe_percpu[cpu] = p;
				skb_queue_head_init(&p->fcoe_rx_list);
				kthread_bind(p->thread, cpu);
				wake_up_process(p->thread);
			} else {
				/* creation failed: leave no thread
				 * registered for this cpu
				 */
				fcoe_percpu[cpu] = NULL;
				kfree(p);
			}
		}
	}

	/*
	 * setup link change notification
	 */
	fcoe_dev_setup();

	/* fire the watchdog every 10 seconds, starting 10s from now */
	setup_timer(&fcoe_timer, fcoe_watchdog, 0);

	mod_timer(&fcoe_timer, jiffies + (10 * HZ));

	/* initialize the fcoe transport */
	fcoe_transport_init();

	fcoe_sw_init();

	/*
	 * NOTE(review): errors from kzalloc/fcoe_transport_init/fcoe_sw_init
	 * are ignored and 0 is always returned -- confirm this is intended.
	 */
	return 0;
}
module_init(fcoe_init);
1450 | ||
1451 | /** | |
34f42a07 | 1452 | * fcoe_exit() - fcoe module unloading cleanup |
85b4aa49 RL |
1453 | * |
1454 | * Returns 0 on success, negative on failure | |
34f42a07 | 1455 | */ |
85b4aa49 RL |
static void __exit fcoe_exit(void)
{
	u32 idx;
	struct fcoe_softc *fc, *tmp;
	struct fcoe_percpu_s *p;
	struct sk_buff *skb;

	/*
	 * Stop all call back interfaces
	 */
#ifdef CONFIG_HOTPLUG_CPU
	unregister_cpu_notifier(&fcoe_cpu_notifier);
#endif /* CONFIG_HOTPLUG_CPU */
	fcoe_dev_cleanup();

	/*
	 * stop timer
	 */
	del_timer_sync(&fcoe_timer);

	/*
	 * releases the associated fcoe transport for each lport
	 * NOTE(review): fcoe_hostlist is walked here without taking
	 * fcoe_hostlist_lock -- presumably safe because module unload is
	 * serialized against create/destroy; confirm.
	 */
	list_for_each_entry_safe(fc, tmp, &fcoe_hostlist, list)
		fcoe_transport_release(fc->real_dev);

	/* stop each per-cpu receive thread and drain its queue */
	for (idx = 0; idx < NR_CPUS; idx++) {
		if (fcoe_percpu[idx]) {
			kthread_stop(fcoe_percpu[idx]->thread);
			p = fcoe_percpu[idx];
			spin_lock_bh(&p->fcoe_rx_list.lock);
			while ((skb = __skb_dequeue(&p->fcoe_rx_list)) != NULL)
				kfree_skb(skb);
			spin_unlock_bh(&p->fcoe_rx_list.lock);
			/* release the cached CRC/EOF page, if any */
			if (fcoe_percpu[idx]->crc_eof_page)
				put_page(fcoe_percpu[idx]->crc_eof_page);
			kfree(fcoe_percpu[idx]);
		}
	}

	/* remove sw transport */
	fcoe_sw_exit();

	/* detach the transport */
	fcoe_transport_exit();
}
module_exit(fcoe_exit);