Commit | Line | Data |
---|---|---|
85b4aa49 RL |
1 | /* |
2 | * Copyright(c) 2007 - 2008 Intel Corporation. All rights reserved. | |
3 | * | |
4 | * This program is free software; you can redistribute it and/or modify it | |
5 | * under the terms and conditions of the GNU General Public License, | |
6 | * version 2, as published by the Free Software Foundation. | |
7 | * | |
8 | * This program is distributed in the hope it will be useful, but WITHOUT | |
9 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | |
10 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | |
11 | * more details. | |
12 | * | |
13 | * You should have received a copy of the GNU General Public License along with | |
14 | * this program; if not, write to the Free Software Foundation, Inc., | |
15 | * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. | |
16 | * | |
17 | * Maintained at www.Open-FCoE.org | |
18 | */ | |
19 | ||
20 | #include <linux/module.h> | |
21 | #include <linux/version.h> | |
22 | #include <linux/kernel.h> | |
23 | #include <linux/spinlock.h> | |
24 | #include <linux/skbuff.h> | |
25 | #include <linux/netdevice.h> | |
26 | #include <linux/etherdevice.h> | |
27 | #include <linux/ethtool.h> | |
28 | #include <linux/if_ether.h> | |
29 | #include <linux/if_vlan.h> | |
30 | #include <linux/kthread.h> | |
31 | #include <linux/crc32.h> | |
32 | #include <linux/cpu.h> | |
33 | #include <linux/fs.h> | |
34 | #include <linux/sysfs.h> | |
35 | #include <linux/ctype.h> | |
36 | #include <scsi/scsi_tcq.h> | |
37 | #include <scsi/scsicam.h> | |
38 | #include <scsi/scsi_transport.h> | |
39 | #include <scsi/scsi_transport_fc.h> | |
40 | #include <net/rtnetlink.h> | |
41 | ||
42 | #include <scsi/fc/fc_encaps.h> | |
43 | ||
44 | #include <scsi/libfc.h> | |
45 | #include <scsi/fc_frame.h> | |
46 | #include <scsi/libfcoe.h> | |
47 | #include <scsi/fc_transport_fcoe.h> | |
48 | ||
/* Non-zero enables verbose skb debug logging in the rx/tx paths. */
static int debug_fcoe;

/*
 * Back-pressure thresholds for the per-lport pending transmit queue:
 * lport->qfull is raised above MAX and cleared below LOW
 * (see fcoe_xmit()/fcoe_check_wait_queue()).
 */
#define FCOE_MAX_QUEUE_DEPTH 256
#define FCOE_LOW_QUEUE_DEPTH 32

/* destination address mode */
#define FCOE_GW_ADDR_MODE 0x00
#define FCOE_FCOUI_ADDR_MODE 0x01

/* FC word size in bytes, used for frame-length -> word conversions. */
#define FCOE_WORD_TO_BYTE 4

MODULE_AUTHOR("Open-FCoE.org");
MODULE_DESCRIPTION("FCoE");
MODULE_LICENSE("GPL");

/* fcoe host list, protected by fcoe_hostlist_lock */
LIST_HEAD(fcoe_hostlist);
DEFINE_RWLOCK(fcoe_hostlist_lock);
DEFINE_TIMER(fcoe_timer, NULL, 0, 0);
/* per-cpu rx thread state, indexed by cpu id; slots may be NULL */
struct fcoe_percpu_s *fcoe_percpu[NR_CPUS];


/* Function Prototypes */
static int fcoe_check_wait_queue(struct fc_lport *);
static void fcoe_recv_flogi(struct fcoe_softc *, struct fc_frame *, u8 *);
#ifdef CONFIG_HOTPLUG_CPU
static int fcoe_cpu_callback(struct notifier_block *, ulong, void *);
#endif /* CONFIG_HOTPLUG_CPU */
static int fcoe_device_notification(struct notifier_block *, ulong, void *);
static void fcoe_dev_setup(void);
static void fcoe_dev_cleanup(void);

/* notification function from net device */
static struct notifier_block fcoe_notifier = {
	.notifier_call = fcoe_device_notification,
};


#ifdef CONFIG_HOTPLUG_CPU
/* CPU hotplug notifier: allocates/frees per-cpu stats blocks. */
static struct notifier_block fcoe_cpu_notifier = {
	.notifier_call = fcoe_cpu_callback,
};
91 | ||
92 | /** | |
34f42a07 | 93 | * fcoe_create_percpu_data() - creates the associated cpu data |
85b4aa49 RL |
94 | * @cpu: index for the cpu where fcoe cpu data will be created |
95 | * | |
96 | * create percpu stats block, from cpu add notifier | |
97 | * | |
98 | * Returns: none | |
34f42a07 | 99 | */ |
85b4aa49 RL |
100 | static void fcoe_create_percpu_data(int cpu) |
101 | { | |
102 | struct fc_lport *lp; | |
103 | struct fcoe_softc *fc; | |
104 | ||
105 | write_lock_bh(&fcoe_hostlist_lock); | |
106 | list_for_each_entry(fc, &fcoe_hostlist, list) { | |
107 | lp = fc->lp; | |
108 | if (lp->dev_stats[cpu] == NULL) | |
109 | lp->dev_stats[cpu] = | |
110 | kzalloc(sizeof(struct fcoe_dev_stats), | |
111 | GFP_KERNEL); | |
112 | } | |
113 | write_unlock_bh(&fcoe_hostlist_lock); | |
114 | } | |
115 | ||
/**
 * fcoe_destroy_percpu_data() - destroys the associated cpu data
 * @cpu: index for the cpu where fcoe cpu data will destroyed
 *
 * Free the per-cpu stats block for every lport on the host list;
 * called from the CPU_DEAD hotplug notifier.
 *
 * Returns: none
 */
static void fcoe_destroy_percpu_data(int cpu)
{
	struct fc_lport *lp;
	struct fcoe_softc *fc;

	/* Writer lock: we modify each lport's dev_stats array. */
	write_lock_bh(&fcoe_hostlist_lock);
	list_for_each_entry(fc, &fcoe_hostlist, list) {
		lp = fc->lp;
		/* kfree(NULL) is a no-op, so no guard is needed. */
		kfree(lp->dev_stats[cpu]);
		lp->dev_stats[cpu] = NULL;
	}
	write_unlock_bh(&fcoe_hostlist_lock);
}
137 | ||
/**
 * fcoe_cpu_callback() - fcoe cpu hotplug event callback
 * @nfb: callback data block
 * @action: event triggering the callback
 * @hcpu: index for the cpu of this event
 *
 * This creates or destroys per cpu data for fcoe: stats blocks are
 * allocated when a cpu comes online and freed when it dies.
 *
 * Returns NOTIFY_OK always.
 */
static int fcoe_cpu_callback(struct notifier_block *nfb, unsigned long action,
			     void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;

	switch (action) {
	case CPU_ONLINE:
		fcoe_create_percpu_data(cpu);
		break;
	case CPU_DEAD:
		fcoe_destroy_percpu_data(cpu);
		break;
	default:
		/* all other hotplug transitions are ignored */
		break;
	}
	return NOTIFY_OK;
}
#endif /* CONFIG_HOTPLUG_CPU */
166 | ||
/**
 * fcoe_rcv() - this is the fcoe receive function called by NET_RX_SOFTIRQ
 * @skb: the receive skb
 * @dev: associated net device
 * @ptype: context
 * @olddev: last device
 *
 * This function validates the FCoE encapsulation headers and queues
 * the skb to a per-cpu receive thread (selected by OX_ID) which does
 * the rest of the frame processing.
 *
 * Returns: 0 for success, -1 when the frame is dropped
 */
int fcoe_rcv(struct sk_buff *skb, struct net_device *dev,
	     struct packet_type *ptype, struct net_device *olddev)
{
	struct fc_lport *lp;
	struct fcoe_rcv_info *fr;
	struct fcoe_softc *fc;
	struct fcoe_dev_stats *stats;
	struct fc_frame_header *fh;
	unsigned short oxid;
	int cpu_idx;
	struct fcoe_percpu_s *fps;

	/* recover the softc from the packet_type embedded in it */
	fc = container_of(ptype, struct fcoe_softc, fcoe_packet_type);
	lp = fc->lp;
	if (unlikely(lp == NULL)) {
		FC_DBG("cannot find hba structure");
		goto err2;
	}

	if (unlikely(debug_fcoe)) {
		FC_DBG("skb_info: len:%d data_len:%d head:%p data:%p tail:%p "
		       "end:%p sum:%d dev:%s", skb->len, skb->data_len,
		       skb->head, skb->data, skb_tail_pointer(skb),
		       skb_end_pointer(skb), skb->csum,
		       skb->dev ? skb->dev->name : "<NULL>");

	}

	/* check for FCOE packet type */
	if (unlikely(eth_hdr(skb)->h_proto != htons(ETH_P_FCOE))) {
		FC_DBG("wrong FC type frame");
		goto err;
	}

	/*
	 * Check for minimum frame length, and make sure required FCoE
	 * and FC headers are pulled into the linear data area.
	 */
	if (unlikely((skb->len < FCOE_MIN_FRAME) ||
	    !pskb_may_pull(skb, FCOE_HEADER_LEN)))
		goto err;

	skb_set_transport_header(skb, sizeof(struct fcoe_hdr));
	fh = (struct fc_frame_header *) skb_transport_header(skb);

	oxid = ntohs(fh->fh_ox_id);

	/* stash rx context in the skb control block for the rx thread */
	fr = fcoe_dev_from_skb(skb);
	fr->fr_dev = lp;
	fr->ptype = ptype;
	cpu_idx = 0;
#ifdef CONFIG_SMP
	/*
	 * The incoming frame exchange id(oxid) is ANDed with num of online
	 * cpu bits to get cpu_idx and then this cpu_idx is used for selecting
	 * a per cpu kernel thread from fcoe_percpu. In case the cpu is
	 * offline or no kernel thread for derived cpu_idx then cpu_idx is
	 * initialize to first online cpu index.
	 */
	cpu_idx = oxid & (num_online_cpus() - 1);
	if (!fcoe_percpu[cpu_idx] || !cpu_online(cpu_idx))
		cpu_idx = first_cpu(cpu_online_map);
#endif
	fps = fcoe_percpu[cpu_idx];

	spin_lock_bh(&fps->fcoe_rx_list.lock);
	__skb_queue_tail(&fps->fcoe_rx_list, skb);
	/* queue went non-empty: the thread may be asleep, wake it */
	if (fps->fcoe_rx_list.qlen == 1)
		wake_up_process(fps->thread);

	spin_unlock_bh(&fps->fcoe_rx_list.lock);

	return 0;
err:
#ifdef CONFIG_SMP
	stats = lp->dev_stats[smp_processor_id()];
#else
	stats = lp->dev_stats[0];
#endif
	/* stats slot may be NULL if per-cpu allocation failed */
	if (stats)
		stats->ErrorFrames++;

err2:
	kfree_skb(skb);
	return -1;
}
EXPORT_SYMBOL_GPL(fcoe_rcv);
265 | ||
/**
 * fcoe_start_io() - pass to netdev to start xmit for fcoe
 * @skb: the skb to be xmitted
 *
 * Hands the skb to the network driver while keeping a reference so
 * the caller can requeue the frame if the driver rejects it.
 *
 * Returns: 0 for success, non-zero dev_queue_xmit() error otherwise
 */
static inline int fcoe_start_io(struct sk_buff *skb)
{
	int err;

	/*
	 * Take an extra reference: on failure the caller still owns
	 * the skb and may park it on the pending queue.
	 */
	skb_get(skb);
	err = dev_queue_xmit(skb);
	if (err)
		return err;
	/* transmitted - release the reference taken above */
	kfree_skb(skb);
	return 0;
}
283 | ||
/**
 * fcoe_get_paged_crc_eof() - in case we need alloc a page for crc_eof
 * @skb: the skb to be xmitted
 * @tlen: total len
 *
 * Appends a @tlen-byte CRC/EOF trailer fragment to @skb, carving it
 * out of a per-cpu page that is shared between frames until full.
 *
 * Returns: 0 for success, -ENOMEM if no page could be allocated
 */
static int fcoe_get_paged_crc_eof(struct sk_buff *skb, int tlen)
{
	struct fcoe_percpu_s *fps;
	struct page *page;
	int cpu_idx;

	/* pin this cpu so fps and its page stay ours until put_cpu() */
	cpu_idx = get_cpu();
	fps = fcoe_percpu[cpu_idx];
	page = fps->crc_eof_page;
	if (!page) {
		page = alloc_page(GFP_ATOMIC);
		if (!page) {
			put_cpu();
			return -ENOMEM;
		}
		fps->crc_eof_page = page;
		WARN_ON(fps->crc_eof_offset != 0);
	}

	/* the skb fragment takes its own reference on the page */
	get_page(page);
	skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags, page,
			   fps->crc_eof_offset, tlen);
	skb->len += tlen;
	skb->data_len += tlen;
	skb->truesize += tlen;
	fps->crc_eof_offset += sizeof(struct fcoe_crc_eof);

	/* page exhausted: drop our cached reference and start fresh */
	if (fps->crc_eof_offset >= PAGE_SIZE) {
		fps->crc_eof_page = NULL;
		fps->crc_eof_offset = 0;
		put_page(page);
	}
	put_cpu();
	return 0;
}
326 | ||
/**
 * fcoe_fc_crc() - calculates FC CRC in this fcoe skb
 * @fp: the fc_frame containg data to be checksummed
 *
 * This uses crc32() to calculate the crc for fc frame, covering the
 * linear skb data and every page fragment (mapped piecewise with
 * kmap_atomic so fragments crossing page boundaries are handled).
 *
 * Return : 32 bit crc
 */
u32 fcoe_fc_crc(struct fc_frame *fp)
{
	struct sk_buff *skb = fp_skb(fp);
	struct skb_frag_struct *frag;
	unsigned char *data;
	unsigned long off, len, clen;
	u32 crc;
	unsigned i;

	/* seed with the linear part of the skb */
	crc = crc32(~0, skb->data, skb_headlen(skb));

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		frag = &skb_shinfo(skb)->frags[i];
		off = frag->page_offset;
		len = frag->size;
		while (len > 0) {
			/* map at most one page worth at a time */
			clen = min(len, PAGE_SIZE - (off & ~PAGE_MASK));
			data = kmap_atomic(frag->page + (off >> PAGE_SHIFT),
					   KM_SKB_DATA_SOFTIRQ);
			crc = crc32(crc, data + (off & ~PAGE_MASK), clen);
			kunmap_atomic(data, KM_SKB_DATA_SOFTIRQ);
			off += clen;
			len -= clen;
		}
	}
	return crc;
}
EXPORT_SYMBOL_GPL(fcoe_fc_crc);
362 | ||
/**
 * fcoe_xmit() - FCoE frame transmit function
 * @lp: the associated local port
 * @fp: the fc_frame to be transmitted
 *
 * Encapsulates the FC frame in Ethernet + FCoE headers, appends the
 * CRC/EOF trailer (computed in software unless the device offloads
 * it) and hands the skb to the network driver.  Frames the driver
 * rejects are parked on the lport's pending queue for retry.
 *
 * Return : 0 for success
 */
int fcoe_xmit(struct fc_lport *lp, struct fc_frame *fp)
{
	int wlen, rc = 0;
	u32 crc;
	struct ethhdr *eh;
	struct fcoe_crc_eof *cp;
	struct sk_buff *skb;
	struct fcoe_dev_stats *stats;
	struct fc_frame_header *fh;
	unsigned int hlen;		/* header length implies the version */
	unsigned int tlen;		/* trailer length */
	unsigned int elen;		/* eth header, may include vlan */
	int flogi_in_progress = 0;
	struct fcoe_softc *fc;
	u8 sof, eof;
	struct fcoe_hdr *hp;

	WARN_ON((fr_len(fp) % sizeof(u32)) != 0);

	fc = lport_priv(lp);
	/*
	 * if it is a flogi then we need to learn gw-addr
	 * and my own fcid
	 */
	fh = fc_frame_header_get(fp);
	if (unlikely(fh->fh_r_ctl == FC_RCTL_ELS_REQ)) {
		if (fc_frame_payload_op(fp) == ELS_FLOGI) {
			fc->flogi_oxid = ntohs(fh->fh_ox_id);
			fc->address_mode = FCOE_FCOUI_ADDR_MODE;
			fc->flogi_progress = 1;
			flogi_in_progress = 1;
		} else if (fc->flogi_progress && ntoh24(fh->fh_s_id) != 0) {
			/*
			 * Here we must've gotten an SID by accepting an FLOGI
			 * from a point-to-point connection. Switch to using
			 * the source mac based on the SID. The destination
			 * MAC in this case would have been set by receiving
			 * the FLOGI.
			 */
			fc_fcoe_set_mac(fc->data_src_addr, fh->fh_s_id);
			fc->flogi_progress = 0;
		}
	}

	skb = fp_skb(fp);
	sof = fr_sof(fp);
	eof = fr_eof(fp);

	/* VLAN devices need room for the larger 802.1Q ethernet header */
	elen = (fc->real_dev->priv_flags & IFF_802_1Q_VLAN) ?
		sizeof(struct vlan_ethhdr) : sizeof(struct ethhdr);
	hlen = sizeof(struct fcoe_hdr);
	tlen = sizeof(struct fcoe_crc_eof);
	/* frame size in FC words, for the TxWords statistic below */
	wlen = (skb->len - tlen + sizeof(crc)) / FCOE_WORD_TO_BYTE;

	/* crc offload */
	if (likely(lp->crc_offload)) {
		skb->ip_summed = CHECKSUM_PARTIAL;
		skb->csum_start = skb_headroom(skb);
		skb->csum_offset = skb->len;
		crc = 0;
	} else {
		skb->ip_summed = CHECKSUM_NONE;
		crc = fcoe_fc_crc(fp);
	}

	/* copy fc crc and eof to the skb buff */
	if (skb_is_nonlinear(skb)) {
		skb_frag_t *frag;
		if (fcoe_get_paged_crc_eof(skb, tlen)) {
			kfree_skb(skb);
			return -ENOMEM;
		}
		frag = &skb_shinfo(skb)->frags[skb_shinfo(skb)->nr_frags - 1];
		cp = kmap_atomic(frag->page, KM_SKB_DATA_SOFTIRQ)
			+ frag->page_offset;
	} else {
		cp = (struct fcoe_crc_eof *)skb_put(skb, tlen);
	}

	memset(cp, 0, sizeof(*cp));
	cp->fcoe_eof = eof;
	cp->fcoe_crc32 = cpu_to_le32(~crc);

	if (skb_is_nonlinear(skb)) {
		kunmap_atomic(cp, KM_SKB_DATA_SOFTIRQ);
		cp = NULL;
	}

	/* adjust skb network/transport offsets to match mac/fcoe/fc */
	skb_push(skb, elen + hlen);
	skb_reset_mac_header(skb);
	skb_reset_network_header(skb);
	skb->mac_len = elen;
	skb->protocol = htons(ETH_P_FCOE);
	skb->dev = fc->real_dev;

	/* fill up mac and fcoe headers */
	eh = eth_hdr(skb);
	eh->h_proto = htons(ETH_P_FCOE);
	if (fc->address_mode == FCOE_FCOUI_ADDR_MODE)
		fc_fcoe_set_mac(eh->h_dest, fh->fh_d_id);
	else
		/* insert GW address */
		memcpy(eh->h_dest, fc->dest_addr, ETH_ALEN);

	/* FLOGI must come from the control MAC until a data MAC is set */
	if (unlikely(flogi_in_progress))
		memcpy(eh->h_source, fc->ctl_src_addr, ETH_ALEN);
	else
		memcpy(eh->h_source, fc->data_src_addr, ETH_ALEN);

	hp = (struct fcoe_hdr *)(eh + 1);
	memset(hp, 0, sizeof(*hp));
	if (FC_FCOE_VER)
		FC_FCOE_ENCAPS_VER(hp, FC_FCOE_VER);
	hp->fcoe_sof = sof;

#ifdef NETIF_F_FSO
	/* fcoe lso, mss is in max_payload which is non-zero for FCP data */
	if (lp->seq_offload && fr_max_payload(fp)) {
		skb_shinfo(skb)->gso_type = SKB_GSO_FCOE;
		skb_shinfo(skb)->gso_size = fr_max_payload(fp);
	} else {
		skb_shinfo(skb)->gso_type = 0;
		skb_shinfo(skb)->gso_size = 0;
	}
#endif
	/* update tx stats: regardless if LLD fails */
	stats = lp->dev_stats[smp_processor_id()];
	if (stats) {
		stats->TxFrames++;
		stats->TxWords += wlen;
	}

	/* send down to lld */
	fr_dev(fp) = lp;
	/* flush any earlier backlog first to preserve frame order */
	if (fc->fcoe_pending_queue.qlen)
		rc = fcoe_check_wait_queue(lp);

	if (rc == 0)
		rc = fcoe_start_io(skb);

	/* driver rejected the skb: queue it and raise back-pressure */
	if (rc) {
		spin_lock_bh(&fc->fcoe_pending_queue.lock);
		__skb_queue_tail(&fc->fcoe_pending_queue, skb);
		spin_unlock_bh(&fc->fcoe_pending_queue.lock);
		if (fc->fcoe_pending_queue.qlen > FCOE_MAX_QUEUE_DEPTH)
			lp->qfull = 1;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(fcoe_xmit);
522 | ||
34f42a07 RL |
523 | /** |
524 | * fcoe_percpu_receive_thread() - recv thread per cpu | |
85b4aa49 RL |
525 | * @arg: ptr to the fcoe per cpu struct |
526 | * | |
527 | * Return: 0 for success | |
85b4aa49 RL |
528 | */ |
529 | int fcoe_percpu_receive_thread(void *arg) | |
530 | { | |
531 | struct fcoe_percpu_s *p = arg; | |
532 | u32 fr_len; | |
533 | struct fc_lport *lp; | |
534 | struct fcoe_rcv_info *fr; | |
535 | struct fcoe_dev_stats *stats; | |
536 | struct fc_frame_header *fh; | |
537 | struct sk_buff *skb; | |
538 | struct fcoe_crc_eof crc_eof; | |
539 | struct fc_frame *fp; | |
540 | u8 *mac = NULL; | |
541 | struct fcoe_softc *fc; | |
542 | struct fcoe_hdr *hp; | |
543 | ||
4469c195 | 544 | set_user_nice(current, -20); |
85b4aa49 RL |
545 | |
546 | while (!kthread_should_stop()) { | |
547 | ||
548 | spin_lock_bh(&p->fcoe_rx_list.lock); | |
549 | while ((skb = __skb_dequeue(&p->fcoe_rx_list)) == NULL) { | |
550 | set_current_state(TASK_INTERRUPTIBLE); | |
551 | spin_unlock_bh(&p->fcoe_rx_list.lock); | |
552 | schedule(); | |
553 | set_current_state(TASK_RUNNING); | |
554 | if (kthread_should_stop()) | |
555 | return 0; | |
556 | spin_lock_bh(&p->fcoe_rx_list.lock); | |
557 | } | |
558 | spin_unlock_bh(&p->fcoe_rx_list.lock); | |
559 | fr = fcoe_dev_from_skb(skb); | |
560 | lp = fr->fr_dev; | |
561 | if (unlikely(lp == NULL)) { | |
562 | FC_DBG("invalid HBA Structure"); | |
563 | kfree_skb(skb); | |
564 | continue; | |
565 | } | |
566 | ||
567 | stats = lp->dev_stats[smp_processor_id()]; | |
568 | ||
569 | if (unlikely(debug_fcoe)) { | |
570 | FC_DBG("skb_info: len:%d data_len:%d head:%p data:%p " | |
571 | "tail:%p end:%p sum:%d dev:%s", | |
572 | skb->len, skb->data_len, | |
573 | skb->head, skb->data, skb_tail_pointer(skb), | |
574 | skb_end_pointer(skb), skb->csum, | |
575 | skb->dev ? skb->dev->name : "<NULL>"); | |
576 | } | |
577 | ||
578 | /* | |
579 | * Save source MAC address before discarding header. | |
580 | */ | |
581 | fc = lport_priv(lp); | |
582 | if (unlikely(fc->flogi_progress)) | |
583 | mac = eth_hdr(skb)->h_source; | |
584 | ||
585 | if (skb_is_nonlinear(skb)) | |
586 | skb_linearize(skb); /* not ideal */ | |
587 | ||
588 | /* | |
589 | * Frame length checks and setting up the header pointers | |
590 | * was done in fcoe_rcv already. | |
591 | */ | |
592 | hp = (struct fcoe_hdr *) skb_network_header(skb); | |
593 | fh = (struct fc_frame_header *) skb_transport_header(skb); | |
594 | ||
595 | if (unlikely(FC_FCOE_DECAPS_VER(hp) != FC_FCOE_VER)) { | |
596 | if (stats) { | |
597 | if (stats->ErrorFrames < 5) | |
598 | FC_DBG("unknown FCoE version %x", | |
599 | FC_FCOE_DECAPS_VER(hp)); | |
600 | stats->ErrorFrames++; | |
601 | } | |
602 | kfree_skb(skb); | |
603 | continue; | |
604 | } | |
605 | ||
606 | skb_pull(skb, sizeof(struct fcoe_hdr)); | |
607 | fr_len = skb->len - sizeof(struct fcoe_crc_eof); | |
608 | ||
609 | if (stats) { | |
610 | stats->RxFrames++; | |
611 | stats->RxWords += fr_len / FCOE_WORD_TO_BYTE; | |
612 | } | |
613 | ||
614 | fp = (struct fc_frame *)skb; | |
615 | fc_frame_init(fp); | |
616 | fr_dev(fp) = lp; | |
617 | fr_sof(fp) = hp->fcoe_sof; | |
618 | ||
619 | /* Copy out the CRC and EOF trailer for access */ | |
620 | if (skb_copy_bits(skb, fr_len, &crc_eof, sizeof(crc_eof))) { | |
621 | kfree_skb(skb); | |
622 | continue; | |
623 | } | |
624 | fr_eof(fp) = crc_eof.fcoe_eof; | |
625 | fr_crc(fp) = crc_eof.fcoe_crc32; | |
626 | if (pskb_trim(skb, fr_len)) { | |
627 | kfree_skb(skb); | |
628 | continue; | |
629 | } | |
630 | ||
631 | /* | |
632 | * We only check CRC if no offload is available and if it is | |
633 | * it's solicited data, in which case, the FCP layer would | |
634 | * check it during the copy. | |
635 | */ | |
636 | if (lp->crc_offload) | |
637 | fr_flags(fp) &= ~FCPHF_CRC_UNCHECKED; | |
638 | else | |
639 | fr_flags(fp) |= FCPHF_CRC_UNCHECKED; | |
640 | ||
641 | fh = fc_frame_header_get(fp); | |
642 | if (fh->fh_r_ctl == FC_RCTL_DD_SOL_DATA && | |
643 | fh->fh_type == FC_TYPE_FCP) { | |
644 | fc_exch_recv(lp, lp->emp, fp); | |
645 | continue; | |
646 | } | |
647 | if (fr_flags(fp) & FCPHF_CRC_UNCHECKED) { | |
648 | if (le32_to_cpu(fr_crc(fp)) != | |
649 | ~crc32(~0, skb->data, fr_len)) { | |
650 | if (debug_fcoe || stats->InvalidCRCCount < 5) | |
651 | printk(KERN_WARNING "fcoe: dropping " | |
652 | "frame with CRC error\n"); | |
653 | stats->InvalidCRCCount++; | |
654 | stats->ErrorFrames++; | |
655 | fc_frame_free(fp); | |
656 | continue; | |
657 | } | |
658 | fr_flags(fp) &= ~FCPHF_CRC_UNCHECKED; | |
659 | } | |
660 | /* non flogi and non data exchanges are handled here */ | |
661 | if (unlikely(fc->flogi_progress)) | |
662 | fcoe_recv_flogi(fc, fp, mac); | |
663 | fc_exch_recv(lp, lp->emp, fp); | |
664 | } | |
665 | return 0; | |
666 | } | |
667 | ||
/**
 * fcoe_recv_flogi() - flogi receive function
 * @fc: associated fcoe_softc
 * @fp: the received frame
 * @sa: the source address of this flogi
 *
 * This is responsible to parse the flogi response and sets the corresponding
 * mac address for the initiator, either OUI based or GW based.
 *
 * Returns: none
 */
static void fcoe_recv_flogi(struct fcoe_softc *fc, struct fc_frame *fp, u8 *sa)
{
	struct fc_frame_header *fh;
	u8 op;

	fh = fc_frame_header_get(fp);
	if (fh->fh_type != FC_TYPE_ELS)
		return;
	op = fc_frame_payload_op(fp);
	if (op == ELS_LS_ACC && fh->fh_r_ctl == FC_RCTL_ELS_REP &&
	    fc->flogi_oxid == ntohs(fh->fh_ox_id)) {
		/*
		 * FLOGI accepted.
		 * If the src mac addr is FC_OUI-based, then we mark the
		 * address_mode flag to use FC_OUI-based Ethernet DA.
		 * Otherwise we use the FCoE gateway addr
		 */
		if (!compare_ether_addr(sa, (u8[6]) FC_FCOE_FLOGI_MAC)) {
			fc->address_mode = FCOE_FCOUI_ADDR_MODE;
		} else {
			memcpy(fc->dest_addr, sa, ETH_ALEN);
			fc->address_mode = FCOE_GW_ADDR_MODE;
		}

		/*
		 * Remove any previously-set unicast MAC filter.
		 * Add secondary FCoE MAC address filter for our OUI.
		 */
		rtnl_lock();
		/* only delete if data_src_addr was previously non-zero */
		if (compare_ether_addr(fc->data_src_addr, (u8[6]) { 0 }))
			dev_unicast_delete(fc->real_dev, fc->data_src_addr,
					   ETH_ALEN);
		fc_fcoe_set_mac(fc->data_src_addr, fh->fh_d_id);
		dev_unicast_add(fc->real_dev, fc->data_src_addr, ETH_ALEN);
		rtnl_unlock();

		fc->flogi_progress = 0;
	} else if (op == ELS_FLOGI && fh->fh_r_ctl == FC_RCTL_ELS_REQ && sa) {
		/*
		 * Save source MAC for point-to-point responses.
		 */
		memcpy(fc->dest_addr, sa, ETH_ALEN);
		fc->address_mode = FCOE_GW_ADDR_MODE;
	}
}
724 | ||
/**
 * fcoe_watchdog() - fcoe timer callback
 * @vp: unused timer argument
 *
 * This checks the pending queue length for fcoe and set lport qfull
 * if the FCOE_MAX_QUEUE_DEPTH is reached. This is done for all fc_lport on the
 * fcoe_hostlist.  Re-arms itself to fire again in one second.
 *
 * Returns: none
 */
void fcoe_watchdog(ulong vp)
{
	struct fcoe_softc *fc;

	/* reader lock is enough: the list is only traversed here */
	read_lock(&fcoe_hostlist_lock);
	list_for_each_entry(fc, &fcoe_hostlist, list) {
		if (fc->lp)
			fcoe_check_wait_queue(fc->lp);
	}
	read_unlock(&fcoe_hostlist_lock);

	/* re-arm for the next one-second tick */
	fcoe_timer.expires = jiffies + (1 * HZ);
	add_timer(&fcoe_timer);
}
749 | ||
750 | ||
/**
 * fcoe_check_wait_queue() - attempt to drain the fcoe pending xmit queue
 * @lp: the fc_lport whose pending queue should be flushed
 *
 * This empties the wait_queue, dequeue the head of the wait_queue queue
 * and calls fcoe_start_io() for each packet, if all skb have been
 * transmitted, return qlen or -1 if a error occurs, then restore
 * wait_queue and try again later.
 *
 * The wait_queue is used when the skb transmit fails. skb will go
 * in the wait_queue which will be emptied by the time function OR
 * by the next skb transmit.
 *
 * Returns: the remaining queue length, or -1 on error / if another
 * caller is already draining the queue
 */
static int fcoe_check_wait_queue(struct fc_lport *lp)
{
	struct fcoe_softc *fc = lport_priv(lp);
	struct sk_buff *skb;
	int rc = -1;

	spin_lock_bh(&fc->fcoe_pending_queue.lock);
	/* only one drainer at a time; others bail out immediately */
	if (fc->fcoe_pending_queue_active)
		goto out;
	fc->fcoe_pending_queue_active = 1;

	while (fc->fcoe_pending_queue.qlen) {
		/* keep qlen > 0 until fcoe_start_io succeeds */
		fc->fcoe_pending_queue.qlen++;
		skb = __skb_dequeue(&fc->fcoe_pending_queue);

		/* drop the lock across the driver call; it may sleep-ish */
		spin_unlock_bh(&fc->fcoe_pending_queue.lock);
		rc = fcoe_start_io(skb);
		spin_lock_bh(&fc->fcoe_pending_queue.lock);

		if (rc) {
			/* driver still busy: put the skb back and stop */
			__skb_queue_head(&fc->fcoe_pending_queue, skb);
			/* undo temporary increment above */
			fc->fcoe_pending_queue.qlen--;
			break;
		}
		/* undo temporary increment above */
		fc->fcoe_pending_queue.qlen--;
	}

	/* queue drained below the low watermark: lift back-pressure */
	if (fc->fcoe_pending_queue.qlen < FCOE_LOW_QUEUE_DEPTH)
		lp->qfull = 0;
	fc->fcoe_pending_queue_active = 0;
	rc = fc->fcoe_pending_queue.qlen;
out:
	spin_unlock_bh(&fc->fcoe_pending_queue.lock);
	return rc;
}
805 | ||
85b4aa49 | 806 | /** |
34f42a07 RL |
807 | * fcoe_dev_setup() - setup link change notification interface |
808 | */ | |
809 | static void fcoe_dev_setup() | |
85b4aa49 RL |
810 | { |
811 | /* | |
812 | * here setup a interface specific wd time to | |
813 | * monitor the link state | |
814 | */ | |
815 | register_netdevice_notifier(&fcoe_notifier); | |
816 | } | |
817 | ||
/**
 * fcoe_dev_cleanup() - cleanup link change notification interface
 *
 * Unregisters the notifier installed by fcoe_dev_setup().
 */
static void fcoe_dev_cleanup(void)
{
	unregister_netdevice_notifier(&fcoe_notifier);
}
825 | ||
/**
 * fcoe_device_notification() - netdev event notification callback
 * @notifier: context of the notification
 * @event: type of event
 * @ptr: the net_device the event refers to
 *
 * This function is called by the ethernet driver in case of link change event.
 * It propagates link up/down to libfc and adjusts the max frame size on
 * MTU changes.
 *
 * Returns: NOTIFY_OK if the device is one of ours, NOTIFY_DONE otherwise
 */
static int fcoe_device_notification(struct notifier_block *notifier,
				    ulong event, void *ptr)
{
	struct fc_lport *lp = NULL;
	struct net_device *real_dev = ptr;
	struct fcoe_softc *fc;
	struct fcoe_dev_stats *stats;
	u32 new_link_up;
	u32 mfs;
	int rc = NOTIFY_OK;

	/* find the lport backed by this net device, if any */
	read_lock(&fcoe_hostlist_lock);
	list_for_each_entry(fc, &fcoe_hostlist, list) {
		if (fc->real_dev == real_dev) {
			lp = fc->lp;
			break;
		}
	}
	read_unlock(&fcoe_hostlist_lock);
	if (lp == NULL) {
		/* not one of our devices */
		rc = NOTIFY_DONE;
		goto out;
	}

	new_link_up = lp->link_up;
	switch (event) {
	case NETDEV_DOWN:
	case NETDEV_GOING_DOWN:
		new_link_up = 0;
		break;
	case NETDEV_UP:
	case NETDEV_CHANGE:
		new_link_up = !fcoe_link_ok(lp);
		break;
	case NETDEV_CHANGEMTU:
		/* max FC payload is the MTU minus FCoE encapsulation */
		mfs = fc->real_dev->mtu -
			(sizeof(struct fcoe_hdr) +
			 sizeof(struct fcoe_crc_eof));
		if (mfs >= FC_MIN_MAX_FRAME)
			fc_set_mfs(lp, mfs);
		new_link_up = !fcoe_link_ok(lp);
		break;
	case NETDEV_REGISTER:
		break;
	default:
		FC_DBG("unknown event %ld call", event);
	}
	/* propagate the transition only if the state actually changed */
	if (lp->link_up != new_link_up) {
		if (new_link_up)
			fc_linkup(lp);
		else {
			stats = lp->dev_stats[smp_processor_id()];
			if (stats)
				stats->LinkFailureCount++;
			fc_linkdown(lp);
			fcoe_clean_pending_queue(lp);
		}
	}
out:
	return rc;
}
897 | ||
898 | /** | |
34f42a07 | 899 | * fcoe_if_to_netdev() - parse a name buffer to get netdev |
85b4aa49 RL |
900 | * @ifname: fixed array for output parsed ifname |
901 | * @buffer: incoming buffer to be copied | |
902 | * | |
903 | * Returns: NULL or ptr to netdeive | |
34f42a07 | 904 | */ |
85b4aa49 RL |
905 | static struct net_device *fcoe_if_to_netdev(const char *buffer) |
906 | { | |
907 | char *cp; | |
908 | char ifname[IFNAMSIZ + 2]; | |
909 | ||
910 | if (buffer) { | |
911 | strlcpy(ifname, buffer, IFNAMSIZ); | |
912 | cp = ifname + strlen(ifname); | |
913 | while (--cp >= ifname && *cp == '\n') | |
914 | *cp = '\0'; | |
915 | return dev_get_by_name(&init_net, ifname); | |
916 | } | |
917 | return NULL; | |
918 | } | |
919 | ||
920 | /** | |
34f42a07 | 921 | * fcoe_netdev_to_module_owner() - finds out the nic drive moddule of the netdev |
85b4aa49 RL |
922 | * @netdev: the target netdev |
923 | * | |
924 | * Returns: ptr to the struct module, NULL for failure | |
34f42a07 | 925 | */ |
b2ab99c9 RL |
926 | static struct module * |
927 | fcoe_netdev_to_module_owner(const struct net_device *netdev) | |
85b4aa49 RL |
928 | { |
929 | struct device *dev; | |
930 | ||
931 | if (!netdev) | |
932 | return NULL; | |
933 | ||
934 | dev = netdev->dev.parent; | |
935 | if (!dev) | |
936 | return NULL; | |
937 | ||
938 | if (!dev->driver) | |
939 | return NULL; | |
940 | ||
941 | return dev->driver->owner; | |
942 | } | |
943 | ||
/**
 * fcoe_ethdrv_get() - Hold the Ethernet driver
 * @netdev: the target netdev
 *
 * Holds the Ethernet driver module by try_module_get() for
 * the corresponding netdev, so the NIC driver cannot be unloaded
 * while FCoE is using the interface.
 *
 * Returns: the try_module_get() result (nonzero on success, 0 if
 *	    the module is already going away) when an owner module is
 *	    found, or -ENODEV when no owner module can be determined.
 *	    NOTE(review): the original comment said "0 for success",
 *	    but try_module_get() returns nonzero on success — confirm
 *	    no caller relies on a 0-for-success convention.
 */
static int fcoe_ethdrv_get(const struct net_device *netdev)
{
	struct module *owner;

	owner = fcoe_netdev_to_module_owner(netdev);
	if (owner) {
		printk(KERN_DEBUG "fcoe:hold driver module %s for %s\n",
		       module_name(owner), netdev->name);
		return try_module_get(owner);
	}
	return -ENODEV;
}
965 | ||
966 | /** | |
34f42a07 | 967 | * fcoe_ethdrv_put() - Release the Ethernet driver |
85b4aa49 RL |
968 | * @netdev: the target netdev |
969 | * | |
34f42a07 RL |
970 | * Releases the Ethernet driver module by module_put for |
971 | * the corresponding netdev. | |
972 | * | |
85b4aa49 | 973 | * Returns: 0 for succsss |
34f42a07 | 974 | */ |
85b4aa49 RL |
975 | static int fcoe_ethdrv_put(const struct net_device *netdev) |
976 | { | |
977 | struct module *owner; | |
978 | ||
979 | owner = fcoe_netdev_to_module_owner(netdev); | |
980 | if (owner) { | |
56b854bb JB |
981 | printk(KERN_DEBUG "fcoe:release driver module %s for %s\n", |
982 | module_name(owner), netdev->name); | |
85b4aa49 RL |
983 | module_put(owner); |
984 | return 0; | |
985 | } | |
986 | return -ENODEV; | |
987 | } | |
988 | ||
989 | /** | |
34f42a07 | 990 | * fcoe_destroy() - handles the destroy from sysfs |
85b4aa49 RL |
991 | * @buffer: expcted to be a eth if name |
992 | * @kp: associated kernel param | |
993 | * | |
994 | * Returns: 0 for success | |
34f42a07 | 995 | */ |
85b4aa49 RL |
996 | static int fcoe_destroy(const char *buffer, struct kernel_param *kp) |
997 | { | |
998 | int rc; | |
999 | struct net_device *netdev; | |
1000 | ||
1001 | netdev = fcoe_if_to_netdev(buffer); | |
1002 | if (!netdev) { | |
1003 | rc = -ENODEV; | |
1004 | goto out_nodev; | |
1005 | } | |
1006 | /* look for existing lport */ | |
1007 | if (!fcoe_hostlist_lookup(netdev)) { | |
1008 | rc = -ENODEV; | |
1009 | goto out_putdev; | |
1010 | } | |
1011 | /* pass to transport */ | |
1012 | rc = fcoe_transport_release(netdev); | |
1013 | if (rc) { | |
1014 | printk(KERN_ERR "fcoe: fcoe_transport_release(%s) failed\n", | |
1015 | netdev->name); | |
1016 | rc = -EIO; | |
1017 | goto out_putdev; | |
1018 | } | |
1019 | fcoe_ethdrv_put(netdev); | |
1020 | rc = 0; | |
1021 | out_putdev: | |
1022 | dev_put(netdev); | |
1023 | out_nodev: | |
1024 | return rc; | |
1025 | } | |
1026 | ||
1027 | /** | |
34f42a07 | 1028 | * fcoe_create() - Handles the create call from sysfs |
85b4aa49 RL |
1029 | * @buffer: expcted to be a eth if name |
1030 | * @kp: associated kernel param | |
1031 | * | |
1032 | * Returns: 0 for success | |
34f42a07 | 1033 | */ |
85b4aa49 RL |
1034 | static int fcoe_create(const char *buffer, struct kernel_param *kp) |
1035 | { | |
1036 | int rc; | |
1037 | struct net_device *netdev; | |
1038 | ||
1039 | netdev = fcoe_if_to_netdev(buffer); | |
1040 | if (!netdev) { | |
1041 | rc = -ENODEV; | |
1042 | goto out_nodev; | |
1043 | } | |
1044 | /* look for existing lport */ | |
1045 | if (fcoe_hostlist_lookup(netdev)) { | |
1046 | rc = -EEXIST; | |
1047 | goto out_putdev; | |
1048 | } | |
1049 | fcoe_ethdrv_get(netdev); | |
1050 | ||
1051 | /* pass to transport */ | |
1052 | rc = fcoe_transport_attach(netdev); | |
1053 | if (rc) { | |
1054 | printk(KERN_ERR "fcoe: fcoe_transport_attach(%s) failed\n", | |
1055 | netdev->name); | |
1056 | fcoe_ethdrv_put(netdev); | |
1057 | rc = -EIO; | |
1058 | goto out_putdev; | |
1059 | } | |
1060 | rc = 0; | |
1061 | out_putdev: | |
1062 | dev_put(netdev); | |
1063 | out_nodev: | |
1064 | return rc; | |
1065 | } | |
1066 | ||
/*
 * Module parameters "create" and "destroy": writing an interface name
 * to /sys/module/fcoe/parameters/{create,destroy} (root only, write
 * only) starts or stops an FCoE instance on that net device.
 */
module_param_call(create, fcoe_create, NULL, NULL, S_IWUSR);
__MODULE_PARM_TYPE(create, "string");
MODULE_PARM_DESC(create, "Create fcoe port using net device passed in.");
module_param_call(destroy, fcoe_destroy, NULL, NULL, S_IWUSR);
__MODULE_PARM_TYPE(destroy, "string");
MODULE_PARM_DESC(destroy, "Destroy fcoe port");
1073 | ||
34f42a07 RL |
1074 | /** |
1075 | * fcoe_link_ok() - Check if link is ok for the fc_lport | |
85b4aa49 RL |
1076 | * @lp: ptr to the fc_lport |
1077 | * | |
1078 | * Any permanently-disqualifying conditions have been previously checked. | |
1079 | * This also updates the speed setting, which may change with link for 100/1000. | |
1080 | * | |
1081 | * This function should probably be checking for PAUSE support at some point | |
1082 | * in the future. Currently Per-priority-pause is not determinable using | |
1083 | * ethtool, so we shouldn't be restrictive until that problem is resolved. | |
1084 | * | |
1085 | * Returns: 0 if link is OK for use by FCoE. | |
1086 | * | |
1087 | */ | |
1088 | int fcoe_link_ok(struct fc_lport *lp) | |
1089 | { | |
fc47ff6b | 1090 | struct fcoe_softc *fc = lport_priv(lp); |
85b4aa49 RL |
1091 | struct net_device *dev = fc->real_dev; |
1092 | struct ethtool_cmd ecmd = { ETHTOOL_GSET }; | |
1093 | int rc = 0; | |
1094 | ||
1095 | if ((dev->flags & IFF_UP) && netif_carrier_ok(dev)) { | |
1096 | dev = fc->phys_dev; | |
1097 | if (dev->ethtool_ops->get_settings) { | |
1098 | dev->ethtool_ops->get_settings(dev, &ecmd); | |
1099 | lp->link_supported_speeds &= | |
1100 | ~(FC_PORTSPEED_1GBIT | FC_PORTSPEED_10GBIT); | |
1101 | if (ecmd.supported & (SUPPORTED_1000baseT_Half | | |
1102 | SUPPORTED_1000baseT_Full)) | |
1103 | lp->link_supported_speeds |= FC_PORTSPEED_1GBIT; | |
1104 | if (ecmd.supported & SUPPORTED_10000baseT_Full) | |
1105 | lp->link_supported_speeds |= | |
1106 | FC_PORTSPEED_10GBIT; | |
1107 | if (ecmd.speed == SPEED_1000) | |
1108 | lp->link_speed = FC_PORTSPEED_1GBIT; | |
1109 | if (ecmd.speed == SPEED_10000) | |
1110 | lp->link_speed = FC_PORTSPEED_10GBIT; | |
1111 | } | |
1112 | } else | |
1113 | rc = -1; | |
1114 | ||
1115 | return rc; | |
1116 | } | |
1117 | EXPORT_SYMBOL_GPL(fcoe_link_ok); | |
1118 | ||
/**
 * fcoe_percpu_clean() - Clear the pending skbs for an lport
 * @lp: the fc_lport
 *
 * Walks every per-CPU receive queue and frees the skbs whose
 * fcoe_rcv_info points back at @lp, leaving skbs that belong to
 * other lports untouched.  Each queue is modified under its own
 * fcoe_rx_list lock with BHs disabled.
 */
void fcoe_percpu_clean(struct fc_lport *lp)
{
	int idx;
	struct fcoe_percpu_s *pp;
	struct fcoe_rcv_info *fr;
	struct sk_buff_head *list;
	struct sk_buff *skb, *next;
	struct sk_buff *head;

	/* visit every possible CPU slot; unused slots are NULL */
	for (idx = 0; idx < NR_CPUS; idx++) {
		if (fcoe_percpu[idx]) {
			pp = fcoe_percpu[idx];
			spin_lock_bh(&pp->fcoe_rx_list.lock);
			list = &pp->fcoe_rx_list;
			head = list->next;
			/*
			 * Open-coded safe walk of the circular skb list:
			 * 'next' is captured before a possible unlink, and
			 * the list head itself is the termination sentinel.
			 */
			for (skb = head; skb != (struct sk_buff *)list;
			     skb = next) {
				next = skb->next;
				fr = fcoe_dev_from_skb(skb);
				if (fr->fr_dev == lp) {
					/* lock already held: use __skb_unlink */
					__skb_unlink(skb, list);
					kfree_skb(skb);
				}
			}
			spin_unlock_bh(&pp->fcoe_rx_list.lock);
		}
	}
}
EXPORT_SYMBOL_GPL(fcoe_percpu_clean);
1152 | ||
/**
 * fcoe_clean_pending_queue() - Dequeue all pending skbs and free them
 * @lp: the corresponding fc_lport
 *
 * Drains the softc's fcoe_pending_queue, freeing every queued skb.
 *
 * Returns: none
 */
void fcoe_clean_pending_queue(struct fc_lport *lp)
{
	struct fcoe_softc *fc = lport_priv(lp);
	struct sk_buff *skb;

	spin_lock_bh(&fc->fcoe_pending_queue.lock);
	while ((skb = __skb_dequeue(&fc->fcoe_pending_queue)) != NULL) {
		/*
		 * Deliberately drop the queue lock around kfree_skb() so
		 * the free runs outside the spinlock; the queue state is
		 * re-checked after the lock is re-taken.
		 */
		spin_unlock_bh(&fc->fcoe_pending_queue.lock);
		kfree_skb(skb);
		spin_lock_bh(&fc->fcoe_pending_queue.lock);
	}
	spin_unlock_bh(&fc->fcoe_pending_queue.lock);
}
EXPORT_SYMBOL_GPL(fcoe_clean_pending_queue);
1173 | ||
1174 | /** | |
34f42a07 | 1175 | * libfc_host_alloc() - Allocate a Scsi_Host with room for the fc_lport |
85b4aa49 RL |
1176 | * @sht: ptr to the scsi host templ |
1177 | * @priv_size: size of private data after fc_lport | |
1178 | * | |
1179 | * Returns: ptr to Scsi_Host | |
34f42a07 | 1180 | * TODO: to libfc? |
85b4aa49 | 1181 | */ |
b2ab99c9 RL |
1182 | static inline struct Scsi_Host * |
1183 | libfc_host_alloc(struct scsi_host_template *sht, int priv_size) | |
85b4aa49 RL |
1184 | { |
1185 | return scsi_host_alloc(sht, sizeof(struct fc_lport) + priv_size); | |
1186 | } | |
1187 | ||
1188 | /** | |
34f42a07 | 1189 | * fcoe_host_alloc() - Allocate a Scsi_Host with room for the fcoe_softc |
85b4aa49 RL |
1190 | * @sht: ptr to the scsi host templ |
1191 | * @priv_size: size of private data after fc_lport | |
1192 | * | |
1193 | * Returns: ptr to Scsi_Host | |
1194 | */ | |
1195 | struct Scsi_Host *fcoe_host_alloc(struct scsi_host_template *sht, int priv_size) | |
1196 | { | |
1197 | return libfc_host_alloc(sht, sizeof(struct fcoe_softc) + priv_size); | |
1198 | } | |
1199 | EXPORT_SYMBOL_GPL(fcoe_host_alloc); | |
1200 | ||
/**
 * fcoe_reset() - Reset the fcoe lport behind a Scsi_Host
 * @shost: shost the reset is from
 *
 * Returns: always 0
 */
int fcoe_reset(struct Scsi_Host *shost)
{
	fc_lport_reset(shost_priv(shost));
	return 0;
}
EXPORT_SYMBOL_GPL(fcoe_reset);
1214 | ||
34f42a07 RL |
1215 | /** |
1216 | * fcoe_wwn_from_mac() - Converts 48-bit IEEE MAC address to 64-bit FC WWN. | |
85b4aa49 RL |
1217 | * @mac: mac address |
1218 | * @scheme: check port | |
1219 | * @port: port indicator for converting | |
1220 | * | |
1221 | * Returns: u64 fc world wide name | |
1222 | */ | |
1223 | u64 fcoe_wwn_from_mac(unsigned char mac[MAX_ADDR_LEN], | |
1224 | unsigned int scheme, unsigned int port) | |
1225 | { | |
1226 | u64 wwn; | |
1227 | u64 host_mac; | |
1228 | ||
1229 | /* The MAC is in NO, so flip only the low 48 bits */ | |
1230 | host_mac = ((u64) mac[0] << 40) | | |
1231 | ((u64) mac[1] << 32) | | |
1232 | ((u64) mac[2] << 24) | | |
1233 | ((u64) mac[3] << 16) | | |
1234 | ((u64) mac[4] << 8) | | |
1235 | (u64) mac[5]; | |
1236 | ||
1237 | WARN_ON(host_mac >= (1ULL << 48)); | |
1238 | wwn = host_mac | ((u64) scheme << 60); | |
1239 | switch (scheme) { | |
1240 | case 1: | |
1241 | WARN_ON(port != 0); | |
1242 | break; | |
1243 | case 2: | |
1244 | WARN_ON(port >= 0xfff); | |
1245 | wwn |= (u64) port << 48; | |
1246 | break; | |
1247 | default: | |
1248 | WARN_ON(1); | |
1249 | break; | |
1250 | } | |
1251 | ||
1252 | return wwn; | |
1253 | } | |
1254 | EXPORT_SYMBOL_GPL(fcoe_wwn_from_mac); | |
34f42a07 RL |
1255 | |
1256 | /** | |
1257 | * fcoe_hostlist_lookup_softc() - find the corresponding lport by a given device | |
85b4aa49 RL |
1258 | * @device: this is currently ptr to net_device |
1259 | * | |
1260 | * Returns: NULL or the located fcoe_softc | |
1261 | */ | |
b2ab99c9 RL |
1262 | static struct fcoe_softc * |
1263 | fcoe_hostlist_lookup_softc(const struct net_device *dev) | |
85b4aa49 RL |
1264 | { |
1265 | struct fcoe_softc *fc; | |
1266 | ||
1267 | read_lock(&fcoe_hostlist_lock); | |
1268 | list_for_each_entry(fc, &fcoe_hostlist, list) { | |
1269 | if (fc->real_dev == dev) { | |
1270 | read_unlock(&fcoe_hostlist_lock); | |
1271 | return fc; | |
1272 | } | |
1273 | } | |
1274 | read_unlock(&fcoe_hostlist_lock); | |
1275 | return NULL; | |
1276 | } | |
1277 | ||
34f42a07 RL |
1278 | /** |
1279 | * fcoe_hostlist_lookup() - Find the corresponding lport by netdev | |
85b4aa49 RL |
1280 | * @netdev: ptr to net_device |
1281 | * | |
1282 | * Returns: 0 for success | |
1283 | */ | |
1284 | struct fc_lport *fcoe_hostlist_lookup(const struct net_device *netdev) | |
1285 | { | |
1286 | struct fcoe_softc *fc; | |
1287 | ||
1288 | fc = fcoe_hostlist_lookup_softc(netdev); | |
1289 | ||
1290 | return (fc) ? fc->lp : NULL; | |
1291 | } | |
1292 | EXPORT_SYMBOL_GPL(fcoe_hostlist_lookup); | |
1293 | ||
34f42a07 RL |
1294 | /** |
1295 | * fcoe_hostlist_add() - Add a lport to lports list | |
85b4aa49 RL |
1296 | * @lp: ptr to the fc_lport to badded |
1297 | * | |
1298 | * Returns: 0 for success | |
1299 | */ | |
1300 | int fcoe_hostlist_add(const struct fc_lport *lp) | |
1301 | { | |
1302 | struct fcoe_softc *fc; | |
1303 | ||
1304 | fc = fcoe_hostlist_lookup_softc(fcoe_netdev(lp)); | |
1305 | if (!fc) { | |
fc47ff6b | 1306 | fc = lport_priv(lp); |
85b4aa49 RL |
1307 | write_lock_bh(&fcoe_hostlist_lock); |
1308 | list_add_tail(&fc->list, &fcoe_hostlist); | |
1309 | write_unlock_bh(&fcoe_hostlist_lock); | |
1310 | } | |
1311 | return 0; | |
1312 | } | |
1313 | EXPORT_SYMBOL_GPL(fcoe_hostlist_add); | |
1314 | ||
34f42a07 RL |
1315 | /** |
1316 | * fcoe_hostlist_remove() - remove a lport from lports list | |
85b4aa49 RL |
1317 | * @lp: ptr to the fc_lport to badded |
1318 | * | |
1319 | * Returns: 0 for success | |
1320 | */ | |
1321 | int fcoe_hostlist_remove(const struct fc_lport *lp) | |
1322 | { | |
1323 | struct fcoe_softc *fc; | |
1324 | ||
1325 | fc = fcoe_hostlist_lookup_softc(fcoe_netdev(lp)); | |
1326 | BUG_ON(!fc); | |
1327 | write_lock_bh(&fcoe_hostlist_lock); | |
1328 | list_del(&fc->list); | |
1329 | write_unlock_bh(&fcoe_hostlist_lock); | |
1330 | ||
1331 | return 0; | |
1332 | } | |
1333 | EXPORT_SYMBOL_GPL(fcoe_hostlist_remove); | |
1334 | ||
/**
 * fcoe_libfc_config() - sets up libfc related properties for lport
 * @lp: ptr to the fc_lport
 * @tt: libfc function template
 *
 * Copies the LLDD's function template into the lport and runs the
 * libfc subsystem initializers.  Only fc_fcp_init() can fail here;
 * the remaining init calls have no checked return value.
 *
 * Returns: 0 for success, -ENOMEM if FCP initialization fails
 */
int fcoe_libfc_config(struct fc_lport *lp, struct libfc_function_template *tt)
{
	/* Set the function pointers set by the LLDD */
	memcpy(&lp->tt, tt, sizeof(*tt));
	if (fc_fcp_init(lp))
		return -ENOMEM;
	/* NOTE(review): init order presumably matters (exch before
	 * elsct/lport/rport/disc) — preserve it when modifying. */
	fc_exch_init(lp);
	fc_elsct_init(lp);
	fc_lport_init(lp);
	fc_rport_init(lp);
	fc_disc_init(lp);

	return 0;
}
EXPORT_SYMBOL_GPL(fcoe_libfc_config);
1357 | ||
/**
 * fcoe_init() - fcoe module loading initialization
 *
 * Initialization routine
 * 1. Will create fc transport software structure
 * 2. initialize the link list of port information structure
 * 3. spawns one bound receive kthread per online CPU
 * 4. arms the fcoe watchdog timer and registers netdev notifiers
 *
 * Returns 0 on success, negative on failure
 */
static int __init fcoe_init(void)
{
	int cpu;
	struct fcoe_percpu_s *p;


	INIT_LIST_HEAD(&fcoe_hostlist);
	rwlock_init(&fcoe_hostlist_lock);

#ifdef CONFIG_HOTPLUG_CPU
	/* track CPUs coming and going so per-CPU threads follow them */
	register_cpu_notifier(&fcoe_cpu_notifier);
#endif /* CONFIG_HOTPLUG_CPU */

	/*
	 * initialize per CPU interrupt thread: allocation or kthread
	 * creation failure for one CPU leaves its slot NULL and the
	 * other CPUs unaffected.
	 */
	for_each_online_cpu(cpu) {
		p = kzalloc(sizeof(struct fcoe_percpu_s), GFP_KERNEL);
		if (p) {
			p->thread = kthread_create(fcoe_percpu_receive_thread,
						   (void *)p,
						   "fcoethread/%d", cpu);

			/*
			 * if there is no error then bind the thread to the cpu
			 * initialize the skb queue head and start the thread
			 */
			if (likely(!IS_ERR(p->thread))) {
				p->cpu = cpu;
				fcoe_percpu[cpu] = p;
				skb_queue_head_init(&p->fcoe_rx_list);
				kthread_bind(p->thread, cpu);
				wake_up_process(p->thread);
			} else {
				fcoe_percpu[cpu] = NULL;
				kfree(p);
			}
		}
	}

	/*
	 * setup link change notification
	 */
	fcoe_dev_setup();

	/* watchdog fires first 10s after load, rearming itself after that */
	setup_timer(&fcoe_timer, fcoe_watchdog, 0);

	mod_timer(&fcoe_timer, jiffies + (10 * HZ));

	/*
	 * initialize the fcoe transport.
	 * NOTE(review): the return values of fcoe_transport_init() and
	 * fcoe_sw_init() are ignored and 0 is returned unconditionally —
	 * confirm these cannot fail in a way that should abort module load.
	 */
	fcoe_transport_init();

	fcoe_sw_init();

	return 0;
}
module_init(fcoe_init);
1424 | ||
/**
 * fcoe_exit() - fcoe module unloading cleanup
 *
 * Reverses fcoe_init(): unhooks notifiers, stops the watchdog,
 * detaches every remaining lport from its transport, stops and
 * drains the per-CPU receive threads, then tears down the sw and
 * core fcoe transports.
 */
static void __exit fcoe_exit(void)
{
	u32 idx;
	struct fcoe_softc *fc, *tmp;
	struct fcoe_percpu_s *p;
	struct sk_buff *skb;

	/*
	 * Stop all call back interfaces
	 */
#ifdef CONFIG_HOTPLUG_CPU
	unregister_cpu_notifier(&fcoe_cpu_notifier);
#endif /* CONFIG_HOTPLUG_CPU */
	fcoe_dev_cleanup();

	/*
	 * stop timer
	 */
	del_timer_sync(&fcoe_timer);

	/*
	 * releases the associated fcoe transport for each lport.
	 * NOTE(review): the hostlist is walked here without taking
	 * fcoe_hostlist_lock — presumably safe because no new ports can
	 * appear during module exit; confirm nothing can still mutate it.
	 */
	list_for_each_entry_safe(fc, tmp, &fcoe_hostlist, list)
		fcoe_transport_release(fc->real_dev);

	/* stop each per-CPU thread, then free its queued skbs and state */
	for (idx = 0; idx < NR_CPUS; idx++) {
		if (fcoe_percpu[idx]) {
			kthread_stop(fcoe_percpu[idx]->thread);
			p = fcoe_percpu[idx];
			spin_lock_bh(&p->fcoe_rx_list.lock);
			while ((skb = __skb_dequeue(&p->fcoe_rx_list)) != NULL)
				kfree_skb(skb);
			spin_unlock_bh(&p->fcoe_rx_list.lock);
			/* drop the page cached for CRC/EOF trailers, if any */
			if (fcoe_percpu[idx]->crc_eof_page)
				put_page(fcoe_percpu[idx]->crc_eof_page);
			kfree(fcoe_percpu[idx]);
		}
	}

	/* remove sw transport */
	fcoe_sw_exit();

	/* detach the transport */
	fcoe_transport_exit();
}
module_exit(fcoe_exit);