Commit | Line | Data |
---|---|---|
51dce24b JK |
1 | // SPDX-License-Identifier: GPL-2.0 |
2 | /* Copyright(c) 1999 - 2018 Intel Corporation. */ | |
d3a2ae6d | 3 | |
d3a2ae6d YZ |
4 | #include "ixgbe.h" |
5 | #include <linux/if_ether.h> | |
5a0e3ad6 | 6 | #include <linux/gfp.h> |
be5d507d | 7 | #include <linux/if_vlan.h> |
d3a2ae6d YZ |
8 | #include <scsi/scsi_cmnd.h> |
9 | #include <scsi/scsi_device.h> | |
10 | #include <scsi/fc/fc_fs.h> | |
11 | #include <scsi/fc/fc_fcoe.h> | |
12 | #include <scsi/libfc.h> | |
13 | #include <scsi/libfcoe.h> | |
14 | ||
d0ed8937 YZ |
15 | /** |
16 | * ixgbe_fcoe_clear_ddp - clear the given ddp context | |
49ce9c2c | 17 | * @ddp: ptr to the ixgbe_fcoe_ddp |
d0ed8937 YZ |
18 | * |
19 | * Returns : none | |
20 | * | |
21 | */ | |
22 | static inline void ixgbe_fcoe_clear_ddp(struct ixgbe_fcoe_ddp *ddp) | |
23 | { | |
24 | ddp->len = 0; | |
8ca371e4 | 25 | ddp->err = 1; |
d0ed8937 YZ |
26 | ddp->udl = NULL; |
27 | ddp->udp = 0UL; | |
28 | ddp->sgl = NULL; | |
29 | ddp->sgc = 0; | |
30 | } | |
31 | ||
/**
 * ixgbe_fcoe_ddp_put - free the ddp context for a given xid
 * @netdev: the corresponding net_device
 * @xid: the xid that corresponding ddp will be freed
 *
 * This is the implementation of net_device_ops.ndo_fcoe_ddp_done
 * and it is expected to be called by ULD, i.e., FCP layer of libfc
 * to release the corresponding ddp context when the I/O is done.
 *
 * Returns : data length already ddp-ed in bytes, or 0 when there is
 * no context programmed for @xid (or @xid is out of range).
 */
int ixgbe_fcoe_ddp_put(struct net_device *netdev, u16 xid)
{
	int len;
	struct ixgbe_fcoe *fcoe;
	struct ixgbe_adapter *adapter;
	struct ixgbe_fcoe_ddp *ddp;
	struct ixgbe_hw *hw;
	u32 fcbuff;

	if (!netdev)
		return 0;

	/* fcoe_ddp_xid holds the largest valid xid for this MAC */
	if (xid >= netdev->fcoe_ddp_xid)
		return 0;

	adapter = netdev_priv(netdev);
	fcoe = &adapter->fcoe;
	ddp = &fcoe->ddp[xid];
	/* no user descriptor list means no context was ever set up */
	if (!ddp->udl)
		return 0;

	hw = &adapter->hw;
	len = ddp->len;
	/* if no error then skip ddp context invalidation */
	if (!ddp->err)
		goto skip_ddpinv;

	if (hw->mac.type == ixgbe_mac_X550) {
		/* X550 does not require DDP FCoE lock */

		IXGBE_WRITE_REG(hw, IXGBE_FCDFC(0, xid), 0);
		IXGBE_WRITE_REG(hw, IXGBE_FCDFC(3, xid),
				(xid | IXGBE_FCFLTRW_WE));

		/* program FCBUFF */
		IXGBE_WRITE_REG(hw, IXGBE_FCDDC(2, xid), 0);

		/* program FCDMARW */
		IXGBE_WRITE_REG(hw, IXGBE_FCDDC(3, xid),
				(xid | IXGBE_FCDMARW_WE));

		/* read FCBUFF to check context invalidated */
		IXGBE_WRITE_REG(hw, IXGBE_FCDDC(3, xid),
				(xid | IXGBE_FCDMARW_RE));
		fcbuff = IXGBE_READ_REG(hw, IXGBE_FCDDC(2, xid));
	} else {
		/* other hardware requires DDP FCoE lock since the DDP
		 * context is accessed indirectly through shared registers
		 */
		spin_lock_bh(&fcoe->lock);
		IXGBE_WRITE_REG(hw, IXGBE_FCFLT, 0);
		IXGBE_WRITE_REG(hw, IXGBE_FCFLTRW,
				(xid | IXGBE_FCFLTRW_WE));
		IXGBE_WRITE_REG(hw, IXGBE_FCBUFF, 0);
		IXGBE_WRITE_REG(hw, IXGBE_FCDMARW,
				(xid | IXGBE_FCDMARW_WE));

		/* guaranteed to be invalidated after 100us */
		IXGBE_WRITE_REG(hw, IXGBE_FCDMARW,
				(xid | IXGBE_FCDMARW_RE));
		fcbuff = IXGBE_READ_REG(hw, IXGBE_FCBUFF);
		spin_unlock_bh(&fcoe->lock);
	}

	/* context still marked valid by HW: wait for the invalidation */
	if (fcbuff & IXGBE_FCBUFF_VALID)
		usleep_range(100, 150);

skip_ddpinv:
	if (ddp->sgl)
		dma_unmap_sg(&adapter->pdev->dev, ddp->sgl, ddp->sgc,
			     DMA_FROM_DEVICE);
	if (ddp->pool) {
		dma_pool_free(ddp->pool, ddp->udl, ddp->udp);
		ddp->pool = NULL;
	}

	ixgbe_fcoe_clear_ddp(ddp);

	return len;
}
121 | ||
/**
 * ixgbe_fcoe_ddp_setup - called to set up ddp context
 * @netdev: the corresponding net_device
 * @xid: the exchange id requesting ddp
 * @sgl: the scatter-gather list for this request
 * @sgc: the number of scatter-gather items
 * @target_mode: 1 to setup target mode, 0 to setup initiator mode
 *
 * Maps @sgl for DMA, builds the user descriptor list (udl) of
 * IXGBE_FCBUFF_MIN-sized buffers from it, and programs the DDP and
 * filter contexts for @xid into the hardware.
 *
 * Returns : 1 for success and 0 for no ddp
 */
static int ixgbe_fcoe_ddp_setup(struct net_device *netdev, u16 xid,
				struct scatterlist *sgl, unsigned int sgc,
				int target_mode)
{
	struct ixgbe_adapter *adapter;
	struct ixgbe_hw *hw;
	struct ixgbe_fcoe *fcoe;
	struct ixgbe_fcoe_ddp *ddp;
	struct ixgbe_fcoe_ddp_pool *ddp_pool;
	struct scatterlist *sg;
	unsigned int i, j, dmacount;
	unsigned int len;
	static const unsigned int bufflen = IXGBE_FCBUFF_MIN;
	unsigned int firstoff = 0;
	unsigned int lastsize;
	unsigned int thisoff = 0;
	unsigned int thislen = 0;
	u32 fcbuff, fcdmarw, fcfltrw, fcrxctl;
	dma_addr_t addr = 0;

	if (!netdev || !sgl)
		return 0;

	adapter = netdev_priv(netdev);
	if (xid >= netdev->fcoe_ddp_xid) {
		e_warn(drv, "xid=0x%x out-of-range\n", xid);
		return 0;
	}

	/* no DDP if we are already down or resetting */
	if (test_bit(__IXGBE_DOWN, &adapter->state) ||
	    test_bit(__IXGBE_RESETTING, &adapter->state))
		return 0;

	fcoe = &adapter->fcoe;
	ddp = &fcoe->ddp[xid];
	/* a still-populated sgl means the previous I/O on this xid was
	 * never completed/released - refuse rather than leak it
	 */
	if (ddp->sgl) {
		e_err(drv, "xid 0x%x w/ non-null sgl=%p nents=%d\n",
		      xid, ddp->sgl, ddp->sgc);
		return 0;
	}
	ixgbe_fcoe_clear_ddp(ddp);


	if (!fcoe->ddp_pool) {
		e_warn(drv, "No ddp_pool resources allocated\n");
		return 0;
	}

	/* get_cpu() pins us; every early exit below must put_cpu() */
	ddp_pool = per_cpu_ptr(fcoe->ddp_pool, get_cpu());
	if (!ddp_pool->pool) {
		e_warn(drv, "xid=0x%x no ddp pool for fcoe\n", xid);
		goto out_noddp;
	}

	/* setup dma from scsi command sgl */
	dmacount = dma_map_sg(&adapter->pdev->dev, sgl, sgc, DMA_FROM_DEVICE);
	if (dmacount == 0) {
		e_err(drv, "xid 0x%x DMA map error\n", xid);
		goto out_noddp;
	}

	/* alloc the udl from per cpu ddp pool */
	ddp->udl = dma_pool_alloc(ddp_pool->pool, GFP_KERNEL, &ddp->udp);
	if (!ddp->udl) {
		e_err(drv, "failed allocated ddp context\n");
		goto out_noddp_unmap;
	}
	ddp->pool = ddp_pool->pool;
	ddp->sgl = sgl;
	ddp->sgc = sgc;

	/* split each DMA segment into bufflen-sized, bufflen-aligned
	 * entries of the user descriptor list
	 */
	j = 0;
	for_each_sg(sgl, sg, dmacount, i) {
		addr = sg_dma_address(sg);
		len = sg_dma_len(sg);
		while (len) {
			/* max number of buffers allowed in one DDP context */
			if (j >= IXGBE_BUFFCNT_MAX) {
				ddp_pool->noddp++;
				goto out_noddp_free;
			}

			/* get the offset and length of the current buffer */
			thisoff = addr & ((dma_addr_t)bufflen - 1);
			thislen = min((bufflen - thisoff), len);
			/*
			 * all but the 1st buffer (j == 0)
			 * must be aligned on bufflen
			 */
			if ((j != 0) && (thisoff))
				goto out_noddp_free;
			/*
			 * all but the last buffer
			 * ((i == (dmacount - 1)) && (thislen == len))
			 * must end at bufflen
			 */
			if (((i != (dmacount - 1)) || (thislen != len))
			    && ((thislen + thisoff) != bufflen))
				goto out_noddp_free;

			ddp->udl[j] = (u64)(addr - thisoff);
			/* only the first buffer may have non-zero offset */
			if (j == 0)
				firstoff = thisoff;
			len -= thislen;
			addr += thislen;
			j++;
		}
	}
	/* only the last buffer may have non-full bufflen */
	lastsize = thisoff + thislen;

	/*
	 * lastsize can not be buffer len.
	 * If it is then adding another buffer with lastsize = 1.
	 * The extra buffer is shared by all contexts (HW workaround).
	 */
	if (lastsize == bufflen) {
		if (j >= IXGBE_BUFFCNT_MAX) {
			ddp_pool->noddp_ext_buff++;
			goto out_noddp_free;
		}

		ddp->udl[j] = (u64)(fcoe->extra_ddp_buffer_dma);
		j++;
		lastsize = 1;
	}
	put_cpu();

	fcbuff = (IXGBE_FCBUFF_4KB << IXGBE_FCBUFF_BUFFSIZE_SHIFT);
	fcbuff |= ((j & 0xff) << IXGBE_FCBUFF_BUFFCNT_SHIFT);
	fcbuff |= (firstoff << IXGBE_FCBUFF_OFFSET_SHIFT);
	/* Set WRCONTX bit to allow DDP for target */
	if (target_mode)
		fcbuff |= (IXGBE_FCBUFF_WRCONTX);
	fcbuff |= (IXGBE_FCBUFF_VALID);

	fcdmarw = xid;
	fcdmarw |= IXGBE_FCDMARW_WE;
	fcdmarw |= (lastsize << IXGBE_FCDMARW_LASTSIZE_SHIFT);

	fcfltrw = xid;
	fcfltrw |= IXGBE_FCFLTRW_WE;

	/* program DMA context */
	hw = &adapter->hw;

	/* turn on last frame indication for target mode, as the FCP_RSP
	 * target is supposed to send FCP_RSP when it is done. */
	if (target_mode && !test_bit(__IXGBE_FCOE_TARGET, &fcoe->mode)) {
		set_bit(__IXGBE_FCOE_TARGET, &fcoe->mode);
		fcrxctl = IXGBE_READ_REG(hw, IXGBE_FCRXCTRL);
		fcrxctl |= IXGBE_FCRXCTRL_LASTSEQH;
		IXGBE_WRITE_REG(hw, IXGBE_FCRXCTRL, fcrxctl);
	}

	if (hw->mac.type == ixgbe_mac_X550) {
		/* X550 does not require DDP lock */

		IXGBE_WRITE_REG(hw, IXGBE_FCDDC(0, xid),
				ddp->udp & DMA_BIT_MASK(32));
		IXGBE_WRITE_REG(hw, IXGBE_FCDDC(1, xid), (u64)ddp->udp >> 32);
		IXGBE_WRITE_REG(hw, IXGBE_FCDDC(2, xid), fcbuff);
		IXGBE_WRITE_REG(hw, IXGBE_FCDDC(3, xid), fcdmarw);
		/* program filter context */
		IXGBE_WRITE_REG(hw, IXGBE_FCDFC(0, xid), IXGBE_FCFLT_VALID);
		IXGBE_WRITE_REG(hw, IXGBE_FCDFC(1, xid), 0);
		IXGBE_WRITE_REG(hw, IXGBE_FCDFC(3, xid), fcfltrw);
	} else {
		/* DDP lock for indirect DDP context access */
		spin_lock_bh(&fcoe->lock);

		IXGBE_WRITE_REG(hw, IXGBE_FCPTRL, ddp->udp & DMA_BIT_MASK(32));
		IXGBE_WRITE_REG(hw, IXGBE_FCPTRH, (u64)ddp->udp >> 32);
		IXGBE_WRITE_REG(hw, IXGBE_FCBUFF, fcbuff);
		IXGBE_WRITE_REG(hw, IXGBE_FCDMARW, fcdmarw);
		/* program filter context */
		IXGBE_WRITE_REG(hw, IXGBE_FCPARAM, 0);
		IXGBE_WRITE_REG(hw, IXGBE_FCFLT, IXGBE_FCFLT_VALID);
		IXGBE_WRITE_REG(hw, IXGBE_FCFLTRW, fcfltrw);

		spin_unlock_bh(&fcoe->lock);
	}

	return 1;

out_noddp_free:
	dma_pool_free(ddp->pool, ddp->udl, ddp->udp);
	ixgbe_fcoe_clear_ddp(ddp);

out_noddp_unmap:
	dma_unmap_sg(&adapter->pdev->dev, sgl, sgc, DMA_FROM_DEVICE);
out_noddp:
	put_cpu();
	return 0;
}
328 | ||
68a683cf YZ |
329 | /** |
330 | * ixgbe_fcoe_ddp_get - called to set up ddp context in initiator mode | |
331 | * @netdev: the corresponding net_device | |
332 | * @xid: the exchange id requesting ddp | |
333 | * @sgl: the scatter-gather list for this request | |
334 | * @sgc: the number of scatter-gather items | |
335 | * | |
336 | * This is the implementation of net_device_ops.ndo_fcoe_ddp_setup | |
337 | * and is expected to be called from ULD, e.g., FCP layer of libfc | |
338 | * to set up ddp for the corresponding xid of the given sglist for | |
339 | * the corresponding I/O. | |
340 | * | |
341 | * Returns : 1 for success and 0 for no ddp | |
342 | */ | |
343 | int ixgbe_fcoe_ddp_get(struct net_device *netdev, u16 xid, | |
344 | struct scatterlist *sgl, unsigned int sgc) | |
345 | { | |
346 | return ixgbe_fcoe_ddp_setup(netdev, xid, sgl, sgc, 0); | |
347 | } | |
348 | ||
349 | /** | |
350 | * ixgbe_fcoe_ddp_target - called to set up ddp context in target mode | |
351 | * @netdev: the corresponding net_device | |
352 | * @xid: the exchange id requesting ddp | |
353 | * @sgl: the scatter-gather list for this request | |
354 | * @sgc: the number of scatter-gather items | |
355 | * | |
356 | * This is the implementation of net_device_ops.ndo_fcoe_ddp_target | |
357 | * and is expected to be called from ULD, e.g., FCP layer of libfc | |
358 | * to set up ddp for the corresponding xid of the given sglist for | |
359 | * the corresponding I/O. The DDP in target mode is a write I/O request | |
360 | * from the initiator. | |
361 | * | |
362 | * Returns : 1 for success and 0 for no ddp | |
363 | */ | |
364 | int ixgbe_fcoe_ddp_target(struct net_device *netdev, u16 xid, | |
365 | struct scatterlist *sgl, unsigned int sgc) | |
366 | { | |
367 | return ixgbe_fcoe_ddp_setup(netdev, xid, sgl, sgc, 1); | |
368 | } | |
369 | ||
d0ed8937 YZ |
/**
 * ixgbe_fcoe_ddp - check ddp status and mark it done
 * @adapter: ixgbe adapter
 * @rx_desc: advanced rx descriptor
 * @skb: the skb holding the received data
 *
 * This checks ddp status.
 *
 * Returns : < 0 indicates an error or not a FCoE ddp, 0 indicates
 * not passing the skb to ULD, > 0 indicates is the length of data
 * being ddped.
 */
int ixgbe_fcoe_ddp(struct ixgbe_adapter *adapter,
		   union ixgbe_adv_rx_desc *rx_desc,
		   struct sk_buff *skb)
{
	int rc = -EINVAL;
	struct ixgbe_fcoe *fcoe;
	struct ixgbe_fcoe_ddp *ddp;
	struct fc_frame_header *fh;
	struct fcoe_crc_eof *crc;
	__le32 fcerr = ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_ERR_FCERR);
	__le32 ddp_err;
	int ddp_max;
	u32 fctl;
	u16 xid;

	/* a bad FC CRC means the checksum cannot be trusted */
	if (fcerr == cpu_to_le32(IXGBE_FCERR_BADCRC))
		skb->ip_summed = CHECKSUM_NONE;
	else
		skb->ip_summed = CHECKSUM_UNNECESSARY;

	/* locate the FC header, skipping an optional VLAN header */
	if (eth_hdr(skb)->h_proto == htons(ETH_P_8021Q))
		fh = (struct fc_frame_header *)(skb->data +
			sizeof(struct vlan_hdr) + sizeof(struct fcoe_hdr));
	else
		fh = (struct fc_frame_header *)(skb->data +
			sizeof(struct fcoe_hdr));

	/* the exchange id comes from OX_ID or RX_ID depending on
	 * whether we are the exchange originator or responder
	 */
	fctl = ntoh24(fh->fh_f_ctl);
	if (fctl & FC_FC_EX_CTX)
		xid = be16_to_cpu(fh->fh_ox_id);
	else
		xid = be16_to_cpu(fh->fh_rx_id);

	ddp_max = IXGBE_FCOE_DDP_MAX;
	/* X550 has different DDP Max limit */
	if (adapter->hw.mac.type == ixgbe_mac_X550)
		ddp_max = IXGBE_FCOE_DDP_MAX_X550;
	if (xid >= ddp_max)
		return -EINVAL;

	fcoe = &adapter->fcoe;
	ddp = &fcoe->ddp[xid];
	if (!ddp->udl)
		return -EINVAL;

	ddp_err = ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_ERR_FCEOFE |
					      IXGBE_RXDADV_ERR_FCERR);
	if (ddp_err)
		return -EINVAL;

	switch (ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_STAT_FCSTAT)) {
	/* return 0 to bypass going to ULD for DDPed data */
	case cpu_to_le32(IXGBE_RXDADV_STAT_FCSTAT_DDP):
		/* update length of DDPed data */
		ddp->len = le32_to_cpu(rx_desc->wb.lower.hi_dword.rss);
		rc = 0;
		break;
	/* unmap the sg list when FCPRSP is received */
	case cpu_to_le32(IXGBE_RXDADV_STAT_FCSTAT_FCPRSP):
		dma_unmap_sg(&adapter->pdev->dev, ddp->sgl,
			     ddp->sgc, DMA_FROM_DEVICE);
		ddp->err = (__force u32)ddp_err;
		ddp->sgl = NULL;
		ddp->sgc = 0;
		/* fall through */
	/* if DDP length is present pass it through to ULD */
	case cpu_to_le32(IXGBE_RXDADV_STAT_FCSTAT_NODDP):
		/* update length of DDPed data */
		ddp->len = le32_to_cpu(rx_desc->wb.lower.hi_dword.rss);
		if (ddp->len)
			rc = ddp->len;
		break;
	/* no match will return as an error */
	case cpu_to_le32(IXGBE_RXDADV_STAT_FCSTAT_NOMTCH):
	default:
		break;
	}

	/* In target mode, check the last data frame of the sequence.
	 * For DDP in target mode, data is already DDPed but the header
	 * indication of the last data frame would allow us to tell if we
	 * got all the data and the ULP can send FCP_RSP back, as this is
	 * not a full fcoe frame, we fill the trailer here so it won't be
	 * dropped by the ULP stack.
	 */
	if ((fh->fh_r_ctl == FC_RCTL_DD_SOL_DATA) &&
	    (fctl & FC_FC_END_SEQ)) {
		skb_linearize(skb);
		crc = skb_put(skb, sizeof(*crc));
		crc->fcoe_eof = FC_EOF_T;
	}

	return rc;
}
476 | ||
bc079228 YZ |
/**
 * ixgbe_fso - ixgbe FCoE Sequence Offload (FSO)
 * @tx_ring: tx desc ring
 * @first: first tx_buffer structure containing skb, tx_flags, and protocol
 * @hdr_len: hdr_len to be returned
 *
 * This sets up large send offload for FCoE: it translates the frame's
 * SOF/EOF bytes into context-descriptor flag bits, updates gso/byte
 * accounting on @first for LSO frames, and writes the Tx context
 * descriptor.
 *
 * Returns : 0 indicates success, < 0 for error
 */
int ixgbe_fso(struct ixgbe_ring *tx_ring,
	      struct ixgbe_tx_buffer *first,
	      u8 *hdr_len)
{
	struct sk_buff *skb = first->skb;
	struct fc_frame_header *fh;
	u32 vlan_macip_lens;
	u32 fcoe_sof_eof = 0;
	u32 mss_l4len_idx;
	u32 type_tucmd = IXGBE_ADVTXT_TUCMD_FCOE;
	u8 sof, eof;

	if (skb_is_gso(skb) && (skb_shinfo(skb)->gso_type != SKB_GSO_FCOE)) {
		dev_err(tx_ring->dev, "Wrong gso type %d:expecting SKB_GSO_FCOE\n",
			skb_shinfo(skb)->gso_type);
		return -EINVAL;
	}

	/* resets the header to point fcoe/fc */
	skb_set_network_header(skb, skb->mac_len);
	skb_set_transport_header(skb, skb->mac_len +
				 sizeof(struct fcoe_hdr));

	/* sets up SOF and ORIS */
	sof = ((struct fcoe_hdr *)skb_network_header(skb))->fcoe_sof;
	switch (sof) {
	case FC_SOF_I2:
		fcoe_sof_eof = IXGBE_ADVTXD_FCOEF_ORIS;
		break;
	case FC_SOF_I3:
		fcoe_sof_eof = IXGBE_ADVTXD_FCOEF_SOF |
			       IXGBE_ADVTXD_FCOEF_ORIS;
		break;
	case FC_SOF_N2:
		break;
	case FC_SOF_N3:
		fcoe_sof_eof = IXGBE_ADVTXD_FCOEF_SOF;
		break;
	default:
		dev_warn(tx_ring->dev, "unknown sof = 0x%x\n", sof);
		return -EINVAL;
	}

	/* the first byte of the last dword is EOF */
	skb_copy_bits(skb, skb->len - 4, &eof, 1);
	/* sets up EOF and ORIE */
	switch (eof) {
	case FC_EOF_N:
		fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_EOF_N;
		break;
	case FC_EOF_T:
		/* lso needs ORIE */
		if (skb_is_gso(skb))
			fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_EOF_N |
					IXGBE_ADVTXD_FCOEF_ORIE;
		else
			fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_EOF_T;
		break;
	case FC_EOF_NI:
		fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_EOF_NI;
		break;
	case FC_EOF_A:
		fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_EOF_A;
		break;
	default:
		dev_warn(tx_ring->dev, "unknown eof = 0x%x\n", eof);
		return -EINVAL;
	}

	/* sets up PARINC indicating data offset */
	fh = (struct fc_frame_header *)skb_transport_header(skb);
	if (fh->fh_f_ctl[2] & FC_FC_REL_OFF)
		fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_PARINC;

	/* include trailer in headlen as it is replicated per frame */
	*hdr_len = sizeof(struct fcoe_crc_eof);

	/* hdr_len includes fc_hdr if FCoE LSO is enabled */
	if (skb_is_gso(skb)) {
		*hdr_len += skb_transport_offset(skb) +
			    sizeof(struct fc_frame_header);
		/* update gso_segs and bytecount */
		first->gso_segs = DIV_ROUND_UP(skb->len - *hdr_len,
					       skb_shinfo(skb)->gso_size);
		first->bytecount += (first->gso_segs - 1) * *hdr_len;
		first->tx_flags |= IXGBE_TX_FLAGS_TSO;
		/* Hardware expects L4T to be RSV for FCoE TSO */
		type_tucmd |= IXGBE_ADVTXD_TUCMD_L4T_RSV;
	}

	/* set flag indicating FCOE to ixgbe_tx_map call */
	first->tx_flags |= IXGBE_TX_FLAGS_FCOE | IXGBE_TX_FLAGS_CC;

	/* mss_l4len_id: use 0 for FSO as TSO, no need for L4LEN */
	mss_l4len_idx = skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT;

	/* vlan_macip_lens: HEADLEN, MACLEN, VLAN tag */
	vlan_macip_lens = skb_transport_offset(skb) +
			  sizeof(struct fc_frame_header);
	vlan_macip_lens |= (skb_transport_offset(skb) - 4)
			   << IXGBE_ADVTXD_MACLEN_SHIFT;
	vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;

	/* write context desc */
	ixgbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, fcoe_sof_eof,
			  type_tucmd, mss_l4len_idx);

	return 0;
}
596 | ||
5a1ee270 AD |
597 | static void ixgbe_fcoe_dma_pool_free(struct ixgbe_fcoe *fcoe, unsigned int cpu) |
598 | { | |
599 | struct ixgbe_fcoe_ddp_pool *ddp_pool; | |
600 | ||
601 | ddp_pool = per_cpu_ptr(fcoe->ddp_pool, cpu); | |
edab421a | 602 | dma_pool_destroy(ddp_pool->pool); |
5a1ee270 AD |
603 | ddp_pool->pool = NULL; |
604 | } | |
605 | ||
5a1ee270 AD |
606 | static int ixgbe_fcoe_dma_pool_alloc(struct ixgbe_fcoe *fcoe, |
607 | struct device *dev, | |
608 | unsigned int cpu) | |
609 | { | |
610 | struct ixgbe_fcoe_ddp_pool *ddp_pool; | |
611 | struct dma_pool *pool; | |
612 | char pool_name[32]; | |
613 | ||
0e7bcee4 | 614 | snprintf(pool_name, 32, "ixgbe_fcoe_ddp_%u", cpu); |
5a1ee270 AD |
615 | |
616 | pool = dma_pool_create(pool_name, dev, IXGBE_FCPTR_MAX, | |
617 | IXGBE_FCPTR_ALIGN, PAGE_SIZE); | |
618 | if (!pool) | |
619 | return -ENOMEM; | |
620 | ||
621 | ddp_pool = per_cpu_ptr(fcoe->ddp_pool, cpu); | |
622 | ddp_pool->pool = pool; | |
623 | ddp_pool->noddp = 0; | |
624 | ddp_pool->noddp_ext_buff = 0; | |
625 | ||
626 | return 0; | |
dadbe85a VD |
627 | } |
628 | ||
d3a2ae6d YZ |
/**
 * ixgbe_configure_fcoe - configures registers for fcoe at start
 * @adapter: ptr to ixgbe adapter
 *
 * This sets up FCoE related registers: the FCoE/FIP EtherType
 * filters, the FCoE Rx queue redirection table, and Rx control.
 *
 * Returns : none
 */
void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter)
{
	struct ixgbe_ring_feature *fcoe = &adapter->ring_feature[RING_F_FCOE];
	struct ixgbe_hw *hw = &adapter->hw;
	int i, fcoe_q, fcoe_i, fcoe_q_h = 0;
	int fcreta_size;
	u32 etqf;

	/* Minimal functionality for FCoE requires at least CRC offloads */
	if (!(adapter->netdev->features & NETIF_F_FCOE_CRC))
		return;

	/* Enable L2 EtherType filter for FCoE, needed for FCoE CRC and DDP */
	etqf = ETH_P_FCOE | IXGBE_ETQF_FCOE | IXGBE_ETQF_FILTER_EN;
	if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
		/* direct filter hits to the PF's pool under SR-IOV */
		etqf |= IXGBE_ETQF_POOL_ENABLE;
		etqf |= VMDQ_P(0) << IXGBE_ETQF_POOL_SHIFT;
	}
	IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_FCOE), etqf);
	IXGBE_WRITE_REG(hw, IXGBE_ETQS(IXGBE_ETQF_FILTER_FCOE), 0);

	/* leave registers un-configured if FCoE is disabled */
	if (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED))
		return;

	/* Use one or more Rx queues for FCoE by redirection table */
	fcreta_size = IXGBE_FCRETA_SIZE;
	if (adapter->hw.mac.type == ixgbe_mac_X550)
		fcreta_size = IXGBE_FCRETA_SIZE_X550;

	for (i = 0; i < fcreta_size; i++) {
		if (adapter->hw.mac.type == ixgbe_mac_X550) {
			/* X550 entries carry extra high queue bits */
			int fcoe_i_h = fcoe->offset + ((i + fcreta_size) %
							fcoe->indices);
			fcoe_q_h = adapter->rx_ring[fcoe_i_h]->reg_idx;
			fcoe_q_h = (fcoe_q_h << IXGBE_FCRETA_ENTRY_HIGH_SHIFT) &
				   IXGBE_FCRETA_ENTRY_HIGH_MASK;
		}

		fcoe_i = fcoe->offset + (i % fcoe->indices);
		fcoe_i &= IXGBE_FCRETA_ENTRY_MASK;
		fcoe_q = adapter->rx_ring[fcoe_i]->reg_idx;
		fcoe_q |= fcoe_q_h;
		IXGBE_WRITE_REG(hw, IXGBE_FCRETA(i), fcoe_q);
	}
	IXGBE_WRITE_REG(hw, IXGBE_FCRECTL, IXGBE_FCRECTL_ENA);

	/* Enable L2 EtherType filter for FIP */
	etqf = ETH_P_FIP | IXGBE_ETQF_FILTER_EN;
	if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
		etqf |= IXGBE_ETQF_POOL_ENABLE;
		etqf |= VMDQ_P(0) << IXGBE_ETQF_POOL_SHIFT;
	}
	IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_FIP), etqf);

	/* Send FIP frames to the first FCoE queue */
	fcoe_q = adapter->rx_ring[fcoe->offset]->reg_idx;
	IXGBE_WRITE_REG(hw, IXGBE_ETQS(IXGBE_ETQF_FILTER_FIP),
			IXGBE_ETQS_QUEUE_EN |
			(fcoe_q << IXGBE_ETQS_RX_QUEUE_SHIFT));

	/* Configure FCoE Rx control */
	IXGBE_WRITE_REG(hw, IXGBE_FCRXCTRL,
			IXGBE_FCRXCTRL_FCCRCBO |
			(FC_FCOE_VER << IXGBE_FCRXCTRL_FCOEVER_SHIFT));
}
d0ed8937 YZ |
703 | |
/**
 * ixgbe_free_fcoe_ddp_resources - release all fcoe ddp context resources
 * @adapter : ixgbe adapter
 *
 * Cleans up outstanding ddp context resources: releases every DDP
 * context still held, destroys the per-CPU DMA pools, then unmaps
 * and frees the shared extra DDP buffer.
 *
 * Returns : none
 */
void ixgbe_free_fcoe_ddp_resources(struct ixgbe_adapter *adapter)
{
	struct ixgbe_fcoe *fcoe = &adapter->fcoe;
	int cpu, i, ddp_max;

	/* do nothing if no DDP pools were allocated */
	if (!fcoe->ddp_pool)
		return;

	ddp_max = IXGBE_FCOE_DDP_MAX;
	/* X550 has different DDP Max limit */
	if (adapter->hw.mac.type == ixgbe_mac_X550)
		ddp_max = IXGBE_FCOE_DDP_MAX_X550;

	/* release outstanding contexts before destroying their pools */
	for (i = 0; i < ddp_max; i++)
		ixgbe_fcoe_ddp_put(adapter->netdev, i);

	for_each_possible_cpu(cpu)
		ixgbe_fcoe_dma_pool_free(fcoe, cpu);

	dma_unmap_single(&adapter->pdev->dev,
			 fcoe->extra_ddp_buffer_dma,
			 IXGBE_FCBUFF_MIN,
			 DMA_FROM_DEVICE);
	kfree(fcoe->extra_ddp_buffer);

	fcoe->extra_ddp_buffer = NULL;
	fcoe->extra_ddp_buffer_dma = 0;
}
741 | ||
742 | /** | |
743 | * ixgbe_setup_fcoe_ddp_resources - setup all fcoe ddp context resources | |
744 | * @adapter: ixgbe adapter | |
745 | * | |
746 | * Sets up ddp context resouces | |
747 | * | |
748 | * Returns : 0 indicates success or -EINVAL on failure | |
749 | */ | |
750 | int ixgbe_setup_fcoe_ddp_resources(struct ixgbe_adapter *adapter) | |
751 | { | |
752 | struct ixgbe_fcoe *fcoe = &adapter->fcoe; | |
753 | struct device *dev = &adapter->pdev->dev; | |
754 | void *buffer; | |
755 | dma_addr_t dma; | |
756 | unsigned int cpu; | |
757 | ||
758 | /* do nothing if no DDP pools were allocated */ | |
759 | if (!fcoe->ddp_pool) | |
760 | return 0; | |
761 | ||
762 | /* Extra buffer to be shared by all DDPs for HW work around */ | |
374f78f7 | 763 | buffer = kmalloc(IXGBE_FCBUFF_MIN, GFP_KERNEL); |
14f8dc49 | 764 | if (!buffer) |
7c8ae65a | 765 | return -ENOMEM; |
7c8ae65a AD |
766 | |
767 | dma = dma_map_single(dev, buffer, IXGBE_FCBUFF_MIN, DMA_FROM_DEVICE); | |
768 | if (dma_mapping_error(dev, dma)) { | |
769 | e_err(drv, "failed to map extra DDP buffer\n"); | |
770 | kfree(buffer); | |
771 | return -ENOMEM; | |
772 | } | |
773 | ||
774 | fcoe->extra_ddp_buffer = buffer; | |
775 | fcoe->extra_ddp_buffer_dma = dma; | |
776 | ||
777 | /* allocate pci pool for each cpu */ | |
778 | for_each_possible_cpu(cpu) { | |
779 | int err = ixgbe_fcoe_dma_pool_alloc(fcoe, dev, cpu); | |
780 | if (!err) | |
781 | continue; | |
782 | ||
783 | e_err(drv, "failed to alloc DDP pool on cpu:%d\n", cpu); | |
784 | ixgbe_free_fcoe_ddp_resources(adapter); | |
785 | return -ENOMEM; | |
786 | } | |
787 | ||
788 | return 0; | |
789 | } | |
790 | ||
791 | static int ixgbe_fcoe_ddp_enable(struct ixgbe_adapter *adapter) | |
792 | { | |
793 | struct ixgbe_fcoe *fcoe = &adapter->fcoe; | |
794 | ||
795 | if (!(adapter->flags & IXGBE_FLAG_FCOE_CAPABLE)) | |
796 | return -EINVAL; | |
797 | ||
798 | fcoe->ddp_pool = alloc_percpu(struct ixgbe_fcoe_ddp_pool); | |
799 | ||
800 | if (!fcoe->ddp_pool) { | |
801 | e_err(drv, "failed to allocate percpu DDP resources\n"); | |
802 | return -ENOMEM; | |
803 | } | |
804 | ||
805 | adapter->netdev->fcoe_ddp_xid = IXGBE_FCOE_DDP_MAX - 1; | |
ea412015 VD |
806 | /* X550 has different DDP Max limit */ |
807 | if (adapter->hw.mac.type == ixgbe_mac_X550) | |
808 | adapter->netdev->fcoe_ddp_xid = IXGBE_FCOE_DDP_MAX_X550 - 1; | |
7c8ae65a AD |
809 | |
810 | return 0; | |
811 | } | |
812 | ||
813 | static void ixgbe_fcoe_ddp_disable(struct ixgbe_adapter *adapter) | |
814 | { | |
815 | struct ixgbe_fcoe *fcoe = &adapter->fcoe; | |
816 | ||
817 | adapter->netdev->fcoe_ddp_xid = 0; | |
818 | ||
819 | if (!fcoe->ddp_pool) | |
820 | return; | |
821 | ||
822 | free_percpu(fcoe->ddp_pool); | |
823 | fcoe->ddp_pool = NULL; | |
d0ed8937 | 824 | } |
8450ff8c YZ |
825 | |
826 | /** | |
827 | * ixgbe_fcoe_enable - turn on FCoE offload feature | |
828 | * @netdev: the corresponding netdev | |
829 | * | |
830 | * Turns on FCoE offload feature in 82599. | |
831 | * | |
832 | * Returns : 0 indicates success or -EINVAL on failure | |
833 | */ | |
834 | int ixgbe_fcoe_enable(struct net_device *netdev) | |
835 | { | |
8450ff8c | 836 | struct ixgbe_adapter *adapter = netdev_priv(netdev); |
27ab7606 | 837 | struct ixgbe_fcoe *fcoe = &adapter->fcoe; |
8450ff8c | 838 | |
7c8ae65a | 839 | atomic_inc(&fcoe->refcnt); |
8450ff8c YZ |
840 | |
841 | if (!(adapter->flags & IXGBE_FLAG_FCOE_CAPABLE)) | |
7c8ae65a | 842 | return -EINVAL; |
8450ff8c YZ |
843 | |
844 | if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) | |
7c8ae65a | 845 | return -EINVAL; |
8450ff8c | 846 | |
396e799c | 847 | e_info(drv, "Enabling FCoE offload features.\n"); |
872844dd AD |
848 | |
849 | if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) | |
850 | e_warn(probe, "Enabling FCoE on PF will disable legacy VFs\n"); | |
851 | ||
8450ff8c YZ |
852 | if (netif_running(netdev)) |
853 | netdev->netdev_ops->ndo_stop(netdev); | |
854 | ||
7c8ae65a AD |
855 | /* Allocate per CPU memory to track DDP pools */ |
856 | ixgbe_fcoe_ddp_enable(adapter); | |
8450ff8c | 857 | |
7c8ae65a | 858 | /* enable FCoE and notify stack */ |
8450ff8c | 859 | adapter->flags |= IXGBE_FLAG_FCOE_ENABLED; |
a58915c7 | 860 | netdev->features |= NETIF_F_FCOE_MTU; |
7c8ae65a | 861 | netdev_features_change(netdev); |
8450ff8c | 862 | |
7c8ae65a AD |
863 | /* release existing queues and reallocate them */ |
864 | ixgbe_clear_interrupt_scheme(adapter); | |
8450ff8c YZ |
865 | ixgbe_init_interrupt_scheme(adapter); |
866 | ||
867 | if (netif_running(netdev)) | |
868 | netdev->netdev_ops->ndo_open(netdev); | |
8450ff8c | 869 | |
7c8ae65a | 870 | return 0; |
8450ff8c YZ |
871 | } |
872 | ||
873 | /** | |
874 | * ixgbe_fcoe_disable - turn off FCoE offload feature | |
875 | * @netdev: the corresponding netdev | |
876 | * | |
877 | * Turns off FCoE offload feature in 82599. | |
878 | * | |
879 | * Returns : 0 indicates success or -EINVAL on failure | |
880 | */ | |
881 | int ixgbe_fcoe_disable(struct net_device *netdev) | |
882 | { | |
8450ff8c YZ |
883 | struct ixgbe_adapter *adapter = netdev_priv(netdev); |
884 | ||
7c8ae65a AD |
885 | if (!atomic_dec_and_test(&adapter->fcoe.refcnt)) |
886 | return -EINVAL; | |
8450ff8c YZ |
887 | |
888 | if (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED)) | |
7c8ae65a | 889 | return -EINVAL; |
8450ff8c | 890 | |
396e799c | 891 | e_info(drv, "Disabling FCoE offload features.\n"); |
8450ff8c YZ |
892 | if (netif_running(netdev)) |
893 | netdev->netdev_ops->ndo_stop(netdev); | |
894 | ||
7c8ae65a AD |
895 | /* Free per CPU memory to track DDP pools */ |
896 | ixgbe_fcoe_ddp_disable(adapter); | |
897 | ||
898 | /* disable FCoE and notify stack */ | |
8450ff8c | 899 | adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED; |
a58915c7 | 900 | netdev->features &= ~NETIF_F_FCOE_MTU; |
7c8ae65a AD |
901 | |
902 | netdev_features_change(netdev); | |
903 | ||
904 | /* release existing queues and reallocate them */ | |
905 | ixgbe_clear_interrupt_scheme(adapter); | |
8450ff8c | 906 | ixgbe_init_interrupt_scheme(adapter); |
936332b8 | 907 | |
8450ff8c YZ |
908 | if (netif_running(netdev)) |
909 | netdev->netdev_ops->ndo_open(netdev); | |
8450ff8c | 910 | |
7c8ae65a | 911 | return 0; |
8450ff8c | 912 | } |
6ee16520 | 913 | |
61a1fa10 YZ |
914 | /** |
915 | * ixgbe_fcoe_get_wwn - get world wide name for the node or the port | |
916 | * @netdev : ixgbe adapter | |
917 | * @wwn : the world wide name | |
918 | * @type: the type of world wide name | |
919 | * | |
920 | * Returns the node or port world wide name if both the prefix and the san | |
921 | * mac address are valid, then the wwn is formed based on the NAA-2 for | |
922 | * IEEE Extended name identifier (ref. to T10 FC-LS Spec., Sec. 15.3). | |
923 | * | |
924 | * Returns : 0 on success | |
925 | */ | |
926 | int ixgbe_fcoe_get_wwn(struct net_device *netdev, u64 *wwn, int type) | |
927 | { | |
61a1fa10 YZ |
928 | u16 prefix = 0xffff; |
929 | struct ixgbe_adapter *adapter = netdev_priv(netdev); | |
930 | struct ixgbe_mac_info *mac = &adapter->hw.mac; | |
931 | ||
932 | switch (type) { | |
933 | case NETDEV_FCOE_WWNN: | |
934 | prefix = mac->wwnn_prefix; | |
935 | break; | |
936 | case NETDEV_FCOE_WWPN: | |
937 | prefix = mac->wwpn_prefix; | |
938 | break; | |
939 | default: | |
940 | break; | |
941 | } | |
942 | ||
943 | if ((prefix != 0xffff) && | |
944 | is_valid_ether_addr(mac->san_addr)) { | |
945 | *wwn = ((u64) prefix << 48) | | |
946 | ((u64) mac->san_addr[0] << 40) | | |
947 | ((u64) mac->san_addr[1] << 32) | | |
948 | ((u64) mac->san_addr[2] << 24) | | |
949 | ((u64) mac->san_addr[3] << 16) | | |
950 | ((u64) mac->san_addr[4] << 8) | | |
951 | ((u64) mac->san_addr[5]); | |
e90dd264 | 952 | return 0; |
61a1fa10 | 953 | } |
e90dd264 | 954 | return -EINVAL; |
61a1fa10 | 955 | } |
ea81875a NP |
956 | |
957 | /** | |
958 | * ixgbe_fcoe_get_hbainfo - get FCoE HBA information | |
959 | * @netdev : ixgbe adapter | |
960 | * @info : HBA information | |
961 | * | |
962 | * Returns ixgbe HBA information | |
963 | * | |
964 | * Returns : 0 on success | |
965 | */ | |
966 | int ixgbe_fcoe_get_hbainfo(struct net_device *netdev, | |
967 | struct netdev_fcoe_hbainfo *info) | |
968 | { | |
969 | struct ixgbe_adapter *adapter = netdev_priv(netdev); | |
970 | struct ixgbe_hw *hw = &adapter->hw; | |
971 | int i, pos; | |
972 | u8 buf[8]; | |
973 | ||
974 | if (!info) | |
975 | return -EINVAL; | |
976 | ||
977 | /* Don't return information on unsupported devices */ | |
b262a9a7 | 978 | if (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED)) |
ea81875a NP |
979 | return -EINVAL; |
980 | ||
981 | /* Manufacturer */ | |
982 | snprintf(info->manufacturer, sizeof(info->manufacturer), | |
983 | "Intel Corporation"); | |
984 | ||
985 | /* Serial Number */ | |
986 | ||
987 | /* Get the PCI-e Device Serial Number Capability */ | |
988 | pos = pci_find_ext_capability(adapter->pdev, PCI_EXT_CAP_ID_DSN); | |
989 | if (pos) { | |
990 | pos += 4; | |
991 | for (i = 0; i < 8; i++) | |
992 | pci_read_config_byte(adapter->pdev, pos + i, &buf[i]); | |
993 | ||
994 | snprintf(info->serial_number, sizeof(info->serial_number), | |
995 | "%02X%02X%02X%02X%02X%02X%02X%02X", | |
996 | buf[7], buf[6], buf[5], buf[4], | |
997 | buf[3], buf[2], buf[1], buf[0]); | |
998 | } else | |
999 | snprintf(info->serial_number, sizeof(info->serial_number), | |
1000 | "Unknown"); | |
1001 | ||
1002 | /* Hardware Version */ | |
1003 | snprintf(info->hardware_version, | |
1004 | sizeof(info->hardware_version), | |
1005 | "Rev %d", hw->revision_id); | |
1006 | /* Driver Name/Version */ | |
1007 | snprintf(info->driver_version, | |
1008 | sizeof(info->driver_version), | |
1009 | "%s v%s", | |
1010 | ixgbe_driver_name, | |
1011 | ixgbe_driver_version); | |
1012 | /* Firmware Version */ | |
73834aec PG |
1013 | strlcpy(info->firmware_version, adapter->eeprom_id, |
1014 | sizeof(info->firmware_version)); | |
ea81875a NP |
1015 | |
1016 | /* Model */ | |
1017 | if (hw->mac.type == ixgbe_mac_82599EB) { | |
1018 | snprintf(info->model, | |
1019 | sizeof(info->model), | |
1020 | "Intel 82599"); | |
b262a9a7 UK |
1021 | } else if (hw->mac.type == ixgbe_mac_X550) { |
1022 | snprintf(info->model, | |
1023 | sizeof(info->model), | |
1024 | "Intel X550"); | |
ea81875a NP |
1025 | } else { |
1026 | snprintf(info->model, | |
1027 | sizeof(info->model), | |
1028 | "Intel X540"); | |
1029 | } | |
1030 | ||
1031 | /* Model Description */ | |
1032 | snprintf(info->model_description, | |
1033 | sizeof(info->model_description), | |
1034 | "%s", | |
1035 | ixgbe_default_device_descr); | |
1036 | ||
1037 | return 0; | |
1038 | } | |
800bd607 AD |
1039 | |
1040 | /** | |
1041 | * ixgbe_fcoe_get_tc - get the current TC that fcoe is mapped to | |
5ba643c6 | 1042 | * @adapter: pointer to the device adapter structure |
800bd607 AD |
1043 | * |
1044 | * Return : TC that FCoE is mapped to | |
1045 | */ | |
1046 | u8 ixgbe_fcoe_get_tc(struct ixgbe_adapter *adapter) | |
1047 | { | |
1048 | #ifdef CONFIG_IXGBE_DCB | |
1049 | return netdev_get_prio_tc_map(adapter->netdev, adapter->fcoe.up); | |
1050 | #else | |
1051 | return 0; | |
1052 | #endif | |
1053 | } |