Commit | Line | Data |
---|---|---|
8af3c33f JK |
1 | /******************************************************************************* |
2 | ||
3 | Intel 10 Gigabit PCI Express Linux driver | |
434c5e39 | 4 | Copyright(c) 1999 - 2013 Intel Corporation. |
8af3c33f JK |
5 | |
6 | This program is free software; you can redistribute it and/or modify it | |
7 | under the terms and conditions of the GNU General Public License, | |
8 | version 2, as published by the Free Software Foundation. | |
9 | ||
10 | This program is distributed in the hope it will be useful, but WITHOUT | |
11 | ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | |
12 | FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | |
13 | more details. | |
14 | ||
15 | You should have received a copy of the GNU General Public License along with | |
16 | this program; if not, write to the Free Software Foundation, Inc., | |
17 | 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. | |
18 | ||
19 | The full GNU General Public License is included in this distribution in | |
20 | the file called "COPYING". | |
21 | ||
22 | Contact Information: | |
b89aae71 | 23 | Linux NICS <linux.nics@intel.com> |
8af3c33f JK |
24 | e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> |
25 | Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 | |
26 | ||
27 | *******************************************************************************/ | |
28 | ||
29 | #include "ixgbe.h" | |
30 | #include "ixgbe_sriov.h" | |
31 | ||
800bd607 | 32 | #ifdef CONFIG_IXGBE_DCB |
73079ea0 AD |
/**
 * ixgbe_cache_ring_dcb_sriov - Descriptor ring to register mapping for SR-IOV
 * @adapter: board private structure to initialize
 *
 * Cache the descriptor ring offsets for SR-IOV to the assigned rings.  It
 * will also try to cache the proper offsets if RSS/FCoE are enabled along
 * with VMDq.
 *
 * Returns false (leaving ring mappings untouched) unless both DCB and
 * SR-IOV are enabled; returns true once the reg_idx of every Tx/Rx ring
 * has been cached.
 **/
static bool ixgbe_cache_ring_dcb_sriov(struct ixgbe_adapter *adapter)
{
#ifdef IXGBE_FCOE
	struct ixgbe_ring_feature *fcoe = &adapter->ring_feature[RING_F_FCOE];
#endif /* IXGBE_FCOE */
	struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ];
	int i;
	u16 reg_idx;
	u8 tcs = netdev_get_num_tc(adapter->netdev);

	/* verify we have DCB queueing enabled before proceeding */
	if (tcs <= 1)
		return false;

	/* verify we have VMDq enabled before proceeding */
	if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
		return false;

	/* start at VMDq register offset for SR-IOV enabled setups;
	 * __ALIGN_MASK(1, ~vmdq->mask) is the number of hardware queues
	 * per VMDq pool, so this is the first queue of pool vmdq->offset
	 */
	reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask);
	for (i = 0; i < adapter->num_rx_queues; i++, reg_idx++) {
		/* If we are greater than indices move to next pool:
		 * (reg_idx & ~vmdq->mask) is the queue offset inside the
		 * current pool; once it reaches tcs, round reg_idx up to
		 * the start of the next pool.
		 */
		if ((reg_idx & ~vmdq->mask) >= tcs)
			reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask);
		adapter->rx_ring[i]->reg_idx = reg_idx;
	}

	/* repeat the same pool-relative walk for the Tx rings */
	reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask);
	for (i = 0; i < adapter->num_tx_queues; i++, reg_idx++) {
		/* If we are greater than indices move to next pool */
		if ((reg_idx & ~vmdq->mask) >= tcs)
			reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask);
		adapter->tx_ring[i]->reg_idx = reg_idx;
	}

#ifdef IXGBE_FCOE
	/* nothing to do if FCoE is disabled */
	if (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED))
		return true;

	/* The work is already done if the FCoE ring is shared with a TC */
	if (fcoe->offset < tcs)
		return true;

	/* The FCoE rings exist separately, we need to move their reg_idx */
	if (fcoe->indices) {
		u16 queues_per_pool = __ALIGN_MASK(1, ~vmdq->mask);
		u8 fcoe_tc = ixgbe_fcoe_get_tc(adapter);

		/* FCoE rings start in the first pool past the VMDq pools,
		 * one ring per pool, each offset by the FCoE traffic class
		 */
		reg_idx = (vmdq->offset + vmdq->indices) * queues_per_pool;
		for (i = fcoe->offset; i < adapter->num_rx_queues; i++) {
			reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask) + fcoe_tc;
			adapter->rx_ring[i]->reg_idx = reg_idx;
			reg_idx++;
		}

		reg_idx = (vmdq->offset + vmdq->indices) * queues_per_pool;
		for (i = fcoe->offset; i < adapter->num_tx_queues; i++) {
			reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask) + fcoe_tc;
			adapter->tx_ring[i]->reg_idx = reg_idx;
			reg_idx++;
		}
	}

#endif /* IXGBE_FCOE */
	return true;
}
109 | ||
8af3c33f JK |
110 | /* ixgbe_get_first_reg_idx - Return first register index associated with ring */ |
111 | static void ixgbe_get_first_reg_idx(struct ixgbe_adapter *adapter, u8 tc, | |
112 | unsigned int *tx, unsigned int *rx) | |
113 | { | |
114 | struct net_device *dev = adapter->netdev; | |
115 | struct ixgbe_hw *hw = &adapter->hw; | |
116 | u8 num_tcs = netdev_get_num_tc(dev); | |
117 | ||
118 | *tx = 0; | |
119 | *rx = 0; | |
120 | ||
121 | switch (hw->mac.type) { | |
122 | case ixgbe_mac_82598EB: | |
4ae63730 AD |
123 | /* TxQs/TC: 4 RxQs/TC: 8 */ |
124 | *tx = tc << 2; /* 0, 4, 8, 12, 16, 20, 24, 28 */ | |
125 | *rx = tc << 3; /* 0, 8, 16, 24, 32, 40, 48, 56 */ | |
8af3c33f JK |
126 | break; |
127 | case ixgbe_mac_82599EB: | |
128 | case ixgbe_mac_X540: | |
129 | if (num_tcs > 4) { | |
4ae63730 AD |
130 | /* |
131 | * TCs : TC0/1 TC2/3 TC4-7 | |
132 | * TxQs/TC: 32 16 8 | |
133 | * RxQs/TC: 16 16 16 | |
134 | */ | |
135 | *rx = tc << 4; | |
136 | if (tc < 3) | |
137 | *tx = tc << 5; /* 0, 32, 64 */ | |
138 | else if (tc < 5) | |
139 | *tx = (tc + 2) << 4; /* 80, 96 */ | |
140 | else | |
141 | *tx = (tc + 8) << 3; /* 104, 112, 120 */ | |
8af3c33f | 142 | } else { |
4ae63730 AD |
143 | /* |
144 | * TCs : TC0 TC1 TC2/3 | |
145 | * TxQs/TC: 64 32 16 | |
146 | * RxQs/TC: 32 32 32 | |
147 | */ | |
148 | *rx = tc << 5; | |
149 | if (tc < 2) | |
150 | *tx = tc << 6; /* 0, 64 */ | |
151 | else | |
152 | *tx = (tc + 4) << 4; /* 96, 112 */ | |
8af3c33f | 153 | } |
8af3c33f JK |
154 | default: |
155 | break; | |
156 | } | |
157 | } | |
158 | ||
159 | /** | |
160 | * ixgbe_cache_ring_dcb - Descriptor ring to register mapping for DCB | |
161 | * @adapter: board private structure to initialize | |
162 | * | |
163 | * Cache the descriptor ring offsets for DCB to the assigned rings. | |
164 | * | |
165 | **/ | |
4ae63730 | 166 | static bool ixgbe_cache_ring_dcb(struct ixgbe_adapter *adapter) |
8af3c33f JK |
167 | { |
168 | struct net_device *dev = adapter->netdev; | |
4ae63730 AD |
169 | unsigned int tx_idx, rx_idx; |
170 | int tc, offset, rss_i, i; | |
8af3c33f JK |
171 | u8 num_tcs = netdev_get_num_tc(dev); |
172 | ||
4ae63730 AD |
173 | /* verify we have DCB queueing enabled before proceeding */ |
174 | if (num_tcs <= 1) | |
8af3c33f JK |
175 | return false; |
176 | ||
4ae63730 | 177 | rss_i = adapter->ring_feature[RING_F_RSS].indices; |
8af3c33f | 178 | |
4ae63730 AD |
179 | for (tc = 0, offset = 0; tc < num_tcs; tc++, offset += rss_i) { |
180 | ixgbe_get_first_reg_idx(adapter, tc, &tx_idx, &rx_idx); | |
181 | for (i = 0; i < rss_i; i++, tx_idx++, rx_idx++) { | |
182 | adapter->tx_ring[offset + i]->reg_idx = tx_idx; | |
183 | adapter->rx_ring[offset + i]->reg_idx = rx_idx; | |
184 | adapter->tx_ring[offset + i]->dcb_tc = tc; | |
185 | adapter->rx_ring[offset + i]->dcb_tc = tc; | |
8af3c33f JK |
186 | } |
187 | } | |
188 | ||
189 | return true; | |
190 | } | |
8af3c33f | 191 | |
d411a936 | 192 | #endif |
8af3c33f JK |
/**
 * ixgbe_cache_ring_sriov - Descriptor ring to register mapping for sriov
 * @adapter: board private structure to initialize
 *
 * SR-IOV doesn't use any descriptor rings but changes the default if
 * no other mapping is used.
 *
 * Returns false (no-op) unless VMDq is enabled; otherwise caches a
 * pool-relative reg_idx for every Tx/Rx ring and returns true.
 */
static bool ixgbe_cache_ring_sriov(struct ixgbe_adapter *adapter)
{
#ifdef IXGBE_FCOE
	struct ixgbe_ring_feature *fcoe = &adapter->ring_feature[RING_F_FCOE];
#endif /* IXGBE_FCOE */
	struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ];
	struct ixgbe_ring_feature *rss = &adapter->ring_feature[RING_F_RSS];
	int i;
	u16 reg_idx;

	/* only proceed if VMDq is enabled */
	if (!(adapter->flags & IXGBE_FLAG_VMDQ_ENABLED))
		return false;

	/* start at VMDq register offset for SR-IOV enabled setups;
	 * __ALIGN_MASK(1, ~vmdq->mask) is the queue count per VMDq pool
	 */
	reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask);
	for (i = 0; i < adapter->num_rx_queues; i++, reg_idx++) {
#ifdef IXGBE_FCOE
		/* Allow first FCoE queue to be mapped as RSS */
		if (fcoe->offset && (i > fcoe->offset))
			break;
#endif
		/* If we are greater than indices move to next pool */
		if ((reg_idx & ~vmdq->mask) >= rss->indices)
			reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask);
		adapter->rx_ring[i]->reg_idx = reg_idx;
	}

#ifdef IXGBE_FCOE
	/* FCoE uses a linear block of queues so just assigning 1:1 */
	for (; i < adapter->num_rx_queues; i++, reg_idx++)
		adapter->rx_ring[i]->reg_idx = reg_idx;

#endif
	reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask);
	for (i = 0; i < adapter->num_tx_queues; i++, reg_idx++) {
#ifdef IXGBE_FCOE
		/* Allow first FCoE queue to be mapped as RSS */
		if (fcoe->offset && (i > fcoe->offset))
			break;
#endif
		/* If we are greater than indices move to next pool.
		 * NOTE(review): the Rx loop above masks with ~vmdq->mask but
		 * this Tx loop masks with rss->mask — presumably equivalent
		 * for the configured pool/RSS sizes; confirm before unifying.
		 */
		if ((reg_idx & rss->mask) >= rss->indices)
			reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask);
		adapter->tx_ring[i]->reg_idx = reg_idx;
	}

#ifdef IXGBE_FCOE
	/* FCoE uses a linear block of queues so just assigning 1:1 */
	for (; i < adapter->num_tx_queues; i++, reg_idx++)
		adapter->tx_ring[i]->reg_idx = reg_idx;

#endif

	return true;
}
257 | ||
d411a936 AD |
258 | /** |
259 | * ixgbe_cache_ring_rss - Descriptor ring to register mapping for RSS | |
260 | * @adapter: board private structure to initialize | |
261 | * | |
262 | * Cache the descriptor ring offsets for RSS to the assigned rings. | |
263 | * | |
264 | **/ | |
265 | static bool ixgbe_cache_ring_rss(struct ixgbe_adapter *adapter) | |
266 | { | |
267 | int i; | |
268 | ||
d411a936 AD |
269 | for (i = 0; i < adapter->num_rx_queues; i++) |
270 | adapter->rx_ring[i]->reg_idx = i; | |
271 | for (i = 0; i < adapter->num_tx_queues; i++) | |
272 | adapter->tx_ring[i]->reg_idx = i; | |
273 | ||
274 | return true; | |
275 | } | |
276 | ||
8af3c33f JK |
277 | /** |
278 | * ixgbe_cache_ring_register - Descriptor ring to register mapping | |
279 | * @adapter: board private structure to initialize | |
280 | * | |
281 | * Once we know the feature-set enabled for the device, we'll cache | |
282 | * the register offset the descriptor ring is assigned to. | |
283 | * | |
284 | * Note, the order the various feature calls is important. It must start with | |
285 | * the "most" features enabled at the same time, then trickle down to the | |
286 | * least amount of features turned on at once. | |
287 | **/ | |
288 | static void ixgbe_cache_ring_register(struct ixgbe_adapter *adapter) | |
289 | { | |
290 | /* start with default case */ | |
291 | adapter->rx_ring[0]->reg_idx = 0; | |
292 | adapter->tx_ring[0]->reg_idx = 0; | |
293 | ||
73079ea0 AD |
294 | #ifdef CONFIG_IXGBE_DCB |
295 | if (ixgbe_cache_ring_dcb_sriov(adapter)) | |
8af3c33f JK |
296 | return; |
297 | ||
8af3c33f JK |
298 | if (ixgbe_cache_ring_dcb(adapter)) |
299 | return; | |
73079ea0 | 300 | |
8af3c33f | 301 | #endif |
73079ea0 AD |
302 | if (ixgbe_cache_ring_sriov(adapter)) |
303 | return; | |
8af3c33f | 304 | |
d411a936 | 305 | ixgbe_cache_ring_rss(adapter); |
8af3c33f JK |
306 | } |
307 | ||
d411a936 AD |
/* RSS indirection masks: the low bits of a register index that select the
 * queue within an RSS set of the given size (2^n queues -> mask 2^n - 1;
 * 0 disables RSS).
 */
#define IXGBE_RSS_16Q_MASK	0xF
#define IXGBE_RSS_8Q_MASK	0x7
#define IXGBE_RSS_4Q_MASK	0x3
#define IXGBE_RSS_2Q_MASK	0x1
#define IXGBE_RSS_DISABLED_MASK	0x0
313 | ||
314 | #ifdef CONFIG_IXGBE_DCB | |
73079ea0 AD |
/**
 * ixgbe_set_dcb_sriov_queues: Allocate queues for SR-IOV devices w/ DCB
 * @adapter: board private structure to initialize
 *
 * When SR-IOV (Single Root IO Virtualiztion) is enabled, allocate queues
 * and VM pools where appropriate.  Also assign queues based on DCB
 * priorities and map accordingly..
 *
 * Returns false (no-op) unless both DCB and SR-IOV are active; otherwise
 * fills in the VMDQ/RSS/FCoE ring_feature entries and the adapter queue
 * counts and returns true.
 **/
static bool ixgbe_set_dcb_sriov_queues(struct ixgbe_adapter *adapter)
{
	int i;
	u16 vmdq_i = adapter->ring_feature[RING_F_VMDQ].limit;
	u16 vmdq_m = 0;
#ifdef IXGBE_FCOE
	u16 fcoe_i = 0;
#endif
	u8 tcs = netdev_get_num_tc(adapter->netdev);

	/* verify we have DCB queueing enabled before proceeding */
	if (tcs <= 1)
		return false;

	/* verify we have VMDq enabled before proceeding */
	if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
		return false;

	/* Add starting offset to total pool count */
	vmdq_i += adapter->ring_feature[RING_F_VMDQ].offset;

	/* 16 pools w/ 8 TC per pool */
	if (tcs > 4) {
		vmdq_i = min_t(u16, vmdq_i, 16);
		vmdq_m = IXGBE_82599_VMDQ_8Q_MASK;
	/* 32 pools w/ 4 TC per pool */
	} else {
		vmdq_i = min_t(u16, vmdq_i, 32);
		vmdq_m = IXGBE_82599_VMDQ_4Q_MASK;
	}

#ifdef IXGBE_FCOE
	/* queues in the remaining pools are available for FCoE;
	 * 128 / queues-per-pool is the total pool count of the device
	 */
	fcoe_i = (128 / __ALIGN_MASK(1, ~vmdq_m)) - vmdq_i;

#endif
	/* remove the starting offset from the pool count */
	vmdq_i -= adapter->ring_feature[RING_F_VMDQ].offset;

	/* save features for later use */
	adapter->ring_feature[RING_F_VMDQ].indices = vmdq_i;
	adapter->ring_feature[RING_F_VMDQ].mask = vmdq_m;

	/*
	 * We do not support DCB, VMDq, and RSS all simultaneously
	 * so we will disable RSS since it is the lowest priority
	 */
	adapter->ring_feature[RING_F_RSS].indices = 1;
	adapter->ring_feature[RING_F_RSS].mask = IXGBE_RSS_DISABLED_MASK;

	/* disable ATR as it is not supported when VMDq is enabled */
	adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;

	adapter->num_rx_pools = vmdq_i;
	adapter->num_rx_queues_per_pool = tcs;

	adapter->num_tx_queues = vmdq_i * tcs;
	adapter->num_rx_queues = vmdq_i * tcs;

#ifdef IXGBE_FCOE
	if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
		struct ixgbe_ring_feature *fcoe;

		fcoe = &adapter->ring_feature[RING_F_FCOE];

		/* limit ourselves based on feature limits */
		fcoe_i = min_t(u16, fcoe_i, fcoe->limit);

		if (fcoe_i) {
			/* alloc queues for FCoE separately */
			fcoe->indices = fcoe_i;
			fcoe->offset = vmdq_i * tcs;

			/* add queues to adapter */
			adapter->num_tx_queues += fcoe_i;
			adapter->num_rx_queues += fcoe_i;
		} else if (tcs > 1) {
			/* use queue belonging to FcoE TC */
			fcoe->indices = 1;
			fcoe->offset = ixgbe_fcoe_get_tc(adapter);
		} else {
			/* no spare queues and no FCoE TC: turn FCoE off */
			adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED;

			fcoe->indices = 0;
			fcoe->offset = 0;
		}
	}

#endif /* IXGBE_FCOE */
	/* configure TC to queue mapping: one netdev queue per TC */
	for (i = 0; i < tcs; i++)
		netdev_set_tc_queue(adapter->netdev, i, 1, i);

	return true;
}
419 | ||
d411a936 AD |
/**
 * ixgbe_set_dcb_queues - Allocate queues for a DCB-enabled device
 * @adapter: board private structure to initialize
 *
 * Determine the per-traffic-class RSS queue count for the current MAC and
 * TC count, record the RSS/FCoE ring_feature settings, program the
 * netdev TC-to-queue mapping, and set the adapter queue totals.
 *
 * Returns false (no-op) unless more than one traffic class is configured.
 **/
static bool ixgbe_set_dcb_queues(struct ixgbe_adapter *adapter)
{
	struct net_device *dev = adapter->netdev;
	struct ixgbe_ring_feature *f;
	int rss_i, rss_m, i;
	int tcs;

	/* Map queue offset and counts onto allocated tx queues */
	tcs = netdev_get_num_tc(dev);

	/* verify we have DCB queueing enabled before proceeding */
	if (tcs <= 1)
		return false;

	/* determine the upper limit for our current DCB mode */
	rss_i = dev->num_tx_queues / tcs;
	if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
		/* 8 TC w/ 4 queues per TC */
		rss_i = min_t(u16, rss_i, 4);
		rss_m = IXGBE_RSS_4Q_MASK;
	} else if (tcs > 4) {
		/* 8 TC w/ 8 queues per TC */
		rss_i = min_t(u16, rss_i, 8);
		rss_m = IXGBE_RSS_8Q_MASK;
	} else {
		/* 4 TC w/ 16 queues per TC */
		rss_i = min_t(u16, rss_i, 16);
		rss_m = IXGBE_RSS_16Q_MASK;
	}

	/* set RSS mask and indices */
	f = &adapter->ring_feature[RING_F_RSS];
	rss_i = min_t(int, rss_i, f->limit);
	f->indices = rss_i;
	f->mask = rss_m;

	/* disable ATR as it is not supported when multiple TCs are enabled */
	adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;

#ifdef IXGBE_FCOE
	/* FCoE enabled queues require special configuration indexed
	 * by feature specific indices and offset. Here we map FCoE
	 * indices onto the DCB queue pairs allowing FCoE to own
	 * configuration later.
	 */
	if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
		u8 tc = ixgbe_fcoe_get_tc(adapter);

		f = &adapter->ring_feature[RING_F_FCOE];
		f->indices = min_t(u16, rss_i, f->limit);
		f->offset = rss_i * tc;
	}

#endif /* IXGBE_FCOE */
	/* each TC gets a contiguous group of rss_i netdev queues */
	for (i = 0; i < tcs; i++)
		netdev_set_tc_queue(dev, i, rss_i, rss_i * i);

	adapter->num_tx_queues = rss_i * tcs;
	adapter->num_rx_queues = rss_i * tcs;

	return true;
}
482 | ||
483 | #endif | |
73079ea0 AD |
/**
 * ixgbe_set_sriov_queues - Allocate queues for SR-IOV devices
 * @adapter: board private structure to initialize
 *
 * When SR-IOV (Single Root IO Virtualiztion) is enabled, allocate queues
 * and VM pools where appropriate.  If RSS is available, then also try and
 * enable RSS and map accordingly.
 *
 * Returns false (no-op) unless SR-IOV is enabled; otherwise records the
 * VMDQ/RSS/FCoE ring_feature settings and the adapter queue counts and
 * returns true.
 **/
static bool ixgbe_set_sriov_queues(struct ixgbe_adapter *adapter)
{
	u16 vmdq_i = adapter->ring_feature[RING_F_VMDQ].limit;
	u16 vmdq_m = 0;
	u16 rss_i = adapter->ring_feature[RING_F_RSS].limit;
	u16 rss_m = IXGBE_RSS_DISABLED_MASK;
#ifdef IXGBE_FCOE
	u16 fcoe_i = 0;
#endif
	/* true if more than one forwarding (macvlan) pool is in use */
	bool pools = (find_first_zero_bit(&adapter->fwd_bitmask, 32) > 1);

	/* only proceed if SR-IOV is enabled */
	if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
		return false;

	/* Add starting offset to total pool count */
	vmdq_i += adapter->ring_feature[RING_F_VMDQ].offset;

	/* double check we are limited to maximum pools */
	vmdq_i = min_t(u16, IXGBE_MAX_VMDQ_INDICES, vmdq_i);

	/* 64 pool mode with 2 queues per pool */
	if ((vmdq_i > 32) || (rss_i < 4) || (vmdq_i > 16 && pools)) {
		vmdq_m = IXGBE_82599_VMDQ_2Q_MASK;
		rss_m = IXGBE_RSS_2Q_MASK;
		rss_i = min_t(u16, rss_i, 2);
	/* 32 pool mode with 4 queues per pool */
	} else {
		vmdq_m = IXGBE_82599_VMDQ_4Q_MASK;
		rss_m = IXGBE_RSS_4Q_MASK;
		rss_i = 4;
	}

#ifdef IXGBE_FCOE
	/* queues in the remaining pools are available for FCoE */
	fcoe_i = 128 - (vmdq_i * __ALIGN_MASK(1, ~vmdq_m));

#endif
	/* remove the starting offset from the pool count */
	vmdq_i -= adapter->ring_feature[RING_F_VMDQ].offset;

	/* save features for later use */
	adapter->ring_feature[RING_F_VMDQ].indices = vmdq_i;
	adapter->ring_feature[RING_F_VMDQ].mask = vmdq_m;

	/* limit RSS based on user input and save for later use */
	adapter->ring_feature[RING_F_RSS].indices = rss_i;
	adapter->ring_feature[RING_F_RSS].mask = rss_m;

	adapter->num_rx_pools = vmdq_i;
	adapter->num_rx_queues_per_pool = rss_i;

	adapter->num_rx_queues = vmdq_i * rss_i;
	adapter->num_tx_queues = vmdq_i * rss_i;

	/* disable ATR as it is not supported when VMDq is enabled */
	adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;

#ifdef IXGBE_FCOE
	/*
	 * FCoE can use rings from adjacent buffers to allow RSS
	 * like behavior. To account for this we need to add the
	 * FCoE indices to the total ring count.
	 */
	if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
		struct ixgbe_ring_feature *fcoe;

		fcoe = &adapter->ring_feature[RING_F_FCOE];

		/* limit ourselves based on feature limits */
		fcoe_i = min_t(u16, fcoe_i, fcoe->limit);

		if (vmdq_i > 1 && fcoe_i) {
			/* alloc queues for FCoE separately */
			fcoe->indices = fcoe_i;
			fcoe->offset = vmdq_i * rss_i;
		} else {
			/* merge FCoE queues with RSS queues */
			fcoe_i = min_t(u16, fcoe_i + rss_i, num_online_cpus());

			/* limit indices to rss_i if MSI-X is disabled */
			if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
				fcoe_i = rss_i;

			/* attempt to reserve some queues for just FCoE */
			fcoe->indices = min_t(u16, fcoe_i, fcoe->limit);
			fcoe->offset = fcoe_i - fcoe->indices;

			/* only the extra (non-RSS) queues add to the total */
			fcoe_i -= rss_i;
		}

		/* add queues to adapter */
		adapter->num_tx_queues += fcoe_i;
		adapter->num_rx_queues += fcoe_i;
	}

#endif
	return true;
}
592 | ||
/**
 * ixgbe_set_rss_queues - Allocate queues for RSS
 * @adapter: board private structure to initialize
 *
 * This is our "base" multiqueue mode.  RSS (Receive Side Scaling) will try
 * to allocate one Rx queue per CPU, and if available, one Tx queue per CPU.
 *
 * Always succeeds (returns true); this is the fallback when no SR-IOV or
 * DCB feature applies.
 **/
static bool ixgbe_set_rss_queues(struct ixgbe_adapter *adapter)
{
	struct ixgbe_ring_feature *f;
	u16 rss_i;

	/* set mask for 16 queue limit of RSS */
	f = &adapter->ring_feature[RING_F_RSS];
	rss_i = f->limit;

	f->indices = rss_i;
	f->mask = IXGBE_RSS_16Q_MASK;

	/* disable ATR by default, it will be configured below */
	adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;

	/*
	 * Use Flow Director in addition to RSS to ensure the best
	 * distribution of flows across cores, even when an FDIR flow
	 * isn't matched.
	 */
	if (rss_i > 1 && adapter->atr_sample_rate) {
		f = &adapter->ring_feature[RING_F_FDIR];

		/* FDIR widens the queue count to its own limit */
		rss_i = f->indices = f->limit;

		if (!(adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))
			adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE;
	}

#ifdef IXGBE_FCOE
	/*
	 * FCoE can exist on the same rings as standard network traffic
	 * however it is preferred to avoid that if possible.  In order
	 * to get the best performance we allocate as many FCoE queues
	 * as we can and we place them at the end of the ring array to
	 * avoid sharing queues with standard RSS on systems with 24 or
	 * more CPUs.
	 */
	if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
		struct net_device *dev = adapter->netdev;
		u16 fcoe_i;

		f = &adapter->ring_feature[RING_F_FCOE];

		/* merge FCoE queues with RSS queues */
		fcoe_i = min_t(u16, f->limit + rss_i, num_online_cpus());
		fcoe_i = min_t(u16, fcoe_i, dev->num_tx_queues);

		/* limit indices to rss_i if MSI-X is disabled */
		if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
			fcoe_i = rss_i;

		/* attempt to reserve some queues for just FCoE */
		f->indices = min_t(u16, fcoe_i, f->limit);
		f->offset = fcoe_i - f->indices;
		rss_i = max_t(u16, fcoe_i, rss_i);
	}

#endif /* IXGBE_FCOE */
	adapter->num_rx_queues = rss_i;
	adapter->num_tx_queues = rss_i;

	return true;
}
8af3c33f JK |
665 | |
666 | /** | |
49ce9c2c | 667 | * ixgbe_set_num_queues - Allocate queues for device, feature dependent |
8af3c33f JK |
668 | * @adapter: board private structure to initialize |
669 | * | |
670 | * This is the top level queue allocation routine. The order here is very | |
671 | * important, starting with the "most" number of features turned on at once, | |
672 | * and ending with the smallest set of features. This way large combinations | |
673 | * can be allocated if they're turned on, and smaller combinations are the | |
674 | * fallthrough conditions. | |
675 | * | |
676 | **/ | |
ac802f5d | 677 | static void ixgbe_set_num_queues(struct ixgbe_adapter *adapter) |
8af3c33f JK |
678 | { |
679 | /* Start with base case */ | |
680 | adapter->num_rx_queues = 1; | |
681 | adapter->num_tx_queues = 1; | |
682 | adapter->num_rx_pools = adapter->num_rx_queues; | |
683 | adapter->num_rx_queues_per_pool = 1; | |
684 | ||
73079ea0 AD |
685 | #ifdef CONFIG_IXGBE_DCB |
686 | if (ixgbe_set_dcb_sriov_queues(adapter)) | |
ac802f5d | 687 | return; |
8af3c33f | 688 | |
8af3c33f | 689 | if (ixgbe_set_dcb_queues(adapter)) |
ac802f5d | 690 | return; |
8af3c33f JK |
691 | |
692 | #endif | |
73079ea0 AD |
693 | if (ixgbe_set_sriov_queues(adapter)) |
694 | return; | |
695 | ||
ac802f5d | 696 | ixgbe_set_rss_queues(adapter); |
8af3c33f JK |
697 | } |
698 | ||
/* ixgbe_acquire_msix_vectors - try to allocate up to @vectors MSI-X vectors.
 * On success sets IXGBE_FLAG_MSIX_ENABLED and num_q_vectors; on failure
 * clears the flag and frees msix_entries so the caller can fall back to
 * MSI/legacy interrupts.
 */
static void ixgbe_acquire_msix_vectors(struct ixgbe_adapter *adapter,
				       int vectors)
{
	int vector_threshold;

	/* We'll want at least 2 (vector_threshold):
	 * 1) TxQ[0] + RxQ[0] handler
	 * 2) Other (Link Status Change, etc.)
	 */
	vector_threshold = MIN_MSIX_COUNT;

	/*
	 * The more we get, the more we will assign to Tx/Rx Cleanup
	 * for the separate queues...where Rx Cleanup >= Tx Cleanup.
	 * Right now, we simply care about how many we'll get; we'll
	 * set them up later while requesting irq's.
	 */
	vectors = pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
					vector_threshold, vectors);

	/* pci_enable_msix_range() returns a negative errno on failure,
	 * otherwise the number of vectors actually allocated
	 */
	if (vectors < 0) {
		/* Can't allocate enough MSI-X interrupts? Oh well.
		 * This just means we'll go with either a single MSI
		 * vector or fall back to legacy interrupts.
		 */
		netif_printk(adapter, hw, KERN_DEBUG, adapter->netdev,
			     "Unable to allocate MSI-X interrupts\n");
		adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
		kfree(adapter->msix_entries);
		adapter->msix_entries = NULL;
	} else {
		adapter->flags |= IXGBE_FLAG_MSIX_ENABLED; /* Woot! */
		/*
		 * Adjust for only the vectors we'll use, which is minimum
		 * of max_msix_q_vectors + NON_Q_VECTORS, or the number of
		 * vectors we were allocated.
		 */
		vectors -= NON_Q_VECTORS;
		adapter->num_q_vectors = min(vectors, adapter->max_q_vectors);
	}
}
740 | ||
/* ixgbe_add_ring - push @ring onto the front of the singly linked list
 * in @head and bump the ring count (link before head update matters).
 */
static void ixgbe_add_ring(struct ixgbe_ring *ring,
			   struct ixgbe_ring_container *head)
{
	ring->next = head->ring;
	head->ring = ring;
	head->count++;
}
748 | ||
749 | /** | |
750 | * ixgbe_alloc_q_vector - Allocate memory for a single interrupt vector | |
751 | * @adapter: board private structure to initialize | |
d0bfcdfd | 752 | * @v_count: q_vectors allocated on adapter, used for ring interleaving |
8af3c33f | 753 | * @v_idx: index of vector in adapter struct |
d0bfcdfd AD |
754 | * @txr_count: total number of Tx rings to allocate |
755 | * @txr_idx: index of first Tx ring to allocate | |
756 | * @rxr_count: total number of Rx rings to allocate | |
757 | * @rxr_idx: index of first Rx ring to allocate | |
8af3c33f JK |
758 | * |
759 | * We allocate one q_vector. If allocation fails we return -ENOMEM. | |
760 | **/ | |
d0bfcdfd AD |
/**
 * ixgbe_alloc_q_vector - Allocate memory for a single interrupt vector
 * @adapter: board private structure to initialize
 * @v_count: q_vectors allocated on adapter, used for ring interleaving
 * @v_idx: index of vector in adapter struct
 * @txr_count: total number of Tx rings to allocate
 * @txr_idx: index of first Tx ring to allocate
 * @rxr_count: total number of Rx rings to allocate
 * @rxr_idx: index of first Rx ring to allocate
 *
 * Allocates one q_vector plus its Tx/Rx ring structures in a single
 * NUMA-aware allocation and wires the rings into the adapter's
 * tx_ring[]/rx_ring[] lookup tables.  Returns 0 on success, -ENOMEM on
 * allocation failure.
 **/
static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter,
				int v_count, int v_idx,
				int txr_count, int txr_idx,
				int rxr_count, int rxr_idx)
{
	struct ixgbe_q_vector *q_vector;
	struct ixgbe_ring *ring;
	int node = NUMA_NO_NODE;
	int cpu = -1;
	int ring_count, size;
	u8 tcs = netdev_get_num_tc(adapter->netdev);

	/* rings are allocated as a flexible tail of the q_vector itself */
	ring_count = txr_count + rxr_count;
	size = sizeof(struct ixgbe_q_vector) +
	       (sizeof(struct ixgbe_ring) * ring_count);

	/* customize cpu for Flow Director mapping */
	if ((tcs <= 1) && !(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) {
		u16 rss_i = adapter->ring_feature[RING_F_RSS].indices;
		if (rss_i > 1 && adapter->atr_sample_rate) {
			/* pin vector v_idx to CPU v_idx so ATR samples land
			 * on the CPU that submitted the flow
			 */
			if (cpu_online(v_idx)) {
				cpu = v_idx;
				node = cpu_to_node(cpu);
			}
		}
	}

	/* allocate q_vector and rings; fall back to any node on failure */
	q_vector = kzalloc_node(size, GFP_KERNEL, node);
	if (!q_vector)
		q_vector = kzalloc(size, GFP_KERNEL);
	if (!q_vector)
		return -ENOMEM;

	/* setup affinity mask and node */
	if (cpu != -1)
		cpumask_set_cpu(cpu, &q_vector->affinity_mask);
	q_vector->numa_node = node;

#ifdef CONFIG_IXGBE_DCA
	/* initialize CPU for DCA (-1 = not yet targeted) */
	q_vector->cpu = -1;

#endif
	/* initialize NAPI */
	netif_napi_add(adapter->netdev, &q_vector->napi,
		       ixgbe_poll, 64);
	napi_hash_add(&q_vector->napi);

#ifdef CONFIG_NET_RX_BUSY_POLL
	/* initialize busy poll */
	atomic_set(&q_vector->state, IXGBE_QV_STATE_DISABLE);

#endif
	/* tie q_vector and adapter together */
	adapter->q_vector[v_idx] = q_vector;
	q_vector->adapter = adapter;
	q_vector->v_idx = v_idx;

	/* initialize work limits */
	q_vector->tx.work_limit = adapter->tx_work_limit;

	/* initialize pointer to rings (tail of the q_vector allocation) */
	ring = q_vector->ring;

	/* initialize ITR: a setting of 1 means "use the driver default" */
	if (txr_count && !rxr_count) {
		/* tx only vector */
		if (adapter->tx_itr_setting == 1)
			q_vector->itr = IXGBE_10K_ITR;
		else
			q_vector->itr = adapter->tx_itr_setting;
	} else {
		/* rx or rx/tx vector */
		if (adapter->rx_itr_setting == 1)
			q_vector->itr = IXGBE_20K_ITR;
		else
			q_vector->itr = adapter->rx_itr_setting;
	}

	while (txr_count) {
		/* assign generic ring traits */
		ring->dev = &adapter->pdev->dev;
		ring->netdev = adapter->netdev;

		/* configure backlink on ring */
		ring->q_vector = q_vector;

		/* update q_vector Tx values */
		ixgbe_add_ring(ring, &q_vector->tx);

		/* apply Tx specific ring traits */
		ring->count = adapter->tx_ring_count;
		if (adapter->num_rx_pools > 1)
			ring->queue_index =
				txr_idx % adapter->num_rx_queues_per_pool;
		else
			ring->queue_index = txr_idx;

		/* assign ring to adapter */
		adapter->tx_ring[txr_idx] = ring;

		/* update count and index; rings are strided by v_count so
		 * consecutive rings map to consecutive vectors
		 */
		txr_count--;
		txr_idx += v_count;

		/* push pointer to next ring */
		ring++;
	}

	while (rxr_count) {
		/* assign generic ring traits */
		ring->dev = &adapter->pdev->dev;
		ring->netdev = adapter->netdev;

		/* configure backlink on ring */
		ring->q_vector = q_vector;

		/* update q_vector Rx values */
		ixgbe_add_ring(ring, &q_vector->rx);

		/*
		 * 82599 errata, UDP frames with a 0 checksum
		 * can be marked as checksum errors.
		 */
		if (adapter->hw.mac.type == ixgbe_mac_82599EB)
			set_bit(__IXGBE_RX_CSUM_UDP_ZERO_ERR, &ring->state);

#ifdef IXGBE_FCOE
		/* flag rings that fall inside the FCoE feature range */
		if (adapter->netdev->features & NETIF_F_FCOE_MTU) {
			struct ixgbe_ring_feature *f;
			f = &adapter->ring_feature[RING_F_FCOE];
			if ((rxr_idx >= f->offset) &&
			    (rxr_idx < f->offset + f->indices))
				set_bit(__IXGBE_RX_FCOE, &ring->state);
		}

#endif /* IXGBE_FCOE */
		/* apply Rx specific ring traits */
		ring->count = adapter->rx_ring_count;
		if (adapter->num_rx_pools > 1)
			ring->queue_index =
				rxr_idx % adapter->num_rx_queues_per_pool;
		else
			ring->queue_index = rxr_idx;

		/* assign ring to adapter */
		adapter->rx_ring[rxr_idx] = ring;

		/* update count and index (strided by v_count, as for Tx) */
		rxr_count--;
		rxr_idx += v_count;

		/* push pointer to next ring */
		ring++;
	}

	return 0;
}
920 | ||
921 | /** | |
922 | * ixgbe_free_q_vector - Free memory allocated for specific interrupt vector | |
923 | * @adapter: board private structure to initialize | |
924 | * @v_idx: Index of vector to be freed | |
925 | * | |
926 | * This function frees the memory allocated to the q_vector. In addition if | |
927 | * NAPI is enabled it will delete any references to the NAPI struct prior | |
928 | * to freeing the q_vector. | |
929 | **/ | |
930 | static void ixgbe_free_q_vector(struct ixgbe_adapter *adapter, int v_idx) | |
931 | { | |
932 | struct ixgbe_q_vector *q_vector = adapter->q_vector[v_idx]; | |
933 | struct ixgbe_ring *ring; | |
934 | ||
935 | ixgbe_for_each_ring(ring, q_vector->tx) | |
936 | adapter->tx_ring[ring->queue_index] = NULL; | |
937 | ||
938 | ixgbe_for_each_ring(ring, q_vector->rx) | |
939 | adapter->rx_ring[ring->queue_index] = NULL; | |
940 | ||
941 | adapter->q_vector[v_idx] = NULL; | |
5a85e737 | 942 | napi_hash_del(&q_vector->napi); |
8af3c33f JK |
943 | netif_napi_del(&q_vector->napi); |
944 | ||
945 | /* | |
946 | * ixgbe_get_stats64() might access the rings on this vector, | |
947 | * we must wait a grace period before freeing it. | |
948 | */ | |
949 | kfree_rcu(q_vector, rcu); | |
950 | } | |
951 | ||
952 | /** | |
953 | * ixgbe_alloc_q_vectors - Allocate memory for interrupt vectors | |
954 | * @adapter: board private structure to initialize | |
955 | * | |
956 | * We allocate one q_vector per queue interrupt. If allocation fails we | |
957 | * return -ENOMEM. | |
958 | **/ | |
959 | static int ixgbe_alloc_q_vectors(struct ixgbe_adapter *adapter) | |
960 | { | |
49c7ffbe | 961 | int q_vectors = adapter->num_q_vectors; |
8af3c33f JK |
962 | int rxr_remaining = adapter->num_rx_queues; |
963 | int txr_remaining = adapter->num_tx_queues; | |
964 | int rxr_idx = 0, txr_idx = 0, v_idx = 0; | |
965 | int err; | |
966 | ||
967 | /* only one q_vector if MSI-X is disabled. */ | |
968 | if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) | |
969 | q_vectors = 1; | |
970 | ||
971 | if (q_vectors >= (rxr_remaining + txr_remaining)) { | |
d0bfcdfd AD |
972 | for (; rxr_remaining; v_idx++) { |
973 | err = ixgbe_alloc_q_vector(adapter, q_vectors, v_idx, | |
974 | 0, 0, 1, rxr_idx); | |
8af3c33f JK |
975 | |
976 | if (err) | |
977 | goto err_out; | |
978 | ||
979 | /* update counts and index */ | |
d0bfcdfd AD |
980 | rxr_remaining--; |
981 | rxr_idx++; | |
8af3c33f JK |
982 | } |
983 | } | |
984 | ||
d0bfcdfd AD |
985 | for (; v_idx < q_vectors; v_idx++) { |
986 | int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - v_idx); | |
987 | int tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - v_idx); | |
988 | err = ixgbe_alloc_q_vector(adapter, q_vectors, v_idx, | |
8af3c33f JK |
989 | tqpv, txr_idx, |
990 | rqpv, rxr_idx); | |
991 | ||
992 | if (err) | |
993 | goto err_out; | |
994 | ||
995 | /* update counts and index */ | |
996 | rxr_remaining -= rqpv; | |
8af3c33f | 997 | txr_remaining -= tqpv; |
d0bfcdfd AD |
998 | rxr_idx++; |
999 | txr_idx++; | |
8af3c33f JK |
1000 | } |
1001 | ||
1002 | return 0; | |
1003 | ||
1004 | err_out: | |
49c7ffbe AD |
1005 | adapter->num_tx_queues = 0; |
1006 | adapter->num_rx_queues = 0; | |
1007 | adapter->num_q_vectors = 0; | |
1008 | ||
1009 | while (v_idx--) | |
8af3c33f | 1010 | ixgbe_free_q_vector(adapter, v_idx); |
8af3c33f JK |
1011 | |
1012 | return -ENOMEM; | |
1013 | } | |
1014 | ||
1015 | /** | |
1016 | * ixgbe_free_q_vectors - Free memory allocated for interrupt vectors | |
1017 | * @adapter: board private structure to initialize | |
1018 | * | |
1019 | * This function frees the memory allocated to the q_vectors. In addition if | |
1020 | * NAPI is enabled it will delete any references to the NAPI struct prior | |
1021 | * to freeing the q_vector. | |
1022 | **/ | |
1023 | static void ixgbe_free_q_vectors(struct ixgbe_adapter *adapter) | |
1024 | { | |
49c7ffbe | 1025 | int v_idx = adapter->num_q_vectors; |
8af3c33f | 1026 | |
49c7ffbe AD |
1027 | adapter->num_tx_queues = 0; |
1028 | adapter->num_rx_queues = 0; | |
1029 | adapter->num_q_vectors = 0; | |
8af3c33f | 1030 | |
49c7ffbe | 1031 | while (v_idx--) |
8af3c33f JK |
1032 | ixgbe_free_q_vector(adapter, v_idx); |
1033 | } | |
1034 | ||
1035 | static void ixgbe_reset_interrupt_capability(struct ixgbe_adapter *adapter) | |
1036 | { | |
1037 | if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) { | |
1038 | adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED; | |
1039 | pci_disable_msix(adapter->pdev); | |
1040 | kfree(adapter->msix_entries); | |
1041 | adapter->msix_entries = NULL; | |
1042 | } else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) { | |
1043 | adapter->flags &= ~IXGBE_FLAG_MSI_ENABLED; | |
1044 | pci_disable_msi(adapter->pdev); | |
1045 | } | |
1046 | } | |
1047 | ||
/**
 * ixgbe_set_interrupt_capability - set MSI-X or MSI if supported
 * @adapter: board private structure to initialize
 *
 * Attempt to configure the interrupts using the best available
 * capabilities of the hardware and the kernel.
 **/
static void ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	int vector, v_budget, err;

	/*
	 * It's easy to be greedy for MSI-X vectors, but it really
	 * doesn't do us much good if we have a lot more vectors
	 * than CPU's. So let's be conservative and only ask for
	 * (roughly) the same number of vectors as there are CPU's.
	 * The default is to use pairs of vectors.
	 */
	v_budget = max(adapter->num_rx_queues, adapter->num_tx_queues);
	v_budget = min_t(int, v_budget, num_online_cpus());
	v_budget += NON_Q_VECTORS;

	/*
	 * At the same time, hardware can only support a maximum of
	 * hw.mac->max_msix_vectors vectors. With features
	 * such as RSS and VMDq, we can easily surpass the number of Rx and Tx
	 * descriptor queues supported by our device. Thus, we cap it off in
	 * those rare cases where the cpu count also exceeds our vector limit.
	 */
	v_budget = min_t(int, v_budget, hw->mac.max_msix_vectors);

	/* A failure in MSI-X entry allocation isn't fatal, but it does
	 * mean we disable MSI-X capabilities of the adapter. */
	adapter->msix_entries = kcalloc(v_budget,
					sizeof(struct msix_entry), GFP_KERNEL);
	if (adapter->msix_entries) {
		for (vector = 0; vector < v_budget; vector++)
			adapter->msix_entries[vector].entry = vector;

		/* sets IXGBE_FLAG_MSIX_ENABLED on success */
		ixgbe_acquire_msix_vectors(adapter, v_budget);

		if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
			return;
	}

	/* At this point, we do not have MSI-X capabilities. We need to
	 * reconfigure or disable various features which require MSI-X
	 * capability.
	 */

	/* disable DCB if number of TCs exceeds 1 */
	if (netdev_get_num_tc(adapter->netdev) > 1) {
		e_err(probe, "num TCs exceeds number of queues - disabling DCB\n");
		netdev_reset_tc(adapter->netdev);

		/* 82598 used link flow control in place of PFC for DCB;
		 * restore the pre-DCB flow control mode
		 */
		if (adapter->hw.mac.type == ixgbe_mac_82598EB)
			adapter->hw.fc.requested_mode = adapter->last_lfc_mode;

		adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED;
		adapter->temp_dcb_cfg.pfc_mode_enable = false;
		adapter->dcb_cfg.pfc_mode_enable = false;
	}
	adapter->dcb_cfg.num_tcs.pg_tcs = 1;
	adapter->dcb_cfg.num_tcs.pfc_tcs = 1;

	/* disable SR-IOV */
	ixgbe_disable_sriov(adapter);

	/* disable RSS */
	adapter->ring_feature[RING_F_RSS].limit = 1;

	/* recalculate number of queues now that many features have been
	 * changed or disabled.
	 */
	ixgbe_set_num_queues(adapter);
	adapter->num_q_vectors = 1;

	err = pci_enable_msi(adapter->pdev);
	if (err) {
		/* MSI failed too; leave neither flag set and fall back to
		 * legacy (INTx) interrupts
		 */
		netif_printk(adapter, hw, KERN_DEBUG, adapter->netdev,
			     "Unable to allocate MSI interrupt, falling back to legacy. Error: %d\n",
			     err);
		return;
	}
	adapter->flags |= IXGBE_FLAG_MSI_ENABLED;
}
1135 | ||
1136 | /** | |
1137 | * ixgbe_init_interrupt_scheme - Determine proper interrupt scheme | |
1138 | * @adapter: board private structure to initialize | |
1139 | * | |
1140 | * We determine which interrupt scheme to use based on... | |
1141 | * - Kernel support (MSI, MSI-X) | |
1142 | * - which can be user-defined (via MODULE_PARAM) | |
1143 | * - Hardware queue count (num_*_queues) | |
1144 | * - defined by miscellaneous hardware support/features (RSS, etc.) | |
1145 | **/ | |
1146 | int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter) | |
1147 | { | |
1148 | int err; | |
1149 | ||
1150 | /* Number of supported queues */ | |
ac802f5d | 1151 | ixgbe_set_num_queues(adapter); |
8af3c33f | 1152 | |
ac802f5d AD |
1153 | /* Set interrupt mode */ |
1154 | ixgbe_set_interrupt_capability(adapter); | |
8af3c33f JK |
1155 | |
1156 | err = ixgbe_alloc_q_vectors(adapter); | |
1157 | if (err) { | |
1158 | e_dev_err("Unable to allocate memory for queue vectors\n"); | |
1159 | goto err_alloc_q_vectors; | |
1160 | } | |
1161 | ||
1162 | ixgbe_cache_ring_register(adapter); | |
1163 | ||
1164 | e_dev_info("Multiqueue %s: Rx Queue count = %u, Tx Queue count = %u\n", | |
1165 | (adapter->num_rx_queues > 1) ? "Enabled" : "Disabled", | |
1166 | adapter->num_rx_queues, adapter->num_tx_queues); | |
1167 | ||
1168 | set_bit(__IXGBE_DOWN, &adapter->state); | |
1169 | ||
1170 | return 0; | |
1171 | ||
1172 | err_alloc_q_vectors: | |
1173 | ixgbe_reset_interrupt_capability(adapter); | |
8af3c33f JK |
1174 | return err; |
1175 | } | |
1176 | ||
1177 | /** | |
1178 | * ixgbe_clear_interrupt_scheme - Clear the current interrupt scheme settings | |
1179 | * @adapter: board private structure to clear interrupt scheme on | |
1180 | * | |
1181 | * We go through and clear interrupt specific resources and reset the structure | |
1182 | * to pre-load conditions | |
1183 | **/ | |
1184 | void ixgbe_clear_interrupt_scheme(struct ixgbe_adapter *adapter) | |
1185 | { | |
1186 | adapter->num_tx_queues = 0; | |
1187 | adapter->num_rx_queues = 0; | |
1188 | ||
1189 | ixgbe_free_q_vectors(adapter); | |
1190 | ixgbe_reset_interrupt_capability(adapter); | |
1191 | } | |
1192 | ||
1193 | void ixgbe_tx_ctxtdesc(struct ixgbe_ring *tx_ring, u32 vlan_macip_lens, | |
1194 | u32 fcoe_sof_eof, u32 type_tucmd, u32 mss_l4len_idx) | |
1195 | { | |
1196 | struct ixgbe_adv_tx_context_desc *context_desc; | |
1197 | u16 i = tx_ring->next_to_use; | |
1198 | ||
1199 | context_desc = IXGBE_TX_CTXTDESC(tx_ring, i); | |
1200 | ||
1201 | i++; | |
1202 | tx_ring->next_to_use = (i < tx_ring->count) ? i : 0; | |
1203 | ||
1204 | /* set bits to identify this as an advanced context descriptor */ | |
1205 | type_tucmd |= IXGBE_TXD_CMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT; | |
1206 | ||
1207 | context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens); | |
1208 | context_desc->seqnum_seed = cpu_to_le32(fcoe_sof_eof); | |
1209 | context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd); | |
1210 | context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx); | |
1211 | } | |
1212 |