fm10k: reinitialize queuing scheme after calling init_hw
[linux-2.6-block.git] drivers/net/ethernet/intel/fm10k/fm10k_pf.c
1/* Intel Ethernet Switch Host Interface Driver
2 * Copyright(c) 2013 - 2014 Intel Corporation.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * The full GNU General Public License is included in this distribution in
14 * the file called "COPYING".
15 *
16 * Contact Information:
17 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
18 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
19 */
20
21#include "fm10k_pf.h"
 22#include "fm10k_vf.h"
23
24/**
25 * fm10k_reset_hw_pf - PF hardware reset
26 * @hw: pointer to hardware structure
27 *
28 * This function should return the hardware to a state similar to the
29 * one it is in after being powered on.
30 **/
31static s32 fm10k_reset_hw_pf(struct fm10k_hw *hw)
32{
33 s32 err;
34 u32 reg;
35 u16 i;
36
37 /* Disable interrupts */
38 fm10k_write_reg(hw, FM10K_EIMR, FM10K_EIMR_DISABLE(ALL));
39
40 /* Lock ITR2 reg 0 into itself and disable interrupt moderation */
41 fm10k_write_reg(hw, FM10K_ITR2(0), 0);
42 fm10k_write_reg(hw, FM10K_INT_CTRL, 0);
43
44 /* We assume here Tx and Rx queue 0 are owned by the PF */
45
46 /* Shut off VF access to their queues forcing them to queue 0 */
47 for (i = 0; i < FM10K_TQMAP_TABLE_SIZE; i++) {
48 fm10k_write_reg(hw, FM10K_TQMAP(i), 0);
49 fm10k_write_reg(hw, FM10K_RQMAP(i), 0);
50 }
51
52 /* shut down all rings */
53 err = fm10k_disable_queues_generic(hw, FM10K_MAX_QUEUES);
54 if (err)
55 return err;
56
57 /* Verify that DMA is no longer active */
58 reg = fm10k_read_reg(hw, FM10K_DMA_CTRL);
59 if (reg & (FM10K_DMA_CTRL_TX_ACTIVE | FM10K_DMA_CTRL_RX_ACTIVE))
60 return FM10K_ERR_DMA_PENDING;
61
62 /* verify the switch is ready for reset */
63 reg = fm10k_read_reg(hw, FM10K_DMA_CTRL2);
64 if (!(reg & FM10K_DMA_CTRL2_SWITCH_READY))
65 goto out;
66
 67	/* Initiate data path reset */
68 reg |= FM10K_DMA_CTRL_DATAPATH_RESET;
69 fm10k_write_reg(hw, FM10K_DMA_CTRL, reg);
70
71 /* Flush write and allow 100us for reset to complete */
72 fm10k_write_flush(hw);
73 udelay(FM10K_RESET_TIMEOUT);
74
75 /* Verify we made it out of reset */
76 reg = fm10k_read_reg(hw, FM10K_IP);
77 if (!(reg & FM10K_IP_NOTINRESET))
78 err = FM10K_ERR_RESET_FAILED;
79
 80out:
81 return err;
82}
83
84/**
85 * fm10k_is_ari_hierarchy_pf - Indicate ARI hierarchy support
86 * @hw: pointer to hardware structure
87 *
88 * Looks at the ARI hierarchy bit to determine whether ARI is supported or not.
89 **/
90static bool fm10k_is_ari_hierarchy_pf(struct fm10k_hw *hw)
91{
92 u16 sriov_ctrl = fm10k_read_pci_cfg_word(hw, FM10K_PCIE_SRIOV_CTRL);
93
94 return !!(sriov_ctrl & FM10K_PCIE_SRIOV_CTRL_VFARI);
95}
96
97/**
98 * fm10k_init_hw_pf - PF hardware initialization
99 * @hw: pointer to hardware structure
100 *
101 **/
102static s32 fm10k_init_hw_pf(struct fm10k_hw *hw)
103{
104 u32 dma_ctrl, txqctl;
105 u16 i;
106
107 /* Establish default VSI as valid */
108 fm10k_write_reg(hw, FM10K_DGLORTDEC(fm10k_dglort_default), 0);
109 fm10k_write_reg(hw, FM10K_DGLORTMAP(fm10k_dglort_default),
110 FM10K_DGLORTMAP_ANY);
111
112 /* Invalidate all other GLORT entries */
113 for (i = 1; i < FM10K_DGLORT_COUNT; i++)
114 fm10k_write_reg(hw, FM10K_DGLORTMAP(i), FM10K_DGLORTMAP_NONE);
115
116 /* reset ITR2(0) to point to itself */
117 fm10k_write_reg(hw, FM10K_ITR2(0), 0);
118
 119	/* reset VF ITR2(0) to point to 0 to avoid PF registers */
120 fm10k_write_reg(hw, FM10K_ITR2(FM10K_ITR_REG_COUNT_PF), 0);
121
122 /* loop through all PF ITR2 registers pointing them to the previous */
123 for (i = 1; i < FM10K_ITR_REG_COUNT_PF; i++)
124 fm10k_write_reg(hw, FM10K_ITR2(i), i - 1);
125
126 /* Enable interrupt moderator if not already enabled */
127 fm10k_write_reg(hw, FM10K_INT_CTRL, FM10K_INT_CTRL_ENABLEMODERATOR);
128
129 /* compute the default txqctl configuration */
130 txqctl = FM10K_TXQCTL_PF | FM10K_TXQCTL_UNLIMITED_BW |
131 (hw->mac.default_vid << FM10K_TXQCTL_VID_SHIFT);
132
133 for (i = 0; i < FM10K_MAX_QUEUES; i++) {
134 /* configure rings for 256 Queue / 32 Descriptor cache mode */
135 fm10k_write_reg(hw, FM10K_TQDLOC(i),
136 (i * FM10K_TQDLOC_BASE_32_DESC) |
137 FM10K_TQDLOC_SIZE_32_DESC);
138 fm10k_write_reg(hw, FM10K_TXQCTL(i), txqctl);
139
140 /* configure rings to provide TPH processing hints */
141 fm10k_write_reg(hw, FM10K_TPH_TXCTRL(i),
142 FM10K_TPH_TXCTRL_DESC_TPHEN |
143 FM10K_TPH_TXCTRL_DESC_RROEN |
144 FM10K_TPH_TXCTRL_DESC_WROEN |
145 FM10K_TPH_TXCTRL_DATA_RROEN);
146 fm10k_write_reg(hw, FM10K_TPH_RXCTRL(i),
147 FM10K_TPH_RXCTRL_DESC_TPHEN |
148 FM10K_TPH_RXCTRL_DESC_RROEN |
149 FM10K_TPH_RXCTRL_DATA_WROEN |
150 FM10K_TPH_RXCTRL_HDR_WROEN);
151 }
152
153 /* set max hold interval to align with 1.024 usec in all modes */
154 switch (hw->bus.speed) {
155 case fm10k_bus_speed_2500:
156 dma_ctrl = FM10K_DMA_CTRL_MAX_HOLD_1US_GEN1;
157 break;
158 case fm10k_bus_speed_5000:
159 dma_ctrl = FM10K_DMA_CTRL_MAX_HOLD_1US_GEN2;
160 break;
161 case fm10k_bus_speed_8000:
162 dma_ctrl = FM10K_DMA_CTRL_MAX_HOLD_1US_GEN3;
163 break;
164 default:
165 dma_ctrl = 0;
166 break;
167 }
168
169 /* Configure TSO flags */
170 fm10k_write_reg(hw, FM10K_DTXTCPFLGL, FM10K_TSO_FLAGS_LOW);
171 fm10k_write_reg(hw, FM10K_DTXTCPFLGH, FM10K_TSO_FLAGS_HI);
172
173 /* Enable DMA engine
174 * Set Rx Descriptor size to 32
175 * Set Minimum MSS to 64
176 * Set Maximum number of Rx queues to 256 / 32 Descriptor
177 */
178 dma_ctrl |= FM10K_DMA_CTRL_TX_ENABLE | FM10K_DMA_CTRL_RX_ENABLE |
179 FM10K_DMA_CTRL_RX_DESC_SIZE | FM10K_DMA_CTRL_MINMSS_64 |
180 FM10K_DMA_CTRL_32_DESC;
181
182 fm10k_write_reg(hw, FM10K_DMA_CTRL, dma_ctrl);
183
184 /* record maximum queue count, we limit ourselves to 128 */
185 hw->mac.max_queues = FM10K_MAX_QUEUES_PF;
186
 187	/* We support either 64 VFs or 7 VFs depending on whether we have ARI */
188 hw->iov.total_vfs = fm10k_is_ari_hierarchy_pf(hw) ? 64 : 7;
189
190 return 0;
191}
192
193/**
194 * fm10k_update_vlan_pf - Update status of VLAN ID in VLAN filter table
195 * @hw: pointer to hardware structure
196 * @vid: VLAN ID to add to table
197 * @vsi: Index indicating VF ID or PF ID in table
198 * @set: Indicates if this is a set or clear operation
199 *
200 * This function adds or removes the corresponding VLAN ID from the VLAN
201 * filter table for the corresponding function. In addition to the
 202 * standard set/clear that supports one bit, a multi-bit write is
203 * supported to set 64 bits at a time.
204 **/
205static s32 fm10k_update_vlan_pf(struct fm10k_hw *hw, u32 vid, u8 vsi, bool set)
206{
207 u32 vlan_table, reg, mask, bit, len;
208
209 /* verify the VSI index is valid */
210 if (vsi > FM10K_VLAN_TABLE_VSI_MAX)
211 return FM10K_ERR_PARAM;
212
213 /* VLAN multi-bit write:
214 * The multi-bit write has several parts to it.
215 * 3 2 1 0
216 * 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
217 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
218 * | RSVD0 | Length |C|RSVD0| VLAN ID |
219 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
220 *
 221 * VLAN ID: VLAN starting value
222 * RSVD0: Reserved section, must be 0
223 * C: Flag field, 0 is set, 1 is clear (Used in VF VLAN message)
224 * Length: Number of times to repeat the bit being set
225 */
226 len = vid >> 16;
227 vid = (vid << 17) >> 17;
228
229 /* verify the reserved 0 fields are 0 */
 230	if (len >= FM10K_VLAN_TABLE_VID_MAX || vid >= FM10K_VLAN_TABLE_VID_MAX)
231 return FM10K_ERR_PARAM;
232
233 /* Loop through the table updating all required VLANs */
234 for (reg = FM10K_VLAN_TABLE(vsi, vid / 32), bit = vid % 32;
235 len < FM10K_VLAN_TABLE_VID_MAX;
236 len -= 32 - bit, reg++, bit = 0) {
237 /* record the initial state of the register */
238 vlan_table = fm10k_read_reg(hw, reg);
239
240 /* truncate mask if we are at the start or end of the run */
241 mask = (~(u32)0 >> ((len < 31) ? 31 - len : 0)) << bit;
242
243 /* make necessary modifications to the register */
244 mask &= set ? ~vlan_table : vlan_table;
245 if (mask)
246 fm10k_write_reg(hw, reg, vlan_table ^ mask);
247 }
248
249 return 0;
250}
251
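/* Illustrative sketch, not part of the driver: fm10k_update_vlan_pf()
 * expects the packed word drawn in the layout comment above, i.e. the
 * starting VLAN ID in the low bits and the repeat length in bits 31:16.
 * fm10k_example_pack_vlan_update() is a hypothetical helper for building
 * that word; the field positions are taken from the comment, not from a
 * register specification.
 */
static u32 fm10k_example_pack_vlan_update(u16 start_vid, u16 len)
{
	/* length in the upper half, VID (bits 14:0) in the lower half */
	return ((u32)len << 16) | (start_vid & 0x7fff);
}

/* For example, fm10k_example_pack_vlan_update(40, 10) lands in VLAN_TABLE
 * word 40 / 32 = 1 with bit = 8, and the loop above computes
 * mask = (~0u >> (31 - 10)) << 8, an 11-bit run covering VIDs 40-50.
 */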
252/**
253 * fm10k_read_mac_addr_pf - Read device MAC address
254 * @hw: pointer to the HW structure
255 *
256 * Reads the device MAC address from the SM_AREA and stores the value.
257 **/
258static s32 fm10k_read_mac_addr_pf(struct fm10k_hw *hw)
259{
260 u8 perm_addr[ETH_ALEN];
261 u32 serial_num;
262 int i;
263
264 serial_num = fm10k_read_reg(hw, FM10K_SM_AREA(1));
265
266 /* last byte should be all 1's */
267 if ((~serial_num) << 24)
268 return FM10K_ERR_INVALID_MAC_ADDR;
269
270 perm_addr[0] = (u8)(serial_num >> 24);
271 perm_addr[1] = (u8)(serial_num >> 16);
272 perm_addr[2] = (u8)(serial_num >> 8);
273
274 serial_num = fm10k_read_reg(hw, FM10K_SM_AREA(0));
275
276 /* first byte should be all 1's */
277 if ((~serial_num) >> 24)
278 return FM10K_ERR_INVALID_MAC_ADDR;
279
280 perm_addr[3] = (u8)(serial_num >> 16);
281 perm_addr[4] = (u8)(serial_num >> 8);
282 perm_addr[5] = (u8)(serial_num);
283
284 for (i = 0; i < ETH_ALEN; i++) {
285 hw->mac.perm_addr[i] = perm_addr[i];
286 hw->mac.addr[i] = perm_addr[i];
287 }
288
289 return 0;
290}
291
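/* Illustrative sketch, not part of the driver: the inverse of the
 * unpacking above.  For a MAC of a1:b2:c3:d4:e5:f6 the SM area words
 * would read back as SM_AREA(1) = 0xa1b2c3ff and SM_AREA(0) = 0xffd4e5f6,
 * with the 0xff marker bytes sitting exactly where the two sanity checks
 * above expect them.  fm10k_example_pack_mac() is hypothetical.
 */
static void fm10k_example_pack_mac(const u8 *mac, u32 *sm_area1, u32 *sm_area0)
{
	*sm_area1 = ((u32)mac[0] << 24) | ((u32)mac[1] << 16) |
		    ((u32)mac[2] << 8) | 0xff;
	*sm_area0 = 0xff000000 | ((u32)mac[3] << 16) |
		    ((u32)mac[4] << 8) | (u32)mac[5];
}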
292/**
293 * fm10k_glort_valid_pf - Validate that the provided glort is valid
294 * @hw: pointer to the HW structure
295 * @glort: base glort to be validated
296 *
 297 * This function will return false if the provided glort is invalid
298 **/
299bool fm10k_glort_valid_pf(struct fm10k_hw *hw, u16 glort)
300{
301 glort &= hw->mac.dglort_map >> FM10K_DGLORTMAP_MASK_SHIFT;
302
303 return glort == (hw->mac.dglort_map & FM10K_DGLORTMAP_NONE);
304}
305
306/**
 307 * fm10k_update_xc_addr_pf - Update device addresses
308 * @hw: pointer to the HW structure
309 * @glort: base resource tag for this request
310 * @mac: MAC address to add/remove from table
311 * @vid: VLAN ID to add/remove from table
312 * @add: Indicates if this is an add or remove operation
313 * @flags: flags field to indicate add and secure
314 *
315 * This function generates a message to the Switch API requesting
316 * that the given logical port add/remove the given L2 MAC/VLAN address.
317 **/
318static s32 fm10k_update_xc_addr_pf(struct fm10k_hw *hw, u16 glort,
319 const u8 *mac, u16 vid, bool add, u8 flags)
320{
321 struct fm10k_mbx_info *mbx = &hw->mbx;
322 struct fm10k_mac_update mac_update;
323 u32 msg[5];
324
 325	/* mask off the FM10K_VLAN_CLEAR flag from the VLAN ID */
326 vid &= ~FM10K_VLAN_CLEAR;
327
328 /* if glort or vlan are not valid return error */
329 if (!fm10k_glort_valid_pf(hw, glort) || vid >= FM10K_VLAN_TABLE_VID_MAX)
330 return FM10K_ERR_PARAM;
331
332 /* record fields */
333 mac_update.mac_lower = cpu_to_le32(((u32)mac[2] << 24) |
334 ((u32)mac[3] << 16) |
335 ((u32)mac[4] << 8) |
336 ((u32)mac[5]));
337 mac_update.mac_upper = cpu_to_le16(((u32)mac[0] << 8) |
338 ((u32)mac[1]));
339 mac_update.vlan = cpu_to_le16(vid);
340 mac_update.glort = cpu_to_le16(glort);
341 mac_update.action = add ? 0 : 1;
342 mac_update.flags = flags;
343
344 /* populate mac_update fields */
345 fm10k_tlv_msg_init(msg, FM10K_PF_MSG_ID_UPDATE_MAC_FWD_RULE);
346 fm10k_tlv_attr_put_le_struct(msg, FM10K_PF_ATTR_ID_MAC_UPDATE,
347 &mac_update, sizeof(mac_update));
348
349 /* load onto outgoing mailbox */
350 return mbx->ops.enqueue_tx(hw, mbx, msg);
351}
352
353/**
 354 * fm10k_update_uc_addr_pf - Update device unicast addresses
355 * @hw: pointer to the HW structure
356 * @glort: base resource tag for this request
357 * @mac: MAC address to add/remove from table
358 * @vid: VLAN ID to add/remove from table
359 * @add: Indicates if this is an add or remove operation
360 * @flags: flags field to indicate add and secure
361 *
362 * This function is used to add or remove unicast addresses for
363 * the PF.
364 **/
365static s32 fm10k_update_uc_addr_pf(struct fm10k_hw *hw, u16 glort,
366 const u8 *mac, u16 vid, bool add, u8 flags)
367{
368 /* verify MAC address is valid */
369 if (!is_valid_ether_addr(mac))
370 return FM10K_ERR_PARAM;
371
372 return fm10k_update_xc_addr_pf(hw, glort, mac, vid, add, flags);
373}
374
375/**
376 * fm10k_update_mc_addr_pf - Update device multicast addresses
377 * @hw: pointer to the HW structure
378 * @glort: base resource tag for this request
379 * @mac: MAC address to add/remove from table
380 * @vid: VLAN ID to add/remove from table
381 * @add: Indicates if this is an add or remove operation
382 *
383 * This function is used to add or remove multicast MAC addresses for
384 * the PF.
385 **/
386static s32 fm10k_update_mc_addr_pf(struct fm10k_hw *hw, u16 glort,
387 const u8 *mac, u16 vid, bool add)
388{
389 /* verify multicast address is valid */
390 if (!is_multicast_ether_addr(mac))
391 return FM10K_ERR_PARAM;
392
393 return fm10k_update_xc_addr_pf(hw, glort, mac, vid, add, 0);
394}
395
396/**
397 * fm10k_update_xcast_mode_pf - Request update of multicast mode
398 * @hw: pointer to hardware structure
399 * @glort: base resource tag for this request
400 * @mode: integer value indicating mode being requested
401 *
402 * This function will attempt to request a higher mode for the port
403 * so that it can enable either multicast, multicast promiscuous, or
404 * promiscuous mode of operation.
405 **/
406static s32 fm10k_update_xcast_mode_pf(struct fm10k_hw *hw, u16 glort, u8 mode)
407{
408 struct fm10k_mbx_info *mbx = &hw->mbx;
409 u32 msg[3], xcast_mode;
410
411 if (mode > FM10K_XCAST_MODE_NONE)
412 return FM10K_ERR_PARAM;
413 /* if glort is not valid return error */
414 if (!fm10k_glort_valid_pf(hw, glort))
415 return FM10K_ERR_PARAM;
416
417 /* write xcast mode as a single u32 value,
418 * lower 16 bits: glort
419 * upper 16 bits: mode
420 */
421 xcast_mode = ((u32)mode << 16) | glort;
422
423 /* generate message requesting to change xcast mode */
424 fm10k_tlv_msg_init(msg, FM10K_PF_MSG_ID_XCAST_MODES);
425 fm10k_tlv_attr_put_u32(msg, FM10K_PF_ATTR_ID_XCAST_MODE, xcast_mode);
426
427 /* load onto outgoing mailbox */
428 return mbx->ops.enqueue_tx(hw, mbx, msg);
429}
430
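/* Worked example, not part of the driver: the request above is a single
 * TLV u32 with the glort in the low 16 bits and the mode in the upper 16
 * bits, so asking for mode 2 on glort 0x4000 encodes as
 * (2 << 16) | 0x4000 = 0x00024000.  The lport create/delete message in
 * fm10k_update_lport_state_pf() below packs count and glort the same way.
 */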
431/**
432 * fm10k_update_int_moderator_pf - Update interrupt moderator linked list
433 * @hw: pointer to hardware structure
434 *
435 * This function walks through the MSI-X vector table to determine the
436 * number of active interrupts and based on that information updates the
437 * interrupt moderator linked list.
438 **/
439static void fm10k_update_int_moderator_pf(struct fm10k_hw *hw)
440{
441 u32 i;
442
443 /* Disable interrupt moderator */
444 fm10k_write_reg(hw, FM10K_INT_CTRL, 0);
445
 446	/* loop through PF from last to first looking for enabled vectors */
447 for (i = FM10K_ITR_REG_COUNT_PF - 1; i; i--) {
448 if (!fm10k_read_reg(hw, FM10K_MSIX_VECTOR_MASK(i)))
449 break;
450 }
451
 452	/* always reset VFITR2[0] to point to last enabled PF vector */
453 fm10k_write_reg(hw, FM10K_ITR2(FM10K_ITR_REG_COUNT_PF), i);
454
455 /* reset ITR2[0] to point to last enabled PF vector */
456 if (!hw->iov.num_vfs)
457 fm10k_write_reg(hw, FM10K_ITR2(0), i);
458
459 /* Enable interrupt moderator */
460 fm10k_write_reg(hw, FM10K_INT_CTRL, FM10K_INT_CTRL_ENABLEMODERATOR);
461}
462
463/**
464 * fm10k_update_lport_state_pf - Notify the switch of a change in port state
465 * @hw: pointer to the HW structure
466 * @glort: base resource tag for this request
467 * @count: number of logical ports being updated
468 * @enable: boolean value indicating enable or disable
469 *
470 * This function is used to add/remove a logical port from the switch.
471 **/
472static s32 fm10k_update_lport_state_pf(struct fm10k_hw *hw, u16 glort,
473 u16 count, bool enable)
474{
475 struct fm10k_mbx_info *mbx = &hw->mbx;
476 u32 msg[3], lport_msg;
477
478 /* do nothing if we are being asked to create or destroy 0 ports */
479 if (!count)
480 return 0;
481
482 /* if glort is not valid return error */
483 if (!fm10k_glort_valid_pf(hw, glort))
484 return FM10K_ERR_PARAM;
485
486 /* construct the lport message from the 2 pieces of data we have */
487 lport_msg = ((u32)count << 16) | glort;
488
489 /* generate lport create/delete message */
490 fm10k_tlv_msg_init(msg, enable ? FM10K_PF_MSG_ID_LPORT_CREATE :
491 FM10K_PF_MSG_ID_LPORT_DELETE);
492 fm10k_tlv_attr_put_u32(msg, FM10K_PF_ATTR_ID_PORT, lport_msg);
493
494 /* load onto outgoing mailbox */
495 return mbx->ops.enqueue_tx(hw, mbx, msg);
496}
497
498/**
499 * fm10k_configure_dglort_map_pf - Configures GLORT entry and queues
500 * @hw: pointer to hardware structure
501 * @dglort: pointer to dglort configuration structure
502 *
503 * Reads the configuration structure contained in dglort_cfg and uses
504 * that information to then populate a DGLORTMAP/DEC entry and the queues
505 * to which it has been assigned.
506 **/
507static s32 fm10k_configure_dglort_map_pf(struct fm10k_hw *hw,
508 struct fm10k_dglort_cfg *dglort)
509{
510 u16 glort, queue_count, vsi_count, pc_count;
511 u16 vsi, queue, pc, q_idx;
512 u32 txqctl, dglortdec, dglortmap;
513
514 /* verify the dglort pointer */
515 if (!dglort)
516 return FM10K_ERR_PARAM;
517
518 /* verify the dglort values */
519 if ((dglort->idx > 7) || (dglort->rss_l > 7) || (dglort->pc_l > 3) ||
520 (dglort->vsi_l > 6) || (dglort->vsi_b > 64) ||
521 (dglort->queue_l > 8) || (dglort->queue_b >= 256))
522 return FM10K_ERR_PARAM;
523
524 /* determine count of VSIs and queues */
525 queue_count = 1 << (dglort->rss_l + dglort->pc_l);
526 vsi_count = 1 << (dglort->vsi_l + dglort->queue_l);
527 glort = dglort->glort;
528 q_idx = dglort->queue_b;
529
530 /* configure SGLORT for queues */
531 for (vsi = 0; vsi < vsi_count; vsi++, glort++) {
532 for (queue = 0; queue < queue_count; queue++, q_idx++) {
533 if (q_idx >= FM10K_MAX_QUEUES)
534 break;
535
536 fm10k_write_reg(hw, FM10K_TX_SGLORT(q_idx), glort);
537 fm10k_write_reg(hw, FM10K_RX_SGLORT(q_idx), glort);
538 }
539 }
540
541 /* determine count of PCs and queues */
542 queue_count = 1 << (dglort->queue_l + dglort->rss_l + dglort->vsi_l);
543 pc_count = 1 << dglort->pc_l;
544
545 /* configure PC for Tx queues */
546 for (pc = 0; pc < pc_count; pc++) {
547 q_idx = pc + dglort->queue_b;
548 for (queue = 0; queue < queue_count; queue++) {
549 if (q_idx >= FM10K_MAX_QUEUES)
550 break;
551
552 txqctl = fm10k_read_reg(hw, FM10K_TXQCTL(q_idx));
553 txqctl &= ~FM10K_TXQCTL_PC_MASK;
554 txqctl |= pc << FM10K_TXQCTL_PC_SHIFT;
555 fm10k_write_reg(hw, FM10K_TXQCTL(q_idx), txqctl);
556
557 q_idx += pc_count;
558 }
559 }
560
561 /* configure DGLORTDEC */
562 dglortdec = ((u32)(dglort->rss_l) << FM10K_DGLORTDEC_RSSLENGTH_SHIFT) |
563 ((u32)(dglort->queue_b) << FM10K_DGLORTDEC_QBASE_SHIFT) |
564 ((u32)(dglort->pc_l) << FM10K_DGLORTDEC_PCLENGTH_SHIFT) |
565 ((u32)(dglort->vsi_b) << FM10K_DGLORTDEC_VSIBASE_SHIFT) |
566 ((u32)(dglort->vsi_l) << FM10K_DGLORTDEC_VSILENGTH_SHIFT) |
567 ((u32)(dglort->queue_l));
568 if (dglort->inner_rss)
569 dglortdec |= FM10K_DGLORTDEC_INNERRSS_ENABLE;
570
571 /* configure DGLORTMAP */
572 dglortmap = (dglort->idx == fm10k_dglort_default) ?
573 FM10K_DGLORTMAP_ANY : FM10K_DGLORTMAP_ZERO;
574 dglortmap <<= dglort->vsi_l + dglort->queue_l + dglort->shared_l;
575 dglortmap |= dglort->glort;
576
577 /* write values to hardware */
578 fm10k_write_reg(hw, FM10K_DGLORTDEC(dglort->idx), dglortdec);
579 fm10k_write_reg(hw, FM10K_DGLORTMAP(dglort->idx), dglortmap);
580
581 return 0;
582}
583
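/* Illustrative sketch, not part of the driver: a caller fills in a
 * struct fm10k_dglort_cfg and lets the routine above derive the
 * DGLORTDEC/DGLORTMAP values plus the per-queue SGLORT and TXQCTL
 * programming.  The helper and its field choices are made up for
 * illustration; only glort, idx and rss_l are set here, everything else
 * stays zero.
 */
static s32 fm10k_example_map_default_dglort(struct fm10k_hw *hw, u16 glort,
					    u16 num_rss_queues)
{
	struct fm10k_dglort_cfg dglort = { 0 };

	dglort.glort = glort;			/* base GLORT for the entry */
	dglort.idx = fm10k_dglort_default;	/* use the default decode entry */
	dglort.rss_l = fls(num_rss_queues - 1);	/* log2 of the RSS queue count */

	return fm10k_configure_dglort_map_pf(hw, &dglort);
}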
584u16 fm10k_queues_per_pool(struct fm10k_hw *hw)
585{
586 u16 num_pools = hw->iov.num_pools;
587
588 return (num_pools > 32) ? 2 : (num_pools > 16) ? 4 : (num_pools > 8) ?
589 8 : FM10K_MAX_QUEUES_POOL;
590}
591
592u16 fm10k_vf_queue_index(struct fm10k_hw *hw, u16 vf_idx)
593{
594 u16 num_vfs = hw->iov.num_vfs;
595 u16 vf_q_idx = FM10K_MAX_QUEUES;
596
597 vf_q_idx -= fm10k_queues_per_pool(hw) * (num_vfs - vf_idx);
598
599 return vf_q_idx;
600}
601
602static u16 fm10k_vectors_per_pool(struct fm10k_hw *hw)
603{
604 u16 num_pools = hw->iov.num_pools;
605
606 return (num_pools > 32) ? 8 : (num_pools > 16) ? 16 :
607 FM10K_MAX_VECTORS_POOL;
608}
609
610static u16 fm10k_vf_vector_index(struct fm10k_hw *hw, u16 vf_idx)
611{
612 u16 vf_v_idx = FM10K_MAX_VECTORS_PF;
613
614 vf_v_idx += fm10k_vectors_per_pool(hw) * vf_idx;
615
616 return vf_v_idx;
617}
618
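/* Worked example, not part of the driver: with 24 VFs in 24 pools the
 * helpers above yield 4 queues per pool (24 > 16) and 16 vectors per
 * pool.  With FM10K_MAX_QUEUES at 256, fm10k_vf_queue_index() places
 * VF n at queue 256 - 4 * (24 - n) = 160 + 4 * n, so VF 0 owns queues
 * 160-163 and VF 23 owns 252-255, while everything below 160 stays with
 * the PF (which itself only uses the first 128, per fm10k_init_hw_pf()).
 */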
619/**
620 * fm10k_iov_assign_resources_pf - Assign pool resources for virtualization
621 * @hw: pointer to the HW structure
622 * @num_vfs: number of VFs to be allocated
623 * @num_pools: number of virtualization pools to be allocated
624 *
625 * Allocates queues and traffic classes to virtualization entities to prepare
626 * the PF for SR-IOV and VMDq
627 **/
628static s32 fm10k_iov_assign_resources_pf(struct fm10k_hw *hw, u16 num_vfs,
629 u16 num_pools)
630{
631 u16 qmap_stride, qpp, vpp, vf_q_idx, vf_q_idx0, qmap_idx;
632 u32 vid = hw->mac.default_vid << FM10K_TXQCTL_VID_SHIFT;
633 int i, j;
634
635 /* hardware only supports up to 64 pools */
636 if (num_pools > 64)
637 return FM10K_ERR_PARAM;
638
639 /* the number of VFs cannot exceed the number of pools */
640 if ((num_vfs > num_pools) || (num_vfs > hw->iov.total_vfs))
641 return FM10K_ERR_PARAM;
642
643 /* record number of virtualization entities */
644 hw->iov.num_vfs = num_vfs;
645 hw->iov.num_pools = num_pools;
646
647 /* determine qmap offsets and counts */
648 qmap_stride = (num_vfs > 8) ? 32 : 256;
649 qpp = fm10k_queues_per_pool(hw);
650 vpp = fm10k_vectors_per_pool(hw);
651
652 /* calculate starting index for queues */
653 vf_q_idx = fm10k_vf_queue_index(hw, 0);
654 qmap_idx = 0;
655
656 /* establish TCs with -1 credits and no quanta to prevent transmit */
657 for (i = 0; i < num_vfs; i++) {
658 fm10k_write_reg(hw, FM10K_TC_MAXCREDIT(i), 0);
659 fm10k_write_reg(hw, FM10K_TC_RATE(i), 0);
660 fm10k_write_reg(hw, FM10K_TC_CREDIT(i),
661 FM10K_TC_CREDIT_CREDIT_MASK);
662 }
663
664 /* zero out all mbmem registers */
665 for (i = FM10K_VFMBMEM_LEN * num_vfs; i--;)
666 fm10k_write_reg(hw, FM10K_MBMEM(i), 0);
667
668 /* clear event notification of VF FLR */
669 fm10k_write_reg(hw, FM10K_PFVFLREC(0), ~0);
670 fm10k_write_reg(hw, FM10K_PFVFLREC(1), ~0);
671
672 /* loop through unallocated rings assigning them back to PF */
673 for (i = FM10K_MAX_QUEUES_PF; i < vf_q_idx; i++) {
674 fm10k_write_reg(hw, FM10K_TXDCTL(i), 0);
675 fm10k_write_reg(hw, FM10K_TXQCTL(i), FM10K_TXQCTL_PF |
676 FM10K_TXQCTL_UNLIMITED_BW | vid);
677 fm10k_write_reg(hw, FM10K_RXQCTL(i), FM10K_RXQCTL_PF);
678 }
679
680 /* PF should have already updated VFITR2[0] */
681
682 /* update all ITR registers to flow to VFITR2[0] */
683 for (i = FM10K_ITR_REG_COUNT_PF + 1; i < FM10K_ITR_REG_COUNT; i++) {
684 if (!(i & (vpp - 1)))
685 fm10k_write_reg(hw, FM10K_ITR2(i), i - vpp);
686 else
687 fm10k_write_reg(hw, FM10K_ITR2(i), i - 1);
688 }
689
690 /* update PF ITR2[0] to reference the last vector */
691 fm10k_write_reg(hw, FM10K_ITR2(0),
692 fm10k_vf_vector_index(hw, num_vfs - 1));
693
694 /* loop through rings populating rings and TCs */
695 for (i = 0; i < num_vfs; i++) {
696 /* record index for VF queue 0 for use in end of loop */
697 vf_q_idx0 = vf_q_idx;
698
699 for (j = 0; j < qpp; j++, qmap_idx++, vf_q_idx++) {
700 /* assign VF and locked TC to queues */
701 fm10k_write_reg(hw, FM10K_TXDCTL(vf_q_idx), 0);
702 fm10k_write_reg(hw, FM10K_TXQCTL(vf_q_idx),
703 (i << FM10K_TXQCTL_TC_SHIFT) | i |
704 FM10K_TXQCTL_VF | vid);
705 fm10k_write_reg(hw, FM10K_RXDCTL(vf_q_idx),
706 FM10K_RXDCTL_WRITE_BACK_MIN_DELAY |
707 FM10K_RXDCTL_DROP_ON_EMPTY);
708 fm10k_write_reg(hw, FM10K_RXQCTL(vf_q_idx),
709 FM10K_RXQCTL_VF |
710 (i << FM10K_RXQCTL_VF_SHIFT));
711
712 /* map queue pair to VF */
713 fm10k_write_reg(hw, FM10K_TQMAP(qmap_idx), vf_q_idx);
714 fm10k_write_reg(hw, FM10K_RQMAP(qmap_idx), vf_q_idx);
715 }
716
717 /* repeat the first ring for all of the remaining VF rings */
718 for (; j < qmap_stride; j++, qmap_idx++) {
719 fm10k_write_reg(hw, FM10K_TQMAP(qmap_idx), vf_q_idx0);
720 fm10k_write_reg(hw, FM10K_RQMAP(qmap_idx), vf_q_idx0);
721 }
722 }
723
724 /* loop through remaining indexes assigning all to queue 0 */
725 while (qmap_idx < FM10K_TQMAP_TABLE_SIZE) {
726 fm10k_write_reg(hw, FM10K_TQMAP(qmap_idx), 0);
727 fm10k_write_reg(hw, FM10K_RQMAP(qmap_idx), 0);
728 qmap_idx++;
729 }
730
731 return 0;
732}
733
734/**
735 * fm10k_iov_configure_tc_pf - Configure the shaping group for VF
736 * @hw: pointer to the HW structure
737 * @vf_idx: index of VF receiving GLORT
738 * @rate: Rate indicated in Mb/s
739 *
 740 * Configures the TC for a given VF to allow only up to a given number
741 * of Mb/s of outgoing Tx throughput.
742 **/
743static s32 fm10k_iov_configure_tc_pf(struct fm10k_hw *hw, u16 vf_idx, int rate)
744{
745 /* configure defaults */
746 u32 interval = FM10K_TC_RATE_INTERVAL_4US_GEN3;
747 u32 tc_rate = FM10K_TC_RATE_QUANTA_MASK;
748
749 /* verify vf is in range */
750 if (vf_idx >= hw->iov.num_vfs)
751 return FM10K_ERR_PARAM;
752
753 /* set interval to align with 4.096 usec in all modes */
754 switch (hw->bus.speed) {
755 case fm10k_bus_speed_2500:
756 interval = FM10K_TC_RATE_INTERVAL_4US_GEN1;
757 break;
758 case fm10k_bus_speed_5000:
759 interval = FM10K_TC_RATE_INTERVAL_4US_GEN2;
760 break;
761 default:
762 break;
763 }
764
765 if (rate) {
766 if (rate > FM10K_VF_TC_MAX || rate < FM10K_VF_TC_MIN)
767 return FM10K_ERR_PARAM;
768
769 /* The quanta is measured in Bytes per 4.096 or 8.192 usec
770 * The rate is provided in Mbits per second
 771 * To translate from rate to quanta we need to multiply the
772 * rate by 8.192 usec and divide by 8 bits/byte. To avoid
773 * dealing with floating point we can round the values up
774 * to the nearest whole number ratio which gives us 128 / 125.
775 */
776 tc_rate = (rate * 128) / 125;
777
778 /* try to keep the rate limiting accurate by increasing
779 * the number of credits and interval for rates less than 4Gb/s
780 */
781 if (rate < 4000)
782 interval <<= 1;
783 else
784 tc_rate >>= 1;
785 }
786
787 /* update rate limiter with new values */
788 fm10k_write_reg(hw, FM10K_TC_RATE(vf_idx), tc_rate | interval);
789 fm10k_write_reg(hw, FM10K_TC_MAXCREDIT(vf_idx), FM10K_TC_MAXCREDIT_64K);
790 fm10k_write_reg(hw, FM10K_TC_CREDIT(vf_idx), FM10K_TC_MAXCREDIT_64K);
791
792 return 0;
793}
794
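/* Worked example, not part of the driver: a 10000 Mb/s cap is 1.25e9
 * bytes/s, i.e. 5120 bytes per 4.096 usec, and the code above reaches
 * the same value as (10000 * 128 / 125) >> 1 = 5120.  A 1000 Mb/s cap is
 * below 4 Gb/s, so the interval is doubled instead and the quanta stays
 * at 1000 * 128 / 125 = 1024 bytes per 8.192 usec.
 */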
795/**
796 * fm10k_iov_assign_int_moderator_pf - Add VF interrupts to moderator list
797 * @hw: pointer to the HW structure
798 * @vf_idx: index of VF receiving GLORT
799 *
800 * Update the interrupt moderator linked list to include any MSI-X
801 * interrupts which the VF has enabled in the MSI-X vector table.
802 **/
803static s32 fm10k_iov_assign_int_moderator_pf(struct fm10k_hw *hw, u16 vf_idx)
804{
805 u16 vf_v_idx, vf_v_limit, i;
806
807 /* verify vf is in range */
808 if (vf_idx >= hw->iov.num_vfs)
809 return FM10K_ERR_PARAM;
810
 811	/* determine vector offset and count */
812 vf_v_idx = fm10k_vf_vector_index(hw, vf_idx);
813 vf_v_limit = vf_v_idx + fm10k_vectors_per_pool(hw);
814
815 /* search for first vector that is not masked */
816 for (i = vf_v_limit - 1; i > vf_v_idx; i--) {
817 if (!fm10k_read_reg(hw, FM10K_MSIX_VECTOR_MASK(i)))
818 break;
819 }
820
821 /* reset linked list so it now includes our active vectors */
822 if (vf_idx == (hw->iov.num_vfs - 1))
823 fm10k_write_reg(hw, FM10K_ITR2(0), i);
824 else
825 fm10k_write_reg(hw, FM10K_ITR2(vf_v_limit), i);
826
827 return 0;
828}
829
830/**
831 * fm10k_iov_assign_default_mac_vlan_pf - Assign a MAC and VLAN to VF
832 * @hw: pointer to the HW structure
833 * @vf_info: pointer to VF information structure
834 *
835 * Assign a MAC address and default VLAN to a VF and notify it of the update
836 **/
837static s32 fm10k_iov_assign_default_mac_vlan_pf(struct fm10k_hw *hw,
838 struct fm10k_vf_info *vf_info)
839{
840 u16 qmap_stride, queues_per_pool, vf_q_idx, timeout, qmap_idx, i;
841 u32 msg[4], txdctl, txqctl, tdbal = 0, tdbah = 0;
842 s32 err = 0;
843 u16 vf_idx, vf_vid;
844
845 /* verify vf is in range */
846 if (!vf_info || vf_info->vf_idx >= hw->iov.num_vfs)
847 return FM10K_ERR_PARAM;
848
849 /* determine qmap offsets and counts */
850 qmap_stride = (hw->iov.num_vfs > 8) ? 32 : 256;
851 queues_per_pool = fm10k_queues_per_pool(hw);
852
853 /* calculate starting index for queues */
854 vf_idx = vf_info->vf_idx;
855 vf_q_idx = fm10k_vf_queue_index(hw, vf_idx);
856 qmap_idx = qmap_stride * vf_idx;
857
858 /* MAP Tx queue back to 0 temporarily, and disable it */
859 fm10k_write_reg(hw, FM10K_TQMAP(qmap_idx), 0);
860 fm10k_write_reg(hw, FM10K_TXDCTL(vf_q_idx), 0);
861
862 /* determine correct default VLAN ID */
863 if (vf_info->pf_vid)
864 vf_vid = vf_info->pf_vid | FM10K_VLAN_CLEAR;
865 else
866 vf_vid = vf_info->sw_vid;
867
868 /* generate MAC_ADDR request */
869 fm10k_tlv_msg_init(msg, FM10K_VF_MSG_ID_MAC_VLAN);
870 fm10k_tlv_attr_put_mac_vlan(msg, FM10K_MAC_VLAN_MSG_DEFAULT_MAC,
871 vf_info->mac, vf_vid);
872
873 /* load onto outgoing mailbox, ignore any errors on enqueue */
874 if (vf_info->mbx.ops.enqueue_tx)
875 vf_info->mbx.ops.enqueue_tx(hw, &vf_info->mbx, msg);
876
 877	/* verify the ring has been disabled before modifying base address registers */
878 txdctl = fm10k_read_reg(hw, FM10K_TXDCTL(vf_q_idx));
879 for (timeout = 0; txdctl & FM10K_TXDCTL_ENABLE; timeout++) {
880 /* limit ourselves to a 1ms timeout */
881 if (timeout == 10) {
882 err = FM10K_ERR_DMA_PENDING;
883 goto err_out;
884 }
885
886 usleep_range(100, 200);
887 txdctl = fm10k_read_reg(hw, FM10K_TXDCTL(vf_q_idx));
888 }
889
890 /* Update base address registers to contain MAC address */
891 if (is_valid_ether_addr(vf_info->mac)) {
892 tdbal = (((u32)vf_info->mac[3]) << 24) |
893 (((u32)vf_info->mac[4]) << 16) |
894 (((u32)vf_info->mac[5]) << 8);
895
896 tdbah = (((u32)0xFF) << 24) |
897 (((u32)vf_info->mac[0]) << 16) |
898 (((u32)vf_info->mac[1]) << 8) |
899 ((u32)vf_info->mac[2]);
900 }
901
902 /* Record the base address into queue 0 */
903 fm10k_write_reg(hw, FM10K_TDBAL(vf_q_idx), tdbal);
904 fm10k_write_reg(hw, FM10K_TDBAH(vf_q_idx), tdbah);
905
906err_out:
907 /* configure Queue control register */
908 txqctl = ((u32)vf_vid << FM10K_TXQCTL_VID_SHIFT) &
909 FM10K_TXQCTL_VID_MASK;
910 txqctl |= (vf_idx << FM10K_TXQCTL_TC_SHIFT) |
911 FM10K_TXQCTL_VF | vf_idx;
912
913 /* assign VID */
914 for (i = 0; i < queues_per_pool; i++)
915 fm10k_write_reg(hw, FM10K_TXQCTL(vf_q_idx + i), txqctl);
916
917 /* restore the queue back to VF ownership */
918 fm10k_write_reg(hw, FM10K_TQMAP(qmap_idx), vf_q_idx);
919 return err;
920}
921
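/* Worked example, not part of the driver: for a VF MAC of
 * a1:b2:c3:d4:e5:f6 the code above stashes TDBAL = 0xd4e5f600 and
 * TDBAH = 0xffa1b2c3 into the (still disabled) first queue of the VF,
 * which is how the VF side can later recover its permanent address; the
 * 0xff in the top byte of TDBAH appears to distinguish a stashed MAC
 * from zeroed registers.
 */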
922/**
923 * fm10k_iov_reset_resources_pf - Reassign queues and interrupts to a VF
924 * @hw: pointer to the HW structure
925 * @vf_info: pointer to VF information structure
926 *
927 * Reassign the interrupts and queues to a VF following an FLR
928 **/
929static s32 fm10k_iov_reset_resources_pf(struct fm10k_hw *hw,
930 struct fm10k_vf_info *vf_info)
931{
932 u16 qmap_stride, queues_per_pool, vf_q_idx, qmap_idx;
933 u32 tdbal = 0, tdbah = 0, txqctl, rxqctl;
934 u16 vf_v_idx, vf_v_limit, vf_vid;
935 u8 vf_idx = vf_info->vf_idx;
936 int i;
937
938 /* verify vf is in range */
939 if (vf_idx >= hw->iov.num_vfs)
940 return FM10K_ERR_PARAM;
941
942 /* clear event notification of VF FLR */
943 fm10k_write_reg(hw, FM10K_PFVFLREC(vf_idx / 32), 1 << (vf_idx % 32));
944
945 /* force timeout and then disconnect the mailbox */
946 vf_info->mbx.timeout = 0;
947 if (vf_info->mbx.ops.disconnect)
948 vf_info->mbx.ops.disconnect(hw, &vf_info->mbx);
949
 950	/* determine vector offset and count */
951 vf_v_idx = fm10k_vf_vector_index(hw, vf_idx);
952 vf_v_limit = vf_v_idx + fm10k_vectors_per_pool(hw);
953
954 /* determine qmap offsets and counts */
955 qmap_stride = (hw->iov.num_vfs > 8) ? 32 : 256;
956 queues_per_pool = fm10k_queues_per_pool(hw);
957 qmap_idx = qmap_stride * vf_idx;
958
959 /* make all the queues inaccessible to the VF */
960 for (i = qmap_idx; i < (qmap_idx + qmap_stride); i++) {
961 fm10k_write_reg(hw, FM10K_TQMAP(i), 0);
962 fm10k_write_reg(hw, FM10K_RQMAP(i), 0);
963 }
964
965 /* calculate starting index for queues */
966 vf_q_idx = fm10k_vf_queue_index(hw, vf_idx);
967
968 /* determine correct default VLAN ID */
969 if (vf_info->pf_vid)
970 vf_vid = vf_info->pf_vid;
971 else
972 vf_vid = vf_info->sw_vid;
973
974 /* configure Queue control register */
975 txqctl = ((u32)vf_vid << FM10K_TXQCTL_VID_SHIFT) |
976 (vf_idx << FM10K_TXQCTL_TC_SHIFT) |
977 FM10K_TXQCTL_VF | vf_idx;
978 rxqctl = FM10K_RXQCTL_VF | (vf_idx << FM10K_RXQCTL_VF_SHIFT);
979
980 /* stop further DMA and reset queue ownership back to VF */
981 for (i = vf_q_idx; i < (queues_per_pool + vf_q_idx); i++) {
982 fm10k_write_reg(hw, FM10K_TXDCTL(i), 0);
983 fm10k_write_reg(hw, FM10K_TXQCTL(i), txqctl);
984 fm10k_write_reg(hw, FM10K_RXDCTL(i),
985 FM10K_RXDCTL_WRITE_BACK_MIN_DELAY |
986 FM10K_RXDCTL_DROP_ON_EMPTY);
987 fm10k_write_reg(hw, FM10K_RXQCTL(i), rxqctl);
988 }
989
990 /* reset TC with -1 credits and no quanta to prevent transmit */
991 fm10k_write_reg(hw, FM10K_TC_MAXCREDIT(vf_idx), 0);
992 fm10k_write_reg(hw, FM10K_TC_RATE(vf_idx), 0);
993 fm10k_write_reg(hw, FM10K_TC_CREDIT(vf_idx),
994 FM10K_TC_CREDIT_CREDIT_MASK);
995
996 /* update our first entry in the table based on previous VF */
997 if (!vf_idx)
998 hw->mac.ops.update_int_moderator(hw);
999 else
1000 hw->iov.ops.assign_int_moderator(hw, vf_idx - 1);
1001
1002 /* reset linked list so it now includes our active vectors */
1003 if (vf_idx == (hw->iov.num_vfs - 1))
1004 fm10k_write_reg(hw, FM10K_ITR2(0), vf_v_idx);
1005 else
1006 fm10k_write_reg(hw, FM10K_ITR2(vf_v_limit), vf_v_idx);
1007
1008 /* link remaining vectors so that next points to previous */
1009 for (vf_v_idx++; vf_v_idx < vf_v_limit; vf_v_idx++)
1010 fm10k_write_reg(hw, FM10K_ITR2(vf_v_idx), vf_v_idx - 1);
1011
1012 /* zero out MBMEM, VLAN_TABLE, RETA, RSSRK, and MRQC registers */
1013 for (i = FM10K_VFMBMEM_LEN; i--;)
1014 fm10k_write_reg(hw, FM10K_MBMEM_VF(vf_idx, i), 0);
1015 for (i = FM10K_VLAN_TABLE_SIZE; i--;)
1016 fm10k_write_reg(hw, FM10K_VLAN_TABLE(vf_info->vsi, i), 0);
1017 for (i = FM10K_RETA_SIZE; i--;)
1018 fm10k_write_reg(hw, FM10K_RETA(vf_info->vsi, i), 0);
1019 for (i = FM10K_RSSRK_SIZE; i--;)
1020 fm10k_write_reg(hw, FM10K_RSSRK(vf_info->vsi, i), 0);
1021 fm10k_write_reg(hw, FM10K_MRQC(vf_info->vsi), 0);
1022
1023 /* Update base address registers to contain MAC address */
1024 if (is_valid_ether_addr(vf_info->mac)) {
1025 tdbal = (((u32)vf_info->mac[3]) << 24) |
1026 (((u32)vf_info->mac[4]) << 16) |
1027 (((u32)vf_info->mac[5]) << 8);
1028 tdbah = (((u32)0xFF) << 24) |
1029 (((u32)vf_info->mac[0]) << 16) |
1030 (((u32)vf_info->mac[1]) << 8) |
1031 ((u32)vf_info->mac[2]);
1032 }
1033
 1034	/* map queue pairs back to VF from last to first */
1035 for (i = queues_per_pool; i--;) {
1036 fm10k_write_reg(hw, FM10K_TDBAL(vf_q_idx + i), tdbal);
1037 fm10k_write_reg(hw, FM10K_TDBAH(vf_q_idx + i), tdbah);
1038 fm10k_write_reg(hw, FM10K_TQMAP(qmap_idx + i), vf_q_idx + i);
1039 fm10k_write_reg(hw, FM10K_RQMAP(qmap_idx + i), vf_q_idx + i);
1040 }
1041
1042 /* repeat the first ring for all the remaining VF rings */
1043 for (i = queues_per_pool; i < qmap_stride; i++) {
1044 fm10k_write_reg(hw, FM10K_TQMAP(qmap_idx + i), vf_q_idx);
1045 fm10k_write_reg(hw, FM10K_RQMAP(qmap_idx + i), vf_q_idx);
1046 }
1047
1048 return 0;
1049}
1050
1051/**
1052 * fm10k_iov_set_lport_pf - Assign and enable a logical port for a given VF
1053 * @hw: pointer to hardware structure
1054 * @vf_info: pointer to VF information structure
1055 * @lport_idx: Logical port offset from the hardware glort
1056 * @flags: Set of capability flags to extend port beyond basic functionality
1057 *
1058 * This function allows enabling a VF port by assigning it a GLORT and
1059 * setting the flags so that it can enable an Rx mode.
1060 **/
1061static s32 fm10k_iov_set_lport_pf(struct fm10k_hw *hw,
1062 struct fm10k_vf_info *vf_info,
1063 u16 lport_idx, u8 flags)
1064{
1065 u16 glort = (hw->mac.dglort_map + lport_idx) & FM10K_DGLORTMAP_NONE;
1066
1067 /* if glort is not valid return error */
1068 if (!fm10k_glort_valid_pf(hw, glort))
1069 return FM10K_ERR_PARAM;
1070
1071 vf_info->vf_flags = flags | FM10K_VF_FLAG_NONE_CAPABLE;
1072 vf_info->glort = glort;
1073
1074 return 0;
1075}
1076
1077/**
1078 * fm10k_iov_reset_lport_pf - Disable a logical port for a given VF
1079 * @hw: pointer to hardware structure
1080 * @vf_info: pointer to VF information structure
1081 *
1082 * This function disables a VF port by stripping it of a GLORT and
1083 * setting the flags so that it cannot enable any Rx mode.
1084 **/
1085static void fm10k_iov_reset_lport_pf(struct fm10k_hw *hw,
1086 struct fm10k_vf_info *vf_info)
1087{
1088 u32 msg[1];
1089
1090 /* need to disable the port if it is already enabled */
1091 if (FM10K_VF_FLAG_ENABLED(vf_info)) {
1092 /* notify switch that this port has been disabled */
1093 fm10k_update_lport_state_pf(hw, vf_info->glort, 1, false);
1094
1095 /* generate port state response to notify VF it is not ready */
1096 fm10k_tlv_msg_init(msg, FM10K_VF_MSG_ID_LPORT_STATE);
1097 vf_info->mbx.ops.enqueue_tx(hw, &vf_info->mbx, msg);
1098 }
1099
1100 /* clear flags and glort if it exists */
1101 vf_info->vf_flags = 0;
1102 vf_info->glort = 0;
1103}
1104
1105/**
1106 * fm10k_iov_update_stats_pf - Updates hardware related statistics for VFs
1107 * @hw: pointer to hardware structure
1108 * @q: stats for all queues of a VF
1109 * @vf_idx: index of VF
1110 *
1111 * This function collects queue stats for VFs.
1112 **/
1113static void fm10k_iov_update_stats_pf(struct fm10k_hw *hw,
1114 struct fm10k_hw_stats_q *q,
1115 u16 vf_idx)
1116{
1117 u32 idx, qpp;
1118
1119 /* get stats for all of the queues */
1120 qpp = fm10k_queues_per_pool(hw);
1121 idx = fm10k_vf_queue_index(hw, vf_idx);
1122 fm10k_update_hw_stats_q(hw, q, idx, qpp);
1123}
1124
1125static s32 fm10k_iov_report_timestamp_pf(struct fm10k_hw *hw,
1126 struct fm10k_vf_info *vf_info,
1127 u64 timestamp)
1128{
1129 u32 msg[4];
1130
 1131	/* generate a 1588 timestamp message to send to the VF */
1132 fm10k_tlv_msg_init(msg, FM10K_VF_MSG_ID_1588);
1133 fm10k_tlv_attr_put_u64(msg, FM10K_1588_MSG_TIMESTAMP, timestamp);
1134
1135 return vf_info->mbx.ops.enqueue_tx(hw, &vf_info->mbx, msg);
1136}
1137
1138/**
1139 * fm10k_iov_msg_msix_pf - Message handler for MSI-X request from VF
1140 * @hw: Pointer to hardware structure
1141 * @results: Pointer array to message, results[0] is pointer to message
1142 * @mbx: Pointer to mailbox information structure
1143 *
1144 * This function is a default handler for MSI-X requests from the VF. The
1145 * assumption is that in this case it is acceptable to just directly
 1146 * hand off the message from the VF to the underlying shared code.
1147 **/
1148s32 fm10k_iov_msg_msix_pf(struct fm10k_hw *hw, u32 **results,
1149 struct fm10k_mbx_info *mbx)
1150{
1151 struct fm10k_vf_info *vf_info = (struct fm10k_vf_info *)mbx;
1152 u8 vf_idx = vf_info->vf_idx;
1153
1154 return hw->iov.ops.assign_int_moderator(hw, vf_idx);
1155}
1156
1157/**
1158 * fm10k_iov_select_vid - Select correct default VID
1159 * @hw: Pointer to hardware structure
1160 * @vid: VID to correct
1161 *
 1162 * Will report an error if the VID conflicts with the PF VID. For VID = 0, it will return
1163 * either the pf_vid or sw_vid depending on which one is set.
1164 */
1165static inline s32 fm10k_iov_select_vid(struct fm10k_vf_info *vf_info, u16 vid)
1166{
1167 if (!vid)
1168 return vf_info->pf_vid ? vf_info->pf_vid : vf_info->sw_vid;
1169 else if (vf_info->pf_vid && vid != vf_info->pf_vid)
1170 return FM10K_ERR_PARAM;
1171 else
1172 return vid;
1173}
1174
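/* Worked example, not part of the driver: with pf_vid = 100,
 * fm10k_iov_select_vid(vf_info, 0) and fm10k_iov_select_vid(vf_info, 100)
 * both return 100, while any other non-zero VID returns FM10K_ERR_PARAM.
 * With no pf_vid set, 0 falls back to sw_vid and any other VID is
 * returned unchanged.
 */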
1175/**
1176 * fm10k_iov_msg_mac_vlan_pf - Message handler for MAC/VLAN request from VF
1177 * @hw: Pointer to hardware structure
1178 * @results: Pointer array to message, results[0] is pointer to message
1179 * @mbx: Pointer to mailbox information structure
1180 *
1181 * This function is a default handler for MAC/VLAN requests from the VF.
1182 * The assumption is that in this case it is acceptable to just directly
 1183 * hand off the message from the VF to the underlying shared code.
1184 **/
1185s32 fm10k_iov_msg_mac_vlan_pf(struct fm10k_hw *hw, u32 **results,
1186 struct fm10k_mbx_info *mbx)
1187{
1188 struct fm10k_vf_info *vf_info = (struct fm10k_vf_info *)mbx;
1189 u8 mac[ETH_ALEN];
1190 u32 *result;
1191 int err = 0;
1192 bool set;
1193 u16 vlan;
1194 u32 vid;
1195
1196 /* we shouldn't be updating rules on a disabled interface */
1197 if (!FM10K_VF_FLAG_ENABLED(vf_info))
1198 err = FM10K_ERR_PARAM;
1199
1200 if (!err && !!results[FM10K_MAC_VLAN_MSG_VLAN]) {
1201 result = results[FM10K_MAC_VLAN_MSG_VLAN];
1202
1203 /* record VLAN id requested */
1204 err = fm10k_tlv_attr_get_u32(result, &vid);
1205 if (err)
1206 return err;
1207
1208 /* verify upper 16 bits are zero */
1209 if (vid >> 16)
 1210			return FM10K_ERR_PARAM;
1211
1212 set = !(vid & FM10K_VLAN_CLEAR);
1213 vid &= ~FM10K_VLAN_CLEAR;
1214
1215 err = fm10k_iov_select_vid(vf_info, vid);
1216 if (err < 0)
1217 return err;
1218 else
1219 vid = err;
1220
1221 /* update VSI info for VF in regards to VLAN table */
 1222		err = hw->mac.ops.update_vlan(hw, vid, vf_info->vsi, set);
1223 }
1224
1225 if (!err && !!results[FM10K_MAC_VLAN_MSG_MAC]) {
1226 result = results[FM10K_MAC_VLAN_MSG_MAC];
1227
1228 /* record unicast MAC address requested */
1229 err = fm10k_tlv_attr_get_mac_vlan(result, mac, &vlan);
1230 if (err)
1231 return err;
1232
1233 /* block attempts to set MAC for a locked device */
1234 if (is_valid_ether_addr(vf_info->mac) &&
1235 memcmp(mac, vf_info->mac, ETH_ALEN))
1236 return FM10K_ERR_PARAM;
1237
1238 set = !(vlan & FM10K_VLAN_CLEAR);
1239 vlan &= ~FM10K_VLAN_CLEAR;
1240
1241 err = fm10k_iov_select_vid(vf_info, vlan);
1242 if (err < 0)
1243 return err;
1244 else
1245 vlan = err;
1246
1247 /* notify switch of request for new unicast address */
1248 err = hw->mac.ops.update_uc_addr(hw, vf_info->glort,
1249 mac, vlan, set, 0);
1250 }
1251
1252 if (!err && !!results[FM10K_MAC_VLAN_MSG_MULTICAST]) {
1253 result = results[FM10K_MAC_VLAN_MSG_MULTICAST];
1254
1255 /* record multicast MAC address requested */
1256 err = fm10k_tlv_attr_get_mac_vlan(result, mac, &vlan);
1257 if (err)
1258 return err;
1259
1260 /* verify that the VF is allowed to request multicast */
1261 if (!(vf_info->vf_flags & FM10K_VF_FLAG_MULTI_ENABLED))
1262 return FM10K_ERR_PARAM;
1263
1264 set = !(vlan & FM10K_VLAN_CLEAR);
1265 vlan &= ~FM10K_VLAN_CLEAR;
1266
1267 err = fm10k_iov_select_vid(vf_info, vlan);
1268 if (err < 0)
1269 return err;
1270 else
1271 vlan = err;
1272
1273 /* notify switch of request for new multicast address */
1274 err = hw->mac.ops.update_mc_addr(hw, vf_info->glort,
1275 mac, vlan, set);
1276 }
1277
1278 return err;
1279}
1280
1281/**
1282 * fm10k_iov_supported_xcast_mode_pf - Determine best match for xcast mode
1283 * @vf_info: VF info structure containing capability flags
1284 * @mode: Requested xcast mode
1285 *
1286 * This function outputs the mode that most closely matches the requested
 1287 * mode. If no modes match, it will request that we disable the port
1288 **/
1289static u8 fm10k_iov_supported_xcast_mode_pf(struct fm10k_vf_info *vf_info,
1290 u8 mode)
1291{
1292 u8 vf_flags = vf_info->vf_flags;
1293
1294 /* match up mode to capabilities as best as possible */
1295 switch (mode) {
1296 case FM10K_XCAST_MODE_PROMISC:
1297 if (vf_flags & FM10K_VF_FLAG_PROMISC_CAPABLE)
1298 return FM10K_XCAST_MODE_PROMISC;
 1299		/* fall through */
1300 case FM10K_XCAST_MODE_ALLMULTI:
1301 if (vf_flags & FM10K_VF_FLAG_ALLMULTI_CAPABLE)
1302 return FM10K_XCAST_MODE_ALLMULTI;
 1303		/* fall through */
1304 case FM10K_XCAST_MODE_MULTI:
1305 if (vf_flags & FM10K_VF_FLAG_MULTI_CAPABLE)
1306 return FM10K_XCAST_MODE_MULTI;
 1307		/* fall through */
1308 case FM10K_XCAST_MODE_NONE:
1309 if (vf_flags & FM10K_VF_FLAG_NONE_CAPABLE)
1310 return FM10K_XCAST_MODE_NONE;
 1311		/* fall through */
1312 default:
1313 break;
1314 }
1315
1316 /* disable interface as it should not be able to request any */
1317 return FM10K_XCAST_MODE_DISABLE;
1318}
1319
1320/**
1321 * fm10k_iov_msg_lport_state_pf - Message handler for port state requests
1322 * @hw: Pointer to hardware structure
1323 * @results: Pointer array to message, results[0] is pointer to message
1324 * @mbx: Pointer to mailbox information structure
1325 *
1326 * This function is a default handler for port state requests. The port
1327 * state requests for now are basic and consist of enabling or disabling
1328 * the port.
1329 **/
1330s32 fm10k_iov_msg_lport_state_pf(struct fm10k_hw *hw, u32 **results,
1331 struct fm10k_mbx_info *mbx)
1332{
1333 struct fm10k_vf_info *vf_info = (struct fm10k_vf_info *)mbx;
1334 u32 *result;
1335 s32 err = 0;
1336 u32 msg[2];
1337 u8 mode = 0;
1338
1339 /* verify VF is allowed to enable even minimal mode */
1340 if (!(vf_info->vf_flags & FM10K_VF_FLAG_NONE_CAPABLE))
1341 return FM10K_ERR_PARAM;
1342
1343 if (!!results[FM10K_LPORT_STATE_MSG_XCAST_MODE]) {
1344 result = results[FM10K_LPORT_STATE_MSG_XCAST_MODE];
1345
1346 /* XCAST mode update requested */
1347 err = fm10k_tlv_attr_get_u8(result, &mode);
1348 if (err)
1349 return FM10K_ERR_PARAM;
1350
1351 /* prep for possible demotion depending on capabilities */
1352 mode = fm10k_iov_supported_xcast_mode_pf(vf_info, mode);
1353
1354 /* if mode is not currently enabled, enable it */
1355 if (!(FM10K_VF_FLAG_ENABLED(vf_info) & (1 << mode)))
1356 fm10k_update_xcast_mode_pf(hw, vf_info->glort, mode);
1357
1358 /* swap mode back to a bit flag */
1359 mode = FM10K_VF_FLAG_SET_MODE(mode);
1360 } else if (!results[FM10K_LPORT_STATE_MSG_DISABLE]) {
1361 /* need to disable the port if it is already enabled */
1362 if (FM10K_VF_FLAG_ENABLED(vf_info))
1363 err = fm10k_update_lport_state_pf(hw, vf_info->glort,
1364 1, false);
1365
1366 /* we need to clear VF_FLAG_ENABLED flags in order to ensure
1367 * that we actually re-enable the LPORT state below. Note that
1368 * this has no impact if the VF is already disabled, as the
1369 * flags are already cleared.
1370 */
1371 if (!err)
1372 vf_info->vf_flags = FM10K_VF_FLAG_CAPABLE(vf_info);
1373
1374 /* when enabling the port we should reset the rate limiters */
1375 hw->iov.ops.configure_tc(hw, vf_info->vf_idx, vf_info->rate);
1376
1377 /* set mode for minimal functionality */
1378 mode = FM10K_VF_FLAG_SET_MODE_NONE;
1379
1380 /* generate port state response to notify VF it is ready */
1381 fm10k_tlv_msg_init(msg, FM10K_VF_MSG_ID_LPORT_STATE);
1382 fm10k_tlv_attr_put_bool(msg, FM10K_LPORT_STATE_MSG_READY);
1383 mbx->ops.enqueue_tx(hw, mbx, msg);
1384 }
1385
1386 /* if enable state toggled note the update */
1387 if (!err && (!FM10K_VF_FLAG_ENABLED(vf_info) != !mode))
1388 err = fm10k_update_lport_state_pf(hw, vf_info->glort, 1,
1389 !!mode);
1390
1391 /* if state change succeeded, then update our stored state */
1392 mode |= FM10K_VF_FLAG_CAPABLE(vf_info);
1393 if (!err)
1394 vf_info->vf_flags = mode;
1395
1396 return err;
1397}
1398
1399const struct fm10k_msg_data fm10k_iov_msg_data_pf[] = {
1400 FM10K_TLV_MSG_TEST_HANDLER(fm10k_tlv_msg_test),
1401 FM10K_VF_MSG_MSIX_HANDLER(fm10k_iov_msg_msix_pf),
1402 FM10K_VF_MSG_MAC_VLAN_HANDLER(fm10k_iov_msg_mac_vlan_pf),
1403 FM10K_VF_MSG_LPORT_STATE_HANDLER(fm10k_iov_msg_lport_state_pf),
1404 FM10K_TLV_MSG_ERROR_HANDLER(fm10k_tlv_msg_error),
1405};
1406
1407/**
 1408 * fm10k_update_hw_stats_pf - Updates hardware related statistics of PF
1409 * @hw: pointer to hardware structure
1410 * @stats: pointer to the stats structure to update
1411 *
1412 * This function collects and aggregates global and per queue hardware
1413 * statistics.
1414 **/
1415static void fm10k_update_hw_stats_pf(struct fm10k_hw *hw,
1416 struct fm10k_hw_stats *stats)
1417{
1418 u32 timeout, ur, ca, um, xec, vlan_drop, loopback_drop, nodesc_drop;
1419 u32 id, id_prev;
1420
1421 /* Use Tx queue 0 as a canary to detect a reset */
1422 id = fm10k_read_reg(hw, FM10K_TXQCTL(0));
1423
1424 /* Read Global Statistics */
1425 do {
1426 timeout = fm10k_read_hw_stats_32b(hw, FM10K_STATS_TIMEOUT,
1427 &stats->timeout);
1428 ur = fm10k_read_hw_stats_32b(hw, FM10K_STATS_UR, &stats->ur);
1429 ca = fm10k_read_hw_stats_32b(hw, FM10K_STATS_CA, &stats->ca);
1430 um = fm10k_read_hw_stats_32b(hw, FM10K_STATS_UM, &stats->um);
1431 xec = fm10k_read_hw_stats_32b(hw, FM10K_STATS_XEC, &stats->xec);
1432 vlan_drop = fm10k_read_hw_stats_32b(hw, FM10K_STATS_VLAN_DROP,
1433 &stats->vlan_drop);
1434 loopback_drop = fm10k_read_hw_stats_32b(hw,
1435 FM10K_STATS_LOOPBACK_DROP,
 1436						&stats->loopback_drop);
1437 nodesc_drop = fm10k_read_hw_stats_32b(hw,
1438 FM10K_STATS_NODESC_DROP,
1439 &stats->nodesc_drop);
1440
1441 /* if value has not changed then we have consistent data */
1442 id_prev = id;
1443 id = fm10k_read_reg(hw, FM10K_TXQCTL(0));
1444 } while ((id ^ id_prev) & FM10K_TXQCTL_ID_MASK);
1445
1446 /* drop non-ID bits and set VALID ID bit */
1447 id &= FM10K_TXQCTL_ID_MASK;
1448 id |= FM10K_STAT_VALID;
1449
1450 /* Update Global Statistics */
1451 if (stats->stats_idx == id) {
1452 stats->timeout.count += timeout;
1453 stats->ur.count += ur;
1454 stats->ca.count += ca;
1455 stats->um.count += um;
1456 stats->xec.count += xec;
1457 stats->vlan_drop.count += vlan_drop;
1458 stats->loopback_drop.count += loopback_drop;
1459 stats->nodesc_drop.count += nodesc_drop;
1460 }
1461
1462 /* Update bases and record current PF id */
1463 fm10k_update_hw_base_32b(&stats->timeout, timeout);
1464 fm10k_update_hw_base_32b(&stats->ur, ur);
1465 fm10k_update_hw_base_32b(&stats->ca, ca);
1466 fm10k_update_hw_base_32b(&stats->um, um);
1467 fm10k_update_hw_base_32b(&stats->xec, xec);
1468 fm10k_update_hw_base_32b(&stats->vlan_drop, vlan_drop);
1469 fm10k_update_hw_base_32b(&stats->loopback_drop, loopback_drop);
1470 fm10k_update_hw_base_32b(&stats->nodesc_drop, nodesc_drop);
1471 stats->stats_idx = id;
1472
1473 /* Update Queue Statistics */
1474 fm10k_update_hw_stats_q(hw, stats->q, 0, hw->mac.max_queues);
1475}
1476
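/* Illustrative sketch, not part of the driver: the do/while above is a
 * seqlock-style retry, re-reading the counters whenever the TXQCTL(0)
 * owner ID changed underneath us.  The same pattern in isolation, with
 * hypothetical read_canary()/read_counters() callbacks:
 */
static void example_consistent_snapshot(u32 (*read_canary)(void),
					void (*read_counters)(void))
{
	u32 id = read_canary(), id_prev;

	do {
		read_counters();	/* may race with a hardware reset */
		id_prev = id;
		id = read_canary();	/* unchanged ID => consistent data */
	} while (id != id_prev);
}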
1477/**
1478 * fm10k_rebind_hw_stats_pf - Resets base for hardware statistics of PF
1479 * @hw: pointer to hardware structure
1480 * @stats: pointer to the stats structure to update
1481 *
1482 * This function resets the base for global and per queue hardware
1483 * statistics.
1484 **/
1485static void fm10k_rebind_hw_stats_pf(struct fm10k_hw *hw,
1486 struct fm10k_hw_stats *stats)
1487{
1488 /* Unbind Global Statistics */
1489 fm10k_unbind_hw_stats_32b(&stats->timeout);
1490 fm10k_unbind_hw_stats_32b(&stats->ur);
1491 fm10k_unbind_hw_stats_32b(&stats->ca);
1492 fm10k_unbind_hw_stats_32b(&stats->um);
1493 fm10k_unbind_hw_stats_32b(&stats->xec);
1494 fm10k_unbind_hw_stats_32b(&stats->vlan_drop);
1495 fm10k_unbind_hw_stats_32b(&stats->loopback_drop);
1496 fm10k_unbind_hw_stats_32b(&stats->nodesc_drop);
1497
1498 /* Unbind Queue Statistics */
1499 fm10k_unbind_hw_stats_q(stats->q, 0, hw->mac.max_queues);
1500
1501 /* Reinitialize bases for all stats */
1502 fm10k_update_hw_stats_pf(hw, stats);
1503}
1504
1505/**
1506 * fm10k_set_dma_mask_pf - Configures PhyAddrSpace to limit DMA to system
1507 * @hw: pointer to hardware structure
1508 * @dma_mask: 64 bit DMA mask required for platform
1509 *
1510 * This function sets the PHYADDR.PhyAddrSpace bits for the endpoint in order
1511 * to limit the access to memory beyond what is physically in the system.
1512 **/
1513static void fm10k_set_dma_mask_pf(struct fm10k_hw *hw, u64 dma_mask)
1514{
1515 /* we need to write the upper 32 bits of DMA mask to PhyAddrSpace */
1516 u32 phyaddr = (u32)(dma_mask >> 32);
1517
1518 fm10k_write_reg(hw, FM10K_PHYADDR, phyaddr);
1519}
1520
1521/**
1522 * fm10k_get_fault_pf - Record a fault in one of the interface units
1523 * @hw: pointer to hardware structure
1524 * @type: pointer to fault type register offset
1525 * @fault: pointer to memory location to record the fault
1526 *
1527 * Record the fault register contents to the fault data structure and
1528 * clear the entry from the register.
1529 *
1530 * Returns ERR_PARAM if invalid register is specified or no error is present.
1531 **/
1532static s32 fm10k_get_fault_pf(struct fm10k_hw *hw, int type,
1533 struct fm10k_fault *fault)
1534{
1535 u32 func;
1536
1537 /* verify the fault register is in range and is aligned */
1538 switch (type) {
1539 case FM10K_PCA_FAULT:
1540 case FM10K_THI_FAULT:
1541 case FM10K_FUM_FAULT:
1542 break;
1543 default:
1544 return FM10K_ERR_PARAM;
1545 }
1546
1547 /* only service faults that are valid */
1548 func = fm10k_read_reg(hw, type + FM10K_FAULT_FUNC);
1549 if (!(func & FM10K_FAULT_FUNC_VALID))
1550 return FM10K_ERR_PARAM;
1551
1552 /* read remaining fields */
1553 fault->address = fm10k_read_reg(hw, type + FM10K_FAULT_ADDR_HI);
1554 fault->address <<= 32;
 1555	fault->address |= fm10k_read_reg(hw, type + FM10K_FAULT_ADDR_LO);
1556 fault->specinfo = fm10k_read_reg(hw, type + FM10K_FAULT_SPECINFO);
1557
1558 /* clear valid bit to allow for next error */
1559 fm10k_write_reg(hw, type + FM10K_FAULT_FUNC, FM10K_FAULT_FUNC_VALID);
1560
1561 /* Record which function triggered the error */
1562 if (func & FM10K_FAULT_FUNC_PF)
1563 fault->func = 0;
1564 else
1565 fault->func = 1 + ((func & FM10K_FAULT_FUNC_VF_MASK) >>
1566 FM10K_FAULT_FUNC_VF_SHIFT);
1567
1568 /* record fault type */
1569 fault->type = func & FM10K_FAULT_FUNC_TYPE_MASK;
1570
1571 return 0;
1572}
1573
1574/**
1575 * fm10k_request_lport_map_pf - Request LPORT map from the switch API
1576 * @hw: pointer to hardware structure
1577 *
1578 **/
1579static s32 fm10k_request_lport_map_pf(struct fm10k_hw *hw)
1580{
1581 struct fm10k_mbx_info *mbx = &hw->mbx;
1582 u32 msg[1];
1583
1584 /* issue request asking for LPORT map */
1585 fm10k_tlv_msg_init(msg, FM10K_PF_MSG_ID_LPORT_MAP);
1586
1587 /* load onto outgoing mailbox */
1588 return mbx->ops.enqueue_tx(hw, mbx, msg);
1589}
1590
1591/**
1592 * fm10k_get_host_state_pf - Returns the state of the switch and mailbox
1593 * @hw: pointer to hardware structure
1594 * @switch_ready: pointer to boolean value that will record switch state
1595 *
 1596 * This function will check the DMA_CTRL2 register and mailbox in order
1597 * to determine if the switch is ready for the PF to begin requesting
1598 * addresses and mapping traffic to the local interface.
1599 **/
1600static s32 fm10k_get_host_state_pf(struct fm10k_hw *hw, bool *switch_ready)
1601{
1602 s32 ret_val = 0;
1603 u32 dma_ctrl2;
1604
 1605	/* verify the switch is ready for interaction */
1606 dma_ctrl2 = fm10k_read_reg(hw, FM10K_DMA_CTRL2);
1607 if (!(dma_ctrl2 & FM10K_DMA_CTRL2_SWITCH_READY))
1608 goto out;
1609
1610 /* retrieve generic host state info */
1611 ret_val = fm10k_get_host_state_generic(hw, switch_ready);
1612 if (ret_val)
1613 goto out;
1614
1615 /* interface cannot receive traffic without logical ports */
1616 if (hw->mac.dglort_map == FM10K_DGLORTMAP_NONE)
1617 ret_val = fm10k_request_lport_map_pf(hw);
1618
1619out:
1620 return ret_val;
1621}
1622
 1623/* This structure defines the attributes to be parsed below */
1624const struct fm10k_tlv_attr fm10k_lport_map_msg_attr[] = {
1625 FM10K_TLV_ATTR_U32(FM10K_PF_ATTR_ID_LPORT_MAP),
1626 FM10K_TLV_ATTR_LAST
1627};
1628
1629/**
1630 * fm10k_msg_lport_map_pf - Message handler for lport_map message from SM
1631 * @hw: Pointer to hardware structure
1632 * @results: pointer array containing parsed data
1633 * @mbx: Pointer to mailbox information structure
1634 *
1635 * This handler configures the lport mapping based on the reply from the
1636 * switch API.
1637 **/
1638s32 fm10k_msg_lport_map_pf(struct fm10k_hw *hw, u32 **results,
1639 struct fm10k_mbx_info *mbx)
1640{
1641 u16 glort, mask;
1642 u32 dglort_map;
1643 s32 err;
1644
1645 err = fm10k_tlv_attr_get_u32(results[FM10K_PF_ATTR_ID_LPORT_MAP],
1646 &dglort_map);
1647 if (err)
1648 return err;
1649
1650 /* extract values out of the header */
1651 glort = FM10K_MSG_HDR_FIELD_GET(dglort_map, LPORT_MAP_GLORT);
1652 mask = FM10K_MSG_HDR_FIELD_GET(dglort_map, LPORT_MAP_MASK);
1653
 1654	/* verify mask is set and that glort has no bits set outside the mask */
1655 if (!mask || (glort & ~mask))
1656 return FM10K_ERR_PARAM;
1657
1658 /* verify the mask is contiguous, and that it is 1's followed by 0's */
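	/* For illustration: ~(mask - 1) & mask isolates the lowest set bit of
	 * the mask, and adding it back carries cleanly out of a contiguous
	 * run (0x0100 + 0xff00 == 0x10000), while a gappy mask leaves low
	 * bits behind (0x0010 + 0xf0f0 == 0xf100) for the test below to catch.
	 */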
1659 if (((~(mask - 1) & mask) + mask) & FM10K_DGLORTMAP_NONE)
1660 return FM10K_ERR_PARAM;
1661
1662 /* record the glort, mask, and port count */
1663 hw->mac.dglort_map = dglort_map;
1664
1665 return 0;
1666}
1667
1668const struct fm10k_tlv_attr fm10k_update_pvid_msg_attr[] = {
1669 FM10K_TLV_ATTR_U32(FM10K_PF_ATTR_ID_UPDATE_PVID),
1670 FM10K_TLV_ATTR_LAST
1671};
1672
1673/**
1674 * fm10k_msg_update_pvid_pf - Message handler for port VLAN message from SM
1675 * @hw: Pointer to hardware structure
1676 * @results: pointer array containing parsed data
1677 * @mbx: Pointer to mailbox information structure
1678 *
1679 * This handler configures the default VLAN for the PF
1680 **/
1681s32 fm10k_msg_update_pvid_pf(struct fm10k_hw *hw, u32 **results,
1682 struct fm10k_mbx_info *mbx)
1683{
1684 u16 glort, pvid;
1685 u32 pvid_update;
1686 s32 err;
1687
1688 err = fm10k_tlv_attr_get_u32(results[FM10K_PF_ATTR_ID_UPDATE_PVID],
1689 &pvid_update);
1690 if (err)
1691 return err;
1692
1693 /* extract values from the pvid update */
1694 glort = FM10K_MSG_HDR_FIELD_GET(pvid_update, UPDATE_PVID_GLORT);
1695 pvid = FM10K_MSG_HDR_FIELD_GET(pvid_update, UPDATE_PVID_PVID);
1696
1697 /* if glort is not valid return error */
1698 if (!fm10k_glort_valid_pf(hw, glort))
1699 return FM10K_ERR_PARAM;
1700
1701 /* verify VID is valid */
1702 if (pvid >= FM10K_VLAN_TABLE_VID_MAX)
1703 return FM10K_ERR_PARAM;
1704
1705 /* record the port VLAN ID value */
1706 hw->mac.default_vid = pvid;
1707
1708 return 0;
1709}
1710
1711/**
1712 * fm10k_record_global_table_data - Move global table data to swapi table info
1713 * @from: pointer to source table data structure
1714 * @to: pointer to destination table info structure
1715 *
 1716 * This function will copy table_data to the table_info contained in
1717 * the hw struct.
1718 **/
1719static void fm10k_record_global_table_data(struct fm10k_global_table_data *from,
1720 struct fm10k_swapi_table_info *to)
1721{
1722 /* convert from le32 struct to CPU byte ordered values */
1723 to->used = le32_to_cpu(from->used);
1724 to->avail = le32_to_cpu(from->avail);
1725}
1726
1727const struct fm10k_tlv_attr fm10k_err_msg_attr[] = {
1728 FM10K_TLV_ATTR_LE_STRUCT(FM10K_PF_ATTR_ID_ERR,
1729 sizeof(struct fm10k_swapi_error)),
1730 FM10K_TLV_ATTR_LAST
1731};
1732
1733/**
1734 * fm10k_msg_err_pf - Message handler for error reply
1735 * @hw: Pointer to hardware structure
1736 * @results: pointer array containing parsed data
1737 * @mbx: Pointer to mailbox information structure
1738 *
1739 * This handler will capture the data for any error replies to previous
1740 * messages that the PF has sent.
1741 **/
1742s32 fm10k_msg_err_pf(struct fm10k_hw *hw, u32 **results,
1743 struct fm10k_mbx_info *mbx)
1744{
1745 struct fm10k_swapi_error err_msg;
1746 s32 err;
1747
1748 /* extract structure from message */
1749 err = fm10k_tlv_attr_get_le_struct(results[FM10K_PF_ATTR_ID_ERR],
1750 &err_msg, sizeof(err_msg));
1751 if (err)
1752 return err;
1753
1754 /* record table status */
1755 fm10k_record_global_table_data(&err_msg.mac, &hw->swapi.mac);
1756 fm10k_record_global_table_data(&err_msg.nexthop, &hw->swapi.nexthop);
1757 fm10k_record_global_table_data(&err_msg.ffu, &hw->swapi.ffu);
1758
1759 /* record SW API status value */
1760 hw->swapi.status = le32_to_cpu(err_msg.status);
1761
1762 return 0;
1763}
1764
1765const struct fm10k_tlv_attr fm10k_1588_timestamp_msg_attr[] = {
1766 FM10K_TLV_ATTR_LE_STRUCT(FM10K_PF_ATTR_ID_1588_TIMESTAMP,
1767 sizeof(struct fm10k_swapi_1588_timestamp)),
1768 FM10K_TLV_ATTR_LAST
1769};
1770
1771/* currently there is no shared 1588 timestamp handler */
1772
1773/**
1774 * fm10k_adjust_systime_pf - Adjust systime frequency
1775 * @hw: pointer to hardware structure
1776 * @ppb: adjustment rate in parts per billion
1777 *
 1778 * This function will adjust the SYSTIME_CFG register contained in BAR 4
 1779 * if BAR 4 access is supported. The requested adjustment is provided in
 1780 * parts per billion and is converted to an equivalent value expressed in
 1781 * parts per 2^48 clock cycles.
 1782 *
 1783 * If adjustment is not supported, or the requested value is too large,
 1784 * an error is returned.
1785 **/
1786static s32 fm10k_adjust_systime_pf(struct fm10k_hw *hw, s32 ppb)
1787{
1788 u64 systime_adjust;
1789
1790 /* if sw_addr is not set we don't have switch register access */
1791 if (!hw->sw_addr)
1792 return ppb ? FM10K_ERR_PARAM : 0;
1793
 1794	/* we must convert the value from parts per billion to parts per
 1795	 * 2^48 cycles. In addition, only the 30 most significant bits of the
 1796	 * adjustment value are used; the 8 least significant bits are located
 1797	 * in another register and represent a value significantly less than a
 1798	 * part per billion. The result of dropping the 8 least significant
 1799	 * bits is that the adjustment value is effectively multiplied by 2^8
 1800	 * when we write it.
 1801	 *
 1802	 * As a result, the math breaks down as follows:
 1803	 *	ppb / 10^9 == adjust * 2^8 / 2^48
 1804	 * If we solve this for adjust and simplify, it comes out as:
1805 * ppb * 2^31 / 5^9 == adjust
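	 *
	 * For example, a request of ppb = 1000 works out to
	 *	1000 * 2^31 / 1953125 == 1099511 (truncated)
	 * which, together with the direction bit for positive requests, is
	 * the value written to FM10K_SW_SYSTIME_ADJUST below.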
1806 */
1807 systime_adjust = (ppb < 0) ? -ppb : ppb;
1808 systime_adjust <<= 31;
1809 do_div(systime_adjust, 1953125);
1810
1811 /* verify the requested adjustment value is in range */
1812 if (systime_adjust > FM10K_SW_SYSTIME_ADJUST_MASK)
1813 return FM10K_ERR_PARAM;
1814
 1814
1815 if (ppb > 0)
1816 systime_adjust |= FM10K_SW_SYSTIME_ADJUST_DIR_POSITIVE;
1817
1818 fm10k_write_sw_reg(hw, FM10K_SW_SYSTIME_ADJUST, (u32)systime_adjust);
1819
1820 return 0;
1821}
1822
1823/**
1824 * fm10k_read_systime_pf - Reads value of systime registers
1825 * @hw: pointer to the hardware structure
1826 *
 1827 * Function reads the contents of 2 registers, combined to represent a 64-bit
 1828 * value measured in nanoseconds. In order to guarantee the value is accurate,
1829 * we check the 32 most significant bits both before and after reading the
1830 * 32 least significant bits to verify they didn't change as we were reading
1831 * the registers.
1832 **/
1833static u64 fm10k_read_systime_pf(struct fm10k_hw *hw)
1834{
1835 u32 systime_l, systime_h, systime_tmp;
1836
1837 systime_h = fm10k_read_reg(hw, FM10K_SYSTIME + 1);
1838
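	/* re-read the upper half after sampling the lower half; if it changed,
	 * the counter rolled over mid-read and we must retry
	 */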
1839 do {
1840 systime_tmp = systime_h;
1841 systime_l = fm10k_read_reg(hw, FM10K_SYSTIME);
1842 systime_h = fm10k_read_reg(hw, FM10K_SYSTIME + 1);
1843 } while (systime_tmp != systime_h);
1844
1845 return ((u64)systime_h << 32) | systime_l;
1846}
1847
1848static const struct fm10k_msg_data fm10k_msg_data_pf[] = {
1849 FM10K_PF_MSG_ERR_HANDLER(XCAST_MODES, fm10k_msg_err_pf),
1850 FM10K_PF_MSG_ERR_HANDLER(UPDATE_MAC_FWD_RULE, fm10k_msg_err_pf),
1851 FM10K_PF_MSG_LPORT_MAP_HANDLER(fm10k_msg_lport_map_pf),
1852 FM10K_PF_MSG_ERR_HANDLER(LPORT_CREATE, fm10k_msg_err_pf),
1853 FM10K_PF_MSG_ERR_HANDLER(LPORT_DELETE, fm10k_msg_err_pf),
1854 FM10K_PF_MSG_UPDATE_PVID_HANDLER(fm10k_msg_update_pvid_pf),
1855 FM10K_TLV_MSG_ERROR_HANDLER(fm10k_tlv_msg_error),
1856};
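/* this table is registered with the switch manager mailbox by
 * fm10k_get_invariants_pf() below via fm10k_sm_mbx_init()
 */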
1857
1858static struct fm10k_mac_ops mac_ops_pf = {
1859 .get_bus_info = &fm10k_get_bus_info_generic,
1860 .reset_hw = &fm10k_reset_hw_pf,
1861 .init_hw = &fm10k_init_hw_pf,
1862 .start_hw = &fm10k_start_hw_generic,
1863 .stop_hw = &fm10k_stop_hw_generic,
 1864	.update_vlan = &fm10k_update_vlan_pf,
 1865	.read_mac_addr = &fm10k_read_mac_addr_pf,
1866 .update_uc_addr = &fm10k_update_uc_addr_pf,
1867 .update_mc_addr = &fm10k_update_mc_addr_pf,
1868 .update_xcast_mode = &fm10k_update_xcast_mode_pf,
1869 .update_int_moderator = &fm10k_update_int_moderator_pf,
1870 .update_lport_state = &fm10k_update_lport_state_pf,
1871 .update_hw_stats = &fm10k_update_hw_stats_pf,
1872 .rebind_hw_stats = &fm10k_rebind_hw_stats_pf,
1873 .configure_dglort_map = &fm10k_configure_dglort_map_pf,
1874 .set_dma_mask = &fm10k_set_dma_mask_pf,
 1875	.get_fault = &fm10k_get_fault_pf,
 1876	.get_host_state = &fm10k_get_host_state_pf,
1877 .adjust_systime = &fm10k_adjust_systime_pf,
1878 .read_systime = &fm10k_read_systime_pf,
1879};
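/* Illustrative note: these handlers are not called directly. The table is
 * exported through fm10k_pf_info at the end of this file, and the core
 * driver is expected to dispatch through the hw struct, e.g. (sketch only,
 * the exact plumbing lives outside this file):
 *
 *	err = hw->mac.ops.reset_hw(hw);
 *	if (!err)
 *		err = hw->mac.ops.init_hw(hw);
 */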
1880
1881static struct fm10k_iov_ops iov_ops_pf = {
1882 .assign_resources = &fm10k_iov_assign_resources_pf,
1883 .configure_tc = &fm10k_iov_configure_tc_pf,
1884 .assign_int_moderator = &fm10k_iov_assign_int_moderator_pf,
1885 .assign_default_mac_vlan = fm10k_iov_assign_default_mac_vlan_pf,
1886 .reset_resources = &fm10k_iov_reset_resources_pf,
1887 .set_lport = &fm10k_iov_set_lport_pf,
1888 .reset_lport = &fm10k_iov_reset_lport_pf,
1889 .update_stats = &fm10k_iov_update_stats_pf,
 1890	.report_timestamp = &fm10k_iov_report_timestamp_pf,
1891};
1892
1893static s32 fm10k_get_invariants_pf(struct fm10k_hw *hw)
1894{
1895 fm10k_get_invariants_generic(hw);
1896
1897 return fm10k_sm_mbx_init(hw, &hw->mbx, fm10k_msg_data_pf);
1898}
1899
1900struct fm10k_info fm10k_pf_info = {
1901 .mac = fm10k_mac_pf,
 1902	.get_invariants = &fm10k_get_invariants_pf,
 1903	.mac_ops = &mac_ops_pf,
 1904	.iov_ops = &iov_ops_pf,
 1905};