drivers/net/ethernet/qlogic/qed/qed_l2.c
1/* QLogic qed NIC Driver
2 * Copyright (c) 2015 QLogic Corporation
3 *
4 * This software is available under the terms of the GNU General Public License
5 * (GPL) Version 2, available from the file COPYING in the main directory of
6 * this source tree.
7 */
8
9#include <linux/types.h>
10#include <asm/byteorder.h>
11#include <asm/param.h>
12#include <linux/delay.h>
13#include <linux/dma-mapping.h>
14#include <linux/etherdevice.h>
15#include <linux/interrupt.h>
16#include <linux/kernel.h>
17#include <linux/module.h>
18#include <linux/pci.h>
19#include <linux/slab.h>
20#include <linux/stddef.h>
21#include <linux/string.h>
22#include <linux/version.h>
23#include <linux/workqueue.h>
24#include <linux/bitops.h>
25#include <linux/bug.h>
26#include "qed.h"
27#include <linux/qed/qed_chain.h>
28#include "qed_cxt.h"
29#include "qed_dev_api.h"
30#include <linux/qed/qed_eth_if.h>
31#include "qed_hsi.h"
32#include "qed_hw.h"
33#include "qed_int.h"
34#include "qed_mcp.h"
35#include "qed_reg_addr.h"
36#include "qed_sp.h"
37
38enum qed_rss_caps {
39 QED_RSS_IPV4 = 0x1,
40 QED_RSS_IPV6 = 0x2,
41 QED_RSS_IPV4_TCP = 0x4,
42 QED_RSS_IPV6_TCP = 0x8,
43 QED_RSS_IPV4_UDP = 0x10,
44 QED_RSS_IPV6_UDP = 0x20,
45};
46
47/* Should be the same as ETH_RSS_IND_TABLE_ENTRIES_NUM */
48#define QED_RSS_IND_TABLE_SIZE 128
49#define QED_RSS_KEY_SIZE 10 /* size in 32b chunks */
50
51struct qed_rss_params {
52 u8 update_rss_config;
53 u8 rss_enable;
54 u8 rss_eng_id;
55 u8 update_rss_capabilities;
56 u8 update_rss_ind_table;
57 u8 update_rss_key;
58 u8 rss_caps;
59 u8 rss_table_size_log;
60 u16 rss_ind_table[QED_RSS_IND_TABLE_SIZE];
61 u32 rss_key[QED_RSS_KEY_SIZE];
62};
63
64enum qed_filter_opcode {
65 QED_FILTER_ADD,
66 QED_FILTER_REMOVE,
67 QED_FILTER_MOVE,
68 QED_FILTER_REPLACE, /* Delete all MACs and add new one instead */
69 QED_FILTER_FLUSH, /* Removes all filters */
70};
71
72enum qed_filter_ucast_type {
73 QED_FILTER_MAC,
74 QED_FILTER_VLAN,
75 QED_FILTER_MAC_VLAN,
76 QED_FILTER_INNER_MAC,
77 QED_FILTER_INNER_VLAN,
78 QED_FILTER_INNER_PAIR,
79 QED_FILTER_INNER_MAC_VNI_PAIR,
80 QED_FILTER_MAC_VNI_PAIR,
81 QED_FILTER_VNI,
82};
83
84struct qed_filter_ucast {
85 enum qed_filter_opcode opcode;
86 enum qed_filter_ucast_type type;
87 u8 is_rx_filter;
88 u8 is_tx_filter;
89 u8 vport_to_add_to;
90 u8 vport_to_remove_from;
91 unsigned char mac[ETH_ALEN];
92 u8 assert_on_error;
93 u16 vlan;
94 u32 vni;
95};
96
97struct qed_filter_mcast {
98 /* MOVE is not supported for multicast */
99 enum qed_filter_opcode opcode;
100 u8 vport_to_add_to;
101 u8 vport_to_remove_from;
102 u8 num_mc_addrs;
103#define QED_MAX_MC_ADDRS 64
104 unsigned char mac[QED_MAX_MC_ADDRS][ETH_ALEN];
105};
106
107struct qed_filter_accept_flags {
108 u8 update_rx_mode_config;
109 u8 update_tx_mode_config;
110 u8 rx_accept_filter;
111 u8 tx_accept_filter;
112#define QED_ACCEPT_NONE 0x01
113#define QED_ACCEPT_UCAST_MATCHED 0x02
114#define QED_ACCEPT_UCAST_UNMATCHED 0x04
115#define QED_ACCEPT_MCAST_MATCHED 0x08
116#define QED_ACCEPT_MCAST_UNMATCHED 0x10
117#define QED_ACCEPT_BCAST 0x20
118};
119
120struct qed_sp_vport_update_params {
121 u16 opaque_fid;
122 u8 vport_id;
123 u8 update_vport_active_rx_flg;
124 u8 vport_active_rx_flg;
125 u8 update_vport_active_tx_flg;
126 u8 vport_active_tx_flg;
127 u8 update_approx_mcast_flg;
128 u8 update_accept_any_vlan_flg;
129 u8 accept_any_vlan;
130 unsigned long bins[8];
131 struct qed_rss_params *rss_params;
132 struct qed_filter_accept_flags accept_flags;
133};
134
135enum qed_tpa_mode {
136 QED_TPA_MODE_NONE,
137 QED_TPA_MODE_UNUSED,
138 QED_TPA_MODE_GRO,
139 QED_TPA_MODE_MAX
140};
141
142struct qed_sp_vport_start_params {
143 enum qed_tpa_mode tpa_mode;
144 bool remove_inner_vlan;
145 bool drop_ttl0;
146 u8 max_buffers_per_cqe;
147 u32 concrete_fid;
148 u16 opaque_fid;
149 u8 vport_id;
150 u16 mtu;
151};
152
153#define QED_MAX_SGES_NUM 16
154#define CRC32_POLY 0x1edc6f41
155
156static int qed_sp_vport_start(struct qed_hwfn *p_hwfn,
157 struct qed_sp_vport_start_params *p_params)
158{
159 struct vport_start_ramrod_data *p_ramrod = NULL;
160 struct qed_spq_entry *p_ent = NULL;
161 struct qed_sp_init_data init_data;
162 int rc = -EINVAL;
163 u16 rx_mode = 0;
164 u8 abs_vport_id = 0;
165
166 rc = qed_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id);
167 if (rc != 0)
168 return rc;
169
170 memset(&init_data, 0, sizeof(init_data));
171 init_data.cid = qed_spq_get_cid(p_hwfn);
172 init_data.opaque_fid = p_params->opaque_fid;
173 init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
174
175 rc = qed_sp_init_request(p_hwfn, &p_ent,
176 ETH_RAMROD_VPORT_START,
177 PROTOCOLID_ETH, &init_data);
178 if (rc)
179 return rc;
180
181 p_ramrod = &p_ent->ramrod.vport_start;
182 p_ramrod->vport_id = abs_vport_id;
183
184 p_ramrod->mtu = cpu_to_le16(p_params->mtu);
185 p_ramrod->inner_vlan_removal_en = p_params->remove_inner_vlan;
186 p_ramrod->drop_ttl0_en = p_params->drop_ttl0;
187
188 SET_FIELD(rx_mode, ETH_VPORT_RX_MODE_UCAST_DROP_ALL, 1);
189 SET_FIELD(rx_mode, ETH_VPORT_RX_MODE_MCAST_DROP_ALL, 1);
190
191 p_ramrod->rx_mode.state = cpu_to_le16(rx_mode);
192
193 /* TPA related fields */
194 memset(&p_ramrod->tpa_param, 0,
195 sizeof(struct eth_vport_tpa_param));
196
197 p_ramrod->tpa_param.max_buff_num = p_params->max_buffers_per_cqe;
198
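 /* For QED_TPA_MODE_GRO below, aggregation is configured as aggressively
  * as the firmware allows: maximum aggregation size (0xffff), start and
  * continue thresholds of half an MTU, and the IPv4/IPv6, packet-split
  * and GRO-consistency flags all enabled.
  */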
199 switch (p_params->tpa_mode) {
200 case QED_TPA_MODE_GRO:
201 p_ramrod->tpa_param.tpa_max_aggs_num = ETH_TPA_MAX_AGGS_NUM;
202 p_ramrod->tpa_param.tpa_max_size = (u16)-1;
203 p_ramrod->tpa_param.tpa_min_size_to_cont = p_params->mtu / 2;
204 p_ramrod->tpa_param.tpa_min_size_to_start = p_params->mtu / 2;
205 p_ramrod->tpa_param.tpa_ipv4_en_flg = 1;
206 p_ramrod->tpa_param.tpa_ipv6_en_flg = 1;
207 p_ramrod->tpa_param.tpa_pkt_split_flg = 1;
208 p_ramrod->tpa_param.tpa_gro_consistent_flg = 1;
209 break;
210 default:
211 break;
212 }
213
214 /* Software Function ID in hwfn (PFs are 0 - 15, VFs are 16 - 135) */
215 p_ramrod->sw_fid = qed_concrete_to_sw_fid(p_hwfn->cdev,
216 p_params->concrete_fid);
217
218 return qed_spq_post(p_hwfn, p_ent, NULL);
219}
220
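/* Translate the caller's RSS parameters into the vport-update ramrod. Entries
 * in the indirection table are relative queue indices; each one is converted
 * to an absolute L2 queue ID via qed_fw_l2_queue() before being written to
 * the firmware table.
 */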
221static int
222qed_sp_vport_update_rss(struct qed_hwfn *p_hwfn,
223 struct vport_update_ramrod_data *p_ramrod,
224 struct qed_rss_params *p_params)
225{
226 struct eth_vport_rss_config *rss = &p_ramrod->rss_config;
227 u16 abs_l2_queue = 0, capabilities = 0;
228 int rc = 0, i;
229
230 if (!p_params) {
231 p_ramrod->common.update_rss_flg = 0;
232 return rc;
233 }
234
235 BUILD_BUG_ON(QED_RSS_IND_TABLE_SIZE !=
236 ETH_RSS_IND_TABLE_ENTRIES_NUM);
237
238 rc = qed_fw_rss_eng(p_hwfn, p_params->rss_eng_id, &rss->rss_id);
239 if (rc)
240 return rc;
241
242 p_ramrod->common.update_rss_flg = p_params->update_rss_config;
243 rss->update_rss_capabilities = p_params->update_rss_capabilities;
244 rss->update_rss_ind_table = p_params->update_rss_ind_table;
245 rss->update_rss_key = p_params->update_rss_key;
246
247 rss->rss_mode = p_params->rss_enable ?
248 ETH_VPORT_RSS_MODE_REGULAR :
249 ETH_VPORT_RSS_MODE_DISABLED;
250
251 SET_FIELD(capabilities,
252 ETH_VPORT_RSS_CONFIG_IPV4_CAPABILITY,
253 !!(p_params->rss_caps & QED_RSS_IPV4));
254 SET_FIELD(capabilities,
255 ETH_VPORT_RSS_CONFIG_IPV6_CAPABILITY,
256 !!(p_params->rss_caps & QED_RSS_IPV6));
257 SET_FIELD(capabilities,
258 ETH_VPORT_RSS_CONFIG_IPV4_TCP_CAPABILITY,
259 !!(p_params->rss_caps & QED_RSS_IPV4_TCP));
260 SET_FIELD(capabilities,
261 ETH_VPORT_RSS_CONFIG_IPV6_TCP_CAPABILITY,
262 !!(p_params->rss_caps & QED_RSS_IPV6_TCP));
263 SET_FIELD(capabilities,
264 ETH_VPORT_RSS_CONFIG_IPV4_UDP_CAPABILITY,
265 !!(p_params->rss_caps & QED_RSS_IPV4_UDP));
266 SET_FIELD(capabilities,
267 ETH_VPORT_RSS_CONFIG_IPV6_UDP_CAPABILITY,
268 !!(p_params->rss_caps & QED_RSS_IPV6_UDP));
269 rss->tbl_size = p_params->rss_table_size_log;
270
271 rss->capabilities = cpu_to_le16(capabilities);
272
273 DP_VERBOSE(p_hwfn, NETIF_MSG_IFUP,
274 "update rss flag %d, rss_mode = %d, update_caps = %d, capabilities = %d, update_ind = %d, update_rss_key = %d\n",
275 p_ramrod->common.update_rss_flg,
276 rss->rss_mode, rss->update_rss_capabilities,
277 capabilities, rss->update_rss_ind_table,
278 rss->update_rss_key);
279
280 for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i++) {
281 rc = qed_fw_l2_queue(p_hwfn,
282 (u8)p_params->rss_ind_table[i],
283 &abs_l2_queue);
284 if (rc)
285 return rc;
286
287 rss->indirection_table[i] = cpu_to_le16(abs_l2_queue);
288 DP_VERBOSE(p_hwfn, NETIF_MSG_IFUP, "i= %d, queue = %d\n",
289 i, rss->indirection_table[i]);
290 }
291
292 for (i = 0; i < 10; i++)
293 rss->rss_key[i] = cpu_to_le32(p_params->rss_key[i]);
294
295 return rc;
296}
297
298static void
299qed_sp_update_accept_mode(struct qed_hwfn *p_hwfn,
300 struct vport_update_ramrod_data *p_ramrod,
301 struct qed_filter_accept_flags accept_flags)
302{
303 p_ramrod->common.update_rx_mode_flg =
304 accept_flags.update_rx_mode_config;
305
306 p_ramrod->common.update_tx_mode_flg =
307 accept_flags.update_tx_mode_config;
308
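 /* On the Rx side, DROP_ALL for a traffic class is set only when neither
  * its matched nor its unmatched variant is accepted, while ACCEPT_ALL /
  * ACCEPT_UNMATCHED require the corresponding QED_ACCEPT_* flags; on the
  * Tx side, DROP_ALL simply mirrors QED_ACCEPT_NONE.
  */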
309 /* Set Rx mode accept flags */
310 if (p_ramrod->common.update_rx_mode_flg) {
311 u8 accept_filter = accept_flags.rx_accept_filter;
312 u16 state = 0;
313
314 SET_FIELD(state, ETH_VPORT_RX_MODE_UCAST_DROP_ALL,
315 !(!!(accept_filter & QED_ACCEPT_UCAST_MATCHED) ||
316 !!(accept_filter & QED_ACCEPT_UCAST_UNMATCHED)));
317
318 SET_FIELD(state, ETH_VPORT_RX_MODE_UCAST_ACCEPT_UNMATCHED,
319 !!(accept_filter & QED_ACCEPT_UCAST_UNMATCHED));
320
321 SET_FIELD(state, ETH_VPORT_RX_MODE_MCAST_DROP_ALL,
322 !(!!(accept_filter & QED_ACCEPT_MCAST_MATCHED) ||
323 !!(accept_filter & QED_ACCEPT_MCAST_UNMATCHED)));
324
325 SET_FIELD(state, ETH_VPORT_RX_MODE_MCAST_ACCEPT_ALL,
326 (!!(accept_filter & QED_ACCEPT_MCAST_MATCHED) &&
327 !!(accept_filter & QED_ACCEPT_MCAST_UNMATCHED)));
328
329 SET_FIELD(state, ETH_VPORT_RX_MODE_BCAST_ACCEPT_ALL,
330 !!(accept_filter & QED_ACCEPT_BCAST));
331
332 p_ramrod->rx_mode.state = cpu_to_le16(state);
333 DP_VERBOSE(p_hwfn, QED_MSG_SP,
334 "p_ramrod->rx_mode.state = 0x%x\n", state);
335 }
336
337 /* Set Tx mode accept flags */
338 if (p_ramrod->common.update_tx_mode_flg) {
339 u8 accept_filter = accept_flags.tx_accept_filter;
340 u16 state = 0;
341
342 SET_FIELD(state, ETH_VPORT_TX_MODE_UCAST_DROP_ALL,
343 !!(accept_filter & QED_ACCEPT_NONE));
344
345 SET_FIELD(state, ETH_VPORT_TX_MODE_UCAST_ACCEPT_ALL,
346 (!!(accept_filter & QED_ACCEPT_UCAST_MATCHED) &&
347 !!(accept_filter & QED_ACCEPT_UCAST_UNMATCHED)));
348
349 SET_FIELD(state, ETH_VPORT_TX_MODE_MCAST_DROP_ALL,
350 !!(accept_filter & QED_ACCEPT_NONE));
351
352 SET_FIELD(state, ETH_VPORT_TX_MODE_MCAST_ACCEPT_ALL,
353 (!!(accept_filter & QED_ACCEPT_MCAST_MATCHED) &&
354 !!(accept_filter & QED_ACCEPT_MCAST_UNMATCHED)));
355
356 SET_FIELD(state, ETH_VPORT_TX_MODE_BCAST_ACCEPT_ALL,
357 !!(accept_filter & QED_ACCEPT_BCAST));
358
359 p_ramrod->tx_mode.state = cpu_to_le16(state);
360 DP_VERBOSE(p_hwfn, QED_MSG_SP,
361 "p_ramrod->tx_mode.state = 0x%x\n", state);
362 }
363}
364
365static void
366qed_sp_update_mcast_bin(struct qed_hwfn *p_hwfn,
367 struct vport_update_ramrod_data *p_ramrod,
368 struct qed_sp_vport_update_params *p_params)
369{
370 int i;
371
372 memset(&p_ramrod->approx_mcast.bins, 0,
373 sizeof(p_ramrod->approx_mcast.bins));
374
375 if (p_params->update_approx_mcast_flg) {
376 p_ramrod->common.update_approx_mcast_flg = 1;
377 for (i = 0; i < ETH_MULTICAST_MAC_BINS_IN_REGS; i++) {
378 u32 *p_bins = (u32 *)p_params->bins;
379 __le32 val = cpu_to_le32(p_bins[i]);
380
381 p_ramrod->approx_mcast.bins[i] = val;
382 }
383 }
384}
385
386static int
387qed_sp_vport_update(struct qed_hwfn *p_hwfn,
388 struct qed_sp_vport_update_params *p_params,
389 enum spq_mode comp_mode,
390 struct qed_spq_comp_cb *p_comp_data)
391{
392 struct qed_rss_params *p_rss_params = p_params->rss_params;
393 struct vport_update_ramrod_data_cmn *p_cmn;
394 struct qed_sp_init_data init_data;
395 struct vport_update_ramrod_data *p_ramrod = NULL;
396 struct qed_spq_entry *p_ent = NULL;
397 u8 abs_vport_id = 0;
398 int rc = -EINVAL;
399
400 rc = qed_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id);
401 if (rc != 0)
402 return rc;
403
404 memset(&init_data, 0, sizeof(init_data));
405 init_data.cid = qed_spq_get_cid(p_hwfn);
406 init_data.opaque_fid = p_params->opaque_fid;
407 init_data.comp_mode = comp_mode;
408 init_data.p_comp_data = p_comp_data;
409
410 rc = qed_sp_init_request(p_hwfn, &p_ent,
411 ETH_RAMROD_VPORT_UPDATE,
412 PROTOCOLID_ETH, &init_data);
413 if (rc)
414 return rc;
415
416 /* Copy input params to ramrod according to FW struct */
417 p_ramrod = &p_ent->ramrod.vport_update;
418 p_cmn = &p_ramrod->common;
419
420 p_cmn->vport_id = abs_vport_id;
421 p_cmn->rx_active_flg = p_params->vport_active_rx_flg;
422 p_cmn->update_rx_active_flg = p_params->update_vport_active_rx_flg;
423 p_cmn->tx_active_flg = p_params->vport_active_tx_flg;
424 p_cmn->update_tx_active_flg = p_params->update_vport_active_tx_flg;
425 p_cmn->accept_any_vlan = p_params->accept_any_vlan;
426 p_cmn->update_accept_any_vlan_flg =
427 p_params->update_accept_any_vlan_flg;
428 rc = qed_sp_vport_update_rss(p_hwfn, p_ramrod, p_rss_params);
429 if (rc) {
430 /* Return spq entry which is taken in qed_sp_init_request()*/
431 qed_spq_return_entry(p_hwfn, p_ent);
432 return rc;
433 }
434
435 /* Update mcast bins for VFs, PF doesn't use this functionality */
436 qed_sp_update_mcast_bin(p_hwfn, p_ramrod, p_params);
437
438 qed_sp_update_accept_mode(p_hwfn, p_ramrod, p_params->accept_flags);
439 return qed_spq_post(p_hwfn, p_ent, NULL);
440}
441
442static int qed_sp_vport_stop(struct qed_hwfn *p_hwfn,
443 u16 opaque_fid,
444 u8 vport_id)
445{
446 struct vport_stop_ramrod_data *p_ramrod;
447 struct qed_sp_init_data init_data;
448 struct qed_spq_entry *p_ent;
449 u8 abs_vport_id = 0;
450 int rc;
451
452 rc = qed_fw_vport(p_hwfn, vport_id, &abs_vport_id);
453 if (rc != 0)
454 return rc;
455
456 memset(&init_data, 0, sizeof(init_data));
457 init_data.cid = qed_spq_get_cid(p_hwfn);
458 init_data.opaque_fid = opaque_fid;
459 init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
460
461 rc = qed_sp_init_request(p_hwfn, &p_ent,
462 ETH_RAMROD_VPORT_STOP,
463 PROTOCOLID_ETH, &init_data);
464 if (rc)
465 return rc;
466
467 p_ramrod = &p_ent->ramrod.vport_stop;
468 p_ramrod->vport_id = abs_vport_id;
469
470 return qed_spq_post(p_hwfn, p_ent, NULL);
471}
472
473static int qed_filter_accept_cmd(struct qed_dev *cdev,
474 u8 vport,
475 struct qed_filter_accept_flags accept_flags,
476 u8 update_accept_any_vlan,
477 u8 accept_any_vlan,
478 enum spq_mode comp_mode,
479 struct qed_spq_comp_cb *p_comp_data)
480{
481 struct qed_sp_vport_update_params vport_update_params;
482 int i, rc;
483
484 /* Prepare and send the vport rx_mode change */
485 memset(&vport_update_params, 0, sizeof(vport_update_params));
486 vport_update_params.vport_id = vport;
487 vport_update_params.accept_flags = accept_flags;
488 vport_update_params.update_accept_any_vlan_flg = update_accept_any_vlan;
489 vport_update_params.accept_any_vlan = accept_any_vlan;
490
491 for_each_hwfn(cdev, i) {
492 struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
493
494 vport_update_params.opaque_fid = p_hwfn->hw_info.opaque_fid;
495
496 rc = qed_sp_vport_update(p_hwfn, &vport_update_params,
497 comp_mode, p_comp_data);
498 if (rc != 0) {
499 DP_ERR(cdev, "Update rx_mode failed %d\n", rc);
500 return rc;
501 }
502
503 DP_VERBOSE(p_hwfn, QED_MSG_SP,
504 "Accept filter configured, flags = [Rx]%x [Tx]%x\n",
505 accept_flags.rx_accept_filter,
506 accept_flags.tx_accept_filter);
507 if (update_accept_any_vlan)
508 DP_VERBOSE(p_hwfn, QED_MSG_SP,
509 "accept_any_vlan=%d configured\n",
510 accept_any_vlan);
511 }
512
513 return 0;
514}
515
516static int qed_sp_release_queue_cid(
517 struct qed_hwfn *p_hwfn,
518 struct qed_hw_cid_data *p_cid_data)
519{
520 if (!p_cid_data->b_cid_allocated)
521 return 0;
522
523 qed_cxt_release_cid(p_hwfn, p_cid_data->cid);
524
525 p_cid_data->b_cid_allocated = false;
526
527 return 0;
528}
529
530static int
531qed_sp_eth_rxq_start_ramrod(struct qed_hwfn *p_hwfn,
532 u16 opaque_fid,
533 u32 cid,
534 struct qed_queue_start_common_params *params,
535 u8 stats_id,
536 u16 bd_max_bytes,
537 dma_addr_t bd_chain_phys_addr,
538 dma_addr_t cqe_pbl_addr,
539 u16 cqe_pbl_size)
540{
541 struct rx_queue_start_ramrod_data *p_ramrod = NULL;
542 struct qed_spq_entry *p_ent = NULL;
543 struct qed_sp_init_data init_data;
544 struct qed_hw_cid_data *p_rx_cid;
545 u16 abs_rx_q_id = 0;
546 u8 abs_vport_id = 0;
547 int rc = -EINVAL;
548
549 /* Store information for the stop */
550 p_rx_cid = &p_hwfn->p_rx_cids[params->queue_id];
551 p_rx_cid->cid = cid;
552 p_rx_cid->opaque_fid = opaque_fid;
553 p_rx_cid->vport_id = params->vport_id;
554
555 rc = qed_fw_vport(p_hwfn, params->vport_id, &abs_vport_id);
556 if (rc != 0)
557 return rc;
558
559 rc = qed_fw_l2_queue(p_hwfn, params->queue_id, &abs_rx_q_id);
560 if (rc != 0)
561 return rc;
562
563 DP_VERBOSE(p_hwfn, QED_MSG_SP,
564 "opaque_fid=0x%x, cid=0x%x, rx_qid=0x%x, vport_id=0x%x, sb_id=0x%x\n",
565 opaque_fid, cid, params->queue_id, params->vport_id,
566 params->sb);
567
568 /* Get SPQ entry */
569 memset(&init_data, 0, sizeof(init_data));
570 init_data.cid = cid;
571 init_data.opaque_fid = opaque_fid;
572 init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
573
574 rc = qed_sp_init_request(p_hwfn, &p_ent,
575 ETH_RAMROD_RX_QUEUE_START,
576 PROTOCOLID_ETH, &init_data);
577 if (rc)
578 return rc;
579
580 p_ramrod = &p_ent->ramrod.rx_queue_start;
581
582 p_ramrod->sb_id = cpu_to_le16(params->sb);
583 p_ramrod->sb_index = params->sb_idx;
584 p_ramrod->vport_id = abs_vport_id;
585 p_ramrod->stats_counter_id = stats_id;
586 p_ramrod->rx_queue_id = cpu_to_le16(abs_rx_q_id);
587 p_ramrod->complete_cqe_flg = 0;
588 p_ramrod->complete_event_flg = 1;
589
590 p_ramrod->bd_max_bytes = cpu_to_le16(bd_max_bytes);
591 DMA_REGPAIR_LE(p_ramrod->bd_base, bd_chain_phys_addr);
592
593 p_ramrod->num_of_pbl_pages = cpu_to_le16(cqe_pbl_size);
594 DMA_REGPAIR_LE(p_ramrod->cqe_pbl_addr, cqe_pbl_addr);
595
596 rc = qed_spq_post(p_hwfn, p_ent, NULL);
597
598 return rc;
599}
600
601static int
602qed_sp_eth_rx_queue_start(struct qed_hwfn *p_hwfn,
603 u16 opaque_fid,
604 struct qed_queue_start_common_params *params,
605 u16 bd_max_bytes,
606 dma_addr_t bd_chain_phys_addr,
607 dma_addr_t cqe_pbl_addr,
608 u16 cqe_pbl_size,
609 void __iomem **pp_prod)
610{
611 struct qed_hw_cid_data *p_rx_cid;
612 u64 init_prod_val = 0;
613 u16 abs_l2_queue = 0;
614 u8 abs_stats_id = 0;
615 int rc;
616
617 rc = qed_fw_l2_queue(p_hwfn, params->queue_id, &abs_l2_queue);
618 if (rc != 0)
619 return rc;
620
621 rc = qed_fw_vport(p_hwfn, params->vport_id, &abs_stats_id);
622 if (rc != 0)
623 return rc;
624
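 /* The Rx producers for this queue live in MSTORM RAM; hand the caller a
  * pointer into the BAR0 GTT window (GTT_BAR0_MAP_REG_MSDM_RAM) so producer
  * updates can be written directly, and zero the initial producer values.
  */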
625 *pp_prod = (u8 __iomem *)p_hwfn->regview +
626 GTT_BAR0_MAP_REG_MSDM_RAM +
627 MSTORM_PRODS_OFFSET(abs_l2_queue);
628
629 /* Init the rcq, rx bd and rx sge (if valid) producers to 0 */
630 __internal_ram_wr(p_hwfn, *pp_prod, sizeof(u64),
631 (u32 *)(&init_prod_val));
632
633 /* Allocate a CID for the queue */
634 p_rx_cid = &p_hwfn->p_rx_cids[params->queue_id];
635 rc = qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_ETH,
636 &p_rx_cid->cid);
637 if (rc) {
638 DP_NOTICE(p_hwfn, "Failed to acquire cid\n");
639 return rc;
640 }
641 p_rx_cid->b_cid_allocated = true;
642
643 rc = qed_sp_eth_rxq_start_ramrod(p_hwfn,
644 opaque_fid,
645 p_rx_cid->cid,
646 params,
647 abs_stats_id,
648 bd_max_bytes,
649 bd_chain_phys_addr,
650 cqe_pbl_addr,
651 cqe_pbl_size);
652
653 if (rc != 0)
654 qed_sp_release_queue_cid(p_hwfn, p_rx_cid);
655
656 return rc;
657}
658
659static int qed_sp_eth_rx_queue_stop(struct qed_hwfn *p_hwfn,
660 u16 rx_queue_id,
661 bool eq_completion_only,
662 bool cqe_completion)
663{
664 struct qed_hw_cid_data *p_rx_cid = &p_hwfn->p_rx_cids[rx_queue_id];
665 struct rx_queue_stop_ramrod_data *p_ramrod = NULL;
666 struct qed_spq_entry *p_ent = NULL;
667 struct qed_sp_init_data init_data;
668 u16 abs_rx_q_id = 0;
669 int rc = -EINVAL;
670
671 /* Get SPQ entry */
672 memset(&init_data, 0, sizeof(init_data));
673 init_data.cid = p_rx_cid->cid;
674 init_data.opaque_fid = p_rx_cid->opaque_fid;
675 init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
676
677 rc = qed_sp_init_request(p_hwfn, &p_ent,
678 ETH_RAMROD_RX_QUEUE_STOP,
679 PROTOCOLID_ETH, &init_data);
680 if (rc)
681 return rc;
682
683 p_ramrod = &p_ent->ramrod.rx_queue_stop;
684
685 qed_fw_vport(p_hwfn, p_rx_cid->vport_id, &p_ramrod->vport_id);
686 qed_fw_l2_queue(p_hwfn, rx_queue_id, &abs_rx_q_id);
687 p_ramrod->rx_queue_id = cpu_to_le16(abs_rx_q_id);
688
689 /* Cleaning the queue requires the completion to arrive there.
690 * In addition, VFs require the answer to come as eqe to PF.
691 */
692 p_ramrod->complete_cqe_flg =
693 (!!(p_rx_cid->opaque_fid == p_hwfn->hw_info.opaque_fid) &&
694 !eq_completion_only) || cqe_completion;
695 p_ramrod->complete_event_flg =
696 !(p_rx_cid->opaque_fid == p_hwfn->hw_info.opaque_fid) ||
697 eq_completion_only;
698
699 rc = qed_spq_post(p_hwfn, p_ent, NULL);
700 if (rc)
701 return rc;
702
703 return qed_sp_release_queue_cid(p_hwfn, p_rx_cid);
704}
705
706static int
707qed_sp_eth_txq_start_ramrod(struct qed_hwfn *p_hwfn,
708 u16 opaque_fid,
709 u32 cid,
710 struct qed_queue_start_common_params *p_params,
711 u8 stats_id,
712 dma_addr_t pbl_addr,
713 u16 pbl_size,
714 union qed_qm_pq_params *p_pq_params)
715{
716 struct tx_queue_start_ramrod_data *p_ramrod = NULL;
717 struct qed_spq_entry *p_ent = NULL;
718 struct qed_sp_init_data init_data;
719 struct qed_hw_cid_data *p_tx_cid;
720 u8 abs_vport_id;
721 int rc = -EINVAL;
722 u16 pq_id;
723
724 /* Store information for the stop */
725 p_tx_cid = &p_hwfn->p_tx_cids[p_params->queue_id];
726 p_tx_cid->cid = cid;
727 p_tx_cid->opaque_fid = opaque_fid;
728
729 rc = qed_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id);
730 if (rc)
731 return rc;
732
733 /* Get SPQ entry */
734 memset(&init_data, 0, sizeof(init_data));
735 init_data.cid = cid;
736 init_data.opaque_fid = opaque_fid;
737 init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
738
739 rc = qed_sp_init_request(p_hwfn, &p_ent,
740 ETH_RAMROD_TX_QUEUE_START,
741 PROTOCOLID_ETH, &init_data);
742 if (rc)
743 return rc;
744
745 p_ramrod = &p_ent->ramrod.tx_queue_start;
746 p_ramrod->vport_id = abs_vport_id;
747
748 p_ramrod->sb_id = cpu_to_le16(p_params->sb);
749 p_ramrod->sb_index = p_params->sb_idx;
750 p_ramrod->stats_counter_id = stats_id;
751
752 p_ramrod->pbl_size = cpu_to_le16(pbl_size);
753 DMA_REGPAIR_LE(p_ramrod->pbl_base_addr, pbl_addr);
754
755 pq_id = qed_get_qm_pq(p_hwfn,
756 PROTOCOLID_ETH,
757 p_pq_params);
758 p_ramrod->qm_pq_id = cpu_to_le16(pq_id);
759
760 return qed_spq_post(p_hwfn, p_ent, NULL);
761}
762
763static int
764qed_sp_eth_tx_queue_start(struct qed_hwfn *p_hwfn,
765 u16 opaque_fid,
766 struct qed_queue_start_common_params *p_params,
767 dma_addr_t pbl_addr,
768 u16 pbl_size,
769 void __iomem **pp_doorbell)
770{
771 struct qed_hw_cid_data *p_tx_cid;
772 union qed_qm_pq_params pq_params;
773 u8 abs_stats_id = 0;
774 int rc;
775
776 rc = qed_fw_vport(p_hwfn, p_params->vport_id, &abs_stats_id);
777 if (rc)
778 return rc;
779
780 p_tx_cid = &p_hwfn->p_tx_cids[p_params->queue_id];
781 memset(p_tx_cid, 0, sizeof(*p_tx_cid));
782 memset(&pq_params, 0, sizeof(pq_params));
783
784 /* Allocate a CID for the queue */
785 rc = qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_ETH,
786 &p_tx_cid->cid);
787 if (rc) {
788 DP_NOTICE(p_hwfn, "Failed to acquire cid\n");
789 return rc;
790 }
791 p_tx_cid->b_cid_allocated = true;
792
793 DP_VERBOSE(p_hwfn, QED_MSG_SP,
794 "opaque_fid=0x%x, cid=0x%x, tx_qid=0x%x, vport_id=0x%x, sb_id=0x%x\n",
795 opaque_fid, p_tx_cid->cid,
796 p_params->queue_id, p_params->vport_id, p_params->sb);
797
798 rc = qed_sp_eth_txq_start_ramrod(p_hwfn,
799 opaque_fid,
800 p_tx_cid->cid,
801 p_params,
802 abs_stats_id,
803 pbl_addr,
804 pbl_size,
805 &pq_params);
806
807 *pp_doorbell = (u8 __iomem *)p_hwfn->doorbells +
808 qed_db_addr(p_tx_cid->cid, DQ_DEMS_LEGACY);
809
810 if (rc)
811 qed_sp_release_queue_cid(p_hwfn, p_tx_cid);
812
813 return rc;
814}
815
816static int qed_sp_eth_tx_queue_stop(struct qed_hwfn *p_hwfn,
817 u16 tx_queue_id)
818{
819 struct qed_hw_cid_data *p_tx_cid = &p_hwfn->p_tx_cids[tx_queue_id];
820 struct qed_spq_entry *p_ent = NULL;
821 struct qed_sp_init_data init_data;
822 int rc = -EINVAL;
823
824 /* Get SPQ entry */
825 memset(&init_data, 0, sizeof(init_data));
826 init_data.cid = p_tx_cid->cid;
827 init_data.opaque_fid = p_tx_cid->opaque_fid;
828 init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
829
830 rc = qed_sp_init_request(p_hwfn, &p_ent,
831 ETH_RAMROD_TX_QUEUE_STOP,
832 PROTOCOLID_ETH, &init_data);
833 if (rc)
834 return rc;
835
836 rc = qed_spq_post(p_hwfn, p_ent, NULL);
837 if (rc)
838 return rc;
839
840 return qed_sp_release_queue_cid(p_hwfn, p_tx_cid);
841}
842
843static enum eth_filter_action
844qed_filter_action(enum qed_filter_opcode opcode)
845{
846 enum eth_filter_action action = MAX_ETH_FILTER_ACTION;
847
848 switch (opcode) {
849 case QED_FILTER_ADD:
850 action = ETH_FILTER_ACTION_ADD;
851 break;
852 case QED_FILTER_REMOVE:
853 action = ETH_FILTER_ACTION_REMOVE;
854 break;
855 case QED_FILTER_FLUSH:
856 action = ETH_FILTER_ACTION_REMOVE_ALL;
857 break;
858 default:
859 action = MAX_ETH_FILTER_ACTION;
860 }
861
862 return action;
863}
864
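/* The firmware representation of a MAC address is three 16-bit words
 * (msb/mid/lsb) with the two bytes of each pair swapped, e.g.
 * aa:bb:cc:dd:ee:ff is stored as msb=bb,aa mid=dd,cc lsb=ff,ee.
 */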
865static void qed_set_fw_mac_addr(__le16 *fw_msb,
866 __le16 *fw_mid,
867 __le16 *fw_lsb,
868 u8 *mac)
869{
870 ((u8 *)fw_msb)[0] = mac[1];
871 ((u8 *)fw_msb)[1] = mac[0];
872 ((u8 *)fw_mid)[0] = mac[3];
873 ((u8 *)fw_mid)[1] = mac[2];
874 ((u8 *)fw_lsb)[0] = mac[5];
875 ((u8 *)fw_lsb)[1] = mac[4];
876}
877
878static int
879qed_filter_ucast_common(struct qed_hwfn *p_hwfn,
880 u16 opaque_fid,
881 struct qed_filter_ucast *p_filter_cmd,
882 struct vport_filter_update_ramrod_data **pp_ramrod,
883 struct qed_spq_entry **pp_ent,
884 enum spq_mode comp_mode,
885 struct qed_spq_comp_cb *p_comp_data)
886{
887 u8 vport_to_add_to = 0, vport_to_remove_from = 0;
888 struct vport_filter_update_ramrod_data *p_ramrod;
889 struct eth_filter_cmd *p_first_filter;
890 struct eth_filter_cmd *p_second_filter;
891 struct qed_sp_init_data init_data;
892 enum eth_filter_action action;
893 int rc;
894
895 rc = qed_fw_vport(p_hwfn, p_filter_cmd->vport_to_remove_from,
896 &vport_to_remove_from);
897 if (rc)
898 return rc;
899
900 rc = qed_fw_vport(p_hwfn, p_filter_cmd->vport_to_add_to,
901 &vport_to_add_to);
902 if (rc)
903 return rc;
904
905 /* Get SPQ entry */
906 memset(&init_data, 0, sizeof(init_data));
907 init_data.cid = qed_spq_get_cid(p_hwfn);
908 init_data.opaque_fid = opaque_fid;
909 init_data.comp_mode = comp_mode;
910 init_data.p_comp_data = p_comp_data;
911
912 rc = qed_sp_init_request(p_hwfn, pp_ent,
913 ETH_RAMROD_FILTERS_UPDATE,
914 PROTOCOLID_ETH, &init_data);
915 if (rc)
916 return rc;
917
918 *pp_ramrod = &(*pp_ent)->ramrod.vport_filter_update;
919 p_ramrod = *pp_ramrod;
920 p_ramrod->filter_cmd_hdr.rx = p_filter_cmd->is_rx_filter ? 1 : 0;
921 p_ramrod->filter_cmd_hdr.tx = p_filter_cmd->is_tx_filter ? 1 : 0;
922
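 /* MOVE and REPLACE each expand into two filter commands (a remove or
  * remove-all followed by an add), so they need cmd_cnt = 2; every other
  * opcode uses a single command.
  */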
923 switch (p_filter_cmd->opcode) {
924 case QED_FILTER_REPLACE:
925 case QED_FILTER_MOVE:
926 p_ramrod->filter_cmd_hdr.cmd_cnt = 2; break;
927 default:
928 p_ramrod->filter_cmd_hdr.cmd_cnt = 1; break;
929 }
930
931 p_first_filter = &p_ramrod->filter_cmds[0];
932 p_second_filter = &p_ramrod->filter_cmds[1];
933
934 switch (p_filter_cmd->type) {
935 case QED_FILTER_MAC:
936 p_first_filter->type = ETH_FILTER_TYPE_MAC; break;
937 case QED_FILTER_VLAN:
938 p_first_filter->type = ETH_FILTER_TYPE_VLAN; break;
939 case QED_FILTER_MAC_VLAN:
940 p_first_filter->type = ETH_FILTER_TYPE_PAIR; break;
941 case QED_FILTER_INNER_MAC:
942 p_first_filter->type = ETH_FILTER_TYPE_INNER_MAC; break;
943 case QED_FILTER_INNER_VLAN:
944 p_first_filter->type = ETH_FILTER_TYPE_INNER_VLAN; break;
945 case QED_FILTER_INNER_PAIR:
946 p_first_filter->type = ETH_FILTER_TYPE_INNER_PAIR; break;
947 case QED_FILTER_INNER_MAC_VNI_PAIR:
948 p_first_filter->type = ETH_FILTER_TYPE_INNER_MAC_VNI_PAIR;
949 break;
950 case QED_FILTER_MAC_VNI_PAIR:
951 p_first_filter->type = ETH_FILTER_TYPE_MAC_VNI_PAIR; break;
952 case QED_FILTER_VNI:
953 p_first_filter->type = ETH_FILTER_TYPE_VNI; break;
954 }
955
956 if ((p_first_filter->type == ETH_FILTER_TYPE_MAC) ||
957 (p_first_filter->type == ETH_FILTER_TYPE_PAIR) ||
958 (p_first_filter->type == ETH_FILTER_TYPE_INNER_MAC) ||
959 (p_first_filter->type == ETH_FILTER_TYPE_INNER_PAIR) ||
960 (p_first_filter->type == ETH_FILTER_TYPE_INNER_MAC_VNI_PAIR) ||
961 (p_first_filter->type == ETH_FILTER_TYPE_MAC_VNI_PAIR)) {
962 qed_set_fw_mac_addr(&p_first_filter->mac_msb,
963 &p_first_filter->mac_mid,
964 &p_first_filter->mac_lsb,
965 (u8 *)p_filter_cmd->mac);
966 }
967
968 if ((p_first_filter->type == ETH_FILTER_TYPE_VLAN) ||
969 (p_first_filter->type == ETH_FILTER_TYPE_PAIR) ||
970 (p_first_filter->type == ETH_FILTER_TYPE_INNER_VLAN) ||
971 (p_first_filter->type == ETH_FILTER_TYPE_INNER_PAIR))
972 p_first_filter->vlan_id = cpu_to_le16(p_filter_cmd->vlan);
973
974 if ((p_first_filter->type == ETH_FILTER_TYPE_INNER_MAC_VNI_PAIR) ||
975 (p_first_filter->type == ETH_FILTER_TYPE_MAC_VNI_PAIR) ||
976 (p_first_filter->type == ETH_FILTER_TYPE_VNI))
977 p_first_filter->vni = cpu_to_le32(p_filter_cmd->vni);
978
979 if (p_filter_cmd->opcode == QED_FILTER_MOVE) {
980 p_second_filter->type = p_first_filter->type;
981 p_second_filter->mac_msb = p_first_filter->mac_msb;
982 p_second_filter->mac_mid = p_first_filter->mac_mid;
983 p_second_filter->mac_lsb = p_first_filter->mac_lsb;
984 p_second_filter->vlan_id = p_first_filter->vlan_id;
985 p_second_filter->vni = p_first_filter->vni;
986
987 p_first_filter->action = ETH_FILTER_ACTION_REMOVE;
988
989 p_first_filter->vport_id = vport_to_remove_from;
990
991 p_second_filter->action = ETH_FILTER_ACTION_ADD;
992 p_second_filter->vport_id = vport_to_add_to;
993 } else if (p_filter_cmd->opcode == QED_FILTER_REPLACE) {
994 p_first_filter->vport_id = vport_to_add_to;
995 memcpy(p_second_filter, p_first_filter,
996 sizeof(*p_second_filter));
997 p_first_filter->action = ETH_FILTER_ACTION_REMOVE_ALL;
998 p_second_filter->action = ETH_FILTER_ACTION_ADD;
999 } else {
1000 action = qed_filter_action(p_filter_cmd->opcode);
1001
1002 if (action == MAX_ETH_FILTER_ACTION) {
1003 DP_NOTICE(p_hwfn,
1004 "%d is not supported yet\n",
1005 p_filter_cmd->opcode);
1006 return -EINVAL;
1007 }
1008
1009 p_first_filter->action = action;
1010 p_first_filter->vport_id = (p_filter_cmd->opcode ==
1011 QED_FILTER_REMOVE) ?
1012 vport_to_remove_from :
1013 vport_to_add_to;
1014 }
1015
1016 return 0;
1017}
1018
1019static int qed_sp_eth_filter_ucast(struct qed_hwfn *p_hwfn,
1020 u16 opaque_fid,
1021 struct qed_filter_ucast *p_filter_cmd,
1022 enum spq_mode comp_mode,
1023 struct qed_spq_comp_cb *p_comp_data)
1024{
1025 struct vport_filter_update_ramrod_data *p_ramrod = NULL;
1026 struct qed_spq_entry *p_ent = NULL;
1027 struct eth_filter_cmd_header *p_header;
1028 int rc;
1029
1030 rc = qed_filter_ucast_common(p_hwfn, opaque_fid, p_filter_cmd,
1031 &p_ramrod, &p_ent,
1032 comp_mode, p_comp_data);
1033 if (rc != 0) {
1034 DP_ERR(p_hwfn, "Uni. filter command failed %d\n", rc);
1035 return rc;
1036 }
1037 p_header = &p_ramrod->filter_cmd_hdr;
1038 p_header->assert_on_error = p_filter_cmd->assert_on_error;
1039
1040 rc = qed_spq_post(p_hwfn, p_ent, NULL);
1041 if (rc != 0) {
1042 DP_ERR(p_hwfn,
1043 "Unicast filter ADD command failed %d\n",
1044 rc);
1045 return rc;
1046 }
1047
1048 DP_VERBOSE(p_hwfn, QED_MSG_SP,
1049 "Unicast filter configured, opcode = %s, type = %s, cmd_cnt = %d, is_rx_filter = %d, is_tx_filter = %d\n",
1050 (p_filter_cmd->opcode == QED_FILTER_ADD) ? "ADD" :
1051 ((p_filter_cmd->opcode == QED_FILTER_REMOVE) ?
1052 "REMOVE" :
1053 ((p_filter_cmd->opcode == QED_FILTER_MOVE) ?
1054 "MOVE" : "REPLACE")),
1055 (p_filter_cmd->type == QED_FILTER_MAC) ? "MAC" :
1056 ((p_filter_cmd->type == QED_FILTER_VLAN) ?
1057 "VLAN" : "MAC & VLAN"),
1058 p_ramrod->filter_cmd_hdr.cmd_cnt,
1059 p_filter_cmd->is_rx_filter,
1060 p_filter_cmd->is_tx_filter);
1061 DP_VERBOSE(p_hwfn, QED_MSG_SP,
1062 "vport_to_add_to = %d, vport_to_remove_from = %d, mac = %2x:%2x:%2x:%2x:%2x:%2x, vlan = %d\n",
1063 p_filter_cmd->vport_to_add_to,
1064 p_filter_cmd->vport_to_remove_from,
1065 p_filter_cmd->mac[0],
1066 p_filter_cmd->mac[1],
1067 p_filter_cmd->mac[2],
1068 p_filter_cmd->mac[3],
1069 p_filter_cmd->mac[4],
1070 p_filter_cmd->mac[5],
1071 p_filter_cmd->vlan);
1072
1073 return 0;
1074}
1075
1076/*******************************************************************************
1077 * Description:
1078 * Calculates CRC-32C on a buffer
1079 * Note: crc32_length MUST be aligned to 8
1080 * Return: the computed CRC, or the seed unchanged if the input is invalid
1081 ******************************************************************************/
1082static u32 qed_calc_crc32c(u8 *crc32_packet,
1083 u32 crc32_length,
1084 u32 crc32_seed,
1085 u8 complement)
1086{
1087 u32 byte = 0;
1088 u32 bit = 0;
1089 u8 msb = 0;
1090 u8 current_byte = 0;
1091 u32 crc32_result = crc32_seed;
1092
1093 if ((!crc32_packet) ||
1094 (crc32_length == 0) ||
1095 ((crc32_length % 8) != 0))
1096 return crc32_result;
1097 for (byte = 0; byte < crc32_length; byte++) {
1098 current_byte = crc32_packet[byte];
1099 for (bit = 0; bit < 8; bit++) {
1100 msb = (u8)(crc32_result >> 31);
1101 crc32_result = crc32_result << 1;
1102 if (msb != (0x1 & (current_byte >> bit))) {
1103 crc32_result = crc32_result ^ CRC32_POLY;
1104 crc32_result |= 1; /*crc32_result[0] = 1;*/
1105 }
1106 }
1107 }
1108 return crc32_result;
1109}
1110
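/* qed_calc_crc32c() requires a length that is a multiple of 8, so the 6-byte
 * MAC is copied into a zero-padded 8-byte buffer before hashing.
 */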
1111static inline u32 qed_crc32c_le(u32 seed,
1112 u8 *mac,
1113 u32 len)
1114{
1115 u32 packet_buf[2] = { 0 };
1116
1117 memcpy((u8 *)(&packet_buf[0]), &mac[0], 6);
1118 return qed_calc_crc32c((u8 *)packet_buf, 8, seed, 0);
1119}
1120
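/* The approximate multicast filter is a 256-bin hash: a MAC address maps to
 * the bin given by the low 8 bits of its CRC-32C, and the bins are packed
 * into ETH_MULTICAST_MAC_BINS_IN_REGS 32-bit words in the ramrod.
 */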
1121static u8 qed_mcast_bin_from_mac(u8 *mac)
1122{
1123 u32 crc = qed_crc32c_le(ETH_MULTICAST_BIN_FROM_MAC_SEED,
1124 mac, ETH_ALEN);
1125
1126 return crc & 0xff;
1127}
1128
1129static int
1130qed_sp_eth_filter_mcast(struct qed_hwfn *p_hwfn,
1131 u16 opaque_fid,
1132 struct qed_filter_mcast *p_filter_cmd,
1133 enum spq_mode comp_mode,
1134 struct qed_spq_comp_cb *p_comp_data)
1135{
1136 unsigned long bins[ETH_MULTICAST_MAC_BINS_IN_REGS];
1137 struct vport_update_ramrod_data *p_ramrod = NULL;
1138 struct qed_spq_entry *p_ent = NULL;
1139 struct qed_sp_init_data init_data;
1140 u8 abs_vport_id = 0;
1141 int rc, i;
1142
1143 if (p_filter_cmd->opcode == QED_FILTER_ADD) {
1144 rc = qed_fw_vport(p_hwfn, p_filter_cmd->vport_to_add_to,
1145 &abs_vport_id);
1146 if (rc)
1147 return rc;
1148 } else {
1149 rc = qed_fw_vport(p_hwfn, p_filter_cmd->vport_to_remove_from,
1150 &abs_vport_id);
1151 if (rc)
1152 return rc;
1153 }
1154
1155 /* Get SPQ entry */
1156 memset(&init_data, 0, sizeof(init_data));
1157 init_data.cid = qed_spq_get_cid(p_hwfn);
1158 init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
1159 init_data.comp_mode = comp_mode;
1160 init_data.p_comp_data = p_comp_data;
1161
1162 rc = qed_sp_init_request(p_hwfn, &p_ent,
1163 ETH_RAMROD_VPORT_UPDATE,
1164 PROTOCOLID_ETH, &init_data);
1165 if (rc) {
1166 DP_ERR(p_hwfn, "Multi-cast command failed %d\n", rc);
1167 return rc;
1168 }
1169
1170 p_ramrod = &p_ent->ramrod.vport_update;
1171 p_ramrod->common.update_approx_mcast_flg = 1;
1172
1173 /* explicitly clear out the entire vector */
1174 memset(&p_ramrod->approx_mcast.bins, 0,
1175 sizeof(p_ramrod->approx_mcast.bins));
1176 memset(bins, 0, sizeof(unsigned long) *
1177 ETH_MULTICAST_MAC_BINS_IN_REGS);
1178 /* The filter ADD op is an explicit set op, and it removes
1179 * any existing filters for the vport
1180 */
1181 if (p_filter_cmd->opcode == QED_FILTER_ADD) {
1182 for (i = 0; i < p_filter_cmd->num_mc_addrs; i++) {
1183 u32 bit;
1184
1185 bit = qed_mcast_bin_from_mac(p_filter_cmd->mac[i]);
1186 __set_bit(bit, bins);
1187 }
1188
1189 /* Convert to correct endianness */
1190 for (i = 0; i < ETH_MULTICAST_MAC_BINS_IN_REGS; i++) {
1191 u32 *p_bins = (u32 *)bins;
1192 struct vport_update_ramrod_mcast *approx_mcast;
1193
1194 approx_mcast = &p_ramrod->approx_mcast;
1195 approx_mcast->bins[i] = cpu_to_le32(p_bins[i]);
1196 }
1197 }
1198
1199 p_ramrod->common.vport_id = abs_vport_id;
1200
1201 return qed_spq_post(p_hwfn, p_ent, NULL);
1202}
1203
1204static int
1205qed_filter_mcast_cmd(struct qed_dev *cdev,
1206 struct qed_filter_mcast *p_filter_cmd,
1207 enum spq_mode comp_mode,
1208 struct qed_spq_comp_cb *p_comp_data)
1209{
1210 int rc = 0;
1211 int i;
1212
1213 /* only ADD and REMOVE operations are supported for multi-cast */
1214 if ((p_filter_cmd->opcode != QED_FILTER_ADD &&
1215 (p_filter_cmd->opcode != QED_FILTER_REMOVE)) ||
1216 (p_filter_cmd->num_mc_addrs > QED_MAX_MC_ADDRS))
1217 return -EINVAL;
1218
1219 for_each_hwfn(cdev, i) {
1220 struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
1221
1222 u16 opaque_fid;
1223
1224 if (rc != 0)
1225 break;
1226
1227 opaque_fid = p_hwfn->hw_info.opaque_fid;
1228
1229 rc = qed_sp_eth_filter_mcast(p_hwfn,
1230 opaque_fid,
1231 p_filter_cmd,
1232 comp_mode,
1233 p_comp_data);
1234 }
1235 return rc;
1236}
1237
1238static int qed_filter_ucast_cmd(struct qed_dev *cdev,
1239 struct qed_filter_ucast *p_filter_cmd,
1240 enum spq_mode comp_mode,
1241 struct qed_spq_comp_cb *p_comp_data)
1242{
1243 int rc = 0;
1244 int i;
1245
1246 for_each_hwfn(cdev, i) {
1247 struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
1248 u16 opaque_fid;
1249
1250 if (rc != 0)
1251 break;
1252
1253 opaque_fid = p_hwfn->hw_info.opaque_fid;
1254
1255 rc = qed_sp_eth_filter_ucast(p_hwfn,
1256 opaque_fid,
1257 p_filter_cmd,
1258 comp_mode,
1259 p_comp_data);
1260 }
1261
1262 return rc;
1263}
1264
1265/* Statistics related code */
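/* Per-queue and per-port counters live in the PSTORM/TSTORM/USTORM/MSTORM
 * RAM regions. Each helper below computes its region's address and length,
 * copies the block out through the PTT window and accumulates the values
 * into struct qed_eth_stats.
 */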
1266static void __qed_get_vport_pstats_addrlen(struct qed_hwfn *p_hwfn,
1267 u32 *p_addr,
1268 u32 *p_len,
1269 u16 statistics_bin)
1270{
1271 *p_addr = BAR0_MAP_REG_PSDM_RAM +
1272 PSTORM_QUEUE_STAT_OFFSET(statistics_bin);
1273 *p_len = sizeof(struct eth_pstorm_per_queue_stat);
1274}
1275
1276static void __qed_get_vport_pstats(struct qed_hwfn *p_hwfn,
1277 struct qed_ptt *p_ptt,
1278 struct qed_eth_stats *p_stats,
1279 u16 statistics_bin)
1280{
1281 struct eth_pstorm_per_queue_stat pstats;
1282 u32 pstats_addr = 0, pstats_len = 0;
1283
1284 __qed_get_vport_pstats_addrlen(p_hwfn, &pstats_addr, &pstats_len,
1285 statistics_bin);
1286
1287 memset(&pstats, 0, sizeof(pstats));
1288 qed_memcpy_from(p_hwfn, p_ptt, &pstats,
1289 pstats_addr, pstats_len);
1290
1291 p_stats->tx_ucast_bytes +=
1292 HILO_64_REGPAIR(pstats.sent_ucast_bytes);
1293 p_stats->tx_mcast_bytes +=
1294 HILO_64_REGPAIR(pstats.sent_mcast_bytes);
1295 p_stats->tx_bcast_bytes +=
1296 HILO_64_REGPAIR(pstats.sent_bcast_bytes);
1297 p_stats->tx_ucast_pkts +=
1298 HILO_64_REGPAIR(pstats.sent_ucast_pkts);
1299 p_stats->tx_mcast_pkts +=
1300 HILO_64_REGPAIR(pstats.sent_mcast_pkts);
1301 p_stats->tx_bcast_pkts +=
1302 HILO_64_REGPAIR(pstats.sent_bcast_pkts);
1303 p_stats->tx_err_drop_pkts +=
1304 HILO_64_REGPAIR(pstats.error_drop_pkts);
1305}
1306
1307static void __qed_get_vport_tstats_addrlen(struct qed_hwfn *p_hwfn,
1308 u32 *p_addr,
1309 u32 *p_len)
1310{
1311 *p_addr = BAR0_MAP_REG_TSDM_RAM +
1312 TSTORM_PORT_STAT_OFFSET(MFW_PORT(p_hwfn));
1313 *p_len = sizeof(struct tstorm_per_port_stat);
1314}
1315
1316static void __qed_get_vport_tstats(struct qed_hwfn *p_hwfn,
1317 struct qed_ptt *p_ptt,
1318 struct qed_eth_stats *p_stats,
1319 u16 statistics_bin)
1320{
1321 u32 tstats_addr = 0, tstats_len = 0;
1322 struct tstorm_per_port_stat tstats;
1323
1324 __qed_get_vport_tstats_addrlen(p_hwfn, &tstats_addr, &tstats_len);
1325
1326 memset(&tstats, 0, sizeof(tstats));
1327 qed_memcpy_from(p_hwfn, p_ptt, &tstats,
1328 tstats_addr, tstats_len);
1329
1330 p_stats->mftag_filter_discards +=
1331 HILO_64_REGPAIR(tstats.mftag_filter_discard);
1332 p_stats->mac_filter_discards +=
1333 HILO_64_REGPAIR(tstats.eth_mac_filter_discard);
1334}
1335
1336static void __qed_get_vport_ustats_addrlen(struct qed_hwfn *p_hwfn,
1337 u32 *p_addr,
1338 u32 *p_len,
1339 u16 statistics_bin)
1340{
1341 *p_addr = BAR0_MAP_REG_USDM_RAM +
1342 USTORM_QUEUE_STAT_OFFSET(statistics_bin);
1343 *p_len = sizeof(struct eth_ustorm_per_queue_stat);
1344}
1345
1346static void __qed_get_vport_ustats(struct qed_hwfn *p_hwfn,
1347 struct qed_ptt *p_ptt,
1348 struct qed_eth_stats *p_stats,
1349 u16 statistics_bin)
1350{
1351 struct eth_ustorm_per_queue_stat ustats;
1352 u32 ustats_addr = 0, ustats_len = 0;
1353
1354 __qed_get_vport_ustats_addrlen(p_hwfn, &ustats_addr, &ustats_len,
1355 statistics_bin);
1356
1357 memset(&ustats, 0, sizeof(ustats));
1358 qed_memcpy_from(p_hwfn, p_ptt, &ustats,
1359 ustats_addr, ustats_len);
1360
1361 p_stats->rx_ucast_bytes +=
1362 HILO_64_REGPAIR(ustats.rcv_ucast_bytes);
1363 p_stats->rx_mcast_bytes +=
1364 HILO_64_REGPAIR(ustats.rcv_mcast_bytes);
1365 p_stats->rx_bcast_bytes +=
1366 HILO_64_REGPAIR(ustats.rcv_bcast_bytes);
1367 p_stats->rx_ucast_pkts +=
1368 HILO_64_REGPAIR(ustats.rcv_ucast_pkts);
1369 p_stats->rx_mcast_pkts +=
1370 HILO_64_REGPAIR(ustats.rcv_mcast_pkts);
1371 p_stats->rx_bcast_pkts +=
1372 HILO_64_REGPAIR(ustats.rcv_bcast_pkts);
1373}
1374
1375static void __qed_get_vport_mstats_addrlen(struct qed_hwfn *p_hwfn,
1376 u32 *p_addr,
1377 u32 *p_len,
1378 u16 statistics_bin)
1379{
1380 *p_addr = BAR0_MAP_REG_MSDM_RAM +
1381 MSTORM_QUEUE_STAT_OFFSET(statistics_bin);
1382 *p_len = sizeof(struct eth_mstorm_per_queue_stat);
1383}
1384
1385static void __qed_get_vport_mstats(struct qed_hwfn *p_hwfn,
1386 struct qed_ptt *p_ptt,
1387 struct qed_eth_stats *p_stats,
1388 u16 statistics_bin)
1389{
1390 struct eth_mstorm_per_queue_stat mstats;
1391 u32 mstats_addr = 0, mstats_len = 0;
1392
1393 __qed_get_vport_mstats_addrlen(p_hwfn, &mstats_addr, &mstats_len,
1394 statistics_bin);
1395
1396 memset(&mstats, 0, sizeof(mstats));
1397 qed_memcpy_from(p_hwfn, p_ptt, &mstats,
1398 mstats_addr, mstats_len);
1399
1400 p_stats->no_buff_discards +=
1401 HILO_64_REGPAIR(mstats.no_buff_discard);
1402 p_stats->packet_too_big_discard +=
1403 HILO_64_REGPAIR(mstats.packet_too_big_discard);
1404 p_stats->ttl0_discard +=
1405 HILO_64_REGPAIR(mstats.ttl0_discard);
1406 p_stats->tpa_coalesced_pkts +=
1407 HILO_64_REGPAIR(mstats.tpa_coalesced_pkts);
1408 p_stats->tpa_coalesced_events +=
1409 HILO_64_REGPAIR(mstats.tpa_coalesced_events);
1410 p_stats->tpa_aborts_num +=
1411 HILO_64_REGPAIR(mstats.tpa_aborts_num);
1412 p_stats->tpa_coalesced_bytes +=
1413 HILO_64_REGPAIR(mstats.tpa_coalesced_bytes);
1414}
1415
1416static void __qed_get_vport_port_stats(struct qed_hwfn *p_hwfn,
1417 struct qed_ptt *p_ptt,
1418 struct qed_eth_stats *p_stats)
1419{
1420 struct port_stats port_stats;
1421 int j;
1422
1423 memset(&port_stats, 0, sizeof(port_stats));
1424
1425 qed_memcpy_from(p_hwfn, p_ptt, &port_stats,
1426 p_hwfn->mcp_info->port_addr +
1427 offsetof(struct public_port, stats),
1428 sizeof(port_stats));
1429
1430 p_stats->rx_64_byte_packets += port_stats.pmm.r64;
1431 p_stats->rx_127_byte_packets += port_stats.pmm.r127;
1432 p_stats->rx_255_byte_packets += port_stats.pmm.r255;
1433 p_stats->rx_511_byte_packets += port_stats.pmm.r511;
1434 p_stats->rx_1023_byte_packets += port_stats.pmm.r1023;
1435 p_stats->rx_1518_byte_packets += port_stats.pmm.r1518;
1436 p_stats->rx_1522_byte_packets += port_stats.pmm.r1522;
1437 p_stats->rx_2047_byte_packets += port_stats.pmm.r2047;
1438 p_stats->rx_4095_byte_packets += port_stats.pmm.r4095;
1439 p_stats->rx_9216_byte_packets += port_stats.pmm.r9216;
1440 p_stats->rx_16383_byte_packets += port_stats.pmm.r16383;
1441 p_stats->rx_crc_errors += port_stats.pmm.rfcs;
1442 p_stats->rx_mac_crtl_frames += port_stats.pmm.rxcf;
1443 p_stats->rx_pause_frames += port_stats.pmm.rxpf;
1444 p_stats->rx_pfc_frames += port_stats.pmm.rxpp;
1445 p_stats->rx_align_errors += port_stats.pmm.raln;
1446 p_stats->rx_carrier_errors += port_stats.pmm.rfcr;
1447 p_stats->rx_oversize_packets += port_stats.pmm.rovr;
1448 p_stats->rx_jabbers += port_stats.pmm.rjbr;
1449 p_stats->rx_undersize_packets += port_stats.pmm.rund;
1450 p_stats->rx_fragments += port_stats.pmm.rfrg;
1451 p_stats->tx_64_byte_packets += port_stats.pmm.t64;
1452 p_stats->tx_65_to_127_byte_packets += port_stats.pmm.t127;
1453 p_stats->tx_128_to_255_byte_packets += port_stats.pmm.t255;
1454 p_stats->tx_256_to_511_byte_packets += port_stats.pmm.t511;
1455 p_stats->tx_512_to_1023_byte_packets += port_stats.pmm.t1023;
1456 p_stats->tx_1024_to_1518_byte_packets += port_stats.pmm.t1518;
1457 p_stats->tx_1519_to_2047_byte_packets += port_stats.pmm.t2047;
1458 p_stats->tx_2048_to_4095_byte_packets += port_stats.pmm.t4095;
1459 p_stats->tx_4096_to_9216_byte_packets += port_stats.pmm.t9216;
1460 p_stats->tx_9217_to_16383_byte_packets += port_stats.pmm.t16383;
1461 p_stats->tx_pause_frames += port_stats.pmm.txpf;
1462 p_stats->tx_pfc_frames += port_stats.pmm.txpp;
1463 p_stats->tx_lpi_entry_count += port_stats.pmm.tlpiec;
1464 p_stats->tx_total_collisions += port_stats.pmm.tncl;
1465 p_stats->rx_mac_bytes += port_stats.pmm.rbyte;
1466 p_stats->rx_mac_uc_packets += port_stats.pmm.rxuca;
1467 p_stats->rx_mac_mc_packets += port_stats.pmm.rxmca;
1468 p_stats->rx_mac_bc_packets += port_stats.pmm.rxbca;
1469 p_stats->rx_mac_frames_ok += port_stats.pmm.rxpok;
1470 p_stats->tx_mac_bytes += port_stats.pmm.tbyte;
1471 p_stats->tx_mac_uc_packets += port_stats.pmm.txuca;
1472 p_stats->tx_mac_mc_packets += port_stats.pmm.txmca;
1473 p_stats->tx_mac_bc_packets += port_stats.pmm.txbca;
1474 p_stats->tx_mac_ctrl_frames += port_stats.pmm.txcf;
1475 for (j = 0; j < 8; j++) {
1476 p_stats->brb_truncates += port_stats.brb.brb_truncate[j];
1477 p_stats->brb_discards += port_stats.brb.brb_discard[j];
1478 }
1479}
1480
1481static void __qed_get_vport_stats(struct qed_hwfn *p_hwfn,
1482 struct qed_ptt *p_ptt,
1483 struct qed_eth_stats *stats,
1484 u16 statistics_bin)
1485{
1486 __qed_get_vport_mstats(p_hwfn, p_ptt, stats, statistics_bin);
1487 __qed_get_vport_ustats(p_hwfn, p_ptt, stats, statistics_bin);
1488 __qed_get_vport_tstats(p_hwfn, p_ptt, stats, statistics_bin);
1489 __qed_get_vport_pstats(p_hwfn, p_ptt, stats, statistics_bin);
1490
1491 if (p_hwfn->mcp_info)
1492 __qed_get_vport_port_stats(p_hwfn, p_ptt, stats);
1493}
1494
1495static void _qed_get_vport_stats(struct qed_dev *cdev,
1496 struct qed_eth_stats *stats)
1497{
1498 u8 fw_vport = 0;
1499 int i;
1500
1501 memset(stats, 0, sizeof(*stats));
1502
1503 for_each_hwfn(cdev, i) {
1504 struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
1505 struct qed_ptt *p_ptt;
1506
1507 /* The main vport uses relative index 0; translate it to the absolute vport */
1508 if (qed_fw_vport(p_hwfn, 0, &fw_vport)) {
1509 DP_ERR(p_hwfn, "No vport available!\n");
1510 continue;
1511 }
1512
1513 p_ptt = qed_ptt_acquire(p_hwfn);
1514 if (!p_ptt) {
1515 DP_ERR(p_hwfn, "Failed to acquire ptt\n");
1516 continue;
1517 }
1518
1519 __qed_get_vport_stats(p_hwfn, p_ptt, stats, fw_vport);
1520
1521 qed_ptt_release(p_hwfn, p_ptt);
1522 }
1523}
1524
1525void qed_get_vport_stats(struct qed_dev *cdev,
1526 struct qed_eth_stats *stats)
1527{
1528 u32 i;
1529
1530 if (!cdev) {
1531 memset(stats, 0, sizeof(*stats));
1532 return;
1533 }
1534
1535 _qed_get_vport_stats(cdev, stats);
1536
1537 if (!cdev->reset_stats)
1538 return;
1539
1540 /* Reduce the statistics baseline */
1541 for (i = 0; i < sizeof(struct qed_eth_stats) / sizeof(u64); i++)
1542 ((u64 *)stats)[i] -= ((u64 *)cdev->reset_stats)[i];
1543}
1544
1545/* Zeroes the V-PORT specific portion of stats (port stats remain untouched) */
1546void qed_reset_vport_stats(struct qed_dev *cdev)
1547{
1548 int i;
1549
1550 for_each_hwfn(cdev, i) {
1551 struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
1552 struct eth_mstorm_per_queue_stat mstats;
1553 struct eth_ustorm_per_queue_stat ustats;
1554 struct eth_pstorm_per_queue_stat pstats;
1555 struct qed_ptt *p_ptt = qed_ptt_acquire(p_hwfn);
1556 u32 addr = 0, len = 0;
1557
1558 if (!p_ptt) {
1559 DP_ERR(p_hwfn, "Failed to acquire ptt\n");
1560 continue;
1561 }
1562
1563 memset(&mstats, 0, sizeof(mstats));
1564 __qed_get_vport_mstats_addrlen(p_hwfn, &addr, &len, 0);
1565 qed_memcpy_to(p_hwfn, p_ptt, addr, &mstats, len);
1566
1567 memset(&ustats, 0, sizeof(ustats));
1568 __qed_get_vport_ustats_addrlen(p_hwfn, &addr, &len, 0);
1569 qed_memcpy_to(p_hwfn, p_ptt, addr, &ustats, len);
1570
1571 memset(&pstats, 0, sizeof(pstats));
1572 __qed_get_vport_pstats_addrlen(p_hwfn, &addr, &len, 0);
1573 qed_memcpy_to(p_hwfn, p_ptt, addr, &pstats, len);
1574
1575 qed_ptt_release(p_hwfn, p_ptt);
1576 }
1577
1578 /* PORT statistics are not necessarily reset, so we need to
1579 * read and create a baseline for future statistics.
1580 */
1581 if (!cdev->reset_stats)
1582 DP_INFO(cdev, "Reset stats not allocated\n");
1583 else
1584 _qed_get_vport_stats(cdev, cdev->reset_stats);
1585}
1586
1587static int qed_fill_eth_dev_info(struct qed_dev *cdev,
1588 struct qed_dev_eth_info *info)
1589{
1590 int i;
1591
1592 memset(info, 0, sizeof(*info));
1593
1594 info->num_tc = 1;
1595
1596 if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) {
1597 for_each_hwfn(cdev, i)
1598 info->num_queues += FEAT_NUM(&cdev->hwfns[i],
1599 QED_PF_L2_QUE);
1600 if (cdev->int_params.fp_msix_cnt)
1601 info->num_queues = min_t(u8, info->num_queues,
1602 cdev->int_params.fp_msix_cnt);
1603 } else {
1604 info->num_queues = cdev->num_hwfns;
1605 }
1606
1607 info->num_vlan_filters = RESC_NUM(&cdev->hwfns[0], QED_VLAN);
1608 ether_addr_copy(info->port_mac,
1609 cdev->hwfns[0].hw_info.hw_mac_addr);
1610
1611 qed_fill_dev_info(cdev, &info->common);
1612
1613 return 0;
1614}
1615
1616static void qed_register_eth_ops(struct qed_dev *cdev,
1617 struct qed_eth_cb_ops *ops,
1618 void *cookie)
1619{
1620 cdev->protocol_ops.eth = ops;
1621 cdev->ops_cookie = cookie;
1622}
1623
1624static int qed_start_vport(struct qed_dev *cdev,
1625 struct qed_start_vport_params *params)
1626{
1627 int rc, i;
1628
1629 for_each_hwfn(cdev, i) {
1630 struct qed_sp_vport_start_params start = { 0 };
1631 struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
1632
1633 start.tpa_mode = params->gro_enable ? QED_TPA_MODE_GRO :
1634 QED_TPA_MODE_NONE;
1635 start.remove_inner_vlan = params->remove_inner_vlan;
1636 start.drop_ttl0 = params->drop_ttl0;
1637 start.opaque_fid = p_hwfn->hw_info.opaque_fid;
1638 start.concrete_fid = p_hwfn->hw_info.concrete_fid;
1639 start.vport_id = params->vport_id;
1640 start.max_buffers_per_cqe = 16;
1641 start.mtu = params->mtu;
1642
1643 rc = qed_sp_vport_start(p_hwfn, &start);
1644 if (rc) {
1645 DP_ERR(cdev, "Failed to start VPORT\n");
1646 return rc;
1647 }
1648
1649 qed_hw_start_fastpath(p_hwfn);
1650
1651 DP_VERBOSE(cdev, (QED_MSG_SPQ | NETIF_MSG_IFUP),
1652 "Started V-PORT %d with MTU %d\n",
1653 start.vport_id, start.mtu);
1654 }
1655
1656 qed_reset_vport_stats(cdev);
1657
1658 return 0;
1659}
1660
1661static int qed_stop_vport(struct qed_dev *cdev,
1662 u8 vport_id)
1663{
1664 int rc, i;
1665
1666 for_each_hwfn(cdev, i) {
1667 struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
1668
1669 rc = qed_sp_vport_stop(p_hwfn,
1670 p_hwfn->hw_info.opaque_fid,
1671 vport_id);
1672
1673 if (rc) {
1674 DP_ERR(cdev, "Failed to stop VPORT\n");
1675 return rc;
1676 }
1677 }
1678 return 0;
1679}
1680
1681static int qed_update_vport(struct qed_dev *cdev,
1682 struct qed_update_vport_params *params)
1683{
1684 struct qed_sp_vport_update_params sp_params;
1685 struct qed_rss_params sp_rss_params;
1686 int rc, i;
1687
1688 if (!cdev)
1689 return -ENODEV;
1690
1691 memset(&sp_params, 0, sizeof(sp_params));
1692 memset(&sp_rss_params, 0, sizeof(sp_rss_params));
1693
1694 /* Translate protocol params into sp params */
1695 sp_params.vport_id = params->vport_id;
1696 sp_params.update_vport_active_rx_flg =
1697 params->update_vport_active_flg;
1698 sp_params.update_vport_active_tx_flg =
1699 params->update_vport_active_flg;
1700 sp_params.vport_active_rx_flg = params->vport_active_flg;
1701 sp_params.vport_active_tx_flg = params->vport_active_flg;
1702 sp_params.accept_any_vlan = params->accept_any_vlan;
1703 sp_params.update_accept_any_vlan_flg =
1704 params->update_accept_any_vlan_flg;
1705
1706 /* RSS - is a bit tricky, since upper-layer isn't familiar with hwfns.
1707 * We need to re-fix the rss values per engine for CMT.
1708 */
1709 if (cdev->num_hwfns > 1 && params->update_rss_flg) {
1710 struct qed_update_vport_rss_params *rss =
1711 &params->rss_params;
1712 int k, max = 0;
1713
1714 /* Find largest entry, since it's possible RSS needs to
1715 * be disabled [in case only 1 queue per-hwfn]
1716 */
1717 for (k = 0; k < QED_RSS_IND_TABLE_SIZE; k++)
1718 max = (max > rss->rss_ind_table[k]) ?
1719 max : rss->rss_ind_table[k];
1720
1721 /* Either fix RSS values or disable RSS */
1722 if (cdev->num_hwfns < max + 1) {
1723 int divisor = (max + cdev->num_hwfns - 1) /
1724 cdev->num_hwfns;
1725
1726 DP_VERBOSE(cdev, (QED_MSG_SPQ | NETIF_MSG_IFUP),
1727 "CMT - fixing RSS values (modulo %02x)\n",
1728 divisor);
1729
1730 for (k = 0; k < QED_RSS_IND_TABLE_SIZE; k++)
1731 rss->rss_ind_table[k] =
1732 rss->rss_ind_table[k] % divisor;
1733 } else {
1734 DP_VERBOSE(cdev, (QED_MSG_SPQ | NETIF_MSG_IFUP),
1735 "CMT - 1 queue per-hwfn; Disabling RSS\n");
1736 params->update_rss_flg = 0;
1737 }
1738 }
1739
1740 /* Now, update the RSS configuration for actual configuration */
1741 if (params->update_rss_flg) {
1742 sp_rss_params.update_rss_config = 1;
1743 sp_rss_params.rss_enable = 1;
1744 sp_rss_params.update_rss_capabilities = 1;
1745 sp_rss_params.update_rss_ind_table = 1;
1746 sp_rss_params.update_rss_key = 1;
1747 sp_rss_params.rss_caps = QED_RSS_IPV4 |
1748 QED_RSS_IPV6 |
1749 QED_RSS_IPV4_TCP | QED_RSS_IPV6_TCP;
1750 sp_rss_params.rss_table_size_log = 7; /* 2^7 = 128 */
1751 memcpy(sp_rss_params.rss_ind_table,
1752 params->rss_params.rss_ind_table,
1753 QED_RSS_IND_TABLE_SIZE * sizeof(u16));
1754 memcpy(sp_rss_params.rss_key, params->rss_params.rss_key,
1755 QED_RSS_KEY_SIZE * sizeof(u32));
1756 }
1757 sp_params.rss_params = &sp_rss_params;
1758
1759 for_each_hwfn(cdev, i) {
1760 struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
1761
1762 sp_params.opaque_fid = p_hwfn->hw_info.opaque_fid;
1763 rc = qed_sp_vport_update(p_hwfn, &sp_params,
1764 QED_SPQ_MODE_EBLOCK,
1765 NULL);
1766 if (rc) {
1767 DP_ERR(cdev, "Failed to update VPORT\n");
1768 return rc;
1769 }
1770
1771 DP_VERBOSE(cdev, (QED_MSG_SPQ | NETIF_MSG_IFUP),
1772 "Updated V-PORT %d: active_flag %d [update %d]\n",
1773 params->vport_id, params->vport_active_flg,
1774 params->update_vport_active_flg);
1775 }
1776
1777 return 0;
1778}
1779
1780static int qed_start_rxq(struct qed_dev *cdev,
1781 struct qed_queue_start_common_params *params,
1782 u16 bd_max_bytes,
1783 dma_addr_t bd_chain_phys_addr,
1784 dma_addr_t cqe_pbl_addr,
1785 u16 cqe_pbl_size,
1786 void __iomem **pp_prod)
1787{
1788 int rc, hwfn_index;
1789 struct qed_hwfn *p_hwfn;
1790
1791 hwfn_index = params->rss_id % cdev->num_hwfns;
1792 p_hwfn = &cdev->hwfns[hwfn_index];
1793
1794 /* Fix queue ID in 100g mode */
1795 params->queue_id /= cdev->num_hwfns;
1796
1797 rc = qed_sp_eth_rx_queue_start(p_hwfn,
1798 p_hwfn->hw_info.opaque_fid,
1799 params,
1800 bd_max_bytes,
1801 bd_chain_phys_addr,
1802 cqe_pbl_addr,
1803 cqe_pbl_size,
1804 pp_prod);
1805
1806 if (rc) {
1807 DP_ERR(cdev, "Failed to start RXQ#%d\n", params->queue_id);
1808 return rc;
1809 }
1810
1811 DP_VERBOSE(cdev, (QED_MSG_SPQ | NETIF_MSG_IFUP),
1812 "Started RX-Q %d [rss %d] on V-PORT %d and SB %d\n",
1813 params->queue_id, params->rss_id, params->vport_id,
1814 params->sb);
1815
1816 return 0;
1817}
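/* Illustrative note (not part of the original source): on a CMT device
 * with cdev->num_hwfns == 2, a request for global queue 5 (rss_id 5) is
 * steered to hwfn 5 % 2 == 1, and the queue ID seen by that hwfn becomes
 * 5 / 2 == 2, i.e. its third local queue.  On a single-hwfn device both
 * adjustments are no-ops.
 */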
1818
1819static int qed_stop_rxq(struct qed_dev *cdev,
1820 struct qed_stop_rxq_params *params)
1821{
1822 int rc, hwfn_index;
1823 struct qed_hwfn *p_hwfn;
1824
1825 hwfn_index = params->rss_id % cdev->num_hwfns;
1826 p_hwfn = &cdev->hwfns[hwfn_index];
1827
1828 rc = qed_sp_eth_rx_queue_stop(p_hwfn,
1829 params->rx_queue_id / cdev->num_hwfns,
1830 params->eq_completion_only,
1831 false);
1832 if (rc) {
1833 DP_ERR(cdev, "Failed to stop RXQ#%d\n", params->rx_queue_id);
1834 return rc;
1835 }
1836
1837 return 0;
1838}
1839
1840static int qed_start_txq(struct qed_dev *cdev,
1841 struct qed_queue_start_common_params *p_params,
1842 dma_addr_t pbl_addr,
1843 u16 pbl_size,
1844 void __iomem **pp_doorbell)
1845{
1846 struct qed_hwfn *p_hwfn;
1847 int rc, hwfn_index;
1848
1849 hwfn_index = p_params->rss_id % cdev->num_hwfns;
1850 p_hwfn = &cdev->hwfns[hwfn_index];
1851
1852 /* Fix queue ID in 100g mode */
1853 p_params->queue_id /= cdev->num_hwfns;
1854
1855 rc = qed_sp_eth_tx_queue_start(p_hwfn,
1856 p_hwfn->hw_info.opaque_fid,
1857 p_params,
1858 pbl_addr,
1859 pbl_size,
1860 pp_doorbell);
1861
1862 if (rc) {
1863 DP_ERR(cdev, "Failed to start TXQ#%d\n", p_params->queue_id);
1864 return rc;
1865 }
1866
1867 DP_VERBOSE(cdev, (QED_MSG_SPQ | NETIF_MSG_IFUP),
1868 "Started TX-Q %d [rss %d] on V-PORT %d and SB %d\n",
1869 p_params->queue_id, p_params->rss_id, p_params->vport_id,
1870 p_params->sb);
1871
1872 return 0;
1873}
1874
1875#define QED_HW_STOP_RETRY_LIMIT (10)
1876static int qed_fastpath_stop(struct qed_dev *cdev)
1877{
1878 qed_hw_stop_fastpath(cdev);
1879
1880 return 0;
1881}
1882
1883static int qed_stop_txq(struct qed_dev *cdev,
1884 struct qed_stop_txq_params *params)
1885{
1886 struct qed_hwfn *p_hwfn;
1887 int rc, hwfn_index;
1888
1889 hwfn_index = params->rss_id % cdev->num_hwfns;
1890 p_hwfn = &cdev->hwfns[hwfn_index];
1891
1892 rc = qed_sp_eth_tx_queue_stop(p_hwfn,
1893 params->tx_queue_id / cdev->num_hwfns);
1894 if (rc) {
1895 DP_ERR(cdev, "Failed to stop TXQ#%d\n", params->tx_queue_id);
1896 return rc;
1897 }
1898
1899 return 0;
1900}
1901
1902static int qed_configure_filter_rx_mode(struct qed_dev *cdev,
1903 enum qed_filter_rx_mode_type type)
1904{
1905 struct qed_filter_accept_flags accept_flags;
1906
1907 memset(&accept_flags, 0, sizeof(accept_flags));
1908
1909 accept_flags.update_rx_mode_config = 1;
1910 accept_flags.update_tx_mode_config = 1;
1911 accept_flags.rx_accept_filter = QED_ACCEPT_UCAST_MATCHED |
1912 QED_ACCEPT_MCAST_MATCHED |
1913 QED_ACCEPT_BCAST;
1914 accept_flags.tx_accept_filter = QED_ACCEPT_UCAST_MATCHED |
1915 QED_ACCEPT_MCAST_MATCHED |
1916 QED_ACCEPT_BCAST;
1917
1918 if (type == QED_FILTER_RX_MODE_TYPE_PROMISC)
1919 accept_flags.rx_accept_filter |= QED_ACCEPT_UCAST_UNMATCHED |
1920 QED_ACCEPT_MCAST_UNMATCHED;
1921 else if (type == QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC)
1922 accept_flags.rx_accept_filter |= QED_ACCEPT_MCAST_UNMATCHED;
1923
3f9b4a69 1924 return qed_filter_accept_cmd(cdev, 0, accept_flags, false, false,
1925 QED_SPQ_MODE_CB, NULL);
1926}
1927
1928static int qed_configure_filter_ucast(struct qed_dev *cdev,
1929 struct qed_filter_ucast_params *params)
1930{
1931 struct qed_filter_ucast ucast;
1932
1933 if (!params->vlan_valid && !params->mac_valid) {
1934 DP_NOTICE(
1935 cdev,
 1936 "Tried configuring a unicast filter, but neither MAC nor VLAN is set\n");
1937 return -EINVAL;
1938 }
1939
1940 memset(&ucast, 0, sizeof(ucast));
1941 switch (params->type) {
1942 case QED_FILTER_XCAST_TYPE_ADD:
1943 ucast.opcode = QED_FILTER_ADD;
1944 break;
1945 case QED_FILTER_XCAST_TYPE_DEL:
1946 ucast.opcode = QED_FILTER_REMOVE;
1947 break;
1948 case QED_FILTER_XCAST_TYPE_REPLACE:
1949 ucast.opcode = QED_FILTER_REPLACE;
1950 break;
1951 default:
1952 DP_NOTICE(cdev, "Unknown unicast filter type %d\n",
1953 params->type);
1954 }
1955
1956 if (params->vlan_valid && params->mac_valid) {
1957 ucast.type = QED_FILTER_MAC_VLAN;
1958 ether_addr_copy(ucast.mac, params->mac);
1959 ucast.vlan = params->vlan;
1960 } else if (params->mac_valid) {
1961 ucast.type = QED_FILTER_MAC;
1962 ether_addr_copy(ucast.mac, params->mac);
1963 } else {
1964 ucast.type = QED_FILTER_VLAN;
1965 ucast.vlan = params->vlan;
1966 }
1967
1968 ucast.is_rx_filter = true;
1969 ucast.is_tx_filter = true;
1970
1971 return qed_filter_ucast_cmd(cdev, &ucast, QED_SPQ_MODE_CB, NULL);
1972}
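/* Usage sketch (illustrative only, not part of the original source):
 * adding a unicast MAC filter by filling the parameter structure this
 * helper consumes.  The variable names (uparams, dev_addr) are
 * hypothetical.
 *
 *	struct qed_filter_ucast_params uparams;
 *
 *	memset(&uparams, 0, sizeof(uparams));
 *	uparams.type = QED_FILTER_XCAST_TYPE_ADD;
 *	uparams.mac_valid = 1;
 *	ether_addr_copy(uparams.mac, dev_addr);
 *	rc = qed_configure_filter_ucast(cdev, &uparams);
 */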
1973
1974static int qed_configure_filter_mcast(struct qed_dev *cdev,
1975 struct qed_filter_mcast_params *params)
1976{
1977 struct qed_filter_mcast mcast;
1978 int i;
1979
1980 memset(&mcast, 0, sizeof(mcast));
1981 switch (params->type) {
1982 case QED_FILTER_XCAST_TYPE_ADD:
1983 mcast.opcode = QED_FILTER_ADD;
1984 break;
1985 case QED_FILTER_XCAST_TYPE_DEL:
1986 mcast.opcode = QED_FILTER_REMOVE;
1987 break;
1988 default:
1989 DP_NOTICE(cdev, "Unknown multicast filter type %d\n",
1990 params->type);
1991 }
1992
1993 mcast.num_mc_addrs = params->num;
1994 for (i = 0; i < mcast.num_mc_addrs; i++)
1995 ether_addr_copy(mcast.mac[i], params->mac[i]);
1996
1997 return qed_filter_mcast_cmd(cdev, &mcast,
1998 QED_SPQ_MODE_CB, NULL);
1999}
2000
2001static int qed_configure_filter(struct qed_dev *cdev,
2002 struct qed_filter_params *params)
2003{
2004 enum qed_filter_rx_mode_type accept_flags;
2005
2006 switch (params->type) {
2007 case QED_FILTER_TYPE_UCAST:
2008 return qed_configure_filter_ucast(cdev, &params->filter.ucast);
2009 case QED_FILTER_TYPE_MCAST:
2010 return qed_configure_filter_mcast(cdev, &params->filter.mcast);
2011 case QED_FILTER_TYPE_RX_MODE:
2012 accept_flags = params->filter.accept_flags;
2013 return qed_configure_filter_rx_mode(cdev, accept_flags);
2014 default:
2015 DP_NOTICE(cdev, "Unknown filter type %d\n",
2016 (int)params->type);
2017 return -EINVAL;
2018 }
2019}
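/* Usage sketch (illustrative only, not part of the original source):
 * switching the rx-mode to promiscuous through the generic filter entry
 * point exported as .filter_config.  The variable names (edev, fparams)
 * are hypothetical.
 *
 *	struct qed_filter_params fparams;
 *
 *	memset(&fparams, 0, sizeof(fparams));
 *	fparams.type = QED_FILTER_TYPE_RX_MODE;
 *	fparams.filter.accept_flags = QED_FILTER_RX_MODE_TYPE_PROMISC;
 *	rc = edev->ops->filter_config(edev->cdev, &fparams);
 */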
2020
2021static int qed_fp_cqe_completion(struct qed_dev *dev,
2022 u8 rss_id,
2023 struct eth_slow_path_rx_cqe *cqe)
2024{
2025 return qed_eth_cqe_completion(&dev->hwfns[rss_id % dev->num_hwfns],
2026 cqe);
2027}
2028
2029static const struct qed_eth_ops qed_eth_ops_pass = {
2030 .common = &qed_common_ops_pass,
2031 .fill_dev_info = &qed_fill_eth_dev_info,
cc875c2e 2032 .register_ops = &qed_register_eth_ops,
2033 .vport_start = &qed_start_vport,
2034 .vport_stop = &qed_stop_vport,
2035 .vport_update = &qed_update_vport,
2036 .q_rx_start = &qed_start_rxq,
2037 .q_rx_stop = &qed_stop_rxq,
2038 .q_tx_start = &qed_start_txq,
2039 .q_tx_stop = &qed_stop_txq,
2040 .filter_config = &qed_configure_filter,
2041 .fastpath_stop = &qed_fastpath_stop,
2042 .eth_cqe_completion = &qed_fp_cqe_completion,
9df2ed04 2043 .get_vport_stats = &qed_get_vport_stats,
2044};
2045
2046const struct qed_eth_ops *qed_get_eth_ops(u32 version)
2047{
2048 if (version != QED_ETH_INTERFACE_VERSION) {
 2049 pr_notice("Cannot supply ethernet operations [%08x != %08x]\n",
2050 version, QED_ETH_INTERFACE_VERSION);
2051 return NULL;
2052 }
2053
2054 return &qed_eth_ops_pass;
2055}
2056EXPORT_SYMBOL(qed_get_eth_ops);
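/* Usage sketch (illustrative only, not part of the original source): a
 * consumer module binds to the L2 interface by requesting a matching
 * interface version and must handle the NULL return on mismatch.
 *
 *	const struct qed_eth_ops *ops;
 *
 *	ops = qed_get_eth_ops(QED_ETH_INTERFACE_VERSION);
 *	if (!ops)
 *		return -EINVAL;
 */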
2057
2058void qed_put_eth_ops(void)
2059{
2060 /* TODO - reference count for module? */
2061}
2062EXPORT_SYMBOL(qed_put_eth_ops);