/* QLogic qed NIC Driver
 * Copyright (c) 2015-2017  QLogic Corporation
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/types.h>
#include <asm/byteorder.h>
#include <asm/param.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/etherdevice.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/stddef.h>
#include <linux/string.h>
#include <linux/version.h>
#include <linux/workqueue.h>
#include <linux/bitops.h>
#include <linux/bug.h>
#include <linux/vmalloc.h>
#include "qed.h"
#include <linux/qed/qed_chain.h>
#include "qed_cxt.h"
#include "qed_dev_api.h"
#include <linux/qed/qed_eth_if.h>
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_int.h"
#include "qed_l2.h"
#include "qed_mcp.h"
#include "qed_reg_addr.h"
#include "qed_sp.h"
#include "qed_sriov.h"

#define QED_MAX_SGES_NUM 16
#define CRC32_POLY 0x1edc6f41

void qed_eth_queue_cid_release(struct qed_hwfn *p_hwfn,
                               struct qed_queue_cid *p_cid)
{
        /* VFs' CIDs are 0-based in PF-view, and uninitialized on VF */
        if (!p_cid->is_vf && IS_PF(p_hwfn->cdev))
                qed_cxt_release_cid(p_hwfn, p_cid->cid);
        vfree(p_cid);
}

/* This internal helper is only meant to be called directly by PFs
 * initializing CIDs for their VFs.
 */
struct qed_queue_cid *
_qed_eth_queue_to_cid(struct qed_hwfn *p_hwfn,
                      u16 opaque_fid,
                      u32 cid,
                      u8 vf_qid,
                      struct qed_queue_start_common_params *p_params)
{
        bool b_is_same = (p_hwfn->hw_info.opaque_fid == opaque_fid);
        struct qed_queue_cid *p_cid;
        int rc;

        p_cid = vzalloc(sizeof(*p_cid));
        if (!p_cid)
                return NULL;

        p_cid->opaque_fid = opaque_fid;
        p_cid->cid = cid;
        p_cid->vf_qid = vf_qid;
        p_cid->rel = *p_params;
        p_cid->p_owner = p_hwfn;

        /* Don't try calculating the absolute indices for VFs */
        if (IS_VF(p_hwfn->cdev)) {
                p_cid->abs = p_cid->rel;
                goto out;
        }

        /* Calculate the engine-absolute indices of the resources.
         * This would guarantee they're valid later on.
         * In some cases [SBs] we already have the right values.
         */
        rc = qed_fw_vport(p_hwfn, p_cid->rel.vport_id, &p_cid->abs.vport_id);
        if (rc)
                goto fail;

        rc = qed_fw_l2_queue(p_hwfn, p_cid->rel.queue_id, &p_cid->abs.queue_id);
        if (rc)
                goto fail;

        /* In case of a PF configuring its VF's queues, the stats-id is already
         * absolute [since there's a single index that's suitable per-VF].
         */
        if (b_is_same) {
                rc = qed_fw_vport(p_hwfn, p_cid->rel.stats_id,
                                  &p_cid->abs.stats_id);
                if (rc)
                        goto fail;
        } else {
                p_cid->abs.stats_id = p_cid->rel.stats_id;
        }

        /* SBs relevant information was already provided as absolute */
        p_cid->abs.sb = p_cid->rel.sb;
        p_cid->abs.sb_idx = p_cid->rel.sb_idx;

        /* This is tricky - we're actually interested in whether this is a PF
         * entry meant for the VF.
         */
        if (!b_is_same)
                p_cid->is_vf = true;
out:
        DP_VERBOSE(p_hwfn,
                   QED_MSG_SP,
                   "opaque_fid: %04x CID %08x vport %02x [%02x] qzone %04x [%04x] stats %02x [%02x] SB %04x PI %02x\n",
                   p_cid->opaque_fid,
                   p_cid->cid,
                   p_cid->rel.vport_id,
                   p_cid->abs.vport_id,
                   p_cid->rel.queue_id,
                   p_cid->abs.queue_id,
                   p_cid->rel.stats_id,
                   p_cid->abs.stats_id, p_cid->abs.sb, p_cid->abs.sb_idx);

        return p_cid;

fail:
        vfree(p_cid);
        return NULL;
}

static struct qed_queue_cid *
qed_eth_queue_to_cid(struct qed_hwfn *p_hwfn,
                     u16 opaque_fid,
                     struct qed_queue_start_common_params *p_params)
{
        struct qed_queue_cid *p_cid;
        u32 cid = 0;

        /* Get a unique firmware CID for this queue, in case it's a PF.
         * VFs don't need a CID as the queue configuration will be done
         * by the PF.
         */
        if (IS_PF(p_hwfn->cdev)) {
                if (qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_ETH, &cid)) {
                        DP_NOTICE(p_hwfn, "Failed to acquire cid\n");
                        return NULL;
                }
        }

        p_cid = _qed_eth_queue_to_cid(p_hwfn, opaque_fid, cid, 0, p_params);
        if (!p_cid && IS_PF(p_hwfn->cdev))
                qed_cxt_release_cid(p_hwfn, cid);

        return p_cid;
}

int qed_sp_eth_vport_start(struct qed_hwfn *p_hwfn,
                           struct qed_sp_vport_start_params *p_params)
{
        struct vport_start_ramrod_data *p_ramrod = NULL;
        struct qed_spq_entry *p_ent = NULL;
        struct qed_sp_init_data init_data;
        u8 abs_vport_id = 0;
        int rc = -EINVAL;
        u16 rx_mode = 0;

        rc = qed_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id);
        if (rc)
                return rc;

        memset(&init_data, 0, sizeof(init_data));
        init_data.cid = qed_spq_get_cid(p_hwfn);
        init_data.opaque_fid = p_params->opaque_fid;
        init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

        rc = qed_sp_init_request(p_hwfn, &p_ent,
                                 ETH_RAMROD_VPORT_START,
                                 PROTOCOLID_ETH, &init_data);
        if (rc)
                return rc;

        p_ramrod                = &p_ent->ramrod.vport_start;
        p_ramrod->vport_id      = abs_vport_id;

        p_ramrod->mtu                   = cpu_to_le16(p_params->mtu);
        p_ramrod->inner_vlan_removal_en = p_params->remove_inner_vlan;
        p_ramrod->drop_ttl0_en          = p_params->drop_ttl0;
        p_ramrod->untagged              = p_params->only_untagged;

        SET_FIELD(rx_mode, ETH_VPORT_RX_MODE_UCAST_DROP_ALL, 1);
        SET_FIELD(rx_mode, ETH_VPORT_RX_MODE_MCAST_DROP_ALL, 1);

        p_ramrod->rx_mode.state = cpu_to_le16(rx_mode);

        /* TPA related fields */
        memset(&p_ramrod->tpa_param, 0, sizeof(struct eth_vport_tpa_param));

        p_ramrod->tpa_param.max_buff_num = p_params->max_buffers_per_cqe;

        switch (p_params->tpa_mode) {
        case QED_TPA_MODE_GRO:
                p_ramrod->tpa_param.tpa_max_aggs_num = ETH_TPA_MAX_AGGS_NUM;
                p_ramrod->tpa_param.tpa_max_size = (u16)-1;
                p_ramrod->tpa_param.tpa_min_size_to_cont = p_params->mtu / 2;
                p_ramrod->tpa_param.tpa_min_size_to_start = p_params->mtu / 2;
                p_ramrod->tpa_param.tpa_ipv4_en_flg = 1;
                p_ramrod->tpa_param.tpa_ipv6_en_flg = 1;
                p_ramrod->tpa_param.tpa_pkt_split_flg = 1;
                p_ramrod->tpa_param.tpa_gro_consistent_flg = 1;
                break;
        default:
                break;
        }

        p_ramrod->tx_switching_en = p_params->tx_switching;

        p_ramrod->ctl_frame_mac_check_en = !!p_params->check_mac;
        p_ramrod->ctl_frame_ethtype_check_en = !!p_params->check_ethtype;

        /* Software Function ID in hwfn (PFs are 0 - 15, VFs are 16 - 135) */
        p_ramrod->sw_fid = qed_concrete_to_sw_fid(p_hwfn->cdev,
                                                  p_params->concrete_fid);

        return qed_spq_post(p_hwfn, p_ent, NULL);
}

static int qed_sp_vport_start(struct qed_hwfn *p_hwfn,
                              struct qed_sp_vport_start_params *p_params)
{
        if (IS_VF(p_hwfn->cdev)) {
                return qed_vf_pf_vport_start(p_hwfn, p_params->vport_id,
                                             p_params->mtu,
                                             p_params->remove_inner_vlan,
                                             p_params->tpa_mode,
                                             p_params->max_buffers_per_cqe,
                                             p_params->only_untagged);
        }

        return qed_sp_eth_vport_start(p_hwfn, p_params);
}

static int
qed_sp_vport_update_rss(struct qed_hwfn *p_hwfn,
                        struct vport_update_ramrod_data *p_ramrod,
                        struct qed_rss_params *p_rss)
{
        struct eth_vport_rss_config *p_config;
        u16 capabilities = 0;
        int i, table_size;
        int rc = 0;

        if (!p_rss) {
                p_ramrod->common.update_rss_flg = 0;
                return rc;
        }
        p_config = &p_ramrod->rss_config;

        BUILD_BUG_ON(QED_RSS_IND_TABLE_SIZE != ETH_RSS_IND_TABLE_ENTRIES_NUM);

        rc = qed_fw_rss_eng(p_hwfn, p_rss->rss_eng_id, &p_config->rss_id);
        if (rc)
                return rc;

        p_ramrod->common.update_rss_flg = p_rss->update_rss_config;
        p_config->update_rss_capabilities = p_rss->update_rss_capabilities;
        p_config->update_rss_ind_table = p_rss->update_rss_ind_table;
        p_config->update_rss_key = p_rss->update_rss_key;

        p_config->rss_mode = p_rss->rss_enable ?
                             ETH_VPORT_RSS_MODE_REGULAR :
                             ETH_VPORT_RSS_MODE_DISABLED;

        SET_FIELD(capabilities,
                  ETH_VPORT_RSS_CONFIG_IPV4_CAPABILITY,
                  !!(p_rss->rss_caps & QED_RSS_IPV4));
        SET_FIELD(capabilities,
                  ETH_VPORT_RSS_CONFIG_IPV6_CAPABILITY,
                  !!(p_rss->rss_caps & QED_RSS_IPV6));
        SET_FIELD(capabilities,
                  ETH_VPORT_RSS_CONFIG_IPV4_TCP_CAPABILITY,
                  !!(p_rss->rss_caps & QED_RSS_IPV4_TCP));
        SET_FIELD(capabilities,
                  ETH_VPORT_RSS_CONFIG_IPV6_TCP_CAPABILITY,
                  !!(p_rss->rss_caps & QED_RSS_IPV6_TCP));
        SET_FIELD(capabilities,
                  ETH_VPORT_RSS_CONFIG_IPV4_UDP_CAPABILITY,
                  !!(p_rss->rss_caps & QED_RSS_IPV4_UDP));
        SET_FIELD(capabilities,
                  ETH_VPORT_RSS_CONFIG_IPV6_UDP_CAPABILITY,
                  !!(p_rss->rss_caps & QED_RSS_IPV6_UDP));
        p_config->tbl_size = p_rss->rss_table_size_log;

        p_config->capabilities = cpu_to_le16(capabilities);

        DP_VERBOSE(p_hwfn, NETIF_MSG_IFUP,
                   "update rss flag %d, rss_mode = %d, update_caps = %d, capabilities = %d, update_ind = %d, update_rss_key = %d\n",
                   p_ramrod->common.update_rss_flg,
                   p_config->rss_mode,
                   p_config->update_rss_capabilities,
                   p_config->capabilities,
                   p_config->update_rss_ind_table, p_config->update_rss_key);

        table_size = min_t(int, QED_RSS_IND_TABLE_SIZE,
                           1 << p_config->tbl_size);
        for (i = 0; i < table_size; i++) {
                struct qed_queue_cid *p_queue = p_rss->rss_ind_table[i];

                if (!p_queue)
                        return -EINVAL;

                p_config->indirection_table[i] =
                    cpu_to_le16(p_queue->abs.queue_id);
        }

        DP_VERBOSE(p_hwfn, NETIF_MSG_IFUP,
                   "Configured RSS indirection table [%d entries]:\n",
                   table_size);
        for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i += 0x10) {
                DP_VERBOSE(p_hwfn,
                           NETIF_MSG_IFUP,
                           "%04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x\n",
                           le16_to_cpu(p_config->indirection_table[i]),
                           le16_to_cpu(p_config->indirection_table[i + 1]),
                           le16_to_cpu(p_config->indirection_table[i + 2]),
                           le16_to_cpu(p_config->indirection_table[i + 3]),
                           le16_to_cpu(p_config->indirection_table[i + 4]),
                           le16_to_cpu(p_config->indirection_table[i + 5]),
                           le16_to_cpu(p_config->indirection_table[i + 6]),
                           le16_to_cpu(p_config->indirection_table[i + 7]),
                           le16_to_cpu(p_config->indirection_table[i + 8]),
                           le16_to_cpu(p_config->indirection_table[i + 9]),
                           le16_to_cpu(p_config->indirection_table[i + 10]),
                           le16_to_cpu(p_config->indirection_table[i + 11]),
                           le16_to_cpu(p_config->indirection_table[i + 12]),
                           le16_to_cpu(p_config->indirection_table[i + 13]),
                           le16_to_cpu(p_config->indirection_table[i + 14]),
                           le16_to_cpu(p_config->indirection_table[i + 15]));
        }

        for (i = 0; i < 10; i++)
                p_config->rss_key[i] = cpu_to_le32(p_rss->rss_key[i]);

        return rc;
}
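
/* Illustrative sketch, not part of the driver: one way a caller could
 * populate a struct qed_rss_params for qed_sp_vport_update() so that the
 * indirection table round-robins over @num_queues previously-started
 * queue CIDs. The function name and the particular capability choices
 * are hypothetical; only fields used by qed_sp_vport_update_rss() above
 * are assumed.
 */
static void __maybe_unused
example_fill_rss_params(struct qed_rss_params *p_rss,
                        struct qed_queue_cid **pp_cids, int num_queues)
{
        int i;

        memset(p_rss, 0, sizeof(*p_rss));
        p_rss->update_rss_config = 1;
        p_rss->rss_enable = 1;
        p_rss->update_rss_capabilities = 1;
        p_rss->update_rss_ind_table = 1;
        p_rss->rss_caps = QED_RSS_IPV4 | QED_RSS_IPV6 |
                          QED_RSS_IPV4_TCP | QED_RSS_IPV6_TCP;
        p_rss->rss_table_size_log = 7; /* log2 of QED_RSS_IND_TABLE_SIZE */

        /* Spread the full indirection table across the available queues */
        for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i++)
                p_rss->rss_ind_table[i] = pp_cids[i % num_queues];
}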

static void
qed_sp_update_accept_mode(struct qed_hwfn *p_hwfn,
                          struct vport_update_ramrod_data *p_ramrod,
                          struct qed_filter_accept_flags accept_flags)
{
        p_ramrod->common.update_rx_mode_flg =
                accept_flags.update_rx_mode_config;

        p_ramrod->common.update_tx_mode_flg =
                accept_flags.update_tx_mode_config;

        /* Set Rx mode accept flags */
        if (p_ramrod->common.update_rx_mode_flg) {
                u8 accept_filter = accept_flags.rx_accept_filter;
                u16 state = 0;

                SET_FIELD(state, ETH_VPORT_RX_MODE_UCAST_DROP_ALL,
                          !(!!(accept_filter & QED_ACCEPT_UCAST_MATCHED) ||
                            !!(accept_filter & QED_ACCEPT_UCAST_UNMATCHED)));

                SET_FIELD(state, ETH_VPORT_RX_MODE_UCAST_ACCEPT_UNMATCHED,
                          !!(accept_filter & QED_ACCEPT_UCAST_UNMATCHED));

                SET_FIELD(state, ETH_VPORT_RX_MODE_MCAST_DROP_ALL,
                          !(!!(accept_filter & QED_ACCEPT_MCAST_MATCHED) ||
                            !!(accept_filter & QED_ACCEPT_MCAST_UNMATCHED)));

                SET_FIELD(state, ETH_VPORT_RX_MODE_MCAST_ACCEPT_ALL,
                          (!!(accept_filter & QED_ACCEPT_MCAST_MATCHED) &&
                           !!(accept_filter & QED_ACCEPT_MCAST_UNMATCHED)));

                SET_FIELD(state, ETH_VPORT_RX_MODE_BCAST_ACCEPT_ALL,
                          !!(accept_filter & QED_ACCEPT_BCAST));

                p_ramrod->rx_mode.state = cpu_to_le16(state);
                DP_VERBOSE(p_hwfn, QED_MSG_SP,
                           "p_ramrod->rx_mode.state = 0x%x\n", state);
        }

        /* Set Tx mode accept flags */
        if (p_ramrod->common.update_tx_mode_flg) {
                u8 accept_filter = accept_flags.tx_accept_filter;
                u16 state = 0;

                SET_FIELD(state, ETH_VPORT_TX_MODE_UCAST_DROP_ALL,
                          !!(accept_filter & QED_ACCEPT_NONE));

                SET_FIELD(state, ETH_VPORT_TX_MODE_MCAST_DROP_ALL,
                          !!(accept_filter & QED_ACCEPT_NONE));

                SET_FIELD(state, ETH_VPORT_TX_MODE_MCAST_ACCEPT_ALL,
                          (!!(accept_filter & QED_ACCEPT_MCAST_MATCHED) &&
                           !!(accept_filter & QED_ACCEPT_MCAST_UNMATCHED)));

                SET_FIELD(state, ETH_VPORT_TX_MODE_BCAST_ACCEPT_ALL,
                          !!(accept_filter & QED_ACCEPT_BCAST));

                p_ramrod->tx_mode.state = cpu_to_le16(state);
                DP_VERBOSE(p_hwfn, QED_MSG_SP,
                           "p_ramrod->tx_mode.state = 0x%x\n", state);
        }
}

static void
qed_sp_vport_update_sge_tpa(struct qed_hwfn *p_hwfn,
                            struct vport_update_ramrod_data *p_ramrod,
                            struct qed_sge_tpa_params *p_params)
{
        struct eth_vport_tpa_param *p_tpa;

        if (!p_params) {
                p_ramrod->common.update_tpa_param_flg = 0;
                p_ramrod->common.update_tpa_en_flg = 0;
                return;
        }

        p_ramrod->common.update_tpa_en_flg = p_params->update_tpa_en_flg;
        p_tpa = &p_ramrod->tpa_param;
        p_tpa->tpa_ipv4_en_flg = p_params->tpa_ipv4_en_flg;
        p_tpa->tpa_ipv6_en_flg = p_params->tpa_ipv6_en_flg;
        p_tpa->tpa_ipv4_tunn_en_flg = p_params->tpa_ipv4_tunn_en_flg;
        p_tpa->tpa_ipv6_tunn_en_flg = p_params->tpa_ipv6_tunn_en_flg;

        p_ramrod->common.update_tpa_param_flg = p_params->update_tpa_param_flg;
        p_tpa->max_buff_num = p_params->max_buffers_per_cqe;
        p_tpa->tpa_pkt_split_flg = p_params->tpa_pkt_split_flg;
        p_tpa->tpa_hdr_data_split_flg = p_params->tpa_hdr_data_split_flg;
        p_tpa->tpa_gro_consistent_flg = p_params->tpa_gro_consistent_flg;
        p_tpa->tpa_max_aggs_num = p_params->tpa_max_aggs_num;
        p_tpa->tpa_max_size = p_params->tpa_max_size;
        p_tpa->tpa_min_size_to_start = p_params->tpa_min_size_to_start;
        p_tpa->tpa_min_size_to_cont = p_params->tpa_min_size_to_cont;
}

static void
qed_sp_update_mcast_bin(struct qed_hwfn *p_hwfn,
                        struct vport_update_ramrod_data *p_ramrod,
                        struct qed_sp_vport_update_params *p_params)
{
        int i;

        memset(&p_ramrod->approx_mcast.bins, 0,
               sizeof(p_ramrod->approx_mcast.bins));

        if (!p_params->update_approx_mcast_flg)
                return;

        p_ramrod->common.update_approx_mcast_flg = 1;
        for (i = 0; i < ETH_MULTICAST_MAC_BINS_IN_REGS; i++) {
                u32 *p_bins = (u32 *)p_params->bins;

                p_ramrod->approx_mcast.bins[i] = cpu_to_le32(p_bins[i]);
        }
}

int qed_sp_vport_update(struct qed_hwfn *p_hwfn,
                        struct qed_sp_vport_update_params *p_params,
                        enum spq_mode comp_mode,
                        struct qed_spq_comp_cb *p_comp_data)
{
        struct qed_rss_params *p_rss_params = p_params->rss_params;
        struct vport_update_ramrod_data_cmn *p_cmn;
        struct qed_sp_init_data init_data;
        struct vport_update_ramrod_data *p_ramrod = NULL;
        struct qed_spq_entry *p_ent = NULL;
        u8 abs_vport_id = 0, val;
        int rc = -EINVAL;

        if (IS_VF(p_hwfn->cdev))
                return qed_vf_pf_vport_update(p_hwfn, p_params);

        rc = qed_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id);
        if (rc)
                return rc;

        memset(&init_data, 0, sizeof(init_data));
        init_data.cid = qed_spq_get_cid(p_hwfn);
        init_data.opaque_fid = p_params->opaque_fid;
        init_data.comp_mode = comp_mode;
        init_data.p_comp_data = p_comp_data;

        rc = qed_sp_init_request(p_hwfn, &p_ent,
                                 ETH_RAMROD_VPORT_UPDATE,
                                 PROTOCOLID_ETH, &init_data);
        if (rc)
                return rc;

        /* Copy input params to ramrod according to FW struct */
        p_ramrod = &p_ent->ramrod.vport_update;
        p_cmn = &p_ramrod->common;

        p_cmn->vport_id = abs_vport_id;
        p_cmn->rx_active_flg = p_params->vport_active_rx_flg;
        p_cmn->update_rx_active_flg = p_params->update_vport_active_rx_flg;
        p_cmn->tx_active_flg = p_params->vport_active_tx_flg;
        p_cmn->update_tx_active_flg = p_params->update_vport_active_tx_flg;
        p_cmn->accept_any_vlan = p_params->accept_any_vlan;
        val = p_params->update_accept_any_vlan_flg;
        p_cmn->update_accept_any_vlan_flg = val;

        p_cmn->inner_vlan_removal_en = p_params->inner_vlan_removal_flg;
        val = p_params->update_inner_vlan_removal_flg;
        p_cmn->update_inner_vlan_removal_en_flg = val;

        p_cmn->default_vlan_en = p_params->default_vlan_enable_flg;
        val = p_params->update_default_vlan_enable_flg;
        p_cmn->update_default_vlan_en_flg = val;

        p_cmn->default_vlan = cpu_to_le16(p_params->default_vlan);
        p_cmn->update_default_vlan_flg = p_params->update_default_vlan_flg;

        p_cmn->silent_vlan_removal_en = p_params->silent_vlan_removal_flg;

        p_cmn->tx_switching_en = p_params->tx_switching_flg;
        p_cmn->update_tx_switching_en_flg = p_params->update_tx_switching_flg;

        p_cmn->anti_spoofing_en = p_params->anti_spoofing_en;
        val = p_params->update_anti_spoofing_en_flg;
        p_cmn->update_anti_spoofing_en_flg = val;

        rc = qed_sp_vport_update_rss(p_hwfn, p_ramrod, p_rss_params);
        if (rc) {
                /* Return the SPQ entry taken in qed_sp_init_request() */
                qed_spq_return_entry(p_hwfn, p_ent);
                return rc;
        }

        /* Update mcast bins for VFs; the PF doesn't use this functionality */
        qed_sp_update_mcast_bin(p_hwfn, p_ramrod, p_params);

        qed_sp_update_accept_mode(p_hwfn, p_ramrod, p_params->accept_flags);
        qed_sp_vport_update_sge_tpa(p_hwfn, p_ramrod, p_params->sge_tpa_params);
        return qed_spq_post(p_hwfn, p_ent, NULL);
}

int qed_sp_vport_stop(struct qed_hwfn *p_hwfn, u16 opaque_fid, u8 vport_id)
{
        struct vport_stop_ramrod_data *p_ramrod;
        struct qed_sp_init_data init_data;
        struct qed_spq_entry *p_ent;
        u8 abs_vport_id = 0;
        int rc;

        if (IS_VF(p_hwfn->cdev))
                return qed_vf_pf_vport_stop(p_hwfn);

        rc = qed_fw_vport(p_hwfn, vport_id, &abs_vport_id);
        if (rc)
                return rc;

        memset(&init_data, 0, sizeof(init_data));
        init_data.cid = qed_spq_get_cid(p_hwfn);
        init_data.opaque_fid = opaque_fid;
        init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

        rc = qed_sp_init_request(p_hwfn, &p_ent,
                                 ETH_RAMROD_VPORT_STOP,
                                 PROTOCOLID_ETH, &init_data);
        if (rc)
                return rc;

        p_ramrod = &p_ent->ramrod.vport_stop;
        p_ramrod->vport_id = abs_vport_id;

        return qed_spq_post(p_hwfn, p_ent, NULL);
}

static int
qed_vf_pf_accept_flags(struct qed_hwfn *p_hwfn,
                       struct qed_filter_accept_flags *p_accept_flags)
{
        struct qed_sp_vport_update_params s_params;

        memset(&s_params, 0, sizeof(s_params));
        memcpy(&s_params.accept_flags, p_accept_flags,
               sizeof(struct qed_filter_accept_flags));

        return qed_vf_pf_vport_update(p_hwfn, &s_params);
}

static int qed_filter_accept_cmd(struct qed_dev *cdev,
                                 u8 vport,
                                 struct qed_filter_accept_flags accept_flags,
                                 u8 update_accept_any_vlan,
                                 u8 accept_any_vlan,
                                 enum spq_mode comp_mode,
                                 struct qed_spq_comp_cb *p_comp_data)
{
        struct qed_sp_vport_update_params vport_update_params;
        int i, rc;

        /* Prepare and send the vport rx_mode change */
        memset(&vport_update_params, 0, sizeof(vport_update_params));
        vport_update_params.vport_id = vport;
        vport_update_params.accept_flags = accept_flags;
        vport_update_params.update_accept_any_vlan_flg = update_accept_any_vlan;
        vport_update_params.accept_any_vlan = accept_any_vlan;

        for_each_hwfn(cdev, i) {
                struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

                vport_update_params.opaque_fid = p_hwfn->hw_info.opaque_fid;

                if (IS_VF(cdev)) {
                        rc = qed_vf_pf_accept_flags(p_hwfn, &accept_flags);
                        if (rc)
                                return rc;
                        continue;
                }

                rc = qed_sp_vport_update(p_hwfn, &vport_update_params,
                                         comp_mode, p_comp_data);
                if (rc) {
                        DP_ERR(cdev, "Update rx_mode failed %d\n", rc);
                        return rc;
                }

                DP_VERBOSE(p_hwfn, QED_MSG_SP,
                           "Accept filter configured, flags = [Rx]%x [Tx]%x\n",
                           accept_flags.rx_accept_filter,
                           accept_flags.tx_accept_filter);
                if (update_accept_any_vlan)
                        DP_VERBOSE(p_hwfn, QED_MSG_SP,
                                   "accept_any_vlan=%d configured\n",
                                   accept_any_vlan);
        }

        return 0;
}
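
/* Illustrative sketch, not part of the driver: requesting a fully
 * promiscuous Rx mode on vport 0 of every hwfn through the helper above.
 * The function name is hypothetical; the flag and mode constants are the
 * ones this file already uses.
 */
static int __maybe_unused example_set_rx_promisc(struct qed_dev *cdev)
{
        struct qed_filter_accept_flags flags;

        memset(&flags, 0, sizeof(flags));
        flags.update_rx_mode_config = 1;
        flags.rx_accept_filter = QED_ACCEPT_UCAST_MATCHED |
                                 QED_ACCEPT_UCAST_UNMATCHED |
                                 QED_ACCEPT_MCAST_MATCHED |
                                 QED_ACCEPT_MCAST_UNMATCHED |
                                 QED_ACCEPT_BCAST;

        /* Completion via callback; no accept_any_vlan change requested */
        return qed_filter_accept_cmd(cdev, 0, flags, 0, 0,
                                     QED_SPQ_MODE_CB, NULL);
}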

int qed_eth_rxq_start_ramrod(struct qed_hwfn *p_hwfn,
                             struct qed_queue_cid *p_cid,
                             u16 bd_max_bytes,
                             dma_addr_t bd_chain_phys_addr,
                             dma_addr_t cqe_pbl_addr, u16 cqe_pbl_size)
{
        struct rx_queue_start_ramrod_data *p_ramrod = NULL;
        struct qed_spq_entry *p_ent = NULL;
        struct qed_sp_init_data init_data;
        int rc = -EINVAL;

        DP_VERBOSE(p_hwfn, QED_MSG_SP,
                   "opaque_fid=0x%x, cid=0x%x, rx_qzone=0x%x, vport_id=0x%x, sb_id=0x%x\n",
                   p_cid->opaque_fid, p_cid->cid,
                   p_cid->abs.queue_id, p_cid->abs.vport_id, p_cid->abs.sb);

        /* Get SPQ entry */
        memset(&init_data, 0, sizeof(init_data));
        init_data.cid = p_cid->cid;
        init_data.opaque_fid = p_cid->opaque_fid;
        init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

        rc = qed_sp_init_request(p_hwfn, &p_ent,
                                 ETH_RAMROD_RX_QUEUE_START,
                                 PROTOCOLID_ETH, &init_data);
        if (rc)
                return rc;

        p_ramrod = &p_ent->ramrod.rx_queue_start;

        p_ramrod->sb_id = cpu_to_le16(p_cid->abs.sb);
        p_ramrod->sb_index = p_cid->abs.sb_idx;
        p_ramrod->vport_id = p_cid->abs.vport_id;
        p_ramrod->stats_counter_id = p_cid->abs.stats_id;
        p_ramrod->rx_queue_id = cpu_to_le16(p_cid->abs.queue_id);
        p_ramrod->complete_cqe_flg = 0;
        p_ramrod->complete_event_flg = 1;

        p_ramrod->bd_max_bytes = cpu_to_le16(bd_max_bytes);
        DMA_REGPAIR_LE(p_ramrod->bd_base, bd_chain_phys_addr);

        p_ramrod->num_of_pbl_pages = cpu_to_le16(cqe_pbl_size);
        DMA_REGPAIR_LE(p_ramrod->cqe_pbl_addr, cqe_pbl_addr);

        if (p_cid->is_vf) {
                p_ramrod->vf_rx_prod_index = p_cid->vf_qid;
                DP_VERBOSE(p_hwfn, QED_MSG_SP,
                           "Queue%s is meant for VF rxq[%02x]\n",
                           p_cid->b_legacy_vf ? " [legacy]" : "",
                           p_cid->vf_qid);
                p_ramrod->vf_rx_prod_use_zone_a = !!p_cid->b_legacy_vf;
        }

        return qed_spq_post(p_hwfn, p_ent, NULL);
}

static int
qed_eth_pf_rx_queue_start(struct qed_hwfn *p_hwfn,
                          struct qed_queue_cid *p_cid,
                          u16 bd_max_bytes,
                          dma_addr_t bd_chain_phys_addr,
                          dma_addr_t cqe_pbl_addr,
                          u16 cqe_pbl_size, void __iomem **pp_prod)
{
        u32 init_prod_val = 0;

        *pp_prod = p_hwfn->regview +
                   GTT_BAR0_MAP_REG_MSDM_RAM +
                   MSTORM_ETH_PF_PRODS_OFFSET(p_cid->abs.queue_id);

        /* Init the rcq, rx bd and rx sge (if valid) producers to 0 */
        __internal_ram_wr(p_hwfn, *pp_prod, sizeof(u32),
                          (u32 *)(&init_prod_val));

        return qed_eth_rxq_start_ramrod(p_hwfn, p_cid,
                                        bd_max_bytes,
                                        bd_chain_phys_addr,
                                        cqe_pbl_addr, cqe_pbl_size);
}

static int
qed_eth_rx_queue_start(struct qed_hwfn *p_hwfn,
                       u16 opaque_fid,
                       struct qed_queue_start_common_params *p_params,
                       u16 bd_max_bytes,
                       dma_addr_t bd_chain_phys_addr,
                       dma_addr_t cqe_pbl_addr,
                       u16 cqe_pbl_size,
                       struct qed_rxq_start_ret_params *p_ret_params)
{
        struct qed_queue_cid *p_cid;
        int rc;

        /* Allocate a CID for the queue */
        p_cid = qed_eth_queue_to_cid(p_hwfn, opaque_fid, p_params);
        if (!p_cid)
                return -ENOMEM;

        if (IS_PF(p_hwfn->cdev)) {
                rc = qed_eth_pf_rx_queue_start(p_hwfn, p_cid,
                                               bd_max_bytes,
                                               bd_chain_phys_addr,
                                               cqe_pbl_addr, cqe_pbl_size,
                                               &p_ret_params->p_prod);
        } else {
                rc = qed_vf_pf_rxq_start(p_hwfn, p_cid,
                                         bd_max_bytes,
                                         bd_chain_phys_addr,
                                         cqe_pbl_addr,
                                         cqe_pbl_size, &p_ret_params->p_prod);
        }

        /* Provide the caller with an opaque handle on success */
        if (rc)
                qed_eth_queue_cid_release(p_hwfn, p_cid);
        else
                p_ret_params->p_handle = (void *)p_cid;

        return rc;
}

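/* Illustrative sketch, not part of the driver: a hypothetical caller
 * starting an Rx queue through qed_eth_rx_queue_start() above and keeping
 * the opaque handle that qed_eth_rx_queue_stop() later expects. The
 * function name and the bd_max_bytes value are assumptions for the
 * example only.
 */
static int __maybe_unused
example_start_rx_queue(struct qed_hwfn *p_hwfn,
                       struct qed_queue_start_common_params *p_params,
                       dma_addr_t bd_chain_addr,
                       dma_addr_t cqe_pbl_addr,
                       u16 cqe_pbl_size, void **pp_handle)
{
        struct qed_rxq_start_ret_params ret_params;
        int rc;

        memset(&ret_params, 0, sizeof(ret_params));
        rc = qed_eth_rx_queue_start(p_hwfn, p_hwfn->hw_info.opaque_fid,
                                    p_params, 0x600 /* bd_max_bytes */,
                                    bd_chain_addr, cqe_pbl_addr,
                                    cqe_pbl_size, &ret_params);
        if (rc)
                return rc;

        /* ret_params.p_prod points at the Rx producer register;
         * ret_params.p_handle identifies the queue when stopping it.
         */
        *pp_handle = ret_params.p_handle;

        return 0;
}
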
int qed_sp_eth_rx_queues_update(struct qed_hwfn *p_hwfn,
                                void **pp_rxq_handles,
                                u8 num_rxqs,
                                u8 complete_cqe_flg,
                                u8 complete_event_flg,
                                enum spq_mode comp_mode,
                                struct qed_spq_comp_cb *p_comp_data)
{
        struct rx_queue_update_ramrod_data *p_ramrod = NULL;
        struct qed_spq_entry *p_ent = NULL;
        struct qed_sp_init_data init_data;
        struct qed_queue_cid *p_cid;
        int rc = -EINVAL;
        u8 i;

        memset(&init_data, 0, sizeof(init_data));
        init_data.comp_mode = comp_mode;
        init_data.p_comp_data = p_comp_data;

        for (i = 0; i < num_rxqs; i++) {
                p_cid = ((struct qed_queue_cid **)pp_rxq_handles)[i];

                /* Get SPQ entry */
                init_data.cid = p_cid->cid;
                init_data.opaque_fid = p_cid->opaque_fid;

                rc = qed_sp_init_request(p_hwfn, &p_ent,
                                         ETH_RAMROD_RX_QUEUE_UPDATE,
                                         PROTOCOLID_ETH, &init_data);
                if (rc)
                        return rc;

                p_ramrod = &p_ent->ramrod.rx_queue_update;
                p_ramrod->vport_id = p_cid->abs.vport_id;

                p_ramrod->rx_queue_id = cpu_to_le16(p_cid->abs.queue_id);
                p_ramrod->complete_cqe_flg = complete_cqe_flg;
                p_ramrod->complete_event_flg = complete_event_flg;

                rc = qed_spq_post(p_hwfn, p_ent, NULL);
                if (rc)
                        return rc;
        }

        return rc;
}

static int
qed_eth_pf_rx_queue_stop(struct qed_hwfn *p_hwfn,
                         struct qed_queue_cid *p_cid,
                         bool b_eq_completion_only, bool b_cqe_completion)
{
        struct rx_queue_stop_ramrod_data *p_ramrod = NULL;
        struct qed_spq_entry *p_ent = NULL;
        struct qed_sp_init_data init_data;
        int rc;

        memset(&init_data, 0, sizeof(init_data));
        init_data.cid = p_cid->cid;
        init_data.opaque_fid = p_cid->opaque_fid;
        init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

        rc = qed_sp_init_request(p_hwfn, &p_ent,
                                 ETH_RAMROD_RX_QUEUE_STOP,
                                 PROTOCOLID_ETH, &init_data);
        if (rc)
                return rc;

        p_ramrod = &p_ent->ramrod.rx_queue_stop;
        p_ramrod->vport_id = p_cid->abs.vport_id;
        p_ramrod->rx_queue_id = cpu_to_le16(p_cid->abs.queue_id);

        /* Cleaning the queue requires the completion to arrive there.
         * In addition, VFs require the answer to come to the PF as an EQE.
         */
        p_ramrod->complete_cqe_flg = (!p_cid->is_vf &&
                                      !b_eq_completion_only) ||
                                     b_cqe_completion;
        p_ramrod->complete_event_flg = p_cid->is_vf || b_eq_completion_only;

        return qed_spq_post(p_hwfn, p_ent, NULL);
}

int qed_eth_rx_queue_stop(struct qed_hwfn *p_hwfn,
                          void *p_rxq,
                          bool eq_completion_only, bool cqe_completion)
{
        struct qed_queue_cid *p_cid = (struct qed_queue_cid *)p_rxq;
        int rc = -EINVAL;

        if (IS_PF(p_hwfn->cdev))
                rc = qed_eth_pf_rx_queue_stop(p_hwfn, p_cid,
                                              eq_completion_only,
                                              cqe_completion);
        else
                rc = qed_vf_pf_rxq_stop(p_hwfn, p_cid, cqe_completion);

        if (!rc)
                qed_eth_queue_cid_release(p_hwfn, p_cid);
        return rc;
}

int
qed_eth_txq_start_ramrod(struct qed_hwfn *p_hwfn,
                         struct qed_queue_cid *p_cid,
                         dma_addr_t pbl_addr, u16 pbl_size, u16 pq_id)
{
        struct tx_queue_start_ramrod_data *p_ramrod = NULL;
        struct qed_spq_entry *p_ent = NULL;
        struct qed_sp_init_data init_data;
        int rc = -EINVAL;

        /* Get SPQ entry */
        memset(&init_data, 0, sizeof(init_data));
        init_data.cid = p_cid->cid;
        init_data.opaque_fid = p_cid->opaque_fid;
        init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

        rc = qed_sp_init_request(p_hwfn, &p_ent,
                                 ETH_RAMROD_TX_QUEUE_START,
                                 PROTOCOLID_ETH, &init_data);
        if (rc)
                return rc;

        p_ramrod = &p_ent->ramrod.tx_queue_start;
        p_ramrod->vport_id = p_cid->abs.vport_id;

        p_ramrod->sb_id = cpu_to_le16(p_cid->abs.sb);
        p_ramrod->sb_index = p_cid->abs.sb_idx;
        p_ramrod->stats_counter_id = p_cid->abs.stats_id;

        p_ramrod->queue_zone_id = cpu_to_le16(p_cid->abs.queue_id);
        p_ramrod->same_as_last_id = cpu_to_le16(p_cid->abs.queue_id);

        p_ramrod->pbl_size = cpu_to_le16(pbl_size);
        DMA_REGPAIR_LE(p_ramrod->pbl_base_addr, pbl_addr);

        p_ramrod->qm_pq_id = cpu_to_le16(pq_id);

        return qed_spq_post(p_hwfn, p_ent, NULL);
}

static int
qed_eth_pf_tx_queue_start(struct qed_hwfn *p_hwfn,
                          struct qed_queue_cid *p_cid,
                          u8 tc,
                          dma_addr_t pbl_addr,
                          u16 pbl_size, void __iomem **pp_doorbell)
{
        union qed_qm_pq_params pq_params;
        int rc;

        memset(&pq_params, 0, sizeof(pq_params));

        rc = qed_eth_txq_start_ramrod(p_hwfn, p_cid,
                                      pbl_addr, pbl_size,
                                      qed_get_qm_pq(p_hwfn, PROTOCOLID_ETH,
                                                    &pq_params));
        if (rc)
                return rc;

        /* Provide the caller with the necessary return values */
        *pp_doorbell = p_hwfn->doorbells +
                       qed_db_addr(p_cid->cid, DQ_DEMS_LEGACY);

        return 0;
}

static int
qed_eth_tx_queue_start(struct qed_hwfn *p_hwfn,
                       u16 opaque_fid,
                       struct qed_queue_start_common_params *p_params,
                       u8 tc,
                       dma_addr_t pbl_addr,
                       u16 pbl_size,
                       struct qed_txq_start_ret_params *p_ret_params)
{
        struct qed_queue_cid *p_cid;
        int rc;

        p_cid = qed_eth_queue_to_cid(p_hwfn, opaque_fid, p_params);
        if (!p_cid)
                return -EINVAL;

        if (IS_PF(p_hwfn->cdev))
                rc = qed_eth_pf_tx_queue_start(p_hwfn, p_cid, tc,
                                               pbl_addr, pbl_size,
                                               &p_ret_params->p_doorbell);
        else
                rc = qed_vf_pf_txq_start(p_hwfn, p_cid,
                                         pbl_addr, pbl_size,
                                         &p_ret_params->p_doorbell);

        if (rc)
                qed_eth_queue_cid_release(p_hwfn, p_cid);
        else
                p_ret_params->p_handle = (void *)p_cid;

        return rc;
}

static int
qed_eth_pf_tx_queue_stop(struct qed_hwfn *p_hwfn, struct qed_queue_cid *p_cid)
{
        struct qed_spq_entry *p_ent = NULL;
        struct qed_sp_init_data init_data;
        int rc;

        memset(&init_data, 0, sizeof(init_data));
        init_data.cid = p_cid->cid;
        init_data.opaque_fid = p_cid->opaque_fid;
        init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

        rc = qed_sp_init_request(p_hwfn, &p_ent,
                                 ETH_RAMROD_TX_QUEUE_STOP,
                                 PROTOCOLID_ETH, &init_data);
        if (rc)
                return rc;

        return qed_spq_post(p_hwfn, p_ent, NULL);
}

int qed_eth_tx_queue_stop(struct qed_hwfn *p_hwfn, void *p_handle)
{
        struct qed_queue_cid *p_cid = (struct qed_queue_cid *)p_handle;
        int rc;

        if (IS_PF(p_hwfn->cdev))
                rc = qed_eth_pf_tx_queue_stop(p_hwfn, p_cid);
        else
                rc = qed_vf_pf_txq_stop(p_hwfn, p_cid);

        if (!rc)
                qed_eth_queue_cid_release(p_hwfn, p_cid);
        return rc;
}

static enum eth_filter_action qed_filter_action(enum qed_filter_opcode opcode)
{
        enum eth_filter_action action = MAX_ETH_FILTER_ACTION;

        switch (opcode) {
        case QED_FILTER_ADD:
                action = ETH_FILTER_ACTION_ADD;
                break;
        case QED_FILTER_REMOVE:
                action = ETH_FILTER_ACTION_REMOVE;
                break;
        case QED_FILTER_FLUSH:
                action = ETH_FILTER_ACTION_REMOVE_ALL;
                break;
        default:
                action = MAX_ETH_FILTER_ACTION;
        }

        return action;
}

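/* The firmware keeps each half-word of the MAC address byte-swapped:
 * for mac == aa:bb:cc:dd:ee:ff the raw bytes written below are
 * fw_msb = {bb, aa}, fw_mid = {dd, cc}, fw_lsb = {ff, ee} (illustrative
 * values for this comment only).
 */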
static void qed_set_fw_mac_addr(__le16 *fw_msb,
                                __le16 *fw_mid,
                                __le16 *fw_lsb,
                                u8 *mac)
{
        ((u8 *)fw_msb)[0] = mac[1];
        ((u8 *)fw_msb)[1] = mac[0];
        ((u8 *)fw_mid)[0] = mac[3];
        ((u8 *)fw_mid)[1] = mac[2];
        ((u8 *)fw_lsb)[0] = mac[5];
        ((u8 *)fw_lsb)[1] = mac[4];
}

static int
qed_filter_ucast_common(struct qed_hwfn *p_hwfn,
                        u16 opaque_fid,
                        struct qed_filter_ucast *p_filter_cmd,
                        struct vport_filter_update_ramrod_data **pp_ramrod,
                        struct qed_spq_entry **pp_ent,
                        enum spq_mode comp_mode,
                        struct qed_spq_comp_cb *p_comp_data)
{
        u8 vport_to_add_to = 0, vport_to_remove_from = 0;
        struct vport_filter_update_ramrod_data *p_ramrod;
        struct eth_filter_cmd *p_first_filter;
        struct eth_filter_cmd *p_second_filter;
        struct qed_sp_init_data init_data;
        enum eth_filter_action action;
        int rc;

        rc = qed_fw_vport(p_hwfn, p_filter_cmd->vport_to_remove_from,
                          &vport_to_remove_from);
        if (rc)
                return rc;

        rc = qed_fw_vport(p_hwfn, p_filter_cmd->vport_to_add_to,
                          &vport_to_add_to);
        if (rc)
                return rc;

        /* Get SPQ entry */
        memset(&init_data, 0, sizeof(init_data));
        init_data.cid = qed_spq_get_cid(p_hwfn);
        init_data.opaque_fid = opaque_fid;
        init_data.comp_mode = comp_mode;
        init_data.p_comp_data = p_comp_data;

        rc = qed_sp_init_request(p_hwfn, pp_ent,
                                 ETH_RAMROD_FILTERS_UPDATE,
                                 PROTOCOLID_ETH, &init_data);
        if (rc)
                return rc;

        *pp_ramrod = &(*pp_ent)->ramrod.vport_filter_update;
        p_ramrod = *pp_ramrod;
        p_ramrod->filter_cmd_hdr.rx = p_filter_cmd->is_rx_filter ? 1 : 0;
        p_ramrod->filter_cmd_hdr.tx = p_filter_cmd->is_tx_filter ? 1 : 0;

        switch (p_filter_cmd->opcode) {
        case QED_FILTER_REPLACE:
        case QED_FILTER_MOVE:
                p_ramrod->filter_cmd_hdr.cmd_cnt = 2;
                break;
        default:
                p_ramrod->filter_cmd_hdr.cmd_cnt = 1;
                break;
        }

        p_first_filter  = &p_ramrod->filter_cmds[0];
        p_second_filter = &p_ramrod->filter_cmds[1];

        switch (p_filter_cmd->type) {
        case QED_FILTER_MAC:
                p_first_filter->type = ETH_FILTER_TYPE_MAC;
                break;
        case QED_FILTER_VLAN:
                p_first_filter->type = ETH_FILTER_TYPE_VLAN;
                break;
        case QED_FILTER_MAC_VLAN:
                p_first_filter->type = ETH_FILTER_TYPE_PAIR;
                break;
        case QED_FILTER_INNER_MAC:
                p_first_filter->type = ETH_FILTER_TYPE_INNER_MAC;
                break;
        case QED_FILTER_INNER_VLAN:
                p_first_filter->type = ETH_FILTER_TYPE_INNER_VLAN;
                break;
        case QED_FILTER_INNER_PAIR:
                p_first_filter->type = ETH_FILTER_TYPE_INNER_PAIR;
                break;
        case QED_FILTER_INNER_MAC_VNI_PAIR:
                p_first_filter->type = ETH_FILTER_TYPE_INNER_MAC_VNI_PAIR;
                break;
        case QED_FILTER_MAC_VNI_PAIR:
                p_first_filter->type = ETH_FILTER_TYPE_MAC_VNI_PAIR;
                break;
        case QED_FILTER_VNI:
                p_first_filter->type = ETH_FILTER_TYPE_VNI;
                break;
        }

        if ((p_first_filter->type == ETH_FILTER_TYPE_MAC) ||
            (p_first_filter->type == ETH_FILTER_TYPE_PAIR) ||
            (p_first_filter->type == ETH_FILTER_TYPE_INNER_MAC) ||
            (p_first_filter->type == ETH_FILTER_TYPE_INNER_PAIR) ||
            (p_first_filter->type == ETH_FILTER_TYPE_INNER_MAC_VNI_PAIR) ||
            (p_first_filter->type == ETH_FILTER_TYPE_MAC_VNI_PAIR)) {
                qed_set_fw_mac_addr(&p_first_filter->mac_msb,
                                    &p_first_filter->mac_mid,
                                    &p_first_filter->mac_lsb,
                                    (u8 *)p_filter_cmd->mac);
        }

        if ((p_first_filter->type == ETH_FILTER_TYPE_VLAN) ||
            (p_first_filter->type == ETH_FILTER_TYPE_PAIR) ||
            (p_first_filter->type == ETH_FILTER_TYPE_INNER_VLAN) ||
            (p_first_filter->type == ETH_FILTER_TYPE_INNER_PAIR))
                p_first_filter->vlan_id = cpu_to_le16(p_filter_cmd->vlan);

        if ((p_first_filter->type == ETH_FILTER_TYPE_INNER_MAC_VNI_PAIR) ||
            (p_first_filter->type == ETH_FILTER_TYPE_MAC_VNI_PAIR) ||
            (p_first_filter->type == ETH_FILTER_TYPE_VNI))
                p_first_filter->vni = cpu_to_le32(p_filter_cmd->vni);

        if (p_filter_cmd->opcode == QED_FILTER_MOVE) {
                p_second_filter->type = p_first_filter->type;
                p_second_filter->mac_msb = p_first_filter->mac_msb;
                p_second_filter->mac_mid = p_first_filter->mac_mid;
                p_second_filter->mac_lsb = p_first_filter->mac_lsb;
                p_second_filter->vlan_id = p_first_filter->vlan_id;
                p_second_filter->vni = p_first_filter->vni;

                p_first_filter->action = ETH_FILTER_ACTION_REMOVE;
                p_first_filter->vport_id = vport_to_remove_from;

                p_second_filter->action = ETH_FILTER_ACTION_ADD;
                p_second_filter->vport_id = vport_to_add_to;
        } else if (p_filter_cmd->opcode == QED_FILTER_REPLACE) {
                p_first_filter->vport_id = vport_to_add_to;
                memcpy(p_second_filter, p_first_filter,
                       sizeof(*p_second_filter));
                p_first_filter->action  = ETH_FILTER_ACTION_REMOVE_ALL;
                p_second_filter->action = ETH_FILTER_ACTION_ADD;
        } else {
                action = qed_filter_action(p_filter_cmd->opcode);

                if (action == MAX_ETH_FILTER_ACTION) {
                        DP_NOTICE(p_hwfn,
                                  "%d is not supported yet\n",
                                  p_filter_cmd->opcode);
                        return -EINVAL;
                }

                p_first_filter->action = action;
                p_first_filter->vport_id = (p_filter_cmd->opcode ==
                                            QED_FILTER_REMOVE) ?
                                           vport_to_remove_from :
                                           vport_to_add_to;
        }

        return 0;
}

int qed_sp_eth_filter_ucast(struct qed_hwfn *p_hwfn,
                            u16 opaque_fid,
                            struct qed_filter_ucast *p_filter_cmd,
                            enum spq_mode comp_mode,
                            struct qed_spq_comp_cb *p_comp_data)
{
        struct vport_filter_update_ramrod_data *p_ramrod = NULL;
        struct qed_spq_entry *p_ent = NULL;
        struct eth_filter_cmd_header *p_header;
        int rc;

        rc = qed_filter_ucast_common(p_hwfn, opaque_fid, p_filter_cmd,
                                     &p_ramrod, &p_ent,
                                     comp_mode, p_comp_data);
        if (rc) {
                DP_ERR(p_hwfn, "Unicast filter command failed %d\n", rc);
                return rc;
        }
        p_header = &p_ramrod->filter_cmd_hdr;
        p_header->assert_on_error = p_filter_cmd->assert_on_error;

        rc = qed_spq_post(p_hwfn, p_ent, NULL);
        if (rc) {
                DP_ERR(p_hwfn, "Unicast filter ramrod failed %d\n", rc);
                return rc;
        }

        DP_VERBOSE(p_hwfn, QED_MSG_SP,
                   "Unicast filter configured, opcode = %s, type = %s, cmd_cnt = %d, is_rx_filter = %d, is_tx_filter = %d\n",
                   (p_filter_cmd->opcode == QED_FILTER_ADD) ? "ADD" :
                   ((p_filter_cmd->opcode == QED_FILTER_REMOVE) ?
                    "REMOVE" :
                    ((p_filter_cmd->opcode == QED_FILTER_MOVE) ?
                     "MOVE" : "REPLACE")),
                   (p_filter_cmd->type == QED_FILTER_MAC) ? "MAC" :
                   ((p_filter_cmd->type == QED_FILTER_VLAN) ?
                    "VLAN" : "MAC & VLAN"),
                   p_ramrod->filter_cmd_hdr.cmd_cnt,
                   p_filter_cmd->is_rx_filter,
                   p_filter_cmd->is_tx_filter);
        DP_VERBOSE(p_hwfn, QED_MSG_SP,
                   "vport_to_add_to = %d, vport_to_remove_from = %d, mac = %02x:%02x:%02x:%02x:%02x:%02x, vlan = %d\n",
                   p_filter_cmd->vport_to_add_to,
                   p_filter_cmd->vport_to_remove_from,
                   p_filter_cmd->mac[0],
                   p_filter_cmd->mac[1],
                   p_filter_cmd->mac[2],
                   p_filter_cmd->mac[3],
                   p_filter_cmd->mac[4],
                   p_filter_cmd->mac[5],
                   p_filter_cmd->vlan);

        return 0;
}

/* Calculate a CRC-32 over a buffer.
 * Note: crc32_length must be a non-zero multiple of 8; otherwise the
 * seed is returned unchanged.
 */
static u32 qed_calc_crc32c(u8 *crc32_packet,
                           u32 crc32_length, u32 crc32_seed, u8 complement)
{
        u32 byte = 0, bit = 0, crc32_result = crc32_seed;
        u8 msb = 0, current_byte = 0;

        if (!crc32_packet || !crc32_length || (crc32_length % 8))
                return crc32_result;

        for (byte = 0; byte < crc32_length; byte++) {
                current_byte = crc32_packet[byte];
                for (bit = 0; bit < 8; bit++) {
                        msb = (u8)(crc32_result >> 31);
                        crc32_result = crc32_result << 1;
                        if (msb != (0x1 & (current_byte >> bit))) {
                                crc32_result = crc32_result ^ CRC32_POLY;
                                crc32_result |= 1; /* crc32_result[0] = 1 */
                        }
                }
        }

        return crc32_result;
}

static u32 qed_crc32c_le(u32 seed, u8 *mac, u32 len)
{
        u32 packet_buf[2] = { 0 };

        memcpy((u8 *)(&packet_buf[0]), &mac[0], 6);
        return qed_calc_crc32c((u8 *)packet_buf, 8, seed, 0);
}

u8 qed_mcast_bin_from_mac(u8 *mac)
{
        u32 crc = qed_crc32c_le(ETH_MULTICAST_BIN_FROM_MAC_SEED,
                                mac, ETH_ALEN);

        return crc & 0xff;
}
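
/* Illustrative sketch, not part of the driver: mapping a multicast MAC to
 * its approximate-match bin and recording it in a local bin vector, the
 * same way qed_sp_eth_filter_mcast() below builds its bins. The function
 * name is hypothetical.
 */
static void __maybe_unused
example_record_mcast_mac(unsigned long *bins, u8 *mac)
{
        u32 bit = qed_mcast_bin_from_mac(mac); /* bin index in 0..255 */

        __set_bit(bit, bins);
}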
1303
1304 static int
1305 qed_sp_eth_filter_mcast(struct qed_hwfn *p_hwfn,
1306                         u16 opaque_fid,
1307                         struct qed_filter_mcast *p_filter_cmd,
1308                         enum spq_mode comp_mode,
1309                         struct qed_spq_comp_cb *p_comp_data)
1310 {
1311         unsigned long bins[ETH_MULTICAST_MAC_BINS_IN_REGS];
1312         struct vport_update_ramrod_data *p_ramrod = NULL;
1313         struct qed_spq_entry *p_ent = NULL;
1314         struct qed_sp_init_data init_data;
1315         u8 abs_vport_id = 0;
1316         int rc, i;
1317
1318         if (p_filter_cmd->opcode == QED_FILTER_ADD)
1319                 rc = qed_fw_vport(p_hwfn, p_filter_cmd->vport_to_add_to,
1320                                   &abs_vport_id);
1321         else
1322                 rc = qed_fw_vport(p_hwfn, p_filter_cmd->vport_to_remove_from,
1323                                   &abs_vport_id);
1324         if (rc)
1325                 return rc;
1326
1327         /* Get SPQ entry */
1328         memset(&init_data, 0, sizeof(init_data));
1329         init_data.cid = qed_spq_get_cid(p_hwfn);
1330         init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
1331         init_data.comp_mode = comp_mode;
1332         init_data.p_comp_data = p_comp_data;
1333
1334         rc = qed_sp_init_request(p_hwfn, &p_ent,
1335                                  ETH_RAMROD_VPORT_UPDATE,
1336                                  PROTOCOLID_ETH, &init_data);
1337         if (rc) {
1338                 DP_ERR(p_hwfn, "Multi-cast command failed %d\n", rc);
1339                 return rc;
1340         }
1341
1342         p_ramrod = &p_ent->ramrod.vport_update;
1343         p_ramrod->common.update_approx_mcast_flg = 1;
1344
1345         /* explicitly clear out the entire vector */
1346         memset(&p_ramrod->approx_mcast.bins, 0,
1347                sizeof(p_ramrod->approx_mcast.bins));
1348         memset(bins, 0, sizeof(unsigned long) *
1349                ETH_MULTICAST_MAC_BINS_IN_REGS);
1350         /* A filter ADD op is an explicit set op; it removes any
1351          * existing filters for the vport.
1352          */
1353         if (p_filter_cmd->opcode == QED_FILTER_ADD) {
1354                 for (i = 0; i < p_filter_cmd->num_mc_addrs; i++) {
1355                         u32 bit;
1356
1357                         bit = qed_mcast_bin_from_mac(p_filter_cmd->mac[i]);
1358                         __set_bit(bit, bins);
1359                 }
1360
1361                 /* Convert to the correct endianness */
1362                 for (i = 0; i < ETH_MULTICAST_MAC_BINS_IN_REGS; i++) {
1363                         struct vport_update_ramrod_mcast *p_ramrod_bins;
1364                         u32 *p_bins = (u32 *)bins;
1365
1366                         p_ramrod_bins = &p_ramrod->approx_mcast;
1367                         p_ramrod_bins->bins[i] = cpu_to_le32(p_bins[i]);
1368                 }
1369         }
1370
1371         p_ramrod->common.vport_id = abs_vport_id;
1372
1373         return qed_spq_post(p_hwfn, p_ent, NULL);
1374 }
1375
1376 static int qed_filter_mcast_cmd(struct qed_dev *cdev,
1377                                 struct qed_filter_mcast *p_filter_cmd,
1378                                 enum spq_mode comp_mode,
1379                                 struct qed_spq_comp_cb *p_comp_data)
1380 {
1381         int rc = 0;
1382         int i;
1383
1384         /* only ADD and REMOVE operations are supported for multi-cast */
1385         if ((p_filter_cmd->opcode != QED_FILTER_ADD &&
1386              p_filter_cmd->opcode != QED_FILTER_REMOVE) ||
1387             (p_filter_cmd->num_mc_addrs > QED_MAX_MC_ADDRS))
1388                 return -EINVAL;
1389
1390         for_each_hwfn(cdev, i) {
1391                 struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
1392                 u16 opaque_fid;
1394
1395                 if (IS_VF(cdev)) {
1396                         qed_vf_pf_filter_mcast(p_hwfn, p_filter_cmd);
1397                         continue;
1398                 }
1399
1400                 opaque_fid = p_hwfn->hw_info.opaque_fid;
1401
1402                 rc = qed_sp_eth_filter_mcast(p_hwfn,
1403                                              opaque_fid,
1404                                              p_filter_cmd,
1405                                              comp_mode, p_comp_data);
1406         }
1407         return rc;
1408 }
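
/* Illustrative sketch (editorial): replacing the multicast set with two
 * addresses; mac_a and mac_b are assumed caller-owned MAC arrays.
 *
 *      struct qed_filter_mcast mcast;
 *
 *      memset(&mcast, 0, sizeof(mcast));
 *      mcast.opcode = QED_FILTER_ADD;  // explicit set - clears old bins
 *      mcast.num_mc_addrs = 2;
 *      ether_addr_copy(mcast.mac[0], mac_a);
 *      ether_addr_copy(mcast.mac[1], mac_b);
 *
 *      rc = qed_filter_mcast_cmd(cdev, &mcast, QED_SPQ_MODE_CB, NULL);
 */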
1409
1410 static int qed_filter_ucast_cmd(struct qed_dev *cdev,
1411                                 struct qed_filter_ucast *p_filter_cmd,
1412                                 enum spq_mode comp_mode,
1413                                 struct qed_spq_comp_cb *p_comp_data)
1414 {
1415         int rc = 0;
1416         int i;
1417
1418         for_each_hwfn(cdev, i) {
1419                 struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
1420                 u16 opaque_fid;
1421
1422                 if (IS_VF(cdev)) {
1423                         rc = qed_vf_pf_filter_ucast(p_hwfn, p_filter_cmd);
1424                         continue;
1425                 }
1426
1427                 opaque_fid = p_hwfn->hw_info.opaque_fid;
1428
1429                 rc = qed_sp_eth_filter_ucast(p_hwfn,
1430                                              opaque_fid,
1431                                              p_filter_cmd,
1432                                              comp_mode, p_comp_data);
1433                 if (rc)
1434                         break;
1435         }
1436
1437         return rc;
1438 }
1439
1440 /* Statistics related code */
1441 static void __qed_get_vport_pstats_addrlen(struct qed_hwfn *p_hwfn,
1442                                            u32 *p_addr,
1443                                            u32 *p_len, u16 statistics_bin)
1444 {
1445         if (IS_PF(p_hwfn->cdev)) {
1446                 *p_addr = BAR0_MAP_REG_PSDM_RAM +
1447                     PSTORM_QUEUE_STAT_OFFSET(statistics_bin);
1448                 *p_len = sizeof(struct eth_pstorm_per_queue_stat);
1449         } else {
1450                 struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
1451                 struct pfvf_acquire_resp_tlv *p_resp = &p_iov->acquire_resp;
1452
1453                 *p_addr = p_resp->pfdev_info.stats_info.pstats.address;
1454                 *p_len = p_resp->pfdev_info.stats_info.pstats.len;
1455         }
1456 }
1457
1458 static void __qed_get_vport_pstats(struct qed_hwfn *p_hwfn,
1459                                    struct qed_ptt *p_ptt,
1460                                    struct qed_eth_stats *p_stats,
1461                                    u16 statistics_bin)
1462 {
1463         struct eth_pstorm_per_queue_stat pstats;
1464         u32 pstats_addr = 0, pstats_len = 0;
1465
1466         __qed_get_vport_pstats_addrlen(p_hwfn, &pstats_addr, &pstats_len,
1467                                        statistics_bin);
1468
1469         memset(&pstats, 0, sizeof(pstats));
1470         qed_memcpy_from(p_hwfn, p_ptt, &pstats, pstats_addr, pstats_len);
1471
1472         p_stats->tx_ucast_bytes += HILO_64_REGPAIR(pstats.sent_ucast_bytes);
1473         p_stats->tx_mcast_bytes += HILO_64_REGPAIR(pstats.sent_mcast_bytes);
1474         p_stats->tx_bcast_bytes += HILO_64_REGPAIR(pstats.sent_bcast_bytes);
1475         p_stats->tx_ucast_pkts += HILO_64_REGPAIR(pstats.sent_ucast_pkts);
1476         p_stats->tx_mcast_pkts += HILO_64_REGPAIR(pstats.sent_mcast_pkts);
1477         p_stats->tx_bcast_pkts += HILO_64_REGPAIR(pstats.sent_bcast_pkts);
1478         p_stats->tx_err_drop_pkts += HILO_64_REGPAIR(pstats.error_drop_pkts);
1479 }
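
/* Editorial note: HILO_64_REGPAIR() folds a firmware {hi, lo} 32-bit
 * register pair into a single u64 counter, conceptually:
 *
 *      u64 val = ((u64)hi << 32) + lo;   // hi/lo in CPU byte order
 *
 * The actual macro lives in the qed headers; the expansion above is a
 * sketch for illustration, assuming the pair was already byte-swapped.
 */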
1480
1481 static void __qed_get_vport_tstats(struct qed_hwfn *p_hwfn,
1482                                    struct qed_ptt *p_ptt,
1483                                    struct qed_eth_stats *p_stats,
1484                                    u16 statistics_bin)
1485 {
1486         struct tstorm_per_port_stat tstats;
1487         u32 tstats_addr, tstats_len;
1488
1489         if (IS_PF(p_hwfn->cdev)) {
1490                 tstats_addr = BAR0_MAP_REG_TSDM_RAM +
1491                     TSTORM_PORT_STAT_OFFSET(MFW_PORT(p_hwfn));
1492                 tstats_len = sizeof(struct tstorm_per_port_stat);
1493         } else {
1494                 struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
1495                 struct pfvf_acquire_resp_tlv *p_resp = &p_iov->acquire_resp;
1496
1497                 tstats_addr = p_resp->pfdev_info.stats_info.tstats.address;
1498                 tstats_len = p_resp->pfdev_info.stats_info.tstats.len;
1499         }
1500
1501         memset(&tstats, 0, sizeof(tstats));
1502         qed_memcpy_from(p_hwfn, p_ptt, &tstats, tstats_addr, tstats_len);
1503
1504         p_stats->mftag_filter_discards +=
1505                 HILO_64_REGPAIR(tstats.mftag_filter_discard);
1506         p_stats->mac_filter_discards +=
1507                 HILO_64_REGPAIR(tstats.eth_mac_filter_discard);
1508 }
1509
1510 static void __qed_get_vport_ustats_addrlen(struct qed_hwfn *p_hwfn,
1511                                            u32 *p_addr,
1512                                            u32 *p_len, u16 statistics_bin)
1513 {
1514         if (IS_PF(p_hwfn->cdev)) {
1515                 *p_addr = BAR0_MAP_REG_USDM_RAM +
1516                     USTORM_QUEUE_STAT_OFFSET(statistics_bin);
1517                 *p_len = sizeof(struct eth_ustorm_per_queue_stat);
1518         } else {
1519                 struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
1520                 struct pfvf_acquire_resp_tlv *p_resp = &p_iov->acquire_resp;
1521
1522                 *p_addr = p_resp->pfdev_info.stats_info.ustats.address;
1523                 *p_len = p_resp->pfdev_info.stats_info.ustats.len;
1524         }
1525 }
1526
1527 static void __qed_get_vport_ustats(struct qed_hwfn *p_hwfn,
1528                                    struct qed_ptt *p_ptt,
1529                                    struct qed_eth_stats *p_stats,
1530                                    u16 statistics_bin)
1531 {
1532         struct eth_ustorm_per_queue_stat ustats;
1533         u32 ustats_addr = 0, ustats_len = 0;
1534
1535         __qed_get_vport_ustats_addrlen(p_hwfn, &ustats_addr, &ustats_len,
1536                                        statistics_bin);
1537
1538         memset(&ustats, 0, sizeof(ustats));
1539         qed_memcpy_from(p_hwfn, p_ptt, &ustats, ustats_addr, ustats_len);
1540
1541         p_stats->rx_ucast_bytes += HILO_64_REGPAIR(ustats.rcv_ucast_bytes);
1542         p_stats->rx_mcast_bytes += HILO_64_REGPAIR(ustats.rcv_mcast_bytes);
1543         p_stats->rx_bcast_bytes += HILO_64_REGPAIR(ustats.rcv_bcast_bytes);
1544         p_stats->rx_ucast_pkts += HILO_64_REGPAIR(ustats.rcv_ucast_pkts);
1545         p_stats->rx_mcast_pkts += HILO_64_REGPAIR(ustats.rcv_mcast_pkts);
1546         p_stats->rx_bcast_pkts += HILO_64_REGPAIR(ustats.rcv_bcast_pkts);
1547 }
1548
1549 static void __qed_get_vport_mstats_addrlen(struct qed_hwfn *p_hwfn,
1550                                            u32 *p_addr,
1551                                            u32 *p_len, u16 statistics_bin)
1552 {
1553         if (IS_PF(p_hwfn->cdev)) {
1554                 *p_addr = BAR0_MAP_REG_MSDM_RAM +
1555                     MSTORM_QUEUE_STAT_OFFSET(statistics_bin);
1556                 *p_len = sizeof(struct eth_mstorm_per_queue_stat);
1557         } else {
1558                 struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
1559                 struct pfvf_acquire_resp_tlv *p_resp = &p_iov->acquire_resp;
1560
1561                 *p_addr = p_resp->pfdev_info.stats_info.mstats.address;
1562                 *p_len = p_resp->pfdev_info.stats_info.mstats.len;
1563         }
1564 }
1565
1566 static void __qed_get_vport_mstats(struct qed_hwfn *p_hwfn,
1567                                    struct qed_ptt *p_ptt,
1568                                    struct qed_eth_stats *p_stats,
1569                                    u16 statistics_bin)
1570 {
1571         struct eth_mstorm_per_queue_stat mstats;
1572         u32 mstats_addr = 0, mstats_len = 0;
1573
1574         __qed_get_vport_mstats_addrlen(p_hwfn, &mstats_addr, &mstats_len,
1575                                        statistics_bin);
1576
1577         memset(&mstats, 0, sizeof(mstats));
1578         qed_memcpy_from(p_hwfn, p_ptt, &mstats, mstats_addr, mstats_len);
1579
1580         p_stats->no_buff_discards += HILO_64_REGPAIR(mstats.no_buff_discard);
1581         p_stats->packet_too_big_discard +=
1582                 HILO_64_REGPAIR(mstats.packet_too_big_discard);
1583         p_stats->ttl0_discard += HILO_64_REGPAIR(mstats.ttl0_discard);
1584         p_stats->tpa_coalesced_pkts +=
1585                 HILO_64_REGPAIR(mstats.tpa_coalesced_pkts);
1586         p_stats->tpa_coalesced_events +=
1587                 HILO_64_REGPAIR(mstats.tpa_coalesced_events);
1588         p_stats->tpa_aborts_num += HILO_64_REGPAIR(mstats.tpa_aborts_num);
1589         p_stats->tpa_coalesced_bytes +=
1590                 HILO_64_REGPAIR(mstats.tpa_coalesced_bytes);
1591 }
1592
1593 static void __qed_get_vport_port_stats(struct qed_hwfn *p_hwfn,
1594                                        struct qed_ptt *p_ptt,
1595                                        struct qed_eth_stats *p_stats)
1596 {
1597         struct port_stats port_stats;
1598         int j;
1599
1600         memset(&port_stats, 0, sizeof(port_stats));
1601
1602         qed_memcpy_from(p_hwfn, p_ptt, &port_stats,
1603                         p_hwfn->mcp_info->port_addr +
1604                         offsetof(struct public_port, stats),
1605                         sizeof(port_stats));
1606
1607         p_stats->rx_64_byte_packets             += port_stats.eth.r64;
1608         p_stats->rx_65_to_127_byte_packets      += port_stats.eth.r127;
1609         p_stats->rx_128_to_255_byte_packets     += port_stats.eth.r255;
1610         p_stats->rx_256_to_511_byte_packets     += port_stats.eth.r511;
1611         p_stats->rx_512_to_1023_byte_packets    += port_stats.eth.r1023;
1612         p_stats->rx_1024_to_1518_byte_packets   += port_stats.eth.r1518;
1613         p_stats->rx_1519_to_1522_byte_packets   += port_stats.eth.r1522;
1614         p_stats->rx_1519_to_2047_byte_packets   += port_stats.eth.r2047;
1615         p_stats->rx_2048_to_4095_byte_packets   += port_stats.eth.r4095;
1616         p_stats->rx_4096_to_9216_byte_packets   += port_stats.eth.r9216;
1617         p_stats->rx_9217_to_16383_byte_packets  += port_stats.eth.r16383;
1618         p_stats->rx_crc_errors                  += port_stats.eth.rfcs;
1619         p_stats->rx_mac_crtl_frames             += port_stats.eth.rxcf;
1620         p_stats->rx_pause_frames                += port_stats.eth.rxpf;
1621         p_stats->rx_pfc_frames                  += port_stats.eth.rxpp;
1622         p_stats->rx_align_errors                += port_stats.eth.raln;
1623         p_stats->rx_carrier_errors              += port_stats.eth.rfcr;
1624         p_stats->rx_oversize_packets            += port_stats.eth.rovr;
1625         p_stats->rx_jabbers                     += port_stats.eth.rjbr;
1626         p_stats->rx_undersize_packets           += port_stats.eth.rund;
1627         p_stats->rx_fragments                   += port_stats.eth.rfrg;
1628         p_stats->tx_64_byte_packets             += port_stats.eth.t64;
1629         p_stats->tx_65_to_127_byte_packets      += port_stats.eth.t127;
1630         p_stats->tx_128_to_255_byte_packets     += port_stats.eth.t255;
1631         p_stats->tx_256_to_511_byte_packets     += port_stats.eth.t511;
1632         p_stats->tx_512_to_1023_byte_packets    += port_stats.eth.t1023;
1633         p_stats->tx_1024_to_1518_byte_packets   += port_stats.eth.t1518;
1634         p_stats->tx_1519_to_2047_byte_packets   += port_stats.eth.t2047;
1635         p_stats->tx_2048_to_4095_byte_packets   += port_stats.eth.t4095;
1636         p_stats->tx_4096_to_9216_byte_packets   += port_stats.eth.t9216;
1637         p_stats->tx_9217_to_16383_byte_packets  += port_stats.eth.t16383;
1638         p_stats->tx_pause_frames                += port_stats.eth.txpf;
1639         p_stats->tx_pfc_frames                  += port_stats.eth.txpp;
1640         p_stats->tx_lpi_entry_count             += port_stats.eth.tlpiec;
1641         p_stats->tx_total_collisions            += port_stats.eth.tncl;
1642         p_stats->rx_mac_bytes                   += port_stats.eth.rbyte;
1643         p_stats->rx_mac_uc_packets              += port_stats.eth.rxuca;
1644         p_stats->rx_mac_mc_packets              += port_stats.eth.rxmca;
1645         p_stats->rx_mac_bc_packets              += port_stats.eth.rxbca;
1646         p_stats->rx_mac_frames_ok               += port_stats.eth.rxpok;
1647         p_stats->tx_mac_bytes                   += port_stats.eth.tbyte;
1648         p_stats->tx_mac_uc_packets              += port_stats.eth.txuca;
1649         p_stats->tx_mac_mc_packets              += port_stats.eth.txmca;
1650         p_stats->tx_mac_bc_packets              += port_stats.eth.txbca;
1651         p_stats->tx_mac_ctrl_frames             += port_stats.eth.txcf;
1652         for (j = 0; j < 8; j++) {
1653                 p_stats->brb_truncates  += port_stats.brb.brb_truncate[j];
1654                 p_stats->brb_discards   += port_stats.brb.brb_discard[j];
1655         }
1656 }
1657
1658 static void __qed_get_vport_stats(struct qed_hwfn *p_hwfn,
1659                                   struct qed_ptt *p_ptt,
1660                                   struct qed_eth_stats *stats,
1661                                   u16 statistics_bin, bool b_get_port_stats)
1662 {
1663         __qed_get_vport_mstats(p_hwfn, p_ptt, stats, statistics_bin);
1664         __qed_get_vport_ustats(p_hwfn, p_ptt, stats, statistics_bin);
1665         __qed_get_vport_tstats(p_hwfn, p_ptt, stats, statistics_bin);
1666         __qed_get_vport_pstats(p_hwfn, p_ptt, stats, statistics_bin);
1667
1668         if (b_get_port_stats && p_hwfn->mcp_info)
1669                 __qed_get_vport_port_stats(p_hwfn, p_ptt, stats);
1670 }
1671
1672 static void _qed_get_vport_stats(struct qed_dev *cdev,
1673                                  struct qed_eth_stats *stats)
1674 {
1675         u8 fw_vport = 0;
1676         int i;
1677
1678         memset(stats, 0, sizeof(*stats));
1679
1680         for_each_hwfn(cdev, i) {
1681                 struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
1682                 struct qed_ptt *p_ptt = IS_PF(cdev) ? qed_ptt_acquire(p_hwfn)
1683                                                     : NULL;
1684
1685                 if (IS_PF(cdev)) {
1686                         /* The main vport is the first one, in relative indexing */
1687                         if (qed_fw_vport(p_hwfn, 0, &fw_vport)) {
1688                                 DP_ERR(p_hwfn, "No vport available!\n");
1689                                 goto out;
1690                         }
1691                 }
1692
1693                 if (IS_PF(cdev) && !p_ptt) {
1694                         DP_ERR(p_hwfn, "Failed to acquire ptt\n");
1695                         continue;
1696                 }
1697
1698                 __qed_get_vport_stats(p_hwfn, p_ptt, stats, fw_vport,
1699                                       IS_PF(cdev));
1700
1701 out:
1702                 if (IS_PF(cdev) && p_ptt)
1703                         qed_ptt_release(p_hwfn, p_ptt);
1704         }
1705 }
1706
1707 void qed_get_vport_stats(struct qed_dev *cdev, struct qed_eth_stats *stats)
1708 {
1709         u32 i;
1710
1711         if (!cdev) {
1712                 memset(stats, 0, sizeof(*stats));
1713                 return;
1714         }
1715
1716         _qed_get_vport_stats(cdev, stats);
1717
1718         if (!cdev->reset_stats)
1719                 return;
1720
1721         /* Subtract the statistics baseline */
1722         for (i = 0; i < sizeof(struct qed_eth_stats) / sizeof(u64); i++)
1723                 ((u64 *)stats)[i] -= ((u64 *)cdev->reset_stats)[i];
1724 }
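
/* Editorial note: the baseline subtraction above treats struct
 * qed_eth_stats as a flat array of u64 counters, so every counter the
 * caller sees is effectively
 *
 *      reported = current_reading - reading_at_last_reset;
 *
 * which is how qed_reset_vport_stats() below emulates clearing PORT
 * counters that the hardware does not actually reset.
 */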
1725
1726 /* zeroes V-PORT specific portion of stats (Port stats remains untouched) */
1727 void qed_reset_vport_stats(struct qed_dev *cdev)
1728 {
1729         int i;
1730
1731         for_each_hwfn(cdev, i) {
1732                 struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
1733                 struct eth_mstorm_per_queue_stat mstats;
1734                 struct eth_ustorm_per_queue_stat ustats;
1735                 struct eth_pstorm_per_queue_stat pstats;
1736                 struct qed_ptt *p_ptt = IS_PF(cdev) ? qed_ptt_acquire(p_hwfn)
1737                                                     : NULL;
1738                 u32 addr = 0, len = 0;
1739
1740                 if (IS_PF(cdev) && !p_ptt) {
1741                         DP_ERR(p_hwfn, "Failed to acquire ptt\n");
1742                         continue;
1743                 }
1744
1745                 memset(&mstats, 0, sizeof(mstats));
1746                 __qed_get_vport_mstats_addrlen(p_hwfn, &addr, &len, 0);
1747                 qed_memcpy_to(p_hwfn, p_ptt, addr, &mstats, len);
1748
1749                 memset(&ustats, 0, sizeof(ustats));
1750                 __qed_get_vport_ustats_addrlen(p_hwfn, &addr, &len, 0);
1751                 qed_memcpy_to(p_hwfn, p_ptt, addr, &ustats, len);
1752
1753                 memset(&pstats, 0, sizeof(pstats));
1754                 __qed_get_vport_pstats_addrlen(p_hwfn, &addr, &len, 0);
1755                 qed_memcpy_to(p_hwfn, p_ptt, addr, &pstats, len);
1756
1757                 if (IS_PF(cdev))
1758                         qed_ptt_release(p_hwfn, p_ptt);
1759         }
1760
1761         /* PORT statistics are not necessarily reset, so we need to
1762          * read and create a baseline for future statistics.
1763          */
1764         if (!cdev->reset_stats)
1765                 DP_INFO(cdev, "Reset stats not allocated\n");
1766         else
1767                 _qed_get_vport_stats(cdev, cdev->reset_stats);
1768 }
1769
1770 static int qed_fill_eth_dev_info(struct qed_dev *cdev,
1771                                  struct qed_dev_eth_info *info)
1772 {
1773         int i;
1774
1775         memset(info, 0, sizeof(*info));
1776
1777         info->num_tc = 1;
1778
1779         if (IS_PF(cdev)) {
1780                 int max_vf_vlan_filters = 0;
1781                 int max_vf_mac_filters = 0;
1782
1783                 if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) {
1784                         u16 num_queues = 0;
1785
1786                         /* Since the feature controls only queue-zones,
1787                          * make sure we have the contexts [rx, tx, xdp] to
1788                          * match.
1789                          */
1790                         for_each_hwfn(cdev, i) {
1791                                 struct qed_hwfn *hwfn = &cdev->hwfns[i];
1792                                 u16 l2_queues = (u16)FEAT_NUM(hwfn,
1793                                                               QED_PF_L2_QUE);
1794                                 u16 cids;
1795
1796                                 cids = hwfn->pf_params.eth_pf_params.num_cons;
1797                                 num_queues += min_t(u16, l2_queues, cids / 3);
1798                         }
1799
1800                         /* queues might theoretically be >256, but the
1801                          * interrupts' upper limit guarantees it fits in a u8.
1802                          */
1803                         if (cdev->int_params.fp_msix_cnt) {
1804                                 u8 irqs = cdev->int_params.fp_msix_cnt;
1805
1806                                 info->num_queues = (u8)min_t(u16,
1807                                                              num_queues, irqs);
1808                         }
1809                 } else {
1810                         info->num_queues = cdev->num_hwfns;
1811                 }
1812
1813                 if (IS_QED_SRIOV(cdev)) {
1814                         max_vf_vlan_filters = cdev->p_iov_info->total_vfs *
1815                                               QED_ETH_VF_NUM_VLAN_FILTERS;
1816                         max_vf_mac_filters = cdev->p_iov_info->total_vfs *
1817                                              QED_ETH_VF_NUM_MAC_FILTERS;
1818                 }
1819                 info->num_vlan_filters = RESC_NUM(QED_LEADING_HWFN(cdev),
1820                                                   QED_VLAN) -
1821                                          max_vf_vlan_filters;
1822                 info->num_mac_filters = RESC_NUM(QED_LEADING_HWFN(cdev),
1823                                                  QED_MAC) -
1824                                         max_vf_mac_filters;
1825
1826                 ether_addr_copy(info->port_mac,
1827                                 cdev->hwfns[0].hw_info.hw_mac_addr);
1828         } else {
1829                 qed_vf_get_num_rxqs(QED_LEADING_HWFN(cdev), &info->num_queues);
1830                 if (cdev->num_hwfns > 1) {
1831                         u8 queues = 0;
1832
1833                         qed_vf_get_num_rxqs(&cdev->hwfns[1], &queues);
1834                         info->num_queues += queues;
1835                 }
1836
1837                 qed_vf_get_num_vlan_filters(&cdev->hwfns[0],
1838                                             (u8 *)&info->num_vlan_filters);
1839                 qed_vf_get_num_mac_filters(&cdev->hwfns[0],
1840                                            (u8 *)&info->num_mac_filters);
1841                 qed_vf_get_port_mac(&cdev->hwfns[0], info->port_mac);
1842
1843                 info->is_legacy = !!cdev->hwfns[0].vf_iov_info->b_pre_fp_hsi;
1844         }
1845
1846         qed_fill_dev_info(cdev, &info->common);
1847
1848         if (IS_VF(cdev))
1849                 memset(info->common.hw_mac, 0, ETH_ALEN);
1850
1851         return 0;
1852 }
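
/* Illustrative sketch (editorial): with assumed numbers, the PF MSI-X
 * queue count above works out, per hwfn, as
 *
 *      l2_queues = 64   (FEAT_NUM(hwfn, QED_PF_L2_QUE))
 *      cids      = 96   (eth_pf_params.num_cons)
 *      num_queues += min(64, 96 / 3) = 32
 *
 * i.e. each queue-zone needs three CIDs (rx, tx, xdp), and the summed
 * result is then clamped to the number of fastpath MSI-X vectors.
 */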
1853
1854 static void qed_register_eth_ops(struct qed_dev *cdev,
1855                                  struct qed_eth_cb_ops *ops, void *cookie)
1856 {
1857         cdev->protocol_ops.eth = ops;
1858         cdev->ops_cookie = cookie;
1859
1860         /* For VF, we start bulletin reading */
1861         if (IS_VF(cdev))
1862                 qed_vf_start_iov_wq(cdev);
1863 }
1864
1865 static bool qed_check_mac(struct qed_dev *cdev, u8 *mac)
1866 {
1867         if (IS_PF(cdev))
1868                 return true;
1869
1870         return qed_vf_check_mac(&cdev->hwfns[0], mac);
1871 }
1872
1873 static int qed_start_vport(struct qed_dev *cdev,
1874                            struct qed_start_vport_params *params)
1875 {
1876         int rc, i;
1877
1878         for_each_hwfn(cdev, i) {
1879                 struct qed_sp_vport_start_params start = { 0 };
1880                 struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
1881
1882                 start.tpa_mode = params->gro_enable ? QED_TPA_MODE_GRO :
1883                                                         QED_TPA_MODE_NONE;
1884                 start.remove_inner_vlan = params->remove_inner_vlan;
1885                 start.only_untagged = true;     /* untagged only */
1886                 start.drop_ttl0 = params->drop_ttl0;
1887                 start.opaque_fid = p_hwfn->hw_info.opaque_fid;
1888                 start.concrete_fid = p_hwfn->hw_info.concrete_fid;
1889                 start.vport_id = params->vport_id;
1890                 start.max_buffers_per_cqe = 16;
1891                 start.mtu = params->mtu;
1892
1893                 rc = qed_sp_vport_start(p_hwfn, &start);
1894                 if (rc) {
1895                         DP_ERR(cdev, "Failed to start VPORT\n");
1896                         return rc;
1897                 }
1898
1899                 qed_hw_start_fastpath(p_hwfn);
1900
1901                 DP_VERBOSE(cdev, (QED_MSG_SPQ | NETIF_MSG_IFUP),
1902                            "Started V-PORT %d with MTU %d\n",
1903                            start.vport_id, start.mtu);
1904         }
1905
1906         if (params->clear_stats)
1907                 qed_reset_vport_stats(cdev);
1908
1909         return 0;
1910 }
1911
1912 static int qed_stop_vport(struct qed_dev *cdev, u8 vport_id)
1913 {
1914         int rc, i;
1915
1916         for_each_hwfn(cdev, i) {
1917                 struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
1918
1919                 rc = qed_sp_vport_stop(p_hwfn,
1920                                        p_hwfn->hw_info.opaque_fid, vport_id);
1921
1922                 if (rc) {
1923                         DP_ERR(cdev, "Failed to stop VPORT\n");
1924                         return rc;
1925                 }
1926         }
1927         return 0;
1928 }
1929
1930 static int qed_update_vport_rss(struct qed_dev *cdev,
1931                                 struct qed_update_vport_rss_params *input,
1932                                 struct qed_rss_params *rss)
1933 {
1934         int i, fn;
1935
1936         /* Update configuration with what's correct regardless of CMT */
1937         rss->update_rss_config = 1;
1938         rss->rss_enable = 1;
1939         rss->update_rss_capabilities = 1;
1940         rss->update_rss_ind_table = 1;
1941         rss->update_rss_key = 1;
1942         rss->rss_caps = input->rss_caps;
1943         memcpy(rss->rss_key, input->rss_key, QED_RSS_KEY_SIZE * sizeof(u32));
1944
1945         /* In the regular scenario, we'd simply take the input handlers.
1946          * But in CMT, we'd have to split the handlers according to the
1947          * engine they were configured on. We'd then have to understand
1948          * whether RSS is really required, since 2-queues on CMT doesn't
1949          * require RSS.
1950          */
1951         if (cdev->num_hwfns == 1) {
1952                 memcpy(rss->rss_ind_table,
1953                        input->rss_ind_table,
1954                        QED_RSS_IND_TABLE_SIZE * sizeof(void *));
1955                 rss->rss_table_size_log = 7;
1956                 return 0;
1957         }
1958
1959         /* Start by copying the non-specific information to the 2nd copy */
1960         memcpy(&rss[1], &rss[0], sizeof(struct qed_rss_params));
1961
1962         /* In CMT, distribute the entries round-robin between engines */
1963         for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i++) {
1964                 struct qed_queue_cid *cid = input->rss_ind_table[i];
1965                 struct qed_rss_params *t_rss;
1966
1967                 if (cid->p_owner == QED_LEADING_HWFN(cdev))
1968                         t_rss = &rss[0];
1969                 else
1970                         t_rss = &rss[1];
1971
1972                 t_rss->rss_ind_table[i / cdev->num_hwfns] = cid;
1973         }
1974
1975         /* Make sure RSS is actually required */
1976         for_each_hwfn(cdev, fn) {
1977                 for (i = 1; i < QED_RSS_IND_TABLE_SIZE / cdev->num_hwfns; i++) {
1978                         if (rss[fn].rss_ind_table[i] !=
1979                             rss[fn].rss_ind_table[0])
1980                                 break;
1981                 }
1982                 if (i == QED_RSS_IND_TABLE_SIZE / cdev->num_hwfns) {
1983                         DP_VERBOSE(cdev, NETIF_MSG_IFUP,
1984                                    "CMT - 1 queue per-hwfn; Disabling RSS\n");
1985                         return -EINVAL;
1986                 }
1987                 rss[fn].rss_table_size_log = 6;
1988         }
1989
1990         return 0;
1991 }
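
/* Illustrative sketch (editorial): on an assumed 2-hwfn (CMT) device,
 * the split above distributes the input indirection entries as
 *
 *      input->rss_ind_table[0] -> owning hwfn's table, slot 0 (0 / 2)
 *      input->rss_ind_table[1] -> owning hwfn's table, slot 0 (1 / 2)
 *      input->rss_ind_table[2] -> owning hwfn's table, slot 1 (2 / 2)
 *      ...
 *
 * and each engine advertises a half-size table (rss_table_size_log 6,
 * i.e. 64 entries, instead of 7 / 128 in the single-engine case).
 */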
1992
1993 static int qed_update_vport(struct qed_dev *cdev,
1994                             struct qed_update_vport_params *params)
1995 {
1996         struct qed_sp_vport_update_params sp_params;
1997         struct qed_rss_params *rss;
1998         int rc = 0, i;
1999
2000         if (!cdev)
2001                 return -ENODEV;
2002
2003         rss = vzalloc(sizeof(*rss) * cdev->num_hwfns);
2004         if (!rss)
2005                 return -ENOMEM;
2006
2007         memset(&sp_params, 0, sizeof(sp_params));
2008
2009         /* Translate protocol params into sp params */
2010         sp_params.vport_id = params->vport_id;
2011         sp_params.update_vport_active_rx_flg = params->update_vport_active_flg;
2012         sp_params.update_vport_active_tx_flg = params->update_vport_active_flg;
2013         sp_params.vport_active_rx_flg = params->vport_active_flg;
2014         sp_params.vport_active_tx_flg = params->vport_active_flg;
2015         sp_params.update_tx_switching_flg = params->update_tx_switching_flg;
2016         sp_params.tx_switching_flg = params->tx_switching_flg;
2017         sp_params.accept_any_vlan = params->accept_any_vlan;
2018         sp_params.update_accept_any_vlan_flg =
2019                 params->update_accept_any_vlan_flg;
2020
2021         /* Prepare the RSS configuration */
2022         if (params->update_rss_flg)
2023                 if (qed_update_vport_rss(cdev, &params->rss_params, rss))
2024                         params->update_rss_flg = 0;
2025
2026         for_each_hwfn(cdev, i) {
2027                 struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
2028
2029                 if (params->update_rss_flg)
2030                         sp_params.rss_params = &rss[i];
2031
2032                 sp_params.opaque_fid = p_hwfn->hw_info.opaque_fid;
2033                 rc = qed_sp_vport_update(p_hwfn, &sp_params,
2034                                          QED_SPQ_MODE_EBLOCK,
2035                                          NULL);
2036                 if (rc) {
2037                         DP_ERR(cdev, "Failed to update VPORT\n");
2038                         goto out;
2039                 }
2040
2041                 DP_VERBOSE(cdev, (QED_MSG_SPQ | NETIF_MSG_IFUP),
2042                            "Updated V-PORT %d: active_flag %d [update %d]\n",
2043                            params->vport_id, params->vport_active_flg,
2044                            params->update_vport_active_flg);
2045         }
2046
2047 out:
2048         vfree(rss);
2049         return rc;
2050 }
2051
2052 static int qed_start_rxq(struct qed_dev *cdev,
2053                          u8 rss_num,
2054                          struct qed_queue_start_common_params *p_params,
2055                          u16 bd_max_bytes,
2056                          dma_addr_t bd_chain_phys_addr,
2057                          dma_addr_t cqe_pbl_addr,
2058                          u16 cqe_pbl_size,
2059                          struct qed_rxq_start_ret_params *ret_params)
2060 {
2061         struct qed_hwfn *p_hwfn;
2062         int rc, hwfn_index;
2063
2064         hwfn_index = rss_num % cdev->num_hwfns;
2065         p_hwfn = &cdev->hwfns[hwfn_index];
2066
2067         p_params->queue_id = p_params->queue_id / cdev->num_hwfns;
2068         p_params->stats_id = p_params->vport_id;
2069
2070         rc = qed_eth_rx_queue_start(p_hwfn,
2071                                     p_hwfn->hw_info.opaque_fid,
2072                                     p_params,
2073                                     bd_max_bytes,
2074                                     bd_chain_phys_addr,
2075                                     cqe_pbl_addr, cqe_pbl_size, ret_params);
2076         if (rc) {
2077                 DP_ERR(cdev, "Failed to start RXQ#%d\n", p_params->queue_id);
2078                 return rc;
2079         }
2080
2081         DP_VERBOSE(cdev, (QED_MSG_SPQ | NETIF_MSG_IFUP),
2082                    "Started RX-Q %d [rss_num %d] on V-PORT %d and SB %d\n",
2083                    p_params->queue_id, rss_num, p_params->vport_id,
2084                    p_params->sb);
2085
2086         return 0;
2087 }
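
/* Editorial note: queues are spread round-robin across engines, so a
 * global queue id maps as
 *
 *      hwfn_index = rss_num % cdev->num_hwfns;   // which engine
 *      queue_id   = queue_id / cdev->num_hwfns;  // engine-local id
 *
 * e.g. with two hwfns, global queues 0,1,2,3 become local 0,0,1,1 on
 * engines 0,1,0,1. qed_start_txq() below applies the same mapping.
 */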
2088
2089 static int qed_stop_rxq(struct qed_dev *cdev, u8 rss_id, void *handle)
2090 {
2091         int rc, hwfn_index;
2092         struct qed_hwfn *p_hwfn;
2093
2094         hwfn_index = rss_id % cdev->num_hwfns;
2095         p_hwfn = &cdev->hwfns[hwfn_index];
2096
2097         rc = qed_eth_rx_queue_stop(p_hwfn, handle, false, false);
2098         if (rc) {
2099                 DP_ERR(cdev, "Failed to stop RXQ#%02x\n", rss_id);
2100                 return rc;
2101         }
2102
2103         return 0;
2104 }
2105
2106 static int qed_start_txq(struct qed_dev *cdev,
2107                          u8 rss_num,
2108                          struct qed_queue_start_common_params *p_params,
2109                          dma_addr_t pbl_addr,
2110                          u16 pbl_size,
2111                          struct qed_txq_start_ret_params *ret_params)
2112 {
2113         struct qed_hwfn *p_hwfn;
2114         int rc, hwfn_index;
2115
2116         hwfn_index = rss_num % cdev->num_hwfns;
2117         p_hwfn = &cdev->hwfns[hwfn_index];
2118         p_params->queue_id = p_params->queue_id / cdev->num_hwfns;
2119         p_params->stats_id = p_params->vport_id;
2120
2121         rc = qed_eth_tx_queue_start(p_hwfn,
2122                                     p_hwfn->hw_info.opaque_fid,
2123                                     p_params, 0,
2124                                     pbl_addr, pbl_size, ret_params);
2125
2126         if (rc) {
2127                 DP_ERR(cdev, "Failed to start TXQ#%d\n", p_params->queue_id);
2128                 return rc;
2129         }
2130
2131         DP_VERBOSE(cdev, (QED_MSG_SPQ | NETIF_MSG_IFUP),
2132                    "Started TX-Q %d [rss_num %d] on V-PORT %d and SB %d\n",
2133                    p_params->queue_id, rss_num, p_params->vport_id,
2134                    p_params->sb);
2135
2136         return 0;
2137 }
2138
2139 #define QED_HW_STOP_RETRY_LIMIT (10)
2140 static int qed_fastpath_stop(struct qed_dev *cdev)
2141 {
2142         qed_hw_stop_fastpath(cdev);
2143
2144         return 0;
2145 }
2146
2147 static int qed_stop_txq(struct qed_dev *cdev, u8 rss_id, void *handle)
2148 {
2149         struct qed_hwfn *p_hwfn;
2150         int rc, hwfn_index;
2151
2152         hwfn_index = rss_id % cdev->num_hwfns;
2153         p_hwfn = &cdev->hwfns[hwfn_index];
2154
2155         rc = qed_eth_tx_queue_stop(p_hwfn, handle);
2156         if (rc) {
2157                 DP_ERR(cdev, "Failed to stop TXQ#%02x\n", rss_id);
2158                 return rc;
2159         }
2160
2161         return 0;
2162 }
2163
2164 static int qed_tunn_configure(struct qed_dev *cdev,
2165                               struct qed_tunn_params *tunn_params)
2166 {
2167         struct qed_tunn_update_params tunn_info;
2168         int i, rc;
2169
2170         if (IS_VF(cdev))
2171                 return 0;
2172
2173         memset(&tunn_info, 0, sizeof(tunn_info));
2174         if (tunn_params->update_vxlan_port == 1) {
2175                 tunn_info.update_vxlan_udp_port = 1;
2176                 tunn_info.vxlan_udp_port = tunn_params->vxlan_port;
2177         }
2178
2179         if (tunn_params->update_geneve_port == 1) {
2180                 tunn_info.update_geneve_udp_port = 1;
2181                 tunn_info.geneve_udp_port = tunn_params->geneve_port;
2182         }
2183
2184         for_each_hwfn(cdev, i) {
2185                 struct qed_hwfn *hwfn = &cdev->hwfns[i];
2186
2187                 rc = qed_sp_pf_update_tunn_cfg(hwfn, &tunn_info,
2188                                                QED_SPQ_MODE_EBLOCK, NULL);
2189
2190                 if (rc)
2191                         return rc;
2192         }
2193
2194         return 0;
2195 }
2196
2197 static int qed_configure_filter_rx_mode(struct qed_dev *cdev,
2198                                         enum qed_filter_rx_mode_type type)
2199 {
2200         struct qed_filter_accept_flags accept_flags;
2201
2202         memset(&accept_flags, 0, sizeof(accept_flags));
2203
2204         accept_flags.update_rx_mode_config = 1;
2205         accept_flags.update_tx_mode_config = 1;
2206         accept_flags.rx_accept_filter = QED_ACCEPT_UCAST_MATCHED |
2207                                         QED_ACCEPT_MCAST_MATCHED |
2208                                         QED_ACCEPT_BCAST;
2209         accept_flags.tx_accept_filter = QED_ACCEPT_UCAST_MATCHED |
2210                                         QED_ACCEPT_MCAST_MATCHED |
2211                                         QED_ACCEPT_BCAST;
2212
2213         if (type == QED_FILTER_RX_MODE_TYPE_PROMISC) {
2214                 accept_flags.rx_accept_filter |= QED_ACCEPT_UCAST_UNMATCHED |
2215                                                  QED_ACCEPT_MCAST_UNMATCHED;
2216                 accept_flags.tx_accept_filter |= QED_ACCEPT_MCAST_UNMATCHED;
2217         } else if (type == QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC) {
2218                 accept_flags.rx_accept_filter |= QED_ACCEPT_MCAST_UNMATCHED;
2219                 accept_flags.tx_accept_filter |= QED_ACCEPT_MCAST_UNMATCHED;
2220         }
2221
2222         return qed_filter_accept_cmd(cdev, 0, accept_flags, false, false,
2223                                      QED_SPQ_MODE_CB, NULL);
2224 }
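
/* Editorial note: the resulting rx-side accept masks are
 *
 *      base (matched-only) : UCAST_MATCHED | MCAST_MATCHED | BCAST
 *      MULTI_PROMISC       : base | MCAST_UNMATCHED
 *      PROMISC             : base | UCAST_UNMATCHED | MCAST_UNMATCHED
 *
 * Any other type value (assumed to be the "regular" mode) keeps the
 * matched-only base mask.
 */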
2225
2226 static int qed_configure_filter_ucast(struct qed_dev *cdev,
2227                                       struct qed_filter_ucast_params *params)
2228 {
2229         struct qed_filter_ucast ucast;
2230
2231         if (!params->vlan_valid && !params->mac_valid) {
2232                 DP_NOTICE(cdev,
2233                           "Tried configuring a unicast filter, but neither MAC nor VLAN is set\n");
2234                 return -EINVAL;
2235         }
2236
2237         memset(&ucast, 0, sizeof(ucast));
2238         switch (params->type) {
2239         case QED_FILTER_XCAST_TYPE_ADD:
2240                 ucast.opcode = QED_FILTER_ADD;
2241                 break;
2242         case QED_FILTER_XCAST_TYPE_DEL:
2243                 ucast.opcode = QED_FILTER_REMOVE;
2244                 break;
2245         case QED_FILTER_XCAST_TYPE_REPLACE:
2246                 ucast.opcode = QED_FILTER_REPLACE;
2247                 break;
2248         default:
2249                 DP_NOTICE(cdev, "Unknown unicast filter type %d\n", params->type);
2250                 return -EINVAL;
2251         }
2252
2253         if (params->vlan_valid && params->mac_valid) {
2254                 ucast.type = QED_FILTER_MAC_VLAN;
2255                 ether_addr_copy(ucast.mac, params->mac);
2256                 ucast.vlan = params->vlan;
2257         } else if (params->mac_valid) {
2258                 ucast.type = QED_FILTER_MAC;
2259                 ether_addr_copy(ucast.mac, params->mac);
2260         } else {
2261                 ucast.type = QED_FILTER_VLAN;
2262                 ucast.vlan = params->vlan;
2263         }
2264
2265         ucast.is_rx_filter = true;
2266         ucast.is_tx_filter = true;
2267
2268         return qed_filter_ucast_cmd(cdev, &ucast, QED_SPQ_MODE_CB, NULL);
2269 }
2270
2271 static int qed_configure_filter_mcast(struct qed_dev *cdev,
2272                                       struct qed_filter_mcast_params *params)
2273 {
2274         struct qed_filter_mcast mcast;
2275         int i;
2276
2277         memset(&mcast, 0, sizeof(mcast));
2278         switch (params->type) {
2279         case QED_FILTER_XCAST_TYPE_ADD:
2280                 mcast.opcode = QED_FILTER_ADD;
2281                 break;
2282         case QED_FILTER_XCAST_TYPE_DEL:
2283                 mcast.opcode = QED_FILTER_REMOVE;
2284                 break;
2285         default:
2286                 DP_NOTICE(cdev, "Unknown multicast filter type %d\n", params->type);
2287                 return -EINVAL;
2288         }
2289
2290         mcast.num_mc_addrs = params->num;
2291         for (i = 0; i < mcast.num_mc_addrs; i++)
2292                 ether_addr_copy(mcast.mac[i], params->mac[i]);
2293
2294         return qed_filter_mcast_cmd(cdev, &mcast, QED_SPQ_MODE_CB, NULL);
2295 }
2296
2297 static int qed_configure_filter(struct qed_dev *cdev,
2298                                 struct qed_filter_params *params)
2299 {
2300         enum qed_filter_rx_mode_type accept_flags;
2301
2302         switch (params->type) {
2303         case QED_FILTER_TYPE_UCAST:
2304                 return qed_configure_filter_ucast(cdev, &params->filter.ucast);
2305         case QED_FILTER_TYPE_MCAST:
2306                 return qed_configure_filter_mcast(cdev, &params->filter.mcast);
2307         case QED_FILTER_TYPE_RX_MODE:
2308                 accept_flags = params->filter.accept_flags;
2309                 return qed_configure_filter_rx_mode(cdev, accept_flags);
2310         default:
2311                 DP_NOTICE(cdev, "Unknown filter type %d\n", (int)params->type);
2312                 return -EINVAL;
2313         }
2314 }
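
/* Illustrative sketch (editorial): a protocol driver adding a unicast
 * MAC through the dispatcher above; 'dev_addr' is an assumed
 * caller-owned MAC and error handling is omitted.
 *
 *      struct qed_filter_params params;
 *
 *      memset(&params, 0, sizeof(params));
 *      params.type = QED_FILTER_TYPE_UCAST;
 *      params.filter.ucast.type = QED_FILTER_XCAST_TYPE_ADD;
 *      params.filter.ucast.mac_valid = true;
 *      ether_addr_copy(params.filter.ucast.mac, dev_addr);
 *
 *      rc = qed_configure_filter(cdev, &params);
 */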
2315
2316 static int qed_fp_cqe_completion(struct qed_dev *dev,
2317                                  u8 rss_id, struct eth_slow_path_rx_cqe *cqe)
2318 {
2319         return qed_eth_cqe_completion(&dev->hwfns[rss_id % dev->num_hwfns],
2320                                       cqe);
2321 }
2322
2323 #ifdef CONFIG_QED_SRIOV
2324 extern const struct qed_iov_hv_ops qed_iov_ops_pass;
2325 #endif
2326
2327 #ifdef CONFIG_DCB
2328 extern const struct qed_eth_dcbnl_ops qed_dcbnl_ops_pass;
2329 #endif
2330
2331 static const struct qed_eth_ops qed_eth_ops_pass = {
2332         .common = &qed_common_ops_pass,
2333 #ifdef CONFIG_QED_SRIOV
2334         .iov = &qed_iov_ops_pass,
2335 #endif
2336 #ifdef CONFIG_DCB
2337         .dcb = &qed_dcbnl_ops_pass,
2338 #endif
2339         .fill_dev_info = &qed_fill_eth_dev_info,
2340         .register_ops = &qed_register_eth_ops,
2341         .check_mac = &qed_check_mac,
2342         .vport_start = &qed_start_vport,
2343         .vport_stop = &qed_stop_vport,
2344         .vport_update = &qed_update_vport,
2345         .q_rx_start = &qed_start_rxq,
2346         .q_rx_stop = &qed_stop_rxq,
2347         .q_tx_start = &qed_start_txq,
2348         .q_tx_stop = &qed_stop_txq,
2349         .filter_config = &qed_configure_filter,
2350         .fastpath_stop = &qed_fastpath_stop,
2351         .eth_cqe_completion = &qed_fp_cqe_completion,
2352         .get_vport_stats = &qed_get_vport_stats,
2353         .tunn_config = &qed_tunn_configure,
2354 };
2355
2356 const struct qed_eth_ops *qed_get_eth_ops(void)
2357 {
2358         return &qed_eth_ops_pass;
2359 }
2360 EXPORT_SYMBOL(qed_get_eth_ops);
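
/* Illustrative sketch (editorial): how a dependent L2 module (e.g.
 * qede) is expected to bind to these ops; my_cb_ops and my_cookie are
 * placeholders, and error handling is omitted.
 *
 *      const struct qed_eth_ops *ops = qed_get_eth_ops();
 *      struct qed_dev_eth_info info;
 *
 *      ops->fill_dev_info(cdev, &info);
 *      ops->register_ops(cdev, &my_cb_ops, my_cookie);
 */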
2361
2362 void qed_put_eth_ops(void)
2363 {
2364         /* TODO - reference count for module? */
2365 }
2366 EXPORT_SYMBOL(qed_put_eth_ops);