nfp: add nfp_cppcore_pcie_unit() helper
drivers/net/ethernet/netronome/nfp/nfp_net_main.c
/*
 * Copyright (C) 2015-2017 Netronome Systems, Inc.
 *
 * This software is dual licensed under the GNU General Public License
 * Version 2, June 1991 as shown in the file COPYING in the top-level
 * directory of this source tree or the BSD 2-Clause License provided
 * below.  You have the option to license this software under the
 * complete terms of either license.
 *
 * The BSD 2-Clause License:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      1. Redistributions of source code must retain the above
 *         copyright notice, this list of conditions and the following
 *         disclaimer.
 *
 *      2. Redistributions in binary form must reproduce the above
 *         copyright notice, this list of conditions and the following
 *         disclaimer in the documentation and/or other materials
 *         provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

/*
 * nfp_net_main.c
 * Netronome network device driver: Main entry point
 * Authors: Jakub Kicinski <jakub.kicinski@netronome.com>
 *          Alejandro Lucero <alejandro.lucero@netronome.com>
 *          Jason McMullan <jason.mcmullan@netronome.com>
 *          Rolf Neugebauer <rolf.neugebauer@netronome.com>
 */

#include <linux/etherdevice.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/pci_regs.h>
#include <linux/msi.h>
#include <linux/random.h>
#include <linux/rtnetlink.h>

#include "nfpcore/nfp.h"
#include "nfpcore/nfp_cpp.h"
#include "nfpcore/nfp_nffw.h"
#include "nfpcore/nfp_nsp.h"
#include "nfpcore/nfp6000_pcie.h"

#include "nfp_net_ctrl.h"
#include "nfp_net.h"
#include "nfp_main.h"

#define NFP_PF_CSR_SLICE_SIZE	(32 * 1024)

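/* Note on nfp_is_ready(): the driver only treats a hwinfo "board.state"
 * value of 15 as "board initialization complete"; a missing or unparsable
 * entry is treated as not ready, and nfp_net_pci_probe() below will bail
 * out in that case.
 */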
static int nfp_is_ready(struct nfp_cpp *cpp)
{
	const char *cp;
	long state;
	int err;

	cp = nfp_hwinfo_lookup(cpp, "board.state");
	if (!cp)
		return 0;

	err = kstrtol(cp, 0, &state);
	if (err < 0)
		return 0;

	return state == 15;
}

/**
 * nfp_net_map_area() - Helper function to map an area
 * @cpp:    NFP CPP handle
 * @name:   Name for the area
 * @isl:    CPP island
 * @target: CPP target
 * @addr:   CPP address
 * @size:   Size of the area
 * @area:   Area handle (returned).
 *
 * This function is primarily to simplify the code in the main probe
 * function. To undo the effect of this function, call
 * @nfp_cpp_area_release_free(*area);
 *
 * Return: Pointer to memory mapped area or ERR_PTR
 */
static u8 __iomem *nfp_net_map_area(struct nfp_cpp *cpp,
				    const char *name, int isl, int target,
				    unsigned long long addr, unsigned long size,
				    struct nfp_cpp_area **area)
{
	u8 __iomem *res;
	u32 dest;
	int err;

	dest = NFP_CPP_ISLAND_ID(target, NFP_CPP_ACTION_RW, 0, isl);

	*area = nfp_cpp_area_alloc_with_name(cpp, dest, name, addr, size);
	if (!*area) {
		err = -EIO;
		goto err_area;
	}

	err = nfp_cpp_area_acquire(*area);
	if (err < 0)
		goto err_acquire;

	res = nfp_cpp_area_iomem(*area);
	if (!res) {
		err = -EIO;
		goto err_map;
	}

	return res;

err_map:
	nfp_cpp_area_release(*area);
err_acquire:
	nfp_cpp_area_free(*area);
err_area:
	return (u8 __iomem *)ERR_PTR(err);
}

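/* Illustrative pairing for nfp_net_map_area() (a sketch, not a call the
 * driver makes in this exact form): callers keep the returned
 * struct nfp_cpp_area pointer and undo the mapping with
 * nfp_cpp_area_release_free(), e.g.
 *
 *	struct nfp_cpp_area *area;
 *	u8 __iomem *bar;
 *
 *	bar = nfp_net_map_area(cpp, "example", 0, 0, addr, size, &area);
 *	if (IS_ERR(bar))
 *		return PTR_ERR(bar);
 *	...
 *	nfp_cpp_area_release_free(area);
 */
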
/**
 * nfp_net_get_mac_addr() - Get the MAC address.
 * @nn:  NFP Network structure
 * @cpp: NFP CPP handle
 * @id:  NFP port id
 *
 * First try to get the MAC address from NSP ETH table. If that
 * fails try HWInfo.  As a last resort generate a random address.
 */
static void
nfp_net_get_mac_addr(struct nfp_net *nn, struct nfp_cpp *cpp, unsigned int id)
{
	struct nfp_net_dp *dp = &nn->dp;
	u8 mac_addr[ETH_ALEN];
	const char *mac_str;
	char name[32];

	if (nn->eth_port) {
		ether_addr_copy(dp->netdev->dev_addr, nn->eth_port->mac_addr);
		ether_addr_copy(dp->netdev->perm_addr, nn->eth_port->mac_addr);
		return;
	}

	snprintf(name, sizeof(name), "eth%d.mac", id);

	mac_str = nfp_hwinfo_lookup(cpp, name);
	if (!mac_str) {
		dev_warn(dp->dev, "Can't lookup MAC address. Generate\n");
		eth_hw_addr_random(dp->netdev);
		return;
	}

	if (sscanf(mac_str, "%02hhx:%02hhx:%02hhx:%02hhx:%02hhx:%02hhx",
		   &mac_addr[0], &mac_addr[1], &mac_addr[2],
		   &mac_addr[3], &mac_addr[4], &mac_addr[5]) != 6) {
		dev_warn(dp->dev,
			 "Can't parse MAC address (%s). Generate.\n", mac_str);
		eth_hw_addr_random(dp->netdev);
		return;
	}

	ether_addr_copy(dp->netdev->dev_addr, mac_addr);
	ether_addr_copy(dp->netdev->perm_addr, mac_addr);
}

static struct nfp_eth_table_port *
nfp_net_find_port(struct nfp_eth_table *eth_tbl, unsigned int id)
{
	int i;

	for (i = 0; eth_tbl && i < eth_tbl->count; i++)
		if (eth_tbl->ports[i].eth_index == id)
			return &eth_tbl->ports[i];

	return NULL;
}

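/* The firmware publishes the number of ports for this PF through the
 * "nfd_cfg_pf%u_num_ports" run-time symbol; if the symbol is absent the
 * driver assumes a single port.
 */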
static unsigned int nfp_net_pf_get_num_ports(struct nfp_pf *pf)
{
	char name[256];
	int err = 0;
	u64 val;

	snprintf(name, sizeof(name), "nfd_cfg_pf%u_num_ports",
		 nfp_cppcore_pcie_unit(pf->cpp));

	val = nfp_rtsym_read_le(pf->cpp, name, &err);
	/* Default to one port */
	if (err) {
		if (err != -ENOENT)
			nfp_err(pf->cpp, "Unable to read adapter port count\n");
		val = 1;
	}

	return val;
}

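/* nfp_net_pf_total_qcs() walks the per-port slices of the control BAR
 * (NFP_PF_CSR_SLICE_SIZE apart) and returns how many queue controller
 * pointers have to be mapped to cover every port, i.e. the distance from
 * the first port's start queue to the end of the last port's queues.
 * A return value of 0 means the firmware laid the queues out in an
 * unexpected (non-increasing) order.
 */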
static unsigned int
nfp_net_pf_total_qcs(struct nfp_pf *pf, void __iomem *ctrl_bar,
		     unsigned int stride, u32 start_off, u32 num_off)
{
	unsigned int i, min_qc, max_qc;

	min_qc = readl(ctrl_bar + start_off);
	max_qc = min_qc;

	for (i = 0; i < pf->num_ports; i++) {
		/* To make our lives simpler only accept configuration where
		 * queues are allocated to PFs in order (queues of PFn all have
		 * indexes lower than PFn+1).
		 */
		if (max_qc > readl(ctrl_bar + start_off))
			return 0;

		max_qc = readl(ctrl_bar + start_off);
		max_qc += readl(ctrl_bar + num_off) * stride;
		ctrl_bar += NFP_PF_CSR_SLICE_SIZE;
	}

	return max_qc - min_qc;
}

static u8 __iomem *nfp_net_pf_map_ctrl_bar(struct nfp_pf *pf)
{
	const struct nfp_rtsym *ctrl_sym;
	u8 __iomem *ctrl_bar;
	char pf_symbol[256];

	snprintf(pf_symbol, sizeof(pf_symbol), "_pf%u_net_bar0",
		 nfp_cppcore_pcie_unit(pf->cpp));

	ctrl_sym = nfp_rtsym_lookup(pf->cpp, pf_symbol);
	if (!ctrl_sym) {
		dev_err(&pf->pdev->dev,
			"Failed to find PF BAR0 symbol %s\n", pf_symbol);
		return NULL;
	}

	if (ctrl_sym->size < pf->num_ports * NFP_PF_CSR_SLICE_SIZE) {
		dev_err(&pf->pdev->dev,
			"PF BAR0 too small to contain %d ports\n",
			pf->num_ports);
		return NULL;
	}

	ctrl_bar = nfp_net_map_area(pf->cpp, "net.ctrl",
				    ctrl_sym->domain, ctrl_sym->target,
				    ctrl_sym->addr, ctrl_sym->size,
				    &pf->ctrl_area);
	if (IS_ERR(ctrl_bar)) {
		dev_err(&pf->pdev->dev, "Failed to map PF BAR0: %ld\n",
			PTR_ERR(ctrl_bar));
		return NULL;
	}

	return ctrl_bar;
}

static void nfp_net_pf_free_netdevs(struct nfp_pf *pf)
{
	struct nfp_net *nn;

	while (!list_empty(&pf->ports)) {
		nn = list_first_entry(&pf->ports, struct nfp_net, port_list);
		list_del(&nn->port_list);
		pf->num_netdevs--;

		nfp_net_netdev_free(nn);
	}
}

static struct nfp_net *
nfp_net_pf_alloc_port_netdev(struct nfp_pf *pf, void __iomem *ctrl_bar,
			     void __iomem *tx_bar, void __iomem *rx_bar,
			     int stride, struct nfp_net_fw_version *fw_ver,
			     struct nfp_eth_table_port *eth_port)
{
	u32 n_tx_rings, n_rx_rings;
	struct nfp_net *nn;

	n_tx_rings = readl(ctrl_bar + NFP_NET_CFG_MAX_TXRINGS);
	n_rx_rings = readl(ctrl_bar + NFP_NET_CFG_MAX_RXRINGS);

	/* Allocate and initialise the netdev */
	nn = nfp_net_netdev_alloc(pf->pdev, n_tx_rings, n_rx_rings);
	if (IS_ERR(nn))
		return nn;

	nn->cpp = pf->cpp;
	nn->fw_ver = *fw_ver;
	nn->dp.ctrl_bar = ctrl_bar;
	nn->tx_bar = tx_bar;
	nn->rx_bar = rx_bar;
	nn->dp.is_vf = 0;
	nn->stride_rx = stride;
	nn->stride_tx = stride;
	nn->eth_port = eth_port;

	return nn;
}

static int
nfp_net_pf_init_port_netdev(struct nfp_pf *pf, struct nfp_net *nn,
			    unsigned int id)
{
	int err;

	/* Get MAC address */
	nfp_net_get_mac_addr(nn, pf->cpp, id);

	/* Get ME clock frequency from ctrl BAR
	 * XXX for now frequency is hardcoded until we figure out how
	 * to get the value from nfp-hwinfo into ctrl bar
	 */
	nn->me_freq_mhz = 1200;

	err = nfp_net_netdev_init(nn->dp.netdev);
	if (err)
		return err;

	nfp_net_debugfs_port_add(nn, pf->ddir, id);

	nfp_net_info(nn);

	return 0;
}

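/* Allocate one netdev per PF port.  Each port's control registers live in
 * its own NFP_PF_CSR_SLICE_SIZE slice of the control BAR, and its queue
 * memory is found by advancing tx_bar/rx_bar by the difference between this
 * port's START_TXQ/START_RXQ and the previous port's.  Ports whose
 * configuration changed (eth_port->override_changed) are skipped with a
 * warning; they need a reboot before they become operational.
 */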
static int
nfp_net_pf_alloc_netdevs(struct nfp_pf *pf, void __iomem *ctrl_bar,
			 void __iomem *tx_bar, void __iomem *rx_bar,
			 int stride, struct nfp_net_fw_version *fw_ver)
{
	u32 prev_tx_base, prev_rx_base, tgt_tx_base, tgt_rx_base;
	struct nfp_eth_table_port *eth_port;
	struct nfp_net *nn;
	unsigned int i;
	int err;

	prev_tx_base = readl(ctrl_bar + NFP_NET_CFG_START_TXQ);
	prev_rx_base = readl(ctrl_bar + NFP_NET_CFG_START_RXQ);

	for (i = 0; i < pf->num_ports; i++) {
		tgt_tx_base = readl(ctrl_bar + NFP_NET_CFG_START_TXQ);
		tgt_rx_base = readl(ctrl_bar + NFP_NET_CFG_START_RXQ);
		tx_bar += (tgt_tx_base - prev_tx_base) * NFP_QCP_QUEUE_ADDR_SZ;
		rx_bar += (tgt_rx_base - prev_rx_base) * NFP_QCP_QUEUE_ADDR_SZ;
		prev_tx_base = tgt_tx_base;
		prev_rx_base = tgt_rx_base;

		eth_port = nfp_net_find_port(pf->eth_tbl, i);
		if (eth_port && eth_port->override_changed) {
			nfp_warn(pf->cpp, "Config changed for port #%d, reboot required before port will be operational\n", i);
		} else {
			nn = nfp_net_pf_alloc_port_netdev(pf, ctrl_bar, tx_bar,
							  rx_bar, stride,
							  fw_ver, eth_port);
			if (IS_ERR(nn)) {
				err = PTR_ERR(nn);
				goto err_free_prev;
			}
			list_add_tail(&nn->port_list, &pf->ports);
			pf->num_netdevs++;
		}

		ctrl_bar += NFP_PF_CSR_SLICE_SIZE;
	}

	if (list_empty(&pf->ports))
		return -ENODEV;

	return 0;

err_free_prev:
	nfp_net_pf_free_netdevs(pf);
	return err;
}

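/* nfp_net_pf_spawn_netdevs() ties the above together: allocate the per-port
 * netdevs, request one MSI-X vector pool for the whole PF (at least
 * NFP_NET_MIN_PORT_IRQS per netdev, at most the non-queue vectors plus one
 * per ring vector for each port), spread the vectors across the ports, and
 * finally register each netdev.  On failure everything set up so far is
 * torn down in reverse order.
 */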
static int
nfp_net_pf_spawn_netdevs(struct nfp_pf *pf,
			 void __iomem *ctrl_bar, void __iomem *tx_bar,
			 void __iomem *rx_bar, int stride,
			 struct nfp_net_fw_version *fw_ver)
{
	unsigned int id, wanted_irqs, num_irqs, ports_left, irqs_left;
	struct nfp_net *nn;
	int err;

	/* Allocate the netdevs and do basic init */
	err = nfp_net_pf_alloc_netdevs(pf, ctrl_bar, tx_bar, rx_bar,
				       stride, fw_ver);
	if (err)
		return err;

	/* Get MSI-X vectors */
	wanted_irqs = 0;
	list_for_each_entry(nn, &pf->ports, port_list)
		wanted_irqs += NFP_NET_NON_Q_VECTORS + nn->dp.num_r_vecs;
	pf->irq_entries = kcalloc(wanted_irqs, sizeof(*pf->irq_entries),
				  GFP_KERNEL);
	if (!pf->irq_entries) {
		err = -ENOMEM;
		goto err_nn_free;
	}

	num_irqs = nfp_net_irqs_alloc(pf->pdev, pf->irq_entries,
				      NFP_NET_MIN_PORT_IRQS * pf->num_netdevs,
				      wanted_irqs);
	if (!num_irqs) {
		/* nn is no longer a valid port here (the walk above ran to
		 * completion), so report the error against the device.
		 */
		nfp_warn(pf->cpp, "Unable to allocate MSI-X Vectors. Exiting\n");
		err = -ENOMEM;
		goto err_vec_free;
	}

	/* Distribute IRQs to ports */
	irqs_left = num_irqs;
	ports_left = pf->num_netdevs;
	list_for_each_entry(nn, &pf->ports, port_list) {
		unsigned int n;

		n = DIV_ROUND_UP(irqs_left, ports_left);
		nfp_net_irqs_assign(nn, &pf->irq_entries[num_irqs - irqs_left],
				    n);
		irqs_left -= n;
		ports_left--;
	}

	/* Finish netdev init and register */
	id = 0;
	list_for_each_entry(nn, &pf->ports, port_list) {
		err = nfp_net_pf_init_port_netdev(pf, nn, id);
		if (err)
			goto err_prev_deinit;

		id++;
	}

	return 0;

err_prev_deinit:
	list_for_each_entry_continue_reverse(nn, &pf->ports, port_list) {
		nfp_net_debugfs_dir_clean(&nn->debugfs_dir);
		nfp_net_netdev_clean(nn->dp.netdev);
	}
	nfp_net_irqs_disable(pf->pdev);
err_vec_free:
	kfree(pf->irq_entries);
err_nn_free:
	nfp_net_pf_free_netdevs(pf);
	return err;
}

static void nfp_net_pci_remove_finish(struct nfp_pf *pf)
{
	nfp_net_debugfs_dir_clean(&pf->ddir);

	nfp_net_irqs_disable(pf->pdev);
	kfree(pf->irq_entries);

	nfp_cpp_area_release_free(pf->rx_area);
	nfp_cpp_area_release_free(pf->tx_area);
	nfp_cpp_area_release_free(pf->ctrl_area);
}

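/* Port refresh worker.  Re-reads the NSP ETH table, re-points each netdev's
 * eth_port entry at the fresh copy and unregisters any port whose
 * configuration override changed since probe (those need a reboot).  If the
 * last port disappears the PCI resources are released as well, mirroring
 * nfp_net_pci_remove().
 */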
static void nfp_net_refresh_netdevs(struct work_struct *work)
{
	struct nfp_pf *pf = container_of(work, struct nfp_pf,
					 port_refresh_work);
	struct nfp_eth_table *eth_table;
	struct nfp_net *nn, *next;

	mutex_lock(&pf->port_lock);

	/* Check for nfp_net_pci_remove() racing against us */
	if (list_empty(&pf->ports))
		goto out;

	list_for_each_entry(nn, &pf->ports, port_list)
		nfp_net_link_changed_read_clear(nn);

	eth_table = nfp_eth_read_ports(pf->cpp);
	if (!eth_table) {
		nfp_err(pf->cpp, "Error refreshing port config!\n");
		goto out;
	}

	rtnl_lock();
	list_for_each_entry(nn, &pf->ports, port_list) {
		if (!nn->eth_port)
			continue;
		nn->eth_port = nfp_net_find_port(eth_table,
						 nn->eth_port->eth_index);
	}
	rtnl_unlock();

	kfree(pf->eth_tbl);
	pf->eth_tbl = eth_table;

	list_for_each_entry_safe(nn, next, &pf->ports, port_list) {
		if (!nn->eth_port) {
			nfp_warn(pf->cpp, "Warning: port not present after reconfig\n");
			continue;
		}
		if (!nn->eth_port->override_changed)
			continue;

		nn_warn(nn, "Port config changed, unregistering. Reboot required before port will be operational again.\n");

		nfp_net_debugfs_dir_clean(&nn->debugfs_dir);
		nfp_net_netdev_clean(nn->dp.netdev);

		list_del(&nn->port_list);
		pf->num_netdevs--;
		nfp_net_netdev_free(nn);
	}

	if (list_empty(&pf->ports))
		nfp_net_pci_remove_finish(pf);
out:
	mutex_unlock(&pf->port_lock);
}

void nfp_net_refresh_port_table(struct nfp_net *nn)
{
	struct nfp_pf *pf = pci_get_drvdata(nn->pdev);

	schedule_work(&pf->port_refresh_work);
}

int nfp_net_refresh_eth_port(struct nfp_net *nn)
{
	struct nfp_eth_table_port *eth_port;
	struct nfp_eth_table *eth_table;

	eth_table = nfp_eth_read_ports(nn->cpp);
	if (!eth_table) {
		nn_err(nn, "Error refreshing port state table!\n");
		return -EIO;
	}

	eth_port = nfp_net_find_port(eth_table, nn->eth_port->eth_index);
	if (!eth_port) {
		nn_err(nn, "Error finding state of the port!\n");
		kfree(eth_table);
		return -EIO;
	}

	memcpy(nn->eth_port, eth_port, sizeof(*eth_port));

	kfree(eth_table);

	return 0;
}

/*
 * PCI device functions
 */
int nfp_net_pci_probe(struct nfp_pf *pf)
{
	u8 __iomem *ctrl_bar, *tx_bar, *rx_bar;
	u32 total_tx_qcs, total_rx_qcs;
	struct nfp_net_fw_version fw_ver;
	u32 tx_area_sz, rx_area_sz;
	u32 start_q;
	int stride;
	int err;

	INIT_WORK(&pf->port_refresh_work, nfp_net_refresh_netdevs);
	mutex_init(&pf->port_lock);

	/* Verify that the board has completed initialization */
	if (!nfp_is_ready(pf->cpp)) {
		nfp_err(pf->cpp, "NFP is not ready for NIC operation.\n");
		return -EINVAL;
	}

	mutex_lock(&pf->port_lock);
	pf->num_ports = nfp_net_pf_get_num_ports(pf);

	ctrl_bar = nfp_net_pf_map_ctrl_bar(pf);
	if (!ctrl_bar) {
		err = pf->fw_loaded ? -EINVAL : -EPROBE_DEFER;
		goto err_unlock;
	}

	nfp_net_get_fw_version(&fw_ver, ctrl_bar);
	if (fw_ver.resv || fw_ver.class != NFP_NET_CFG_VERSION_CLASS_GENERIC) {
		nfp_err(pf->cpp, "Unknown Firmware ABI %d.%d.%d.%d\n",
			fw_ver.resv, fw_ver.class, fw_ver.major, fw_ver.minor);
		err = -EINVAL;
		goto err_ctrl_unmap;
	}

	/* Determine stride */
	if (nfp_net_fw_ver_eq(&fw_ver, 0, 0, 0, 1)) {
		stride = 2;
		nfp_warn(pf->cpp, "OBSOLETE Firmware detected - VF isolation not available\n");
	} else {
		switch (fw_ver.major) {
		case 1 ... 4:
			stride = 4;
			break;
		default:
			nfp_err(pf->cpp, "Unsupported Firmware ABI %d.%d.%d.%d\n",
				fw_ver.resv, fw_ver.class,
				fw_ver.major, fw_ver.minor);
			err = -EINVAL;
			goto err_ctrl_unmap;
		}
	}

	/* Find how many QC structs need to be mapped */
	total_tx_qcs = nfp_net_pf_total_qcs(pf, ctrl_bar, stride,
					    NFP_NET_CFG_START_TXQ,
					    NFP_NET_CFG_MAX_TXRINGS);
	total_rx_qcs = nfp_net_pf_total_qcs(pf, ctrl_bar, stride,
					    NFP_NET_CFG_START_RXQ,
					    NFP_NET_CFG_MAX_RXRINGS);
	if (!total_tx_qcs || !total_rx_qcs) {
		nfp_err(pf->cpp, "Invalid PF QC configuration [%d,%d]\n",
			total_tx_qcs, total_rx_qcs);
		err = -EINVAL;
		goto err_ctrl_unmap;
	}

	tx_area_sz = NFP_QCP_QUEUE_ADDR_SZ * total_tx_qcs;
	rx_area_sz = NFP_QCP_QUEUE_ADDR_SZ * total_rx_qcs;

	/* Map TX queues */
	start_q = readl(ctrl_bar + NFP_NET_CFG_START_TXQ);
	tx_bar = nfp_net_map_area(pf->cpp, "net.tx", 0, 0,
				  NFP_PCIE_QUEUE(start_q),
				  tx_area_sz, &pf->tx_area);
	if (IS_ERR(tx_bar)) {
		nfp_err(pf->cpp, "Failed to map TX area.\n");
		err = PTR_ERR(tx_bar);
		goto err_ctrl_unmap;
	}

	/* Map RX queues */
	start_q = readl(ctrl_bar + NFP_NET_CFG_START_RXQ);
	rx_bar = nfp_net_map_area(pf->cpp, "net.rx", 0, 0,
				  NFP_PCIE_QUEUE(start_q),
				  rx_area_sz, &pf->rx_area);
	if (IS_ERR(rx_bar)) {
		nfp_err(pf->cpp, "Failed to map RX area.\n");
		err = PTR_ERR(rx_bar);
		goto err_unmap_tx;
	}

	pf->ddir = nfp_net_debugfs_device_add(pf->pdev);

	err = nfp_net_pf_spawn_netdevs(pf, ctrl_bar, tx_bar, rx_bar,
				       stride, &fw_ver);
	if (err)
		goto err_clean_ddir;

	mutex_unlock(&pf->port_lock);

	return 0;

err_clean_ddir:
	nfp_net_debugfs_dir_clean(&pf->ddir);
	nfp_cpp_area_release_free(pf->rx_area);
err_unmap_tx:
	nfp_cpp_area_release_free(pf->tx_area);
err_ctrl_unmap:
	nfp_cpp_area_release_free(pf->ctrl_area);
err_unlock:
	mutex_unlock(&pf->port_lock);
	return err;
}

void nfp_net_pci_remove(struct nfp_pf *pf)
{
	struct nfp_net *nn;

	mutex_lock(&pf->port_lock);
	if (list_empty(&pf->ports))
		goto out;

	list_for_each_entry(nn, &pf->ports, port_list) {
		nfp_net_debugfs_dir_clean(&nn->debugfs_dir);

		nfp_net_netdev_clean(nn->dp.netdev);
	}

	nfp_net_pf_free_netdevs(pf);

	nfp_net_pci_remove_finish(pf);
out:
	mutex_unlock(&pf->port_lock);

	cancel_work_sync(&pf->port_refresh_work);
}