nfp: don't clutter init code passing fw_ver around
[linux-2.6-block.git] / drivers / net / ethernet / netronome / nfp / nfp_net_main.c
/*
 * Copyright (C) 2015-2017 Netronome Systems, Inc.
 *
 * This software is dual licensed under the GNU General License Version 2,
 * June 1991 as shown in the file COPYING in the top-level directory of this
 * source tree or the BSD 2-Clause License provided below. You have the
 * option to license this software under the complete terms of either license.
 *
 * The BSD 2-Clause License:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      1. Redistributions of source code must retain the above
 *         copyright notice, this list of conditions and the following
 *         disclaimer.
 *
 *      2. Redistributions in binary form must reproduce the above
 *         copyright notice, this list of conditions and the following
 *         disclaimer in the documentation and/or other materials
 *         provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

/*
 * nfp_net_main.c
 * Netronome network device driver: Main entry point
 * Authors: Jakub Kicinski <jakub.kicinski@netronome.com>
 *          Alejandro Lucero <alejandro.lucero@netronome.com>
 *          Jason McMullan <jason.mcmullan@netronome.com>
 *          Rolf Neugebauer <rolf.neugebauer@netronome.com>
 */

#include <linux/etherdevice.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/lockdep.h>
#include <linux/pci.h>
#include <linux/pci_regs.h>
#include <linux/msi.h>
#include <linux/random.h>
#include <linux/rtnetlink.h>

#include "nfpcore/nfp.h"
#include "nfpcore/nfp_cpp.h"
#include "nfpcore/nfp_nffw.h"
#include "nfpcore/nfp_nsp.h"
#include "nfpcore/nfp6000_pcie.h"
#include "nfp_app.h"
#include "nfp_net_ctrl.h"
#include "nfp_net.h"
#include "nfp_main.h"
#include "nfp_port.h"

#define NFP_PF_CSR_SLICE_SIZE	(32 * 1024)

static int nfp_is_ready(struct nfp_cpp *cpp)
{
	const char *cp;
	long state;
	int err;

	cp = nfp_hwinfo_lookup(cpp, "board.state");
	if (!cp)
		return 0;

	err = kstrtol(cp, 0, &state);
	if (err < 0)
		return 0;

	return state == 15;
}
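
/* Example (editor's illustration, not part of the driver): on a fully
 * initialized board the HWInfo database is expected to contain an entry
 * along the lines of
 *
 *	board.state=15
 *
 * which nfp_hwinfo_lookup() returns as the string "15"; any other value,
 * or a missing key, makes nfp_is_ready() report false.
 */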

/**
 * nfp_net_map_area() - Helper function to map an area
 * @cpp:    NFP CPP handler
 * @name:   Name for the area
 * @isl:    CPP island
 * @target: CPP target
 * @addr:   CPP address
 * @size:   Size of the area
 * @area:   Area handle (returned).
 *
 * This function is primarily to simplify the code in the main probe
 * function. To undo the effect of this function's call
 * nfp_cpp_area_release_free(*area).
 *
 * Return: Pointer to memory mapped area or ERR_PTR
 */
static u8 __iomem *nfp_net_map_area(struct nfp_cpp *cpp,
				    const char *name, int isl, int target,
				    unsigned long long addr, unsigned long size,
				    struct nfp_cpp_area **area)
{
	u8 __iomem *res;
	u32 dest;
	int err;

	dest = NFP_CPP_ISLAND_ID(target, NFP_CPP_ACTION_RW, 0, isl);

	*area = nfp_cpp_area_alloc_with_name(cpp, dest, name, addr, size);
	if (!*area) {
		err = -EIO;
		goto err_area;
	}

	err = nfp_cpp_area_acquire(*area);
	if (err < 0)
		goto err_acquire;

	res = nfp_cpp_area_iomem(*area);
	if (!res) {
		err = -EIO;
		goto err_map;
	}

	return res;

err_map:
	nfp_cpp_area_release(*area);
err_acquire:
	nfp_cpp_area_free(*area);
err_area:
	return (u8 __iomem *)ERR_PTR(err);
}
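
/* Usage sketch (editor's illustration; "example.bar", target, addr and
 * size are placeholders, only the call pattern mirrors this file):
 *
 *	struct nfp_cpp_area *area;
 *	u8 __iomem *mem;
 *
 *	mem = nfp_net_map_area(cpp, "example.bar", 0, target, addr, size,
 *			       &area);
 *	if (IS_ERR(mem))
 *		return PTR_ERR(mem);
 *	...
 *	nfp_cpp_area_release_free(area);	undoes the mapping
 */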

/**
 * nfp_net_get_mac_addr() - Get the MAC address.
 * @nn:  NFP Network structure
 * @cpp: NFP CPP handle
 * @id:  NFP port id
 *
 * First try to get the MAC address from NSP ETH table. If that
 * fails try HWInfo. As a last resort generate a random address.
 */
void
nfp_net_get_mac_addr(struct nfp_net *nn, struct nfp_cpp *cpp, unsigned int id)
{
	struct nfp_eth_table_port *eth_port;
	struct nfp_net_dp *dp = &nn->dp;
	u8 mac_addr[ETH_ALEN];
	const char *mac_str;
	char name[32];

	eth_port = __nfp_port_get_eth_port(nn->port);
	if (eth_port) {
		ether_addr_copy(dp->netdev->dev_addr, eth_port->mac_addr);
		ether_addr_copy(dp->netdev->perm_addr, eth_port->mac_addr);
		return;
	}

	snprintf(name, sizeof(name), "eth%d.mac", id);

	mac_str = nfp_hwinfo_lookup(cpp, name);
	if (!mac_str) {
		dev_warn(dp->dev, "Can't lookup MAC address. Generate\n");
		eth_hw_addr_random(dp->netdev);
		return;
	}

	if (sscanf(mac_str, "%02hhx:%02hhx:%02hhx:%02hhx:%02hhx:%02hhx",
		   &mac_addr[0], &mac_addr[1], &mac_addr[2],
		   &mac_addr[3], &mac_addr[4], &mac_addr[5]) != 6) {
		dev_warn(dp->dev,
			 "Can't parse MAC address (%s). Generate.\n", mac_str);
		eth_hw_addr_random(dp->netdev);
		return;
	}

	ether_addr_copy(dp->netdev->dev_addr, mac_addr);
	ether_addr_copy(dp->netdev->perm_addr, mac_addr);
}
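
/* Example (editor's illustration): for id 0 the HWInfo key queried above
 * is "eth0.mac", and its value is expected in the usual colon-separated
 * form parsed by the sscanf() above, e.g. "00:15:4d:00:00:01" (address
 * made up for the example).
 */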

struct nfp_eth_table_port *
nfp_net_find_port(struct nfp_eth_table *eth_tbl, unsigned int id)
{
	int i;

	for (i = 0; eth_tbl && i < eth_tbl->count; i++)
		if (eth_tbl->ports[i].eth_index == id)
			return &eth_tbl->ports[i];

	return NULL;
}

static int
nfp_net_pf_rtsym_read_optional(struct nfp_pf *pf, const char *format,
			       unsigned int default_val)
{
	char name[256];
	int err = 0;
	u64 val;

	snprintf(name, sizeof(name), format, nfp_cppcore_pcie_unit(pf->cpp));

	val = nfp_rtsym_read_le(pf->cpp, name, &err);
	if (err) {
		if (err == -ENOENT)
			return default_val;
		nfp_err(pf->cpp, "Unable to read symbol %s\n", name);
		return err;
	}

	return val;
}

static int nfp_net_pf_get_num_ports(struct nfp_pf *pf)
{
	return nfp_net_pf_rtsym_read_optional(pf, "nfd_cfg_pf%u_num_ports", 1);
}

static int nfp_net_pf_get_app_id(struct nfp_pf *pf)
{
	return nfp_net_pf_rtsym_read_optional(pf, "_pf%u_net_app_id",
					      NFP_APP_CORE_NIC);
}
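
/* Example (editor's illustration): on PCIe unit 0 the two helpers above
 * read the run-time symbols "nfd_cfg_pf0_num_ports" and "_pf0_net_app_id",
 * falling back to a single port and the core NIC app (NFP_APP_CORE_NIC)
 * when the firmware does not export them (-ENOENT).
 */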

static u8 __iomem *
nfp_net_pf_map_rtsym(struct nfp_pf *pf, const char *name, const char *sym_fmt,
		     unsigned int min_size, struct nfp_cpp_area **area)
{
	const struct nfp_rtsym *sym;
	char pf_symbol[256];
	u8 __iomem *mem;

	snprintf(pf_symbol, sizeof(pf_symbol), sym_fmt,
		 nfp_cppcore_pcie_unit(pf->cpp));

	sym = nfp_rtsym_lookup(pf->cpp, pf_symbol);
	if (!sym) {
		nfp_err(pf->cpp, "Failed to find PF symbol %s\n", pf_symbol);
		return (u8 __iomem *)ERR_PTR(-ENOENT);
	}

	if (sym->size < min_size) {
		nfp_err(pf->cpp, "PF symbol %s too small\n", pf_symbol);
		return (u8 __iomem *)ERR_PTR(-EINVAL);
	}

	mem = nfp_net_map_area(pf->cpp, name, sym->domain, sym->target,
			       sym->addr, sym->size, area);
	if (IS_ERR(mem)) {
		nfp_err(pf->cpp, "Failed to map PF symbol %s: %ld\n",
			pf_symbol, PTR_ERR(mem));
		return mem;
	}

	return mem;
}
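
/* Usage note (editor's illustration): nfp_net_pci_probe() below maps the
 * "_pf%d_net_bar0" symbol through this helper with
 * min_size = pf->max_data_vnics * NFP_PF_CSR_SLICE_SIZE, i.e. it requires
 * one 32K control BAR slice per data vNIC.
 */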

static void nfp_net_pf_free_vnic(struct nfp_pf *pf, struct nfp_net *nn)
{
	nfp_port_free(nn->port);
	list_del(&nn->vnic_list);
	pf->num_vnics--;
	nfp_net_free(nn);
}

static void nfp_net_pf_free_vnics(struct nfp_pf *pf)
{
	struct nfp_net *nn;

	while (!list_empty(&pf->vnics)) {
		nn = list_first_entry(&pf->vnics, struct nfp_net, vnic_list);
		nfp_net_pf_free_vnic(pf, nn);
	}
}

static struct nfp_net *
nfp_net_pf_alloc_vnic(struct nfp_pf *pf, bool needs_netdev,
		      void __iomem *ctrl_bar, void __iomem *qc_bar,
		      int stride, unsigned int eth_id)
{
	u32 tx_base, rx_base, n_tx_rings, n_rx_rings;
	struct nfp_net *nn;
	int err;

	tx_base = readl(ctrl_bar + NFP_NET_CFG_START_TXQ);
	rx_base = readl(ctrl_bar + NFP_NET_CFG_START_RXQ);
	n_tx_rings = readl(ctrl_bar + NFP_NET_CFG_MAX_TXRINGS);
	n_rx_rings = readl(ctrl_bar + NFP_NET_CFG_MAX_RXRINGS);

	/* Allocate and initialise the vNIC */
	nn = nfp_net_alloc(pf->pdev, needs_netdev, n_tx_rings, n_rx_rings);
	if (IS_ERR(nn))
		return nn;

	nn->app = pf->app;
	nfp_net_get_fw_version(&nn->fw_ver, ctrl_bar);
	nn->dp.ctrl_bar = ctrl_bar;
	nn->tx_bar = qc_bar + tx_base * NFP_QCP_QUEUE_ADDR_SZ;
	nn->rx_bar = qc_bar + rx_base * NFP_QCP_QUEUE_ADDR_SZ;
	nn->dp.is_vf = 0;
	nn->stride_rx = stride;
	nn->stride_tx = stride;

	err = nfp_app_vnic_init(pf->app, nn, eth_id);
	if (err) {
		nfp_net_free(nn);
		return ERR_PTR(err);
	}

	pf->num_vnics++;
	list_add_tail(&nn->vnic_list, &pf->vnics);

	return nn;
}
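
/* Note (editor's illustration): the vNIC's queue pointers live in the queue
 * controller BAR at fixed NFP_QCP_QUEUE_ADDR_SZ-sized slots, so a TX queue
 * base of, say, 64 (a made-up value; the real one is read from
 * NFP_NET_CFG_START_TXQ above) puts nn->tx_bar at
 * qc_bar + 64 * NFP_QCP_QUEUE_ADDR_SZ.
 */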

static int
nfp_net_pf_init_vnic(struct nfp_pf *pf, struct nfp_net *nn, unsigned int id)
{
	int err;

	/* Get ME clock frequency from ctrl BAR
	 * XXX for now frequency is hardcoded until we figure out how
	 * to get the value from nfp-hwinfo into ctrl bar
	 */
	nn->me_freq_mhz = 1200;

	err = nfp_net_init(nn);
	if (err)
		return err;

	nfp_net_debugfs_vnic_add(nn, pf->ddir, id);

	if (nn->port) {
		err = nfp_devlink_port_register(pf->app, nn->port);
		if (err)
			goto err_dfs_clean;
	}

	nfp_net_info(nn);

	return 0;

err_dfs_clean:
	nfp_net_debugfs_dir_clean(&nn->debugfs_dir);
	nfp_net_clean(nn);
	return err;
}

static int
nfp_net_pf_alloc_vnics(struct nfp_pf *pf, void __iomem *ctrl_bar,
		       void __iomem *qc_bar, int stride)
{
	struct nfp_net *nn;
	unsigned int i;
	int err;

	for (i = 0; i < pf->max_data_vnics; i++) {
		nn = nfp_net_pf_alloc_vnic(pf, true, ctrl_bar, qc_bar,
					   stride, i);
		if (IS_ERR(nn)) {
			err = PTR_ERR(nn);
			goto err_free_prev;
		}

		ctrl_bar += NFP_PF_CSR_SLICE_SIZE;

		/* Kill the vNIC if app init marked it as invalid */
		if (nn->port && nn->port->type == NFP_PORT_INVALID) {
			nfp_net_pf_free_vnic(pf, nn);
			continue;
		}
	}

	if (list_empty(&pf->vnics))
		return -ENODEV;

	return 0;

err_free_prev:
	nfp_net_pf_free_vnics(pf);
	return err;
}

static void nfp_net_pf_clean_vnic(struct nfp_pf *pf, struct nfp_net *nn)
{
	if (nn->port)
		nfp_devlink_port_unregister(nn->port);
	nfp_net_debugfs_dir_clean(&nn->debugfs_dir);
	nfp_net_clean(nn);
	nfp_app_vnic_clean(pf->app, nn);
}

static int
nfp_net_pf_spawn_vnics(struct nfp_pf *pf,
		       void __iomem *ctrl_bar, void __iomem *qc_bar, int stride)
{
	unsigned int id, wanted_irqs, num_irqs, vnics_left, irqs_left;
	struct nfp_net *nn;
	int err;

	/* Allocate the vnics and do basic init */
	err = nfp_net_pf_alloc_vnics(pf, ctrl_bar, qc_bar, stride);
	if (err)
		return err;

	/* Get MSI-X vectors */
	wanted_irqs = 0;
	list_for_each_entry(nn, &pf->vnics, vnic_list)
		wanted_irqs += NFP_NET_NON_Q_VECTORS + nn->dp.num_r_vecs;
	pf->irq_entries = kcalloc(wanted_irqs, sizeof(*pf->irq_entries),
				  GFP_KERNEL);
	if (!pf->irq_entries) {
		err = -ENOMEM;
		goto err_nn_free;
	}

	num_irqs = nfp_net_irqs_alloc(pf->pdev, pf->irq_entries,
				      NFP_NET_MIN_VNIC_IRQS * pf->num_vnics,
				      wanted_irqs);
	if (!num_irqs) {
		nn_warn(nn, "Unable to allocate MSI-X Vectors. Exiting\n");
		err = -ENOMEM;
		goto err_vec_free;
	}

	/* Distribute IRQs to vNICs */
	irqs_left = num_irqs;
	vnics_left = pf->num_vnics;
	list_for_each_entry(nn, &pf->vnics, vnic_list) {
		unsigned int n;

		n = DIV_ROUND_UP(irqs_left, vnics_left);
		nfp_net_irqs_assign(nn, &pf->irq_entries[num_irqs - irqs_left],
				    n);
		irqs_left -= n;
		vnics_left--;
	}
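
	/* Worked example (editor's illustration, values made up): with
	 * num_irqs == 10 and three vNICs the loop above hands out
	 * DIV_ROUND_UP(10, 3) == 4 vectors to the first vNIC,
	 * DIV_ROUND_UP(6, 2) == 3 to the second and DIV_ROUND_UP(3, 1) == 3
	 * to the last, consuming all ten with no vector left unused.
	 */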

	/* Finish vNIC init and register */
	id = 0;
	list_for_each_entry(nn, &pf->vnics, vnic_list) {
		err = nfp_net_pf_init_vnic(pf, nn, id);
		if (err)
			goto err_prev_deinit;

		id++;
	}

	return 0;

err_prev_deinit:
	list_for_each_entry_continue_reverse(nn, &pf->vnics, vnic_list)
		nfp_net_pf_clean_vnic(pf, nn);
	nfp_net_irqs_disable(pf->pdev);
err_vec_free:
	kfree(pf->irq_entries);
err_nn_free:
	nfp_net_pf_free_vnics(pf);
	return err;
}

static int nfp_net_pf_app_init(struct nfp_pf *pf)
{
	int err;

	pf->app = nfp_app_alloc(pf, nfp_net_pf_get_app_id(pf));
	if (IS_ERR(pf->app))
		return PTR_ERR(pf->app);

	err = nfp_app_init(pf->app);
	if (err)
		goto err_free;

	return 0;

err_free:
	nfp_app_free(pf->app);
	return err;
}

static void nfp_net_pf_app_clean(struct nfp_pf *pf)
{
	nfp_app_free(pf->app);
	pf->app = NULL;
}

static void nfp_net_pci_remove_finish(struct nfp_pf *pf)
{
	nfp_net_debugfs_dir_clean(&pf->ddir);

	nfp_net_irqs_disable(pf->pdev);
	kfree(pf->irq_entries);

	nfp_net_pf_app_clean(pf);

	nfp_cpp_area_release_free(pf->qc_area);
	nfp_cpp_area_release_free(pf->data_vnic_bar);
}

static int
nfp_net_eth_port_update(struct nfp_cpp *cpp, struct nfp_port *port,
			struct nfp_eth_table *eth_table)
{
	struct nfp_eth_table_port *eth_port;

	ASSERT_RTNL();

	eth_port = nfp_net_find_port(eth_table, port->eth_id);
	if (!eth_port) {
		set_bit(NFP_PORT_CHANGED, &port->flags);
		nfp_warn(cpp, "Warning: port #%d not present after reconfig\n",
			 port->eth_id);
		return -EIO;
	}
	if (eth_port->override_changed) {
		nfp_warn(cpp, "Port #%d config changed, unregistering. Reboot required before port will be operational again.\n", port->eth_id);
		port->type = NFP_PORT_INVALID;
	}

	memcpy(port->eth_port, eth_port, sizeof(*eth_port));

	return 0;
}

int nfp_net_refresh_port_table_sync(struct nfp_pf *pf)
{
	struct nfp_eth_table *eth_table;
	struct nfp_net *nn, *next;
	struct nfp_port *port;

	lockdep_assert_held(&pf->lock);

	/* Check for nfp_net_pci_remove() racing against us */
	if (list_empty(&pf->vnics))
		return 0;

	/* Update state of all ports */
	rtnl_lock();
	list_for_each_entry(port, &pf->ports, port_list)
		clear_bit(NFP_PORT_CHANGED, &port->flags);

	eth_table = nfp_eth_read_ports(pf->cpp);
	if (!eth_table) {
		list_for_each_entry(port, &pf->ports, port_list)
			if (__nfp_port_get_eth_port(port))
				set_bit(NFP_PORT_CHANGED, &port->flags);
		rtnl_unlock();
		nfp_err(pf->cpp, "Error refreshing port config!\n");
		return -EIO;
	}

	list_for_each_entry(port, &pf->ports, port_list)
		if (__nfp_port_get_eth_port(port))
			nfp_net_eth_port_update(pf->cpp, port, eth_table);
	rtnl_unlock();

	kfree(eth_table);

	/* Shoot off the ports which became invalid */
	list_for_each_entry_safe(nn, next, &pf->vnics, vnic_list) {
		if (!nn->port || nn->port->type != NFP_PORT_INVALID)
			continue;

		nfp_net_pf_clean_vnic(pf, nn);
		nfp_net_pf_free_vnic(pf, nn);
	}

	if (list_empty(&pf->vnics))
		nfp_net_pci_remove_finish(pf);

	return 0;
}

static void nfp_net_refresh_vnics(struct work_struct *work)
{
	struct nfp_pf *pf = container_of(work, struct nfp_pf,
					 port_refresh_work);

	mutex_lock(&pf->lock);
	nfp_net_refresh_port_table_sync(pf);
	mutex_unlock(&pf->lock);
}

void nfp_net_refresh_port_table(struct nfp_port *port)
{
	struct nfp_pf *pf = port->app->pf;

	set_bit(NFP_PORT_CHANGED, &port->flags);

	schedule_work(&pf->port_refresh_work);
}

int nfp_net_refresh_eth_port(struct nfp_port *port)
{
	struct nfp_cpp *cpp = port->app->cpp;
	struct nfp_eth_table *eth_table;
	int ret;

	clear_bit(NFP_PORT_CHANGED, &port->flags);

	eth_table = nfp_eth_read_ports(cpp);
	if (!eth_table) {
		set_bit(NFP_PORT_CHANGED, &port->flags);
		nfp_err(cpp, "Error refreshing port state table!\n");
		return -EIO;
	}

	ret = nfp_net_eth_port_update(cpp, port, eth_table);

	kfree(eth_table);

	return ret;
}

/*
 * PCI device functions
 */
int nfp_net_pci_probe(struct nfp_pf *pf)
{
	struct nfp_net_fw_version fw_ver;
	u8 __iomem *ctrl_bar, *qc_bar;
	u32 ctrl_bar_sz;
	int stride;
	int err;

	INIT_WORK(&pf->port_refresh_work, nfp_net_refresh_vnics);

	/* Verify that the board has completed initialization */
	if (!nfp_is_ready(pf->cpp)) {
		nfp_err(pf->cpp, "NFP is not ready for NIC operation.\n");
		return -EINVAL;
	}

	mutex_lock(&pf->lock);
	pf->max_data_vnics = nfp_net_pf_get_num_ports(pf);
	if ((int)pf->max_data_vnics < 0) {
		err = pf->max_data_vnics;
		goto err_unlock;
	}

	ctrl_bar_sz = pf->max_data_vnics * NFP_PF_CSR_SLICE_SIZE;
	ctrl_bar = nfp_net_pf_map_rtsym(pf, "net.ctrl", "_pf%d_net_bar0",
					ctrl_bar_sz, &pf->data_vnic_bar);
	if (IS_ERR(ctrl_bar)) {
		err = PTR_ERR(ctrl_bar);
		if (!pf->fw_loaded && err == -ENOENT)
			err = -EPROBE_DEFER;
		goto err_unlock;
	}

	nfp_net_get_fw_version(&fw_ver, ctrl_bar);
	if (fw_ver.resv || fw_ver.class != NFP_NET_CFG_VERSION_CLASS_GENERIC) {
		nfp_err(pf->cpp, "Unknown Firmware ABI %d.%d.%d.%d\n",
			fw_ver.resv, fw_ver.class, fw_ver.major, fw_ver.minor);
		err = -EINVAL;
		goto err_ctrl_unmap;
	}

	/* Determine stride */
	if (nfp_net_fw_ver_eq(&fw_ver, 0, 0, 0, 1)) {
		stride = 2;
		nfp_warn(pf->cpp, "OBSOLETE Firmware detected - VF isolation not available\n");
	} else {
		switch (fw_ver.major) {
		case 1 ... 4:
			stride = 4;
			break;
		default:
			nfp_err(pf->cpp, "Unsupported Firmware ABI %d.%d.%d.%d\n",
				fw_ver.resv, fw_ver.class,
				fw_ver.major, fw_ver.minor);
			err = -EINVAL;
			goto err_ctrl_unmap;
		}
	}

	/* Map queues */
	qc_bar = nfp_net_map_area(pf->cpp, "net.qc", 0, 0,
				  NFP_PCIE_QUEUE(0), NFP_QCP_QUEUE_AREA_SZ,
				  &pf->qc_area);
	if (IS_ERR(qc_bar)) {
		nfp_err(pf->cpp, "Failed to map Queue Controller area.\n");
		err = PTR_ERR(qc_bar);
		goto err_ctrl_unmap;
	}

	err = nfp_net_pf_app_init(pf);
	if (err)
		goto err_unmap_qc;

	pf->ddir = nfp_net_debugfs_device_add(pf->pdev);

	err = nfp_net_pf_spawn_vnics(pf, ctrl_bar, qc_bar, stride);
	if (err)
		goto err_clean_ddir;

	mutex_unlock(&pf->lock);

	return 0;

err_clean_ddir:
	nfp_net_debugfs_dir_clean(&pf->ddir);
	nfp_net_pf_app_clean(pf);
err_unmap_qc:
	nfp_cpp_area_release_free(pf->qc_area);
err_ctrl_unmap:
	nfp_cpp_area_release_free(pf->data_vnic_bar);
err_unlock:
	mutex_unlock(&pf->lock);
	return err;
}

void nfp_net_pci_remove(struct nfp_pf *pf)
{
	struct nfp_net *nn;

	mutex_lock(&pf->lock);
	if (list_empty(&pf->vnics))
		goto out;

	list_for_each_entry(nn, &pf->vnics, vnic_list)
		nfp_net_pf_clean_vnic(pf, nn);

	nfp_net_pf_free_vnics(pf);

	nfp_net_pci_remove_finish(pf);
out:
	mutex_unlock(&pf->lock);

	cancel_work_sync(&pf->port_refresh_work);
}