IB/ipath: Prevent random program use of diags interface
linux-2.6-block.git / drivers/infiniband/hw/ipath/ipath_driver.c
/*
 * Copyright (c) 2006 QLogic, Inc. All rights reserved.
 * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/spinlock.h>
#include <linux/idr.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/vmalloc.h>

#include "ipath_kernel.h"
#include "ipath_verbs.h"
#include "ipath_common.h"

static void ipath_update_pio_bufs(struct ipath_devdata *);

const char *ipath_get_unit_name(int unit)
{
	static char iname[16];
	snprintf(iname, sizeof iname, "infinipath%u", unit);
	return iname;
}

#define DRIVER_LOAD_MSG "QLogic " IPATH_DRV_NAME " loaded: "
#define PFX IPATH_DRV_NAME ": "

/*
 * The size has to be longer than this string, so we can append
 * board/chip information to it in the init code.
 */
const char ib_ipath_version[] = IPATH_IDSTR "\n";

static struct idr unit_table;
DEFINE_SPINLOCK(ipath_devs_lock);
LIST_HEAD(ipath_dev_list);

wait_queue_head_t ipath_state_wait;

unsigned ipath_debug = __IPATH_INFO;

module_param_named(debug, ipath_debug, uint, S_IWUSR | S_IRUGO);
MODULE_PARM_DESC(debug, "mask for debug prints");
EXPORT_SYMBOL_GPL(ipath_debug);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("QLogic <support@pathscale.com>");
MODULE_DESCRIPTION("QLogic InfiniPath driver");

const char *ipath_ibcstatus_str[] = {
	"Disabled",
	"LinkUp",
	"PollActive",
	"PollQuiet",
	"SleepDelay",
	"SleepQuiet",
	"LState6",		/* unused */
	"LState7",		/* unused */
	"CfgDebounce",
	"CfgRcvfCfg",
	"CfgWaitRmt",
	"CfgIdle",
	"RecovRetrain",
	"LState0xD",		/* unused */
	"RecovWaitRmt",
	"RecovIdle",
};

static void __devexit ipath_remove_one(struct pci_dev *);
static int __devinit ipath_init_one(struct pci_dev *,
				    const struct pci_device_id *);

/* Only needed for registration, nothing else needs this info */
#define PCI_VENDOR_ID_PATHSCALE 0x1fc1
#define PCI_DEVICE_ID_INFINIPATH_HT 0xd
#define PCI_DEVICE_ID_INFINIPATH_PE800 0x10

static const struct pci_device_id ipath_pci_tbl[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_PATHSCALE, PCI_DEVICE_ID_INFINIPATH_HT) },
	{ PCI_DEVICE(PCI_VENDOR_ID_PATHSCALE, PCI_DEVICE_ID_INFINIPATH_PE800) },
	{ 0, }
};

MODULE_DEVICE_TABLE(pci, ipath_pci_tbl);

static struct pci_driver ipath_driver = {
	.name = IPATH_DRV_NAME,
	.probe = ipath_init_one,
	.remove = __devexit_p(ipath_remove_one),
	.id_table = ipath_pci_tbl,
};


static inline void read_bars(struct ipath_devdata *dd, struct pci_dev *dev,
			     u32 *bar0, u32 *bar1)
{
	int ret;

	ret = pci_read_config_dword(dev, PCI_BASE_ADDRESS_0, bar0);
	if (ret)
		ipath_dev_err(dd, "failed to read bar0 before enable: "
			      "error %d\n", -ret);

	ret = pci_read_config_dword(dev, PCI_BASE_ADDRESS_1, bar1);
	if (ret)
		ipath_dev_err(dd, "failed to read bar1 before enable: "
			      "error %d\n", -ret);

	ipath_dbg("Read bar0 %x bar1 %x\n", *bar0, *bar1);
}

static void ipath_free_devdata(struct pci_dev *pdev,
			       struct ipath_devdata *dd)
{
	unsigned long flags;

	pci_set_drvdata(pdev, NULL);

	if (dd->ipath_unit != -1) {
		spin_lock_irqsave(&ipath_devs_lock, flags);
		idr_remove(&unit_table, dd->ipath_unit);
		list_del(&dd->ipath_list);
		spin_unlock_irqrestore(&ipath_devs_lock, flags);
	}
	vfree(dd);
}

static struct ipath_devdata *ipath_alloc_devdata(struct pci_dev *pdev)
{
	unsigned long flags;
	struct ipath_devdata *dd;
	int ret;

	if (!idr_pre_get(&unit_table, GFP_KERNEL)) {
		dd = ERR_PTR(-ENOMEM);
		goto bail;
	}

	dd = vmalloc(sizeof(*dd));
	if (!dd) {
		dd = ERR_PTR(-ENOMEM);
		goto bail;
	}
	memset(dd, 0, sizeof(*dd));
	dd->ipath_unit = -1;

	spin_lock_irqsave(&ipath_devs_lock, flags);

	ret = idr_get_new(&unit_table, dd, &dd->ipath_unit);
	if (ret < 0) {
		printk(KERN_ERR IPATH_DRV_NAME
		       ": Could not allocate unit ID: error %d\n", -ret);
		ipath_free_devdata(pdev, dd);
		dd = ERR_PTR(ret);
		goto bail_unlock;
	}

	dd->pcidev = pdev;
	pci_set_drvdata(pdev, dd);

	list_add(&dd->ipath_list, &ipath_dev_list);

bail_unlock:
	spin_unlock_irqrestore(&ipath_devs_lock, flags);

bail:
	return dd;
}

static inline struct ipath_devdata *__ipath_lookup(int unit)
{
	return idr_find(&unit_table, unit);
}

struct ipath_devdata *ipath_lookup(int unit)
{
	struct ipath_devdata *dd;
	unsigned long flags;

	spin_lock_irqsave(&ipath_devs_lock, flags);
	dd = __ipath_lookup(unit);
	spin_unlock_irqrestore(&ipath_devs_lock, flags);

	return dd;
}

int ipath_count_units(int *npresentp, int *nupp, u32 *maxportsp)
{
	int nunits, npresent, nup;
	struct ipath_devdata *dd;
	unsigned long flags;
	u32 maxports;

	nunits = npresent = nup = maxports = 0;

	spin_lock_irqsave(&ipath_devs_lock, flags);

	list_for_each_entry(dd, &ipath_dev_list, ipath_list) {
		nunits++;
		if ((dd->ipath_flags & IPATH_PRESENT) && dd->ipath_kregbase)
			npresent++;
		if (dd->ipath_lid &&
		    !(dd->ipath_flags & (IPATH_DISABLED | IPATH_LINKDOWN
					 | IPATH_LINKUNK)))
			nup++;
		if (dd->ipath_cfgports > maxports)
			maxports = dd->ipath_cfgports;
	}

	spin_unlock_irqrestore(&ipath_devs_lock, flags);

	if (npresentp)
		*npresentp = npresent;
	if (nupp)
		*nupp = nup;
	if (maxportsp)
		*maxportsp = maxports;

	return nunits;
}

/*
 * These next two routines are placeholders in case we don't have per-arch
 * code for controlling write combining.  If explicit control of write
 * combining is not available, performance will probably be awful.
 */

int __attribute__((weak)) ipath_enable_wc(struct ipath_devdata *dd)
{
	return -EOPNOTSUPP;
}

void __attribute__((weak)) ipath_disable_wc(struct ipath_devdata *dd)
{
}
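
/*
 * Illustrative sketch, not part of this file: an arch-specific build can
 * override the weak stubs above.  On x86_64 the override lives in
 * ipath_wc_x86_64.c and marks the PIO buffer region write-combining via
 * an MTRR, roughly along these lines (address/length rounding elided;
 * the names here follow that file and are assumptions, not guaranteed
 * by this one):
 *
 *	int ipath_enable_wc(struct ipath_devdata *dd)
 *	{
 *		int cookie = mtrr_add(pioaddr, piolen, MTRR_TYPE_WRCOMB, 0);
 *		if (cookie < 0)
 *			return -EINVAL;
 *		dd->ipath_wc_cookie = cookie;
 *		return 0;
 *	}
 */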

static int __devinit ipath_init_one(struct pci_dev *pdev,
				    const struct pci_device_id *ent)
{
	int ret, len, j;
	struct ipath_devdata *dd;
	unsigned long long addr;
	u32 bar0 = 0, bar1 = 0;
	u8 rev;

	dd = ipath_alloc_devdata(pdev);
	if (IS_ERR(dd)) {
		ret = PTR_ERR(dd);
		printk(KERN_ERR IPATH_DRV_NAME
		       ": Could not allocate devdata: error %d\n", -ret);
		goto bail;
	}

	ipath_cdbg(VERBOSE, "initializing unit #%u\n", dd->ipath_unit);

	read_bars(dd, pdev, &bar0, &bar1);

	ret = pci_enable_device(pdev);
	if (ret) {
		/* This can happen iff:
		 *
		 * We did a chip reset, and then failed to reprogram the
		 * BAR, or the chip reset due to an internal error.  We then
		 * unloaded the driver and reloaded it.
		 *
		 * Both reset cases set the BAR back to initial state.  For
		 * the latter case, the AER sticky error bit at offset 0x718
		 * should be set, but the Linux kernel doesn't yet know
		 * about that, it appears.  If the original BAR was retained
		 * in the kernel data structures, this may be OK.
		 */
		ipath_dev_err(dd, "enable unit %d failed: error %d\n",
			      dd->ipath_unit, -ret);
		goto bail_devdata;
	}
	addr = pci_resource_start(pdev, 0);
	len = pci_resource_len(pdev, 0);
	ipath_cdbg(VERBOSE, "regbase (0) %llx len %d pdev->irq %d, vend %x/%x "
		   "driver_data %lx\n", addr, len, pdev->irq, ent->vendor,
		   ent->device, ent->driver_data);

	read_bars(dd, pdev, &bar0, &bar1);

	if (!bar1 && !(bar0 & ~0xf)) {
		if (addr) {
			dev_info(&pdev->dev, "BAR is 0 (probable RESET), "
				 "rewriting as %llx\n", addr);
			ret = pci_write_config_dword(
				pdev, PCI_BASE_ADDRESS_0, addr);
			if (ret) {
				ipath_dev_err(dd, "rewrite of BAR0 "
					      "failed: err %d\n", -ret);
				goto bail_disable;
			}
			ret = pci_write_config_dword(
				pdev, PCI_BASE_ADDRESS_1, addr >> 32);
			if (ret) {
				ipath_dev_err(dd, "rewrite of BAR1 "
					      "failed: err %d\n", -ret);
				goto bail_disable;
			}
		} else {
			ipath_dev_err(dd, "BAR is 0 (probable RESET), "
				      "not usable until reboot\n");
			ret = -ENODEV;
			goto bail_disable;
		}
	}

	ret = pci_request_regions(pdev, IPATH_DRV_NAME);
	if (ret) {
		dev_info(&pdev->dev, "pci_request_regions unit %u fails: "
			 "err %d\n", dd->ipath_unit, -ret);
		goto bail_disable;
	}

	ret = pci_set_dma_mask(pdev, DMA_64BIT_MASK);
	if (ret) {
		/*
		 * If the 64 bit setup fails, try 32 bit.  Some systems
		 * do not set up 64 bit maps on systems with 2GB or less
		 * memory installed.
		 */
		ret = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
		if (ret) {
			dev_info(&pdev->dev,
				 "Unable to set DMA mask for unit %u: %d\n",
				 dd->ipath_unit, ret);
			goto bail_regions;
		} else {
			ipath_dbg("No 64bit DMA mask, used 32 bit mask\n");
			ret = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
			if (ret)
				dev_info(&pdev->dev,
					 "Unable to set DMA consistent mask "
					 "for unit %u: %d\n",
					 dd->ipath_unit, ret);
		}
	} else {
		ret = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
		if (ret)
			dev_info(&pdev->dev,
				 "Unable to set DMA consistent mask "
				 "for unit %u: %d\n",
				 dd->ipath_unit, ret);
	}

	pci_set_master(pdev);

	/*
	 * Save BARs to rewrite after device reset.  Save all 64 bits of
	 * BAR, just in case.
	 */
	dd->ipath_pcibar0 = addr;
	dd->ipath_pcibar1 = addr >> 32;
	dd->ipath_deviceid = ent->device;	/* save for later use */
	dd->ipath_vendorid = ent->vendor;

	/* setup the chip-specific functions, as early as possible. */
	switch (ent->device) {
	case PCI_DEVICE_ID_INFINIPATH_HT:
#ifdef CONFIG_HT_IRQ
		ipath_init_iba6110_funcs(dd);
		break;
#else
		ipath_dev_err(dd, "QLogic HT device 0x%x cannot work if "
			      "CONFIG_HT_IRQ is not enabled\n", ent->device);
		return -ENODEV;
#endif
	case PCI_DEVICE_ID_INFINIPATH_PE800:
#ifdef CONFIG_PCI_MSI
		ipath_init_iba6120_funcs(dd);
		break;
#else
		ipath_dev_err(dd, "QLogic PCIE device 0x%x cannot work if "
			      "CONFIG_PCI_MSI is not enabled\n", ent->device);
		return -ENODEV;
#endif
	default:
		ipath_dev_err(dd, "Found unknown QLogic deviceid 0x%x, "
			      "failing\n", ent->device);
		return -ENODEV;
	}

	for (j = 0; j < 6; j++) {
		if (!pdev->resource[j].start)
			continue;
		ipath_cdbg(VERBOSE, "BAR %d start %llx, end %llx, len %llx\n",
			   j, (unsigned long long)pdev->resource[j].start,
			   (unsigned long long)pdev->resource[j].end,
			   (unsigned long long)pci_resource_len(pdev, j));
	}

	if (!addr) {
		ipath_dev_err(dd, "No valid address in BAR 0!\n");
		ret = -ENODEV;
		goto bail_regions;
	}

	dd->ipath_deviceid = ent->device;	/* save for later use */
	dd->ipath_vendorid = ent->vendor;

	ret = pci_read_config_byte(pdev, PCI_REVISION_ID, &rev);
	if (ret) {
		ipath_dev_err(dd, "Failed to read PCI revision ID unit "
			      "%u: err %d\n", dd->ipath_unit, -ret);
		goto bail_regions;	/* shouldn't ever happen */
	}
	dd->ipath_pcirev = rev;

#if defined(__powerpc__)
	/* There isn't a generic way to specify writethrough mappings */
	dd->ipath_kregbase = __ioremap(addr, len,
				       (_PAGE_NO_CACHE|_PAGE_WRITETHRU));
#else
	dd->ipath_kregbase = ioremap_nocache(addr, len);
#endif

	if (!dd->ipath_kregbase) {
		ipath_dbg("Unable to map io addr %llx to kvirt, failing\n",
			  addr);
		ret = -ENOMEM;
		goto bail_iounmap;
	}
	dd->ipath_kregend = (u64 __iomem *)
		((void __iomem *)dd->ipath_kregbase + len);
	dd->ipath_physaddr = addr;	/* used for io_remap, etc. */
	/* for user mmap */
	ipath_cdbg(VERBOSE, "mapped io addr %llx to kregbase %p\n",
		   addr, dd->ipath_kregbase);

	/*
	 * clear ipath_flags here instead of in ipath_init_chip as it is set
	 * by ipath_setup_htconfig.
	 */
	dd->ipath_flags = 0;
	dd->ipath_lli_counter = 0;
	dd->ipath_lli_errors = 0;

	if (dd->ipath_f_bus(dd, pdev))
		ipath_dev_err(dd, "Failed to setup config space; "
			      "continuing anyway\n");

	/*
	 * set up our interrupt handler; IRQF_SHARED probably not needed,
	 * since MSI interrupts shouldn't be shared but won't hurt for now.
	 * Check for 0 irq after we return from chip-specific bus setup,
	 * since that setup can affect it.
	 */
	if (!dd->ipath_irq)
		ipath_dev_err(dd, "irq is 0, BIOS error?  Interrupts won't "
			      "work\n");
	else {
		ret = request_irq(dd->ipath_irq, ipath_intr, IRQF_SHARED,
				  IPATH_DRV_NAME, dd);
		if (ret) {
			ipath_dev_err(dd, "Couldn't setup irq handler, "
				      "irq=%d: %d\n", dd->ipath_irq, ret);
			goto bail_iounmap;
		}
	}

	ret = ipath_init_chip(dd, 0);	/* do the chip-specific init */
	if (ret)
		goto bail_irqsetup;

	ret = ipath_enable_wc(dd);

	if (ret) {
		ipath_dev_err(dd, "Write combining not enabled "
			      "(err %d): performance may be poor\n",
			      -ret);
		ret = 0;
	}

	ipath_device_create_group(&pdev->dev, dd);
	ipathfs_add_device(dd);
	ipath_user_add(dd);
	ipath_diag_add(dd);
	ipath_register_ib_device(dd);

	goto bail;

bail_irqsetup:
	if (pdev->irq)
		free_irq(pdev->irq, dd);

bail_iounmap:
	iounmap((volatile void __iomem *) dd->ipath_kregbase);

bail_regions:
	pci_release_regions(pdev);

bail_disable:
	pci_disable_device(pdev);

bail_devdata:
	ipath_free_devdata(pdev, dd);

bail:
	return ret;
}

static void __devexit cleanup_device(struct ipath_devdata *dd)
{
	int port;

	ipath_shutdown_device(dd);

	if (*dd->ipath_statusp & IPATH_STATUS_CHIP_PRESENT) {
		/* can't do anything more with chip; needs re-init */
		*dd->ipath_statusp &= ~IPATH_STATUS_CHIP_PRESENT;
		if (dd->ipath_kregbase) {
			/*
			 * if we haven't already cleaned up before, clear
			 * these so any register reads/writes "fail" until
			 * re-init
			 */
			dd->ipath_kregbase = NULL;
			dd->ipath_uregbase = 0;
			dd->ipath_sregbase = 0;
			dd->ipath_cregbase = 0;
			dd->ipath_kregsize = 0;
		}
		ipath_disable_wc(dd);
	}

	if (dd->ipath_pioavailregs_dma) {
		dma_free_coherent(&dd->pcidev->dev, PAGE_SIZE,
				  (void *) dd->ipath_pioavailregs_dma,
				  dd->ipath_pioavailregs_phys);
		dd->ipath_pioavailregs_dma = NULL;
	}
	if (dd->ipath_dummy_hdrq) {
		dma_free_coherent(&dd->pcidev->dev,
				  dd->ipath_pd[0]->port_rcvhdrq_size,
				  dd->ipath_dummy_hdrq, dd->ipath_dummy_hdrq_phys);
		dd->ipath_dummy_hdrq = NULL;
	}

	if (dd->ipath_pageshadow) {
		struct page **tmpp = dd->ipath_pageshadow;
		dma_addr_t *tmpd = dd->ipath_physshadow;
		int i, cnt = 0;

		ipath_cdbg(VERBOSE, "Unlocking any expTID pages still "
			   "locked\n");
		for (port = 0; port < dd->ipath_cfgports; port++) {
			int port_tidbase = port * dd->ipath_rcvtidcnt;
			int maxtid = port_tidbase + dd->ipath_rcvtidcnt;
			for (i = port_tidbase; i < maxtid; i++) {
				if (!tmpp[i])
					continue;
				pci_unmap_page(dd->pcidev, tmpd[i],
					       PAGE_SIZE, PCI_DMA_FROMDEVICE);
				ipath_release_user_pages(&tmpp[i], 1);
				tmpp[i] = NULL;
				cnt++;
			}
		}
		if (cnt) {
			ipath_stats.sps_pageunlocks += cnt;
			ipath_cdbg(VERBOSE, "There were still %u expTID "
				   "entries locked\n", cnt);
		}
		if (ipath_stats.sps_pagelocks ||
		    ipath_stats.sps_pageunlocks)
			ipath_cdbg(VERBOSE, "%llu pages locked, %llu "
				   "unlocked via ipath_m{un}lock\n",
				   (unsigned long long)
				   ipath_stats.sps_pagelocks,
				   (unsigned long long)
				   ipath_stats.sps_pageunlocks);

		ipath_cdbg(VERBOSE, "Free shadow page tid array at %p\n",
			   dd->ipath_pageshadow);
		tmpp = dd->ipath_pageshadow;
		dd->ipath_pageshadow = NULL;
		vfree(tmpp);
	}

	/*
	 * free any resources still in use (usually just kernel ports)
	 * at unload; we do for portcnt, not cfgports, because cfgports
	 * could have changed while we were loaded.
	 */
	for (port = 0; port < dd->ipath_portcnt; port++) {
		struct ipath_portdata *pd = dd->ipath_pd[port];
		dd->ipath_pd[port] = NULL;
		ipath_free_pddata(dd, pd);
	}
	kfree(dd->ipath_pd);
	/*
	 * debuggability, in case some cleanup path tries to use it
	 * after this
	 */
	dd->ipath_pd = NULL;
}

static void __devexit ipath_remove_one(struct pci_dev *pdev)
{
	struct ipath_devdata *dd = pci_get_drvdata(pdev);

	ipath_cdbg(VERBOSE, "removing, pdev=%p, dd=%p\n", pdev, dd);

	if (dd->verbs_dev)
		ipath_unregister_ib_device(dd->verbs_dev);

	ipath_diag_remove(dd);
	ipath_user_remove(dd);
	ipathfs_remove_device(dd);
	ipath_device_remove_group(&pdev->dev, dd);

	ipath_cdbg(VERBOSE, "Releasing pci memory regions, dd %p, "
		   "unit %u\n", dd, (u32) dd->ipath_unit);

	cleanup_device(dd);

	/*
	 * turn off rcv, send, and interrupts for all ports, all drivers
	 * should also hard reset the chip here?
	 * free up port 0 (kernel) rcvhdr, egr bufs, and eventually tid bufs
	 * for all versions of the driver, if they were allocated
	 */
	if (dd->ipath_irq) {
		ipath_cdbg(VERBOSE, "unit %u free irq %d\n",
			   dd->ipath_unit, dd->ipath_irq);
		dd->ipath_f_free_irq(dd);
	} else
		ipath_dbg("irq is 0, not doing free_irq "
			  "for unit %u\n", dd->ipath_unit);
	/*
	 * we check for NULL here, because it's outside
	 * the kregbase check, and we need to call it
	 * after the free_irq.  Thus it's possible that
	 * the function pointers were never initialized.
	 */
	if (dd->ipath_f_cleanup)
		/* clean up chip-specific stuff */
		dd->ipath_f_cleanup(dd);

	ipath_cdbg(VERBOSE, "Unmapping kregbase %p\n", dd->ipath_kregbase);
	iounmap((volatile void __iomem *) dd->ipath_kregbase);
	pci_release_regions(pdev);
	ipath_cdbg(VERBOSE, "calling pci_disable_device\n");
	pci_disable_device(pdev);

	ipath_free_devdata(pdev, dd);
}

/* general driver use */
DEFINE_MUTEX(ipath_mutex);

static DEFINE_SPINLOCK(ipath_pioavail_lock);

/**
 * ipath_disarm_piobufs - cancel a range of PIO buffers
 * @dd: the infinipath device
 * @first: the first PIO buffer to cancel
 * @cnt: the number of PIO buffers to cancel
 *
 * Cancel a range of PIO buffers that may be armed but not triggered.
 * Used at init to ensure buffer state, at user process close in case
 * the process died while writing to a PIO buffer, and also after errors.
 */
void ipath_disarm_piobufs(struct ipath_devdata *dd, unsigned first,
			  unsigned cnt)
{
	unsigned i, last = first + cnt;
	u64 sendctrl, sendorig;

	ipath_cdbg(PKT, "disarm %u PIObufs first=%u\n", cnt, first);
	sendorig = dd->ipath_sendctrl | INFINIPATH_S_DISARM;
	for (i = first; i < last; i++) {
		sendctrl = sendorig |
			(i << INFINIPATH_S_DISARMPIOBUF_SHIFT);
		ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
				 sendctrl);
	}

	/*
	 * Write it again with the current value, in case ipath_sendctrl
	 * changed while we were looping; no critical bits that would
	 * require locking.
	 *
	 * Write a 0, and then the original value, reading scratch in
	 * between.  This seems to avoid a chip timing race that causes
	 * pioavail updates to memory to stop.
	 */
	ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
			 0);
	sendorig = ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
	ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
			 dd->ipath_sendctrl);
}
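
/*
 * Usage sketch for the routine above (illustrative, not new driver code):
 * on user process close, the file-ops code cancels every buffer the dying
 * process might have left armed, along the lines of
 *
 *	ipath_disarm_piobufs(dd, first_buf_of_port, bufs_per_port);
 *
 * where the two range arguments are hypothetical names; real callers pass
 * whatever PIO buffer range the port actually owns.
 */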

/**
 * ipath_wait_linkstate - wait for an IB link state change to occur
 * @dd: the infinipath device
 * @state: the state to wait for
 * @msecs: the number of milliseconds to wait
 *
 * Wait up to msecs milliseconds for an IB link state change to occur.
 * For now, take the easy polling route.  Currently used only by
 * ipath_set_linkstate.  Returns 0 if state reached, otherwise
 * -ETIMEDOUT.  state can have multiple states set, for any of several
 * transitions.
 */
static int ipath_wait_linkstate(struct ipath_devdata *dd, u32 state,
				int msecs)
{
	dd->ipath_state_wanted = state;
	wait_event_interruptible_timeout(ipath_state_wait,
					 (dd->ipath_flags & state),
					 msecs_to_jiffies(msecs));
	dd->ipath_state_wanted = 0;

	if (!(dd->ipath_flags & state)) {
		u64 val;
		ipath_cdbg(VERBOSE, "Didn't reach linkstate %s within %u"
			   " ms\n",
			   /* test INIT ahead of DOWN, both can be set */
			   (state & IPATH_LINKINIT) ? "INIT" :
			   ((state & IPATH_LINKDOWN) ? "DOWN" :
			    ((state & IPATH_LINKARMED) ? "ARM" : "ACTIVE")),
			   msecs);
		val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_ibcstatus);
		ipath_cdbg(VERBOSE, "ibcc=%llx ibcstatus=%llx (%s)\n",
			   (unsigned long long) ipath_read_kreg64(
				   dd, dd->ipath_kregs->kr_ibcctrl),
			   (unsigned long long) val,
			   ipath_ibcstatus_str[val & 0xf]);
	}
	return (dd->ipath_flags & state) ? 0 : -ETIMEDOUT;
}

/*
 * Decode the error status into strings, deciding whether to always
 * print it or not depending on "normal packet errors" vs everything
 * else.  Return 1 if "real" errors, otherwise 0 if only packet
 * errors, so the caller can decide what to print with the string.
 */
int ipath_decode_err(char *buf, size_t blen, ipath_err_t err)
{
	int iserr = 1;
	*buf = '\0';
	if (err & INFINIPATH_E_PKTERRS) {
		if (!(err & ~INFINIPATH_E_PKTERRS))
			iserr = 0;	/* if only packet errors. */
		if (ipath_debug & __IPATH_ERRPKTDBG) {
			if (err & INFINIPATH_E_REBP)
				strlcat(buf, "EBP ", blen);
			if (err & INFINIPATH_E_RVCRC)
				strlcat(buf, "VCRC ", blen);
			if (err & INFINIPATH_E_RICRC) {
				strlcat(buf, "CRC ", blen);
				/* clear for check below, so only once */
				err &= INFINIPATH_E_RICRC;
			}
			if (err & INFINIPATH_E_RSHORTPKTLEN)
				strlcat(buf, "rshortpktlen ", blen);
			if (err & INFINIPATH_E_SDROPPEDDATAPKT)
				strlcat(buf, "sdroppeddatapkt ", blen);
			if (err & INFINIPATH_E_SPKTLEN)
				strlcat(buf, "spktlen ", blen);
		}
		if ((err & INFINIPATH_E_RICRC) &&
		    !(err & (INFINIPATH_E_RVCRC | INFINIPATH_E_REBP)))
			strlcat(buf, "CRC ", blen);
		if (!iserr)
			goto done;
	}
	if (err & INFINIPATH_E_RHDRLEN)
		strlcat(buf, "rhdrlen ", blen);
	if (err & INFINIPATH_E_RBADTID)
		strlcat(buf, "rbadtid ", blen);
	if (err & INFINIPATH_E_RBADVERSION)
		strlcat(buf, "rbadversion ", blen);
	if (err & INFINIPATH_E_RHDR)
		strlcat(buf, "rhdr ", blen);
	if (err & INFINIPATH_E_RLONGPKTLEN)
		strlcat(buf, "rlongpktlen ", blen);
	if (err & INFINIPATH_E_RMAXPKTLEN)
		strlcat(buf, "rmaxpktlen ", blen);
	if (err & INFINIPATH_E_RMINPKTLEN)
		strlcat(buf, "rminpktlen ", blen);
	if (err & INFINIPATH_E_SMINPKTLEN)
		strlcat(buf, "sminpktlen ", blen);
	if (err & INFINIPATH_E_RFORMATERR)
		strlcat(buf, "rformaterr ", blen);
	if (err & INFINIPATH_E_RUNSUPVL)
		strlcat(buf, "runsupvl ", blen);
	if (err & INFINIPATH_E_RUNEXPCHAR)
		strlcat(buf, "runexpchar ", blen);
	if (err & INFINIPATH_E_RIBFLOW)
		strlcat(buf, "ribflow ", blen);
	if (err & INFINIPATH_E_SUNDERRUN)
		strlcat(buf, "sunderrun ", blen);
	if (err & INFINIPATH_E_SPIOARMLAUNCH)
		strlcat(buf, "spioarmlaunch ", blen);
	if (err & INFINIPATH_E_SUNEXPERRPKTNUM)
		strlcat(buf, "sunexperrpktnum ", blen);
	if (err & INFINIPATH_E_SDROPPEDSMPPKT)
		strlcat(buf, "sdroppedsmppkt ", blen);
	if (err & INFINIPATH_E_SMAXPKTLEN)
		strlcat(buf, "smaxpktlen ", blen);
	if (err & INFINIPATH_E_SUNSUPVL)
		strlcat(buf, "sunsupVL ", blen);
	if (err & INFINIPATH_E_INVALIDADDR)
		strlcat(buf, "invalidaddr ", blen);
	if (err & INFINIPATH_E_RRCVEGRFULL)
		strlcat(buf, "rcvegrfull ", blen);
	if (err & INFINIPATH_E_RRCVHDRFULL)
		strlcat(buf, "rcvhdrfull ", blen);
	if (err & INFINIPATH_E_IBSTATUSCHANGED)
		strlcat(buf, "ibcstatuschg ", blen);
	if (err & INFINIPATH_E_RIBLOSTLINK)
		strlcat(buf, "riblostlink ", blen);
	if (err & INFINIPATH_E_HARDWARE)
		strlcat(buf, "hardware ", blen);
	if (err & INFINIPATH_E_RESET)
		strlcat(buf, "reset ", blen);
done:
	return iserr;
}

/**
 * get_rhf_errstring - decode RHF errors
 * @err: the err number
 * @msg: the output buffer
 * @len: the length of the output buffer
 *
 * only used in one place now, may want more later
 */
static void get_rhf_errstring(u32 err, char *msg, size_t len)
{
	/* if no errors, we don't need to check what's first */
	*msg = '\0';

	if (err & INFINIPATH_RHF_H_ICRCERR)
		strlcat(msg, "icrcerr ", len);
	if (err & INFINIPATH_RHF_H_VCRCERR)
		strlcat(msg, "vcrcerr ", len);
	if (err & INFINIPATH_RHF_H_PARITYERR)
		strlcat(msg, "parityerr ", len);
	if (err & INFINIPATH_RHF_H_LENERR)
		strlcat(msg, "lenerr ", len);
	if (err & INFINIPATH_RHF_H_MTUERR)
		strlcat(msg, "mtuerr ", len);
	if (err & INFINIPATH_RHF_H_IHDRERR)
		/* infinipath hdr checksum error */
		strlcat(msg, "ipathhdrerr ", len);
	if (err & INFINIPATH_RHF_H_TIDERR)
		strlcat(msg, "tiderr ", len);
	if (err & INFINIPATH_RHF_H_MKERR)
		/* bad port, offset, etc. */
		strlcat(msg, "invalid ipathhdr ", len);
	if (err & INFINIPATH_RHF_H_IBERR)
		strlcat(msg, "iberr ", len);
	if (err & INFINIPATH_RHF_L_SWA)
		strlcat(msg, "swA ", len);
	if (err & INFINIPATH_RHF_L_SWB)
		strlcat(msg, "swB ", len);
}

/**
 * ipath_get_egrbuf - get an eager buffer
 * @dd: the infinipath device
 * @bufnum: the eager buffer to get
 * @err: unused
 *
 * must only be called if ipath_pd[port] is known to be allocated
 */
static inline void *ipath_get_egrbuf(struct ipath_devdata *dd, u32 bufnum,
				     int err)
{
	return dd->ipath_port0_skbinfo ?
		(void *) dd->ipath_port0_skbinfo[bufnum].skb->data : NULL;
}

/**
 * ipath_alloc_skb - allocate an skb and buffer with possible constraints
 * @dd: the infinipath device
 * @gfp_mask: the sk_buff GFP mask
 */
struct sk_buff *ipath_alloc_skb(struct ipath_devdata *dd,
				gfp_t gfp_mask)
{
	struct sk_buff *skb;
	u32 len;

	/*
	 * The only fully supported way to handle this is to allocate lots
	 * of extra, align as needed, and then do skb_reserve().  That
	 * wastes a lot of memory...  I'll have to hack this into
	 * infinipath_copy also.
	 */

	/*
	 * We need 2 extra bytes for ipath_ether data sent in the
	 * key header.  In order to keep everything dword aligned,
	 * we'll reserve 4 bytes.
	 */
	len = dd->ipath_ibmaxlen + 4;

	if (dd->ipath_flags & IPATH_4BYTE_TID) {
		/* We need a 2KB multiple alignment, and there is no way
		 * to do it except to allocate extra and then skb_reserve
		 * enough to bring it up to the right alignment.
		 */
		len += 2047;
	}

	skb = __dev_alloc_skb(len, gfp_mask);
	if (!skb) {
		ipath_dev_err(dd, "Failed to allocate skbuff, length %u\n",
			      len);
		goto bail;
	}

	skb_reserve(skb, 4);

	if (dd->ipath_flags & IPATH_4BYTE_TID) {
		u32 una = (unsigned long)skb->data & 2047;
		if (una)
			skb_reserve(skb, 2048 - una);
	}

bail:
	return skb;
}

static void ipath_rcv_hdrerr(struct ipath_devdata *dd,
			     u32 eflags,
			     u32 l,
			     u32 etail,
			     u64 *rc)
{
	char emsg[128];
	struct ipath_message_header *hdr;

	get_rhf_errstring(eflags, emsg, sizeof emsg);
	hdr = (struct ipath_message_header *)&rc[1];
	ipath_cdbg(PKT, "RHFerrs %x hdrqtail=%x typ=%u "
		   "tlen=%x opcode=%x egridx=%x: %s\n",
		   eflags, l,
		   ipath_hdrget_rcv_type((__le32 *) rc),
		   ipath_hdrget_length_in_bytes((__le32 *) rc),
		   be32_to_cpu(hdr->bth[0]) >> 24,
		   etail, emsg);

	/* Count local link integrity errors. */
	if (eflags & (INFINIPATH_RHF_H_ICRCERR | INFINIPATH_RHF_H_VCRCERR)) {
		u8 n = (dd->ipath_ibcctrl >>
			INFINIPATH_IBCC_PHYERRTHRESHOLD_SHIFT) &
			INFINIPATH_IBCC_PHYERRTHRESHOLD_MASK;

		if (++dd->ipath_lli_counter > n) {
			dd->ipath_lli_counter = 0;
			dd->ipath_lli_errors++;
		}
	}
}

/*
 * ipath_kreceive - receive a packet
 * @dd: the infinipath device
 *
 * called from interrupt handler for errors or receive interrupt
 */
void ipath_kreceive(struct ipath_devdata *dd)
{
	u64 *rc;
	void *ebuf;
	const u32 rsize = dd->ipath_rcvhdrentsize;	/* words */
	const u32 maxcnt = dd->ipath_rcvhdrcnt * rsize;	/* words */
	u32 etail = -1, l, hdrqtail;
	struct ipath_message_header *hdr;
	u32 eflags, i, etype, tlen, pkttot = 0, updegr = 0, reloop = 0;
	static u64 totcalls;	/* stats, may eventually remove */

	if (!dd->ipath_hdrqtailptr) {
		ipath_dev_err(dd,
			      "hdrqtailptr not set, can't do receives\n");
		goto bail;
	}

	/* There is already a thread processing this queue. */
	if (test_and_set_bit(0, &dd->ipath_rcv_pending))
		goto bail;

	l = dd->ipath_port0head;
	hdrqtail = (u32) le64_to_cpu(*dd->ipath_hdrqtailptr);
	if (l == hdrqtail)
		goto done;

reloop:
	for (i = 0; l != hdrqtail; i++) {
		u32 qp;
		u8 *bthbytes;

		rc = (u64 *) (dd->ipath_pd[0]->port_rcvhdrq + (l << 2));
		hdr = (struct ipath_message_header *)&rc[1];
		/*
		 * could make a network order version of IPATH_KD_QP, and
		 * do the obvious shift before masking to speed this up.
		 */
		qp = ntohl(hdr->bth[1]) & 0xffffff;
		bthbytes = (u8 *) hdr->bth;

		eflags = ipath_hdrget_err_flags((__le32 *) rc);
		etype = ipath_hdrget_rcv_type((__le32 *) rc);
		/* total length */
		tlen = ipath_hdrget_length_in_bytes((__le32 *) rc);
		ebuf = NULL;
		if (etype != RCVHQ_RCV_TYPE_EXPECTED) {
			/*
			 * It turns out that the chip uses an eager buffer
			 * for all non-expected packets, whether it "needs"
			 * one or not.  So always get the index, but don't
			 * set ebuf (so we try to copy data) unless the
			 * length requires it.
			 */
			etail = ipath_hdrget_index((__le32 *) rc);
			if (tlen > sizeof(*hdr) ||
			    etype == RCVHQ_RCV_TYPE_NON_KD)
				ebuf = ipath_get_egrbuf(dd, etail, 0);
		}

		/*
		 * both tiderr and ipathhdrerr are set for all plain IB
		 * packets; only ipathhdrerr should be set.
		 */

		if (etype != RCVHQ_RCV_TYPE_NON_KD && etype !=
		    RCVHQ_RCV_TYPE_ERROR && ipath_hdrget_ipath_ver(
			    hdr->iph.ver_port_tid_offset) !=
		    IPS_PROTO_VERSION) {
			ipath_cdbg(PKT, "Bad InfiniPath protocol version "
				   "%x\n", etype);
		}

		if (unlikely(eflags))
			ipath_rcv_hdrerr(dd, eflags, l, etail, rc);
		else if (etype == RCVHQ_RCV_TYPE_NON_KD) {
			ipath_ib_rcv(dd->verbs_dev, rc + 1, ebuf, tlen);
			if (dd->ipath_lli_counter)
				dd->ipath_lli_counter--;
			ipath_cdbg(PKT, "typ %x, opcode %x (eager, "
				   "qp=%x), len %x; ignored\n",
				   etype, bthbytes[0], qp, tlen);
		}
		else if (etype == RCVHQ_RCV_TYPE_EAGER)
			ipath_cdbg(PKT, "typ %x, opcode %x (eager, "
				   "qp=%x), len %x; ignored\n",
				   etype, bthbytes[0], qp, tlen);
		else if (etype == RCVHQ_RCV_TYPE_EXPECTED)
			ipath_dbg("Bug: Expected TID, opcode %x; ignored\n",
				  be32_to_cpu(hdr->bth[0]) & 0xff);
		else {
			/*
			 * error packet, type of error unknown.
			 * Probably type 3, but we don't know, so don't
			 * even try to print the opcode, etc.
			 */
			ipath_dbg("Error Pkt, but no eflags! egrbuf %x, "
				  "len %x\nhdrq@%lx;hdrq+%x rhf: %llx; "
				  "hdr %llx %llx %llx %llx %llx\n",
				  etail, tlen, (unsigned long) rc, l,
				  (unsigned long long) rc[0],
				  (unsigned long long) rc[1],
				  (unsigned long long) rc[2],
				  (unsigned long long) rc[3],
				  (unsigned long long) rc[4],
				  (unsigned long long) rc[5]);
		}
		l += rsize;
		if (l >= maxcnt)
			l = 0;
		if (etype != RCVHQ_RCV_TYPE_EXPECTED)
			updegr = 1;
		/*
		 * update head regs on last packet, and every 16 packets.
		 * Reduce bus traffic, while still trying to prevent
		 * rcvhdrq overflows, for when the queue is nearly full
		 */
		if (l == hdrqtail || (i && !(i & 0xf))) {
			u64 lval;
			if (l == hdrqtail)
				/* request IBA6120 interrupt only on last */
				lval = dd->ipath_rhdrhead_intr_off | l;
			else
				lval = l;
			(void)ipath_write_ureg(dd, ur_rcvhdrhead, lval, 0);
			if (updegr) {
				(void)ipath_write_ureg(dd, ur_rcvegrindexhead,
						       etail, 0);
				updegr = 0;
			}
		}
	}

	if (!dd->ipath_rhdrhead_intr_off && !reloop) {
		/* IBA6110 workaround; we can have a race clearing chip
		 * interrupt with another interrupt about to be delivered,
		 * and can clear it before it is delivered on the GPIO
		 * workaround.  By doing the extra check here for the
		 * in-memory tail register updating while we were doing
		 * earlier packets, we "almost" guarantee we have covered
		 * that case.
		 */
		u32 hqtail = (u32)le64_to_cpu(*dd->ipath_hdrqtailptr);
		if (hqtail != hdrqtail) {
			hdrqtail = hqtail;
			reloop = 1;	/* loop 1 extra time at most */
			goto reloop;
		}
	}

	pkttot += i;

	dd->ipath_port0head = l;

	if (pkttot > ipath_stats.sps_maxpkts_call)
		ipath_stats.sps_maxpkts_call = pkttot;
	ipath_stats.sps_port0pkts += pkttot;
	ipath_stats.sps_avgpkts_call =
		ipath_stats.sps_port0pkts / ++totcalls;

done:
	clear_bit(0, &dd->ipath_rcv_pending);
	smp_mb__after_clear_bit();

bail:;
}
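
/*
 * Worked example of the ring arithmetic above (the sizes are illustrative
 * assumptions, not dictated by this file): with rsize = 16 words per
 * header entry and a 512-entry rcvhdrq, maxcnt is 512 * 16 = 8192 words;
 * the head index l advances by 16 per packet and wraps to 0 at 8192.
 * The byte offset into port_rcvhdrq is (l << 2), since l counts 32-bit
 * words.
 */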

/**
 * ipath_update_pio_bufs - update shadow copy of the PIO availability map
 * @dd: the infinipath device
 *
 * called whenever our local copy indicates we have run out of send buffers
 * NOTE: This can be called from interrupt context by some code
 * and from non-interrupt context by ipath_getpiobuf().
 */

static void ipath_update_pio_bufs(struct ipath_devdata *dd)
{
	unsigned long flags;
	int i;
	const unsigned piobregs = (unsigned)dd->ipath_pioavregs;

	/* If the generation (check) bits have changed, then we update the
	 * busy bit for the corresponding PIO buffer.  This algorithm will
	 * modify positions to the value they already have in some cases
	 * (i.e., no change), but it's faster than changing only the bits
	 * that have changed.
	 *
	 * We would like to do this atomically, to avoid spinlocks in the
	 * critical send path, but that's not really possible, given the
	 * type of changes, and that this routine could be called on
	 * multiple cpus simultaneously, so we lock in this routine only,
	 * to avoid conflicting updates; all we change is the shadow, and
	 * it's a single 64 bit memory location, so by definition the update
	 * is atomic in terms of what other cpus can see in testing the
	 * bits.  The spin_lock overhead isn't too bad, since it only
	 * happens when all buffers are in use, so only cpu overhead, not
	 * latency or bandwidth is affected.
	 */
#define _IPATH_ALL_CHECKBITS 0x5555555555555555ULL
	if (!dd->ipath_pioavailregs_dma) {
		ipath_dbg("Update shadow pioavail, but regs_dma NULL!\n");
		return;
	}
	if (ipath_debug & __IPATH_VERBDBG) {
		/* only if packet debug and verbose */
		volatile __le64 *dma = dd->ipath_pioavailregs_dma;
		unsigned long *shadow = dd->ipath_pioavailshadow;

		ipath_cdbg(PKT, "Refill avail, dma0=%llx shad0=%lx, "
			   "d1=%llx s1=%lx, d2=%llx s2=%lx, d3=%llx "
			   "s3=%lx\n",
			   (unsigned long long) le64_to_cpu(dma[0]),
			   shadow[0],
			   (unsigned long long) le64_to_cpu(dma[1]),
			   shadow[1],
			   (unsigned long long) le64_to_cpu(dma[2]),
			   shadow[2],
			   (unsigned long long) le64_to_cpu(dma[3]),
			   shadow[3]);
		if (piobregs > 4)
			ipath_cdbg(
				PKT, "2nd group, dma4=%llx shad4=%lx, "
				"d5=%llx s5=%lx, d6=%llx s6=%lx, "
				"d7=%llx s7=%lx\n",
				(unsigned long long) le64_to_cpu(dma[4]),
				shadow[4],
				(unsigned long long) le64_to_cpu(dma[5]),
				shadow[5],
				(unsigned long long) le64_to_cpu(dma[6]),
				shadow[6],
				(unsigned long long) le64_to_cpu(dma[7]),
				shadow[7]);
	}
	spin_lock_irqsave(&ipath_pioavail_lock, flags);
	for (i = 0; i < piobregs; i++) {
		u64 pchbusy, pchg, piov, pnew;
		/*
		 * Chip Errata: bug 6641; even and odd qwords>3 are swapped
		 */
		if (i > 3) {
			if (i & 1)
				piov = le64_to_cpu(
					dd->ipath_pioavailregs_dma[i - 1]);
			else
				piov = le64_to_cpu(
					dd->ipath_pioavailregs_dma[i + 1]);
		} else
			piov = le64_to_cpu(dd->ipath_pioavailregs_dma[i]);
		pchg = _IPATH_ALL_CHECKBITS &
			~(dd->ipath_pioavailshadow[i] ^ piov);
		pchbusy = pchg << INFINIPATH_SENDPIOAVAIL_BUSY_SHIFT;
		if (pchg && (pchbusy & dd->ipath_pioavailshadow[i])) {
			pnew = dd->ipath_pioavailshadow[i] & ~pchbusy;
			pnew |= piov & pchbusy;
			dd->ipath_pioavailshadow[i] = pnew;
		}
	}
	spin_unlock_irqrestore(&ipath_pioavail_lock, flags);
}
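
/*
 * Sketch of the shadow layout the routine above relies on: each PIO
 * buffer i owns two adjacent bits in ipath_pioavailshadow -- the even
 * bit 2*i is the generation ("check") bit (_IPATH_ALL_CHECKBITS selects
 * every even bit), and bit 2*i+1 is the busy bit (see the test_bit /
 * change_bit pairs in ipath_getpiobuf() below).  pchg masks the check
 * bit positions used to decide which buffers to refresh; shifting it by
 * INFINIPATH_SENDPIOAVAIL_BUSY_SHIFT yields the matching busy-bit mask,
 * and only those busy bits are copied in from the chip's DMA'd register
 * image, all under ipath_pioavail_lock.
 */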

/**
 * ipath_setrcvhdrsize - set the receive header size
 * @dd: the infinipath device
 * @rhdrsize: the receive header size
 *
 * called from user init code, and also layered driver init
 */
int ipath_setrcvhdrsize(struct ipath_devdata *dd, unsigned rhdrsize)
{
	int ret = 0;

	if (dd->ipath_flags & IPATH_RCVHDRSZ_SET) {
		if (dd->ipath_rcvhdrsize != rhdrsize) {
			dev_info(&dd->pcidev->dev,
				 "Error: can't set protocol header "
				 "size %u, already %u\n",
				 rhdrsize, dd->ipath_rcvhdrsize);
			ret = -EAGAIN;
		} else
			ipath_cdbg(VERBOSE, "Reuse same protocol header "
				   "size %u\n", dd->ipath_rcvhdrsize);
	} else if (rhdrsize > (dd->ipath_rcvhdrentsize -
			       (sizeof(u64) / sizeof(u32)))) {
		ipath_dbg("Error: can't set protocol header size %u "
			  "(> max %u)\n", rhdrsize,
			  dd->ipath_rcvhdrentsize -
			  (u32) (sizeof(u64) / sizeof(u32)));
		ret = -EOVERFLOW;
	} else {
		dd->ipath_flags |= IPATH_RCVHDRSZ_SET;
		dd->ipath_rcvhdrsize = rhdrsize;
		ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvhdrsize,
				 dd->ipath_rcvhdrsize);
		ipath_cdbg(VERBOSE, "Set protocol header size to %u\n",
			   dd->ipath_rcvhdrsize);
	}
	return ret;
}

/**
 * ipath_getpiobuf - find an available pio buffer
 * @dd: the infinipath device
 * @pbufnum: the buffer number is placed here
 *
 * Does appropriate marking as busy, etc.  Returns a pointer to the
 * buffer if one is found, otherwise NULL; the buffer number is placed
 * in *pbufnum.  Used by ipath_layer_send.
 */
u32 __iomem *ipath_getpiobuf(struct ipath_devdata *dd, u32 *pbufnum)
{
	int i, j, starti, updated = 0;
	unsigned piobcnt, iter;
	unsigned long flags;
	unsigned long *shadow = dd->ipath_pioavailshadow;
	u32 __iomem *buf;

	piobcnt = (unsigned)(dd->ipath_piobcnt2k
			     + dd->ipath_piobcnt4k);
	starti = dd->ipath_lastport_piobuf;
	iter = piobcnt - starti;
	if (dd->ipath_upd_pio_shadow) {
		/*
		 * Minor optimization.  If we had no buffers on last call,
		 * start out by doing the update; continue and do scan even
		 * if no buffers were updated, to be paranoid
		 */
		ipath_update_pio_bufs(dd);
		/* we scanned here, don't do it at end of scan */
		updated = 1;
		i = starti;
	} else
		i = dd->ipath_lastpioindex;

rescan:
	/*
	 * while test_and_set_bit() is atomic, we do that and then the
	 * change_bit(), and the pair is not.  See if this is the cause
	 * of the remaining armlaunch errors.
	 */
	spin_lock_irqsave(&ipath_pioavail_lock, flags);
	for (j = 0; j < iter; j++, i++) {
		if (i >= piobcnt)
			i = starti;
		/*
		 * To avoid bus lock overhead, we first find a candidate
		 * buffer, then do the test and set, and continue if that
		 * fails.
		 */
		if (test_bit((2 * i) + 1, shadow) ||
		    test_and_set_bit((2 * i) + 1, shadow))
			continue;
		/* flip generation bit */
		change_bit(2 * i, shadow);
		break;
	}
	spin_unlock_irqrestore(&ipath_pioavail_lock, flags);

	if (j == iter) {
		volatile __le64 *dma = dd->ipath_pioavailregs_dma;

		/*
		 * first time through; shadow exhausted, but may be real
		 * buffers available, so go see; if any updated, rescan
		 * (once)
		 */
		if (!updated) {
			ipath_update_pio_bufs(dd);
			updated = 1;
			i = starti;
			goto rescan;
		}
		dd->ipath_upd_pio_shadow = 1;
		/*
		 * not atomic, but if we lose one once in a while, that's OK
		 */
		ipath_stats.sps_nopiobufs++;
		if (!(++dd->ipath_consec_nopiobuf % 100000)) {
			ipath_dbg(
				"%u pio sends with no bufavail; dmacopy: "
				"%llx %llx %llx %llx; shadow: "
				"%lx %lx %lx %lx\n",
				dd->ipath_consec_nopiobuf,
				(unsigned long long) le64_to_cpu(dma[0]),
				(unsigned long long) le64_to_cpu(dma[1]),
				(unsigned long long) le64_to_cpu(dma[2]),
				(unsigned long long) le64_to_cpu(dma[3]),
				shadow[0], shadow[1], shadow[2],
				shadow[3]);
			/*
			 * 4 buffers per byte, 4 registers above, cover rest
			 * below
			 */
			if ((dd->ipath_piobcnt2k + dd->ipath_piobcnt4k) >
			    (sizeof(shadow[0]) * 4 * 4))
				ipath_dbg("2nd group: dmacopy: %llx %llx "
					  "%llx %llx; shadow: %lx %lx "
					  "%lx %lx\n",
					  (unsigned long long)
					  le64_to_cpu(dma[4]),
					  (unsigned long long)
					  le64_to_cpu(dma[5]),
					  (unsigned long long)
					  le64_to_cpu(dma[6]),
					  (unsigned long long)
					  le64_to_cpu(dma[7]),
					  shadow[4], shadow[5],
					  shadow[6], shadow[7]);
		}
		buf = NULL;
		goto bail;
	}

	/*
	 * set next starting place.  Since it's just an optimization,
	 * it doesn't matter who wins on this, so no locking
	 */
	dd->ipath_lastpioindex = i + 1;
	if (dd->ipath_upd_pio_shadow)
		dd->ipath_upd_pio_shadow = 0;
	if (dd->ipath_consec_nopiobuf)
		dd->ipath_consec_nopiobuf = 0;
	if (i < dd->ipath_piobcnt2k)
		buf = (u32 __iomem *) (dd->ipath_pio2kbase +
				       i * dd->ipath_palign);
	else
		buf = (u32 __iomem *)
			(dd->ipath_pio4kbase +
			 (i - dd->ipath_piobcnt2k) * dd->ipath_4kalign);
	ipath_cdbg(VERBOSE, "Return piobuf%u %uk @ %p\n",
		   i, (i < dd->ipath_piobcnt2k) ? 2 : 4, buf);
	if (pbufnum)
		*pbufnum = i;

bail:
	return buf;
}
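
/*
 * Minimal caller's-eye sketch for the allocator above (illustrative only;
 * real senders go on to copy the PBC word(s), header, and payload with
 * whatever ordering the chip requires):
 *
 *	u32 bufnum;
 *	u32 __iomem *piobuf = ipath_getpiobuf(dd, &bufnum);
 *	if (!piobuf)
 *		return -EBUSY;
 *
 * A NULL return means every buffer was busy even after refreshing the
 * shadow; ipath_upd_pio_shadow is left set so the next call re-syncs
 * from the chip before scanning again.
 */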
1439
1440/**
1441 * ipath_create_rcvhdrq - create a receive header queue
1442 * @dd: the infinipath device
1443 * @pd: the port data
1444 *
f37bda92
BS
1445 * this must be contiguous memory (from an i/o perspective), and must be
1446 * DMA'able (which means for some systems, it will go through an IOMMU,
1447 * or be forced into a low address range).
7bb206e3
BS
1448 */
1449int ipath_create_rcvhdrq(struct ipath_devdata *dd,
1450 struct ipath_portdata *pd)
1451{
f37bda92 1452 int ret = 0;
7bb206e3 1453
7bb206e3 1454 if (!pd->port_rcvhdrq) {
f37bda92 1455 dma_addr_t phys_hdrqtail;
7bb206e3 1456 gfp_t gfp_flags = GFP_USER | __GFP_COMP;
f37bda92
BS
1457 int amt = ALIGN(dd->ipath_rcvhdrcnt * dd->ipath_rcvhdrentsize *
1458 sizeof(u32), PAGE_SIZE);
7bb206e3
BS
1459
1460 pd->port_rcvhdrq = dma_alloc_coherent(
1461 &dd->pcidev->dev, amt, &pd->port_rcvhdrq_phys,
1462 gfp_flags);
1463
1464 if (!pd->port_rcvhdrq) {
1465 ipath_dev_err(dd, "attempt to allocate %d bytes "
1466 "for port %u rcvhdrq failed\n",
1467 amt, pd->port_port);
1468 ret = -ENOMEM;
1469 goto bail;
1470 }
f37bda92
BS
1471 pd->port_rcvhdrtail_kvaddr = dma_alloc_coherent(
1472 &dd->pcidev->dev, PAGE_SIZE, &phys_hdrqtail, GFP_KERNEL);
1473 if (!pd->port_rcvhdrtail_kvaddr) {
1474 ipath_dev_err(dd, "attempt to allocate 1 page "
1475 "for port %u rcvhdrqtailaddr failed\n",
1476 pd->port_port);
1477 ret = -ENOMEM;
221e3198
BS
1478 dma_free_coherent(&dd->pcidev->dev, amt,
1479 pd->port_rcvhdrq, pd->port_rcvhdrq_phys);
1480 pd->port_rcvhdrq = NULL;
f37bda92
BS
1481 goto bail;
1482 }
1483 pd->port_rcvhdrqtailaddr_phys = phys_hdrqtail;
7bb206e3
BS
1484
1485 pd->port_rcvhdrq_size = amt;
1486
1487 ipath_cdbg(VERBOSE, "%d pages at %p (phys %lx) size=%lu "
1488 "for port %u rcvhdr Q\n",
1489 amt >> PAGE_SHIFT, pd->port_rcvhdrq,
1490 (unsigned long) pd->port_rcvhdrq_phys,
1491 (unsigned long) pd->port_rcvhdrq_size,
1492 pd->port_port);
f37bda92
BS
1493
1494 ipath_cdbg(VERBOSE, "port %d hdrtailaddr, %llx physical\n",
1495 pd->port_port,
1496 (unsigned long long) phys_hdrqtail);
7bb206e3 1497 }
f37bda92
BS
1498 else
1499 ipath_cdbg(VERBOSE, "reuse port %d rcvhdrq @%p %llx phys; "
1500 "hdrtailaddr@%p %llx physical\n",
1501 pd->port_port, pd->port_rcvhdrq,
1fd3b40f
BS
1502 (unsigned long long) pd->port_rcvhdrq_phys,
1503 pd->port_rcvhdrtail_kvaddr, (unsigned long long)
1504 pd->port_rcvhdrqtailaddr_phys);
f37bda92
BS
1505
1506 /* clear for security and sanity on each use */
1507 memset(pd->port_rcvhdrq, 0, pd->port_rcvhdrq_size);
076fafcd 1508 memset(pd->port_rcvhdrtail_kvaddr, 0, PAGE_SIZE);
7bb206e3
BS
1509
1510 /*
1511 * tell chip each time we init it, even if we are re-using previous
f37bda92 1512 * memory (we zero the register at process close)
7bb206e3 1513 */
f37bda92
BS
1514 ipath_write_kreg_port(dd, dd->ipath_kregs->kr_rcvhdrtailaddr,
1515 pd->port_port, pd->port_rcvhdrqtailaddr_phys);
7bb206e3
BS
1516 ipath_write_kreg_port(dd, dd->ipath_kregs->kr_rcvhdraddr,
1517 pd->port_port, pd->port_rcvhdrq_phys);
1518
1519 ret = 0;
1520bail:
1521 return ret;
1522}
1523
1524int ipath_waitfor_complete(struct ipath_devdata *dd, ipath_kreg reg_id,
1525 u64 bits_to_wait_for, u64 * valp)
1526{
1527 unsigned long timeout;
1528 u64 lastval, val;
1529 int ret;
1530
1531 lastval = ipath_read_kreg64(dd, reg_id);
1532 /* wait a ridiculously long time */
1533 timeout = jiffies + msecs_to_jiffies(5);
1534 do {
1535 val = ipath_read_kreg64(dd, reg_id);
1536 /* set so they have something, even on failures. */
1537 *valp = val;
1538 if ((val & bits_to_wait_for) == bits_to_wait_for) {
1539 ret = 0;
1540 break;
1541 }
1542 if (val != lastval)
1543 ipath_cdbg(VERBOSE, "Changed from %llx to %llx, "
1544 "waiting for %llx bits\n",
1545 (unsigned long long) lastval,
1546 (unsigned long long) val,
1547 (unsigned long long) bits_to_wait_for);
1548 cond_resched();
1549 if (time_after(jiffies, timeout)) {
1550 ipath_dbg("Didn't get bits %llx in register 0x%x, "
1551 "got %llx\n",
1552 (unsigned long long) bits_to_wait_for,
1553 reg_id, (unsigned long long) *valp);
1554 ret = -ENODEV;
1555 break;
1556 }
1557 } while (1);
1558
1559 return ret;
1560}
1561
1562/**
1563 * ipath_waitfor_mdio_cmdready - wait for last command to complete
1564 * @dd: the infinipath device
1565 *
1566 * Like ipath_waitfor_complete(), but we wait for the CMDVALID bit to go
1567 * away indicating the last command has completed. It doesn't return data
1568 */
1569int ipath_waitfor_mdio_cmdready(struct ipath_devdata *dd)
1570{
1571 unsigned long timeout;
1572 u64 val;
1573 int ret;
1574
1575 /* wait a ridiculously long time */
1576 timeout = jiffies + msecs_to_jiffies(5);
1577 do {
1578 val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_mdio);
1579 if (!(val & IPATH_MDIO_CMDVALID)) {
1580 ret = 0;
1581 break;
1582 }
1583 cond_resched();
1584 if (time_after(jiffies, timeout)) {
1585 ipath_dbg("CMDVALID stuck in mdio reg? (%llx)\n",
1586 (unsigned long long) val);
1587 ret = -ENODEV;
1588 break;
1589 }
1590 } while (1);
1591
1592 return ret;
1593}
1594
34b2aafe 1595static void ipath_set_ib_lstate(struct ipath_devdata *dd, int which)
7bb206e3
BS
1596{
1597 static const char *what[4] = {
1598 [0] = "DOWN",
1599 [INFINIPATH_IBCC_LINKCMD_INIT] = "INIT",
1600 [INFINIPATH_IBCC_LINKCMD_ARMED] = "ARMED",
1601 [INFINIPATH_IBCC_LINKCMD_ACTIVE] = "ACTIVE"
1602 };
f37bda92
BS
1603 int linkcmd = (which >> INFINIPATH_IBCC_LINKCMD_SHIFT) &
1604 INFINIPATH_IBCC_LINKCMD_MASK;
1605
0fd41363 1606 ipath_cdbg(VERBOSE, "Trying to move unit %u to %s, current ltstate "
7bb206e3 1607 "is %s\n", dd->ipath_unit,
f37bda92 1608 what[linkcmd],
7bb206e3
BS
1609 ipath_ibcstatus_str[
1610 (ipath_read_kreg64
1611 (dd, dd->ipath_kregs->kr_ibcstatus) >>
1612 INFINIPATH_IBCS_LINKTRAININGSTATE_SHIFT) &
1613 INFINIPATH_IBCS_LINKTRAININGSTATE_MASK]);
f37bda92 1614 /* flush all queued sends when going to DOWN or INIT, to be sure that
0fd41363 1615 * they don't block MAD packets */
f37bda92
BS
1616 if (!linkcmd || linkcmd == INFINIPATH_IBCC_LINKCMD_INIT) {
1617 ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
1618 INFINIPATH_S_ABORT);
1619 ipath_disarm_piobufs(dd, dd->ipath_lastport_piobuf,
1620 (unsigned)(dd->ipath_piobcnt2k +
1621 dd->ipath_piobcnt4k) -
1622 dd->ipath_lastport_piobuf);
1623 }
7bb206e3
BS
1624
1625 ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl,
1626 dd->ipath_ibcctrl | which);
1627}
1628
34b2aafe
BS
1629int ipath_set_linkstate(struct ipath_devdata *dd, u8 newstate)
1630{
1631 u32 lstate;
1632 int ret;
1633
1634 switch (newstate) {
1635 case IPATH_IB_LINKDOWN:
1636 ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKINITCMD_POLL <<
1637 INFINIPATH_IBCC_LINKINITCMD_SHIFT);
1638 /* don't wait */
1639 ret = 0;
1640 goto bail;
1641
1642 case IPATH_IB_LINKDOWN_SLEEP:
1643 ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKINITCMD_SLEEP <<
1644 INFINIPATH_IBCC_LINKINITCMD_SHIFT);
1645 /* don't wait */
1646 ret = 0;
1647 goto bail;
1648
1649 case IPATH_IB_LINKDOWN_DISABLE:
1650 ipath_set_ib_lstate(dd,
1651 INFINIPATH_IBCC_LINKINITCMD_DISABLE <<
1652 INFINIPATH_IBCC_LINKINITCMD_SHIFT);
1653 /* don't wait */
1654 ret = 0;
1655 goto bail;
1656
1657 case IPATH_IB_LINKINIT:
1658 if (dd->ipath_flags & IPATH_LINKINIT) {
1659 ret = 0;
1660 goto bail;
1661 }
1662 ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKCMD_INIT <<
1663 INFINIPATH_IBCC_LINKCMD_SHIFT);
1664 lstate = IPATH_LINKINIT;
1665 break;
1666
1667 case IPATH_IB_LINKARM:
1668 if (dd->ipath_flags & IPATH_LINKARMED) {
1669 ret = 0;
1670 goto bail;
1671 }
1672 if (!(dd->ipath_flags &
1673 (IPATH_LINKINIT | IPATH_LINKACTIVE))) {
1674 ret = -EINVAL;
1675 goto bail;
1676 }
1677 ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKCMD_ARMED <<
1678 INFINIPATH_IBCC_LINKCMD_SHIFT);
1679 /*
1680 * Since the port can transition to ACTIVE by receiving
1681 * a non VL 15 packet, wait for either state.
1682 */
1683 lstate = IPATH_LINKARMED | IPATH_LINKACTIVE;
1684 break;
1685
1686 case IPATH_IB_LINKACTIVE:
1687 if (dd->ipath_flags & IPATH_LINKACTIVE) {
1688 ret = 0;
1689 goto bail;
1690 }
1691 if (!(dd->ipath_flags & IPATH_LINKARMED)) {
1692 ret = -EINVAL;
1693 goto bail;
1694 }
1695 ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKCMD_ACTIVE <<
1696 INFINIPATH_IBCC_LINKCMD_SHIFT);
1697 lstate = IPATH_LINKACTIVE;
1698 break;
1699
946db67f
BS
1700 case IPATH_IB_LINK_LOOPBACK:
1701 dev_info(&dd->pcidev->dev, "Enabling IB local loopback\n");
1702 dd->ipath_ibcctrl |= INFINIPATH_IBCC_LOOPBACK;
1703 ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl,
1704 dd->ipath_ibcctrl);
1705 ret = 0;
1706 goto bail; // no state change to wait for
1707
1708 case IPATH_IB_LINK_EXTERNAL:
1709 dev_info(&dd->pcidev->dev, "Disabling IB local loopback (normal)\n");
1710 dd->ipath_ibcctrl &= ~INFINIPATH_IBCC_LOOPBACK;
1711 ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl,
1712 dd->ipath_ibcctrl);
1713 ret = 0;
1714 goto bail; // no state change to wait for
1715
34b2aafe
BS
1716 default:
1717 ipath_dbg("Invalid linkstate 0x%x requested\n", newstate);
1718 ret = -EINVAL;
1719 goto bail;
1720 }
1721 ret = ipath_wait_linkstate(dd, lstate, 2000);
1722
1723bail:
1724 return ret;
1725}
1726
1727/**
1728 * ipath_set_mtu - set the MTU
1729 * @dd: the infinipath device
1730 * @arg: the new MTU
1731 *
1732 * we can handle "any" incoming size, the issue here is whether we
1733 * need to restrict our outgoing size. For now, we don't do any
1734 * sanity checking on this, and we don't deal with what happens to
1735 * programs that are already running when the size changes.
1736 * NOTE: changing the MTU will usually cause the IBC to go back to
1737 * link initialize (IPATH_IBSTATE_INIT) state...
1738 */
int ipath_set_mtu(struct ipath_devdata *dd, u16 arg)
{
	u32 piosize;
	int changed = 0;
	int ret;

	/*
	 * mtu is the IB data payload max.  It's the largest power of 2
	 * less than piosize (or even larger, since it only really
	 * controls the largest we can receive; we can send the max of
	 * the mtu and piosize).  We check that it's one of the valid
	 * IB sizes.
	 */
	if (arg != 256 && arg != 512 && arg != 1024 && arg != 2048 &&
	    arg != 4096) {
		ipath_dbg("Trying to set invalid mtu %u, failing\n", arg);
		ret = -EINVAL;
		goto bail;
	}
	if (dd->ipath_ibmtu == arg) {
		ret = 0;	/* same as current */
		goto bail;
	}

	piosize = dd->ipath_ibmaxlen;
	dd->ipath_ibmtu = arg;

	if (arg >= (piosize - IPATH_PIO_MAXIBHDR)) {
		/* Only if it's not the initial value (or reset to it) */
		if (piosize != dd->ipath_init_ibmaxlen) {
			dd->ipath_ibmaxlen = piosize;
			changed = 1;
		}
	} else if ((arg + IPATH_PIO_MAXIBHDR) != dd->ipath_ibmaxlen) {
		piosize = arg + IPATH_PIO_MAXIBHDR;
		ipath_cdbg(VERBOSE, "ibmaxlen was 0x%x, setting to 0x%x "
			   "(mtu 0x%x)\n", dd->ipath_ibmaxlen, piosize,
			   arg);
		dd->ipath_ibmaxlen = piosize;
		changed = 1;
	}

	if (changed) {
		/*
		 * set the IBC maxpktlength to the size of our pio
		 * buffers in words
		 */
		u64 ibc = dd->ipath_ibcctrl;
		ibc &= ~(INFINIPATH_IBCC_MAXPKTLEN_MASK <<
			 INFINIPATH_IBCC_MAXPKTLEN_SHIFT);

		piosize = piosize - 2 * sizeof(u32);	/* ignore pbc */
		dd->ipath_ibmaxlen = piosize;
		piosize /= sizeof(u32);			/* in words */
		/*
		 * add one word for the ICRC, which we only send in diag
		 * test pkt mode, so it doesn't need to be covered by the
		 * mtu itself
		 */
		piosize += 1;

		ibc |= piosize << INFINIPATH_IBCC_MAXPKTLEN_SHIFT;
		dd->ipath_ibcctrl = ibc;
		ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl,
				 dd->ipath_ibcctrl);
		dd->ipath_f_tidtemplate(dd);
	}

	ret = 0;

bail:
	return ret;
}
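
/*
 * Worked example (illustrative only; this helper is hypothetical and
 * not called anywhere): the byte-to-word conversion done above for
 * the IBC max packet length.  In the driver the inputs come from the
 * new mtu and IPATH_PIO_MAXIBHDR.
 */
static u32 __attribute__((unused)) example_ibc_maxpktlen_words(u32 mtu,
							       u32 max_ib_hdr)
{
	u32 bytes = mtu + max_ib_hdr;	/* payload plus largest IB header */

	bytes -= 2 * sizeof(u32);	/* the 2 PBC words aren't counted */
	return bytes / sizeof(u32) + 1;	/* in words, plus 1 for the ICRC */
}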

int ipath_set_lid(struct ipath_devdata *dd, u32 arg, u8 lmc)
{
	dd->ipath_lid = arg;
	dd->ipath_lmc = lmc;

	return 0;
}

/**
 * ipath_write_kreg_port - write a device's per-port 64-bit kernel register
 * @dd: the infinipath device
 * @regno: the register number to write
 * @port: the port containing the register
 * @value: the value to write
 *
 * Registers whose location varies per port (as given by the chip
 * implementation constants) are written through this routine.
 */
void ipath_write_kreg_port(const struct ipath_devdata *dd, ipath_kreg regno,
			   unsigned port, u64 value)
{
	u16 where;

	if (port < dd->ipath_portcnt &&
	    (regno == dd->ipath_kregs->kr_rcvhdraddr ||
	     regno == dd->ipath_kregs->kr_rcvhdrtailaddr))
		where = regno + port;
	else
		where = -1;

	ipath_write_kreg(dd, where, value);
}
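
/*
 * Usage sketch (illustrative only; example_set_port_rcvhdraddr is
 * hypothetical): programming the receive-header queue address for one
 * port.  The register offset is simply the base register number plus
 * the port index, as computed above.
 */
static void __attribute__((unused)) example_set_port_rcvhdraddr(
	struct ipath_devdata *dd, unsigned port, u64 phys)
{
	ipath_write_kreg_port(dd, dd->ipath_kregs->kr_rcvhdraddr,
			      port, phys);
}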

/**
 * ipath_shutdown_device - shut down a device
 * @dd: the infinipath device
 *
 * This is called to make the device quiet when we are about to
 * unload the driver, and also when the device is administratively
 * disabled.  It does not free any data structures.
 * Everything it does has to be set up again by ipath_init_chip(dd, 1).
 */
void ipath_shutdown_device(struct ipath_devdata *dd)
{
	ipath_dbg("Shutting down the device\n");

	dd->ipath_flags |= IPATH_LINKUNK;
	dd->ipath_flags &= ~(IPATH_INITTED | IPATH_LINKDOWN |
			     IPATH_LINKINIT | IPATH_LINKARMED |
			     IPATH_LINKACTIVE);
	*dd->ipath_statusp &= ~(IPATH_STATUS_IB_CONF |
				IPATH_STATUS_IB_READY);

	/* mask interrupts, but not errors */
	ipath_write_kreg(dd, dd->ipath_kregs->kr_intmask, 0ULL);

	dd->ipath_rcvctrl = 0;
	ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl,
			 dd->ipath_rcvctrl);

	/*
	 * gracefully stop all sends, allowing any in progress to
	 * trickle out first.
	 */
	ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl, 0ULL);
	/* flush it */
	ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
	/*
	 * wait long enough for anything that's going to trickle out to
	 * have actually done so.
	 */
	udelay(5);

	/*
	 * abort any armed or launched PIO buffers that didn't go (self
	 * clearing).  Will cause any packet currently being transmitted
	 * to go out with an EBP, and may also cause a short packet
	 * error on the receiver.
	 */
	ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
			 INFINIPATH_S_ABORT);

	ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKINITCMD_DISABLE <<
			    INFINIPATH_IBCC_LINKINITCMD_SHIFT);

	/* disable IBC */
	dd->ipath_control &= ~INFINIPATH_C_LINKENABLE;
	ipath_write_kreg(dd, dd->ipath_kregs->kr_control,
			 dd->ipath_control | INFINIPATH_C_FREEZEMODE);

	/*
	 * clear SerdesEnable; do this here because we are unloading,
	 * so we can't count on interrupts to move things along.  Turn
	 * the LEDs off explicitly for the same reason.
	 */
	dd->ipath_f_quiet_serdes(dd);
	dd->ipath_f_setextled(dd, 0, 0);

	if (dd->ipath_stats_timer_active) {
		del_timer_sync(&dd->ipath_stats_timer);
		dd->ipath_stats_timer_active = 0;
	}

	/*
	 * clear all interrupts and errors, so that the next time the
	 * driver is loaded or the device is enabled, we know that
	 * whatever is set happened while we were unloaded
	 */
	ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrclear,
			 ~0ULL & ~INFINIPATH_HWE_MEMBISTFAILED);
	ipath_write_kreg(dd, dd->ipath_kregs->kr_errorclear, -1LL);
	ipath_write_kreg(dd, dd->ipath_kregs->kr_intclear, -1LL);
}
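
/*
 * Sketch (illustrative only; example_restart_unit is hypothetical):
 * an administrative disable/re-enable pairs ipath_shutdown_device()
 * with ipath_init_chip(dd, 1), as the kernel-doc above notes.
 * Nothing is freed by the shutdown, only quiesced, so the reinit must
 * set everything up again.
 */
static int __attribute__((unused)) example_restart_unit(
	struct ipath_devdata *dd)
{
	ipath_shutdown_device(dd);
	return ipath_init_chip(dd, 1);	/* 1 => reinit after shutdown */
}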

/**
 * ipath_free_pddata - free a port's allocated data
 * @dd: the infinipath device
 * @pd: the portdata structure
 *
 * free up any allocated data for a port
 * This should not touch anything that would affect a simultaneous
 * re-allocation of port data, because it is called after ipath_mutex
 * is released (and can be called from reinit as well).
 * It should never change any chip state, or global driver state.
 * (The only exception to global state is freeing the port0 port0_skbs.)
 */
f37bda92 1938void ipath_free_pddata(struct ipath_devdata *dd, struct ipath_portdata *pd)
7bb206e3 1939{
7bb206e3
BS
1940 if (!pd)
1941 return;
f37bda92
BS
1942
1943 if (pd->port_rcvhdrq) {
7bb206e3
BS
1944 ipath_cdbg(VERBOSE, "free closed port %d rcvhdrq @ %p "
1945 "(size=%lu)\n", pd->port_port, pd->port_rcvhdrq,
1946 (unsigned long) pd->port_rcvhdrq_size);
1947 dma_free_coherent(&dd->pcidev->dev, pd->port_rcvhdrq_size,
1948 pd->port_rcvhdrq, pd->port_rcvhdrq_phys);
1949 pd->port_rcvhdrq = NULL;
f37bda92
BS
1950 if (pd->port_rcvhdrtail_kvaddr) {
1951 dma_free_coherent(&dd->pcidev->dev, PAGE_SIZE,
076fafcd 1952 pd->port_rcvhdrtail_kvaddr,
f37bda92
BS
1953 pd->port_rcvhdrqtailaddr_phys);
1954 pd->port_rcvhdrtail_kvaddr = NULL;
1955 }
7bb206e3 1956 }
f37bda92
BS
1957 if (pd->port_port && pd->port_rcvegrbuf) {
1958 unsigned e;
1959
1960 for (e = 0; e < pd->port_rcvegrbuf_chunks; e++) {
1961 void *base = pd->port_rcvegrbuf[e];
1962 size_t size = pd->port_rcvegrbuf_size;
1963
1964 ipath_cdbg(VERBOSE, "egrbuf free(%p, %lu), "
1965 "chunk %u/%u\n", base,
1966 (unsigned long) size,
1967 e, pd->port_rcvegrbuf_chunks);
1968 dma_free_coherent(&dd->pcidev->dev, size,
1969 base, pd->port_rcvegrbuf_phys[e]);
7bb206e3 1970 }
9929b0fb 1971 kfree(pd->port_rcvegrbuf);
f37bda92 1972 pd->port_rcvegrbuf = NULL;
9929b0fb 1973 kfree(pd->port_rcvegrbuf_phys);
f37bda92 1974 pd->port_rcvegrbuf_phys = NULL;
7bb206e3 1975 pd->port_rcvegrbuf_chunks = 0;
1fd3b40f 1976 } else if (pd->port_port == 0 && dd->ipath_port0_skbinfo) {
7bb206e3 1977 unsigned e;
1fd3b40f 1978 struct ipath_skbinfo *skbinfo = dd->ipath_port0_skbinfo;
7bb206e3 1979
1fd3b40f
BS
1980 dd->ipath_port0_skbinfo = NULL;
1981 ipath_cdbg(VERBOSE, "free closed port %d "
1982 "ipath_port0_skbinfo @ %p\n", pd->port_port,
1983 skbinfo);
7bb206e3 1984 for (e = 0; e < dd->ipath_rcvegrcnt; e++)
1fd3b40f
BS
1985 if (skbinfo[e].skb) {
1986 pci_unmap_single(dd->pcidev, skbinfo[e].phys,
1987 dd->ipath_ibmaxlen,
1988 PCI_DMA_FROMDEVICE);
1989 dev_kfree_skb(skbinfo[e].skb);
1990 }
1991 vfree(skbinfo);
7bb206e3 1992 }
f37bda92 1993 kfree(pd->port_tid_pg_list);
9929b0fb
BS
1994 vfree(pd->subport_uregbase);
1995 vfree(pd->subport_rcvegrbuf);
1996 vfree(pd->subport_rcvhdr_base);
f37bda92 1997 kfree(pd);
7bb206e3
BS
1998}
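
/*
 * Sketch (illustrative only; example_alloc_egrbufs is hypothetical):
 * the shape of the allocation that the egr-buffer loop above undoes.
 * Assumes the chunk count, chunk size, and both arrays were set up by
 * the port init code; a real caller would unwind a partial failure
 * via ipath_free_pddata().
 */
static int __attribute__((unused)) example_alloc_egrbufs(
	struct ipath_devdata *dd, struct ipath_portdata *pd)
{
	unsigned e;

	for (e = 0; e < pd->port_rcvegrbuf_chunks; e++) {
		pd->port_rcvegrbuf[e] = dma_alloc_coherent(
			&dd->pcidev->dev, pd->port_rcvegrbuf_size,
			&pd->port_rcvegrbuf_phys[e], GFP_KERNEL);
		if (!pd->port_rcvegrbuf[e])
			return -ENOMEM;
	}
	return 0;
}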

static int __init infinipath_init(void)
{
	int ret;

	if (ipath_debug & __IPATH_DBG)
		printk(KERN_INFO DRIVER_LOAD_MSG "%s", ib_ipath_version);

	/*
	 * These must be called before the driver is registered with
	 * the PCI subsystem.
	 */
	idr_init(&unit_table);
	if (!idr_pre_get(&unit_table, GFP_KERNEL)) {
		ret = -ENOMEM;
		goto bail;
	}

	ret = pci_register_driver(&ipath_driver);
	if (ret < 0) {
		printk(KERN_ERR IPATH_DRV_NAME
		       ": Unable to register driver: error %d\n", -ret);
		goto bail_unit;
	}

	ret = ipath_driver_create_group(&ipath_driver.driver);
	if (ret < 0) {
		printk(KERN_ERR IPATH_DRV_NAME ": Unable to create driver "
		       "sysfs entries: error %d\n", -ret);
		goto bail_pci;
	}

	ret = ipath_init_ipathfs();
	if (ret < 0) {
		printk(KERN_ERR IPATH_DRV_NAME ": Unable to create "
		       "ipathfs: error %d\n", -ret);
		goto bail_group;
	}

	goto bail;

bail_group:
	ipath_driver_remove_group(&ipath_driver.driver);

bail_pci:
	pci_unregister_driver(&ipath_driver);

bail_unit:
	idr_destroy(&unit_table);

bail:
	return ret;
}

static void __exit infinipath_cleanup(void)
{
	ipath_exit_ipathfs();

	ipath_driver_remove_group(&ipath_driver.driver);

	ipath_cdbg(VERBOSE, "Unregistering pci driver\n");
	pci_unregister_driver(&ipath_driver);

	idr_destroy(&unit_table);
}

/**
 * ipath_reset_device - reset the chip if possible
 * @unit: the device to reset
 *
 * Whether or not reset is successful, we attempt to re-initialize the chip
 * (that is, much like a driver unload/reload).  We clear the INITTED flag
 * so that the various entry points will fail until we reinitialize.  For
 * now, we only allow this if no user ports are open that use chip resources.
 */
int ipath_reset_device(int unit)
{
	int ret, i;
	struct ipath_devdata *dd = ipath_lookup(unit);

	if (!dd) {
		ret = -ENODEV;
		goto bail;
	}

	dev_info(&dd->pcidev->dev, "Reset on unit %u requested\n", unit);

	if (!dd->ipath_kregbase || !(dd->ipath_flags & IPATH_PRESENT)) {
		dev_info(&dd->pcidev->dev, "Invalid unit number %u or "
			 "not initialized or not present\n", unit);
		ret = -ENXIO;
		goto bail;
	}

	if (dd->ipath_pd)
		for (i = 1; i < dd->ipath_cfgports; i++) {
			if (dd->ipath_pd[i] && dd->ipath_pd[i]->port_cnt) {
				ipath_dbg("unit %u port %d is in use "
					  "(PID %u cmd %s), can't reset\n",
					  unit, i,
					  dd->ipath_pd[i]->port_pid,
					  dd->ipath_pd[i]->port_comm);
				ret = -EBUSY;
				goto bail;
			}
		}

	dd->ipath_flags &= ~IPATH_INITTED;
	ret = dd->ipath_f_reset(dd);
	if (ret != 1)
		ipath_dbg("reset was not successful\n");
	ipath_dbg("Trying to reinitialize unit %u after reset attempt\n",
		  unit);
	ret = ipath_init_chip(dd, 1);
	if (ret)
		ipath_dev_err(dd, "Reinitialize unit %u after "
			      "reset failed with %d\n", unit, ret);
	else
		dev_info(&dd->pcidev->dev, "Reinitialized unit %u after "
			 "resetting\n", unit);

bail:
	return ret;
}
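
/*
 * Usage sketch (illustrative only; example_reset_first_unit is
 * hypothetical): requesting a reset of unit 0, e.g. from a
 * diagnostics path.  While any user port on the unit is open the
 * request fails with -EBUSY, as checked above.
 */
static int __attribute__((unused)) example_reset_first_unit(void)
{
	return ipath_reset_device(0);
}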

int ipath_set_rx_pol_inv(struct ipath_devdata *dd, u8 new_pol_inv)
{
	u64 val;

	if (new_pol_inv > INFINIPATH_XGXS_RX_POL_MASK)
		return -1;
	if (dd->ipath_rx_pol_inv != new_pol_inv) {
		dd->ipath_rx_pol_inv = new_pol_inv;
		val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_xgxsconfig);
		val &= ~(INFINIPATH_XGXS_RX_POL_MASK <<
			 INFINIPATH_XGXS_RX_POL_SHIFT);
		val |= ((u64)dd->ipath_rx_pol_inv) <<
			INFINIPATH_XGXS_RX_POL_SHIFT;
		ipath_write_kreg(dd, dd->ipath_kregs->kr_xgxsconfig, val);
	}
	return 0;
}
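
/*
 * Sketch (illustrative only; example_set_field is hypothetical): the
 * xgxsconfig update above follows the usual read-modify-write pattern
 * for a multi-bit register field: clear the field with its mask at
 * its shift, then OR in the new value.  Shown on an arbitrary 64-bit
 * register image.
 */
static u64 __attribute__((unused)) example_set_field(u64 reg, u64 mask,
						     unsigned shift, u64 val)
{
	reg &= ~(mask << shift);	/* clear the old field contents */
	reg |= (val & mask) << shift;	/* insert the new value */
	return reg;
}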

module_init(infinipath_init);
module_exit(infinipath_cleanup);