drivers/staging/vme/bridges/vme_ca91cx42.c
/*
 * Support for the Tundra Universe I/II VME-PCI Bridge Chips
 *
 * Author: Martyn Welch <martyn.welch@ge.com>
 * Copyright 2008 GE Intelligent Platforms Embedded Systems, Inc.
 *
 * Based on work by Tom Armistead and Ajit Prem
 * Copyright 2004 Motorola Inc.
 *
 * Derived from ca91c042.c by Michael Wyrick
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */

#include <linux/module.h>
#include <linux/mm.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/poll.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <asm/time.h>
#include <asm/io.h>
#include <asm/uaccess.h>

#include "../vme.h"
#include "../vme_bridge.h"
#include "vme_ca91cx42.h"

static int __init ca91cx42_init(void);
static int ca91cx42_probe(struct pci_dev *, const struct pci_device_id *);
static void ca91cx42_remove(struct pci_dev *);
static void __exit ca91cx42_exit(void);

/* Module parameters */
static int geoid;

static char driver_name[] = "vme_ca91cx42";

static const struct pci_device_id ca91cx42_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_TUNDRA, PCI_DEVICE_ID_TUNDRA_CA91C142) },
	{ },
};

static struct pci_driver ca91cx42_driver = {
	.name = driver_name,
	.id_table = ca91cx42_ids,
	.probe = ca91cx42_probe,
	.remove = ca91cx42_remove,
};

static u32 ca91cx42_DMA_irqhandler(struct ca91cx42_driver *bridge)
{
	wake_up(&(bridge->dma_queue));

	return CA91CX42_LINT_DMA;
}

static u32 ca91cx42_LM_irqhandler(struct ca91cx42_driver *bridge, u32 stat)
{
	int i;
	u32 serviced = 0;

	for (i = 0; i < 4; i++) {
		if (stat & CA91CX42_LINT_LM[i]) {
			/* We only enable interrupts if the callback is set */
			bridge->lm_callback[i](i);
			serviced |= CA91CX42_LINT_LM[i];
		}
	}

	return serviced;
}

/* XXX This needs to be split into 4 queues */
static u32 ca91cx42_MB_irqhandler(struct ca91cx42_driver *bridge, int mbox_mask)
{
	wake_up(&(bridge->mbox_queue));

	return CA91CX42_LINT_MBOX;
}

static u32 ca91cx42_IACK_irqhandler(struct ca91cx42_driver *bridge)
{
	wake_up(&(bridge->iack_queue));

	return CA91CX42_LINT_SW_IACK;
}

static u32 ca91cx42_VERR_irqhandler(struct ca91cx42_driver *bridge)
{
	int val;

	val = ioread32(bridge->base + DGCS);

	if (!(val & 0x00000800)) {
		printk(KERN_ERR "ca91c042: ca91cx42_VERR_irqhandler DMA Read "
			"Error DGCS=%08X\n", val);
	}

	return CA91CX42_LINT_VERR;
}

static u32 ca91cx42_LERR_irqhandler(struct ca91cx42_driver *bridge)
{
	int val;

	val = ioread32(bridge->base + DGCS);

	if (!(val & 0x00000800)) {
		printk(KERN_ERR "ca91c042: ca91cx42_LERR_irqhandler DMA Read "
			"Error DGCS=%08X\n", val);
	}

	return CA91CX42_LINT_LERR;
}

static u32 ca91cx42_VIRQ_irqhandler(struct vme_bridge *ca91cx42_bridge,
	int stat)
{
	int vec, i, serviced = 0;
	struct ca91cx42_driver *bridge;

	bridge = ca91cx42_bridge->driver_priv;

	for (i = 7; i > 0; i--) {
		if (stat & (1 << i)) {
			vec = ioread32(bridge->base +
				CA91CX42_V_STATID[i]) & 0xff;

			vme_irq_handler(ca91cx42_bridge, i, vec);

			serviced |= (1 << i);
		}
	}

	return serviced;
}

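/*
 * Top-level handler for the bridge's (shared) PCI interrupt: read the
 * pending status, mask it against the enabled sources, dispatch to the
 * per-source handlers above and clear what was serviced.
 */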
static irqreturn_t ca91cx42_irqhandler(int irq, void *ptr)
{
	u32 stat, enable, serviced = 0;
	struct vme_bridge *ca91cx42_bridge;
	struct ca91cx42_driver *bridge;

	ca91cx42_bridge = ptr;

	bridge = ca91cx42_bridge->driver_priv;

	enable = ioread32(bridge->base + LINT_EN);
	stat = ioread32(bridge->base + LINT_STAT);

	/* Only look at unmasked interrupts */
	stat &= enable;

	if (unlikely(!stat))
		return IRQ_NONE;

	if (stat & CA91CX42_LINT_DMA)
		serviced |= ca91cx42_DMA_irqhandler(bridge);
	if (stat & (CA91CX42_LINT_LM0 | CA91CX42_LINT_LM1 | CA91CX42_LINT_LM2 |
			CA91CX42_LINT_LM3))
		serviced |= ca91cx42_LM_irqhandler(bridge, stat);
	if (stat & CA91CX42_LINT_MBOX)
		serviced |= ca91cx42_MB_irqhandler(bridge, stat);
	if (stat & CA91CX42_LINT_SW_IACK)
		serviced |= ca91cx42_IACK_irqhandler(bridge);
	if (stat & CA91CX42_LINT_VERR)
		serviced |= ca91cx42_VERR_irqhandler(bridge);
	if (stat & CA91CX42_LINT_LERR)
		serviced |= ca91cx42_LERR_irqhandler(bridge);
	if (stat & (CA91CX42_LINT_VIRQ1 | CA91CX42_LINT_VIRQ2 |
			CA91CX42_LINT_VIRQ3 | CA91CX42_LINT_VIRQ4 |
			CA91CX42_LINT_VIRQ5 | CA91CX42_LINT_VIRQ6 |
			CA91CX42_LINT_VIRQ7))
		serviced |= ca91cx42_VIRQ_irqhandler(ca91cx42_bridge, stat);

	/* Clear serviced interrupts */
	iowrite32(stat, bridge->base + LINT_STAT);

	return IRQ_HANDLED;
}

static int ca91cx42_irq_init(struct vme_bridge *ca91cx42_bridge)
{
	int result, tmp;
	struct pci_dev *pdev;
	struct ca91cx42_driver *bridge;

	bridge = ca91cx42_bridge->driver_priv;

	/* Need pdev */
	pdev = container_of(ca91cx42_bridge->parent, struct pci_dev, dev);

	/* Initialise list for VME bus errors */
	INIT_LIST_HEAD(&(ca91cx42_bridge->vme_errors));

	mutex_init(&(ca91cx42_bridge->irq_mtx));

	/* Disable interrupts from PCI to VME */
	iowrite32(0, bridge->base + VINT_EN);

	/* Disable PCI interrupts */
	iowrite32(0, bridge->base + LINT_EN);
	/* Clear Any Pending PCI Interrupts */
	iowrite32(0x00FFFFFF, bridge->base + LINT_STAT);

	result = request_irq(pdev->irq, ca91cx42_irqhandler, IRQF_SHARED,
			driver_name, ca91cx42_bridge);
	if (result) {
		dev_err(&pdev->dev, "Can't get assigned pci irq vector %02X\n",
			pdev->irq);
		return result;
	}

	/* Ensure all interrupts are mapped to PCI Interrupt 0 */
	iowrite32(0, bridge->base + LINT_MAP0);
	iowrite32(0, bridge->base + LINT_MAP1);
	iowrite32(0, bridge->base + LINT_MAP2);

	/* Enable DMA, mailbox & LM Interrupts */
	tmp = CA91CX42_LINT_MBOX3 | CA91CX42_LINT_MBOX2 | CA91CX42_LINT_MBOX1 |
		CA91CX42_LINT_MBOX0 | CA91CX42_LINT_SW_IACK |
		CA91CX42_LINT_VERR | CA91CX42_LINT_LERR | CA91CX42_LINT_DMA;

	iowrite32(tmp, bridge->base + LINT_EN);

	return 0;
}

static void ca91cx42_irq_exit(struct vme_bridge *ca91cx42_bridge,
	struct pci_dev *pdev)
{
	struct ca91cx42_driver *bridge = ca91cx42_bridge->driver_priv;

	/* Disable interrupts from PCI to VME */
	iowrite32(0, bridge->base + VINT_EN);

	/* Disable PCI interrupts */
	iowrite32(0, bridge->base + LINT_EN);
	/* Clear Any Pending PCI Interrupts */
	iowrite32(0x00FFFFFF, bridge->base + LINT_STAT);

	/* Must pass the same cookie that was handed to request_irq() */
	free_irq(pdev->irq, ca91cx42_bridge);
}

/*
 * Set up a VME interrupt
 */
void ca91cx42_irq_set(struct vme_bridge *ca91cx42_bridge, int level, int state,
	int sync)
{
	struct pci_dev *pdev;
	u32 tmp;
	struct ca91cx42_driver *bridge;

	bridge = ca91cx42_bridge->driver_priv;

	/* Enable IRQ level */
	tmp = ioread32(bridge->base + LINT_EN);

	if (state == 0)
		tmp &= ~CA91CX42_LINT_VIRQ[level];
	else
		tmp |= CA91CX42_LINT_VIRQ[level];

	iowrite32(tmp, bridge->base + LINT_EN);

	if ((state == 0) && (sync != 0)) {
		pdev = container_of(ca91cx42_bridge->parent, struct pci_dev,
			dev);

		synchronize_irq(pdev->irq);
	}
}

int ca91cx42_irq_generate(struct vme_bridge *ca91cx42_bridge, int level,
	int statid)
{
	u32 tmp;
	struct ca91cx42_driver *bridge;

	bridge = ca91cx42_bridge->driver_priv;

	/* Universe can only generate even vectors */
	if (statid & 1)
		return -EINVAL;

	mutex_lock(&(bridge->vme_int));

	tmp = ioread32(bridge->base + VINT_EN);

	/* Set Status/ID */
	iowrite32(statid << 24, bridge->base + STATID);

	/* Assert VMEbus IRQ */
	tmp = tmp | (1 << (level + 24));
	iowrite32(tmp, bridge->base + VINT_EN);

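	/*
	 * XXX With a constant-false condition, wait_event_interruptible()
	 * only returns when the task receives a signal; the wake_up() from
	 * the IACK handler alone will not terminate the wait.
	 */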
	/* Wait for IACK */
	wait_event_interruptible(bridge->iack_queue, 0);

	/* Return interrupt to low state */
	tmp = ioread32(bridge->base + VINT_EN);
	tmp = tmp & ~(1 << (level + 24));
	iowrite32(tmp, bridge->base + VINT_EN);

	mutex_unlock(&(bridge->vme_int));

	return 0;
}

int ca91cx42_slave_set(struct vme_slave_resource *image, int enabled,
	unsigned long long vme_base, unsigned long long size,
	dma_addr_t pci_base, vme_address_t aspace, vme_cycle_t cycle)
{
	unsigned int i, addr = 0, granularity;
	unsigned int temp_ctl = 0;
	unsigned int vme_bound, pci_offset;
	struct ca91cx42_driver *bridge;

	bridge = image->parent->driver_priv;

	i = image->number;

	switch (aspace) {
	case VME_A16:
		addr |= CA91CX42_VSI_CTL_VAS_A16;
		break;
	case VME_A24:
		addr |= CA91CX42_VSI_CTL_VAS_A24;
		break;
	case VME_A32:
		addr |= CA91CX42_VSI_CTL_VAS_A32;
		break;
	case VME_USER1:
		addr |= CA91CX42_VSI_CTL_VAS_USER1;
		break;
	case VME_USER2:
		addr |= CA91CX42_VSI_CTL_VAS_USER2;
		break;
	case VME_A64:
	case VME_CRCSR:
	case VME_USER3:
	case VME_USER4:
	default:
		printk(KERN_ERR "Invalid address space\n");
		return -EINVAL;
	}

	/*
	 * Bound address is a valid address for the window, adjust
	 * accordingly
	 */
	vme_bound = vme_base + size;
	pci_offset = pci_base - vme_base;

	if ((i == 0) || (i == 4))
		granularity = 0x1000;
	else
		granularity = 0x10000;

	if (vme_base & (granularity - 1)) {
		printk(KERN_ERR "Invalid VME base alignment\n");
		return -EINVAL;
	}
	if (vme_bound & (granularity - 1)) {
		printk(KERN_ERR "Invalid VME bound alignment\n");
		return -EINVAL;
	}
	if (pci_offset & (granularity - 1)) {
		printk(KERN_ERR "Invalid PCI Offset alignment\n");
		return -EINVAL;
	}

	/* Disable while we are mucking around */
	temp_ctl = ioread32(bridge->base + CA91CX42_VSI_CTL[i]);
	temp_ctl &= ~CA91CX42_VSI_CTL_EN;
	iowrite32(temp_ctl, bridge->base + CA91CX42_VSI_CTL[i]);

	/* Setup mapping */
	iowrite32(vme_base, bridge->base + CA91CX42_VSI_BS[i]);
	iowrite32(vme_bound, bridge->base + CA91CX42_VSI_BD[i]);
	iowrite32(pci_offset, bridge->base + CA91CX42_VSI_TO[i]);

	/* Setup address space */
	temp_ctl &= ~CA91CX42_VSI_CTL_VAS_M;
	temp_ctl |= addr;

	/* Setup cycle types */
	temp_ctl &= ~(CA91CX42_VSI_CTL_PGM_M | CA91CX42_VSI_CTL_SUPER_M);
	if (cycle & VME_SUPER)
		temp_ctl |= CA91CX42_VSI_CTL_SUPER_SUPR;
	if (cycle & VME_USER)
		temp_ctl |= CA91CX42_VSI_CTL_SUPER_NPRIV;
	if (cycle & VME_PROG)
		temp_ctl |= CA91CX42_VSI_CTL_PGM_PGM;
	if (cycle & VME_DATA)
		temp_ctl |= CA91CX42_VSI_CTL_PGM_DATA;

	/* Write ctl reg without enable */
	iowrite32(temp_ctl, bridge->base + CA91CX42_VSI_CTL[i]);

	if (enabled)
		temp_ctl |= CA91CX42_VSI_CTL_EN;

	iowrite32(temp_ctl, bridge->base + CA91CX42_VSI_CTL[i]);

	return 0;
}

int ca91cx42_slave_get(struct vme_slave_resource *image, int *enabled,
	unsigned long long *vme_base, unsigned long long *size,
	dma_addr_t *pci_base, vme_address_t *aspace, vme_cycle_t *cycle)
{
	unsigned int i, granularity = 0, ctl = 0;
	unsigned long long vme_bound, pci_offset;
	struct ca91cx42_driver *bridge;

	bridge = image->parent->driver_priv;

	i = image->number;

	if ((i == 0) || (i == 4))
		granularity = 0x1000;
	else
		granularity = 0x10000;

	/* Read Registers */
	ctl = ioread32(bridge->base + CA91CX42_VSI_CTL[i]);

	*vme_base = ioread32(bridge->base + CA91CX42_VSI_BS[i]);
	vme_bound = ioread32(bridge->base + CA91CX42_VSI_BD[i]);
	pci_offset = ioread32(bridge->base + CA91CX42_VSI_TO[i]);

	*pci_base = (dma_addr_t)*vme_base + pci_offset;
	*size = (unsigned long long)((vme_bound - *vme_base) + granularity);

	*enabled = 0;
	*aspace = 0;
	*cycle = 0;

	if (ctl & CA91CX42_VSI_CTL_EN)
		*enabled = 1;

	if ((ctl & CA91CX42_VSI_CTL_VAS_M) == CA91CX42_VSI_CTL_VAS_A16)
		*aspace = VME_A16;
	if ((ctl & CA91CX42_VSI_CTL_VAS_M) == CA91CX42_VSI_CTL_VAS_A24)
		*aspace = VME_A24;
	if ((ctl & CA91CX42_VSI_CTL_VAS_M) == CA91CX42_VSI_CTL_VAS_A32)
		*aspace = VME_A32;
	if ((ctl & CA91CX42_VSI_CTL_VAS_M) == CA91CX42_VSI_CTL_VAS_USER1)
		*aspace = VME_USER1;
	if ((ctl & CA91CX42_VSI_CTL_VAS_M) == CA91CX42_VSI_CTL_VAS_USER2)
		*aspace = VME_USER2;

	if (ctl & CA91CX42_VSI_CTL_SUPER_SUPR)
		*cycle |= VME_SUPER;
	if (ctl & CA91CX42_VSI_CTL_SUPER_NPRIV)
		*cycle |= VME_USER;
	if (ctl & CA91CX42_VSI_CTL_PGM_PGM)
		*cycle |= VME_PROG;
	if (ctl & CA91CX42_VSI_CTL_PGM_DATA)
		*cycle |= VME_DATA;

	return 0;
}

/*
 * Allocate and map PCI Resource
 */
static int ca91cx42_alloc_resource(struct vme_master_resource *image,
	unsigned long long size)
{
	unsigned long long existing_size;
	int retval = 0;
	struct pci_dev *pdev;
	struct vme_bridge *ca91cx42_bridge;

	ca91cx42_bridge = image->parent;

	/* Find pci_dev container of dev */
	if (ca91cx42_bridge->parent == NULL) {
		printk(KERN_ERR "Dev entry NULL\n");
		return -EINVAL;
	}
	pdev = container_of(ca91cx42_bridge->parent, struct pci_dev, dev);

	existing_size = (unsigned long long)(image->bus_resource.end -
		image->bus_resource.start);

	/* If the existing size is OK, return */
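	/* (struct resource ranges are inclusive, hence the size - 1 test) */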
	if (existing_size == (size - 1))
		return 0;

	if (existing_size != 0) {
		iounmap(image->kern_base);
		image->kern_base = NULL;
		kfree(image->bus_resource.name);
		release_resource(&(image->bus_resource));
		memset(&(image->bus_resource), 0, sizeof(struct resource));
	}

	if (image->bus_resource.name == NULL) {
		image->bus_resource.name = kmalloc(VMENAMSIZ+3, GFP_KERNEL);
		if (image->bus_resource.name == NULL) {
			printk(KERN_ERR "Unable to allocate memory for resource"
				" name\n");
			retval = -ENOMEM;
			goto err_name;
		}
	}

	sprintf((char *)image->bus_resource.name, "%s.%d",
		ca91cx42_bridge->name, image->number);

	image->bus_resource.start = 0;
	image->bus_resource.end = (unsigned long)size;
	image->bus_resource.flags = IORESOURCE_MEM;

	retval = pci_bus_alloc_resource(pdev->bus,
		&(image->bus_resource), size, size, PCIBIOS_MIN_MEM,
		0, NULL, NULL);
	if (retval) {
		printk(KERN_ERR "Failed to allocate mem resource for "
			"window %d size 0x%lx start 0x%lx\n",
			image->number, (unsigned long)size,
			(unsigned long)image->bus_resource.start);
		goto err_resource;
	}

	image->kern_base = ioremap_nocache(
		image->bus_resource.start, size);
	if (image->kern_base == NULL) {
		printk(KERN_ERR "Failed to remap resource\n");
		retval = -ENOMEM;
		goto err_remap;
	}

	return 0;

err_remap:
	release_resource(&(image->bus_resource));
err_resource:
	kfree(image->bus_resource.name);
	memset(&(image->bus_resource), 0, sizeof(struct resource));
err_name:
	return retval;
}

/*
 * Free and unmap PCI Resource
 */
static void ca91cx42_free_resource(struct vme_master_resource *image)
{
	iounmap(image->kern_base);
	image->kern_base = NULL;
	release_resource(&(image->bus_resource));
	kfree(image->bus_resource.name);
	memset(&(image->bus_resource), 0, sizeof(struct resource));
}


int ca91cx42_master_set(struct vme_master_resource *image, int enabled,
	unsigned long long vme_base, unsigned long long size,
	vme_address_t aspace, vme_cycle_t cycle, vme_width_t dwidth)
{
	int retval = 0;
	unsigned int i, granularity = 0;
	unsigned int temp_ctl = 0;
	unsigned long long pci_bound, vme_offset, pci_base;
	struct ca91cx42_driver *bridge;

	bridge = image->parent->driver_priv;

	i = image->number;

	if ((i == 0) || (i == 4))
		granularity = 0x1000;
	else
		granularity = 0x10000;

	/* Verify input data */
	if (vme_base & (granularity - 1)) {
		printk(KERN_ERR "Invalid VME Window alignment\n");
		retval = -EINVAL;
		goto err_window;
	}
	if (size & (granularity - 1)) {
		printk(KERN_ERR "Invalid VME Window alignment\n");
		retval = -EINVAL;
		goto err_window;
	}

	spin_lock(&(image->lock));

	/*
	 * Let's allocate the resource here rather than further up the stack as
	 * it avoids pushing loads of bus dependent stuff up the stack
	 */
	retval = ca91cx42_alloc_resource(image, size);
	if (retval) {
		spin_unlock(&(image->lock));
		printk(KERN_ERR "Unable to allocate memory for resource\n");
		retval = -ENOMEM;
		goto err_res;
	}

	pci_base = (unsigned long long)image->bus_resource.start;

	/*
	 * Bound address is a valid address for the window, adjust
	 * according to window granularity.
	 */
	pci_bound = pci_base + size;
	vme_offset = vme_base - pci_base;

	/* Disable while we are mucking around */
	temp_ctl = ioread32(bridge->base + CA91CX42_LSI_CTL[i]);
	temp_ctl &= ~CA91CX42_LSI_CTL_EN;
	iowrite32(temp_ctl, bridge->base + CA91CX42_LSI_CTL[i]);

	/* Setup cycle types */
	temp_ctl &= ~CA91CX42_LSI_CTL_VCT_M;
	if (cycle & VME_BLT)
		temp_ctl |= CA91CX42_LSI_CTL_VCT_BLT;
	if (cycle & VME_MBLT)
		temp_ctl |= CA91CX42_LSI_CTL_VCT_MBLT;

	/* Setup data width */
	temp_ctl &= ~CA91CX42_LSI_CTL_VDW_M;
	switch (dwidth) {
	case VME_D8:
		temp_ctl |= CA91CX42_LSI_CTL_VDW_D8;
		break;
	case VME_D16:
		temp_ctl |= CA91CX42_LSI_CTL_VDW_D16;
		break;
	case VME_D32:
		temp_ctl |= CA91CX42_LSI_CTL_VDW_D32;
		break;
	case VME_D64:
		temp_ctl |= CA91CX42_LSI_CTL_VDW_D64;
		break;
	default:
		spin_unlock(&(image->lock));
		printk(KERN_ERR "Invalid data width\n");
		retval = -EINVAL;
		goto err_dwidth;
	}

	/* Setup address space */
	temp_ctl &= ~CA91CX42_LSI_CTL_VAS_M;
	switch (aspace) {
	case VME_A16:
		temp_ctl |= CA91CX42_LSI_CTL_VAS_A16;
		break;
	case VME_A24:
		temp_ctl |= CA91CX42_LSI_CTL_VAS_A24;
		break;
	case VME_A32:
		temp_ctl |= CA91CX42_LSI_CTL_VAS_A32;
		break;
	case VME_CRCSR:
		temp_ctl |= CA91CX42_LSI_CTL_VAS_CRCSR;
		break;
	case VME_USER1:
		temp_ctl |= CA91CX42_LSI_CTL_VAS_USER1;
		break;
	case VME_USER2:
		temp_ctl |= CA91CX42_LSI_CTL_VAS_USER2;
		break;
	case VME_A64:
	case VME_USER3:
	case VME_USER4:
	default:
		spin_unlock(&(image->lock));
		printk(KERN_ERR "Invalid address space\n");
		retval = -EINVAL;
		goto err_aspace;
	}

	temp_ctl &= ~(CA91CX42_LSI_CTL_PGM_M | CA91CX42_LSI_CTL_SUPER_M);
	if (cycle & VME_SUPER)
		temp_ctl |= CA91CX42_LSI_CTL_SUPER_SUPR;
	if (cycle & VME_PROG)
		temp_ctl |= CA91CX42_LSI_CTL_PGM_PGM;

	/* Setup mapping */
	iowrite32(pci_base, bridge->base + CA91CX42_LSI_BS[i]);
	iowrite32(pci_bound, bridge->base + CA91CX42_LSI_BD[i]);
	iowrite32(vme_offset, bridge->base + CA91CX42_LSI_TO[i]);

	/* Write ctl reg without enable */
	iowrite32(temp_ctl, bridge->base + CA91CX42_LSI_CTL[i]);

	if (enabled)
		temp_ctl |= CA91CX42_LSI_CTL_EN;

	iowrite32(temp_ctl, bridge->base + CA91CX42_LSI_CTL[i]);

	spin_unlock(&(image->lock));
	return 0;

err_aspace:
err_dwidth:
	ca91cx42_free_resource(image);
err_res:
err_window:
	return retval;
}

int __ca91cx42_master_get(struct vme_master_resource *image, int *enabled,
	unsigned long long *vme_base, unsigned long long *size,
	vme_address_t *aspace, vme_cycle_t *cycle, vme_width_t *dwidth)
{
	unsigned int i, ctl;
	unsigned long long pci_base, pci_bound, vme_offset;
	struct ca91cx42_driver *bridge;

	bridge = image->parent->driver_priv;

	i = image->number;

	ctl = ioread32(bridge->base + CA91CX42_LSI_CTL[i]);

	pci_base = ioread32(bridge->base + CA91CX42_LSI_BS[i]);
	vme_offset = ioread32(bridge->base + CA91CX42_LSI_TO[i]);
	pci_bound = ioread32(bridge->base + CA91CX42_LSI_BD[i]);

	*vme_base = pci_base + vme_offset;
	*size = (unsigned long long)(pci_bound - pci_base);

	*enabled = 0;
	*aspace = 0;
	*cycle = 0;
	*dwidth = 0;

	if (ctl & CA91CX42_LSI_CTL_EN)
		*enabled = 1;

	/* Setup address space */
	switch (ctl & CA91CX42_LSI_CTL_VAS_M) {
	case CA91CX42_LSI_CTL_VAS_A16:
		*aspace = VME_A16;
		break;
	case CA91CX42_LSI_CTL_VAS_A24:
		*aspace = VME_A24;
		break;
	case CA91CX42_LSI_CTL_VAS_A32:
		*aspace = VME_A32;
		break;
	case CA91CX42_LSI_CTL_VAS_CRCSR:
		*aspace = VME_CRCSR;
		break;
	case CA91CX42_LSI_CTL_VAS_USER1:
		*aspace = VME_USER1;
		break;
	case CA91CX42_LSI_CTL_VAS_USER2:
		*aspace = VME_USER2;
		break;
	}

	/* XXX Not sure how to check for MBLT */
	/* Setup cycle types */
	if (ctl & CA91CX42_LSI_CTL_VCT_BLT)
		*cycle |= VME_BLT;
	else
		*cycle |= VME_SCT;

	if (ctl & CA91CX42_LSI_CTL_SUPER_SUPR)
		*cycle |= VME_SUPER;
	else
		*cycle |= VME_USER;

	if (ctl & CA91CX42_LSI_CTL_PGM_PGM)
		*cycle |= VME_PROG;
	else
		*cycle |= VME_DATA;

	/* Setup data width */
	switch (ctl & CA91CX42_LSI_CTL_VDW_M) {
	case CA91CX42_LSI_CTL_VDW_D8:
		*dwidth = VME_D8;
		break;
	case CA91CX42_LSI_CTL_VDW_D16:
		*dwidth = VME_D16;
		break;
	case CA91CX42_LSI_CTL_VDW_D32:
		*dwidth = VME_D32;
		break;
	case CA91CX42_LSI_CTL_VDW_D64:
		*dwidth = VME_D64;
		break;
	}

	return 0;
}

int ca91cx42_master_get(struct vme_master_resource *image, int *enabled,
	unsigned long long *vme_base, unsigned long long *size,
	vme_address_t *aspace, vme_cycle_t *cycle, vme_width_t *dwidth)
{
	int retval;

	spin_lock(&(image->lock));

	retval = __ca91cx42_master_get(image, enabled, vme_base, size, aspace,
		cycle, dwidth);

	spin_unlock(&(image->lock));

	return retval;
}

ssize_t ca91cx42_master_read(struct vme_master_resource *image, void *buf,
	size_t count, loff_t offset)
{
	ssize_t retval;

	spin_lock(&(image->lock));

	memcpy_fromio(buf, image->kern_base + offset, (unsigned int)count);
	retval = count;

	spin_unlock(&(image->lock));

	return retval;
}

ssize_t ca91cx42_master_write(struct vme_master_resource *image, void *buf,
	size_t count, loff_t offset)
{
	ssize_t retval;

	spin_lock(&(image->lock));

	memcpy_toio(image->kern_base + offset, buf, (unsigned int)count);
	retval = count;

	spin_unlock(&(image->lock));

	return retval;
}

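/*
 * Perform a VME read-modify-write cycle using the chip's special cycle
 * generator: the mask, compare and swap registers are armed and the cycle
 * itself is triggered by an ordinary read through the master window.
 */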
unsigned int ca91cx42_master_rmw(struct vme_master_resource *image,
	unsigned int mask, unsigned int compare, unsigned int swap,
	loff_t offset)
{
	u32 pci_addr, result;
	int i;
	struct ca91cx42_driver *bridge;
	struct device *dev;

	bridge = image->parent->driver_priv;
	dev = image->parent->parent;

	/* Find the PCI address that maps to the desired VME address */
	i = image->number;

	/* Locking as we can only do one of these at a time */
	mutex_lock(&(bridge->vme_rmw));

	/* Lock image */
	spin_lock(&(image->lock));

	pci_addr = (u32)image->kern_base + offset;

	/* Address must be 4-byte aligned */
	if (pci_addr & 0x3) {
		dev_err(dev, "RMW Address not 4-byte aligned\n");
		result = -EINVAL;
		goto out;
	}

	/* Ensure RMW Disabled whilst configuring */
	iowrite32(0, bridge->base + SCYC_CTL);

	/* Configure registers */
	iowrite32(mask, bridge->base + SCYC_EN);
	iowrite32(compare, bridge->base + SCYC_CMP);
	iowrite32(swap, bridge->base + SCYC_SWP);
	iowrite32(pci_addr, bridge->base + SCYC_ADDR);

	/* Enable RMW */
	iowrite32(CA91CX42_SCYC_CTL_CYC_RMW, bridge->base + SCYC_CTL);

	/* Kick process off with a read to the required address. */
	result = ioread32(image->kern_base + offset);

	/* Disable RMW */
	iowrite32(0, bridge->base + SCYC_CTL);

out:
	spin_unlock(&(image->lock));

	mutex_unlock(&(bridge->vme_rmw));

	return result;
}

int ca91cx42_dma_list_add(struct vme_dma_list *list, struct vme_dma_attr *src,
	struct vme_dma_attr *dest, size_t count)
{
	struct ca91cx42_dma_entry *entry, *prev;
	struct vme_dma_pci *pci_attr;
	struct vme_dma_vme *vme_attr;
	dma_addr_t desc_ptr;
	int retval = 0;

	/* XXX descriptor must be aligned on 64-bit boundaries */
	entry = kmalloc(sizeof(struct ca91cx42_dma_entry), GFP_KERNEL);
	if (entry == NULL) {
		printk(KERN_ERR "Failed to allocate memory for dma resource "
			"structure\n");
		retval = -ENOMEM;
		goto err_mem;
	}

	/* Test descriptor alignment */
	if ((unsigned long)&(entry->descriptor) & CA91CX42_DCPP_M) {
		printk(KERN_ERR "Descriptor not aligned to 16 byte boundary as "
			"required: %p\n", &(entry->descriptor));
		retval = -EINVAL;
		goto err_align;
	}

	memset(&(entry->descriptor), 0, sizeof(struct ca91cx42_dma_descriptor));

	if (dest->type == VME_DMA_VME) {
		entry->descriptor.dctl |= CA91CX42_DCTL_L2V;
		vme_attr = (struct vme_dma_vme *)dest->private;
		pci_attr = (struct vme_dma_pci *)src->private;
	} else {
		vme_attr = (struct vme_dma_vme *)src->private;
		pci_attr = (struct vme_dma_pci *)dest->private;
	}

	/* Check that we can fulfill the required attributes */
	if ((vme_attr->aspace & ~(VME_A16 | VME_A24 | VME_A32 | VME_USER1 |
		VME_USER2)) != 0) {

		printk(KERN_ERR "Unsupported address space\n");
		retval = -EINVAL;
		goto err_aspace;
	}

	if ((vme_attr->cycle & ~(VME_SCT | VME_BLT | VME_SUPER | VME_USER |
		VME_PROG | VME_DATA)) != 0) {

		printk(KERN_ERR "Unsupported cycle type\n");
		retval = -EINVAL;
		goto err_cycle;
	}

	/* Check to see if we can fulfill source and destination */
	if (!(((src->type == VME_DMA_PCI) && (dest->type == VME_DMA_VME)) ||
		((src->type == VME_DMA_VME) && (dest->type == VME_DMA_PCI)))) {

		printk(KERN_ERR "Cannot perform transfer with this "
			"source-destination combination\n");
		retval = -EINVAL;
		goto err_direct;
	}

	/* Setup cycle types */
	if (vme_attr->cycle & VME_BLT)
		entry->descriptor.dctl |= CA91CX42_DCTL_VCT_BLT;

	/* Setup data width */
	switch (vme_attr->dwidth) {
	case VME_D8:
		entry->descriptor.dctl |= CA91CX42_DCTL_VDW_D8;
		break;
	case VME_D16:
		entry->descriptor.dctl |= CA91CX42_DCTL_VDW_D16;
		break;
	case VME_D32:
		entry->descriptor.dctl |= CA91CX42_DCTL_VDW_D32;
		break;
	case VME_D64:
		entry->descriptor.dctl |= CA91CX42_DCTL_VDW_D64;
		break;
	default:
		printk(KERN_ERR "Invalid data width\n");
		retval = -EINVAL;
		goto err_align;
	}

	/* Setup address space */
	switch (vme_attr->aspace) {
	case VME_A16:
		entry->descriptor.dctl |= CA91CX42_DCTL_VAS_A16;
		break;
	case VME_A24:
		entry->descriptor.dctl |= CA91CX42_DCTL_VAS_A24;
		break;
	case VME_A32:
		entry->descriptor.dctl |= CA91CX42_DCTL_VAS_A32;
		break;
	case VME_USER1:
		entry->descriptor.dctl |= CA91CX42_DCTL_VAS_USER1;
		break;
	case VME_USER2:
		entry->descriptor.dctl |= CA91CX42_DCTL_VAS_USER2;
		break;
	default:
		printk(KERN_ERR "Invalid address space\n");
		retval = -EINVAL;
		goto err_align;
	}

	if (vme_attr->cycle & VME_SUPER)
		entry->descriptor.dctl |= CA91CX42_DCTL_SUPER_SUPR;
	if (vme_attr->cycle & VME_PROG)
		entry->descriptor.dctl |= CA91CX42_DCTL_PGM_PGM;

	entry->descriptor.dtbc = count;
	entry->descriptor.dla = pci_attr->address;
	entry->descriptor.dva = vme_attr->address;
	entry->descriptor.dcpp = CA91CX42_DCPP_NULL;

	/* Add to list */
	list_add_tail(&(entry->list), &(list->entries));

	/* Fill out previous descriptor's "Next Address" */
	if (entry->list.prev != &(list->entries)) {
		prev = list_entry(entry->list.prev, struct ca91cx42_dma_entry,
			list);
		/* We need the bus address for the pointer */
		desc_ptr = virt_to_bus(&(entry->descriptor));
		prev->descriptor.dcpp = desc_ptr & ~CA91CX42_DCPP_M;
	}

	return 0;

err_cycle:
err_aspace:
err_direct:
err_align:
	kfree(entry);
err_mem:
	return retval;
}

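/*
 * Despite its name, this helper returns nonzero once the DMA controller is
 * idle; it serves as the wake-up condition for ca91cx42_dma_list_exec().
 */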
static int ca91cx42_dma_busy(struct vme_bridge *ca91cx42_bridge)
{
	u32 tmp;
	struct ca91cx42_driver *bridge;

	bridge = ca91cx42_bridge->driver_priv;

	tmp = ioread32(bridge->base + DGCS);

	if (tmp & CA91CX42_DGCS_ACT)
		return 0;
	else
		return 1;
}

int ca91cx42_dma_list_exec(struct vme_dma_list *list)
{
	struct vme_dma_resource *ctrlr;
	struct ca91cx42_dma_entry *entry;
	int retval = 0;
	dma_addr_t bus_addr;
	u32 val;
	struct ca91cx42_driver *bridge;

	ctrlr = list->parent;

	bridge = ctrlr->parent->driver_priv;

	mutex_lock(&(ctrlr->mtx));

	if (!(list_empty(&(ctrlr->running)))) {
		/*
		 * XXX We have an active DMA transfer and currently haven't
		 *     sorted out the mechanism for "pending" DMA transfers.
		 *     Return busy.
		 */
		/* Need to add to pending here */
		mutex_unlock(&(ctrlr->mtx));
		return -EBUSY;
	} else {
		list_add(&(list->list), &(ctrlr->running));
	}

	/* Get first bus address and write into registers */
	entry = list_first_entry(&(list->entries), struct ca91cx42_dma_entry,
		list);

	bus_addr = virt_to_bus(&(entry->descriptor));

	mutex_unlock(&(ctrlr->mtx));

	iowrite32(0, bridge->base + DTBC);
	iowrite32(bus_addr & ~CA91CX42_DCPP_M, bridge->base + DCPP);

	/* Start the operation */
	val = ioread32(bridge->base + DGCS);

	/* XXX Could set VMEbus On and Off Counters here */
	val &= (CA91CX42_DGCS_VON_M | CA91CX42_DGCS_VOFF_M);

	val |= (CA91CX42_DGCS_CHAIN | CA91CX42_DGCS_STOP | CA91CX42_DGCS_HALT |
		CA91CX42_DGCS_DONE | CA91CX42_DGCS_LERR | CA91CX42_DGCS_VERR |
		CA91CX42_DGCS_PERR);

	iowrite32(val, bridge->base + DGCS);

	val |= CA91CX42_DGCS_GO;

	iowrite32(val, bridge->base + DGCS);

	wait_event_interruptible(bridge->dma_queue,
		ca91cx42_dma_busy(ctrlr->parent));

	/*
	 * Read status register, this register is valid until we kick off a
	 * new transfer.
	 */
	val = ioread32(bridge->base + DGCS);

	if (val & (CA91CX42_DGCS_LERR | CA91CX42_DGCS_VERR |
		CA91CX42_DGCS_PERR)) {

		printk(KERN_ERR "ca91c042: DMA Error. DGCS=%08X\n", val);
		val = ioread32(bridge->base + DCTL);
	}

	/* Remove list from running list */
	mutex_lock(&(ctrlr->mtx));
	list_del(&(list->list));
	mutex_unlock(&(ctrlr->mtx));

	return retval;
}

int ca91cx42_dma_list_empty(struct vme_dma_list *list)
{
	struct list_head *pos, *temp;
	struct ca91cx42_dma_entry *entry;

	/* detach and free each entry */
	list_for_each_safe(pos, temp, &(list->entries)) {
		list_del(pos);
		entry = list_entry(pos, struct ca91cx42_dma_entry, list);
		kfree(entry);
	}

	return 0;
}

/*
 * All 4 location monitors reside at the same base - this is therefore a
 * system wide configuration.
 *
 * This does not enable the LM monitor - that should be done when the first
 * callback is attached and disabled when the last callback is removed.
 */
int ca91cx42_lm_set(struct vme_lm_resource *lm, unsigned long long lm_base,
	vme_address_t aspace, vme_cycle_t cycle)
{
	u32 temp_base, lm_ctl = 0;
	int i;
	struct ca91cx42_driver *bridge;
	struct device *dev;

	bridge = lm->parent->driver_priv;
	dev = lm->parent->parent;

	/* Check the alignment of the location monitor */
	temp_base = (u32)lm_base;
	if (temp_base & 0xffff) {
		dev_err(dev, "Location monitor must be aligned to 64KB "
			"boundary");
		return -EINVAL;
	}

	mutex_lock(&(lm->mtx));

	/* If we already have a callback attached, we can't move it! */
	for (i = 0; i < lm->monitors; i++) {
		if (bridge->lm_callback[i] != NULL) {
			mutex_unlock(&(lm->mtx));
			dev_err(dev, "Location monitor callback attached, "
				"can't reset\n");
			return -EBUSY;
		}
	}

	switch (aspace) {
	case VME_A16:
		lm_ctl |= CA91CX42_LM_CTL_AS_A16;
		break;
	case VME_A24:
		lm_ctl |= CA91CX42_LM_CTL_AS_A24;
		break;
	case VME_A32:
		lm_ctl |= CA91CX42_LM_CTL_AS_A32;
		break;
	default:
		mutex_unlock(&(lm->mtx));
		dev_err(dev, "Invalid address space\n");
		return -EINVAL;
	}

	if (cycle & VME_SUPER)
		lm_ctl |= CA91CX42_LM_CTL_SUPR;
	if (cycle & VME_USER)
		lm_ctl |= CA91CX42_LM_CTL_NPRIV;
	if (cycle & VME_PROG)
		lm_ctl |= CA91CX42_LM_CTL_PGM;
	if (cycle & VME_DATA)
		lm_ctl |= CA91CX42_LM_CTL_DATA;

	iowrite32(lm_base, bridge->base + LM_BS);
	iowrite32(lm_ctl, bridge->base + LM_CTL);

	mutex_unlock(&(lm->mtx));

	return 0;
}

/* Get configuration of the callback monitor and return whether it is enabled
 * or disabled.
 */
int ca91cx42_lm_get(struct vme_lm_resource *lm, unsigned long long *lm_base,
	vme_address_t *aspace, vme_cycle_t *cycle)
{
	u32 lm_ctl, enabled = 0;
	struct ca91cx42_driver *bridge;

	bridge = lm->parent->driver_priv;

	mutex_lock(&(lm->mtx));

	*lm_base = (unsigned long long)ioread32(bridge->base + LM_BS);
	lm_ctl = ioread32(bridge->base + LM_CTL);

	if (lm_ctl & CA91CX42_LM_CTL_EN)
		enabled = 1;

	if ((lm_ctl & CA91CX42_LM_CTL_AS_M) == CA91CX42_LM_CTL_AS_A16)
		*aspace = VME_A16;
	if ((lm_ctl & CA91CX42_LM_CTL_AS_M) == CA91CX42_LM_CTL_AS_A24)
		*aspace = VME_A24;
	if ((lm_ctl & CA91CX42_LM_CTL_AS_M) == CA91CX42_LM_CTL_AS_A32)
		*aspace = VME_A32;

	*cycle = 0;
	if (lm_ctl & CA91CX42_LM_CTL_SUPR)
		*cycle |= VME_SUPER;
	if (lm_ctl & CA91CX42_LM_CTL_NPRIV)
		*cycle |= VME_USER;
	if (lm_ctl & CA91CX42_LM_CTL_PGM)
		*cycle |= VME_PROG;
	if (lm_ctl & CA91CX42_LM_CTL_DATA)
		*cycle |= VME_DATA;

	mutex_unlock(&(lm->mtx));

	return enabled;
}

/*
 * Attach a callback to a specific location monitor.
 *
 * Callback will be passed the monitor triggered.
 */
int ca91cx42_lm_attach(struct vme_lm_resource *lm, int monitor,
	void (*callback)(int))
{
	u32 lm_ctl, tmp;
	struct ca91cx42_driver *bridge;
	struct device *dev;

	bridge = lm->parent->driver_priv;
	dev = lm->parent->parent;

	mutex_lock(&(lm->mtx));

	/* Ensure that the location monitor is configured - need PGM or DATA */
	lm_ctl = ioread32(bridge->base + LM_CTL);
	if ((lm_ctl & (CA91CX42_LM_CTL_PGM | CA91CX42_LM_CTL_DATA)) == 0) {
		mutex_unlock(&(lm->mtx));
		dev_err(dev, "Location monitor not properly configured\n");
		return -EINVAL;
	}

	/* Check that a callback isn't already attached */
	if (bridge->lm_callback[monitor] != NULL) {
		mutex_unlock(&(lm->mtx));
		dev_err(dev, "Existing callback attached\n");
		return -EBUSY;
	}

	/* Attach callback */
	bridge->lm_callback[monitor] = callback;

	/* Enable Location Monitor interrupt */
	tmp = ioread32(bridge->base + LINT_EN);
	tmp |= CA91CX42_LINT_LM[monitor];
	iowrite32(tmp, bridge->base + LINT_EN);

	/* Ensure that the global Location Monitor Enable is set */
	if ((lm_ctl & CA91CX42_LM_CTL_EN) == 0) {
		lm_ctl |= CA91CX42_LM_CTL_EN;
		iowrite32(lm_ctl, bridge->base + LM_CTL);
	}

	mutex_unlock(&(lm->mtx));

	return 0;
}

/*
 * Detach a callback function from a specific location monitor.
 */
int ca91cx42_lm_detach(struct vme_lm_resource *lm, int monitor)
{
	u32 tmp;
	struct ca91cx42_driver *bridge;

	bridge = lm->parent->driver_priv;

	mutex_lock(&(lm->mtx));

	/* Disable Location Monitor and ensure previous interrupts are clear */
	tmp = ioread32(bridge->base + LINT_EN);
	tmp &= ~CA91CX42_LINT_LM[monitor];
	iowrite32(tmp, bridge->base + LINT_EN);

	iowrite32(CA91CX42_LINT_LM[monitor],
		bridge->base + LINT_STAT);

	/* Detach callback */
	bridge->lm_callback[monitor] = NULL;

	/* If all location monitors disabled, disable global Location Monitor */
	if ((tmp & (CA91CX42_LINT_LM0 | CA91CX42_LINT_LM1 | CA91CX42_LINT_LM2 |
			CA91CX42_LINT_LM3)) == 0) {
		tmp = ioread32(bridge->base + LM_CTL);
		tmp &= ~CA91CX42_LM_CTL_EN;
		iowrite32(tmp, bridge->base + LM_CTL);
	}

	mutex_unlock(&(lm->mtx));

	return 0;
}

int ca91cx42_slot_get(struct vme_bridge *ca91cx42_bridge)
{
	u32 slot = 0;
	struct ca91cx42_driver *bridge;

	bridge = ca91cx42_bridge->driver_priv;

	if (!geoid) {
		slot = ioread32(bridge->base + VCSR_BS);
		slot = ((slot & CA91CX42_VCSR_BS_SLOT_M) >> 27);
	} else
		slot = geoid;

	return (int)slot;
}

static int __init ca91cx42_init(void)
{
	return pci_register_driver(&ca91cx42_driver);
}

/*
 * Configure CR/CSR space
 *
 * Access to the CR/CSR can be configured at power-up. The location of the
 * CR/CSR registers in the CR/CSR address space is determined by the board's
 * Auto-ID or Geographic address. This function ensures that the window is
 * enabled at an offset consistent with the board's geographic address.
 */
static int ca91cx42_crcsr_init(struct vme_bridge *ca91cx42_bridge,
	struct pci_dev *pdev)
{
	unsigned int crcsr_addr;
	int tmp, slot;
	struct ca91cx42_driver *bridge;

	bridge = ca91cx42_bridge->driver_priv;

	slot = ca91cx42_slot_get(ca91cx42_bridge);

	/* Write CSR Base Address if slot ID is supplied as a module param */
	if (geoid)
		iowrite32(geoid << 27, bridge->base + VCSR_BS);

	dev_info(&pdev->dev, "CR/CSR Offset: %d\n", slot);
	if (slot == 0) {
		dev_err(&pdev->dev, "Slot number is unset, not configuring "
			"CR/CSR space\n");
		return -EINVAL;
	}

	/* Allocate mem for CR/CSR image */
	bridge->crcsr_kernel = pci_alloc_consistent(pdev, VME_CRCSR_BUF_SIZE,
		&(bridge->crcsr_bus));
	if (bridge->crcsr_kernel == NULL) {
		dev_err(&pdev->dev, "Failed to allocate memory for CR/CSR "
			"image\n");
		return -ENOMEM;
	}

	memset(bridge->crcsr_kernel, 0, VME_CRCSR_BUF_SIZE);

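	/* Each slot is allotted a 512 kB window in CR/CSR space */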
	crcsr_addr = slot * (512 * 1024);
	iowrite32(bridge->crcsr_bus - crcsr_addr, bridge->base + VCSR_TO);

	tmp = ioread32(bridge->base + VCSR_CTL);
	tmp |= CA91CX42_VCSR_CTL_EN;
	iowrite32(tmp, bridge->base + VCSR_CTL);

	return 0;
}

static void ca91cx42_crcsr_exit(struct vme_bridge *ca91cx42_bridge,
	struct pci_dev *pdev)
{
	u32 tmp;
	struct ca91cx42_driver *bridge;

	bridge = ca91cx42_bridge->driver_priv;

	/* Turn off CR/CSR space */
	tmp = ioread32(bridge->base + VCSR_CTL);
	tmp &= ~CA91CX42_VCSR_CTL_EN;
	iowrite32(tmp, bridge->base + VCSR_CTL);

	/* Free image */
	iowrite32(0, bridge->base + VCSR_TO);

	pci_free_consistent(pdev, VME_CRCSR_BUF_SIZE, bridge->crcsr_kernel,
		bridge->crcsr_bus);
}

static int ca91cx42_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	int retval, i;
	u32 data;
	struct list_head *pos = NULL;
	struct vme_bridge *ca91cx42_bridge;
	struct ca91cx42_driver *ca91cx42_device;
	struct vme_master_resource *master_image;
	struct vme_slave_resource *slave_image;
	struct vme_dma_resource *dma_ctrlr;
	struct vme_lm_resource *lm;

	/* We want to support more than one of each bridge so we need to
	 * dynamically allocate the bridge structure
	 */
	ca91cx42_bridge = kzalloc(sizeof(struct vme_bridge), GFP_KERNEL);

	if (ca91cx42_bridge == NULL) {
		dev_err(&pdev->dev, "Failed to allocate memory for device "
			"structure\n");
		retval = -ENOMEM;
		goto err_struct;
	}

	ca91cx42_device = kzalloc(sizeof(struct ca91cx42_driver), GFP_KERNEL);

	if (ca91cx42_device == NULL) {
		dev_err(&pdev->dev, "Failed to allocate memory for device "
			"structure\n");
		retval = -ENOMEM;
		goto err_driver;
	}

	ca91cx42_bridge->driver_priv = ca91cx42_device;

	/* Enable the device */
	retval = pci_enable_device(pdev);
	if (retval) {
		dev_err(&pdev->dev, "Unable to enable device\n");
		goto err_enable;
	}

	/* Map Registers */
	retval = pci_request_regions(pdev, driver_name);
	if (retval) {
		dev_err(&pdev->dev, "Unable to reserve resources\n");
		goto err_resource;
	}

	/* map registers in BAR 0 */
	ca91cx42_device->base = ioremap_nocache(pci_resource_start(pdev, 0),
		4096);
	if (!ca91cx42_device->base) {
		dev_err(&pdev->dev, "Unable to remap CRG region\n");
		retval = -EIO;
		goto err_remap;
	}

	/* Check to see if the mapping worked out */
	data = ioread32(ca91cx42_device->base + CA91CX42_PCI_ID) & 0x0000FFFF;
	if (data != PCI_VENDOR_ID_TUNDRA) {
		dev_err(&pdev->dev, "PCI_ID check failed\n");
		retval = -EIO;
		goto err_test;
	}

	/* Initialize wait queues & mutual exclusion flags */
	init_waitqueue_head(&(ca91cx42_device->dma_queue));
	init_waitqueue_head(&(ca91cx42_device->iack_queue));
	mutex_init(&(ca91cx42_device->vme_int));
	mutex_init(&(ca91cx42_device->vme_rmw));

	ca91cx42_bridge->parent = &(pdev->dev);
	strcpy(ca91cx42_bridge->name, driver_name);

	/* Setup IRQ */
	retval = ca91cx42_irq_init(ca91cx42_bridge);
	if (retval != 0) {
		dev_err(&pdev->dev, "Chip Initialization failed.\n");
		goto err_irq;
	}

	/* Add master windows to list */
	INIT_LIST_HEAD(&(ca91cx42_bridge->master_resources));
	for (i = 0; i < CA91C142_MAX_MASTER; i++) {
		master_image = kmalloc(sizeof(struct vme_master_resource),
			GFP_KERNEL);
		if (master_image == NULL) {
			dev_err(&pdev->dev, "Failed to allocate memory for "
				"master resource structure\n");
			retval = -ENOMEM;
			goto err_master;
		}
		master_image->parent = ca91cx42_bridge;
		spin_lock_init(&(master_image->lock));
		master_image->locked = 0;
		master_image->number = i;
		master_image->address_attr = VME_A16 | VME_A24 | VME_A32 |
			VME_CRCSR | VME_USER1 | VME_USER2;
		master_image->cycle_attr = VME_SCT | VME_BLT | VME_MBLT |
			VME_SUPER | VME_USER | VME_PROG | VME_DATA;
		master_image->width_attr = VME_D8 | VME_D16 | VME_D32 | VME_D64;
		memset(&(master_image->bus_resource), 0,
			sizeof(struct resource));
		master_image->kern_base = NULL;
		list_add_tail(&(master_image->list),
			&(ca91cx42_bridge->master_resources));
	}

	/* Add slave windows to list */
	INIT_LIST_HEAD(&(ca91cx42_bridge->slave_resources));
	for (i = 0; i < CA91C142_MAX_SLAVE; i++) {
		slave_image = kmalloc(sizeof(struct vme_slave_resource),
			GFP_KERNEL);
		if (slave_image == NULL) {
			dev_err(&pdev->dev, "Failed to allocate memory for "
				"slave resource structure\n");
			retval = -ENOMEM;
			goto err_slave;
		}
		slave_image->parent = ca91cx42_bridge;
		mutex_init(&(slave_image->mtx));
		slave_image->locked = 0;
		slave_image->number = i;
		slave_image->address_attr = VME_A24 | VME_A32 | VME_USER1 |
			VME_USER2;

		/* Only windows 0 and 4 support A16 */
		if (i == 0 || i == 4)
			slave_image->address_attr |= VME_A16;

		slave_image->cycle_attr = VME_SCT | VME_BLT | VME_MBLT |
			VME_SUPER | VME_USER | VME_PROG | VME_DATA;
		list_add_tail(&(slave_image->list),
			&(ca91cx42_bridge->slave_resources));
	}

	/* Add dma engines to list */
	INIT_LIST_HEAD(&(ca91cx42_bridge->dma_resources));
	for (i = 0; i < CA91C142_MAX_DMA; i++) {
		dma_ctrlr = kmalloc(sizeof(struct vme_dma_resource),
			GFP_KERNEL);
		if (dma_ctrlr == NULL) {
			dev_err(&pdev->dev, "Failed to allocate memory for "
				"dma resource structure\n");
			retval = -ENOMEM;
			goto err_dma;
		}
		dma_ctrlr->parent = ca91cx42_bridge;
		mutex_init(&(dma_ctrlr->mtx));
		dma_ctrlr->locked = 0;
		dma_ctrlr->number = i;
		dma_ctrlr->route_attr = VME_DMA_VME_TO_MEM |
			VME_DMA_MEM_TO_VME;
		INIT_LIST_HEAD(&(dma_ctrlr->pending));
		INIT_LIST_HEAD(&(dma_ctrlr->running));
		list_add_tail(&(dma_ctrlr->list),
			&(ca91cx42_bridge->dma_resources));
	}

	/* Add location monitor to list */
	INIT_LIST_HEAD(&(ca91cx42_bridge->lm_resources));
	lm = kmalloc(sizeof(struct vme_lm_resource), GFP_KERNEL);
	if (lm == NULL) {
		dev_err(&pdev->dev, "Failed to allocate memory for "
			"location monitor resource structure\n");
		retval = -ENOMEM;
		goto err_lm;
	}
	lm->parent = ca91cx42_bridge;
	mutex_init(&(lm->mtx));
	lm->locked = 0;
	lm->number = 1;
	lm->monitors = 4;
	list_add_tail(&(lm->list), &(ca91cx42_bridge->lm_resources));

	ca91cx42_bridge->slave_get = ca91cx42_slave_get;
	ca91cx42_bridge->slave_set = ca91cx42_slave_set;
	ca91cx42_bridge->master_get = ca91cx42_master_get;
	ca91cx42_bridge->master_set = ca91cx42_master_set;
	ca91cx42_bridge->master_read = ca91cx42_master_read;
	ca91cx42_bridge->master_write = ca91cx42_master_write;
	ca91cx42_bridge->master_rmw = ca91cx42_master_rmw;
	ca91cx42_bridge->dma_list_add = ca91cx42_dma_list_add;
	ca91cx42_bridge->dma_list_exec = ca91cx42_dma_list_exec;
	ca91cx42_bridge->dma_list_empty = ca91cx42_dma_list_empty;
	ca91cx42_bridge->irq_set = ca91cx42_irq_set;
	ca91cx42_bridge->irq_generate = ca91cx42_irq_generate;
	ca91cx42_bridge->lm_set = ca91cx42_lm_set;
	ca91cx42_bridge->lm_get = ca91cx42_lm_get;
	ca91cx42_bridge->lm_attach = ca91cx42_lm_attach;
	ca91cx42_bridge->lm_detach = ca91cx42_lm_detach;
	ca91cx42_bridge->slot_get = ca91cx42_slot_get;

	data = ioread32(ca91cx42_device->base + MISC_CTL);
	dev_info(&pdev->dev, "Board is%s the VME system controller\n",
		(data & CA91CX42_MISC_CTL_SYSCON) ? "" : " not");
	dev_info(&pdev->dev, "Slot ID is %d\n",
		ca91cx42_slot_get(ca91cx42_bridge));

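	/* A CR/CSR configuration failure is reported but not treated as
	 * fatal; the bridge remains usable without a CR/CSR image.
	 */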
	if (ca91cx42_crcsr_init(ca91cx42_bridge, pdev)) {
		dev_err(&pdev->dev, "CR/CSR configuration failed.\n");
	}

	/* Need to save ca91cx42_bridge pointer locally in link list for use in
	 * ca91cx42_remove()
	 */
	retval = vme_register_bridge(ca91cx42_bridge);
	if (retval != 0) {
		dev_err(&pdev->dev, "Chip Registration failed.\n");
		goto err_reg;
	}

	pci_set_drvdata(pdev, ca91cx42_bridge);

	return 0;

err_reg:
	ca91cx42_crcsr_exit(ca91cx42_bridge, pdev);
err_lm:
	/* resources are stored in link list */
	list_for_each(pos, &(ca91cx42_bridge->lm_resources)) {
		lm = list_entry(pos, struct vme_lm_resource, list);
		list_del(pos);
		kfree(lm);
	}
err_dma:
	/* resources are stored in link list */
	list_for_each(pos, &(ca91cx42_bridge->dma_resources)) {
		dma_ctrlr = list_entry(pos, struct vme_dma_resource, list);
		list_del(pos);
		kfree(dma_ctrlr);
	}
err_slave:
	/* resources are stored in link list */
	list_for_each(pos, &(ca91cx42_bridge->slave_resources)) {
		slave_image = list_entry(pos, struct vme_slave_resource, list);
		list_del(pos);
		kfree(slave_image);
	}
err_master:
	/* resources are stored in link list */
	list_for_each(pos, &(ca91cx42_bridge->master_resources)) {
		master_image = list_entry(pos, struct vme_master_resource,
			list);
		list_del(pos);
		kfree(master_image);
	}

	ca91cx42_irq_exit(ca91cx42_bridge, pdev);
err_irq:
err_test:
	iounmap(ca91cx42_device->base);
err_remap:
	pci_release_regions(pdev);
err_resource:
	pci_disable_device(pdev);
err_enable:
	kfree(ca91cx42_device);
err_driver:
	kfree(ca91cx42_bridge);
err_struct:
	return retval;
}

void ca91cx42_remove(struct pci_dev *pdev)
{
	struct list_head *pos = NULL;
	struct vme_master_resource *master_image;
	struct vme_slave_resource *slave_image;
	struct vme_dma_resource *dma_ctrlr;
	struct vme_lm_resource *lm;
	struct ca91cx42_driver *bridge;
	struct vme_bridge *ca91cx42_bridge = pci_get_drvdata(pdev);

	bridge = ca91cx42_bridge->driver_priv;

	/* Turn off Ints */
	iowrite32(0, bridge->base + LINT_EN);

	/* Turn off the windows */
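	/* (the window enable bit is clear in each value written below) */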
	iowrite32(0x00800000, bridge->base + LSI0_CTL);
	iowrite32(0x00800000, bridge->base + LSI1_CTL);
	iowrite32(0x00800000, bridge->base + LSI2_CTL);
	iowrite32(0x00800000, bridge->base + LSI3_CTL);
	iowrite32(0x00800000, bridge->base + LSI4_CTL);
	iowrite32(0x00800000, bridge->base + LSI5_CTL);
	iowrite32(0x00800000, bridge->base + LSI6_CTL);
	iowrite32(0x00800000, bridge->base + LSI7_CTL);
	iowrite32(0x00F00000, bridge->base + VSI0_CTL);
	iowrite32(0x00F00000, bridge->base + VSI1_CTL);
	iowrite32(0x00F00000, bridge->base + VSI2_CTL);
	iowrite32(0x00F00000, bridge->base + VSI3_CTL);
	iowrite32(0x00F00000, bridge->base + VSI4_CTL);
	iowrite32(0x00F00000, bridge->base + VSI5_CTL);
	iowrite32(0x00F00000, bridge->base + VSI6_CTL);
	iowrite32(0x00F00000, bridge->base + VSI7_CTL);

	vme_unregister_bridge(ca91cx42_bridge);

	ca91cx42_crcsr_exit(ca91cx42_bridge, pdev);

	/* resources are stored in link list */
	list_for_each(pos, &(ca91cx42_bridge->lm_resources)) {
		lm = list_entry(pos, struct vme_lm_resource, list);
		list_del(pos);
		kfree(lm);
	}

	/* resources are stored in link list */
	list_for_each(pos, &(ca91cx42_bridge->dma_resources)) {
		dma_ctrlr = list_entry(pos, struct vme_dma_resource, list);
		list_del(pos);
		kfree(dma_ctrlr);
	}

	/* resources are stored in link list */
	list_for_each(pos, &(ca91cx42_bridge->slave_resources)) {
		slave_image = list_entry(pos, struct vme_slave_resource, list);
		list_del(pos);
		kfree(slave_image);
	}

	/* resources are stored in link list */
	list_for_each(pos, &(ca91cx42_bridge->master_resources)) {
		master_image = list_entry(pos, struct vme_master_resource,
			list);
		list_del(pos);
		kfree(master_image);
	}

	ca91cx42_irq_exit(ca91cx42_bridge, pdev);

	iounmap(bridge->base);

	pci_release_regions(pdev);

	pci_disable_device(pdev);

	/* Free the driver-private structure as well as the bridge */
	kfree(bridge);

	kfree(ca91cx42_bridge);
}

static void __exit ca91cx42_exit(void)
{
	pci_unregister_driver(&ca91cx42_driver);
}

MODULE_PARM_DESC(geoid, "Override geographical addressing");
module_param(geoid, int, 0);

MODULE_DESCRIPTION("VME driver for the Tundra Universe II VME bridge");
MODULE_LICENSE("GPL");

module_init(ca91cx42_init);
module_exit(ca91cx42_exit);