// SPDX-License-Identifier: GPL-2.0
/*
 * ESP front-end for Amiga ZORRO SCSI systems.
 *
 * Copyright (C) 1996 Jesper Skov (jskov@cygnus.co.uk)
 *
 * Copyright (C) 2011,2018 Michael Schmitz (schmitz@debian.org) for
 *	migration to ESP SCSI core
 *
 * Copyright (C) 2013 Tuomas Vainikka (tuomas.vainikka@aalto.fi) for
 *	Blizzard 1230 DMA and probe function fixes
 */
/*
 * ZORRO bus code from:
 */
/*
 * Detection routine for the NCR53c710 based Amiga SCSI Controllers for Linux.
 *	Amiga MacroSystemUS WarpEngine SCSI controller.
 *	Amiga Technologies/DKB A4091 SCSI controller.
 *
 * Written 1997 by Alan Hourihane <alanh@fairlite.demon.co.uk>
 * plus modifications of the 53c7xx.c driver to support the Amiga.
 *
 * Rewritten to use 53c700.c by Kars de Jong <jongk@linux-m68k.org>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>
#include <linux/delay.h>
#include <linux/zorro.h>
#include <linux/slab.h>

#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/cacheflush.h>
#include <asm/amigahw.h>
#include <asm/amigaints.h>

#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_spi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_tcq.h>

#include "esp_scsi.h"

MODULE_AUTHOR("Michael Schmitz <schmitz@debian.org>");
MODULE_DESCRIPTION("Amiga Zorro NCR53C9x (ESP) driver");
MODULE_LICENSE("GPL");

/* per-board register layout definitions */

/* Blizzard 1230 DMA interface */

struct blz1230_dma_registers {
	unsigned char dma_addr;		/* DMA address [0x0000] */
	unsigned char dmapad2[0x7fff];
	unsigned char dma_latch;	/* DMA latch [0x8000] */
};

/* Blizzard 1230II DMA interface */

struct blz1230II_dma_registers {
	unsigned char dma_addr;		/* DMA address [0x0000] */
	unsigned char dmapad2[0xf];
	unsigned char dma_latch;	/* DMA latch [0x0010] */
};

/* Blizzard 2060 DMA interface */

struct blz2060_dma_registers {
	unsigned char dma_led_ctrl;	/* DMA led control [0x000] */
	unsigned char dmapad1[0x0f];
	unsigned char dma_addr0;	/* DMA address (MSB) [0x010] */
	unsigned char dmapad2[0x03];
	unsigned char dma_addr1;	/* DMA address [0x014] */
	unsigned char dmapad3[0x03];
	unsigned char dma_addr2;	/* DMA address [0x018] */
	unsigned char dmapad4[0x03];
	unsigned char dma_addr3;	/* DMA address (LSB) [0x01c] */
};

/* DMA control bits */
#define DMA_WRITE 0x80000000

/* Cyberstorm DMA interface */

struct cyber_dma_registers {
	unsigned char dma_addr0;	/* DMA address (MSB) [0x000] */
	unsigned char dmapad1[1];
	unsigned char dma_addr1;	/* DMA address [0x002] */
	unsigned char dmapad2[1];
	unsigned char dma_addr2;	/* DMA address [0x004] */
	unsigned char dmapad3[1];
	unsigned char dma_addr3;	/* DMA address (LSB) [0x006] */
	unsigned char dmapad4[0x3fb];
	unsigned char cond_reg;		/* DMA cond (ro) [0x402] */
#define ctrl_reg cond_reg		/* DMA control (wo) [0x402] */
};

/* DMA control bits */
#define CYBER_DMA_WRITE	0x40	/* DMA direction. 1 = write */
#define CYBER_DMA_Z3	0x20	/* 16 (Z2) or 32 (CHIP/Z3) bit DMA transfer */

/* DMA status bits */
#define CYBER_DMA_HNDL_INTR	0x80	/* DMA IRQ pending? */

/* The CyberStorm II DMA interface */
struct cyberII_dma_registers {
	unsigned char cond_reg;		/* DMA cond (ro) [0x000] */
#define ctrl_reg cond_reg		/* DMA control (wo) [0x000] */
	unsigned char dmapad4[0x3f];
	unsigned char dma_addr0;	/* DMA address (MSB) [0x040] */
	unsigned char dmapad1[3];
	unsigned char dma_addr1;	/* DMA address [0x044] */
	unsigned char dmapad2[3];
	unsigned char dma_addr2;	/* DMA address [0x048] */
	unsigned char dmapad3[3];
	unsigned char dma_addr3;	/* DMA address (LSB) [0x04c] */
};

/* Fastlane DMA interface */

struct fastlane_dma_registers {
	unsigned char cond_reg;		/* DMA status (ro) [0x0000] */
#define ctrl_reg cond_reg		/* DMA control (wo) [0x0000] */
	char dmapad1[0x3f];
	unsigned char clear_strobe;	/* DMA clear (wo) [0x0040] */
};

/*
 * The controller registers can be found in the Z2 config area at these
 * offsets:
 */
#define FASTLANE_ESP_ADDR	0x1000001

/* DMA status bits */
#define FASTLANE_DMA_MINT	0x80
#define FASTLANE_DMA_IACT	0x40
#define FASTLANE_DMA_CREQ	0x20

/* DMA control bits */
#define FASTLANE_DMA_FCODE	0xa0
#define FASTLANE_DMA_MASK	0xf3
#define FASTLANE_DMA_WRITE	0x08	/* 1 = write */
#define FASTLANE_DMA_ENABLE	0x04	/* Enable DMA */
#define FASTLANE_DMA_EDI	0x02	/* Enable DMA IRQ ? */
#define FASTLANE_DMA_ESI	0x01	/* Enable SCSI IRQ */

/*
 * private data used for driver
 */
struct zorro_esp_priv {
	struct esp *esp;		/* our ESP instance - for Scsi_host* */
	void __iomem *board_base;	/* virtual address (Zorro III board) */
	int zorro3;			/* board is Zorro III */
	unsigned char ctrl_data;	/* shadow copy of ctrl_reg */
};

/*
 * On all implementations except for the Oktagon, padding between ESP
 * registers is three bytes.
 * On Oktagon, it is one byte - use a different accessor there.
 *
 * Oktagon needs PDMA - currently unsupported!
 */

static void zorro_esp_write8(struct esp *esp, u8 val, unsigned long reg)
{
	writeb(val, esp->regs + (reg * 4UL));
}

static u8 zorro_esp_read8(struct esp *esp, unsigned long reg)
{
	return readb(esp->regs + (reg * 4UL));
}

static int zorro_esp_irq_pending(struct esp *esp)
{
	/* check ESP status register; DMA has no status reg. */
	if (zorro_esp_read8(esp, ESP_STATUS) & ESP_STAT_INTR)
		return 1;

	return 0;
}

static int cyber_esp_irq_pending(struct esp *esp)
{
	struct cyber_dma_registers __iomem *dregs = esp->dma_regs;
	unsigned char dma_status = readb(&dregs->cond_reg);

	/* It's important to check the DMA IRQ bit in the correct way! */
	return ((zorro_esp_read8(esp, ESP_STATUS) & ESP_STAT_INTR) &&
		(dma_status & CYBER_DMA_HNDL_INTR));
}

static int fastlane_esp_irq_pending(struct esp *esp)
{
	struct fastlane_dma_registers __iomem *dregs = esp->dma_regs;
	unsigned char dma_status;

	dma_status = readb(&dregs->cond_reg);

	if (dma_status & FASTLANE_DMA_IACT)
		return 0;	/* not our IRQ */

	/* Return non-zero if ESP requested IRQ */
	return (
		(dma_status & FASTLANE_DMA_CREQ) &&
		(!(dma_status & FASTLANE_DMA_MINT)) &&
		(zorro_esp_read8(esp, ESP_STATUS) & ESP_STAT_INTR));
}

static u32 zorro_esp_dma_length_limit(struct esp *esp, u32 dma_addr,
				      u32 dma_len)
{
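	/*
	 * Only the 16-bit transfer counter (ESP_TCLOW/ESP_TCMED) is
	 * programmed in the send_dma_cmd routines below, so cap each
	 * DMA chunk accordingly.
	 */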
	return dma_len > 0xFFFF ? 0xFFFF : dma_len;
}

static void zorro_esp_reset_dma(struct esp *esp)
{
	/* nothing to do here */
}

static void zorro_esp_dma_drain(struct esp *esp)
{
	/* nothing to do here */
}

static void zorro_esp_dma_invalidate(struct esp *esp)
{
	/* nothing to do here */
}

static void fastlane_esp_dma_invalidate(struct esp *esp)
{
	struct zorro_esp_priv *zep = dev_get_drvdata(esp->dev);
	struct fastlane_dma_registers __iomem *dregs = esp->dma_regs;
	unsigned char *ctrl_data = &zep->ctrl_data;

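	/*
	 * Drop the WRITE/ENABLE bits from the shadow control byte, pulse
	 * the DMA clear strobe and write a zero address to the board base.
	 */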
	*ctrl_data = (*ctrl_data & FASTLANE_DMA_MASK);
	writeb(0, &dregs->clear_strobe);
	z_writel(0, zep->board_base);
}

/* Blizzard 1230/60 SCSI-IV DMA */

static void zorro_esp_send_blz1230_dma_cmd(struct esp *esp, u32 addr,
			u32 esp_count, u32 dma_count, int write, u8 cmd)
{
	struct blz1230_dma_registers __iomem *dregs = esp->dma_regs;
	u8 phase = esp->sreg & ESP_STAT_PMASK;

	/*
	 * Use PIO if transferring message bytes to esp->command_block_dma.
	 * PIO requires a virtual address, so substitute esp->command_block
	 * for addr.
	 */
	if (phase == ESP_MIP && addr == esp->command_block_dma) {
		esp_send_pio_cmd(esp, (u32)esp->command_block, esp_count,
				 dma_count, write, cmd);
		return;
	}

	/* Clear the results of a possible prior esp->ops->send_dma_cmd() */
	esp->send_cmd_error = 0;
	esp->send_cmd_residual = 0;

	if (write)
		/* DMA receive */
		dma_sync_single_for_device(esp->dev, addr, esp_count,
					   DMA_FROM_DEVICE);
	else
		/* DMA send */
		dma_sync_single_for_device(esp->dev, addr, esp_count,
					   DMA_TO_DEVICE);

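	/*
	 * The DMA engine takes the address shifted right by one bit, with
	 * the transfer direction encoded in the top bit of the value.
	 */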
	addr >>= 1;
	if (write)
		addr &= ~(DMA_WRITE);
	else
		addr |= DMA_WRITE;

	writeb((addr >> 24) & 0xff, &dregs->dma_latch);
	writeb((addr >> 24) & 0xff, &dregs->dma_addr);
	writeb((addr >> 16) & 0xff, &dregs->dma_addr);
	writeb((addr >> 8) & 0xff, &dregs->dma_addr);
	writeb(addr & 0xff, &dregs->dma_addr);

	scsi_esp_cmd(esp, ESP_CMD_DMA);
	zorro_esp_write8(esp, (esp_count >> 0) & 0xff, ESP_TCLOW);
	zorro_esp_write8(esp, (esp_count >> 8) & 0xff, ESP_TCMED);

	scsi_esp_cmd(esp, cmd);
}

/* Blizzard 1230-II DMA */

static void zorro_esp_send_blz1230II_dma_cmd(struct esp *esp, u32 addr,
			u32 esp_count, u32 dma_count, int write, u8 cmd)
{
	struct blz1230II_dma_registers __iomem *dregs = esp->dma_regs;
	u8 phase = esp->sreg & ESP_STAT_PMASK;

	/* Use PIO if transferring message bytes to esp->command_block_dma */
	if (phase == ESP_MIP && addr == esp->command_block_dma) {
		esp_send_pio_cmd(esp, (u32)esp->command_block, esp_count,
				 dma_count, write, cmd);
		return;
	}

	esp->send_cmd_error = 0;
	esp->send_cmd_residual = 0;

	if (write)
		/* DMA receive */
		dma_sync_single_for_device(esp->dev, addr, esp_count,
					   DMA_FROM_DEVICE);
	else
		/* DMA send */
		dma_sync_single_for_device(esp->dev, addr, esp_count,
					   DMA_TO_DEVICE);

	addr >>= 1;
	if (write)
		addr &= ~(DMA_WRITE);
	else
		addr |= DMA_WRITE;

	writeb((addr >> 24) & 0xff, &dregs->dma_latch);
	writeb((addr >> 16) & 0xff, &dregs->dma_addr);
	writeb((addr >> 8) & 0xff, &dregs->dma_addr);
	writeb(addr & 0xff, &dregs->dma_addr);

	scsi_esp_cmd(esp, ESP_CMD_DMA);
	zorro_esp_write8(esp, (esp_count >> 0) & 0xff, ESP_TCLOW);
	zorro_esp_write8(esp, (esp_count >> 8) & 0xff, ESP_TCMED);

	scsi_esp_cmd(esp, cmd);
}

/* Blizzard 2060 DMA */

static void zorro_esp_send_blz2060_dma_cmd(struct esp *esp, u32 addr,
			u32 esp_count, u32 dma_count, int write, u8 cmd)
{
	struct blz2060_dma_registers __iomem *dregs = esp->dma_regs;
	u8 phase = esp->sreg & ESP_STAT_PMASK;

	/* Use PIO if transferring message bytes to esp->command_block_dma */
	if (phase == ESP_MIP && addr == esp->command_block_dma) {
		esp_send_pio_cmd(esp, (u32)esp->command_block, esp_count,
				 dma_count, write, cmd);
		return;
	}

	esp->send_cmd_error = 0;
	esp->send_cmd_residual = 0;

	if (write)
		/* DMA receive */
		dma_sync_single_for_device(esp->dev, addr, esp_count,
					   DMA_FROM_DEVICE);
	else
		/* DMA send */
		dma_sync_single_for_device(esp->dev, addr, esp_count,
					   DMA_TO_DEVICE);

	addr >>= 1;
	if (write)
		addr &= ~(DMA_WRITE);
	else
		addr |= DMA_WRITE;

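	/* The 2060 takes its DMA address bytes LSB first */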
	writeb(addr & 0xff, &dregs->dma_addr3);
	writeb((addr >> 8) & 0xff, &dregs->dma_addr2);
	writeb((addr >> 16) & 0xff, &dregs->dma_addr1);
	writeb((addr >> 24) & 0xff, &dregs->dma_addr0);

	scsi_esp_cmd(esp, ESP_CMD_DMA);
	zorro_esp_write8(esp, (esp_count >> 0) & 0xff, ESP_TCLOW);
	zorro_esp_write8(esp, (esp_count >> 8) & 0xff, ESP_TCMED);

	scsi_esp_cmd(esp, cmd);
}

/* Cyberstorm I DMA */

static void zorro_esp_send_cyber_dma_cmd(struct esp *esp, u32 addr,
			u32 esp_count, u32 dma_count, int write, u8 cmd)
{
	struct zorro_esp_priv *zep = dev_get_drvdata(esp->dev);
	struct cyber_dma_registers __iomem *dregs = esp->dma_regs;
	u8 phase = esp->sreg & ESP_STAT_PMASK;
	unsigned char *ctrl_data = &zep->ctrl_data;

	/* Use PIO if transferring message bytes to esp->command_block_dma */
	if (phase == ESP_MIP && addr == esp->command_block_dma) {
		esp_send_pio_cmd(esp, (u32)esp->command_block, esp_count,
				 dma_count, write, cmd);
		return;
	}

	esp->send_cmd_error = 0;
	esp->send_cmd_residual = 0;

	zorro_esp_write8(esp, (esp_count >> 0) & 0xff, ESP_TCLOW);
	zorro_esp_write8(esp, (esp_count >> 8) & 0xff, ESP_TCMED);

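	/*
	 * The transfer direction is encoded both in bit 0 of the DMA
	 * address and in the CYBER_DMA_WRITE control bit.
	 */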
	if (write) {
		/* DMA receive */
		dma_sync_single_for_device(esp->dev, addr, esp_count,
					   DMA_FROM_DEVICE);
		addr &= ~(1);
	} else {
		/* DMA send */
		dma_sync_single_for_device(esp->dev, addr, esp_count,
					   DMA_TO_DEVICE);
		addr |= 1;
	}

	writeb((addr >> 24) & 0xff, &dregs->dma_addr0);
	writeb((addr >> 16) & 0xff, &dregs->dma_addr1);
	writeb((addr >> 8) & 0xff, &dregs->dma_addr2);
	writeb(addr & 0xff, &dregs->dma_addr3);

	if (write)
		*ctrl_data &= ~(CYBER_DMA_WRITE);
	else
		*ctrl_data |= CYBER_DMA_WRITE;

	*ctrl_data &= ~(CYBER_DMA_Z3);	/* Z2, do 16 bit DMA */

	writeb(*ctrl_data, &dregs->ctrl_reg);

	scsi_esp_cmd(esp, cmd);
}

/* Cyberstorm II DMA */

static void zorro_esp_send_cyberII_dma_cmd(struct esp *esp, u32 addr,
			u32 esp_count, u32 dma_count, int write, u8 cmd)
{
	struct cyberII_dma_registers __iomem *dregs = esp->dma_regs;
	u8 phase = esp->sreg & ESP_STAT_PMASK;

	/* Use PIO if transferring message bytes to esp->command_block_dma */
	if (phase == ESP_MIP && addr == esp->command_block_dma) {
		esp_send_pio_cmd(esp, (u32)esp->command_block, esp_count,
				 dma_count, write, cmd);
		return;
	}

	esp->send_cmd_error = 0;
	esp->send_cmd_residual = 0;

	zorro_esp_write8(esp, (esp_count >> 0) & 0xff, ESP_TCLOW);
	zorro_esp_write8(esp, (esp_count >> 8) & 0xff, ESP_TCMED);

	if (write) {
		/* DMA receive */
		dma_sync_single_for_device(esp->dev, addr, esp_count,
					   DMA_FROM_DEVICE);
		addr &= ~(1);
	} else {
		/* DMA send */
		dma_sync_single_for_device(esp->dev, addr, esp_count,
					   DMA_TO_DEVICE);
		addr |= 1;
	}

	writeb((addr >> 24) & 0xff, &dregs->dma_addr0);
	writeb((addr >> 16) & 0xff, &dregs->dma_addr1);
	writeb((addr >> 8) & 0xff, &dregs->dma_addr2);
	writeb(addr & 0xff, &dregs->dma_addr3);

	scsi_esp_cmd(esp, cmd);
}

/* Fastlane DMA */

static void zorro_esp_send_fastlane_dma_cmd(struct esp *esp, u32 addr,
			u32 esp_count, u32 dma_count, int write, u8 cmd)
{
	struct zorro_esp_priv *zep = dev_get_drvdata(esp->dev);
	struct fastlane_dma_registers __iomem *dregs = esp->dma_regs;
	u8 phase = esp->sreg & ESP_STAT_PMASK;
	unsigned char *ctrl_data = &zep->ctrl_data;

	/* Use PIO if transferring message bytes to esp->command_block_dma */
	if (phase == ESP_MIP && addr == esp->command_block_dma) {
		esp_send_pio_cmd(esp, (u32)esp->command_block, esp_count,
				 dma_count, write, cmd);
		return;
	}

	esp->send_cmd_error = 0;
	esp->send_cmd_residual = 0;

	zorro_esp_write8(esp, (esp_count >> 0) & 0xff, ESP_TCLOW);
	zorro_esp_write8(esp, (esp_count >> 8) & 0xff, ESP_TCMED);

	if (write) {
		/* DMA receive */
		dma_sync_single_for_device(esp->dev, addr, esp_count,
					   DMA_FROM_DEVICE);
		addr &= ~(1);
	} else {
		/* DMA send */
		dma_sync_single_for_device(esp->dev, addr, esp_count,
					   DMA_TO_DEVICE);
		addr |= 1;
	}

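	/*
	 * Clear the DMA engine, then latch the DMA address by writing it to
	 * the matching offset inside the board's mapped address window.
	 */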
	writeb(0, &dregs->clear_strobe);
	z_writel(addr, ((addr & 0x00ffffff) + zep->board_base));

	if (write) {
		*ctrl_data = (*ctrl_data & FASTLANE_DMA_MASK) |
			FASTLANE_DMA_ENABLE;
	} else {
		*ctrl_data = ((*ctrl_data & FASTLANE_DMA_MASK) |
			FASTLANE_DMA_ENABLE |
			FASTLANE_DMA_WRITE);
	}

	writeb(*ctrl_data, &dregs->ctrl_reg);

	scsi_esp_cmd(esp, cmd);
}

static int zorro_esp_dma_error(struct esp *esp)
{
	return esp->send_cmd_error;
}

/* per-board ESP driver ops */

static const struct esp_driver_ops blz1230_esp_ops = {
	.esp_write8		= zorro_esp_write8,
	.esp_read8		= zorro_esp_read8,
	.irq_pending		= zorro_esp_irq_pending,
	.dma_length_limit	= zorro_esp_dma_length_limit,
	.reset_dma		= zorro_esp_reset_dma,
	.dma_drain		= zorro_esp_dma_drain,
	.dma_invalidate		= zorro_esp_dma_invalidate,
	.send_dma_cmd		= zorro_esp_send_blz1230_dma_cmd,
	.dma_error		= zorro_esp_dma_error,
};

static const struct esp_driver_ops blz1230II_esp_ops = {
	.esp_write8		= zorro_esp_write8,
	.esp_read8		= zorro_esp_read8,
	.irq_pending		= zorro_esp_irq_pending,
	.dma_length_limit	= zorro_esp_dma_length_limit,
	.reset_dma		= zorro_esp_reset_dma,
	.dma_drain		= zorro_esp_dma_drain,
	.dma_invalidate		= zorro_esp_dma_invalidate,
	.send_dma_cmd		= zorro_esp_send_blz1230II_dma_cmd,
	.dma_error		= zorro_esp_dma_error,
};

static const struct esp_driver_ops blz2060_esp_ops = {
	.esp_write8		= zorro_esp_write8,
	.esp_read8		= zorro_esp_read8,
	.irq_pending		= zorro_esp_irq_pending,
	.dma_length_limit	= zorro_esp_dma_length_limit,
	.reset_dma		= zorro_esp_reset_dma,
	.dma_drain		= zorro_esp_dma_drain,
	.dma_invalidate		= zorro_esp_dma_invalidate,
	.send_dma_cmd		= zorro_esp_send_blz2060_dma_cmd,
	.dma_error		= zorro_esp_dma_error,
};

static const struct esp_driver_ops cyber_esp_ops = {
	.esp_write8		= zorro_esp_write8,
	.esp_read8		= zorro_esp_read8,
	.irq_pending		= cyber_esp_irq_pending,
	.dma_length_limit	= zorro_esp_dma_length_limit,
	.reset_dma		= zorro_esp_reset_dma,
	.dma_drain		= zorro_esp_dma_drain,
	.dma_invalidate		= zorro_esp_dma_invalidate,
	.send_dma_cmd		= zorro_esp_send_cyber_dma_cmd,
	.dma_error		= zorro_esp_dma_error,
};

static const struct esp_driver_ops cyberII_esp_ops = {
	.esp_write8		= zorro_esp_write8,
	.esp_read8		= zorro_esp_read8,
	.irq_pending		= zorro_esp_irq_pending,
	.dma_length_limit	= zorro_esp_dma_length_limit,
	.reset_dma		= zorro_esp_reset_dma,
	.dma_drain		= zorro_esp_dma_drain,
	.dma_invalidate		= zorro_esp_dma_invalidate,
	.send_dma_cmd		= zorro_esp_send_cyberII_dma_cmd,
	.dma_error		= zorro_esp_dma_error,
};

static const struct esp_driver_ops fastlane_esp_ops = {
	.esp_write8		= zorro_esp_write8,
	.esp_read8		= zorro_esp_read8,
	.irq_pending		= fastlane_esp_irq_pending,
	.dma_length_limit	= zorro_esp_dma_length_limit,
	.reset_dma		= zorro_esp_reset_dma,
	.dma_drain		= zorro_esp_dma_drain,
	.dma_invalidate		= fastlane_esp_dma_invalidate,
	.send_dma_cmd		= zorro_esp_send_fastlane_dma_cmd,
	.dma_error		= zorro_esp_dma_error,
};

/* Zorro driver config data */

struct zorro_driver_data {
	const char *name;
	unsigned long offset;
	unsigned long dma_offset;
	int absolute;	/* offset is absolute address */
	int scsi_option;
	const struct esp_driver_ops *esp_ops;
};

/* board types */

enum {
	ZORRO_BLZ1230,
	ZORRO_BLZ1230II,
	ZORRO_BLZ2060,
	ZORRO_CYBER,
	ZORRO_CYBERII,
	ZORRO_FASTLANE,
};

/* per-board config data */

static const struct zorro_driver_data zorro_esp_boards[] = {
	[ZORRO_BLZ1230] = {
		.name = "Blizzard 1230",
		.offset = 0x8000,
		.dma_offset = 0x10000,
		.scsi_option = 1,
		.esp_ops = &blz1230_esp_ops,
	},
	[ZORRO_BLZ1230II] = {
		.name = "Blizzard 1230II",
		.offset = 0x10000,
		.dma_offset = 0x10021,
		.scsi_option = 1,
		.esp_ops = &blz1230II_esp_ops,
	},
	[ZORRO_BLZ2060] = {
		.name = "Blizzard 2060",
		.offset = 0x1ff00,
		.dma_offset = 0x1ffe0,
		.esp_ops = &blz2060_esp_ops,
	},
	[ZORRO_CYBER] = {
		.name = "CyberStormI",
		.offset = 0xf400,
		.dma_offset = 0xf800,
		.esp_ops = &cyber_esp_ops,
	},
	[ZORRO_CYBERII] = {
		.name = "CyberStormII",
		.offset = 0x1ff03,
		.dma_offset = 0x1ff43,
		.scsi_option = 1,
		.esp_ops = &cyberII_esp_ops,
	},
	[ZORRO_FASTLANE] = {
		.name = "Fastlane",
		.offset = 0x1000001,
		.dma_offset = 0x1000041,
		.esp_ops = &fastlane_esp_ops,
	},
};

static const struct zorro_device_id zorro_esp_zorro_tbl[] = {
	{	/* Blizzard 1230 IV */
		.id = ZORRO_ID(PHASE5, 0x11, 0),
		.driver_data = ZORRO_BLZ1230,
	},
	{	/* Blizzard 1230 II (Zorro II) or Fastlane (Zorro III) */
		.id = ZORRO_ID(PHASE5, 0x0B, 0),
		.driver_data = ZORRO_BLZ1230II,
	},
	{	/* Blizzard 2060 */
		.id = ZORRO_ID(PHASE5, 0x18, 0),
		.driver_data = ZORRO_BLZ2060,
	},
	{	/* Cyberstorm */
		.id = ZORRO_ID(PHASE5, 0x0C, 0),
		.driver_data = ZORRO_CYBER,
	},
	{	/* Cyberstorm II */
		.id = ZORRO_ID(PHASE5, 0x19, 0),
		.driver_data = ZORRO_CYBERII,
	},
	{ 0 }
};
MODULE_DEVICE_TABLE(zorro, zorro_esp_zorro_tbl);

static int zorro_esp_probe(struct zorro_dev *z,
			   const struct zorro_device_id *ent)
{
	struct scsi_host_template *tpnt = &scsi_esp_template;
	struct Scsi_Host *host;
	struct esp *esp;
	const struct zorro_driver_data *zdd;
	struct zorro_esp_priv *zep;
	unsigned long board, ioaddr, dmaaddr;
	int err;

	board = zorro_resource_start(z);
	zdd = &zorro_esp_boards[ent->driver_data];

	pr_info("%s found at address 0x%lx.\n", zdd->name, board);

	zep = kzalloc(sizeof(*zep), GFP_KERNEL);
	if (!zep) {
		pr_err("Can't allocate device private data!\n");
		return -ENOMEM;
	}

	/* let's figure out whether we have a Zorro II or Zorro III board */
	if ((z->rom.er_Type & ERT_TYPEMASK) == ERT_ZORROIII) {
		if (board > 0xffffff)
			zep->zorro3 = 1;
	} else {
		/*
		 * Even though most of these boards identify as Zorro II,
		 * they are in fact CPU expansion slot boards and have full
		 * access to all of memory. Fix up DMA bitmask here.
		 */
		z->dev.coherent_dma_mask = DMA_BIT_MASK(32);
	}

	/*
	 * If Zorro III and ID matches Fastlane, our device table entry
	 * contains data for the Blizzard 1230 II board which does share the
	 * same ID. Fix up device table entry here.
	 * TODO: Some Cyberstorm 060 boards also share this ID but would need
	 * to use the Cyberstorm I driver data ... we catch this by checking
	 * for presence of ESP chip later, but don't try to fix up yet.
	 */
	if (zep->zorro3 && ent->driver_data == ZORRO_BLZ1230II) {
		pr_info("%s at address 0x%lx is Fastlane Z3, fixing data!\n",
			zdd->name, board);
		zdd = &zorro_esp_boards[ZORRO_FASTLANE];
	}

	if (zdd->absolute) {
		ioaddr = zdd->offset;
		dmaaddr = zdd->dma_offset;
	} else {
		ioaddr = board + zdd->offset;
		dmaaddr = board + zdd->dma_offset;
	}

	if (!zorro_request_device(z, zdd->name)) {
		pr_err("cannot reserve region 0x%lx, abort\n",
		       board);
		err = -EBUSY;
		goto fail_free_zep;
	}

	host = scsi_host_alloc(tpnt, sizeof(struct esp));

	if (!host) {
		pr_err("No host detected; board configuration problem?\n");
		err = -ENOMEM;
		goto fail_release_device;
	}

	host->base = ioaddr;
	host->this_id = 7;

	esp = shost_priv(host);
	esp->host = host;
	esp->dev = &z->dev;

	esp->scsi_id = host->this_id;
	esp->scsi_id_mask = (1 << esp->scsi_id);

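	/* ESP clock input: 40 MHz on all boards handled by this driver */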
	esp->cfreq = 40000000;

	zep->esp = esp;

	dev_set_drvdata(esp->dev, zep);

	/* additional setup required for Fastlane */
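	/*
	 * ent->driver_data still holds the shared Blizzard 1230 II ID;
	 * only the local zdd pointer was fixed up above.
	 */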
	if (zep->zorro3 && ent->driver_data == ZORRO_BLZ1230II) {
		/* map full address space up to ESP base for DMA */
		zep->board_base = ioremap_nocache(board,
						  FASTLANE_ESP_ADDR-1);
		if (!zep->board_base) {
			pr_err("Cannot allocate board address space\n");
			err = -ENOMEM;
			goto fail_free_host;
		}
		/* initialize DMA control shadow register */
		zep->ctrl_data = (FASTLANE_DMA_FCODE |
				  FASTLANE_DMA_EDI | FASTLANE_DMA_ESI);
	}

	esp->ops = zdd->esp_ops;

	if (ioaddr > 0xffffff)
		esp->regs = ioremap_nocache(ioaddr, 0x20);
	else
		/* ZorroII address space remapped nocache by early startup */
		esp->regs = ZTWO_VADDR(ioaddr);

	if (!esp->regs) {
		err = -ENOMEM;
		goto fail_unmap_fastlane;
	}

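	/* FIFO data register, same 4-byte register spacing as the accessors above */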
	esp->fifo_reg = esp->regs + ESP_FDATA * 4;

	/* Check whether a Blizzard 12x0 or CyberstormII really has SCSI */
	if (zdd->scsi_option) {
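		/*
		 * Write a test pattern to the config register; if it does
		 * not read back, assume no ESP chip is fitted.
		 */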
		zorro_esp_write8(esp, (ESP_CONFIG1_PENABLE | 7), ESP_CFG1);
		if (zorro_esp_read8(esp, ESP_CFG1) != (ESP_CONFIG1_PENABLE|7)) {
			err = -ENODEV;
			goto fail_unmap_regs;
		}
	}

	if (zep->zorro3) {
		/*
		 * Only Fastlane Z3 for now - add switch for correct struct
		 * dma_registers size if adding any more
		 */
		esp->dma_regs = ioremap_nocache(dmaaddr,
				sizeof(struct fastlane_dma_registers));
	} else
		/* ZorroII address space remapped nocache by early startup */
		esp->dma_regs = ZTWO_VADDR(dmaaddr);

	if (!esp->dma_regs) {
		err = -ENOMEM;
		goto fail_unmap_regs;
	}

	esp->command_block = dma_alloc_coherent(esp->dev, 16,
						&esp->command_block_dma,
						GFP_KERNEL);

	if (!esp->command_block) {
		err = -ENOMEM;
		goto fail_unmap_dma_regs;
	}

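	/* Zorro boards share the Amiga PORTS interrupt, hence IRQF_SHARED */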
	host->irq = IRQ_AMIGA_PORTS;
	err = request_irq(host->irq, scsi_esp_intr, IRQF_SHARED,
			  "Amiga Zorro ESP", esp);
	if (err < 0) {
		err = -ENODEV;
		goto fail_free_command_block;
	}

	/* register the chip */
	err = scsi_esp_register(esp);

	if (err) {
		err = -ENOMEM;
		goto fail_free_irq;
	}

	return 0;

fail_free_irq:
	free_irq(host->irq, esp);

fail_free_command_block:
	dma_free_coherent(esp->dev, 16,
			  esp->command_block,
			  esp->command_block_dma);

fail_unmap_dma_regs:
	if (zep->zorro3)
		iounmap(esp->dma_regs);

fail_unmap_regs:
	if (ioaddr > 0xffffff)
		iounmap(esp->regs);

fail_unmap_fastlane:
	if (zep->zorro3)
		iounmap(zep->board_base);

fail_free_host:
	scsi_host_put(host);

fail_release_device:
	zorro_release_device(z);

fail_free_zep:
	kfree(zep);

	return err;
}

static void zorro_esp_remove(struct zorro_dev *z)
{
	struct zorro_esp_priv *zep = dev_get_drvdata(&z->dev);
	struct esp *esp = zep->esp;
	struct Scsi_Host *host = esp->host;

	scsi_esp_unregister(esp);

	free_irq(host->irq, esp);
	dma_free_coherent(esp->dev, 16,
			  esp->command_block,
			  esp->command_block_dma);

	if (zep->zorro3) {
		iounmap(zep->board_base);
		iounmap(esp->dma_regs);
	}

	if (host->base > 0xffffff)
		iounmap(esp->regs);

	scsi_host_put(host);

	zorro_release_device(z);

	kfree(zep);
}

static struct zorro_driver zorro_esp_driver = {
	.name	  = KBUILD_MODNAME,
	.id_table = zorro_esp_zorro_tbl,
	.probe	  = zorro_esp_probe,
	.remove	  = zorro_esp_remove,
};

static int __init zorro_esp_scsi_init(void)
{
	return zorro_register_driver(&zorro_esp_driver);
}

static void __exit zorro_esp_scsi_exit(void)
{
	zorro_unregister_driver(&zorro_esp_driver);
}

module_init(zorro_esp_scsi_init);
module_exit(zorro_esp_scsi_exit);