Commit | Line | Data |
---|---|---|
73b6a2be RK |
1 | #include <linux/kernel.h> |
2 | #include <linux/module.h> | |
3 | #include <linux/init.h> | |
4 | #include <linux/blkdev.h> | |
5 | #include <scsi/scsi_host.h> | |
6 | #include <linux/ata.h> | |
7 | #include <linux/libata.h> | |
8 | ||
9 | #include <asm/dma.h> | |
10 | #include <asm/ecard.h> | |
11 | ||
#define DRV_NAME "pata_icside"

/* Offset of the card identification bits within the podule space (read in probe) */
#define ICS_IDENT_OFFSET 0x2280

/*
 * V5 card: interrupt status and interrupt-mask registers.
 * Per the irqenable/irqdisable handlers below: writing 0 to INTROFFSET
 * enables the card interrupt, reading it disables it.
 */
#define ICS_ARCIN_V5_INTRSTAT 0x0000
#define ICS_ARCIN_V5_INTROFFSET 0x0004

/*
 * V6 card: one interrupt offset/status register pair per IDE channel.
 * Bit 0 of INTRSTAT_* indicates a pending interrupt (see irqpending handler).
 */
#define ICS_ARCIN_V6_INTROFFSET_1 0x2200
#define ICS_ARCIN_V6_INTRSTAT_1 0x2290
#define ICS_ARCIN_V6_INTROFFSET_2 0x3200
#define ICS_ARCIN_V6_INTRSTAT_2 0x3290
23 | ||
/*
 * Layout of one IDE register block within the card's memory window.
 * Used by pata_icside_setup_ioaddr() to compute each taskfile address.
 */
struct portinfo {
	unsigned int dataoffset;	/* offset of the command/data register block */
	unsigned int ctrloffset;	/* offset of the control/altstatus register */
	unsigned int stepping;		/* left-shift applied to the ATA register number */
};
29 | ||
/* Register layout for the single port on a V5 card */
static const struct portinfo pata_icside_portinfo_v5 = {
	.dataoffset	= 0x2800,
	.ctrloffset	= 0x2b80,
	.stepping	= 6,
};

/* Register layout for the first port on a V6 card */
static const struct portinfo pata_icside_portinfo_v6_1 = {
	.dataoffset	= 0x2000,
	.ctrloffset	= 0x2380,
	.stepping	= 6,
};

/* Register layout for the second port on a V6 card */
static const struct portinfo pata_icside_portinfo_v6_2 = {
	.dataoffset	= 0x3000,
	.ctrloffset	= 0x3380,
	.stepping	= 6,
};
47 | ||
/* Maximum scatterlist entries we hand to the IOMD DMA controller */
#define PATA_ICSIDE_MAX_SG	128

/*
 * Per-card driver state, shared by both ports of a V6 card
 * (stored in ata_host->private_data and ec->irq_data).
 */
struct pata_icside_state {
	void __iomem *irq_port;		/* base for the interrupt control registers */
	void __iomem *ioc_base;		/* V6 only: port-select / EASI-enable register */
	unsigned int type;		/* ICS_TYPE_* card variant */
	unsigned int dma;		/* DMA channel, or NO_DMA */
	struct {
		u8 port_sel;		/* value written to ioc_base to route DMA */
		u8 disabled;		/* set when the port has been shut down */
		unsigned int speed[ATA_MAX_DEVICES];	/* IOMD cycle time per device, ns */
	} port[2];
	struct scatterlist sg[PATA_ICSIDE_MAX_SG];	/* contiguous copy of the qc's sg list */
};
62 | ||
f95637d2 RK |
/*
 * Probe-time information gathered by the per-variant register functions
 * and consumed by pata_icside_add_ports().  Lives on the probe stack.
 */
struct pata_icside_info {
	struct pata_icside_state *state;
	struct expansion_card *ec;
	void __iomem *base;		/* base of the IDE register window */
	void __iomem *irqaddr;		/* V5 only: interrupt status address for ecard */
	unsigned int irqmask;		/* V5 only: interrupt status mask */
	const expansioncard_ops_t *irqops;	/* per-variant irq enable/disable ops */
	unsigned int mwdma_mask;	/* MWDMA modes usable (set when DMA available) */
	unsigned int nr_ports;		/* 1 for V5, 2 for V6 */
	const struct portinfo *port[2];	/* register layout for each port */
};
74 | ||
73b6a2be RK |
/* Card variants, decoded from four identification bits in the probe routine */
#define ICS_TYPE_A3IN	0
#define ICS_TYPE_A3USER	1
#define ICS_TYPE_V6	3
#define ICS_TYPE_V5	15
#define ICS_TYPE_NOTYPE	((unsigned int)-1)	/* ident region not mappable */
80 | ||
/* ---------------- Version 5 PCB Support Functions --------------------- */
/* Prototype: pata_icside_irqenable_arcin_v5 (struct expansion_card *ec, int irqnr)
 * Purpose : enable interrupts from card
 */
static void pata_icside_irqenable_arcin_v5 (struct expansion_card *ec, int irqnr)
{
	struct pata_icside_state *state = ec->irq_data;

	/* Writing the interrupt-offset register enables the card IRQ */
	writeb(0, state->irq_port + ICS_ARCIN_V5_INTROFFSET);
}
91 | ||
/* Prototype: pata_icside_irqdisable_arcin_v5 (struct expansion_card *ec, int irqnr)
 * Purpose : disable interrupts from card
 */
static void pata_icside_irqdisable_arcin_v5 (struct expansion_card *ec, int irqnr)
{
	struct pata_icside_state *state = ec->irq_data;

	/* Reading the interrupt-offset register disables the card IRQ;
	 * the value read is irrelevant. */
	readb(state->irq_port + ICS_ARCIN_V5_INTROFFSET);
}
101 | ||
/* Expansion-card interrupt operations for the V5 variant */
static const expansioncard_ops_t pata_icside_ops_arcin_v5 = {
	.irqenable	= pata_icside_irqenable_arcin_v5,
	.irqdisable	= pata_icside_irqdisable_arcin_v5,
};
106 | ||
107 | ||
/* ---------------- Version 6 PCB Support Functions --------------------- */
/* Prototype: pata_icside_irqenable_arcin_v6 (struct expansion_card *ec, int irqnr)
 * Purpose : enable interrupts from card
 */
static void pata_icside_irqenable_arcin_v6 (struct expansion_card *ec, int irqnr)
{
	struct pata_icside_state *state = ec->irq_data;
	void __iomem *base = state->irq_port;

	/* Only re-enable the channels that have not been shut down by
	 * pata_icside_port_disable() - see that function's comments. */
	if (!state->port[0].disabled)
		writeb(0, base + ICS_ARCIN_V6_INTROFFSET_1);
	if (!state->port[1].disabled)
		writeb(0, base + ICS_ARCIN_V6_INTROFFSET_2);
}
122 | ||
/* Prototype: pata_icside_irqdisable_arcin_v6 (struct expansion_card *ec, int irqnr)
 * Purpose : disable interrupts from card
 */
static void pata_icside_irqdisable_arcin_v6 (struct expansion_card *ec, int irqnr)
{
	struct pata_icside_state *state = ec->irq_data;

	/* A read of each channel's interrupt-offset register disables it */
	readb(state->irq_port + ICS_ARCIN_V6_INTROFFSET_1);
	readb(state->irq_port + ICS_ARCIN_V6_INTROFFSET_2);
}
133 | ||
134 | /* Prototype: pata_icside_irqprobe(struct expansion_card *ec) | |
135 | * Purpose : detect an active interrupt from card | |
136 | */ | |
137 | static int pata_icside_irqpending_arcin_v6(struct expansion_card *ec) | |
138 | { | |
139 | struct pata_icside_state *state = ec->irq_data; | |
140 | ||
141 | return readb(state->irq_port + ICS_ARCIN_V6_INTRSTAT_1) & 1 || | |
142 | readb(state->irq_port + ICS_ARCIN_V6_INTRSTAT_2) & 1; | |
143 | } | |
144 | ||
/* Expansion-card interrupt operations for the V6 variant */
static const expansioncard_ops_t pata_icside_ops_arcin_v6 = {
	.irqenable	= pata_icside_irqenable_arcin_v6,
	.irqdisable	= pata_icside_irqdisable_arcin_v6,
	.irqpending	= pata_icside_irqpending_arcin_v6,
};
150 | ||
151 | ||
152 | /* | |
153 | * SG-DMA support. | |
154 | * | |
155 | * Similar to the BM-DMA, but we use the RiscPCs IOMD DMA controllers. | |
156 | * There is only one DMA controller per card, which means that only | |
157 | * one drive can be accessed at one time. NOTE! We do not enforce that | |
158 | * here, but we rely on the main IDE driver spotting that both | |
159 | * interfaces use the same IRQ, which should guarantee this. | |
160 | */ | |
161 | ||
162 | /* | |
163 | * Configure the IOMD to give the appropriate timings for the transfer | |
164 | * mode being requested. We take the advice of the ATA standards, and | |
165 | * calculate the cycle time based on the transfer mode, and the EIDE | |
166 | * MW DMA specs that the drive provides in the IDENTIFY command. | |
167 | * | |
168 | * We have the following IOMD DMA modes to choose from: | |
169 | * | |
170 | * Type Active Recovery Cycle | |
171 | * A 250 (250) 312 (550) 562 (800) | |
172 | * B 187 (200) 250 (550) 437 (750) | |
173 | * C 125 (125) 125 (375) 250 (500) | |
174 | * D 62 (50) 125 (375) 187 (425) | |
175 | * | |
176 | * (figures in brackets are actual measured timings on DIOR/DIOW) | |
177 | * | |
178 | * However, we also need to take care of the read/write active and | |
179 | * recovery timings: | |
180 | * | |
181 | * Read Write | |
182 | * Mode Active -- Recovery -- Cycle IOMD type | |
183 | * MW0 215 50 215 480 A | |
184 | * MW1 80 50 50 150 C | |
185 | * MW2 70 25 25 120 C | |
186 | */ | |
187 | static void pata_icside_set_dmamode(struct ata_port *ap, struct ata_device *adev) | |
188 | { | |
189 | struct pata_icside_state *state = ap->host->private_data; | |
190 | struct ata_timing t; | |
191 | unsigned int cycle; | |
192 | char iomd_type; | |
193 | ||
194 | /* | |
195 | * DMA is based on a 16MHz clock | |
196 | */ | |
197 | if (ata_timing_compute(adev, adev->dma_mode, &t, 1000, 1)) | |
198 | return; | |
199 | ||
200 | /* | |
201 | * Choose the IOMD cycle timing which ensure that the interface | |
202 | * satisfies the measured active, recovery and cycle times. | |
203 | */ | |
204 | if (t.active <= 50 && t.recover <= 375 && t.cycle <= 425) | |
205 | iomd_type = 'D', cycle = 187; | |
206 | else if (t.active <= 125 && t.recover <= 375 && t.cycle <= 500) | |
207 | iomd_type = 'C', cycle = 250; | |
208 | else if (t.active <= 200 && t.recover <= 550 && t.cycle <= 750) | |
209 | iomd_type = 'B', cycle = 437; | |
210 | else | |
211 | iomd_type = 'A', cycle = 562; | |
212 | ||
213 | ata_dev_printk(adev, KERN_INFO, "timings: act %dns rec %dns cyc %dns (%c)\n", | |
214 | t.active, t.recover, t.cycle, iomd_type); | |
215 | ||
216 | state->port[ap->port_no].speed[adev->devno] = cycle; | |
217 | } | |
218 | ||
/*
 * Program the IOMD DMA controller for the queued command and issue the
 * taskfile.  The transfer itself is kicked off later by
 * pata_icside_bmdma_start().
 */
static void pata_icside_bmdma_setup(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct pata_icside_state *state = ap->host->private_data;
	struct scatterlist *sg, *rsg = state->sg;
	unsigned int write = qc->tf.flags & ATA_TFLAG_WRITE;

	/*
	 * We are simplex; BUG if we try to fiddle with DMA
	 * while it's active.
	 */
	BUG_ON(dma_channel_active(state->dma));

	/*
	 * Copy ATAs scattered sg list into a contiguous array of sg
	 */
	ata_for_each_sg(sg, qc) {
		memcpy(rsg, sg, sizeof(*sg));
		rsg++;
	}

	/*
	 * Route the DMA signals to the correct interface
	 */
	writeb(state->port[ap->port_no].port_sel, state->ioc_base);

	/* Timing chosen earlier in pata_icside_set_dmamode() */
	set_dma_speed(state->dma, state->port[ap->port_no].speed[qc->dev->devno]);
	set_dma_sg(state->dma, state->sg, rsg - state->sg);
	set_dma_mode(state->dma, write ? DMA_MODE_WRITE : DMA_MODE_READ);

	/* issue r/w command */
	ap->ops->exec_command(ap, &qc->tf);
}
252 | ||
/* Start the DMA transfer programmed by pata_icside_bmdma_setup() */
static void pata_icside_bmdma_start(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct pata_icside_state *state = ap->host->private_data;

	/* Simplex hardware: the channel must be idle at this point */
	BUG_ON(dma_channel_active(state->dma));
	enable_dma(state->dma);
}
261 | ||
/* Stop the DMA channel; also used as post_internal_cmd in the port ops */
static void pata_icside_bmdma_stop(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct pata_icside_state *state = ap->host->private_data;

	disable_dma(state->dma);

	/* see ata_bmdma_stop */
	ata_altstatus(ap);
}
272 | ||
273 | static u8 pata_icside_bmdma_status(struct ata_port *ap) | |
274 | { | |
275 | struct pata_icside_state *state = ap->host->private_data; | |
276 | void __iomem *irq_port; | |
277 | ||
278 | irq_port = state->irq_port + (ap->port_no ? ICS_ARCIN_V6_INTRSTAT_2 : | |
279 | ICS_ARCIN_V6_INTRSTAT_1); | |
280 | ||
281 | return readb(irq_port) & 1 ? ATA_DMA_INTR : 0; | |
282 | } | |
283 | ||
f95637d2 | 284 | static int icside_dma_init(struct pata_icside_info *info) |
73b6a2be | 285 | { |
f95637d2 RK |
286 | struct pata_icside_state *state = info->state; |
287 | struct expansion_card *ec = info->ec; | |
73b6a2be RK |
288 | int i; |
289 | ||
290 | for (i = 0; i < ATA_MAX_DEVICES; i++) { | |
291 | state->port[0].speed[i] = 480; | |
292 | state->port[1].speed[i] = 480; | |
293 | } | |
294 | ||
295 | if (ec->dma != NO_DMA && !request_dma(ec->dma, DRV_NAME)) { | |
296 | state->dma = ec->dma; | |
f95637d2 | 297 | info->mwdma_mask = 0x07; /* MW0..2 */ |
73b6a2be RK |
298 | } |
299 | ||
300 | return 0; | |
301 | } | |
302 | ||
303 | ||
/* Per-port setup hook: we don't use PRD tables, only the pad buffer */
static int pata_icside_port_start(struct ata_port *ap)
{
	/* No PRD to alloc */
	return ata_pad_alloc(ap, ap->dev);
}
309 | ||
/* SCSI host template - standard libata glue plus our sg limit */
static struct scsi_host_template pata_icside_sht = {
	.module			= THIS_MODULE,
	.name			= DRV_NAME,
	.ioctl			= ata_scsi_ioctl,
	.queuecommand		= ata_scsi_queuecmd,
	.can_queue		= ATA_DEF_QUEUE,
	.this_id		= ATA_SHT_THIS_ID,
	.sg_tablesize		= PATA_ICSIDE_MAX_SG,
	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
	.emulated		= ATA_SHT_EMULATED,
	.use_clustering		= ATA_SHT_USE_CLUSTERING,
	.proc_name		= DRV_NAME,
	.dma_boundary		= ~0, /* no dma boundaries */
	.slave_configure	= ata_scsi_slave_config,
	.slave_destroy		= ata_scsi_slave_destroy,
	.bios_param		= ata_std_bios_param,
};
327 | ||
/* wish this was exported from libata-core */
/* No-op used for the irq_clear hook - this hardware needs no clearing there */
static void ata_dummy_noret(struct ata_port *port)
{
}
332 | ||
/*
 * We need to shut down unused ports to prevent spurious interrupts.
 * FIXME: the libata core doesn't call this function for PATA interfaces.
 */
static void pata_icside_port_disable(struct ata_port *ap)
{
	struct pata_icside_state *state = ap->host->private_data;

	ata_port_printk(ap, KERN_ERR, "disabling icside port\n");

	ata_port_disable(ap);

	/* remembered so irqenable_arcin_v6 won't re-enable this channel */
	state->port[ap->port_no].disabled = 1;

	if (state->type == ICS_TYPE_V6) {
		/*
		 * Disable interrupts from this port, otherwise we
		 * receive spurious interrupts from the floating
		 * interrupt line.
		 */
		void __iomem *irq_port = state->irq_port +
				(ap->port_no ? ICS_ARCIN_V6_INTROFFSET_2 : ICS_ARCIN_V6_INTROFFSET_1);
		readb(irq_port);
	}
}
358 | ||
/*
 * Acknowledge an interrupt: wait for BSY (and optionally DRQ) to clear
 * and return the resulting status register value.  Abnormal status is
 * only logged, not treated as an error here.
 */
static u8 pata_icside_irq_ack(struct ata_port *ap, unsigned int chk_drq)
{
	unsigned int bits = chk_drq ? ATA_BUSY | ATA_DRQ : ATA_BUSY;
	u8 status;

	status = ata_busy_wait(ap, bits, 1000);
	if (status & bits)
		if (ata_msg_err(ap))
			printk(KERN_ERR "abnormal status 0x%X\n", status);

	if (ata_msg_intr(ap))
		printk(KERN_INFO "%s: irq ack: drv_stat 0x%X\n",
			__FUNCTION__, status);

	return status;
}
375 | ||
/*
 * Port operations: mostly stock libata SFF helpers, with our own DMA
 * handlers (IOMD SG-DMA instead of BM-DMA) and port_disable hook.
 */
static struct ata_port_operations pata_icside_port_ops = {
	.port_disable		= pata_icside_port_disable,

	.set_dmamode		= pata_icside_set_dmamode,

	.tf_load		= ata_tf_load,
	.tf_read		= ata_tf_read,
	.exec_command		= ata_exec_command,
	.check_status		= ata_check_status,
	.dev_select		= ata_std_dev_select,

	.cable_detect		= ata_cable_40wire,

	.bmdma_setup		= pata_icside_bmdma_setup,
	.bmdma_start		= pata_icside_bmdma_start,

	.data_xfer		= ata_data_xfer_noirq,

	/* no need to build any PRD tables for DMA */
	.qc_prep		= ata_noop_qc_prep,
	.qc_issue		= ata_qc_issue_prot,

	.freeze			= ata_bmdma_freeze,
	.thaw			= ata_bmdma_thaw,
	.error_handler		= ata_bmdma_error_handler,
	.post_internal_cmd	= pata_icside_bmdma_stop,

	.irq_clear		= ata_dummy_noret,
	.irq_on			= ata_irq_on,
	.irq_ack		= pata_icside_irq_ack,

	.port_start		= pata_icside_port_start,

	.bmdma_stop		= pata_icside_bmdma_stop,
	.bmdma_status		= pata_icside_bmdma_status,
};
412 | ||
f95637d2 RK |
/*
 * Fill in the taskfile register addresses for one port.  Each register
 * sits at (dataoffset + reg << stepping) within the card window; the
 * control/altstatus register has its own offset.
 */
static void __devinit
pata_icside_setup_ioaddr(struct ata_ioports *ioaddr, void __iomem *base,
			 const struct portinfo *info)
{
	void __iomem *cmd = base + info->dataoffset;

	ioaddr->cmd_addr	= cmd;
	ioaddr->data_addr	= cmd + (ATA_REG_DATA << info->stepping);
	ioaddr->error_addr	= cmd + (ATA_REG_ERR << info->stepping);
	ioaddr->feature_addr	= cmd + (ATA_REG_FEATURE << info->stepping);
	ioaddr->nsect_addr	= cmd + (ATA_REG_NSECT << info->stepping);
	ioaddr->lbal_addr	= cmd + (ATA_REG_LBAL << info->stepping);
	ioaddr->lbam_addr	= cmd + (ATA_REG_LBAM << info->stepping);
	ioaddr->lbah_addr	= cmd + (ATA_REG_LBAH << info->stepping);
	ioaddr->device_addr	= cmd + (ATA_REG_DEVICE << info->stepping);
	ioaddr->status_addr	= cmd + (ATA_REG_STATUS << info->stepping);
	ioaddr->command_addr	= cmd + (ATA_REG_CMD << info->stepping);

	ioaddr->ctl_addr	= base + info->ctrloffset;
	ioaddr->altstatus_addr	= ioaddr->ctl_addr;
}
434 | ||
f95637d2 | 435 | static int __devinit pata_icside_register_v5(struct pata_icside_info *info) |
73b6a2be | 436 | { |
f95637d2 | 437 | struct pata_icside_state *state = info->state; |
73b6a2be RK |
438 | void __iomem *base; |
439 | ||
10bdaaa0 | 440 | base = ecardm_iomap(info->ec, ECARD_RES_MEMC, 0, 0); |
73b6a2be RK |
441 | if (!base) |
442 | return -ENOMEM; | |
443 | ||
444 | state->irq_port = base; | |
445 | ||
f95637d2 RK |
446 | info->base = base; |
447 | info->irqaddr = base + ICS_ARCIN_V5_INTRSTAT; | |
448 | info->irqmask = 1; | |
449 | info->irqops = &pata_icside_ops_arcin_v5; | |
450 | info->nr_ports = 1; | |
451 | info->port[0] = &pata_icside_portinfo_v5; | |
73b6a2be RK |
452 | |
453 | return 0; | |
454 | } | |
455 | ||
f95637d2 | 456 | static int __devinit pata_icside_register_v6(struct pata_icside_info *info) |
73b6a2be | 457 | { |
f95637d2 RK |
458 | struct pata_icside_state *state = info->state; |
459 | struct expansion_card *ec = info->ec; | |
73b6a2be RK |
460 | void __iomem *ioc_base, *easi_base; |
461 | unsigned int sel = 0; | |
73b6a2be | 462 | |
10bdaaa0 RK |
463 | ioc_base = ecardm_iomap(ec, ECARD_RES_IOCFAST, 0, 0); |
464 | if (!ioc_base) | |
465 | return -ENOMEM; | |
73b6a2be RK |
466 | |
467 | easi_base = ioc_base; | |
468 | ||
469 | if (ecard_resource_flags(ec, ECARD_RES_EASI)) { | |
10bdaaa0 RK |
470 | easi_base = ecardm_iomap(ec, ECARD_RES_EASI, 0, 0); |
471 | if (!easi_base) | |
472 | return -ENOMEM; | |
73b6a2be RK |
473 | |
474 | /* | |
475 | * Enable access to the EASI region. | |
476 | */ | |
477 | sel = 1 << 5; | |
478 | } | |
479 | ||
480 | writeb(sel, ioc_base); | |
481 | ||
73b6a2be RK |
482 | state->irq_port = easi_base; |
483 | state->ioc_base = ioc_base; | |
484 | state->port[0].port_sel = sel; | |
485 | state->port[1].port_sel = sel | 1; | |
486 | ||
73b6a2be RK |
487 | /* |
488 | * FIXME: work around libata's aversion to calling port_disable. | |
489 | * This permanently disables interrupts on port 0 - bad luck if | |
490 | * you have a drive on that port. | |
491 | */ | |
492 | state->port[0].disabled = 1; | |
493 | ||
f95637d2 RK |
494 | info->base = easi_base; |
495 | info->irqops = &pata_icside_ops_arcin_v6; | |
496 | info->nr_ports = 2; | |
497 | info->port[0] = &pata_icside_portinfo_v6_1; | |
498 | info->port[1] = &pata_icside_portinfo_v6_2; | |
499 | ||
500 | return icside_dma_init(info); | |
501 | } | |
502 | ||
/*
 * Common back end of probe: wire up the expansion-card interrupt
 * handling, allocate the ata_host, configure each port from the
 * gathered info, and activate the host.
 */
static int __devinit pata_icside_add_ports(struct pata_icside_info *info)
{
	struct expansion_card *ec = info->ec;
	struct ata_host *host;
	int i;

	/* V5 cards report interrupt status through a memory location */
	if (info->irqaddr) {
		ec->irqaddr = info->irqaddr;
		ec->irqmask = info->irqmask;
	}
	if (info->irqops)
		ecard_setirq(ec, info->irqops, info->state);

	/*
	 * Be on the safe side - disable interrupts
	 */
	ec->ops->irqdisable(ec, ec->irq);

	host = ata_host_alloc(&ec->dev, info->nr_ports);
	if (!host)
		return -ENOMEM;

	host->private_data = info->state;
	/* one DMA controller per card - see SG-DMA comment above */
	host->flags = ATA_HOST_SIMPLEX;

	for (i = 0; i < info->nr_ports; i++) {
		struct ata_port *ap = host->ports[i];

		ap->pio_mask = 0x1f;	/* PIO modes 0-4 */
		ap->mwdma_mask = info->mwdma_mask;
		ap->flags |= ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST;
		ap->ops = &pata_icside_port_ops;

		pata_icside_setup_ioaddr(&ap->ioaddr, info->base, info->port[i]);
	}

	return ata_host_activate(host, ec->irq, ata_interrupt, 0,
				 &pata_icside_sht);
}
542 | ||
/*
 * Probe an ICS expansion card: claim its resources, read the four card
 * identification bits to determine the variant, perform variant-specific
 * setup, and register the ports.  On any failure the card resources are
 * released; state is devm-allocated, so it is freed with the device.
 */
static int __devinit
pata_icside_probe(struct expansion_card *ec, const struct ecard_id *id)
{
	struct pata_icside_state *state;
	struct pata_icside_info info;
	void __iomem *idmem;
	int ret;

	ret = ecard_request_resources(ec);
	if (ret)
		goto out;

	state = devm_kzalloc(&ec->dev, sizeof(*state), GFP_KERNEL);
	if (!state) {
		ret = -ENOMEM;
		goto release;
	}

	state->type = ICS_TYPE_NOTYPE;
	state->dma = NO_DMA;

	idmem = ecardm_iomap(ec, ECARD_RES_IOCFAST, 0, 0);
	if (idmem) {
		unsigned int type;

		/* one ident bit every 4 bytes, LSB first */
		type = readb(idmem + ICS_IDENT_OFFSET) & 1;
		type |= (readb(idmem + ICS_IDENT_OFFSET + 4) & 1) << 1;
		type |= (readb(idmem + ICS_IDENT_OFFSET + 8) & 1) << 2;
		type |= (readb(idmem + ICS_IDENT_OFFSET + 12) & 1) << 3;
		ecardm_iounmap(ec, idmem);

		state->type = type;
	}

	memset(&info, 0, sizeof(info));
	info.state = state;
	info.ec = ec;

	switch (state->type) {
	case ICS_TYPE_A3IN:
		dev_warn(&ec->dev, "A3IN unsupported\n");
		ret = -ENODEV;
		break;

	case ICS_TYPE_A3USER:
		dev_warn(&ec->dev, "A3USER unsupported\n");
		ret = -ENODEV;
		break;

	case ICS_TYPE_V5:
		ret = pata_icside_register_v5(&info);
		break;

	case ICS_TYPE_V6:
		ret = pata_icside_register_v6(&info);
		break;

	default:
		dev_warn(&ec->dev, "unknown interface type\n");
		ret = -ENODEV;
		break;
	}

	if (ret == 0)
		ret = pata_icside_add_ports(&info);

	if (ret == 0)
		goto out;

 release:
	ecard_release_resources(ec);
 out:
	return ret;
}
617 | ||
/*
 * Shutdown hook (also called from remove): quiesce the card so a soft
 * reboot can read its ROM again.
 */
static void pata_icside_shutdown(struct expansion_card *ec)
{
	struct ata_host *host = ecard_get_drvdata(ec);
	unsigned long flags;

	/*
	 * Disable interrupts from this card.  We need to do
	 * this before disabling EASI since we may be accessing
	 * this register via that region.
	 */
	local_irq_save(flags);
	ec->ops->irqdisable(ec, ec->irq);
	local_irq_restore(flags);

	/*
	 * Reset the ROM pointer so that we can read the ROM
	 * after a soft reboot.  This also disables access to
	 * the IDE taskfile via the EASI region.
	 */
	if (host) {
		struct pata_icside_state *state = host->private_data;
		if (state->ioc_base)
			writeb(0, state->ioc_base);
	}
}
643 | ||
/*
 * Device removal: detach the libata host first (stops I/O), quiesce the
 * card, then release the DMA channel and card resources.
 */
static void __devexit pata_icside_remove(struct expansion_card *ec)
{
	struct ata_host *host = ecard_get_drvdata(ec);
	struct pata_icside_state *state = host->private_data;

	ata_host_detach(host);

	pata_icside_shutdown(ec);

	/*
	 * don't NULL out the drvdata - devres/libata wants it
	 * to free the ata_host structure.
	 */
	if (state->dma != NO_DMA)
		free_dma(state->dma);

	ecard_release_resources(ec);
}
662 | ||
/* Expansion-card IDs we bind to; { 0xffff, 0xffff } terminates the table */
static const struct ecard_id pata_icside_ids[] = {
	{ MANU_ICS,  PROD_ICS_IDE },
	{ MANU_ICS2, PROD_ICS2_IDE },
	{ 0xffff, 0xffff }
};
668 | ||
/* Expansion-card driver glue */
static struct ecard_driver pata_icside_driver = {
	.probe		= pata_icside_probe,
	.remove		= __devexit_p(pata_icside_remove),
	.shutdown	= pata_icside_shutdown,
	.id_table	= pata_icside_ids,
	.drv = {
		.name	= DRV_NAME,
	},
};
678 | ||
/* Module entry point: register with the expansion-card bus */
static int __init pata_icside_init(void)
{
	return ecard_register_driver(&pata_icside_driver);
}
683 | ||
/* Module exit point: unregister from the expansion-card bus */
static void __exit pata_icside_exit(void)
{
	ecard_remove_driver(&pata_icside_driver);
}
688 | ||
/* Module metadata and init/exit registration */
MODULE_AUTHOR("Russell King <rmk@arm.linux.org.uk>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("ICS PATA driver");

module_init(pata_icside_init);
module_exit(pata_icside_exit);