treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 500
[linux-2.6-block.git] / arch / arm / mach-rpc / dma.c
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  *  linux/arch/arm/mach-rpc/dma.c
4  *
5  *  Copyright (C) 1998 Russell King
6  *
7  *  DMA functions specific to RiscPC architecture
8  */
#include <linux/mman.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/kernel.h>

#include <asm/page.h>
#include <asm/dma.h>
#include <asm/fiq.h>
#include <asm/irq.h>
#include <mach/hardware.h>
#include <linux/uaccess.h>

#include <asm/mach/dma.h>
#include <asm/hardware/iomd.h>
24
/* Per-channel state for one IOMD I/O DMA channel. */
struct iomd_dma {
	struct dma_struct	dma;
	unsigned int		state;		/* last h/w buffer loaded (DMA_ST_AB, 0, or ~DMA_ST_AB when idle) */
	unsigned long		base;		/* Controller base address */
	int			irq;		/* Controller IRQ */
	struct scatterlist	cur_sg;		/* Current controller buffer */
	dma_addr_t		dma_addr;	/* next address of the current sg entry */
	unsigned int		dma_len;	/* bytes left in the current sg entry */
};
34
#if 0
/* Disabled: transfer-width encoding kept for reference only. */
typedef enum {
	dma_size_8	= 1,
	dma_size_16	= 2,
	dma_size_32	= 4,
	dma_size_128	= 16
} dma_size_t;
#endif

/* Transfer size field for the control register (2 == dma_size_16 above). */
#define TRANSFER_SIZE	2

/* Register offsets relative to each channel's CURA-equivalent base. */
#define CURA	(0)
#define ENDA	(IOMD_IO0ENDA - IOMD_IO0CURA)
#define CURB	(IOMD_IO0CURB - IOMD_IO0CURA)
#define ENDB	(IOMD_IO0ENDB - IOMD_IO0CURA)
#define CR	(IOMD_IO0CR - IOMD_IO0CURA)
#define ST	(IOMD_IO0ST - IOMD_IO0CURA)
52
/*
 * Prepare the next chunk of the driver's scatterlist for the IOMD
 * controller, clipping it so a single programmed buffer never crosses
 * a page boundary.
 *
 * @sg:   controller buffer descriptor to fill (dma_address/length)
 * @idma: channel state; idma->dma.sg/sgcount walk the scatterlist,
 *        idma->dma_addr/dma_len track the unconsumed remainder of the
 *        current entry.
 *
 * Hardware end flags are OR'd into sg->length: DMA_END_L marks the
 * last transfer of this buffer, DMA_END_S the end of the whole list.
 */
static void iomd_get_next_sg(struct scatterlist *sg, struct iomd_dma *idma)
{
	unsigned long end, offset, flags = 0;

	if (idma->dma.sg) {
		sg->dma_address = idma->dma_addr;
		offset = sg->dma_address & ~PAGE_MASK;

		end = offset + idma->dma_len;

		/* clip the chunk at the end of the current page */
		if (end > PAGE_SIZE)
			end = PAGE_SIZE;

		if (offset + TRANSFER_SIZE >= end)
			flags |= DMA_END_L;

		/*
		 * NOTE(review): this value is written to the END register by
		 * the interrupt handler; it looks like an end offset rather
		 * than a byte count - confirm against the IOMD datasheet.
		 */
		sg->length = end - TRANSFER_SIZE;

		idma->dma_len -= end - offset;
		idma->dma_addr += end - offset;

		if (idma->dma_len == 0) {
			if (idma->dma.sgcount > 1) {
				/* advance to the next scatterlist entry */
				idma->dma.sg = sg_next(idma->dma.sg);
				idma->dma_addr = idma->dma.sg->dma_address;
				idma->dma_len = idma->dma.sg->length;
				idma->dma.sgcount--;
			} else {
				/* scatterlist exhausted */
				idma->dma.sg = NULL;
				flags |= DMA_END_S;
			}
		}
	} else {
		/* nothing left: program a null buffer with both end flags */
		flags = DMA_END_S | DMA_END_L;
		sg->dma_address = 0;
		sg->length = 0;
	}

	sg->length |= flags;
}
93
/*
 * IOMD DMA interrupt handler.  The controller double-buffers (A/B);
 * on each interrupt we reload whichever buffer just drained and flip
 * idma->state to remember which one we programmed last.
 */
static irqreturn_t iomd_dma_handle(int irq, void *dev_id)
{
	struct iomd_dma *idma = dev_id;
	unsigned long base = idma->base;

	do {
		unsigned int status;

		status = iomd_readb(base + ST);
		if (!(status & DMA_ST_INT))
			return IRQ_HANDLED;

		/* fetch a fresh chunk only when the active buffer changed */
		if ((idma->state ^ status) & DMA_ST_AB)
			iomd_get_next_sg(&idma->cur_sg, idma);

		switch (status & (DMA_ST_OFL | DMA_ST_AB)) {
		case DMA_ST_OFL:			/* OIA */
		case DMA_ST_AB:				/* .IB */
			iomd_writel(idma->cur_sg.dma_address, base + CURA);
			iomd_writel(idma->cur_sg.length, base + ENDA);
			idma->state = DMA_ST_AB;
			break;

		case DMA_ST_OFL | DMA_ST_AB:		/* OIB */
		case 0:					/* .IA */
			iomd_writel(idma->cur_sg.dma_address, base + CURB);
			iomd_writel(idma->cur_sg.length, base + ENDB);
			idma->state = 0;
			break;
		}

		/* overflowed with both end flags programmed: all done */
		if (status & DMA_ST_OFL &&
		    idma->cur_sg.length == (DMA_END_S|DMA_END_L))
			break;
	} while (1);

	/* mark the channel idle; ~DMA_ST_AB matches neither buffer state */
	idma->state = ~DMA_ST_AB;
	disable_irq(irq);

	return IRQ_HANDLED;
}
135
136 static int iomd_request_dma(unsigned int chan, dma_t *dma)
137 {
138         struct iomd_dma *idma = container_of(dma, struct iomd_dma, dma);
139
140         return request_irq(idma->irq, iomd_dma_handle,
141                            0, idma->dma.device_id, idma);
142 }
143
144 static void iomd_free_dma(unsigned int chan, dma_t *dma)
145 {
146         struct iomd_dma *idma = container_of(dma, struct iomd_dma, dma);
147
148         free_irq(idma->irq, idma);
149 }
150
/*
 * Dummy device used for dma_map_single() on behalf of ISA-style
 * drivers that supply only a virtual address/count and no struct
 * device of their own.  All-ones masks accept any DMA address.
 */
static struct device isa_dma_dev = {
	.init_name		= "fallback device",
	.coherent_dma_mask	= ~(dma_addr_t)0,
	.dma_mask		= &isa_dma_dev.coherent_dma_mask,
};
156
/*
 * Start (or restart) an IOMD DMA channel.
 *
 * When the buffer set has changed (dma.invalid), an ISA-style caller
 * that provided only addr/count gets a single-entry scatterlist
 * mapped for it, and the controller is reset with DMA_CR_C.  The
 * channel is then enabled with the configured transfer size and
 * direction, and its interrupt unmasked so iomd_dma_handle() can
 * start feeding it buffers.
 */
static void iomd_enable_dma(unsigned int chan, dma_t *dma)
{
	struct iomd_dma *idma = container_of(dma, struct iomd_dma, dma);
	unsigned long dma_base = idma->base;
	unsigned int ctrl = TRANSFER_SIZE | DMA_CR_E;

	if (idma->dma.invalid) {
		idma->dma.invalid = 0;

		/*
		 * Cope with ISA-style drivers which expect cache
		 * coherence.
		 */
		if (!idma->dma.sg) {
			idma->dma.sg = &idma->dma.buf;
			idma->dma.sgcount = 1;
			idma->dma.buf.length = idma->dma.count;
			idma->dma.buf.dma_address = dma_map_single(&isa_dma_dev,
				idma->dma.addr, idma->dma.count,
				idma->dma.dma_mode == DMA_MODE_READ ?
				DMA_FROM_DEVICE : DMA_TO_DEVICE);
		}

		/* reset the controller; matches the handler's "buffer A" state */
		iomd_writeb(DMA_CR_C, dma_base + CR);
		idma->state = DMA_ST_AB;
	}

	/* reads set the direction bit */
	if (idma->dma.dma_mode == DMA_MODE_READ)
		ctrl |= DMA_CR_D;

	iomd_writeb(ctrl, dma_base + CR);
	enable_irq(idma->irq);
}
190
191 static void iomd_disable_dma(unsigned int chan, dma_t *dma)
192 {
193         struct iomd_dma *idma = container_of(dma, struct iomd_dma, dma);
194         unsigned long dma_base = idma->base;
195         unsigned long flags;
196
197         local_irq_save(flags);
198         if (idma->state != ~DMA_ST_AB)
199                 disable_irq(idma->irq);
200         iomd_writeb(0, dma_base + CR);
201         local_irq_restore(flags);
202 }
203
204 static int iomd_set_dma_speed(unsigned int chan, dma_t *dma, int cycle)
205 {
206         int tcr, speed;
207
208         if (cycle < 188)
209                 speed = 3;
210         else if (cycle <= 250)
211                 speed = 2;
212         else if (cycle < 438)
213                 speed = 1;
214         else
215                 speed = 0;
216
217         tcr = iomd_readb(IOMD_DMATCR);
218         speed &= 3;
219
220         switch (chan) {
221         case DMA_0:
222                 tcr = (tcr & ~0x03) | speed;
223                 break;
224
225         case DMA_1:
226                 tcr = (tcr & ~0x0c) | (speed << 2);
227                 break;
228
229         case DMA_2:
230                 tcr = (tcr & ~0x30) | (speed << 4);
231                 break;
232
233         case DMA_3:
234                 tcr = (tcr & ~0xc0) | (speed << 6);
235                 break;
236
237         default:
238                 break;
239         }
240
241         iomd_writeb(tcr, IOMD_DMATCR);
242
243         return speed;
244 }
245
/* dma_ops glue for the hardware IOMD channels. */
static struct dma_ops iomd_dma_ops = {
	.type		= "IOMD",
	.request	= iomd_request_dma,
	.free		= iomd_free_dma,
	.enable		= iomd_enable_dma,
	.disable	= iomd_disable_dma,
	.setspeed	= iomd_set_dma_speed,
};
254
/* Claim handle for the floppy pseudo-DMA FIQ. */
static struct fiq_handler fh = {
	.name	= "floppydma"
};

/* Virtual floppy DMA channel: data is pumped by a FIQ, not the IOMD. */
struct floppy_dma {
	struct dma_struct	dma;
	unsigned int		fiq;		/* FIQ number to enable/disable */
};
263
264 static void floppy_enable_dma(unsigned int chan, dma_t *dma)
265 {
266         struct floppy_dma *fdma = container_of(dma, struct floppy_dma, dma);
267         void *fiqhandler_start;
268         unsigned int fiqhandler_length;
269         struct pt_regs regs;
270
271         if (fdma->dma.sg)
272                 BUG();
273
274         if (fdma->dma.dma_mode == DMA_MODE_READ) {
275                 extern unsigned char floppy_fiqin_start, floppy_fiqin_end;
276                 fiqhandler_start = &floppy_fiqin_start;
277                 fiqhandler_length = &floppy_fiqin_end - &floppy_fiqin_start;
278         } else {
279                 extern unsigned char floppy_fiqout_start, floppy_fiqout_end;
280                 fiqhandler_start = &floppy_fiqout_start;
281                 fiqhandler_length = &floppy_fiqout_end - &floppy_fiqout_start;
282         }
283
284         regs.ARM_r9  = fdma->dma.count;
285         regs.ARM_r10 = (unsigned long)fdma->dma.addr;
286         regs.ARM_fp  = (unsigned long)FLOPPYDMA_BASE;
287
288         if (claim_fiq(&fh)) {
289                 printk("floppydma: couldn't claim FIQ.\n");
290                 return;
291         }
292
293         set_fiq_handler(fiqhandler_start, fiqhandler_length);
294         set_fiq_regs(&regs);
295         enable_fiq(fdma->fiq);
296 }
297
298 static void floppy_disable_dma(unsigned int chan, dma_t *dma)
299 {
300         struct floppy_dma *fdma = container_of(dma, struct floppy_dma, dma);
301         disable_fiq(fdma->fiq);
302         release_fiq(&fh);
303 }
304
305 static int floppy_get_residue(unsigned int chan, dma_t *dma)
306 {
307         struct pt_regs regs;
308         get_fiq_regs(&regs);
309         return regs.ARM_r9;
310 }
311
/*
 * dma_ops glue for the FIQ-driven floppy channel.  No request/free
 * hooks: the FIQ is claimed on enable and released on disable.
 */
static struct dma_ops floppy_dma_ops = {
	.type		= "FIQDMA",
	.enable		= floppy_enable_dma,
	.disable	= floppy_disable_dma,
	.residue	= floppy_get_residue,
};
318
/*
 * This is virtual DMA - the sound data is moved entirely in software,
 * so the enable/disable hooks have nothing to do.
 */
static void sound_enable_disable_dma(unsigned int chan, dma_t *dma)
{
}

static struct dma_ops sound_dma_ops = {
	.type		= "VIRTUAL",
	.enable		= sound_enable_disable_dma,
	.disable	= sound_enable_disable_dma,
};
331
/* Hardware channels: DMA_0..DMA_3 plus the two sound channels (S0/S1). */
static struct iomd_dma iomd_dma[6];

/* Virtual floppy channel, driven by the floppy FIQ. */
static struct floppy_dma floppy_dma = {
	.dma		= {
		.d_ops	= &floppy_dma_ops,
	},
	.fiq		= FIQ_FLOPPYDATA,
};

/* Virtual sound channel: no-op hooks, see sound_dma_ops. */
static dma_t sound_dma = {
	.d_ops		= &sound_dma_ops,
};
344
345 static int __init rpc_dma_init(void)
346 {
347         unsigned int i;
348         int ret;
349
350         iomd_writeb(0, IOMD_IO0CR);
351         iomd_writeb(0, IOMD_IO1CR);
352         iomd_writeb(0, IOMD_IO2CR);
353         iomd_writeb(0, IOMD_IO3CR);
354
355         iomd_writeb(0xa0, IOMD_DMATCR);
356
357         /*
358          * Setup DMA channels 2,3 to be for podules
359          * and channels 0,1 for internal devices
360          */
361         iomd_writeb(DMA_EXT_IO3|DMA_EXT_IO2, IOMD_DMAEXT);
362
363         iomd_dma[DMA_0].base    = IOMD_IO0CURA;
364         iomd_dma[DMA_0].irq     = IRQ_DMA0;
365         iomd_dma[DMA_1].base    = IOMD_IO1CURA;
366         iomd_dma[DMA_1].irq     = IRQ_DMA1;
367         iomd_dma[DMA_2].base    = IOMD_IO2CURA;
368         iomd_dma[DMA_2].irq     = IRQ_DMA2;
369         iomd_dma[DMA_3].base    = IOMD_IO3CURA;
370         iomd_dma[DMA_3].irq     = IRQ_DMA3;
371         iomd_dma[DMA_S0].base   = IOMD_SD0CURA;
372         iomd_dma[DMA_S0].irq    = IRQ_DMAS0;
373         iomd_dma[DMA_S1].base   = IOMD_SD1CURA;
374         iomd_dma[DMA_S1].irq    = IRQ_DMAS1;
375
376         for (i = DMA_0; i <= DMA_S1; i++) {
377                 iomd_dma[i].dma.d_ops = &iomd_dma_ops;
378
379                 ret = isa_dma_add(i, &iomd_dma[i].dma);
380                 if (ret)
381                         printk("IOMDDMA%u: unable to register: %d\n", i, ret);
382         }
383
384         ret = isa_dma_add(DMA_VIRTUAL_FLOPPY, &floppy_dma.dma);
385         if (ret)
386                 printk("IOMDFLOPPY: unable to register: %d\n", ret);
387         ret = isa_dma_add(DMA_VIRTUAL_SOUND, &sound_dma);
388         if (ret)
389                 printk("IOMDSOUND: unable to register: %d\n", ret);
390         return 0;
391 }
392 core_initcall(rpc_dma_init);