Merge branch 'sched-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel...
[linux-2.6-block.git] / drivers / char / mbcs.c
CommitLineData
e1e19747
BL
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Copyright (c) 2005 Silicon Graphics, Inc. All rights reserved.
7 */
8
9/*
10 * MOATB Core Services driver.
11 */
12
e1e19747
BL
13#include <linux/interrupt.h>
14#include <linux/module.h>
15#include <linux/moduleparam.h>
16#include <linux/types.h>
17#include <linux/ioport.h>
a81a8f58 18#include <linux/kernel.h>
e1e19747
BL
19#include <linux/notifier.h>
20#include <linux/reboot.h>
21#include <linux/init.h>
22#include <linux/fs.h>
23#include <linux/delay.h>
24#include <linux/device.h>
25#include <linux/mm.h>
26#include <linux/uio.h>
a40ba849 27#include <linux/mutex.h>
5a0e3ad6 28#include <linux/slab.h>
b808b1d6 29#include <linux/pagemap.h>
e1e19747 30#include <asm/io.h>
7c0f6ba6 31#include <linux/uaccess.h>
e1e19747
BL
32#include <asm/pgtable.h>
33#include <asm/sn/addrs.h>
34#include <asm/sn/intr.h>
35#include <asm/sn/tiocx.h>
36#include "mbcs.h"
37
38#define MBCS_DEBUG 0
39#if MBCS_DEBUG
40#define DBG(fmt...) printk(KERN_ALERT fmt)
41#else
42#define DBG(fmt...)
43#endif
613655fa 44static DEFINE_MUTEX(mbcs_mutex);
39ef01e0 45static int mbcs_major;
e1e19747 46
39ef01e0 47static LIST_HEAD(soft_list);
e1e19747
BL
48
49/*
50 * file operations
51 */
/*
 * File operations for the MBCS character device.  Note there is no
 * .release: open() only stashes a pointer in private_data, so nothing
 * needs tearing down on close.
 */
static const struct file_operations mbcs_ops = {
	.owner = THIS_MODULE,
	.open = mbcs_open,
	.llseek = mbcs_sram_llseek,
	.read = mbcs_sram_read,
	.write = mbcs_sram_write,
	.mmap = mbcs_gscr_mmap,
};
60
/*
 * Cookie pairing a minor number with its cx device.
 * NOTE(review): not referenced anywhere in this file — possibly used by
 * code outside this view, or dead; confirm before removing.
 */
struct mbcs_callback_arg {
	int minor;
	struct cx_dev *cx_dev;
};
65
66static inline void mbcs_getdma_init(struct getdma *gdma)
67{
68 memset(gdma, 0, sizeof(struct getdma));
69 gdma->DoneIntEnable = 1;
70}
71
72static inline void mbcs_putdma_init(struct putdma *pdma)
73{
74 memset(pdma, 0, sizeof(struct putdma));
75 pdma->DoneIntEnable = 1;
76}
77
78static inline void mbcs_algo_init(struct algoblock *algo_soft)
79{
80 memset(algo_soft, 0, sizeof(struct algoblock));
81}
82
/*
 * Program the read-DMA engine's MMRs (MBCS_RD_DMA_*) from the given
 * parameters.  Each register image is built in a zeroed union and then
 * written out whole, so fields not mentioned here are cleared.  Only
 * programs the registers — the engine is started separately by setting
 * rd_dma_go in MBCS_CM_CONTROL (see mbcs_getdma_start()).
 */
static inline void mbcs_getdma_set(void *mmr,
				   uint64_t hostAddr,
				   uint64_t localAddr,
				   uint64_t localRamSel,
				   uint64_t numPkts,
				   uint64_t amoEnable,
				   uint64_t intrEnable,
				   uint64_t peerIO,
				   uint64_t amoHostDest,
				   uint64_t amoModType, uint64_t intrHostDest,
				   uint64_t intrVector)
{
	union dma_control rdma_control;
	union dma_amo_dest amo_dest;
	union intr_dest intr_dest;
	union dma_localaddr local_addr;
	union dma_hostaddr host_addr;

	rdma_control.dma_control_reg = 0;
	amo_dest.dma_amo_dest_reg = 0;
	intr_dest.intr_dest_reg = 0;
	local_addr.dma_localaddr_reg = 0;
	host_addr.dma_hostaddr_reg = 0;

	/* system (host) side address of the transfer */
	host_addr.dma_sys_addr = hostAddr;
	MBCS_MMR_SET(mmr, MBCS_RD_DMA_SYS_ADDR, host_addr.dma_hostaddr_reg);

	/* device-local SRAM address plus bank select */
	local_addr.dma_ram_addr = localAddr;
	local_addr.dma_ram_sel = localRamSel;
	MBCS_MMR_SET(mmr, MBCS_RD_DMA_LOC_ADDR, local_addr.dma_localaddr_reg);

	/* transfer length (in packets) and completion-notification mode */
	rdma_control.dma_op_length = numPkts;
	rdma_control.done_amo_en = amoEnable;
	rdma_control.done_int_en = intrEnable;
	rdma_control.pio_mem_n = peerIO;
	MBCS_MMR_SET(mmr, MBCS_RD_DMA_CTRL, rdma_control.dma_control_reg);

	/* where the completion AMO lands, and how it modifies the target */
	amo_dest.dma_amo_sys_addr = amoHostDest;
	amo_dest.dma_amo_mod_type = amoModType;
	MBCS_MMR_SET(mmr, MBCS_RD_DMA_AMO_DEST, amo_dest.dma_amo_dest_reg);

	/* where the completion interrupt is delivered */
	intr_dest.address = intrHostDest;
	intr_dest.int_vector = intrVector;
	MBCS_MMR_SET(mmr, MBCS_RD_DMA_INT_DEST, intr_dest.intr_dest_reg);

}
129
/*
 * Program the write-DMA engine's MMRs (MBCS_WR_DMA_*).  Mirror image of
 * mbcs_getdma_set() for the write direction; the engine is started
 * separately via wr_dma_go in MBCS_CM_CONTROL (see mbcs_putdma_start()).
 */
static inline void mbcs_putdma_set(void *mmr,
				   uint64_t hostAddr,
				   uint64_t localAddr,
				   uint64_t localRamSel,
				   uint64_t numPkts,
				   uint64_t amoEnable,
				   uint64_t intrEnable,
				   uint64_t peerIO,
				   uint64_t amoHostDest,
				   uint64_t amoModType,
				   uint64_t intrHostDest, uint64_t intrVector)
{
	union dma_control wdma_control;
	union dma_amo_dest amo_dest;
	union intr_dest intr_dest;
	union dma_localaddr local_addr;
	union dma_hostaddr host_addr;

	wdma_control.dma_control_reg = 0;
	amo_dest.dma_amo_dest_reg = 0;
	intr_dest.intr_dest_reg = 0;
	local_addr.dma_localaddr_reg = 0;
	host_addr.dma_hostaddr_reg = 0;

	/* system (host) side address of the transfer */
	host_addr.dma_sys_addr = hostAddr;
	MBCS_MMR_SET(mmr, MBCS_WR_DMA_SYS_ADDR, host_addr.dma_hostaddr_reg);

	/* device-local SRAM address plus bank select */
	local_addr.dma_ram_addr = localAddr;
	local_addr.dma_ram_sel = localRamSel;
	MBCS_MMR_SET(mmr, MBCS_WR_DMA_LOC_ADDR, local_addr.dma_localaddr_reg);

	/* transfer length (in packets) and completion-notification mode */
	wdma_control.dma_op_length = numPkts;
	wdma_control.done_amo_en = amoEnable;
	wdma_control.done_int_en = intrEnable;
	wdma_control.pio_mem_n = peerIO;
	MBCS_MMR_SET(mmr, MBCS_WR_DMA_CTRL, wdma_control.dma_control_reg);

	/* where the completion AMO lands, and how it modifies the target */
	amo_dest.dma_amo_sys_addr = amoHostDest;
	amo_dest.dma_amo_mod_type = amoModType;
	MBCS_MMR_SET(mmr, MBCS_WR_DMA_AMO_DEST, amo_dest.dma_amo_dest_reg);

	/* where the completion interrupt is delivered */
	intr_dest.address = intrHostDest;
	intr_dest.int_vector = intrVector;
	MBCS_MMR_SET(mmr, MBCS_WR_DMA_INT_DEST, intr_dest.intr_dest_reg);

}
176
/*
 * Program the algorithm engine's MMRs (MBCS_ALG_*): completion AMO
 * destination, completion interrupt destination, and step count.
 * The engine itself is kicked off via alg_go in MBCS_CM_CONTROL
 * (see mbcs_algo_start()).
 */
static inline void mbcs_algo_set(void *mmr,
				 uint64_t amoHostDest,
				 uint64_t amoModType,
				 uint64_t intrHostDest,
				 uint64_t intrVector, uint64_t algoStepCount)
{
	union dma_amo_dest amo_dest;
	union intr_dest intr_dest;
	union algo_step step;

	step.algo_step_reg = 0;
	intr_dest.intr_dest_reg = 0;
	amo_dest.dma_amo_dest_reg = 0;

	amo_dest.dma_amo_sys_addr = amoHostDest;
	amo_dest.dma_amo_mod_type = amoModType;
	MBCS_MMR_SET(mmr, MBCS_ALG_AMO_DEST, amo_dest.dma_amo_dest_reg);

	intr_dest.address = intrHostDest;
	intr_dest.int_vector = intrVector;
	MBCS_MMR_SET(mmr, MBCS_ALG_INT_DEST, intr_dest.intr_dest_reg);

	step.alg_step_cnt = algoStepCount;
	MBCS_MMR_SET(mmr, MBCS_ALG_STEP, step.algo_step_reg);
}
202
/*
 * Program and start the read-DMA engine from soft->getdma.
 * Returns 0 on success, -1 if the descriptor has no host address
 * (i.e. the caller never set one up).  Completion is signalled
 * asynchronously via mbcs_completion_intr_handler().
 */
static inline int mbcs_getdma_start(struct mbcs_soft *soft)
{
	void *mmr_base;
	struct getdma *gdma;
	uint64_t numPkts;
	union cm_control cm_control;

	mmr_base = soft->mmr_base;
	gdma = &soft->getdma;

	/* check that host address got setup */
	if (!gdma->hostAddr)
		return -1;

	/* round byte count up to whole cachelines */
	numPkts =
	    (gdma->bytes + (MBCS_CACHELINE_SIZE - 1)) / MBCS_CACHELINE_SIZE;

	/* program engine; the ternary chain maps the local SRAM address to
	 * its 2MB bank-select value (0..3) */
	mbcs_getdma_set(mmr_base, tiocx_dma_addr(gdma->hostAddr),
			gdma->localAddr,
			(gdma->localAddr < MB2) ? 0 :
			(gdma->localAddr < MB4) ? 1 :
			(gdma->localAddr < MB6) ? 2 : 3,
			numPkts,
			gdma->DoneAmoEnable,
			gdma->DoneIntEnable,
			gdma->peerIO,
			gdma->amoHostDest,
			gdma->amoModType,
			gdma->intrHostDest, gdma->intrVector);

	/* start engine */
	cm_control.cm_control_reg = MBCS_MMR_GET(mmr_base, MBCS_CM_CONTROL);
	cm_control.rd_dma_go = 1;
	MBCS_MMR_SET(mmr_base, MBCS_CM_CONTROL, cm_control.cm_control_reg);

	return 0;

}
242
/*
 * Program and start the write-DMA engine from soft->putdma.
 * Returns 0 on success, -1 if no host address was set up.
 * Completion is signalled asynchronously via
 * mbcs_completion_intr_handler().
 */
static inline int mbcs_putdma_start(struct mbcs_soft *soft)
{
	void *mmr_base;
	struct putdma *pdma;
	uint64_t numPkts;
	union cm_control cm_control;

	mmr_base = soft->mmr_base;
	pdma = &soft->putdma;

	/* check that host address got setup */
	if (!pdma->hostAddr)
		return -1;

	/* round byte count up to whole cachelines */
	numPkts =
	    (pdma->bytes + (MBCS_CACHELINE_SIZE - 1)) / MBCS_CACHELINE_SIZE;

	/* program engine; the ternary chain maps the local SRAM address to
	 * its 2MB bank-select value (0..3) */
	mbcs_putdma_set(mmr_base, tiocx_dma_addr(pdma->hostAddr),
			pdma->localAddr,
			(pdma->localAddr < MB2) ? 0 :
			(pdma->localAddr < MB4) ? 1 :
			(pdma->localAddr < MB6) ? 2 : 3,
			numPkts,
			pdma->DoneAmoEnable,
			pdma->DoneIntEnable,
			pdma->peerIO,
			pdma->amoHostDest,
			pdma->amoModType,
			pdma->intrHostDest, pdma->intrVector);

	/* start engine */
	cm_control.cm_control_reg = MBCS_MMR_GET(mmr_base, MBCS_CM_CONTROL);
	cm_control.wr_dma_go = 1;
	MBCS_MMR_SET(mmr_base, MBCS_CM_CONTROL, cm_control.cm_control_reg);

	return 0;

}
282
/*
 * Program and kick off the algorithm engine, serialized by
 * soft->algolock.  Returns 0 on success or -ERESTARTSYS if the lock
 * acquisition was interrupted by a signal.  Does NOT wait for
 * completion; callers wait on soft->algo_queue / soft->algo_done.
 */
static inline int mbcs_algo_start(struct mbcs_soft *soft)
{
	struct algoblock *algo_soft = &soft->algo;
	void *mmr_base = soft->mmr_base;
	union cm_control cm_control;

	if (mutex_lock_interruptible(&soft->algolock))
		return -ERESTARTSYS;

	/* clear the completion flag before starting so the waiter can't
	 * see a stale "done" from a previous run */
	atomic_set(&soft->algo_done, 0);

	mbcs_algo_set(mmr_base,
		      algo_soft->amoHostDest,
		      algo_soft->amoModType,
		      algo_soft->intrHostDest,
		      algo_soft->intrVector, algo_soft->algoStepCount);

	/* start algorithm */
	cm_control.cm_control_reg = MBCS_MMR_GET(mmr_base, MBCS_CM_CONTROL);
	cm_control.alg_done_int_en = 1;
	cm_control.alg_go = 1;
	MBCS_MMR_SET(mmr_base, MBCS_CM_CONTROL, cm_control.cm_control_reg);

	mutex_unlock(&soft->algolock);

	return 0;
}
310
/*
 * DMA 'len' bytes from device SRAM at *off into the host buffer at
 * hostAddr (the device writes to host memory), then advance *off.
 * Serialized by soft->dmawritelock.  Sleeps until the completion
 * interrupt sets dmawrite_done.  Returns the byte count on success,
 * -ERESTARTSYS if interrupted, or -EAGAIN if the engine would not
 * start.  hostAddr must be a DMA-able kernel buffer of >= len bytes.
 */
static inline ssize_t
do_mbcs_sram_dmawrite(struct mbcs_soft *soft, uint64_t hostAddr,
		      size_t len, loff_t * off)
{
	int rv = 0;

	if (mutex_lock_interruptible(&soft->dmawritelock))
		return -ERESTARTSYS;

	/* arm the completion flag before starting the engine */
	atomic_set(&soft->dmawrite_done, 0);

	soft->putdma.hostAddr = hostAddr;
	soft->putdma.localAddr = *off;
	soft->putdma.bytes = len;

	if (mbcs_putdma_start(soft) < 0) {
		DBG(KERN_ALERT "do_mbcs_sram_dmawrite: "
		    "mbcs_putdma_start failed\n");
		rv = -EAGAIN;
		goto dmawrite_exit;
	}

	if (wait_event_interruptible(soft->dmawrite_queue,
				     atomic_read(&soft->dmawrite_done))) {
		rv = -ERESTARTSYS;
		goto dmawrite_exit;
	}

	rv = len;
	*off += len;

dmawrite_exit:
	mutex_unlock(&soft->dmawritelock);

	return rv;
}
347
348static inline ssize_t
349do_mbcs_sram_dmaread(struct mbcs_soft *soft, uint64_t hostAddr,
350 size_t len, loff_t * off)
351{
352 int rv = 0;
353
ae5e2979 354 if (mutex_lock_interruptible(&soft->dmareadlock))
e1e19747
BL
355 return -ERESTARTSYS;
356
357 atomic_set(&soft->dmawrite_done, 0);
358
359 soft->getdma.hostAddr = hostAddr;
360 soft->getdma.localAddr = *off;
361 soft->getdma.bytes = len;
362
363 if (mbcs_getdma_start(soft) < 0) {
364 DBG(KERN_ALERT "mbcs_strategy: mbcs_getdma_start failed\n");
365 rv = -EAGAIN;
366 goto dmaread_exit;
367 }
368
369 if (wait_event_interruptible(soft->dmaread_queue,
370 atomic_read(&soft->dmaread_done))) {
371 rv = -ERESTARTSYS;
372 goto dmaread_exit;
373 }
374
375 rv = len;
376 *off += len;
377
378dmaread_exit:
ae5e2979 379 mutex_unlock(&soft->dmareadlock);
e1e19747
BL
380
381 return rv;
382}
383
/*
 * open(): match the minor number against the nasid of each registered
 * device and stash its cx_dev in private_data.  Returns -ENODEV when
 * no device with that nasid exists.
 */
static int mbcs_open(struct inode *ip, struct file *fp)
{
	struct mbcs_soft *soft;
	int minor;

	mutex_lock(&mbcs_mutex);
	minor = iminor(ip);

	/* soft_list walk is serialized by mbcs_mutex (probe's list_add is
	 * NOT under this lock, though — TODO confirm that is safe) */
	list_for_each_entry(soft, &soft_list, list) {
		if (soft->nasid == minor) {
			fp->private_data = soft->cxdev;
			mutex_unlock(&mbcs_mutex);
			return 0;
		}
	}

	mutex_unlock(&mbcs_mutex);
	return -ENODEV;
}
404
/*
 * read(): bounce device SRAM to the user through a temporary
 * DMA-able page allocation.  The device DMA-writes into the bounce
 * buffer, which is then copied to userspace.
 *
 * NOTE(review): len is used unchecked for __get_dma_pages(get_order(len)),
 * so a huge read() request attempts a huge physically-contiguous
 * allocation — presumably bounded in practice by callers; verify.
 */
static ssize_t mbcs_sram_read(struct file * fp, char __user *buf, size_t len, loff_t * off)
{
	struct cx_dev *cx_dev = fp->private_data;
	struct mbcs_soft *soft = cx_dev->soft;
	uint64_t hostAddr;
	int rv = 0;

	hostAddr = __get_dma_pages(GFP_KERNEL, get_order(len));
	if (hostAddr == 0)
		return -ENOMEM;

	rv = do_mbcs_sram_dmawrite(soft, hostAddr, len, off);
	if (rv < 0)
		goto exit;

	if (copy_to_user(buf, (void *)hostAddr, len))
		rv = -EFAULT;

 exit:
	free_pages(hostAddr, get_order(len));

	return rv;
}
428
/*
 * write(): copy user data into a temporary DMA-able bounce buffer and
 * have the device DMA-read it into SRAM.
 *
 * NOTE(review): same unchecked-len allocation concern as
 * mbcs_sram_read().
 */
static ssize_t
mbcs_sram_write(struct file * fp, const char __user *buf, size_t len, loff_t * off)
{
	struct cx_dev *cx_dev = fp->private_data;
	struct mbcs_soft *soft = cx_dev->soft;
	uint64_t hostAddr;
	int rv = 0;

	hostAddr = __get_dma_pages(GFP_KERNEL, get_order(len));
	if (hostAddr == 0)
		return -ENOMEM;

	if (copy_from_user((void *)hostAddr, buf, len)) {
		rv = -EFAULT;
		goto exit;
	}

	rv = do_mbcs_sram_dmaread(soft, hostAddr, len, off);

 exit:
	free_pages(hostAddr, get_order(len));

	return rv;
}
453
/* llseek(): standard seek clamped to the SRAM window size. */
static loff_t mbcs_sram_llseek(struct file * filp, loff_t off, int whence)
{
	return generic_file_llseek_size(filp, off, whence, MAX_LFS_FILESIZE,
					MBCS_SRAM_SIZE);
}
459
460static uint64_t mbcs_pioaddr(struct mbcs_soft *soft, uint64_t offset)
461{
462 uint64_t mmr_base;
463
464 mmr_base = (uint64_t) (soft->mmr_base + offset);
465
466 return mmr_base;
467}
468
/* Cache the PIO address of the debug-register window in soft. */
static void mbcs_debug_pioaddr_set(struct mbcs_soft *soft)
{
	soft->debug_addr = mbcs_pioaddr(soft, MBCS_DEBUG_START);
}
473
/* Cache the PIO address of the GSCR window in soft (mmap()ed later). */
static void mbcs_gscr_pioaddr_set(struct mbcs_soft *soft)
{
	soft->gscr_addr = mbcs_pioaddr(soft, MBCS_GSCR_START);
}
478
/*
 * mmap(): map exactly one uncached page of the device's GSCR window
 * into userspace.  Non-zero pgoff is rejected; the mapping length is
 * fixed at PAGE_SIZE regardless of the requested vma size.
 */
static int mbcs_gscr_mmap(struct file *fp, struct vm_area_struct *vma)
{
	struct cx_dev *cx_dev = fp->private_data;
	struct mbcs_soft *soft = cx_dev->soft;

	if (vma->vm_pgoff != 0)
		return -EINVAL;

	/* device registers must not be cached */
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	/* Remap-pfn-range will mark the range VM_IO */
	if (remap_pfn_range(vma,
			    vma->vm_start,
			    __pa(soft->gscr_addr) >> PAGE_SHIFT,
			    PAGE_SIZE,
			    vma->vm_page_prot))
		return -EAGAIN;

	return 0;
}
499
/**
 * mbcs_completion_intr_handler - Primary completion handler.
 * @irq: irq
 * @arg: soft struct for device
 *
 * Reads CM_STATUS once and services whichever of the three engines
 * (read DMA, write DMA, algorithm) report completion: acknowledge the
 * engine via its clear bit in CM_CONTROL, set the corresponding done
 * flag, and wake its waiter.  Always returns IRQ_HANDLED (the line is
 * registered IRQF_SHARED by all three sn_irq allocations).
 */
static irqreturn_t
mbcs_completion_intr_handler(int irq, void *arg)
{
	struct mbcs_soft *soft = (struct mbcs_soft *)arg;
	void *mmr_base;
	union cm_status cm_status;
	union cm_control cm_control;

	mmr_base = soft->mmr_base;
	cm_status.cm_status_reg = MBCS_MMR_GET(mmr_base, MBCS_CM_STATUS);

	if (cm_status.rd_dma_done) {
		/* stop dma-read engine, clear status */
		cm_control.cm_control_reg =
		    MBCS_MMR_GET(mmr_base, MBCS_CM_CONTROL);
		cm_control.rd_dma_clr = 1;
		MBCS_MMR_SET(mmr_base, MBCS_CM_CONTROL,
			     cm_control.cm_control_reg);
		atomic_set(&soft->dmaread_done, 1);
		wake_up(&soft->dmaread_queue);
	}
	if (cm_status.wr_dma_done) {
		/* stop dma-write engine, clear status */
		cm_control.cm_control_reg =
		    MBCS_MMR_GET(mmr_base, MBCS_CM_CONTROL);
		cm_control.wr_dma_clr = 1;
		MBCS_MMR_SET(mmr_base, MBCS_CM_CONTROL,
			     cm_control.cm_control_reg);
		atomic_set(&soft->dmawrite_done, 1);
		wake_up(&soft->dmawrite_queue);
	}
	if (cm_status.alg_done) {
		/* clear status */
		cm_control.cm_control_reg =
		    MBCS_MMR_GET(mmr_base, MBCS_CM_CONTROL);
		cm_control.alg_done_clr = 1;
		MBCS_MMR_SET(mmr_base, MBCS_CM_CONTROL,
			     cm_control.cm_control_reg);
		atomic_set(&soft->algo_done, 1);
		wake_up(&soft->algo_queue);
	}

	return IRQ_HANDLED;
}
550
/**
 * mbcs_intr_alloc - Allocate interrupts.
 * @dev: device pointer
 *
 * Allocates three TIOCX interrupts (get-DMA, put-DMA, algorithm), all
 * routed to mbcs_completion_intr_handler with @soft as the shared
 * cookie, and records each irq's xtalk address/vector in the matching
 * descriptor so the engines deliver completions to the right place.
 * On any failure every previously acquired irq is unwound in reverse
 * order and -EAGAIN is returned; 0 on success.
 */
static int mbcs_intr_alloc(struct cx_dev *dev)
{
	struct sn_irq_info *sn_irq;
	struct mbcs_soft *soft;
	struct getdma *getdma;
	struct putdma *putdma;
	struct algoblock *algo;

	soft = dev->soft;
	getdma = &soft->getdma;
	putdma = &soft->putdma;
	algo = &soft->algo;

	soft->get_sn_irq = NULL;
	soft->put_sn_irq = NULL;
	soft->algo_sn_irq = NULL;

	/* 1/3: get-DMA (read engine) completion interrupt */
	sn_irq = tiocx_irq_alloc(dev->cx_id.nasid, TIOCX_CORELET, -1, -1, -1);
	if (sn_irq == NULL)
		return -EAGAIN;
	soft->get_sn_irq = sn_irq;
	getdma->intrHostDest = sn_irq->irq_xtalkaddr;
	getdma->intrVector = sn_irq->irq_irq;
	if (request_irq(sn_irq->irq_irq,
			(void *)mbcs_completion_intr_handler, IRQF_SHARED,
			"MBCS get intr", (void *)soft)) {
		tiocx_irq_free(soft->get_sn_irq);
		return -EAGAIN;
	}

	/* 2/3: put-DMA (write engine) completion interrupt */
	sn_irq = tiocx_irq_alloc(dev->cx_id.nasid, TIOCX_CORELET, -1, -1, -1);
	if (sn_irq == NULL) {
		free_irq(soft->get_sn_irq->irq_irq, soft);
		tiocx_irq_free(soft->get_sn_irq);
		return -EAGAIN;
	}
	soft->put_sn_irq = sn_irq;
	putdma->intrHostDest = sn_irq->irq_xtalkaddr;
	putdma->intrVector = sn_irq->irq_irq;
	if (request_irq(sn_irq->irq_irq,
			(void *)mbcs_completion_intr_handler, IRQF_SHARED,
			"MBCS put intr", (void *)soft)) {
		tiocx_irq_free(soft->put_sn_irq);
		free_irq(soft->get_sn_irq->irq_irq, soft);
		tiocx_irq_free(soft->get_sn_irq);
		return -EAGAIN;
	}

	/* 3/3: algorithm-engine completion interrupt */
	sn_irq = tiocx_irq_alloc(dev->cx_id.nasid, TIOCX_CORELET, -1, -1, -1);
	if (sn_irq == NULL) {
		free_irq(soft->put_sn_irq->irq_irq, soft);
		tiocx_irq_free(soft->put_sn_irq);
		free_irq(soft->get_sn_irq->irq_irq, soft);
		tiocx_irq_free(soft->get_sn_irq);
		return -EAGAIN;
	}
	soft->algo_sn_irq = sn_irq;
	algo->intrHostDest = sn_irq->irq_xtalkaddr;
	algo->intrVector = sn_irq->irq_irq;
	if (request_irq(sn_irq->irq_irq,
			(void *)mbcs_completion_intr_handler, IRQF_SHARED,
			"MBCS algo intr", (void *)soft)) {
		tiocx_irq_free(soft->algo_sn_irq);
		free_irq(soft->put_sn_irq->irq_irq, soft);
		tiocx_irq_free(soft->put_sn_irq);
		free_irq(soft->get_sn_irq->irq_irq, soft);
		tiocx_irq_free(soft->get_sn_irq);
		return -EAGAIN;
	}

	return 0;
}
628
/**
 * mbcs_intr_dealloc - Remove interrupts.
 * @dev: device pointer
 *
 * Releases all three interrupts acquired by mbcs_intr_alloc().
 * Assumes all three were successfully allocated (probe only continues
 * when mbcs_intr_alloc() fully succeeded).
 */
static void mbcs_intr_dealloc(struct cx_dev *dev)
{
	struct mbcs_soft *soft;

	soft = dev->soft;

	free_irq(soft->get_sn_irq->irq_irq, soft);
	tiocx_irq_free(soft->get_sn_irq);
	free_irq(soft->put_sn_irq->irq_irq, soft);
	tiocx_irq_free(soft->put_sn_irq);
	free_irq(soft->algo_sn_irq->irq_irq, soft);
	tiocx_irq_free(soft->algo_sn_irq);
}
647
/*
 * One-time hardware bring-up: set the request timeout, cache the PIO
 * window addresses, clear any latched error state, enable error
 * interrupts, and clear/re-arm all three engines.  Always returns 0.
 */
static inline int mbcs_hw_init(struct mbcs_soft *soft)
{
	void *mmr_base = soft->mmr_base;
	union cm_control cm_control;
	union cm_req_timeout cm_req_timeout;
	uint64_t err_stat;

	cm_req_timeout.cm_req_timeout_reg =
	    MBCS_MMR_GET(mmr_base, MBCS_CM_REQ_TOUT);

	/* max out the request timeout */
	cm_req_timeout.time_out = MBCS_CM_CONTROL_REQ_TOUT_MASK;
	MBCS_MMR_SET(mmr_base, MBCS_CM_REQ_TOUT,
		     cm_req_timeout.cm_req_timeout_reg);

	mbcs_gscr_pioaddr_set(soft);
	mbcs_debug_pioaddr_set(soft);

	/* clear errors */
	err_stat = MBCS_MMR_GET(mmr_base, MBCS_CM_ERR_STAT);
	MBCS_MMR_SET(mmr_base, MBCS_CM_CLR_ERR_STAT, err_stat);
	MBCS_MMR_ZERO(mmr_base, MBCS_CM_ERROR_DETAIL1);

	/* enable interrupts */
	/* turn off 2^23 (INT_EN_PIO_REQ_ADDR_INV) */
	MBCS_MMR_SET(mmr_base, MBCS_CM_ERR_INT_EN, 0x3ffffff7e00ffUL);

	/* arm status regs and clear engines */
	cm_control.cm_control_reg = MBCS_MMR_GET(mmr_base, MBCS_CM_CONTROL);
	cm_control.rearm_stat_regs = 1;
	cm_control.alg_clr = 1;
	cm_control.wr_dma_clr = 1;
	cm_control.rd_dma_clr = 1;

	MBCS_MMR_SET(mmr_base, MBCS_CM_CONTROL, cm_control.cm_control_reg);

	return 0;
}
685
/*
 * sysfs 'algo' show: report the first debug register (algorithm
 * number/revision by convention) as two hex words: high 32 bits, then
 * low 32 bits.
 */
static ssize_t show_algo(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct cx_dev *cx_dev = to_cx_dev(dev);
	struct mbcs_soft *soft = cx_dev->soft;
	uint64_t debug0;

	/*
	 * By convention, the first debug register contains the
	 * algorithm number and revision.
	 */
	debug0 = *(uint64_t *) soft->debug_addr;

	return sprintf(buf, "0x%x 0x%x\n",
		       upper_32_bits(debug0), lower_32_bits(debug0));
}
701
74880c06 702static ssize_t store_algo(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
e1e19747
BL
703{
704 int n;
705 struct cx_dev *cx_dev = to_cx_dev(dev);
706 struct mbcs_soft *soft = cx_dev->soft;
707
708 if (count <= 0)
709 return 0;
710
711 n = simple_strtoul(buf, NULL, 0);
712
713 if (n == 1) {
714 mbcs_algo_start(soft);
715 if (wait_event_interruptible(soft->algo_queue,
716 atomic_read(&soft->algo_done)))
717 return -ERESTARTSYS;
718 }
719
720 return count;
721}
722
723DEVICE_ATTR(algo, 0644, show_algo, store_algo);
724
725/**
726 * mbcs_probe - Initialize for device
727 * @dev: device pointer
728 * @device_id: id table pointer
729 *
730 */
731static int mbcs_probe(struct cx_dev *dev, const struct cx_device_id *id)
732{
733 struct mbcs_soft *soft;
734
735 dev->soft = NULL;
736
82ca76b6 737 soft = kzalloc(sizeof(struct mbcs_soft), GFP_KERNEL);
e1e19747
BL
738 if (soft == NULL)
739 return -ENOMEM;
740
741 soft->nasid = dev->cx_id.nasid;
742 list_add(&soft->list, &soft_list);
743 soft->mmr_base = (void *)tiocx_swin_base(dev->cx_id.nasid);
744 dev->soft = soft;
745 soft->cxdev = dev;
746
747 init_waitqueue_head(&soft->dmawrite_queue);
748 init_waitqueue_head(&soft->dmaread_queue);
749 init_waitqueue_head(&soft->algo_queue);
750
46bca696 751 mutex_init(&soft->dmawritelock);
ae5e2979 752 mutex_init(&soft->dmareadlock);
a40ba849 753 mutex_init(&soft->algolock);
e1e19747
BL
754
755 mbcs_getdma_init(&soft->getdma);
756 mbcs_putdma_init(&soft->putdma);
757 mbcs_algo_init(&soft->algo);
758
759 mbcs_hw_init(soft);
760
761 /* Allocate interrupts */
762 mbcs_intr_alloc(dev);
763
764 device_create_file(&dev->dev, &dev_attr_algo);
765
766 return 0;
767}
768
/*
 * Teardown for a device: release interrupts and free the soft state.
 * NOTE(review): the soft_list entry is never list_del()ed here, and
 * the sysfs file is removed after the soft state is freed — confirm
 * against unload ordering.  Always returns 0.
 */
static int mbcs_remove(struct cx_dev *dev)
{
	if (dev->soft) {
		mbcs_intr_dealloc(dev);
		kfree(dev->soft);
	}

	device_remove_file(&dev->dev, &dev_attr_algo);

	return 0;
}
780
/* Devices this driver binds to; terminated by the zero entry. */
static const struct cx_device_id mbcs_id_table[] = {
	{
	 .part_num = MBCS_PART_NUM,
	 .mfg_num = MBCS_MFG_NUM,
	 },
	{
	 .part_num = MBCS_PART_NUM_ALG0,
	 .mfg_num = MBCS_MFG_NUM,
	 },
	{0, 0}
};
792
793MODULE_DEVICE_TABLE(cx, mbcs_id_table);
794
/* TIOCX bus driver registration for MBCS devices. */
static struct cx_drv mbcs_driver = {
	.name = DEVICE_NAME,
	.id_table = mbcs_id_table,
	.probe = mbcs_probe,
	.remove = mbcs_remove,
};
801
/* Module unload: drop the chrdev major, then unregister the driver. */
static void __exit mbcs_exit(void)
{
	unregister_chrdev(mbcs_major, DEVICE_NAME);
	cx_driver_unregister(&mbcs_driver);
}
807
808static int __init mbcs_init(void)
809{
810 int rv;
811
96f339c6
GE
812 if (!ia64_platform_is("sn2"))
813 return -ENODEV;
814
e1e19747
BL
815 // Put driver into chrdevs[]. Get major number.
816 rv = register_chrdev(mbcs_major, DEVICE_NAME, &mbcs_ops);
817 if (rv < 0) {
818 DBG(KERN_ALERT "mbcs_init: can't get major number. %d\n", rv);
819 return rv;
820 }
821 mbcs_major = rv;
822
823 return cx_driver_register(&mbcs_driver);
824}
825
826module_init(mbcs_init);
827module_exit(mbcs_exit);
828
829MODULE_AUTHOR("Bruce Losure <blosure@sgi.com>");
830MODULE_DESCRIPTION("Driver for MOATB Core Services");
831MODULE_LICENSE("GPL");