rapidio: avoid data race between file operation callbacks and mport_cdev_add().
drivers/rapidio/devices/rio_mport_cdev.c

// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * RapidIO mport character device
 *
 * Copyright 2014-2015 Integrated Device Technology, Inc.
 * Alexandre Bounine <alexandre.bounine@idt.com>
 * Copyright 2014-2015 Prodrive Technologies
 * Andre van Herk <andre.van.herk@prodrive-technologies.com>
 * Jerry Jacobs <jerry.jacobs@prodrive-technologies.com>
 * Copyright (C) 2014 Texas Instruments Incorporated
 * Aurelien Jacquiot <a-jacquiot@ti.com>
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/cdev.h>
#include <linux/ioctl.h>
#include <linux/uaccess.h>
#include <linux/list.h>
#include <linux/fs.h>
#include <linux/err.h>
#include <linux/net.h>
#include <linux/poll.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/kfifo.h>

#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mman.h>

#include <linux/dma-mapping.h>
#ifdef CONFIG_RAPIDIO_DMA_ENGINE
#include <linux/dmaengine.h>
#endif

#include <linux/rio.h>
#include <linux/rio_ids.h>
#include <linux/rio_drv.h>
#include <linux/rio_mport_cdev.h>

#include "../rio.h"

#define DRV_NAME "rio_mport"
#define DRV_PREFIX DRV_NAME ": "
#define DEV_NAME "rio_mport"
#define DRV_VERSION "1.0.0"

/* Debug output filtering masks */
enum {
	DBG_NONE = 0,
	DBG_INIT = BIT(0), /* driver init */
	DBG_EXIT = BIT(1), /* driver exit */
	DBG_MPORT = BIT(2), /* mport add/remove */
	DBG_RDEV = BIT(3), /* RapidIO device add/remove */
	DBG_DMA = BIT(4), /* DMA transfer messages */
	DBG_MMAP = BIT(5), /* mapping messages */
	DBG_IBW = BIT(6), /* inbound window */
	DBG_EVENT = BIT(7), /* event handling messages */
	DBG_OBW = BIT(8), /* outbound window messages */
	DBG_DBELL = BIT(9), /* doorbell messages */
	DBG_ALL = ~0,
};

#ifdef DEBUG
#define rmcd_debug(level, fmt, arg...) \
	do { \
		if (DBG_##level & dbg_level) \
			pr_debug(DRV_PREFIX "%s: " fmt "\n", __func__, ##arg); \
	} while (0)
#else
#define rmcd_debug(level, fmt, arg...) \
		no_printk(KERN_DEBUG pr_fmt(DRV_PREFIX fmt "\n"), ##arg)
#endif

#define rmcd_warn(fmt, arg...) \
	pr_warn(DRV_PREFIX "%s WARNING " fmt "\n", __func__, ##arg)

#define rmcd_error(fmt, arg...) \
	pr_err(DRV_PREFIX "%s ERROR " fmt "\n", __func__, ##arg)

MODULE_AUTHOR("Jerry Jacobs <jerry.jacobs@prodrive-technologies.com>");
MODULE_AUTHOR("Aurelien Jacquiot <a-jacquiot@ti.com>");
MODULE_AUTHOR("Alexandre Bounine <alexandre.bounine@idt.com>");
MODULE_AUTHOR("Andre van Herk <andre.van.herk@prodrive-technologies.com>");
MODULE_DESCRIPTION("RapidIO mport character device driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

static int dma_timeout = 3000; /* DMA transfer timeout in msec */
module_param(dma_timeout, int, S_IRUGO);
MODULE_PARM_DESC(dma_timeout, "DMA Transfer Timeout in msec (default: 3000)");

#ifdef DEBUG
static u32 dbg_level = DBG_NONE;
module_param(dbg_level, uint, S_IWUSR | S_IWGRP | S_IRUGO);
MODULE_PARM_DESC(dbg_level, "Debugging output level (default 0 = none)");
#endif
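
/*
 * Illustrative note (not part of the driver): with DEBUG defined, the
 * dbg_level mask above selects message classes, e.g. loading the module
 * with dbg_level=0x90 enables DBG_DMA | DBG_EVENT. Because the parameter
 * is registered with S_IWUSR, it can also be rewritten at runtime through
 * the module's entry under /sys/module/.../parameters/dbg_level.
 */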

/*
 * An internal DMA coherent buffer
 */
struct mport_dma_buf {
	void *ib_base;
	dma_addr_t ib_phys;
	u32 ib_size;
	u64 ib_rio_base;
	bool ib_map;
	struct file *filp;
};

/*
 * Internal memory mapping structure
 */
enum rio_mport_map_dir {
	MAP_INBOUND,
	MAP_OUTBOUND,
	MAP_DMA,
};

struct rio_mport_mapping {
	struct list_head node;
	struct mport_dev *md;
	enum rio_mport_map_dir dir;
	u16 rioid;
	u64 rio_addr;
	dma_addr_t phys_addr; /* for mmap */
	void *virt_addr; /* kernel address, for dma_free_coherent */
	u64 size;
	struct kref ref; /* refcount of vmas sharing the mapping */
	struct file *filp;
};

struct rio_mport_dma_map {
	int valid;
	u64 length;
	void *vaddr;
	dma_addr_t paddr;
};

#define MPORT_MAX_DMA_BUFS 16
#define MPORT_EVENT_DEPTH 10

/*
 * mport_dev - driver-specific structure that represents mport device
 * @active     mport device status flag
 * @node       list node to maintain list of registered mports
 * @cdev       character device
 * @dev        associated device object
 * @mport      associated subsystem's master port device object
 * @buf_mutex  lock for buffer handling
 * @file_mutex lock for open files list
 * @file_list  list of open files on given mport
 * @properties properties of this mport
 * @doorbells  list of inbound doorbell filters
 * @db_lock    lock for doorbell filter list
 * @portwrites queue of inbound portwrites
 * @pw_lock    lock for port write queue
 * @mappings   list of memory mappings
 * @dma_chan   default DMA channel associated with this device
 * @dma_ref    refcount for the default DMA channel
 * @comp       completion used during DMA channel release
 */
struct mport_dev {
	atomic_t active;
	struct list_head node;
	struct cdev cdev;
	struct device dev;
	struct rio_mport *mport;
	struct mutex buf_mutex;
	struct mutex file_mutex;
	struct list_head file_list;
	struct rio_mport_properties properties;
	struct list_head doorbells;
	spinlock_t db_lock;
	struct list_head portwrites;
	spinlock_t pw_lock;
	struct list_head mappings;
#ifdef CONFIG_RAPIDIO_DMA_ENGINE
	struct dma_chan *dma_chan;
	struct kref dma_ref;
	struct completion comp;
#endif
};

/*
 * mport_cdev_priv - data structure specific to individual file object
 *                   associated with an open device
 * @md            master port character device object
 * @async_queue   asynchronous notification queue
 * @list          file objects tracking list
 * @db_filters    inbound doorbell filters for this descriptor
 * @pw_filters    portwrite filters for this descriptor
 * @event_fifo    event fifo for this descriptor
 * @event_rx_wait wait queue for this descriptor
 * @fifo_lock     lock for event_fifo
 * @event_mask    event mask for this descriptor
 * @dmach         DMA engine channel allocated for specific file object
 * @async_list    list of pending asynchronous DMA requests
 * @req_lock      lock protecting async_list
 * @dma_lock      lock serializing DMA channel allocation
 * @dma_ref       refcount of users of this file's DMA channel
 * @comp          completion signaled when the DMA channel can be released
 */
struct mport_cdev_priv {
	struct mport_dev *md;
	struct fasync_struct *async_queue;
	struct list_head list;
	struct list_head db_filters;
	struct list_head pw_filters;
	struct kfifo event_fifo;
	wait_queue_head_t event_rx_wait;
	spinlock_t fifo_lock;
	u32 event_mask; /* RIO_DOORBELL, RIO_PORTWRITE */
#ifdef CONFIG_RAPIDIO_DMA_ENGINE
	struct dma_chan *dmach;
	struct list_head async_list;
	spinlock_t req_lock;
	struct mutex dma_lock;
	struct kref dma_ref;
	struct completion comp;
#endif
};

/*
 * rio_mport_pw_filter - structure to describe a portwrite filter
 * @md_node   node in mport device's list
 * @priv_node node in private file object's list
 * @priv      reference to private data
 * @filter    actual portwrite filter
 */
struct rio_mport_pw_filter {
	struct list_head md_node;
	struct list_head priv_node;
	struct mport_cdev_priv *priv;
	struct rio_pw_filter filter;
};

/*
 * rio_mport_db_filter - structure to describe a doorbell filter
 * @data_node node in mport device's doorbell list
 * @priv_node node in private file object's list
 * @priv      reference to private data
 * @filter    actual doorbell filter
 */
struct rio_mport_db_filter {
	struct list_head data_node;
	struct list_head priv_node;
	struct mport_cdev_priv *priv;
	struct rio_doorbell_filter filter;
};

static LIST_HEAD(mport_devs);
static DEFINE_MUTEX(mport_devs_lock);

#if (0) /* used by commented out portion of poll function : FIXME */
static DECLARE_WAIT_QUEUE_HEAD(mport_cdev_wait);
#endif

static struct class *dev_class;
static dev_t dev_number;

static void mport_release_mapping(struct kref *ref);

static int rio_mport_maint_rd(struct mport_cdev_priv *priv, void __user *arg,
			      int local)
{
	struct rio_mport *mport = priv->md->mport;
	struct rio_mport_maint_io maint_io;
	u32 *buffer;
	u32 offset;
	size_t length;
	int ret, i;

	if (unlikely(copy_from_user(&maint_io, arg, sizeof(maint_io))))
		return -EFAULT;

	if ((maint_io.offset % 4) ||
	    (maint_io.length == 0) || (maint_io.length % 4) ||
	    (maint_io.length + maint_io.offset) > RIO_MAINT_SPACE_SZ)
		return -EINVAL;

	buffer = vmalloc(maint_io.length);
	if (buffer == NULL)
		return -ENOMEM;
	length = maint_io.length/sizeof(u32);
	offset = maint_io.offset;

	for (i = 0; i < length; i++) {
		if (local)
			ret = __rio_local_read_config_32(mport,
				offset, &buffer[i]);
		else
			ret = rio_mport_read_config_32(mport, maint_io.rioid,
				maint_io.hopcount, offset, &buffer[i]);
		if (ret)
			goto out;

		offset += 4;
	}

	if (unlikely(copy_to_user((void __user *)(uintptr_t)maint_io.buffer,
				  buffer, maint_io.length)))
		ret = -EFAULT;
out:
	vfree(buffer);
	return ret;
}

static int rio_mport_maint_wr(struct mport_cdev_priv *priv, void __user *arg,
			      int local)
{
	struct rio_mport *mport = priv->md->mport;
	struct rio_mport_maint_io maint_io;
	u32 *buffer;
	u32 offset;
	size_t length;
	int ret = -EINVAL, i;

	if (unlikely(copy_from_user(&maint_io, arg, sizeof(maint_io))))
		return -EFAULT;

	if ((maint_io.offset % 4) ||
	    (maint_io.length == 0) || (maint_io.length % 4) ||
	    (maint_io.length + maint_io.offset) > RIO_MAINT_SPACE_SZ)
		return -EINVAL;

	buffer = vmalloc(maint_io.length);
	if (buffer == NULL)
		return -ENOMEM;
	length = maint_io.length;

	if (unlikely(copy_from_user(buffer,
			(void __user *)(uintptr_t)maint_io.buffer, length))) {
		ret = -EFAULT;
		goto out;
	}

	offset = maint_io.offset;
	length /= sizeof(u32);

	for (i = 0; i < length; i++) {
		if (local)
			ret = __rio_local_write_config_32(mport,
							  offset, buffer[i]);
		else
			ret = rio_mport_write_config_32(mport, maint_io.rioid,
							maint_io.hopcount,
							offset, buffer[i]);
		if (ret)
			goto out;

		offset += 4;
	}

out:
	vfree(buffer);
	return ret;
}

/*
 * Inbound/outbound memory mapping functions
 */
static int
rio_mport_create_outbound_mapping(struct mport_dev *md, struct file *filp,
				  u16 rioid, u64 raddr, u32 size,
				  dma_addr_t *paddr)
{
	struct rio_mport *mport = md->mport;
	struct rio_mport_mapping *map;
	int ret;

	rmcd_debug(OBW, "did=%d ra=0x%llx sz=0x%x", rioid, raddr, size);

	map = kzalloc(sizeof(*map), GFP_KERNEL);
	if (map == NULL)
		return -ENOMEM;

	ret = rio_map_outb_region(mport, rioid, raddr, size, 0, paddr);
	if (ret < 0)
		goto err_map_outb;

	map->dir = MAP_OUTBOUND;
	map->rioid = rioid;
	map->rio_addr = raddr;
	map->size = size;
	map->phys_addr = *paddr;
	map->filp = filp;
	map->md = md;
	kref_init(&map->ref);
	list_add_tail(&map->node, &md->mappings);
	return 0;
err_map_outb:
	kfree(map);
	return ret;
}

static int
rio_mport_get_outbound_mapping(struct mport_dev *md, struct file *filp,
			       u16 rioid, u64 raddr, u32 size,
			       dma_addr_t *paddr)
{
	struct rio_mport_mapping *map;
	int err = -ENOMEM;

	mutex_lock(&md->buf_mutex);
	list_for_each_entry(map, &md->mappings, node) {
		if (map->dir != MAP_OUTBOUND)
			continue;
		if (rioid == map->rioid &&
		    raddr == map->rio_addr && size == map->size) {
			*paddr = map->phys_addr;
			err = 0;
			break;
		} else if (rioid == map->rioid &&
			   raddr < (map->rio_addr + map->size - 1) &&
			   (raddr + size) > map->rio_addr) {
			err = -EBUSY;
			break;
		}
	}

	/* If not found, create new */
	if (err == -ENOMEM)
		err = rio_mport_create_outbound_mapping(md, filp, rioid, raddr,
							size, paddr);
	mutex_unlock(&md->buf_mutex);
	return err;
}

static int rio_mport_obw_map(struct file *filp, void __user *arg)
{
	struct mport_cdev_priv *priv = filp->private_data;
	struct mport_dev *data = priv->md;
	struct rio_mmap map;
	dma_addr_t paddr;
	int ret;

	if (unlikely(copy_from_user(&map, arg, sizeof(map))))
		return -EFAULT;

	rmcd_debug(OBW, "did=%d ra=0x%llx sz=0x%llx",
		   map.rioid, map.rio_addr, map.length);

	ret = rio_mport_get_outbound_mapping(data, filp, map.rioid,
					     map.rio_addr, map.length, &paddr);
	if (ret < 0) {
		rmcd_error("Failed to set OBW err= %d", ret);
		return ret;
	}

	map.handle = paddr;

	if (unlikely(copy_to_user(arg, &map, sizeof(map))))
		return -EFAULT;
	return 0;
}
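
/*
 * Illustrative user-space sketch for the outbound-window path above
 * (assumptions, not part of the driver: the device node is /dev/rio_mport0
 * and RIO_MAP_OUTBOUND is the ioctl bound to this handler in
 * <linux/rio_mport_cdev.h>; error handling omitted):
 *
 *	struct rio_mmap map = {0};
 *	map.rioid = 5;              // destination device ID
 *	map.rio_addr = 0x10000000;  // remote RapidIO address
 *	map.length = 0x10000;       // window size
 *	ioctl(fd, RIO_MAP_OUTBOUND, &map);
 *	// map.handle now holds the physical address usable with mmap()
 */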

/*
 * rio_mport_obw_free() - unmap an OutBound Window from RapidIO address space
 *
 * @filp: file pointer associated with the call
 * @arg: OBW handle returned by the mapping request
 */
static int rio_mport_obw_free(struct file *filp, void __user *arg)
{
	struct mport_cdev_priv *priv = filp->private_data;
	struct mport_dev *md = priv->md;
	u64 handle;
	struct rio_mport_mapping *map, *_map;

	if (!md->mport->ops->unmap_outb)
		return -EPROTONOSUPPORT;

	if (copy_from_user(&handle, arg, sizeof(handle)))
		return -EFAULT;

	rmcd_debug(OBW, "h=0x%llx", handle);

	mutex_lock(&md->buf_mutex);
	list_for_each_entry_safe(map, _map, &md->mappings, node) {
		if (map->dir == MAP_OUTBOUND && map->phys_addr == handle) {
			if (map->filp == filp) {
				rmcd_debug(OBW, "kref_put h=0x%llx", handle);
				map->filp = NULL;
				kref_put(&map->ref, mport_release_mapping);
			}
			break;
		}
	}
	mutex_unlock(&md->buf_mutex);

	return 0;
}

/*
 * maint_hdid_set() - Set the host Device ID
 * @priv: driver private data
 * @arg: Device Id
 */
static int maint_hdid_set(struct mport_cdev_priv *priv, void __user *arg)
{
	struct mport_dev *md = priv->md;
	u16 hdid;

	if (copy_from_user(&hdid, arg, sizeof(hdid)))
		return -EFAULT;

	md->mport->host_deviceid = hdid;
	md->properties.hdid = hdid;
	rio_local_set_device_id(md->mport, hdid);

	rmcd_debug(MPORT, "Set host device Id to %d", hdid);

	return 0;
}
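
/*
 * Illustrative user-space sketch (assumptions, not part of the driver:
 * /dev/rio_mport0 is the character device created by this driver and
 * RIO_MPORT_MAINT_HDID_SET is the ioctl bound to this handler in
 * <linux/rio_mport_cdev.h>; error handling omitted):
 *
 *	uint16_t hdid = 5;
 *	int fd = open("/dev/rio_mport0", O_RDWR);
 *	ioctl(fd, RIO_MPORT_MAINT_HDID_SET, &hdid);
 */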

/*
 * maint_comptag_set() - Set the host Component Tag
 * @priv: driver private data
 * @arg: Component Tag
 */
static int maint_comptag_set(struct mport_cdev_priv *priv, void __user *arg)
{
	struct mport_dev *md = priv->md;
	u32 comptag;

	if (copy_from_user(&comptag, arg, sizeof(comptag)))
		return -EFAULT;

	rio_local_write_config_32(md->mport, RIO_COMPONENT_TAG_CSR, comptag);

	rmcd_debug(MPORT, "Set host Component Tag to %d", comptag);

	return 0;
}

#ifdef CONFIG_RAPIDIO_DMA_ENGINE

struct mport_dma_req {
	struct kref refcount;
	struct list_head node;
	struct file *filp;
	struct mport_cdev_priv *priv;
	enum rio_transfer_sync sync;
	struct sg_table sgt;
	struct page **page_list;
	unsigned int nr_pages;
	struct rio_mport_mapping *map;
	struct dma_chan *dmach;
	enum dma_data_direction dir;
	dma_cookie_t cookie;
	enum dma_status status;
	struct completion req_comp;
};

static void mport_release_def_dma(struct kref *dma_ref)
{
	struct mport_dev *md =
			container_of(dma_ref, struct mport_dev, dma_ref);

	rmcd_debug(EXIT, "DMA_%d", md->dma_chan->chan_id);
	rio_release_dma(md->dma_chan);
	md->dma_chan = NULL;
}

static void mport_release_dma(struct kref *dma_ref)
{
	struct mport_cdev_priv *priv =
			container_of(dma_ref, struct mport_cdev_priv, dma_ref);

	rmcd_debug(EXIT, "DMA_%d", priv->dmach->chan_id);
	complete(&priv->comp);
}

static void dma_req_free(struct kref *ref)
{
	struct mport_dma_req *req = container_of(ref, struct mport_dma_req,
			refcount);
	struct mport_cdev_priv *priv = req->priv;
	unsigned int i;

	dma_unmap_sg(req->dmach->device->dev,
		     req->sgt.sgl, req->sgt.nents, req->dir);
	sg_free_table(&req->sgt);
	if (req->page_list) {
		for (i = 0; i < req->nr_pages; i++)
			put_page(req->page_list[i]);
		kfree(req->page_list);
	}

	if (req->map) {
		mutex_lock(&req->map->md->buf_mutex);
		kref_put(&req->map->ref, mport_release_mapping);
		mutex_unlock(&req->map->md->buf_mutex);
	}

	kref_put(&priv->dma_ref, mport_release_dma);

	kfree(req);
}

static void dma_xfer_callback(void *param)
{
	struct mport_dma_req *req = (struct mport_dma_req *)param;
	struct mport_cdev_priv *priv = req->priv;

	req->status = dma_async_is_tx_complete(priv->dmach, req->cookie,
					       NULL, NULL);
	complete(&req->req_comp);
	kref_put(&req->refcount, dma_req_free);
}

/*
 * prep_dma_xfer() - Configure and send request to DMAengine to prepare DMA
 *                   transfer object.
 * Returns a pointer to the DMA transaction descriptor allocated by the DMA
 * driver on success, or NULL/ERR_PTR on failure. The caller must check any
 * non-NULL return value with the IS_ERR() macro.
 */
static struct dma_async_tx_descriptor
*prep_dma_xfer(struct dma_chan *chan, struct rio_transfer_io *transfer,
	struct sg_table *sgt, int nents, enum dma_transfer_direction dir,
	enum dma_ctrl_flags flags)
{
	struct rio_dma_data tx_data;

	tx_data.sg = sgt->sgl;
	tx_data.sg_len = nents;
	tx_data.rio_addr_u = 0;
	tx_data.rio_addr = transfer->rio_addr;
	if (dir == DMA_MEM_TO_DEV) {
		switch (transfer->method) {
		case RIO_EXCHANGE_NWRITE:
			tx_data.wr_type = RDW_ALL_NWRITE;
			break;
		case RIO_EXCHANGE_NWRITE_R_ALL:
			tx_data.wr_type = RDW_ALL_NWRITE_R;
			break;
		case RIO_EXCHANGE_NWRITE_R:
			tx_data.wr_type = RDW_LAST_NWRITE_R;
			break;
		case RIO_EXCHANGE_DEFAULT:
			tx_data.wr_type = RDW_DEFAULT;
			break;
		default:
			return ERR_PTR(-EINVAL);
		}
	}

	return rio_dma_prep_xfer(chan, transfer->rioid, &tx_data, dir, flags);
}

/* Request DMA channel associated with this mport device.
 * Try to request a DMA channel for every new process that opens the given
 * mport. If a new DMA channel is not available, use the default channel,
 * which is the first DMA channel opened on the mport device.
 */
static int get_dma_channel(struct mport_cdev_priv *priv)
{
	mutex_lock(&priv->dma_lock);
	if (!priv->dmach) {
		priv->dmach = rio_request_mport_dma(priv->md->mport);
		if (!priv->dmach) {
			/* Use default DMA channel if available */
			if (priv->md->dma_chan) {
				priv->dmach = priv->md->dma_chan;
				kref_get(&priv->md->dma_ref);
			} else {
				rmcd_error("Failed to get DMA channel");
				mutex_unlock(&priv->dma_lock);
				return -ENODEV;
			}
		} else if (!priv->md->dma_chan) {
			/* Register default DMA channel if we do not have one */
			priv->md->dma_chan = priv->dmach;
			kref_init(&priv->md->dma_ref);
			rmcd_debug(DMA, "Register DMA_chan %d as default",
				   priv->dmach->chan_id);
		}

		kref_init(&priv->dma_ref);
		init_completion(&priv->comp);
	}

	kref_get(&priv->dma_ref);
	mutex_unlock(&priv->dma_lock);
	return 0;
}

static void put_dma_channel(struct mport_cdev_priv *priv)
{
	kref_put(&priv->dma_ref, mport_release_dma);
}

/*
 * DMA transfer functions
 */
static int do_dma_request(struct mport_dma_req *req,
			  struct rio_transfer_io *xfer,
			  enum rio_transfer_sync sync, int nents)
{
	struct mport_cdev_priv *priv;
	struct sg_table *sgt;
	struct dma_chan *chan;
	struct dma_async_tx_descriptor *tx;
	dma_cookie_t cookie;
	unsigned long tmo = msecs_to_jiffies(dma_timeout);
	enum dma_transfer_direction dir;
	long wret;
	int ret = 0;

	priv = req->priv;
	sgt = &req->sgt;

	chan = priv->dmach;
	dir = (req->dir == DMA_FROM_DEVICE) ? DMA_DEV_TO_MEM : DMA_MEM_TO_DEV;

	rmcd_debug(DMA, "%s(%d) uses %s for DMA_%s",
		   current->comm, task_pid_nr(current),
		   dev_name(&chan->dev->device),
		   (dir == DMA_DEV_TO_MEM)?"READ":"WRITE");

	/* Initialize DMA transaction request */
	tx = prep_dma_xfer(chan, xfer, sgt, nents, dir,
			   DMA_CTRL_ACK | DMA_PREP_INTERRUPT);

	if (!tx) {
		rmcd_debug(DMA, "prep error for %s A:0x%llx L:0x%llx",
			   (dir == DMA_DEV_TO_MEM)?"READ":"WRITE",
			   xfer->rio_addr, xfer->length);
		ret = -EIO;
		goto err_out;
	} else if (IS_ERR(tx)) {
		ret = PTR_ERR(tx);
		rmcd_debug(DMA, "prep error %d for %s A:0x%llx L:0x%llx", ret,
			   (dir == DMA_DEV_TO_MEM)?"READ":"WRITE",
			   xfer->rio_addr, xfer->length);
		goto err_out;
	}

	tx->callback = dma_xfer_callback;
	tx->callback_param = req;

	req->status = DMA_IN_PROGRESS;
	kref_get(&req->refcount);

	cookie = dmaengine_submit(tx);
	req->cookie = cookie;

	rmcd_debug(DMA, "pid=%d DMA_%s tx_cookie = %d", task_pid_nr(current),
		   (dir == DMA_DEV_TO_MEM)?"READ":"WRITE", cookie);

	if (dma_submit_error(cookie)) {
		rmcd_error("submit err=%d (addr:0x%llx len:0x%llx)",
			   cookie, xfer->rio_addr, xfer->length);
		kref_put(&req->refcount, dma_req_free);
		ret = -EIO;
		goto err_out;
	}

	dma_async_issue_pending(chan);

	if (sync == RIO_TRANSFER_ASYNC) {
		spin_lock(&priv->req_lock);
		list_add_tail(&req->node, &priv->async_list);
		spin_unlock(&priv->req_lock);
		return cookie;
	} else if (sync == RIO_TRANSFER_FAF)
		return 0;

	wret = wait_for_completion_interruptible_timeout(&req->req_comp, tmo);

	if (wret == 0) {
		/* Timeout on wait occurred */
		rmcd_error("%s(%d) timed out waiting for DMA_%s %d",
			   current->comm, task_pid_nr(current),
			   (dir == DMA_DEV_TO_MEM)?"READ":"WRITE", cookie);
		return -ETIMEDOUT;
	} else if (wret == -ERESTARTSYS) {
		/* wait_for_completion() was interrupted by a signal, but the
		 * DMA may still be in progress
		 */
		rmcd_error("%s(%d) wait for DMA_%s %d was interrupted",
			   current->comm, task_pid_nr(current),
			   (dir == DMA_DEV_TO_MEM)?"READ":"WRITE", cookie);
		return -EINTR;
	}

	if (req->status != DMA_COMPLETE) {
		/* DMA transaction completion was signaled with error */
		rmcd_error("%s(%d) DMA_%s %d completed with status %d (ret=%d)",
			   current->comm, task_pid_nr(current),
			   (dir == DMA_DEV_TO_MEM)?"READ":"WRITE",
			   cookie, req->status, ret);
		ret = -EIO;
	}

err_out:
	return ret;
}

/*
 * rio_dma_transfer() - Perform RapidIO DMA data transfer to/from
 *                      the remote RapidIO device
 * @filp: file pointer associated with the call
 * @transfer_mode: DMA transfer mode
 * @sync: synchronization mode
 * @dir: DMA transfer direction (DMA_MEM_TO_DEV = write OR
 *       DMA_DEV_TO_MEM = read)
 * @xfer: data transfer descriptor structure
 */
static int
rio_dma_transfer(struct file *filp, u32 transfer_mode,
		 enum rio_transfer_sync sync, enum dma_data_direction dir,
		 struct rio_transfer_io *xfer)
{
	struct mport_cdev_priv *priv = filp->private_data;
	unsigned long nr_pages = 0;
	struct page **page_list = NULL;
	struct mport_dma_req *req;
	struct mport_dev *md = priv->md;
	struct dma_chan *chan;
	int i, ret;
	int nents;

	if (xfer->length == 0)
		return -EINVAL;
	req = kzalloc(sizeof(*req), GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	ret = get_dma_channel(priv);
	if (ret) {
		kfree(req);
		return ret;
	}
	chan = priv->dmach;

	kref_init(&req->refcount);
	init_completion(&req->req_comp);
	req->dir = dir;
	req->filp = filp;
	req->priv = priv;
	req->dmach = chan;
	req->sync = sync;

	/*
	 * If parameter loc_addr != NULL, we are transferring data from/to
	 * data buffer allocated in user-space: lock in memory user-space
	 * buffer pages and build an SG table for DMA transfer request
	 *
	 * Otherwise (loc_addr == NULL) contiguous kernel-space buffer is
	 * used for DMA data transfers: build single entry SG table using
	 * offset within the internal buffer specified by handle parameter.
	 */
	if (xfer->loc_addr) {
		unsigned int offset;
		long pinned;

		offset = lower_32_bits(offset_in_page(xfer->loc_addr));
		nr_pages = PAGE_ALIGN(xfer->length + offset) >> PAGE_SHIFT;

		page_list = kmalloc_array(nr_pages,
					  sizeof(*page_list), GFP_KERNEL);
		if (page_list == NULL) {
			ret = -ENOMEM;
			goto err_req;
		}

		pinned = get_user_pages_fast(
				(unsigned long)xfer->loc_addr & PAGE_MASK,
				nr_pages,
				dir == DMA_FROM_DEVICE ? FOLL_WRITE : 0,
				page_list);

		if (pinned != nr_pages) {
			if (pinned < 0) {
				rmcd_error("get_user_pages_fast err=%ld",
					   pinned);
				nr_pages = 0;
			} else
				rmcd_error("pinned %ld out of %ld pages",
					   pinned, nr_pages);
			ret = -EFAULT;
			/*
			 * Set nr_pages up to mean "how many pages to unpin,
			 * in the error handler":
			 */
			nr_pages = pinned;
			goto err_pg;
		}

		ret = sg_alloc_table_from_pages(&req->sgt, page_list, nr_pages,
				offset, xfer->length, GFP_KERNEL);
		if (ret) {
			rmcd_error("sg_alloc_table failed with err=%d", ret);
			goto err_pg;
		}

		req->page_list = page_list;
		req->nr_pages = nr_pages;
	} else {
		dma_addr_t baddr;
		struct rio_mport_mapping *map;

		baddr = (dma_addr_t)xfer->handle;

		mutex_lock(&md->buf_mutex);
		list_for_each_entry(map, &md->mappings, node) {
			if (baddr >= map->phys_addr &&
			    baddr < (map->phys_addr + map->size)) {
				kref_get(&map->ref);
				req->map = map;
				break;
			}
		}
		mutex_unlock(&md->buf_mutex);

		if (req->map == NULL) {
			ret = -ENOMEM;
			goto err_req;
		}

		if (xfer->length + xfer->offset > map->size) {
			ret = -EINVAL;
			goto err_req;
		}

		ret = sg_alloc_table(&req->sgt, 1, GFP_KERNEL);
		if (unlikely(ret)) {
			rmcd_error("sg_alloc_table failed for internal buf");
			goto err_req;
		}

		sg_set_buf(req->sgt.sgl,
			   map->virt_addr + (baddr - map->phys_addr) +
				xfer->offset, xfer->length);
	}

	nents = dma_map_sg(chan->device->dev,
			   req->sgt.sgl, req->sgt.nents, dir);
	if (nents == 0) {
		rmcd_error("Failed to map SG list");
		ret = -EFAULT;
		goto err_pg;
	}

	ret = do_dma_request(req, xfer, sync, nents);

	if (ret >= 0) {
		if (sync == RIO_TRANSFER_ASYNC)
			return ret; /* return ASYNC cookie */
	} else {
		rmcd_debug(DMA, "do_dma_request failed with err=%d", ret);
	}

err_pg:
	if (!req->page_list) {
		for (i = 0; i < nr_pages; i++)
			put_page(page_list[i]);
		kfree(page_list);
	}
err_req:
	kref_put(&req->refcount, dma_req_free);
	return ret;
}

static int rio_mport_transfer_ioctl(struct file *filp, void __user *arg)
{
	struct mport_cdev_priv *priv = filp->private_data;
	struct rio_transaction transaction;
	struct rio_transfer_io *transfer;
	enum dma_data_direction dir;
	int i, ret = 0;

	if (unlikely(copy_from_user(&transaction, arg, sizeof(transaction))))
		return -EFAULT;

	if (transaction.count != 1) /* only single transfer for now */
		return -EINVAL;

	if ((transaction.transfer_mode &
	     priv->md->properties.transfer_mode) == 0)
		return -ENODEV;

	transfer = vmalloc(array_size(sizeof(*transfer), transaction.count));
	if (!transfer)
		return -ENOMEM;

	if (unlikely(copy_from_user(transfer,
				    (void __user *)(uintptr_t)transaction.block,
				    transaction.count * sizeof(*transfer)))) {
		ret = -EFAULT;
		goto out_free;
	}

	dir = (transaction.dir == RIO_TRANSFER_DIR_READ) ?
					DMA_FROM_DEVICE : DMA_TO_DEVICE;
	for (i = 0; i < transaction.count && ret == 0; i++)
		ret = rio_dma_transfer(filp, transaction.transfer_mode,
				       transaction.sync, dir, &transfer[i]);

	if (unlikely(copy_to_user((void __user *)(uintptr_t)transaction.block,
				  transfer,
				  transaction.count * sizeof(*transfer))))
		ret = -EFAULT;

out_free:
	vfree(transfer);

	return ret;
}
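
/*
 * Illustrative user-space sketch for the transfer path above (assumptions,
 * not part of the driver: RIO_TRANSFER is the ioctl bound to this handler
 * in <linux/rio_mport_cdev.h>, and the RIO_TRANSFER_* mode/sync/direction
 * constants are taken from that header; only the fields referenced by the
 * handler are shown; error handling omitted):
 *
 *	struct rio_transfer_io xfer = {0};
 *	struct rio_transaction trans = {0};
 *
 *	xfer.rioid = 5;                       // destination device ID
 *	xfer.rio_addr = 0x10000000;           // remote RapidIO address
 *	xfer.loc_addr = (uintptr_t)user_buf;  // user-space buffer
 *	xfer.length = buf_len;
 *
 *	trans.block = (uintptr_t)&xfer;
 *	trans.count = 1;                      // only 1 is accepted above
 *	trans.transfer_mode = RIO_TRANSFER_MODE_TRANSFER;
 *	trans.sync = RIO_TRANSFER_SYNC;
 *	trans.dir = RIO_TRANSFER_DIR_WRITE;
 *	ioctl(fd, RIO_TRANSFER, &trans);
 */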

static int rio_mport_wait_for_async_dma(struct file *filp, void __user *arg)
{
	struct mport_cdev_priv *priv;
	struct rio_async_tx_wait w_param;
	struct mport_dma_req *req;
	dma_cookie_t cookie;
	unsigned long tmo;
	long wret;
	int found = 0;
	int ret;

	priv = (struct mport_cdev_priv *)filp->private_data;

	if (unlikely(copy_from_user(&w_param, arg, sizeof(w_param))))
		return -EFAULT;

	cookie = w_param.token;
	if (w_param.timeout)
		tmo = msecs_to_jiffies(w_param.timeout);
	else /* Use default DMA timeout */
		tmo = msecs_to_jiffies(dma_timeout);

	spin_lock(&priv->req_lock);
	list_for_each_entry(req, &priv->async_list, node) {
		if (req->cookie == cookie) {
			list_del(&req->node);
			found = 1;
			break;
		}
	}
	spin_unlock(&priv->req_lock);

	if (!found)
		return -EAGAIN;

	wret = wait_for_completion_interruptible_timeout(&req->req_comp, tmo);

	if (wret == 0) {
		/* Timeout on wait occurred */
		rmcd_error("%s(%d) timed out waiting for ASYNC DMA_%s",
			   current->comm, task_pid_nr(current),
			   (req->dir == DMA_FROM_DEVICE)?"READ":"WRITE");
		ret = -ETIMEDOUT;
		goto err_tmo;
	} else if (wret == -ERESTARTSYS) {
		/* wait_for_completion() was interrupted by a signal, but the
		 * DMA may still be in progress
		 */
		rmcd_error("%s(%d) wait for ASYNC DMA_%s was interrupted",
			   current->comm, task_pid_nr(current),
			   (req->dir == DMA_FROM_DEVICE)?"READ":"WRITE");
		ret = -EINTR;
		goto err_tmo;
	}

	if (req->status != DMA_COMPLETE) {
		/* DMA transaction completion signaled with transfer error */
		rmcd_error("%s(%d) ASYNC DMA_%s completion with status %d",
			   current->comm, task_pid_nr(current),
			   (req->dir == DMA_FROM_DEVICE)?"READ":"WRITE",
			   req->status);
		ret = -EIO;
	} else
		ret = 0;

	if (req->status != DMA_IN_PROGRESS && req->status != DMA_PAUSED)
		kref_put(&req->refcount, dma_req_free);

	return ret;

err_tmo:
	/* Return request back into async queue */
	spin_lock(&priv->req_lock);
	list_add_tail(&req->node, &priv->async_list);
	spin_unlock(&priv->req_lock);
	return ret;
}

static int rio_mport_create_dma_mapping(struct mport_dev *md, struct file *filp,
			u64 size, struct rio_mport_mapping **mapping)
{
	struct rio_mport_mapping *map;

	map = kzalloc(sizeof(*map), GFP_KERNEL);
	if (map == NULL)
		return -ENOMEM;

	map->virt_addr = dma_alloc_coherent(md->mport->dev.parent, size,
					    &map->phys_addr, GFP_KERNEL);
	if (map->virt_addr == NULL) {
		kfree(map);
		return -ENOMEM;
	}

	map->dir = MAP_DMA;
	map->size = size;
	map->filp = filp;
	map->md = md;
	kref_init(&map->ref);
	mutex_lock(&md->buf_mutex);
	list_add_tail(&map->node, &md->mappings);
	mutex_unlock(&md->buf_mutex);
	*mapping = map;

	return 0;
}

static int rio_mport_alloc_dma(struct file *filp, void __user *arg)
{
	struct mport_cdev_priv *priv = filp->private_data;
	struct mport_dev *md = priv->md;
	struct rio_dma_mem map;
	struct rio_mport_mapping *mapping = NULL;
	int ret;

	if (unlikely(copy_from_user(&map, arg, sizeof(map))))
		return -EFAULT;

	ret = rio_mport_create_dma_mapping(md, filp, map.length, &mapping);
	if (ret)
		return ret;

	map.dma_handle = mapping->phys_addr;

	if (unlikely(copy_to_user(arg, &map, sizeof(map)))) {
		mutex_lock(&md->buf_mutex);
		kref_put(&mapping->ref, mport_release_mapping);
		mutex_unlock(&md->buf_mutex);
		return -EFAULT;
	}

	return 0;
}
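
/*
 * Illustrative user-space sketch for the DMA buffer allocation above
 * (assumptions, not part of the driver: RIO_ALLOC_DMA is the ioctl bound
 * to this handler in <linux/rio_mport_cdev.h>; only the fields referenced
 * by the handler are shown; error handling omitted):
 *
 *	struct rio_dma_mem dbuf = {0};
 *	dbuf.length = 0x10000;           // requested buffer size
 *	ioctl(fd, RIO_ALLOC_DMA, &dbuf);
 *	// dbuf.dma_handle now identifies the buffer, e.g. for the free path
 */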

static int rio_mport_free_dma(struct file *filp, void __user *arg)
{
	struct mport_cdev_priv *priv = filp->private_data;
	struct mport_dev *md = priv->md;
	u64 handle;
	int ret = -EFAULT;
	struct rio_mport_mapping *map, *_map;

	if (copy_from_user(&handle, arg, sizeof(handle)))
		return -EFAULT;
	rmcd_debug(EXIT, "filp=%p", filp);

	mutex_lock(&md->buf_mutex);
	list_for_each_entry_safe(map, _map, &md->mappings, node) {
		if (map->dir == MAP_DMA && map->phys_addr == handle &&
		    map->filp == filp) {
			kref_put(&map->ref, mport_release_mapping);
			ret = 0;
			break;
		}
	}
	mutex_unlock(&md->buf_mutex);

	if (ret == -EFAULT) {
		rmcd_debug(DMA, "ERR no matching mapping");
		return ret;
	}

	return 0;
}
#else
static int rio_mport_transfer_ioctl(struct file *filp, void *arg)
{
	return -ENODEV;
}

static int rio_mport_wait_for_async_dma(struct file *filp, void __user *arg)
{
	return -ENODEV;
}

static int rio_mport_alloc_dma(struct file *filp, void __user *arg)
{
	return -ENODEV;
}

static int rio_mport_free_dma(struct file *filp, void __user *arg)
{
	return -ENODEV;
}
#endif /* CONFIG_RAPIDIO_DMA_ENGINE */

/*
 * Inbound/outbound memory mapping functions
 */

static int
rio_mport_create_inbound_mapping(struct mport_dev *md, struct file *filp,
				 u64 raddr, u64 size,
				 struct rio_mport_mapping **mapping)
{
	struct rio_mport *mport = md->mport;
	struct rio_mport_mapping *map;
	int ret;

	/* rio_map_inb_region() accepts u32 size */
	if (size > 0xffffffff)
		return -EINVAL;

	map = kzalloc(sizeof(*map), GFP_KERNEL);
	if (map == NULL)
		return -ENOMEM;

	map->virt_addr = dma_alloc_coherent(mport->dev.parent, size,
					    &map->phys_addr, GFP_KERNEL);
	if (map->virt_addr == NULL) {
		ret = -ENOMEM;
		goto err_dma_alloc;
	}

	if (raddr == RIO_MAP_ANY_ADDR)
		raddr = map->phys_addr;
	ret = rio_map_inb_region(mport, map->phys_addr, raddr, (u32)size, 0);
	if (ret < 0)
		goto err_map_inb;

	map->dir = MAP_INBOUND;
	map->rio_addr = raddr;
	map->size = size;
	map->filp = filp;
	map->md = md;
	kref_init(&map->ref);
	mutex_lock(&md->buf_mutex);
	list_add_tail(&map->node, &md->mappings);
	mutex_unlock(&md->buf_mutex);
	*mapping = map;
	return 0;

err_map_inb:
	dma_free_coherent(mport->dev.parent, size,
			  map->virt_addr, map->phys_addr);
err_dma_alloc:
	kfree(map);
	return ret;
}

static int
rio_mport_get_inbound_mapping(struct mport_dev *md, struct file *filp,
			      u64 raddr, u64 size,
			      struct rio_mport_mapping **mapping)
{
	struct rio_mport_mapping *map;
	int err = -ENOMEM;

	if (raddr == RIO_MAP_ANY_ADDR)
		goto get_new;

	mutex_lock(&md->buf_mutex);
	list_for_each_entry(map, &md->mappings, node) {
		if (map->dir != MAP_INBOUND)
			continue;
		if (raddr == map->rio_addr && size == map->size) {
			/* allow exact match only */
			*mapping = map;
			err = 0;
			break;
		} else if (raddr < (map->rio_addr + map->size - 1) &&
			   (raddr + size) > map->rio_addr) {
			err = -EBUSY;
			break;
		}
	}
	mutex_unlock(&md->buf_mutex);

	if (err != -ENOMEM)
		return err;
get_new:
	/* not found, create new */
	return rio_mport_create_inbound_mapping(md, filp, raddr, size, mapping);
}

static int rio_mport_map_inbound(struct file *filp, void __user *arg)
{
	struct mport_cdev_priv *priv = filp->private_data;
	struct mport_dev *md = priv->md;
	struct rio_mmap map;
	struct rio_mport_mapping *mapping = NULL;
	int ret;

	if (!md->mport->ops->map_inb)
		return -EPROTONOSUPPORT;
	if (unlikely(copy_from_user(&map, arg, sizeof(map))))
		return -EFAULT;

	rmcd_debug(IBW, "%s filp=%p", dev_name(&priv->md->dev), filp);

	ret = rio_mport_get_inbound_mapping(md, filp, map.rio_addr,
					    map.length, &mapping);
	if (ret)
		return ret;

	map.handle = mapping->phys_addr;
	map.rio_addr = mapping->rio_addr;

	if (unlikely(copy_to_user(arg, &map, sizeof(map)))) {
		/* Delete mapping if it was created by this request */
		if (ret == 0 && mapping->filp == filp) {
			mutex_lock(&md->buf_mutex);
			kref_put(&mapping->ref, mport_release_mapping);
			mutex_unlock(&md->buf_mutex);
		}
		return -EFAULT;
	}

	return 0;
}
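
/*
 * Illustrative user-space sketch for the inbound-window path above
 * (assumptions, not part of the driver: RIO_MAP_INBOUND is the ioctl bound
 * to this handler in <linux/rio_mport_cdev.h>; error handling omitted):
 *
 *	struct rio_mmap map = {0};
 *	map.rio_addr = RIO_MAP_ANY_ADDR;  // let the driver pick the address
 *	map.length = 0x10000;
 *	ioctl(fd, RIO_MAP_INBOUND, &map);
 *	// map.handle (physical address) and map.rio_addr are filled in
 */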

/*
 * rio_mport_inbound_free() - unmap from RapidIO address space and free
 *                    previously allocated inbound DMA coherent buffer
 * @filp: file pointer associated with the call
 * @arg: buffer handle returned by allocation routine
 */
static int rio_mport_inbound_free(struct file *filp, void __user *arg)
{
	struct mport_cdev_priv *priv = filp->private_data;
	struct mport_dev *md = priv->md;
	u64 handle;
	struct rio_mport_mapping *map, *_map;

	rmcd_debug(IBW, "%s filp=%p", dev_name(&priv->md->dev), filp);

	if (!md->mport->ops->unmap_inb)
		return -EPROTONOSUPPORT;

	if (copy_from_user(&handle, arg, sizeof(handle)))
		return -EFAULT;

	mutex_lock(&md->buf_mutex);
	list_for_each_entry_safe(map, _map, &md->mappings, node) {
		if (map->dir == MAP_INBOUND && map->phys_addr == handle) {
			if (map->filp == filp) {
				map->filp = NULL;
				kref_put(&map->ref, mport_release_mapping);
			}
			break;
		}
	}
	mutex_unlock(&md->buf_mutex);

	return 0;
}

/*
 * maint_port_idx_get() - Get the port index of the mport instance
 * @priv: driver private data
 * @arg: port index
 */
static int maint_port_idx_get(struct mport_cdev_priv *priv, void __user *arg)
{
	struct mport_dev *md = priv->md;
	u32 port_idx = md->mport->index;

	rmcd_debug(MPORT, "port_index=%d", port_idx);

	if (copy_to_user(arg, &port_idx, sizeof(port_idx)))
		return -EFAULT;

	return 0;
}

static int rio_mport_add_event(struct mport_cdev_priv *priv,
			       struct rio_event *event)
{
	int overflow;

	if (!(priv->event_mask & event->header))
		return -EACCES;

	spin_lock(&priv->fifo_lock);
	overflow = kfifo_avail(&priv->event_fifo) < sizeof(*event)
		|| kfifo_in(&priv->event_fifo, (unsigned char *)event,
			sizeof(*event)) != sizeof(*event);
	spin_unlock(&priv->fifo_lock);

	wake_up_interruptible(&priv->event_rx_wait);

	if (overflow) {
		dev_warn(&priv->md->dev, DRV_NAME ": event fifo overflow\n");
		return -EBUSY;
	}

	return 0;
}

static void rio_mport_doorbell_handler(struct rio_mport *mport, void *dev_id,
				       u16 src, u16 dst, u16 info)
{
	struct mport_dev *data = dev_id;
	struct mport_cdev_priv *priv;
	struct rio_mport_db_filter *db_filter;
	struct rio_event event;
	int handled;

	event.header = RIO_DOORBELL;
	event.u.doorbell.rioid = src;
	event.u.doorbell.payload = info;

	handled = 0;
	spin_lock(&data->db_lock);
	list_for_each_entry(db_filter, &data->doorbells, data_node) {
		if (((db_filter->filter.rioid == RIO_INVALID_DESTID ||
		      db_filter->filter.rioid == src)) &&
		      info >= db_filter->filter.low &&
		      info <= db_filter->filter.high) {
			priv = db_filter->priv;
			rio_mport_add_event(priv, &event);
			handled = 1;
		}
	}
	spin_unlock(&data->db_lock);

	if (!handled)
		dev_warn(&data->dev,
			"%s: spurious DB received from 0x%x, info=0x%04x\n",
			__func__, src, info);
}

static int rio_mport_add_db_filter(struct mport_cdev_priv *priv,
				   void __user *arg)
{
	struct mport_dev *md = priv->md;
	struct rio_mport_db_filter *db_filter;
	struct rio_doorbell_filter filter;
	unsigned long flags;
	int ret;

	if (copy_from_user(&filter, arg, sizeof(filter)))
		return -EFAULT;

	if (filter.low > filter.high)
		return -EINVAL;

	ret = rio_request_inb_dbell(md->mport, md, filter.low, filter.high,
				    rio_mport_doorbell_handler);
	if (ret) {
		rmcd_error("%s failed to register IBDB, err=%d",
			   dev_name(&md->dev), ret);
		return ret;
	}

	db_filter = kzalloc(sizeof(*db_filter), GFP_KERNEL);
	if (db_filter == NULL) {
		rio_release_inb_dbell(md->mport, filter.low, filter.high);
		return -ENOMEM;
	}

	db_filter->filter = filter;
	db_filter->priv = priv;
	spin_lock_irqsave(&md->db_lock, flags);
	list_add_tail(&db_filter->priv_node, &priv->db_filters);
	list_add_tail(&db_filter->data_node, &md->doorbells);
	spin_unlock_irqrestore(&md->db_lock, flags);

	return 0;
}
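
/*
 * Illustrative user-space sketch for the doorbell filter above (assumptions,
 * not part of the driver: RIO_ENABLE_DOORBELL_RANGE is the ioctl bound to
 * this handler, as named in the switch in mport_cdev_ioctl(); the handler
 * matches filter.rioid against RIO_INVALID_DESTID to accept any sender;
 * error handling omitted):
 *
 *	struct rio_doorbell_filter filter = {0};
 *	filter.rioid = 0xffff;  // any sender (RIO_INVALID_DESTID in kernel)
 *	filter.low = 0;
 *	filter.high = 0xffff;
 *	ioctl(fd, RIO_ENABLE_DOORBELL_RANGE, &filter);
 */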

static void rio_mport_delete_db_filter(struct rio_mport_db_filter *db_filter)
{
	list_del(&db_filter->data_node);
	list_del(&db_filter->priv_node);
	kfree(db_filter);
}

static int rio_mport_remove_db_filter(struct mport_cdev_priv *priv,
				      void __user *arg)
{
	struct rio_mport_db_filter *db_filter;
	struct rio_doorbell_filter filter;
	unsigned long flags;
	int ret = -EINVAL;

	if (copy_from_user(&filter, arg, sizeof(filter)))
		return -EFAULT;

	if (filter.low > filter.high)
		return -EINVAL;

	spin_lock_irqsave(&priv->md->db_lock, flags);
	list_for_each_entry(db_filter, &priv->db_filters, priv_node) {
		if (db_filter->filter.rioid == filter.rioid &&
		    db_filter->filter.low == filter.low &&
		    db_filter->filter.high == filter.high) {
			rio_mport_delete_db_filter(db_filter);
			ret = 0;
			break;
		}
	}
	spin_unlock_irqrestore(&priv->md->db_lock, flags);

	if (!ret)
		rio_release_inb_dbell(priv->md->mport, filter.low, filter.high);

	return ret;
}

static int rio_mport_match_pw(union rio_pw_msg *msg,
			      struct rio_pw_filter *filter)
{
	if ((msg->em.comptag & filter->mask) < filter->low ||
		(msg->em.comptag & filter->mask) > filter->high)
		return 0;
	return 1;
}

static int rio_mport_pw_handler(struct rio_mport *mport, void *context,
				union rio_pw_msg *msg, int step)
{
	struct mport_dev *md = context;
	struct mport_cdev_priv *priv;
	struct rio_mport_pw_filter *pw_filter;
	struct rio_event event;
	int handled;

	event.header = RIO_PORTWRITE;
	memcpy(event.u.portwrite.payload, msg->raw, RIO_PW_MSG_SIZE);

	handled = 0;
	spin_lock(&md->pw_lock);
	list_for_each_entry(pw_filter, &md->portwrites, md_node) {
		if (rio_mport_match_pw(msg, &pw_filter->filter)) {
			priv = pw_filter->priv;
			rio_mport_add_event(priv, &event);
			handled = 1;
		}
	}
	spin_unlock(&md->pw_lock);

	if (!handled) {
		printk_ratelimited(KERN_WARNING DRV_NAME
			": mport%d received spurious PW from 0x%08x\n",
			mport->id, msg->em.comptag);
	}

	return 0;
}

static int rio_mport_add_pw_filter(struct mport_cdev_priv *priv,
				   void __user *arg)
{
	struct mport_dev *md = priv->md;
	struct rio_mport_pw_filter *pw_filter;
	struct rio_pw_filter filter;
	unsigned long flags;
	int hadd = 0;

	if (copy_from_user(&filter, arg, sizeof(filter)))
		return -EFAULT;

	pw_filter = kzalloc(sizeof(*pw_filter), GFP_KERNEL);
	if (pw_filter == NULL)
		return -ENOMEM;

	pw_filter->filter = filter;
	pw_filter->priv = priv;
	spin_lock_irqsave(&md->pw_lock, flags);
	if (list_empty(&md->portwrites))
		hadd = 1;
	list_add_tail(&pw_filter->priv_node, &priv->pw_filters);
	list_add_tail(&pw_filter->md_node, &md->portwrites);
	spin_unlock_irqrestore(&md->pw_lock, flags);

	if (hadd) {
		int ret;

		ret = rio_add_mport_pw_handler(md->mport, md,
					       rio_mport_pw_handler);
		if (ret) {
			dev_err(&md->dev,
				"%s: failed to add IB_PW handler, err=%d\n",
				__func__, ret);
			return ret;
		}
		rio_pw_enable(md->mport, 1);
	}

	return 0;
}

static void rio_mport_delete_pw_filter(struct rio_mport_pw_filter *pw_filter)
{
	list_del(&pw_filter->md_node);
	list_del(&pw_filter->priv_node);
	kfree(pw_filter);
}

static int rio_mport_match_pw_filter(struct rio_pw_filter *a,
				     struct rio_pw_filter *b)
{
	if ((a->mask == b->mask) && (a->low == b->low) && (a->high == b->high))
		return 1;
	return 0;
}

static int rio_mport_remove_pw_filter(struct mport_cdev_priv *priv,
				      void __user *arg)
{
	struct mport_dev *md = priv->md;
	struct rio_mport_pw_filter *pw_filter;
	struct rio_pw_filter filter;
	unsigned long flags;
	int ret = -EINVAL;
	int hdel = 0;

	if (copy_from_user(&filter, arg, sizeof(filter)))
		return -EFAULT;

	spin_lock_irqsave(&md->pw_lock, flags);
	list_for_each_entry(pw_filter, &priv->pw_filters, priv_node) {
		if (rio_mport_match_pw_filter(&pw_filter->filter, &filter)) {
			rio_mport_delete_pw_filter(pw_filter);
			ret = 0;
			break;
		}
	}

	if (list_empty(&md->portwrites))
		hdel = 1;
	spin_unlock_irqrestore(&md->pw_lock, flags);

	if (hdel) {
		rio_del_mport_pw_handler(md->mport, priv->md,
					 rio_mport_pw_handler);
		rio_pw_enable(md->mport, 0);
	}

	return ret;
}

/*
 * rio_release_dev - release routine for kernel RIO device object
 * @dev: kernel device object associated with a RIO device structure
 *
 * Frees the RIO device struct associated with the given kernel device
 * object.
 */
static void rio_release_dev(struct device *dev)
{
	struct rio_dev *rdev;

	rdev = to_rio_dev(dev);
	pr_info(DRV_PREFIX "%s: %s\n", __func__, rio_name(rdev));
	kfree(rdev);
}

static void rio_release_net(struct device *dev)
{
	struct rio_net *net;

	net = to_rio_net(dev);
	rmcd_debug(RDEV, "net_%d", net->id);
	kfree(net);
}

/*
 * rio_mport_add_riodev - creates a kernel RIO device object
 *
 * Allocates a RIO device data structure and initializes required fields based
 * on device's configuration space contents.
 * If the device has switch capabilities, then a switch specific portion is
 * allocated and configured.
 */
static int rio_mport_add_riodev(struct mport_cdev_priv *priv,
				void __user *arg)
{
	struct mport_dev *md = priv->md;
	struct rio_rdev_info dev_info;
	struct rio_dev *rdev;
	struct rio_switch *rswitch = NULL;
	struct rio_mport *mport;
	size_t size;
	u32 rval;
	u32 swpinfo = 0;
	u16 destid;
	u8 hopcount;
	int err;

	if (copy_from_user(&dev_info, arg, sizeof(dev_info)))
		return -EFAULT;
	dev_info.name[sizeof(dev_info.name) - 1] = '\0';

	rmcd_debug(RDEV, "name:%s ct:0x%x did:0x%x hc:0x%x", dev_info.name,
		   dev_info.comptag, dev_info.destid, dev_info.hopcount);

	if (bus_find_device_by_name(&rio_bus_type, NULL, dev_info.name)) {
		rmcd_debug(RDEV, "device %s already exists", dev_info.name);
		return -EEXIST;
	}

	size = sizeof(*rdev);
	mport = md->mport;
	destid = dev_info.destid;
	hopcount = dev_info.hopcount;

	if (rio_mport_read_config_32(mport, destid, hopcount,
				     RIO_PEF_CAR, &rval))
		return -EIO;

	if (rval & RIO_PEF_SWITCH) {
		rio_mport_read_config_32(mport, destid, hopcount,
					 RIO_SWP_INFO_CAR, &swpinfo);
		size += (RIO_GET_TOTAL_PORTS(swpinfo) *
			 sizeof(rswitch->nextdev[0])) + sizeof(*rswitch);
	}

	rdev = kzalloc(size, GFP_KERNEL);
	if (rdev == NULL)
		return -ENOMEM;

	if (mport->net == NULL) {
		struct rio_net *net;

		net = rio_alloc_net(mport);
		if (!net) {
			err = -ENOMEM;
			rmcd_debug(RDEV, "failed to allocate net object");
			goto cleanup;
		}

		net->id = mport->id;
		net->hport = mport;
		dev_set_name(&net->dev, "rnet_%d", net->id);
		net->dev.parent = &mport->dev;
		net->dev.release = rio_release_net;
		err = rio_add_net(net);
		if (err) {
			rmcd_debug(RDEV, "failed to register net, err=%d", err);
			kfree(net);
			goto cleanup;
		}
	}

	rdev->net = mport->net;
	rdev->pef = rval;
	rdev->swpinfo = swpinfo;
	rio_mport_read_config_32(mport, destid, hopcount,
				 RIO_DEV_ID_CAR, &rval);
	rdev->did = rval >> 16;
	rdev->vid = rval & 0xffff;
	rio_mport_read_config_32(mport, destid, hopcount, RIO_DEV_INFO_CAR,
				 &rdev->device_rev);
	rio_mport_read_config_32(mport, destid, hopcount, RIO_ASM_ID_CAR,
				 &rval);
	rdev->asm_did = rval >> 16;
	rdev->asm_vid = rval & 0xffff;
	rio_mport_read_config_32(mport, destid, hopcount, RIO_ASM_INFO_CAR,
				 &rval);
	rdev->asm_rev = rval >> 16;

	if (rdev->pef & RIO_PEF_EXT_FEATURES) {
		rdev->efptr = rval & 0xffff;
		rdev->phys_efptr = rio_mport_get_physefb(mport, 0, destid,
						hopcount, &rdev->phys_rmap);

		rdev->em_efptr = rio_mport_get_feature(mport, 0, destid,
						hopcount, RIO_EFB_ERR_MGMNT);
	}

	rio_mport_read_config_32(mport, destid, hopcount, RIO_SRC_OPS_CAR,
				 &rdev->src_ops);
	rio_mport_read_config_32(mport, destid, hopcount, RIO_DST_OPS_CAR,
				 &rdev->dst_ops);

	rdev->comp_tag = dev_info.comptag;
	rdev->destid = destid;
	/* hopcount is stored as specified by a caller, regardless of EP or SW */
	rdev->hopcount = hopcount;

	if (rdev->pef & RIO_PEF_SWITCH) {
		rswitch = rdev->rswitch;
		rswitch->route_table = NULL;
	}

	if (strlen(dev_info.name))
		dev_set_name(&rdev->dev, "%s", dev_info.name);
	else if (rdev->pef & RIO_PEF_SWITCH)
		dev_set_name(&rdev->dev, "%02x:s:%04x", mport->id,
			     rdev->comp_tag & RIO_CTAG_UDEVID);
	else
		dev_set_name(&rdev->dev, "%02x:e:%04x", mport->id,
			     rdev->comp_tag & RIO_CTAG_UDEVID);

	INIT_LIST_HEAD(&rdev->net_list);
	rdev->dev.parent = &mport->net->dev;
	rio_attach_device(rdev);
	rdev->dev.release = rio_release_dev;

	if (rdev->dst_ops & RIO_DST_OPS_DOORBELL)
		rio_init_dbell_res(&rdev->riores[RIO_DOORBELL_RESOURCE],
				   0, 0xffff);
	err = rio_add_device(rdev);
	if (err)
		goto cleanup;
	rio_dev_get(rdev);

	return 0;
cleanup:
	kfree(rdev);
	return err;
}

static int rio_mport_del_riodev(struct mport_cdev_priv *priv, void __user *arg)
{
	struct rio_rdev_info dev_info;
	struct rio_dev *rdev = NULL;
	struct device *dev;
	struct rio_mport *mport;
	struct rio_net *net;

	if (copy_from_user(&dev_info, arg, sizeof(dev_info)))
		return -EFAULT;
	dev_info.name[sizeof(dev_info.name) - 1] = '\0';

	mport = priv->md->mport;

	/* If device name is specified, removal by name has priority */
	if (strlen(dev_info.name)) {
		dev = bus_find_device_by_name(&rio_bus_type, NULL,
					      dev_info.name);
		if (dev)
			rdev = to_rio_dev(dev);
	} else {
		do {
			rdev = rio_get_comptag(dev_info.comptag, rdev);
			if (rdev && rdev->dev.parent == &mport->net->dev &&
			    rdev->destid == dev_info.destid &&
			    rdev->hopcount == dev_info.hopcount)
				break;
		} while (rdev);
	}

	if (!rdev) {
		rmcd_debug(RDEV,
			"device name:%s ct:0x%x did:0x%x hc:0x%x not found",
			dev_info.name, dev_info.comptag, dev_info.destid,
			dev_info.hopcount);
		return -ENODEV;
	}

	net = rdev->net;
	rio_dev_put(rdev);
	rio_del_device(rdev, RIO_DEVICE_SHUTDOWN);

	if (list_empty(&net->devices)) {
		rio_free_net(net);
		mport->net = NULL;
	}

	return 0;
}

/*
 * Mport cdev management
 */

/*
 * mport_cdev_open() - Open character device (mport)
 */
static int mport_cdev_open(struct inode *inode, struct file *filp)
{
	int ret;
	int minor = iminor(inode);
	struct mport_dev *chdev;
	struct mport_cdev_priv *priv;

	/* Test for valid device */
	if (minor >= RIO_MAX_MPORTS) {
		rmcd_error("Invalid minor device number");
		return -EINVAL;
	}

	chdev = container_of(inode->i_cdev, struct mport_dev, cdev);

	rmcd_debug(INIT, "%s filp=%p", dev_name(&chdev->dev), filp);

	if (atomic_read(&chdev->active) == 0)
		return -ENODEV;

	get_device(&chdev->dev);

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv) {
		put_device(&chdev->dev);
		return -ENOMEM;
	}

	priv->md = chdev;

	mutex_lock(&chdev->file_mutex);
	list_add_tail(&priv->list, &chdev->file_list);
	mutex_unlock(&chdev->file_mutex);

	INIT_LIST_HEAD(&priv->db_filters);
	INIT_LIST_HEAD(&priv->pw_filters);
	spin_lock_init(&priv->fifo_lock);
	init_waitqueue_head(&priv->event_rx_wait);
	ret = kfifo_alloc(&priv->event_fifo,
			  sizeof(struct rio_event) * MPORT_EVENT_DEPTH,
			  GFP_KERNEL);
	if (ret < 0) {
		dev_err(&chdev->dev, DRV_NAME ": kfifo_alloc failed\n");
		ret = -ENOMEM;
		goto err_fifo;
	}

#ifdef CONFIG_RAPIDIO_DMA_ENGINE
	INIT_LIST_HEAD(&priv->async_list);
	spin_lock_init(&priv->req_lock);
	mutex_init(&priv->dma_lock);
#endif

	filp->private_data = priv;
	goto out;
err_fifo:
	kfree(priv);
out:
	return ret;
}

static int mport_cdev_fasync(int fd, struct file *filp, int mode)
{
	struct mport_cdev_priv *priv = filp->private_data;

	return fasync_helper(fd, filp, mode, &priv->async_queue);
}
1940
1941#ifdef CONFIG_RAPIDIO_DMA_ENGINE
1942static void mport_cdev_release_dma(struct file *filp)
1943{
1944 struct mport_cdev_priv *priv = filp->private_data;
1945 struct mport_dev *md;
1946 struct mport_dma_req *req, *req_next;
1947 unsigned long tmo = msecs_to_jiffies(dma_timeout);
1948 long wret;
1949 LIST_HEAD(list);
1950
1951 rmcd_debug(EXIT, "from filp=%p %s(%d)",
1952 filp, current->comm, task_pid_nr(current));
1953
1954 if (!priv->dmach) {
1955 rmcd_debug(EXIT, "No DMA channel for filp=%p", filp);
1956 return;
1957 }
1958
1959 md = priv->md;
1960
e8de3701
AB
1961 spin_lock(&priv->req_lock);
1962 if (!list_empty(&priv->async_list)) {
1963 rmcd_debug(EXIT, "async list not empty filp=%p %s(%d)",
1964 filp, current->comm, task_pid_nr(current));
1965 list_splice_init(&priv->async_list, &list);
1966 }
1967 spin_unlock(&priv->req_lock);
1968
1969 if (!list_empty(&list)) {
1970 rmcd_debug(EXIT, "temp list not empty");
1971 list_for_each_entry_safe(req, req_next, &list, node) {
1972 rmcd_debug(EXIT, "free req->filp=%p cookie=%d compl=%s",
1973 req->filp, req->cookie,
1974 completion_done(&req->req_comp)?"yes":"no");
1975 list_del(&req->node);
bbd876ad 1976 kref_put(&req->refcount, dma_req_free);
e8de3701
AB
1977 }
1978 }
1979
1980 put_dma_channel(priv);
1981 wret = wait_for_completion_interruptible_timeout(&priv->comp, tmo);
1982
1983 if (wret <= 0) {
1984 rmcd_error("%s(%d) failed waiting for DMA release err=%ld",
1985 current->comm, task_pid_nr(current), wret);
1986 }
1987
1988 if (priv->dmach != priv->md->dma_chan) {
1989 rmcd_debug(EXIT, "Release DMA channel for filp=%p %s(%d)",
1990 filp, current->comm, task_pid_nr(current));
1991 rio_release_dma(priv->dmach);
1992 } else {
1993 rmcd_debug(EXIT, "Adjust default DMA channel refcount");
1994 kref_put(&md->dma_ref, mport_release_def_dma);
1995 }
1996
1997 priv->dmach = NULL;
1998}
1999#else
2000#define mport_cdev_release_dma(filp) do {} while (0)
2001#endif
2002
2003/*
2004 * mport_cdev_release() - Release character device
2005 */
2006static int mport_cdev_release(struct inode *inode, struct file *filp)
2007{
2008 struct mport_cdev_priv *priv = filp->private_data;
2009 struct mport_dev *chdev;
2010 struct rio_mport_pw_filter *pw_filter, *pw_filter_next;
2011 struct rio_mport_db_filter *db_filter, *db_filter_next;
2012 struct rio_mport_mapping *map, *_map;
2013 unsigned long flags;
2014
2015 rmcd_debug(EXIT, "%s filp=%p", dev_name(&priv->md->dev), filp);
2016
2017 chdev = priv->md;
2018 mport_cdev_release_dma(filp);
2019
2020 priv->event_mask = 0;
2021
2022 spin_lock_irqsave(&chdev->pw_lock, flags);
2023 if (!list_empty(&priv->pw_filters)) {
2024 list_for_each_entry_safe(pw_filter, pw_filter_next,
2025 &priv->pw_filters, priv_node)
2026 rio_mport_delete_pw_filter(pw_filter);
2027 }
2028 spin_unlock_irqrestore(&chdev->pw_lock, flags);
2029
2030 spin_lock_irqsave(&chdev->db_lock, flags);
2031 list_for_each_entry_safe(db_filter, db_filter_next,
2032 &priv->db_filters, priv_node) {
2033 rio_mport_delete_db_filter(db_filter);
2034 }
2035 spin_unlock_irqrestore(&chdev->db_lock, flags);
2036
2037 kfifo_free(&priv->event_fifo);
2038
2039 mutex_lock(&chdev->buf_mutex);
2040 list_for_each_entry_safe(map, _map, &chdev->mappings, node) {
2041 if (map->filp == filp) {
2042 rmcd_debug(EXIT, "release mapping %p filp=%p",
2043 map->virt_addr, filp);
2044 kref_put(&map->ref, mport_release_mapping);
2045 }
2046 }
2047 mutex_unlock(&chdev->buf_mutex);
2048
2049 mport_cdev_fasync(-1, filp, 0);
2050 filp->private_data = NULL;
2051 mutex_lock(&chdev->file_mutex);
2052 list_del(&priv->list);
2053 mutex_unlock(&chdev->file_mutex);
2054 put_device(&chdev->dev);
2055 kfree(priv);
2056 return 0;
2057}
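/*
 * Teardown order in mport_cdev_release(), summarized: DMA state is drained
 * first, port-write and doorbell filters are deleted under their spinlocks,
 * the event fifo is freed, mappings owned by this file drop their kref, and
 * only then is priv unlinked from file_list and the device reference put.
 */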
2058
2059/*
2060 * mport_cdev_ioctl() - IOCTLs for character device
2061 */
2062static long mport_cdev_ioctl(struct file *filp,
2063 unsigned int cmd, unsigned long arg)
2064{
2065 int err = -EINVAL;
2066 struct mport_cdev_priv *data = filp->private_data;
2067 struct mport_dev *md = data->md;
2068
2069 if (atomic_read(&md->active) == 0)
2070 return -ENODEV;
2071
2072 switch (cmd) {
2073 case RIO_MPORT_MAINT_READ_LOCAL:
2074 return rio_mport_maint_rd(data, (void __user *)arg, 1);
2075 case RIO_MPORT_MAINT_WRITE_LOCAL:
2076 return rio_mport_maint_wr(data, (void __user *)arg, 1);
2077 case RIO_MPORT_MAINT_READ_REMOTE:
2078 return rio_mport_maint_rd(data, (void __user *)arg, 0);
2079 case RIO_MPORT_MAINT_WRITE_REMOTE:
2080 return rio_mport_maint_wr(data, (void __user *)arg, 0);
2081 case RIO_MPORT_MAINT_HDID_SET:
2082 return maint_hdid_set(data, (void __user *)arg);
2083 case RIO_MPORT_MAINT_COMPTAG_SET:
2084 return maint_comptag_set(data, (void __user *)arg);
2085 case RIO_MPORT_MAINT_PORT_IDX_GET:
2086 return maint_port_idx_get(data, (void __user *)arg);
2087 case RIO_MPORT_GET_PROPERTIES:
2088 md->properties.hdid = md->mport->host_deviceid;
2089 if (copy_to_user((void __user *)arg, &(md->properties),
2090 sizeof(md->properties)))
2091 return -EFAULT;
2092 return 0;
2093 case RIO_ENABLE_DOORBELL_RANGE:
2094 return rio_mport_add_db_filter(data, (void __user *)arg);
2095 case RIO_DISABLE_DOORBELL_RANGE:
2096 return rio_mport_remove_db_filter(data, (void __user *)arg);
2097 case RIO_ENABLE_PORTWRITE_RANGE:
2098 return rio_mport_add_pw_filter(data, (void __user *)arg);
2099 case RIO_DISABLE_PORTWRITE_RANGE:
2100 return rio_mport_remove_pw_filter(data, (void __user *)arg);
2101 case RIO_SET_EVENT_MASK:
2102 data->event_mask = (u32)arg;
2103 return 0;
2104 case RIO_GET_EVENT_MASK:
2105 if (copy_to_user((void __user *)arg, &data->event_mask,
2106 sizeof(u32)))
2107 return -EFAULT;
2108 return 0;
2109 case RIO_MAP_OUTBOUND:
2110 return rio_mport_obw_map(filp, (void __user *)arg);
2111 case RIO_MAP_INBOUND:
2112 return rio_mport_map_inbound(filp, (void __user *)arg);
2113 case RIO_UNMAP_OUTBOUND:
2114 return rio_mport_obw_free(filp, (void __user *)arg);
2115 case RIO_UNMAP_INBOUND:
2116 return rio_mport_inbound_free(filp, (void __user *)arg);
2117 case RIO_ALLOC_DMA:
2118 return rio_mport_alloc_dma(filp, (void __user *)arg);
2119 case RIO_FREE_DMA:
2120 return rio_mport_free_dma(filp, (void __user *)arg);
2121 case RIO_WAIT_FOR_ASYNC:
2122 return rio_mport_wait_for_async_dma(filp, (void __user *)arg);
2123 case RIO_TRANSFER:
2124 return rio_mport_transfer_ioctl(filp, (void __user *)arg);
2125 case RIO_DEV_ADD:
2126 return rio_mport_add_riodev(data, (void __user *)arg);
2127 case RIO_DEV_DEL:
2128 return rio_mport_del_riodev(data, (void __user *)arg);
2129 default:
2130 break;
2131 }
2132
2133 return err;
2134}
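/*
 * Illustrative userspace sketch (assumption: <linux/rio_mport_cdev.h>
 * exposes struct rio_mport_properties for the RIO_MPORT_GET_PROPERTIES
 * ioctl handled above):
 *
 *	struct rio_mport_properties props;
 *
 *	if (ioctl(fd, RIO_MPORT_GET_PROPERTIES, &props) == 0)
 *		printf("mport %u link_speed %u\n",
 *		       (unsigned int)props.id, (unsigned int)props.link_speed);
 */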
2135
2136/*
2137 * mport_release_mapping - free mapping resources and info structure
2138 * @ref: a pointer to the kref within struct rio_mport_mapping
2139 *
2140 * NOTE: Shall be called while holding buf_mutex.
2141 */
2142static void mport_release_mapping(struct kref *ref)
2143{
2144 struct rio_mport_mapping *map =
2145 container_of(ref, struct rio_mport_mapping, ref);
2146 struct rio_mport *mport = map->md->mport;
2147
2148 rmcd_debug(MMAP, "type %d mapping @ %p (phys = %pad) for %s",
2149 map->dir, map->virt_addr,
2150 &map->phys_addr, mport->name);
2151
2152 list_del(&map->node);
2153
2154 switch (map->dir) {
2155 case MAP_INBOUND:
2156 rio_unmap_inb_region(mport, map->phys_addr);
2157 /* fall through */
2158 case MAP_DMA:
2159 dma_free_coherent(mport->dev.parent, map->size,
2160 map->virt_addr, map->phys_addr);
2161 break;
2162 case MAP_OUTBOUND:
2163 rio_unmap_outb_region(mport, map->rioid, map->rio_addr);
2164 break;
2165 }
2166 kfree(map);
2167}
2168
2169static void mport_mm_open(struct vm_area_struct *vma)
2170{
2171 struct rio_mport_mapping *map = vma->vm_private_data;
2172
2173 rmcd_debug(MMAP, "%pad", &map->phys_addr);
2174 kref_get(&map->ref);
2175}
2176
2177static void mport_mm_close(struct vm_area_struct *vma)
2178{
2179 struct rio_mport_mapping *map = vma->vm_private_data;
2180
2181 rmcd_debug(MMAP, "%pad", &map->phys_addr);
2182 mutex_lock(&map->md->buf_mutex);
2183 kref_put(&map->ref, mport_release_mapping);
2184 mutex_unlock(&map->md->buf_mutex);
2185}
2186
2187static const struct vm_operations_struct vm_ops = {
2188 .open = mport_mm_open,
2189 .close = mport_mm_close,
2190};
2191
2192static int mport_cdev_mmap(struct file *filp, struct vm_area_struct *vma)
2193{
2194 struct mport_cdev_priv *priv = filp->private_data;
2195 struct mport_dev *md;
2196 size_t size = vma->vm_end - vma->vm_start;
2197 dma_addr_t baddr;
2198 unsigned long offset;
2199 int found = 0, ret;
2200 struct rio_mport_mapping *map;
2201
2202 rmcd_debug(MMAP, "0x%x bytes at offset 0x%lx",
2203 (unsigned int)size, vma->vm_pgoff);
2204
2205 md = priv->md;
2206 baddr = ((dma_addr_t)vma->vm_pgoff << PAGE_SHIFT);
2207
2208 mutex_lock(&md->buf_mutex);
2209 list_for_each_entry(map, &md->mappings, node) {
2210 if (baddr >= map->phys_addr &&
2211 baddr < (map->phys_addr + map->size)) {
2212 found = 1;
2213 break;
2214 }
2215 }
2216 mutex_unlock(&md->buf_mutex);
2217
2218 if (!found)
2219 return -ENOMEM;
2220
2221 offset = baddr - map->phys_addr;
2222
2223 if (size + offset > map->size)
2224 return -EINVAL;
2225
2226 vma->vm_pgoff = offset >> PAGE_SHIFT;
2227 rmcd_debug(MMAP, "MMAP adjusted offset = 0x%lx", vma->vm_pgoff);
2228
2229 if (map->dir == MAP_INBOUND || map->dir == MAP_DMA)
2230 ret = dma_mmap_coherent(md->mport->dev.parent, vma,
2231 map->virt_addr, map->phys_addr, map->size);
2232 else if (map->dir == MAP_OUTBOUND) {
2233 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
2234 ret = vm_iomap_memory(vma, map->phys_addr, map->size);
2235 } else {
2236 rmcd_error("Attempt to mmap unsupported mapping type");
2237 ret = -EIO;
2238 }
2239
2240 if (!ret) {
2241 vma->vm_private_data = map;
2242 vma->vm_ops = &vm_ops;
2243 mport_mm_open(vma);
2244 } else {
2245 rmcd_error("MMAP exit with err=%d", ret);
2246 }
2247
2248 return ret;
2249}
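/*
 * Illustrative userspace sketch of mapping a driver buffer (assumption:
 * RIO_ALLOC_DMA fills a struct rio_dma_mem whose dma_handle is the
 * physical address expected as the mmap() offset by the lookup above):
 *
 *	struct rio_dma_mem mem = { .length = 0x10000 };
 *	void *p;
 *
 *	if (ioctl(fd, RIO_ALLOC_DMA, &mem) == 0)
 *		p = mmap(NULL, mem.length, PROT_READ | PROT_WRITE,
 *			 MAP_SHARED, fd, mem.dma_handle);
 *
 * Offsets that do not fall inside an existing mapping fail with -ENOMEM.
 */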
2250
2251static __poll_t mport_cdev_poll(struct file *filp, poll_table *wait)
2252{
2253 struct mport_cdev_priv *priv = filp->private_data;
2254
2255 poll_wait(filp, &priv->event_rx_wait, wait);
2256 if (kfifo_len(&priv->event_fifo))
2257 return EPOLLIN | EPOLLRDNORM;
2258
2259 return 0;
2260}
2261
2262static ssize_t mport_read(struct file *filp, char __user *buf, size_t count,
2263 loff_t *ppos)
2264{
2265 struct mport_cdev_priv *priv = filp->private_data;
2266 int copied;
2267 ssize_t ret;
2268
2269 if (!count)
2270 return 0;
2271
2272 if (kfifo_is_empty(&priv->event_fifo) &&
2273 (filp->f_flags & O_NONBLOCK))
2274 return -EAGAIN;
2275
2276 if (count % sizeof(struct rio_event))
2277 return -EINVAL;
2278
2279 ret = wait_event_interruptible(priv->event_rx_wait,
2280 kfifo_len(&priv->event_fifo) != 0);
2281 if (ret)
2282 return ret;
2283
2284 while (ret < count) {
2285 if (kfifo_to_user(&priv->event_fifo, buf,
2286 sizeof(struct rio_event), &copied))
2287 return -EFAULT;
2288 ret += copied;
2289 buf += copied;
2290 }
2291
2292 return ret;
2293}
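/*
 * Illustrative userspace sketch of draining events; reads must be a
 * multiple of sizeof(struct rio_event) or mport_read() returns -EINVAL:
 *
 *	struct rio_event ev;
 *
 *	if (read(fd, &ev, sizeof(ev)) == sizeof(ev) &&
 *	    ev.header == RIO_DOORBELL)
 *		printf("doorbell 0x%x from %u\n",
 *		       ev.u.doorbell.payload, ev.u.doorbell.rioid);
 */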
2294
2295static ssize_t mport_write(struct file *filp, const char __user *buf,
2296 size_t count, loff_t *ppos)
2297{
2298 struct mport_cdev_priv *priv = filp->private_data;
2299 struct rio_mport *mport = priv->md->mport;
2300 struct rio_event event;
2301 int len, ret;
2302
2303 if (!count)
2304 return 0;
2305
2306 if (count % sizeof(event))
2307 return -EINVAL;
2308
2309 len = 0;
2310 while ((count - len) >= (int)sizeof(event)) {
2311 if (copy_from_user(&event, buf, sizeof(event)))
2312 return -EFAULT;
2313
2314 if (event.header != RIO_DOORBELL)
2315 return -EINVAL;
2316
2317 ret = rio_mport_send_doorbell(mport,
2318 event.u.doorbell.rioid,
2319 event.u.doorbell.payload);
2320 if (ret < 0)
2321 return ret;
2322
2323 len += sizeof(event);
2324 buf += sizeof(event);
2325 }
2326
2327 return len;
2328}
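/*
 * Illustrative userspace sketch of the write path above, which accepts
 * only RIO_DOORBELL events and rejects everything else with -EINVAL:
 *
 *	struct rio_event ev = { .header = RIO_DOORBELL };
 *
 *	ev.u.doorbell.rioid = 1;
 *	ev.u.doorbell.payload = 0x5a5a;
 *	if (write(fd, &ev, sizeof(ev)) != sizeof(ev))
 *		perror("doorbell write");
 */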
2329
2330static const struct file_operations mport_fops = {
2331 .owner = THIS_MODULE,
2332 .open = mport_cdev_open,
2333 .release = mport_cdev_release,
2334 .poll = mport_cdev_poll,
2335 .read = mport_read,
2336 .write = mport_write,
2337 .mmap = mport_cdev_mmap,
2338 .fasync = mport_cdev_fasync,
2339 .unlocked_ioctl = mport_cdev_ioctl
2340};
2341
2342/*
2343 * Character device management
2344 */
2345
2346static void mport_device_release(struct device *dev)
2347{
2348 struct mport_dev *md;
2349
2350 rmcd_debug(EXIT, "%s", dev_name(dev));
2351 md = container_of(dev, struct mport_dev, dev);
2352 kfree(md);
2353}
2354
2355/*
2356 * mport_cdev_add() - Create mport_dev from rio_mport
2357 * @mport: RapidIO master port
2358 */
2359static struct mport_dev *mport_cdev_add(struct rio_mport *mport)
2360{
2361 int ret = 0;
2362 struct mport_dev *md;
2363 struct rio_mport_attr attr;
2364
2365 md = kzalloc(sizeof(*md), GFP_KERNEL);
2366 if (!md) {
2367 rmcd_error("Unable to allocate a device object");
2368 return NULL;
2369 }
2370
2371 md->mport = mport;
2372 mutex_init(&md->buf_mutex);
2373 mutex_init(&md->file_mutex);
2374 INIT_LIST_HEAD(&md->file_list);
2375
2376 device_initialize(&md->dev);
2377 md->dev.devt = MKDEV(MAJOR(dev_number), mport->id);
2378 md->dev.class = dev_class;
2379 md->dev.parent = &mport->dev;
2380 md->dev.release = mport_device_release;
2381 dev_set_name(&md->dev, DEV_NAME "%d", mport->id);
2382 atomic_set(&md->active, 1);
2383
2384 cdev_init(&md->cdev, &mport_fops);
2385 md->cdev.owner = THIS_MODULE;
2386
2387 INIT_LIST_HEAD(&md->doorbells);
2388 spin_lock_init(&md->db_lock);
2389 INIT_LIST_HEAD(&md->portwrites);
2390 spin_lock_init(&md->pw_lock);
2391 INIT_LIST_HEAD(&md->mappings);
2392
2393 md->properties.id = mport->id;
2394 md->properties.sys_size = mport->sys_size;
2395 md->properties.hdid = mport->host_deviceid;
2396 md->properties.index = mport->index;
2397
2398 /* The transfer_mode property will be returned through the mport
2399 * query interface
2400 */
2401#ifdef CONFIG_FSL_RIO /* for now: only on Freescale's SoCs */
2402 md->properties.transfer_mode |= RIO_TRANSFER_MODE_MAPPED;
2403#else
2404 md->properties.transfer_mode |= RIO_TRANSFER_MODE_TRANSFER;
2405#endif
2406
2407 ret = cdev_device_add(&md->cdev, &md->dev);
2408 if (ret) {
2409 rmcd_error("Failed to register mport %d (err=%d)",
2410 mport->id, ret);
2411 goto err_cdev;
2412 }
2413 ret = rio_query_mport(mport, &attr);
2414 if (!ret) {
2415 md->properties.flags = attr.flags;
2416 md->properties.link_speed = attr.link_speed;
2417 md->properties.link_width = attr.link_width;
2418 md->properties.dma_max_sge = attr.dma_max_sge;
2419 md->properties.dma_max_size = attr.dma_max_size;
2420 md->properties.dma_align = attr.dma_align;
2421 md->properties.cap_sys_size = 0;
2422 md->properties.cap_transfer_mode = 0;
2423 md->properties.cap_addr_size = 0;
2424 } else
2425 pr_info(DRV_PREFIX "Failed to obtain info for %s cdev(%d:%d)\n",
2426 mport->name, MAJOR(dev_number), mport->id);
2427
2428 mutex_lock(&mport_devs_lock);
2429 list_add_tail(&md->node, &mport_devs);
2430 mutex_unlock(&mport_devs_lock);
2431
2432 pr_info(DRV_PREFIX "Added %s cdev(%d:%d)\n",
2433 mport->name, MAJOR(dev_number), mport->id);
2434
2435 return md;
2436
2437err_cdev:
2438 put_device(&md->dev);
2439 return NULL;
2440}
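/*
 * Note: every piece of state the file operation callbacks touch
 * (file_mutex, file_list, the doorbell/port-write lists and locks, the
 * properties block) is initialized before cdev_device_add() makes the
 * node visible, so an open() racing with registration cannot observe a
 * partially constructed mport_dev.
 */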
2441
2442/*
2443 * mport_cdev_terminate_dma() - Stop all active DMA data transfers and release
2444 * associated DMA channels.
2445 */
2446static void mport_cdev_terminate_dma(struct mport_dev *md)
2447{
2448#ifdef CONFIG_RAPIDIO_DMA_ENGINE
2449 struct mport_cdev_priv *client;
2450
2451 rmcd_debug(DMA, "%s", dev_name(&md->dev));
2452
2453 mutex_lock(&md->file_mutex);
2454 list_for_each_entry(client, &md->file_list, list) {
2455 if (client->dmach) {
2456 dmaengine_terminate_all(client->dmach);
2457 rio_release_dma(client->dmach);
2458 }
2459 }
2460 mutex_unlock(&md->file_mutex);
2461
2462 if (md->dma_chan) {
2463 dmaengine_terminate_all(md->dma_chan);
2464 rio_release_dma(md->dma_chan);
2465 md->dma_chan = NULL;
2466 }
2467#endif
2468}
2469
2470
2471/*
2472 * mport_cdev_kill_fasync() - Send SIGIO signal to all processes with open
2473 * mport_cdev files.
2474 */
2475static int mport_cdev_kill_fasync(struct mport_dev *md)
2476{
2477 unsigned int files = 0;
2478 struct mport_cdev_priv *client;
2479
2480 mutex_lock(&md->file_mutex);
2481 list_for_each_entry(client, &md->file_list, list) {
2482 if (client->async_queue)
2483 kill_fasync(&client->async_queue, SIGIO, POLL_HUP);
2484 files++;
2485 }
2486 mutex_unlock(&md->file_mutex);
2487 return files;
2488}
2489
2490/*
2491 * mport_cdev_remove() - Remove mport character device
2492 * @dev: Mport device to remove
2493 */
2494static void mport_cdev_remove(struct mport_dev *md)
2495{
2496 struct rio_mport_mapping *map, *_map;
2497
2498 rmcd_debug(EXIT, "Remove %s cdev", md->mport->name);
2499 atomic_set(&md->active, 0);
2500 mport_cdev_terminate_dma(md);
2501 rio_del_mport_pw_handler(md->mport, md, rio_mport_pw_handler);
2502 cdev_device_del(&md->cdev, &md->dev);
2503 mport_cdev_kill_fasync(md);
2504
2505 /* TODO: do we need to give clients some time to close file
2506 * descriptors? Simple wait for XX, or kref?
2507 */
2508
2509 /*
2510 * Release DMA buffers allocated for the mport device.
2511 * Disable associated inbound Rapidio requests mapping if applicable.
2512 */
2513 mutex_lock(&md->buf_mutex);
2514 list_for_each_entry_safe(map, _map, &md->mappings, node) {
2515 kref_put(&map->ref, mport_release_mapping);
2516 }
2517 mutex_unlock(&md->buf_mutex);
2518
2519 if (!list_empty(&md->mappings))
2520 rmcd_warn("%s pending mappings on removal",
2521 md->mport->name);
2522
2523 rio_release_inb_dbell(md->mport, 0, 0x0fff);
2524
2525 put_device(&md->dev);
2526}
2527
2528/*
2529 * RIO rio_mport_interface driver
2530 */
2531
2532/*
2533 * mport_add_mport() - Add rio_mport from LDM device struct
2534 * @dev: Linux device model struct
2535 * @class_intf: Linux class_interface
2536 */
2537static int mport_add_mport(struct device *dev,
2538 struct class_interface *class_intf)
2539{
2540 struct rio_mport *mport = NULL;
2541 struct mport_dev *chdev = NULL;
2542
2543 mport = to_rio_mport(dev);
2544 if (!mport)
2545 return -ENODEV;
2546
2547 chdev = mport_cdev_add(mport);
2548 if (!chdev)
2549 return -ENODEV;
2550
2551 return 0;
2552}
2553
2554/*
2555 * mport_remove_mport() - Remove the mport_dev matching this rio_mport
2556 * from the global mport_devs list and tear down its character device
2557 */
2558static void mport_remove_mport(struct device *dev,
2559 struct class_interface *class_intf)
2560{
2561 struct rio_mport *mport = NULL;
2562 struct mport_dev *chdev;
2563 int found = 0;
2564
2565 mport = to_rio_mport(dev);
2566 rmcd_debug(EXIT, "Remove %s", mport->name);
2567
2568 mutex_lock(&mport_devs_lock);
2569 list_for_each_entry(chdev, &mport_devs, node) {
2570 if (chdev->mport->id == mport->id) {
2571 atomic_set(&chdev->active, 0);
2572 list_del(&chdev->node);
2573 found = 1;
2574 break;
2575 }
2576 }
2577 mutex_unlock(&mport_devs_lock);
2578
2579 if (found)
2580 mport_cdev_remove(chdev);
2581}
2582
2583/* the rio_mport_interface is used to handle local mport devices */
2584static struct class_interface rio_mport_interface __refdata = {
2585 .class = &rio_mport_class,
2586 .add_dev = mport_add_mport,
2587 .remove_dev = mport_remove_mport,
2588};
2589
2590/*
2591 * Linux kernel module
2592 */
2593
2594/*
2595 * mport_init - Driver module loading
2596 */
2597static int __init mport_init(void)
2598{
2599 int ret;
2600
2601 /* Create device class needed by udev */
2602 dev_class = class_create(THIS_MODULE, DRV_NAME);
2603 if (IS_ERR(dev_class)) {
2604 rmcd_error("Unable to create " DRV_NAME " class");
2605 return PTR_ERR(dev_class);
2606 }
2607
2608 ret = alloc_chrdev_region(&dev_number, 0, RIO_MAX_MPORTS, DRV_NAME);
2609 if (ret < 0)
2610 goto err_chr;
2611
2612 rmcd_debug(INIT, "Registered class with major=%d", MAJOR(dev_number));
2613
2614 /* Register to rio_mport_interface */
2615 ret = class_interface_register(&rio_mport_interface);
2616 if (ret) {
2617 rmcd_error("class_interface_register() failed, err=%d", ret);
2618 goto err_cli;
2619 }
2620
2621 return 0;
2622
2623err_cli:
2624 unregister_chrdev_region(dev_number, RIO_MAX_MPORTS);
2625err_chr:
2626 class_destroy(dev_class);
2627 return ret;
2628}
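/*
 * Illustrative load-time sketch (assumption: the object is built as
 * rio_mport_cdev.ko, matching this file's name):
 *
 *	# modprobe rio_mport_cdev dma_timeout=5000
 *
 * The class created here is what lets udev populate /dev/rio_mportN when
 * mport_add_mport() registers each device.
 */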
2629
2630/*
2631 * mport_exit - Driver module unloading
2632 */
2633static void __exit mport_exit(void)
2634{
2635 class_interface_unregister(&rio_mport_interface);
2636 class_destroy(dev_class);
2637 unregister_chrdev_region(dev_number, RIO_MAX_MPORTS);
2638}
2639
2640module_init(mport_init);
2641module_exit(mport_exit);