rapidio: fix error handling path
drivers/rapidio/devices/rio_mport_cdev.c (linux-2.6-block.git)
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * RapidIO mport character device
 *
 * Copyright 2014-2015 Integrated Device Technology, Inc.
 *    Alexandre Bounine <alexandre.bounine@idt.com>
 * Copyright 2014-2015 Prodrive Technologies
 *    Andre van Herk <andre.van.herk@prodrive-technologies.com>
 *    Jerry Jacobs <jerry.jacobs@prodrive-technologies.com>
 * Copyright (C) 2014 Texas Instruments Incorporated
 *    Aurelien Jacquiot <a-jacquiot@ti.com>
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/cdev.h>
#include <linux/ioctl.h>
#include <linux/uaccess.h>
#include <linux/list.h>
#include <linux/fs.h>
#include <linux/err.h>
#include <linux/net.h>
#include <linux/poll.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/kfifo.h>

#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mman.h>

#include <linux/dma-mapping.h>
#ifdef CONFIG_RAPIDIO_DMA_ENGINE
#include <linux/dmaengine.h>
#endif

#include <linux/rio.h>
#include <linux/rio_ids.h>
#include <linux/rio_drv.h>
#include <linux/rio_mport_cdev.h>

#include "../rio.h"

#define DRV_NAME	"rio_mport"
#define DRV_PREFIX	DRV_NAME ": "
#define DEV_NAME	"rio_mport"
#define DRV_VERSION	"1.0.0"

/* Debug output filtering masks */
enum {
	DBG_NONE	= 0,
	DBG_INIT	= BIT(0), /* driver init */
	DBG_EXIT	= BIT(1), /* driver exit */
	DBG_MPORT	= BIT(2), /* mport add/remove */
	DBG_RDEV	= BIT(3), /* RapidIO device add/remove */
	DBG_DMA		= BIT(4), /* DMA transfer messages */
	DBG_MMAP	= BIT(5), /* mapping messages */
	DBG_IBW		= BIT(6), /* inbound window */
	DBG_EVENT	= BIT(7), /* event handling messages */
	DBG_OBW		= BIT(8), /* outbound window messages */
	DBG_DBELL	= BIT(9), /* doorbell messages */
	DBG_ALL		= ~0,
};

#ifdef DEBUG
#define rmcd_debug(level, fmt, arg...)		\
	do {					\
		if (DBG_##level & dbg_level)	\
			pr_debug(DRV_PREFIX "%s: " fmt "\n", __func__, ##arg); \
	} while (0)
#else
#define rmcd_debug(level, fmt, arg...) \
		no_printk(KERN_DEBUG pr_fmt(DRV_PREFIX fmt "\n"), ##arg)
#endif

#define rmcd_warn(fmt, arg...) \
	pr_warn(DRV_PREFIX "%s WARNING " fmt "\n", __func__, ##arg)

#define rmcd_error(fmt, arg...) \
	pr_err(DRV_PREFIX "%s ERROR " fmt "\n", __func__, ##arg)

MODULE_AUTHOR("Jerry Jacobs <jerry.jacobs@prodrive-technologies.com>");
MODULE_AUTHOR("Aurelien Jacquiot <a-jacquiot@ti.com>");
MODULE_AUTHOR("Alexandre Bounine <alexandre.bounine@idt.com>");
MODULE_AUTHOR("Andre van Herk <andre.van.herk@prodrive-technologies.com>");
MODULE_DESCRIPTION("RapidIO mport character device driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

static int dma_timeout = 3000; /* DMA transfer timeout in msec */
module_param(dma_timeout, int, S_IRUGO);
MODULE_PARM_DESC(dma_timeout, "DMA Transfer Timeout in msec (default: 3000)");

#ifdef DEBUG
static u32 dbg_level = DBG_NONE;
module_param(dbg_level, uint, S_IWUSR | S_IWGRP | S_IRUGO);
MODULE_PARM_DESC(dbg_level, "Debugging output level (default 0 = none)");
#endif

/*
 * An internal DMA coherent buffer
 */
struct mport_dma_buf {
	void		*ib_base;
	dma_addr_t	ib_phys;
	u32		ib_size;
	u64		ib_rio_base;
	bool		ib_map;
	struct file	*filp;
};

/*
 * Internal memory mapping structure
 */
enum rio_mport_map_dir {
	MAP_INBOUND,
	MAP_OUTBOUND,
	MAP_DMA,
};

struct rio_mport_mapping {
	struct list_head node;
	struct mport_dev *md;
	enum rio_mport_map_dir dir;
	u16 rioid;
	u64 rio_addr;
	dma_addr_t phys_addr; /* for mmap */
	void *virt_addr; /* kernel address, for dma_free_coherent */
	u64 size;
	struct kref ref; /* refcount of vmas sharing the mapping */
	struct file *filp;
};

struct rio_mport_dma_map {
	int valid;
	u64 length;
	void *vaddr;
	dma_addr_t paddr;
};

#define MPORT_MAX_DMA_BUFS	16
#define MPORT_EVENT_DEPTH	10

/*
 * mport_dev - driver-specific structure that represents mport device
 * @active	mport device status flag
 * @node	list node to maintain list of registered mports
 * @cdev	character device
 * @dev		associated device object
 * @mport	associated subsystem's master port device object
 * @buf_mutex	lock for buffer handling
 * @file_mutex	lock for open files list
 * @file_list	list of open files on given mport
 * @properties	properties of this mport
 * @doorbells	list of inbound doorbell filters
 * @db_lock	lock for doorbell filter list
 * @portwrites	queue of inbound portwrites
 * @pw_lock	lock for port write queue
 * @mappings	queue for memory mappings
 * @dma_chan	DMA channel associated with this device
 * @dma_ref	refcount for the default DMA channel
 * @comp	completion signaled when the default DMA channel is released
 */
struct mport_dev {
	atomic_t		active;
	struct list_head	node;
	struct cdev		cdev;
	struct device		dev;
	struct rio_mport	*mport;
	struct mutex		buf_mutex;
	struct mutex		file_mutex;
	struct list_head	file_list;
	struct rio_mport_properties	properties;
	struct list_head	doorbells;
	spinlock_t		db_lock;
	struct list_head	portwrites;
	spinlock_t		pw_lock;
	struct list_head	mappings;
#ifdef CONFIG_RAPIDIO_DMA_ENGINE
	struct dma_chan		*dma_chan;
	struct kref		dma_ref;
	struct completion	comp;
#endif
};

/*
 * mport_cdev_priv - data structure specific to individual file object
 *                   associated with an open device
 * @md		master port character device object
 * @async_queue	asynchronous notification queue
 * @list	file objects tracking list
 * @db_filters	inbound doorbell filters for this descriptor
 * @pw_filters	portwrite filters for this descriptor
 * @event_fifo	event fifo for this descriptor
 * @event_rx_wait	wait queue for this descriptor
 * @fifo_lock	lock for event_fifo
 * @event_mask	event mask for this descriptor
 * @dmach	DMA engine channel allocated for specific file object
 */
struct mport_cdev_priv {
	struct mport_dev	*md;
	struct fasync_struct	*async_queue;
	struct list_head	list;
	struct list_head	db_filters;
	struct list_head	pw_filters;
	struct kfifo		event_fifo;
	wait_queue_head_t	event_rx_wait;
	spinlock_t		fifo_lock;
	u32			event_mask; /* RIO_DOORBELL, RIO_PORTWRITE */
#ifdef CONFIG_RAPIDIO_DMA_ENGINE
	struct dma_chan		*dmach;
	struct list_head	async_list;
	spinlock_t		req_lock;
	struct mutex		dma_lock;
	struct kref		dma_ref;
	struct completion	comp;
#endif
};

/*
 * rio_mport_pw_filter - structure to describe a portwrite filter
 * @md_node	node in mport device's list
 * @priv_node	node in private file object's list
 * @priv	reference to private data
 * @filter	actual portwrite filter
 */
struct rio_mport_pw_filter {
	struct list_head md_node;
	struct list_head priv_node;
	struct mport_cdev_priv *priv;
	struct rio_pw_filter filter;
};

/*
 * rio_mport_db_filter - structure to describe a doorbell filter
 * @data_node	reference to device node
 * @priv_node	node in private data
 * @priv	reference to private data
 * @filter	actual doorbell filter
 */
struct rio_mport_db_filter {
	struct list_head data_node;
	struct list_head priv_node;
	struct mport_cdev_priv *priv;
	struct rio_doorbell_filter filter;
};

static LIST_HEAD(mport_devs);
static DEFINE_MUTEX(mport_devs_lock);

#if (0) /* used by commented out portion of poll function : FIXME */
static DECLARE_WAIT_QUEUE_HEAD(mport_cdev_wait);
#endif

static struct class *dev_class;
static dev_t dev_number;

static void mport_release_mapping(struct kref *ref);

static int rio_mport_maint_rd(struct mport_cdev_priv *priv, void __user *arg,
			      int local)
{
	struct rio_mport *mport = priv->md->mport;
	struct rio_mport_maint_io maint_io;
	u32 *buffer;
	u32 offset;
	size_t length;
	int ret, i;

	if (unlikely(copy_from_user(&maint_io, arg, sizeof(maint_io))))
		return -EFAULT;

	if ((maint_io.offset % 4) ||
	    (maint_io.length == 0) || (maint_io.length % 4) ||
	    (maint_io.length + maint_io.offset) > RIO_MAINT_SPACE_SZ)
		return -EINVAL;

	buffer = vmalloc(maint_io.length);
	if (buffer == NULL)
		return -ENOMEM;
	length = maint_io.length/sizeof(u32);
	offset = maint_io.offset;

	for (i = 0; i < length; i++) {
		if (local)
			ret = __rio_local_read_config_32(mport,
				offset, &buffer[i]);
		else
			ret = rio_mport_read_config_32(mport, maint_io.rioid,
				maint_io.hopcount, offset, &buffer[i]);
		if (ret)
			goto out;

		offset += 4;
	}

	if (unlikely(copy_to_user((void __user *)(uintptr_t)maint_io.buffer,
				  buffer, maint_io.length)))
		ret = -EFAULT;
out:
	vfree(buffer);
	return ret;
}

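/*
 * Example (userspace, illustrative only): reading a remote device's
 * maintenance space through the RIO_MPORT_MAINT_READ_REMOTE ioctl served
 * by the routine above.  This is a minimal sketch, not part of the
 * driver; it assumes the uapi definitions from <linux/rio_mport_cdev.h>
 * and an mport node such as /dev/rio_mport0 (destid and hopcount are
 * placeholders).
 *
 *	uint32_t regs[4];	// receives the first CARs (offset 0..0xc)
 *	struct rio_mport_maint_io io = {
 *		.rioid    = destid,		// destID of remote device
 *		.hopcount = hopcount,		// hops through switches
 *		.offset   = 0,			// must be 4-byte aligned
 *		.length   = sizeof(regs),	// non-zero multiple of 4
 *		.buffer   = (uintptr_t)regs,
 *	};
 *	int fd = open("/dev/rio_mport0", O_RDWR);
 *
 *	if (ioctl(fd, RIO_MPORT_MAINT_READ_REMOTE, &io) < 0)
 *		perror("maint read");
 */
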
static int rio_mport_maint_wr(struct mport_cdev_priv *priv, void __user *arg,
			      int local)
{
	struct rio_mport *mport = priv->md->mport;
	struct rio_mport_maint_io maint_io;
	u32 *buffer;
	u32 offset;
	size_t length;
	int ret = -EINVAL, i;

	if (unlikely(copy_from_user(&maint_io, arg, sizeof(maint_io))))
		return -EFAULT;

	if ((maint_io.offset % 4) ||
	    (maint_io.length == 0) || (maint_io.length % 4) ||
	    (maint_io.length + maint_io.offset) > RIO_MAINT_SPACE_SZ)
		return -EINVAL;

	buffer = vmalloc(maint_io.length);
	if (buffer == NULL)
		return -ENOMEM;
	length = maint_io.length;

	if (unlikely(copy_from_user(buffer,
			(void __user *)(uintptr_t)maint_io.buffer, length))) {
		ret = -EFAULT;
		goto out;
	}

	offset = maint_io.offset;
	length /= sizeof(u32);

	for (i = 0; i < length; i++) {
		if (local)
			ret = __rio_local_write_config_32(mport,
							  offset, buffer[i]);
		else
			ret = rio_mport_write_config_32(mport, maint_io.rioid,
							maint_io.hopcount,
							offset, buffer[i]);
		if (ret)
			goto out;

		offset += 4;
	}

out:
	vfree(buffer);
	return ret;
}

/*
 * Inbound/outbound memory mapping functions
 */
static int
rio_mport_create_outbound_mapping(struct mport_dev *md, struct file *filp,
				  u16 rioid, u64 raddr, u32 size,
				  dma_addr_t *paddr)
{
	struct rio_mport *mport = md->mport;
	struct rio_mport_mapping *map;
	int ret;

	rmcd_debug(OBW, "did=%d ra=0x%llx sz=0x%x", rioid, raddr, size);

	map = kzalloc(sizeof(*map), GFP_KERNEL);
	if (map == NULL)
		return -ENOMEM;

	ret = rio_map_outb_region(mport, rioid, raddr, size, 0, paddr);
	if (ret < 0)
		goto err_map_outb;

	map->dir = MAP_OUTBOUND;
	map->rioid = rioid;
	map->rio_addr = raddr;
	map->size = size;
	map->phys_addr = *paddr;
	map->filp = filp;
	map->md = md;
	kref_init(&map->ref);
	list_add_tail(&map->node, &md->mappings);
	return 0;
err_map_outb:
	kfree(map);
	return ret;
}

static int
rio_mport_get_outbound_mapping(struct mport_dev *md, struct file *filp,
			       u16 rioid, u64 raddr, u32 size,
			       dma_addr_t *paddr)
{
	struct rio_mport_mapping *map;
	int err = -ENOMEM;

	mutex_lock(&md->buf_mutex);
	list_for_each_entry(map, &md->mappings, node) {
		if (map->dir != MAP_OUTBOUND)
			continue;
		if (rioid == map->rioid &&
		    raddr == map->rio_addr && size == map->size) {
			*paddr = map->phys_addr;
			err = 0;
			break;
		} else if (rioid == map->rioid &&
			   raddr < (map->rio_addr + map->size - 1) &&
			   (raddr + size) > map->rio_addr) {
			err = -EBUSY;
			break;
		}
	}

	/* If not found, create new */
	if (err == -ENOMEM)
		err = rio_mport_create_outbound_mapping(md, filp, rioid, raddr,
							size, paddr);
	mutex_unlock(&md->buf_mutex);
	return err;
}

static int rio_mport_obw_map(struct file *filp, void __user *arg)
{
	struct mport_cdev_priv *priv = filp->private_data;
	struct mport_dev *data = priv->md;
	struct rio_mmap map;
	dma_addr_t paddr;
	int ret;

	if (unlikely(copy_from_user(&map, arg, sizeof(map))))
		return -EFAULT;

	rmcd_debug(OBW, "did=%d ra=0x%llx sz=0x%llx",
		   map.rioid, map.rio_addr, map.length);

	ret = rio_mport_get_outbound_mapping(data, filp, map.rioid,
					     map.rio_addr, map.length, &paddr);
	if (ret < 0) {
		rmcd_error("Failed to set OBW err=%d", ret);
		return ret;
	}

	map.handle = paddr;

	if (unlikely(copy_to_user(arg, &map, sizeof(map))))
		return -EFAULT;
	return 0;
}

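/*
 * Example (userspace, illustrative only): requesting an outbound window
 * with RIO_MAP_OUTBOUND and mapping it into the process.  A minimal
 * sketch assuming struct rio_mmap from <linux/rio_mport_cdev.h>; the
 * returned map.handle is the physical address used as the mmap() offset,
 * and passing &map.handle to RIO_UNMAP_OUTBOUND matches the u64 handle
 * read by rio_mport_obw_free() below.
 *
 *	struct rio_mmap map = {
 *		.rioid    = destid,		// target RapidIO device
 *		.rio_addr = 0x10000000,		// address in target's RIO space
 *		.length   = 0x10000,		// window size in bytes
 *	};
 *	if (ioctl(fd, RIO_MAP_OUTBOUND, &map) < 0)
 *		perror("RIO_MAP_OUTBOUND");
 *	void *win = mmap(NULL, map.length, PROT_READ | PROT_WRITE,
 *			 MAP_SHARED, fd, map.handle);
 *	// ... access the remote device through win ...
 *	ioctl(fd, RIO_UNMAP_OUTBOUND, &map.handle);
 */
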
/*
 * rio_mport_obw_free() - unmap an OutBound Window from RapidIO address space
 *
 * @filp: file pointer associated with the call
 * @arg: buffer handle returned by allocation routine
 */
static int rio_mport_obw_free(struct file *filp, void __user *arg)
{
	struct mport_cdev_priv *priv = filp->private_data;
	struct mport_dev *md = priv->md;
	u64 handle;
	struct rio_mport_mapping *map, *_map;

	if (!md->mport->ops->unmap_outb)
		return -EPROTONOSUPPORT;

	if (copy_from_user(&handle, arg, sizeof(handle)))
		return -EFAULT;

	rmcd_debug(OBW, "h=0x%llx", handle);

	mutex_lock(&md->buf_mutex);
	list_for_each_entry_safe(map, _map, &md->mappings, node) {
		if (map->dir == MAP_OUTBOUND && map->phys_addr == handle) {
			if (map->filp == filp) {
				rmcd_debug(OBW, "kref_put h=0x%llx", handle);
				map->filp = NULL;
				kref_put(&map->ref, mport_release_mapping);
			}
			break;
		}
	}
	mutex_unlock(&md->buf_mutex);

	return 0;
}

/*
 * maint_hdid_set() - Set the host Device ID
 * @priv: driver private data
 * @arg: Device Id
 */
static int maint_hdid_set(struct mport_cdev_priv *priv, void __user *arg)
{
	struct mport_dev *md = priv->md;
	u16 hdid;

	if (copy_from_user(&hdid, arg, sizeof(hdid)))
		return -EFAULT;

	md->mport->host_deviceid = hdid;
	md->properties.hdid = hdid;
	rio_local_set_device_id(md->mport, hdid);

	rmcd_debug(MPORT, "Set host device Id to %d", hdid);

	return 0;
}

/*
 * maint_comptag_set() - Set the host Component Tag
 * @priv: driver private data
 * @arg: Component Tag
 */
static int maint_comptag_set(struct mport_cdev_priv *priv, void __user *arg)
{
	struct mport_dev *md = priv->md;
	u32 comptag;

	if (copy_from_user(&comptag, arg, sizeof(comptag)))
		return -EFAULT;

	rio_local_write_config_32(md->mport, RIO_COMPONENT_TAG_CSR, comptag);

	rmcd_debug(MPORT, "Set host Component Tag to %d", comptag);

	return 0;
}

#ifdef CONFIG_RAPIDIO_DMA_ENGINE

struct mport_dma_req {
	struct kref refcount;
	struct list_head node;
	struct file *filp;
	struct mport_cdev_priv *priv;
	enum rio_transfer_sync sync;
	struct sg_table sgt;
	struct page **page_list;
	unsigned int nr_pages;
	struct rio_mport_mapping *map;
	struct dma_chan *dmach;
	enum dma_data_direction dir;
	dma_cookie_t cookie;
	enum dma_status status;
	struct completion req_comp;
};

static void mport_release_def_dma(struct kref *dma_ref)
{
	struct mport_dev *md =
			container_of(dma_ref, struct mport_dev, dma_ref);

	rmcd_debug(EXIT, "DMA_%d", md->dma_chan->chan_id);
	rio_release_dma(md->dma_chan);
	md->dma_chan = NULL;
}

static void mport_release_dma(struct kref *dma_ref)
{
	struct mport_cdev_priv *priv =
			container_of(dma_ref, struct mport_cdev_priv, dma_ref);

	rmcd_debug(EXIT, "DMA_%d", priv->dmach->chan_id);
	complete(&priv->comp);
}

static void dma_req_free(struct kref *ref)
{
	struct mport_dma_req *req = container_of(ref, struct mport_dma_req,
						 refcount);
	struct mport_cdev_priv *priv = req->priv;

	dma_unmap_sg(req->dmach->device->dev,
		     req->sgt.sgl, req->sgt.nents, req->dir);
	sg_free_table(&req->sgt);
	if (req->page_list) {
		unpin_user_pages(req->page_list, req->nr_pages);
		kfree(req->page_list);
	}

	if (req->map) {
		mutex_lock(&req->map->md->buf_mutex);
		kref_put(&req->map->ref, mport_release_mapping);
		mutex_unlock(&req->map->md->buf_mutex);
	}

	kref_put(&priv->dma_ref, mport_release_dma);

	kfree(req);
}

static void dma_xfer_callback(void *param)
{
	struct mport_dma_req *req = (struct mport_dma_req *)param;
	struct mport_cdev_priv *priv = req->priv;

	req->status = dma_async_is_tx_complete(priv->dmach, req->cookie,
					       NULL, NULL);
	complete(&req->req_comp);
	kref_put(&req->refcount, dma_req_free);
}

/*
 * prep_dma_xfer() - Configure and send request to DMAengine to prepare DMA
 *                   transfer object.
 * Returns a pointer to the DMA transaction descriptor allocated by the DMA
 * driver on success, or ERR_PTR (or NULL) on failure.  The caller must check
 * the returned pointer with the IS_ERR() macro and also test it for NULL.
 */
static struct dma_async_tx_descriptor
*prep_dma_xfer(struct dma_chan *chan, struct rio_transfer_io *transfer,
	struct sg_table *sgt, int nents, enum dma_transfer_direction dir,
	enum dma_ctrl_flags flags)
{
	struct rio_dma_data tx_data;

	tx_data.sg = sgt->sgl;
	tx_data.sg_len = nents;
	tx_data.rio_addr_u = 0;
	tx_data.rio_addr = transfer->rio_addr;
	if (dir == DMA_MEM_TO_DEV) {
		switch (transfer->method) {
		case RIO_EXCHANGE_NWRITE:
			tx_data.wr_type = RDW_ALL_NWRITE;
			break;
		case RIO_EXCHANGE_NWRITE_R_ALL:
			tx_data.wr_type = RDW_ALL_NWRITE_R;
			break;
		case RIO_EXCHANGE_NWRITE_R:
			tx_data.wr_type = RDW_LAST_NWRITE_R;
			break;
		case RIO_EXCHANGE_DEFAULT:
			tx_data.wr_type = RDW_DEFAULT;
			break;
		default:
			return ERR_PTR(-EINVAL);
		}
	}

	return rio_dma_prep_xfer(chan, transfer->rioid, &tx_data, dir, flags);
}

/*
 * Request a DMA channel associated with this mport device.
 * Try to request a DMA channel for every new process that opened the given
 * mport.  If a new DMA channel is not available, use the default channel,
 * which is the first DMA channel opened on the mport device.
 */
static int get_dma_channel(struct mport_cdev_priv *priv)
{
	mutex_lock(&priv->dma_lock);
	if (!priv->dmach) {
		priv->dmach = rio_request_mport_dma(priv->md->mport);
		if (!priv->dmach) {
			/* Use default DMA channel if available */
			if (priv->md->dma_chan) {
				priv->dmach = priv->md->dma_chan;
				kref_get(&priv->md->dma_ref);
			} else {
				rmcd_error("Failed to get DMA channel");
				mutex_unlock(&priv->dma_lock);
				return -ENODEV;
			}
		} else if (!priv->md->dma_chan) {
			/* Register default DMA channel if we do not have one */
			priv->md->dma_chan = priv->dmach;
			kref_init(&priv->md->dma_ref);
			rmcd_debug(DMA, "Register DMA_chan %d as default",
				   priv->dmach->chan_id);
		}

		kref_init(&priv->dma_ref);
		init_completion(&priv->comp);
	}

	kref_get(&priv->dma_ref);
	mutex_unlock(&priv->dma_lock);
	return 0;
}

static void put_dma_channel(struct mport_cdev_priv *priv)
{
	kref_put(&priv->dma_ref, mport_release_dma);
}

/*
 * DMA transfer functions
 */
static int do_dma_request(struct mport_dma_req *req,
			  struct rio_transfer_io *xfer,
			  enum rio_transfer_sync sync, int nents)
{
	struct mport_cdev_priv *priv;
	struct sg_table *sgt;
	struct dma_chan *chan;
	struct dma_async_tx_descriptor *tx;
	dma_cookie_t cookie;
	unsigned long tmo = msecs_to_jiffies(dma_timeout);
	enum dma_transfer_direction dir;
	long wret;
	int ret = 0;

	priv = req->priv;
	sgt = &req->sgt;

	chan = priv->dmach;
	dir = (req->dir == DMA_FROM_DEVICE) ? DMA_DEV_TO_MEM : DMA_MEM_TO_DEV;

	rmcd_debug(DMA, "%s(%d) uses %s for DMA_%s",
		   current->comm, task_pid_nr(current),
		   dev_name(&chan->dev->device),
		   (dir == DMA_DEV_TO_MEM) ? "READ" : "WRITE");

	/* Initialize DMA transaction request */
	tx = prep_dma_xfer(chan, xfer, sgt, nents, dir,
			   DMA_CTRL_ACK | DMA_PREP_INTERRUPT);

	if (!tx) {
		rmcd_debug(DMA, "prep error for %s A:0x%llx L:0x%llx",
			   (dir == DMA_DEV_TO_MEM) ? "READ" : "WRITE",
			   xfer->rio_addr, xfer->length);
		ret = -EIO;
		goto err_out;
	} else if (IS_ERR(tx)) {
		ret = PTR_ERR(tx);
		rmcd_debug(DMA, "prep error %d for %s A:0x%llx L:0x%llx", ret,
			   (dir == DMA_DEV_TO_MEM) ? "READ" : "WRITE",
			   xfer->rio_addr, xfer->length);
		goto err_out;
	}

	tx->callback = dma_xfer_callback;
	tx->callback_param = req;

	req->status = DMA_IN_PROGRESS;
	kref_get(&req->refcount);

	cookie = dmaengine_submit(tx);
	req->cookie = cookie;

	rmcd_debug(DMA, "pid=%d DMA_%s tx_cookie = %d", task_pid_nr(current),
		   (dir == DMA_DEV_TO_MEM) ? "READ" : "WRITE", cookie);

	if (dma_submit_error(cookie)) {
		rmcd_error("submit err=%d (addr:0x%llx len:0x%llx)",
			   cookie, xfer->rio_addr, xfer->length);
		kref_put(&req->refcount, dma_req_free);
		ret = -EIO;
		goto err_out;
	}

	dma_async_issue_pending(chan);

	if (sync == RIO_TRANSFER_ASYNC) {
		spin_lock(&priv->req_lock);
		list_add_tail(&req->node, &priv->async_list);
		spin_unlock(&priv->req_lock);
		return cookie;
	} else if (sync == RIO_TRANSFER_FAF)
		return 0;

	wret = wait_for_completion_interruptible_timeout(&req->req_comp, tmo);

	if (wret == 0) {
		/* Timeout on wait occurred */
		rmcd_error("%s(%d) timed out waiting for DMA_%s %d",
			   current->comm, task_pid_nr(current),
			   (dir == DMA_DEV_TO_MEM) ? "READ" : "WRITE", cookie);
		return -ETIMEDOUT;
	} else if (wret == -ERESTARTSYS) {
		/* Wait for completion was interrupted by a signal, but the
		 * DMA may still be in progress
		 */
		rmcd_error("%s(%d) wait for DMA_%s %d was interrupted",
			   current->comm, task_pid_nr(current),
			   (dir == DMA_DEV_TO_MEM) ? "READ" : "WRITE", cookie);
		return -EINTR;
	}

	if (req->status != DMA_COMPLETE) {
		/* DMA transaction completion was signaled with error */
		rmcd_error("%s(%d) DMA_%s %d completed with status %d (ret=%d)",
			   current->comm, task_pid_nr(current),
			   (dir == DMA_DEV_TO_MEM) ? "READ" : "WRITE",
			   cookie, req->status, ret);
		ret = -EIO;
	}

err_out:
	return ret;
}

/*
 * rio_dma_transfer() - Perform RapidIO DMA data transfer to/from
 *                      the remote RapidIO device
 * @filp: file pointer associated with the call
 * @transfer_mode: DMA transfer mode
 * @sync: synchronization mode
 * @dir: DMA transfer direction (DMA_MEM_TO_DEV = write OR
 *       DMA_DEV_TO_MEM = read)
 * @xfer: data transfer descriptor structure
 */
static int
rio_dma_transfer(struct file *filp, u32 transfer_mode,
		 enum rio_transfer_sync sync, enum dma_data_direction dir,
		 struct rio_transfer_io *xfer)
{
	struct mport_cdev_priv *priv = filp->private_data;
	unsigned long nr_pages = 0;
	struct page **page_list = NULL;
	struct mport_dma_req *req;
	struct mport_dev *md = priv->md;
	struct dma_chan *chan;
	int ret;
	int nents;

	if (xfer->length == 0)
		return -EINVAL;
	req = kzalloc(sizeof(*req), GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	ret = get_dma_channel(priv);
	if (ret) {
		kfree(req);
		return ret;
	}
	chan = priv->dmach;

	kref_init(&req->refcount);
	init_completion(&req->req_comp);
	req->dir = dir;
	req->filp = filp;
	req->priv = priv;
	req->dmach = chan;
	req->sync = sync;

	/*
	 * If parameter loc_addr != NULL, we are transferring data from/to
	 * data buffer allocated in user-space: lock in memory user-space
	 * buffer pages and build an SG table for DMA transfer request
	 *
	 * Otherwise (loc_addr == NULL) contiguous kernel-space buffer is
	 * used for DMA data transfers: build single entry SG table using
	 * offset within the internal buffer specified by handle parameter.
	 */
	if (xfer->loc_addr) {
		unsigned int offset;
		long pinned;

		offset = lower_32_bits(offset_in_page(xfer->loc_addr));
		nr_pages = PAGE_ALIGN(xfer->length + offset) >> PAGE_SHIFT;

		page_list = kmalloc_array(nr_pages,
					  sizeof(*page_list), GFP_KERNEL);
		if (page_list == NULL) {
			ret = -ENOMEM;
			goto err_req;
		}

		pinned = pin_user_pages_fast(
				(unsigned long)xfer->loc_addr & PAGE_MASK,
				nr_pages,
				dir == DMA_FROM_DEVICE ? FOLL_WRITE : 0,
				page_list);

		if (pinned != nr_pages) {
			if (pinned < 0) {
				rmcd_error("pin_user_pages_fast err=%ld",
					   pinned);
				nr_pages = 0;
			} else {
				rmcd_error("pinned %ld out of %ld pages",
					   pinned, nr_pages);
				/*
				 * Set nr_pages to the number of pages that
				 * must be unpinned in the error handler below.
				 */
				nr_pages = pinned;
			}
			ret = -EFAULT;
			goto err_pg;
		}

		ret = sg_alloc_table_from_pages(&req->sgt, page_list, nr_pages,
				offset, xfer->length, GFP_KERNEL);
		if (ret) {
			rmcd_error("sg_alloc_table failed with err=%d", ret);
			goto err_pg;
		}

		req->page_list = page_list;
		req->nr_pages = nr_pages;
	} else {
		dma_addr_t baddr;
		struct rio_mport_mapping *map;

		baddr = (dma_addr_t)xfer->handle;

		mutex_lock(&md->buf_mutex);
		list_for_each_entry(map, &md->mappings, node) {
			if (baddr >= map->phys_addr &&
			    baddr < (map->phys_addr + map->size)) {
				kref_get(&map->ref);
				req->map = map;
				break;
			}
		}
		mutex_unlock(&md->buf_mutex);

		if (req->map == NULL) {
			ret = -ENOMEM;
			goto err_req;
		}

		if (xfer->length + xfer->offset > map->size) {
			ret = -EINVAL;
			goto err_req;
		}

		ret = sg_alloc_table(&req->sgt, 1, GFP_KERNEL);
		if (unlikely(ret)) {
			rmcd_error("sg_alloc_table failed for internal buf");
			goto err_req;
		}

		sg_set_buf(req->sgt.sgl,
			   map->virt_addr + (baddr - map->phys_addr) +
				xfer->offset, xfer->length);
	}

	nents = dma_map_sg(chan->device->dev,
			   req->sgt.sgl, req->sgt.nents, dir);
	if (nents == 0) {
		rmcd_error("Failed to map SG list");
		ret = -EFAULT;
		goto err_pg;
	}

	ret = do_dma_request(req, xfer, sync, nents);

	if (ret >= 0) {
		if (sync == RIO_TRANSFER_ASYNC)
			return ret; /* return ASYNC cookie */
	} else {
		rmcd_debug(DMA, "do_dma_request failed with err=%d", ret);
	}

err_pg:
	if (!req->page_list) {
		unpin_user_pages(page_list, nr_pages);
		kfree(page_list);
	}
err_req:
	kref_put(&req->refcount, dma_req_free);
	return ret;
}

static int rio_mport_transfer_ioctl(struct file *filp, void __user *arg)
{
	struct mport_cdev_priv *priv = filp->private_data;
	struct rio_transaction transaction;
	struct rio_transfer_io *transfer;
	enum dma_data_direction dir;
	int i, ret = 0;

	if (unlikely(copy_from_user(&transaction, arg, sizeof(transaction))))
		return -EFAULT;

	if (transaction.count != 1) /* only single transfer for now */
		return -EINVAL;

	if ((transaction.transfer_mode &
	     priv->md->properties.transfer_mode) == 0)
		return -ENODEV;

	transfer = vmalloc(array_size(sizeof(*transfer), transaction.count));
	if (!transfer)
		return -ENOMEM;

	if (unlikely(copy_from_user(transfer,
				    (void __user *)(uintptr_t)transaction.block,
				    array_size(sizeof(*transfer), transaction.count)))) {
		ret = -EFAULT;
		goto out_free;
	}

	dir = (transaction.dir == RIO_TRANSFER_DIR_READ) ?
					DMA_FROM_DEVICE : DMA_TO_DEVICE;
	for (i = 0; i < transaction.count && ret == 0; i++)
		ret = rio_dma_transfer(filp, transaction.transfer_mode,
				       transaction.sync, dir, &transfer[i]);

	if (unlikely(copy_to_user((void __user *)(uintptr_t)transaction.block,
				  transfer,
				  array_size(sizeof(*transfer), transaction.count))))
		ret = -EFAULT;

out_free:
	vfree(transfer);

	return ret;
}

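/*
 * Example (userspace, illustrative only): a single synchronous DMA write
 * to a remote device using the RIO_TRANSFER ioctl served by the routine
 * above.  A minimal sketch assuming the uapi structs rio_transaction and
 * rio_transfer_io from <linux/rio_mport_cdev.h> and a user buffer buf of
 * len bytes; field and constant names follow the uapi header.
 *
 *	struct rio_transfer_io xfer = {
 *		.rioid    = destid,		// target device
 *		.rio_addr = 0x20000000,		// address in target's RIO space
 *		.loc_addr = (uintptr_t)buf,	// user buffer (0 => use .handle)
 *		.length   = len,
 *		.method   = RIO_EXCHANGE_NWRITE_R,
 *	};
 *	struct rio_transaction tx = {
 *		.block = (uintptr_t)&xfer,
 *		.count = 1,			// only single transfers for now
 *		.transfer_mode = RIO_TRANSFER_MODE_TRANSFER,
 *		.sync  = RIO_TRANSFER_SYNC,	// block until DMA completes
 *		.dir   = RIO_TRANSFER_DIR_WRITE,
 *	};
 *	if (ioctl(fd, RIO_TRANSFER, &tx) < 0)
 *		perror("RIO_TRANSFER");
 */
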
static int rio_mport_wait_for_async_dma(struct file *filp, void __user *arg)
{
	struct mport_cdev_priv *priv;
	struct rio_async_tx_wait w_param;
	struct mport_dma_req *req;
	dma_cookie_t cookie;
	unsigned long tmo;
	long wret;
	int found = 0;
	int ret;

	priv = (struct mport_cdev_priv *)filp->private_data;

	if (unlikely(copy_from_user(&w_param, arg, sizeof(w_param))))
		return -EFAULT;

	cookie = w_param.token;
	if (w_param.timeout)
		tmo = msecs_to_jiffies(w_param.timeout);
	else /* Use default DMA timeout */
		tmo = msecs_to_jiffies(dma_timeout);

	spin_lock(&priv->req_lock);
	list_for_each_entry(req, &priv->async_list, node) {
		if (req->cookie == cookie) {
			list_del(&req->node);
			found = 1;
			break;
		}
	}
	spin_unlock(&priv->req_lock);

	if (!found)
		return -EAGAIN;

	wret = wait_for_completion_interruptible_timeout(&req->req_comp, tmo);

	if (wret == 0) {
		/* Timeout on wait occurred */
		rmcd_error("%s(%d) timed out waiting for ASYNC DMA_%s",
			   current->comm, task_pid_nr(current),
			   (req->dir == DMA_FROM_DEVICE) ? "READ" : "WRITE");
		ret = -ETIMEDOUT;
		goto err_tmo;
	} else if (wret == -ERESTARTSYS) {
		/* Wait for completion was interrupted by a signal, but the
		 * DMA may still be in progress
		 */
		rmcd_error("%s(%d) wait for ASYNC DMA_%s was interrupted",
			   current->comm, task_pid_nr(current),
			   (req->dir == DMA_FROM_DEVICE) ? "READ" : "WRITE");
		ret = -EINTR;
		goto err_tmo;
	}

	if (req->status != DMA_COMPLETE) {
		/* DMA transaction completion signaled with transfer error */
		rmcd_error("%s(%d) ASYNC DMA_%s completion with status %d",
			   current->comm, task_pid_nr(current),
			   (req->dir == DMA_FROM_DEVICE) ? "READ" : "WRITE",
			   req->status);
		ret = -EIO;
	} else
		ret = 0;

	if (req->status != DMA_IN_PROGRESS && req->status != DMA_PAUSED)
		kref_put(&req->refcount, dma_req_free);

	return ret;

err_tmo:
	/* Return request back into async queue */
	spin_lock(&priv->req_lock);
	list_add_tail(&req->node, &priv->async_list);
	spin_unlock(&priv->req_lock);
	return ret;
}

static int rio_mport_create_dma_mapping(struct mport_dev *md, struct file *filp,
			u64 size, struct rio_mport_mapping **mapping)
{
	struct rio_mport_mapping *map;

	map = kzalloc(sizeof(*map), GFP_KERNEL);
	if (map == NULL)
		return -ENOMEM;

	map->virt_addr = dma_alloc_coherent(md->mport->dev.parent, size,
					    &map->phys_addr, GFP_KERNEL);
	if (map->virt_addr == NULL) {
		kfree(map);
		return -ENOMEM;
	}

	map->dir = MAP_DMA;
	map->size = size;
	map->filp = filp;
	map->md = md;
	kref_init(&map->ref);
	mutex_lock(&md->buf_mutex);
	list_add_tail(&map->node, &md->mappings);
	mutex_unlock(&md->buf_mutex);
	*mapping = map;

	return 0;
}

static int rio_mport_alloc_dma(struct file *filp, void __user *arg)
{
	struct mport_cdev_priv *priv = filp->private_data;
	struct mport_dev *md = priv->md;
	struct rio_dma_mem map;
	struct rio_mport_mapping *mapping = NULL;
	int ret;

	if (unlikely(copy_from_user(&map, arg, sizeof(map))))
		return -EFAULT;

	ret = rio_mport_create_dma_mapping(md, filp, map.length, &mapping);
	if (ret)
		return ret;

	map.dma_handle = mapping->phys_addr;

	if (unlikely(copy_to_user(arg, &map, sizeof(map)))) {
		mutex_lock(&md->buf_mutex);
		kref_put(&mapping->ref, mport_release_mapping);
		mutex_unlock(&md->buf_mutex);
		return -EFAULT;
	}

	return 0;
}

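/*
 * Example (userspace, illustrative only): allocating a DMA-coherent
 * kernel buffer with RIO_ALLOC_DMA, mapping it into the process, and
 * then using it as a transfer source by handle instead of a pinned user
 * buffer.  A minimal sketch assuming struct rio_dma_mem from
 * <linux/rio_mport_cdev.h>.
 *
 *	struct rio_dma_mem dbuf = { .length = 0x10000 };
 *	if (ioctl(fd, RIO_ALLOC_DMA, &dbuf) < 0)
 *		perror("RIO_ALLOC_DMA");
 *	void *p = mmap(NULL, dbuf.length, PROT_READ | PROT_WRITE,
 *		       MAP_SHARED, fd, dbuf.dma_handle);
 *	// fill p, then DMA straight out of this buffer by setting
 *	// rio_transfer_io.handle = dbuf.dma_handle and .loc_addr = 0
 *	ioctl(fd, RIO_FREE_DMA, &dbuf.dma_handle);
 */
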
static int rio_mport_free_dma(struct file *filp, void __user *arg)
{
	struct mport_cdev_priv *priv = filp->private_data;
	struct mport_dev *md = priv->md;
	u64 handle;
	int ret = -EFAULT;
	struct rio_mport_mapping *map, *_map;

	if (copy_from_user(&handle, arg, sizeof(handle)))
		return -EFAULT;
	rmcd_debug(EXIT, "filp=%p", filp);

	mutex_lock(&md->buf_mutex);
	list_for_each_entry_safe(map, _map, &md->mappings, node) {
		if (map->dir == MAP_DMA && map->phys_addr == handle &&
		    map->filp == filp) {
			kref_put(&map->ref, mport_release_mapping);
			ret = 0;
			break;
		}
	}
	mutex_unlock(&md->buf_mutex);

	if (ret == -EFAULT) {
		rmcd_debug(DMA, "ERR no matching mapping");
		return ret;
	}

	return 0;
}
#else
static int rio_mport_transfer_ioctl(struct file *filp, void __user *arg)
{
	return -ENODEV;
}

static int rio_mport_wait_for_async_dma(struct file *filp, void __user *arg)
{
	return -ENODEV;
}

static int rio_mport_alloc_dma(struct file *filp, void __user *arg)
{
	return -ENODEV;
}

static int rio_mport_free_dma(struct file *filp, void __user *arg)
{
	return -ENODEV;
}
#endif /* CONFIG_RAPIDIO_DMA_ENGINE */

/*
 * Inbound/outbound memory mapping functions
 */

static int
rio_mport_create_inbound_mapping(struct mport_dev *md, struct file *filp,
				 u64 raddr, u64 size,
				 struct rio_mport_mapping **mapping)
{
	struct rio_mport *mport = md->mport;
	struct rio_mport_mapping *map;
	int ret;

	/* rio_map_inb_region() accepts u32 size */
	if (size > 0xffffffff)
		return -EINVAL;

	map = kzalloc(sizeof(*map), GFP_KERNEL);
	if (map == NULL)
		return -ENOMEM;

	map->virt_addr = dma_alloc_coherent(mport->dev.parent, size,
					    &map->phys_addr, GFP_KERNEL);
	if (map->virt_addr == NULL) {
		ret = -ENOMEM;
		goto err_dma_alloc;
	}

	if (raddr == RIO_MAP_ANY_ADDR)
		raddr = map->phys_addr;
	ret = rio_map_inb_region(mport, map->phys_addr, raddr, (u32)size, 0);
	if (ret < 0)
		goto err_map_inb;

	map->dir = MAP_INBOUND;
	map->rio_addr = raddr;
	map->size = size;
	map->filp = filp;
	map->md = md;
	kref_init(&map->ref);
	mutex_lock(&md->buf_mutex);
	list_add_tail(&map->node, &md->mappings);
	mutex_unlock(&md->buf_mutex);
	*mapping = map;
	return 0;

err_map_inb:
	dma_free_coherent(mport->dev.parent, size,
			  map->virt_addr, map->phys_addr);
err_dma_alloc:
	kfree(map);
	return ret;
}

static int
rio_mport_get_inbound_mapping(struct mport_dev *md, struct file *filp,
			      u64 raddr, u64 size,
			      struct rio_mport_mapping **mapping)
{
	struct rio_mport_mapping *map;
	int err = -ENOMEM;

	if (raddr == RIO_MAP_ANY_ADDR)
		goto get_new;

	mutex_lock(&md->buf_mutex);
	list_for_each_entry(map, &md->mappings, node) {
		if (map->dir != MAP_INBOUND)
			continue;
		if (raddr == map->rio_addr && size == map->size) {
			/* allow exact match only */
			*mapping = map;
			err = 0;
			break;
		} else if (raddr < (map->rio_addr + map->size - 1) &&
			   (raddr + size) > map->rio_addr) {
			err = -EBUSY;
			break;
		}
	}
	mutex_unlock(&md->buf_mutex);

	if (err != -ENOMEM)
		return err;
get_new:
	/* not found, create new */
	return rio_mport_create_inbound_mapping(md, filp, raddr, size, mapping);
}

static int rio_mport_map_inbound(struct file *filp, void __user *arg)
{
	struct mport_cdev_priv *priv = filp->private_data;
	struct mport_dev *md = priv->md;
	struct rio_mmap map;
	struct rio_mport_mapping *mapping = NULL;
	int ret;

	if (!md->mport->ops->map_inb)
		return -EPROTONOSUPPORT;
	if (unlikely(copy_from_user(&map, arg, sizeof(map))))
		return -EFAULT;

	rmcd_debug(IBW, "%s filp=%p", dev_name(&priv->md->dev), filp);

	ret = rio_mport_get_inbound_mapping(md, filp, map.rio_addr,
					    map.length, &mapping);
	if (ret)
		return ret;

	map.handle = mapping->phys_addr;
	map.rio_addr = mapping->rio_addr;

	if (unlikely(copy_to_user(arg, &map, sizeof(map)))) {
		/* Delete mapping if it was created by this request */
		if (ret == 0 && mapping->filp == filp) {
			mutex_lock(&md->buf_mutex);
			kref_put(&mapping->ref, mport_release_mapping);
			mutex_unlock(&md->buf_mutex);
		}
		return -EFAULT;
	}

	return 0;
}

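/*
 * Example (userspace, illustrative only): exposing a local buffer to
 * remote RapidIO devices through an inbound window.  A minimal sketch
 * assuming struct rio_mmap from <linux/rio_mport_cdev.h>;
 * RIO_MAP_ANY_ADDR lets the driver pick the RapidIO address, which is
 * returned in map.rio_addr.
 *
 *	struct rio_mmap map = {
 *		.rio_addr = RIO_MAP_ANY_ADDR,	// or a fixed RIO-space address
 *		.length   = 0x10000,
 *	};
 *	if (ioctl(fd, RIO_MAP_INBOUND, &map) < 0)
 *		perror("RIO_MAP_INBOUND");
 *	void *buf = mmap(NULL, map.length, PROT_READ | PROT_WRITE,
 *			 MAP_SHARED, fd, map.handle);
 *	// remote peers can now NWRITE/NREAD at map.rio_addr
 *	ioctl(fd, RIO_UNMAP_INBOUND, &map.handle);
 */
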
/*
 * rio_mport_inbound_free() - unmap from RapidIO address space and free
 *                            previously allocated inbound DMA coherent buffer
 * @filp: file pointer associated with the call
 * @arg: buffer handle returned by allocation routine
 */
static int rio_mport_inbound_free(struct file *filp, void __user *arg)
{
	struct mport_cdev_priv *priv = filp->private_data;
	struct mport_dev *md = priv->md;
	u64 handle;
	struct rio_mport_mapping *map, *_map;

	rmcd_debug(IBW, "%s filp=%p", dev_name(&priv->md->dev), filp);

	if (!md->mport->ops->unmap_inb)
		return -EPROTONOSUPPORT;

	if (copy_from_user(&handle, arg, sizeof(handle)))
		return -EFAULT;

	mutex_lock(&md->buf_mutex);
	list_for_each_entry_safe(map, _map, &md->mappings, node) {
		if (map->dir == MAP_INBOUND && map->phys_addr == handle) {
			if (map->filp == filp) {
				map->filp = NULL;
				kref_put(&map->ref, mport_release_mapping);
			}
			break;
		}
	}
	mutex_unlock(&md->buf_mutex);

	return 0;
}

/*
 * maint_port_idx_get() - Get the port index of the mport instance
 * @priv: driver private data
 * @arg: port index
 */
static int maint_port_idx_get(struct mport_cdev_priv *priv, void __user *arg)
{
	struct mport_dev *md = priv->md;
	u32 port_idx = md->mport->index;

	rmcd_debug(MPORT, "port_index=%d", port_idx);

	if (copy_to_user(arg, &port_idx, sizeof(port_idx)))
		return -EFAULT;

	return 0;
}

static int rio_mport_add_event(struct mport_cdev_priv *priv,
			       struct rio_event *event)
{
	int overflow;

	if (!(priv->event_mask & event->header))
		return -EACCES;

	spin_lock(&priv->fifo_lock);
	overflow = kfifo_avail(&priv->event_fifo) < sizeof(*event)
		|| kfifo_in(&priv->event_fifo, (unsigned char *)event,
			sizeof(*event)) != sizeof(*event);
	spin_unlock(&priv->fifo_lock);

	wake_up_interruptible(&priv->event_rx_wait);

	if (overflow) {
		dev_warn(&priv->md->dev, DRV_NAME ": event fifo overflow\n");
		return -EBUSY;
	}

	return 0;
}

static void rio_mport_doorbell_handler(struct rio_mport *mport, void *dev_id,
				       u16 src, u16 dst, u16 info)
{
	struct mport_dev *data = dev_id;
	struct mport_cdev_priv *priv;
	struct rio_mport_db_filter *db_filter;
	struct rio_event event;
	int handled;

	event.header = RIO_DOORBELL;
	event.u.doorbell.rioid = src;
	event.u.doorbell.payload = info;

	handled = 0;
	spin_lock(&data->db_lock);
	list_for_each_entry(db_filter, &data->doorbells, data_node) {
		if (((db_filter->filter.rioid == RIO_INVALID_DESTID ||
		      db_filter->filter.rioid == src)) &&
		      info >= db_filter->filter.low &&
		      info <= db_filter->filter.high) {
			priv = db_filter->priv;
			rio_mport_add_event(priv, &event);
			handled = 1;
		}
	}
	spin_unlock(&data->db_lock);

	if (!handled)
		dev_warn(&data->dev,
			"%s: spurious DB received from 0x%x, info=0x%04x\n",
			__func__, src, info);
}

static int rio_mport_add_db_filter(struct mport_cdev_priv *priv,
				   void __user *arg)
{
	struct mport_dev *md = priv->md;
	struct rio_mport_db_filter *db_filter;
	struct rio_doorbell_filter filter;
	unsigned long flags;
	int ret;

	if (copy_from_user(&filter, arg, sizeof(filter)))
		return -EFAULT;

	if (filter.low > filter.high)
		return -EINVAL;

	ret = rio_request_inb_dbell(md->mport, md, filter.low, filter.high,
				    rio_mport_doorbell_handler);
	if (ret) {
		rmcd_error("%s failed to register IBDB, err=%d",
			   dev_name(&md->dev), ret);
		return ret;
	}

	db_filter = kzalloc(sizeof(*db_filter), GFP_KERNEL);
	if (db_filter == NULL) {
		rio_release_inb_dbell(md->mport, filter.low, filter.high);
		return -ENOMEM;
	}

	db_filter->filter = filter;
	db_filter->priv = priv;
	spin_lock_irqsave(&md->db_lock, flags);
	list_add_tail(&db_filter->priv_node, &priv->db_filters);
	list_add_tail(&db_filter->data_node, &md->doorbells);
	spin_unlock_irqrestore(&md->db_lock, flags);

	return 0;
}

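/*
 * Example (userspace, illustrative only): receiving inbound doorbells as
 * events.  A minimal sketch assuming struct rio_doorbell_filter, struct
 * rio_event and the RIO_DOORBELL event-mask bit from
 * <linux/rio_mport_cdev.h>; queued events are consumed by read()ing the
 * mport file descriptor (the read handler lives outside this section).
 *
 *	struct rio_doorbell_filter dbf = {
 *		.rioid = 0xffff,	// RIO_INVALID_DESTID: any sender
 *		.low   = 0x0000,
 *		.high  = 0xffff,	// accept all doorbell payloads
 *	};
 *	unsigned int mask = RIO_DOORBELL;
 *	ioctl(fd, RIO_ENABLE_DOORBELL_RANGE, &dbf);
 *	ioctl(fd, RIO_SET_EVENT_MASK, &mask);
 *
 *	struct rio_event ev;
 *	if (read(fd, &ev, sizeof(ev)) == sizeof(ev) &&
 *	    ev.header == RIO_DOORBELL)
 *		printf("DB 0x%04x from %u\n",
 *		       ev.u.doorbell.payload, ev.u.doorbell.rioid);
 */
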
static void rio_mport_delete_db_filter(struct rio_mport_db_filter *db_filter)
{
	list_del(&db_filter->data_node);
	list_del(&db_filter->priv_node);
	kfree(db_filter);
}

static int rio_mport_remove_db_filter(struct mport_cdev_priv *priv,
				      void __user *arg)
{
	struct rio_mport_db_filter *db_filter;
	struct rio_doorbell_filter filter;
	unsigned long flags;
	int ret = -EINVAL;

	if (copy_from_user(&filter, arg, sizeof(filter)))
		return -EFAULT;

	if (filter.low > filter.high)
		return -EINVAL;

	spin_lock_irqsave(&priv->md->db_lock, flags);
	list_for_each_entry(db_filter, &priv->db_filters, priv_node) {
		if (db_filter->filter.rioid == filter.rioid &&
		    db_filter->filter.low == filter.low &&
		    db_filter->filter.high == filter.high) {
			rio_mport_delete_db_filter(db_filter);
			ret = 0;
			break;
		}
	}
	spin_unlock_irqrestore(&priv->md->db_lock, flags);

	if (!ret)
		rio_release_inb_dbell(priv->md->mport, filter.low, filter.high);

	return ret;
}

static int rio_mport_match_pw(union rio_pw_msg *msg,
			      struct rio_pw_filter *filter)
{
	if ((msg->em.comptag & filter->mask) < filter->low ||
	    (msg->em.comptag & filter->mask) > filter->high)
		return 0;
	return 1;
}

static int rio_mport_pw_handler(struct rio_mport *mport, void *context,
				union rio_pw_msg *msg, int step)
{
	struct mport_dev *md = context;
	struct mport_cdev_priv *priv;
	struct rio_mport_pw_filter *pw_filter;
	struct rio_event event;
	int handled;

	event.header = RIO_PORTWRITE;
	memcpy(event.u.portwrite.payload, msg->raw, RIO_PW_MSG_SIZE);

	handled = 0;
	spin_lock(&md->pw_lock);
	list_for_each_entry(pw_filter, &md->portwrites, md_node) {
		if (rio_mport_match_pw(msg, &pw_filter->filter)) {
			priv = pw_filter->priv;
			rio_mport_add_event(priv, &event);
			handled = 1;
		}
	}
	spin_unlock(&md->pw_lock);

	if (!handled) {
		printk_ratelimited(KERN_WARNING DRV_NAME
			": mport%d received spurious PW from 0x%08x\n",
			mport->id, msg->em.comptag);
	}

	return 0;
}

static int rio_mport_add_pw_filter(struct mport_cdev_priv *priv,
				   void __user *arg)
{
	struct mport_dev *md = priv->md;
	struct rio_mport_pw_filter *pw_filter;
	struct rio_pw_filter filter;
	unsigned long flags;
	int hadd = 0;

	if (copy_from_user(&filter, arg, sizeof(filter)))
		return -EFAULT;

	pw_filter = kzalloc(sizeof(*pw_filter), GFP_KERNEL);
	if (pw_filter == NULL)
		return -ENOMEM;

	pw_filter->filter = filter;
	pw_filter->priv = priv;
	spin_lock_irqsave(&md->pw_lock, flags);
	if (list_empty(&md->portwrites))
		hadd = 1;
	list_add_tail(&pw_filter->priv_node, &priv->pw_filters);
	list_add_tail(&pw_filter->md_node, &md->portwrites);
	spin_unlock_irqrestore(&md->pw_lock, flags);

	if (hadd) {
		int ret;

		ret = rio_add_mport_pw_handler(md->mport, md,
					       rio_mport_pw_handler);
		if (ret) {
			dev_err(&md->dev,
				"%s: failed to add IB_PW handler, err=%d\n",
				__func__, ret);
			return ret;
		}
		rio_pw_enable(md->mport, 1);
	}

	return 0;
}

static void rio_mport_delete_pw_filter(struct rio_mport_pw_filter *pw_filter)
{
	list_del(&pw_filter->md_node);
	list_del(&pw_filter->priv_node);
	kfree(pw_filter);
}

static int rio_mport_match_pw_filter(struct rio_pw_filter *a,
				     struct rio_pw_filter *b)
{
	if ((a->mask == b->mask) && (a->low == b->low) && (a->high == b->high))
		return 1;
	return 0;
}

static int rio_mport_remove_pw_filter(struct mport_cdev_priv *priv,
				      void __user *arg)
{
	struct mport_dev *md = priv->md;
	struct rio_mport_pw_filter *pw_filter;
	struct rio_pw_filter filter;
	unsigned long flags;
	int ret = -EINVAL;
	int hdel = 0;

	if (copy_from_user(&filter, arg, sizeof(filter)))
		return -EFAULT;

	spin_lock_irqsave(&md->pw_lock, flags);
	list_for_each_entry(pw_filter, &priv->pw_filters, priv_node) {
		if (rio_mport_match_pw_filter(&pw_filter->filter, &filter)) {
			rio_mport_delete_pw_filter(pw_filter);
			ret = 0;
			break;
		}
	}

	if (list_empty(&md->portwrites))
		hdel = 1;
	spin_unlock_irqrestore(&md->pw_lock, flags);

	if (hdel) {
		rio_del_mport_pw_handler(md->mport, priv->md,
					 rio_mport_pw_handler);
		rio_pw_enable(md->mport, 0);
	}

	return ret;
}

/*
 * rio_release_dev - release routine for kernel RIO device object
 * @dev: kernel device object associated with a RIO device structure
 *
 * Frees the RIO device structure associated with the given kernel device
 * object.
 */
static void rio_release_dev(struct device *dev)
{
	struct rio_dev *rdev;

	rdev = to_rio_dev(dev);
	pr_info(DRV_PREFIX "%s: %s\n", __func__, rio_name(rdev));
	kfree(rdev);
}

static void rio_release_net(struct device *dev)
{
	struct rio_net *net;

	net = to_rio_net(dev);
	rmcd_debug(RDEV, "net_%d", net->id);
	kfree(net);
}

/*
 * rio_mport_add_riodev - creates a kernel RIO device object
 *
 * Allocates a RIO device data structure and initializes required fields based
 * on device's configuration space contents.
 * If the device has switch capabilities, then a switch specific portion is
 * allocated and configured.
 */
static int rio_mport_add_riodev(struct mport_cdev_priv *priv,
				void __user *arg)
{
	struct mport_dev *md = priv->md;
	struct rio_rdev_info dev_info;
	struct rio_dev *rdev;
	struct rio_switch *rswitch = NULL;
	struct rio_mport *mport;
	size_t size;
	u32 rval;
	u32 swpinfo = 0;
	u16 destid;
	u8 hopcount;
	int err;

	if (copy_from_user(&dev_info, arg, sizeof(dev_info)))
		return -EFAULT;
	dev_info.name[sizeof(dev_info.name) - 1] = '\0';

	rmcd_debug(RDEV, "name:%s ct:0x%x did:0x%x hc:0x%x", dev_info.name,
		   dev_info.comptag, dev_info.destid, dev_info.hopcount);

	if (bus_find_device_by_name(&rio_bus_type, NULL, dev_info.name)) {
		rmcd_debug(RDEV, "device %s already exists", dev_info.name);
		return -EEXIST;
	}

	size = sizeof(*rdev);
	mport = md->mport;
	destid = dev_info.destid;
	hopcount = dev_info.hopcount;

	if (rio_mport_read_config_32(mport, destid, hopcount,
				     RIO_PEF_CAR, &rval))
		return -EIO;

	if (rval & RIO_PEF_SWITCH) {
		rio_mport_read_config_32(mport, destid, hopcount,
					 RIO_SWP_INFO_CAR, &swpinfo);
		size += struct_size(rswitch, nextdev, RIO_GET_TOTAL_PORTS(swpinfo));
	}

	rdev = kzalloc(size, GFP_KERNEL);
	if (rdev == NULL)
		return -ENOMEM;

	if (mport->net == NULL) {
		struct rio_net *net;

		net = rio_alloc_net(mport);
		if (!net) {
			err = -ENOMEM;
			rmcd_debug(RDEV, "failed to allocate net object");
			goto cleanup;
		}

		net->id = mport->id;
		net->hport = mport;
		dev_set_name(&net->dev, "rnet_%d", net->id);
		net->dev.parent = &mport->dev;
		net->dev.release = rio_release_net;
		err = rio_add_net(net);
		if (err) {
			rmcd_debug(RDEV, "failed to register net, err=%d", err);
			kfree(net);
			goto cleanup;
		}
	}

	rdev->net = mport->net;
	rdev->pef = rval;
	rdev->swpinfo = swpinfo;
	rio_mport_read_config_32(mport, destid, hopcount,
				 RIO_DEV_ID_CAR, &rval);
	rdev->did = rval >> 16;
	rdev->vid = rval & 0xffff;
	rio_mport_read_config_32(mport, destid, hopcount, RIO_DEV_INFO_CAR,
				 &rdev->device_rev);
	rio_mport_read_config_32(mport, destid, hopcount, RIO_ASM_ID_CAR,
				 &rval);
	rdev->asm_did = rval >> 16;
	rdev->asm_vid = rval & 0xffff;
	rio_mport_read_config_32(mport, destid, hopcount, RIO_ASM_INFO_CAR,
				 &rval);
	rdev->asm_rev = rval >> 16;

	if (rdev->pef & RIO_PEF_EXT_FEATURES) {
		rdev->efptr = rval & 0xffff;
		rdev->phys_efptr = rio_mport_get_physefb(mport, 0, destid,
						hopcount, &rdev->phys_rmap);

		rdev->em_efptr = rio_mport_get_feature(mport, 0, destid,
						hopcount, RIO_EFB_ERR_MGMNT);
	}

	rio_mport_read_config_32(mport, destid, hopcount, RIO_SRC_OPS_CAR,
				 &rdev->src_ops);
	rio_mport_read_config_32(mport, destid, hopcount, RIO_DST_OPS_CAR,
				 &rdev->dst_ops);

	rdev->comp_tag = dev_info.comptag;
	rdev->destid = destid;
	/* hopcount is stored as specified by a caller, regardless of EP or SW */
	rdev->hopcount = hopcount;

	if (rdev->pef & RIO_PEF_SWITCH) {
		rswitch = rdev->rswitch;
		rswitch->route_table = NULL;
	}

	if (strlen(dev_info.name))
		dev_set_name(&rdev->dev, "%s", dev_info.name);
	else if (rdev->pef & RIO_PEF_SWITCH)
		dev_set_name(&rdev->dev, "%02x:s:%04x", mport->id,
			     rdev->comp_tag & RIO_CTAG_UDEVID);
	else
		dev_set_name(&rdev->dev, "%02x:e:%04x", mport->id,
			     rdev->comp_tag & RIO_CTAG_UDEVID);

	INIT_LIST_HEAD(&rdev->net_list);
	rdev->dev.parent = &mport->net->dev;
	rio_attach_device(rdev);
	rdev->dev.release = rio_release_dev;

	if (rdev->dst_ops & RIO_DST_OPS_DOORBELL)
		rio_init_dbell_res(&rdev->riores[RIO_DOORBELL_RESOURCE],
				   0, 0xffff);
	err = rio_add_device(rdev);
	if (err)
		goto cleanup;
	rio_dev_get(rdev);

	return 0;
cleanup:
	kfree(rdev);
	return err;
}

static int rio_mport_del_riodev(struct mport_cdev_priv *priv, void __user *arg)
{
	struct rio_rdev_info dev_info;
	struct rio_dev *rdev = NULL;
	struct device *dev;
	struct rio_mport *mport;
	struct rio_net *net;

	if (copy_from_user(&dev_info, arg, sizeof(dev_info)))
		return -EFAULT;
	dev_info.name[sizeof(dev_info.name) - 1] = '\0';

	mport = priv->md->mport;

	/* If device name is specified, removal by name has priority */
	if (strlen(dev_info.name)) {
		dev = bus_find_device_by_name(&rio_bus_type, NULL,
					      dev_info.name);
		if (dev)
			rdev = to_rio_dev(dev);
	} else {
		do {
			rdev = rio_get_comptag(dev_info.comptag, rdev);
			if (rdev && rdev->dev.parent == &mport->net->dev &&
			    rdev->destid == dev_info.destid &&
			    rdev->hopcount == dev_info.hopcount)
				break;
		} while (rdev);
	}

	if (!rdev) {
		rmcd_debug(RDEV,
			"device name:%s ct:0x%x did:0x%x hc:0x%x not found",
			dev_info.name, dev_info.comptag, dev_info.destid,
			dev_info.hopcount);
		return -ENODEV;
	}

	net = rdev->net;
	rio_dev_put(rdev);
	rio_del_device(rdev, RIO_DEVICE_SHUTDOWN);

	if (list_empty(&net->devices)) {
		rio_free_net(net);
		mport->net = NULL;
	}

	return 0;
}

/*
 * Mport cdev management
 */

/*
 * mport_cdev_open() - Open character device (mport)
 */
static int mport_cdev_open(struct inode *inode, struct file *filp)
{
	int ret;
	int minor = iminor(inode);
	struct mport_dev *chdev;
	struct mport_cdev_priv *priv;

	/* Test for valid device */
	if (minor >= RIO_MAX_MPORTS) {
		rmcd_error("Invalid minor device number");
		return -EINVAL;
	}

	chdev = container_of(inode->i_cdev, struct mport_dev, cdev);

	rmcd_debug(INIT, "%s filp=%p", dev_name(&chdev->dev), filp);

	if (atomic_read(&chdev->active) == 0)
		return -ENODEV;

	get_device(&chdev->dev);

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv) {
		put_device(&chdev->dev);
		return -ENOMEM;
	}

	priv->md = chdev;

	mutex_lock(&chdev->file_mutex);
	list_add_tail(&priv->list, &chdev->file_list);
	mutex_unlock(&chdev->file_mutex);

	INIT_LIST_HEAD(&priv->db_filters);
	INIT_LIST_HEAD(&priv->pw_filters);
	spin_lock_init(&priv->fifo_lock);
	init_waitqueue_head(&priv->event_rx_wait);
	ret = kfifo_alloc(&priv->event_fifo,
			  sizeof(struct rio_event) * MPORT_EVENT_DEPTH,
			  GFP_KERNEL);
	if (ret < 0) {
		dev_err(&chdev->dev, DRV_NAME ": kfifo_alloc failed\n");
		ret = -ENOMEM;
		goto err_fifo;
	}

#ifdef CONFIG_RAPIDIO_DMA_ENGINE
	INIT_LIST_HEAD(&priv->async_list);
	spin_lock_init(&priv->req_lock);
	mutex_init(&priv->dma_lock);
#endif

	filp->private_data = priv;
	goto out;
err_fifo:
	kfree(priv);
out:
	return ret;
}

1931static int mport_cdev_fasync(int fd, struct file *filp, int mode)
1932{
1933 struct mport_cdev_priv *priv = filp->private_data;
1934
1935 return fasync_helper(fd, filp, mode, &priv->async_queue);
1936}
1937
1938#ifdef CONFIG_RAPIDIO_DMA_ENGINE
1939static void mport_cdev_release_dma(struct file *filp)
1940{
1941 struct mport_cdev_priv *priv = filp->private_data;
1942 struct mport_dev *md;
1943 struct mport_dma_req *req, *req_next;
1944 unsigned long tmo = msecs_to_jiffies(dma_timeout);
1945 long wret;
1946 LIST_HEAD(list);
1947
1948 rmcd_debug(EXIT, "from filp=%p %s(%d)",
1949 filp, current->comm, task_pid_nr(current));
1950
1951 if (!priv->dmach) {
1952 rmcd_debug(EXIT, "No DMA channel for filp=%p", filp);
1953 return;
1954 }
1955
1956 md = priv->md;
1957
e8de3701
AB
1958 spin_lock(&priv->req_lock);
1959 if (!list_empty(&priv->async_list)) {
1960 rmcd_debug(EXIT, "async list not empty filp=%p %s(%d)",
1961 filp, current->comm, task_pid_nr(current));
1962 list_splice_init(&priv->async_list, &list);
1963 }
1964 spin_unlock(&priv->req_lock);
1965
1966 if (!list_empty(&list)) {
1967 rmcd_debug(EXIT, "temp list not empty");
1968 list_for_each_entry_safe(req, req_next, &list, node) {
1969 rmcd_debug(EXIT, "free req->filp=%p cookie=%d compl=%s",
1970 req->filp, req->cookie,
1971 completion_done(&req->req_comp)?"yes":"no");
1972 list_del(&req->node);
bbd876ad 1973 kref_put(&req->refcount, dma_req_free);
e8de3701
AB
1974 }
1975 }
1976
1977 put_dma_channel(priv);
1978 wret = wait_for_completion_interruptible_timeout(&priv->comp, tmo);
1979
1980 if (wret <= 0) {
1981 rmcd_error("%s(%d) failed waiting for DMA release err=%ld",
1982 current->comm, task_pid_nr(current), wret);
1983 }
1984
1985 if (priv->dmach != priv->md->dma_chan) {
1986 rmcd_debug(EXIT, "Release DMA channel for filp=%p %s(%d)",
1987 filp, current->comm, task_pid_nr(current));
1988 rio_release_dma(priv->dmach);
1989 } else {
1990 rmcd_debug(EXIT, "Adjust default DMA channel refcount");
1991 kref_put(&md->dma_ref, mport_release_def_dma);
1992 }
1993
1994 priv->dmach = NULL;
1995}
1996#else
 1997#define mport_cdev_release_dma(filp) do {} while (0)
1998#endif
1999
2000/*
2001 * mport_cdev_release() - Release character device
2002 */
2003static int mport_cdev_release(struct inode *inode, struct file *filp)
2004{
2005 struct mport_cdev_priv *priv = filp->private_data;
2006 struct mport_dev *chdev;
2007 struct rio_mport_pw_filter *pw_filter, *pw_filter_next;
2008 struct rio_mport_db_filter *db_filter, *db_filter_next;
2009 struct rio_mport_mapping *map, *_map;
2010 unsigned long flags;
2011
2012 rmcd_debug(EXIT, "%s filp=%p", dev_name(&priv->md->dev), filp);
2013
2014 chdev = priv->md;
2015 mport_cdev_release_dma(filp);
2016
2017 priv->event_mask = 0;
2018
2019 spin_lock_irqsave(&chdev->pw_lock, flags);
2020 if (!list_empty(&priv->pw_filters)) {
2021 list_for_each_entry_safe(pw_filter, pw_filter_next,
2022 &priv->pw_filters, priv_node)
2023 rio_mport_delete_pw_filter(pw_filter);
2024 }
2025 spin_unlock_irqrestore(&chdev->pw_lock, flags);
2026
2027 spin_lock_irqsave(&chdev->db_lock, flags);
2028 list_for_each_entry_safe(db_filter, db_filter_next,
2029 &priv->db_filters, priv_node) {
2030 rio_mport_delete_db_filter(db_filter);
2031 }
2032 spin_unlock_irqrestore(&chdev->db_lock, flags);
2033
2034 kfifo_free(&priv->event_fifo);
2035
2036 mutex_lock(&chdev->buf_mutex);
2037 list_for_each_entry_safe(map, _map, &chdev->mappings, node) {
2038 if (map->filp == filp) {
2039 rmcd_debug(EXIT, "release mapping %p filp=%p",
2040 map->virt_addr, filp);
2041 kref_put(&map->ref, mport_release_mapping);
2042 }
2043 }
2044 mutex_unlock(&chdev->buf_mutex);
2045
2046 mport_cdev_fasync(-1, filp, 0);
2047 filp->private_data = NULL;
2048 mutex_lock(&chdev->file_mutex);
2049 list_del(&priv->list);
2050 mutex_unlock(&chdev->file_mutex);
2051 put_device(&chdev->dev);
2052 kfree(priv);
2053 return 0;
2054}
2055
2056/*
2057 * mport_cdev_ioctl() - IOCTLs for character device
2058 */
2059static long mport_cdev_ioctl(struct file *filp,
2060 unsigned int cmd, unsigned long arg)
2061{
2062 int err = -EINVAL;
2063 struct mport_cdev_priv *data = filp->private_data;
2064 struct mport_dev *md = data->md;
2065
2066 if (atomic_read(&md->active) == 0)
2067 return -ENODEV;
2068
2069 switch (cmd) {
2070 case RIO_MPORT_MAINT_READ_LOCAL:
2071 return rio_mport_maint_rd(data, (void __user *)arg, 1);
2072 case RIO_MPORT_MAINT_WRITE_LOCAL:
2073 return rio_mport_maint_wr(data, (void __user *)arg, 1);
2074 case RIO_MPORT_MAINT_READ_REMOTE:
2075 return rio_mport_maint_rd(data, (void __user *)arg, 0);
2076 case RIO_MPORT_MAINT_WRITE_REMOTE:
2077 return rio_mport_maint_wr(data, (void __user *)arg, 0);
2078 case RIO_MPORT_MAINT_HDID_SET:
2079 return maint_hdid_set(data, (void __user *)arg);
2080 case RIO_MPORT_MAINT_COMPTAG_SET:
2081 return maint_comptag_set(data, (void __user *)arg);
2082 case RIO_MPORT_MAINT_PORT_IDX_GET:
2083 return maint_port_idx_get(data, (void __user *)arg);
2084 case RIO_MPORT_GET_PROPERTIES:
2085 md->properties.hdid = md->mport->host_deviceid;
2086 if (copy_to_user((void __user *)arg, &(md->properties),
2087 sizeof(md->properties)))
2088 return -EFAULT;
2089 return 0;
2090 case RIO_ENABLE_DOORBELL_RANGE:
2091 return rio_mport_add_db_filter(data, (void __user *)arg);
2092 case RIO_DISABLE_DOORBELL_RANGE:
2093 return rio_mport_remove_db_filter(data, (void __user *)arg);
2094 case RIO_ENABLE_PORTWRITE_RANGE:
2095 return rio_mport_add_pw_filter(data, (void __user *)arg);
2096 case RIO_DISABLE_PORTWRITE_RANGE:
2097 return rio_mport_remove_pw_filter(data, (void __user *)arg);
2098 case RIO_SET_EVENT_MASK:
 2099 data->event_mask = (u32)arg;
2100 return 0;
2101 case RIO_GET_EVENT_MASK:
2102 if (copy_to_user((void __user *)arg, &data->event_mask,
 2103 sizeof(u32)))
2104 return -EFAULT;
2105 return 0;
2106 case RIO_MAP_OUTBOUND:
2107 return rio_mport_obw_map(filp, (void __user *)arg);
2108 case RIO_MAP_INBOUND:
2109 return rio_mport_map_inbound(filp, (void __user *)arg);
2110 case RIO_UNMAP_OUTBOUND:
2111 return rio_mport_obw_free(filp, (void __user *)arg);
2112 case RIO_UNMAP_INBOUND:
2113 return rio_mport_inbound_free(filp, (void __user *)arg);
2114 case RIO_ALLOC_DMA:
2115 return rio_mport_alloc_dma(filp, (void __user *)arg);
2116 case RIO_FREE_DMA:
2117 return rio_mport_free_dma(filp, (void __user *)arg);
2118 case RIO_WAIT_FOR_ASYNC:
2119 return rio_mport_wait_for_async_dma(filp, (void __user *)arg);
2120 case RIO_TRANSFER:
2121 return rio_mport_transfer_ioctl(filp, (void __user *)arg);
2122 case RIO_DEV_ADD:
2123 return rio_mport_add_riodev(data, (void __user *)arg);
2124 case RIO_DEV_DEL:
2125 return rio_mport_del_riodev(data, (void __user *)arg);
2126 default:
2127 break;
2128 }
2129
2130 return err;
2131}
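/*
 * Minimal user-space sketch of the ioctl interface (a hedged example;
 * the command and structure come from <linux/rio_mport_cdev.h>, with
 * field names matching md->properties as used above):
 *
 *	struct rio_mport_properties props;
 *
 *	if (ioctl(fd, RIO_MPORT_GET_PROPERTIES, &props) == 0)
 *		printf("mport %u: hdid %u\n",
 *		       (unsigned)props.id, (unsigned)props.hdid);
 */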
2132
2133/*
2134 * mport_release_mapping - free mapping resources and info structure
2135 * @ref: a pointer to the kref within struct rio_mport_mapping
2136 *
2137 * NOTE: Shall be called while holding buf_mutex.
2138 */
2139static void mport_release_mapping(struct kref *ref)
2140{
2141 struct rio_mport_mapping *map =
2142 container_of(ref, struct rio_mport_mapping, ref);
2143 struct rio_mport *mport = map->md->mport;
2144
2145 rmcd_debug(MMAP, "type %d mapping @ %p (phys = %pad) for %s",
2146 map->dir, map->virt_addr,
2147 &map->phys_addr, mport->name);
2148
2149 list_del(&map->node);
2150
2151 switch (map->dir) {
2152 case MAP_INBOUND:
2153 rio_unmap_inb_region(mport, map->phys_addr);
 2154 fallthrough;
2155 case MAP_DMA:
2156 dma_free_coherent(mport->dev.parent, map->size,
2157 map->virt_addr, map->phys_addr);
2158 break;
2159 case MAP_OUTBOUND:
2160 rio_unmap_outb_region(mport, map->rioid, map->rio_addr);
2161 break;
2162 }
2163 kfree(map);
2164}
2165
2166static void mport_mm_open(struct vm_area_struct *vma)
2167{
2168 struct rio_mport_mapping *map = vma->vm_private_data;
2169
 2170 rmcd_debug(MMAP, "%pad", &map->phys_addr);
2171 kref_get(&map->ref);
2172}
2173
2174static void mport_mm_close(struct vm_area_struct *vma)
2175{
2176 struct rio_mport_mapping *map = vma->vm_private_data;
2177
 2178 rmcd_debug(MMAP, "%pad", &map->phys_addr);
2179 mutex_lock(&map->md->buf_mutex);
2180 kref_put(&map->ref, mport_release_mapping);
2181 mutex_unlock(&map->md->buf_mutex);
2182}
2183
2184static const struct vm_operations_struct vm_ops = {
2185 .open = mport_mm_open,
2186 .close = mport_mm_close,
2187};
2188
2189static int mport_cdev_mmap(struct file *filp, struct vm_area_struct *vma)
2190{
2191 struct mport_cdev_priv *priv = filp->private_data;
2192 struct mport_dev *md;
2193 size_t size = vma->vm_end - vma->vm_start;
2194 dma_addr_t baddr;
2195 unsigned long offset;
2196 int found = 0, ret;
2197 struct rio_mport_mapping *map;
2198
2199 rmcd_debug(MMAP, "0x%x bytes at offset 0x%lx",
2200 (unsigned int)size, vma->vm_pgoff);
2201
2202 md = priv->md;
2203 baddr = ((dma_addr_t)vma->vm_pgoff << PAGE_SHIFT);
2204
2205 mutex_lock(&md->buf_mutex);
2206 list_for_each_entry(map, &md->mappings, node) {
2207 if (baddr >= map->phys_addr &&
2208 baddr < (map->phys_addr + map->size)) {
2209 found = 1;
2210 break;
2211 }
2212 }
2213 mutex_unlock(&md->buf_mutex);
2214
2215 if (!found)
2216 return -ENOMEM;
2217
2218 offset = baddr - map->phys_addr;
2219
2220 if (size + offset > map->size)
2221 return -EINVAL;
2222
2223 vma->vm_pgoff = offset >> PAGE_SHIFT;
2224 rmcd_debug(MMAP, "MMAP adjusted offset = 0x%lx", vma->vm_pgoff);
2225
2226 if (map->dir == MAP_INBOUND || map->dir == MAP_DMA)
2227 ret = dma_mmap_coherent(md->mport->dev.parent, vma,
2228 map->virt_addr, map->phys_addr, map->size);
2229 else if (map->dir == MAP_OUTBOUND) {
2230 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
2231 ret = vm_iomap_memory(vma, map->phys_addr, map->size);
2232 } else {
2233 rmcd_error("Attempt to mmap unsupported mapping type");
2234 ret = -EIO;
2235 }
2236
2237 if (!ret) {
2238 vma->vm_private_data = map;
2239 vma->vm_ops = &vm_ops;
2240 mport_mm_open(vma);
2241 } else {
2242 rmcd_error("MMAP exit with err=%d", ret);
2243 }
2244
2245 return ret;
2246}
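/*
 * User-space sketch (a hedged example): since this handler matches
 * vma->vm_pgoff against map->phys_addr, a buffer created through
 * RIO_ALLOC_DMA or RIO_MAP_INBOUND is mapped by passing its physical
 * address back as the mmap() offset ("size" and "phys_addr" stand for
 * the values returned by the allocation ioctl):
 *
 *	void *p = mmap(NULL, size, PROT_READ | PROT_WRITE,
 *		       MAP_SHARED, fd, (off_t)phys_addr);
 */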
2247
 2248static __poll_t mport_cdev_poll(struct file *filp, poll_table *wait)
2249{
2250 struct mport_cdev_priv *priv = filp->private_data;
2251
2252 poll_wait(filp, &priv->event_rx_wait, wait);
2253 if (kfifo_len(&priv->event_fifo))
 2254 return EPOLLIN | EPOLLRDNORM;
2255
2256 return 0;
2257}
2258
2259static ssize_t mport_read(struct file *filp, char __user *buf, size_t count,
2260 loff_t *ppos)
2261{
2262 struct mport_cdev_priv *priv = filp->private_data;
2263 int copied;
2264 ssize_t ret;
2265
2266 if (!count)
2267 return 0;
2268
2269 if (kfifo_is_empty(&priv->event_fifo) &&
2270 (filp->f_flags & O_NONBLOCK))
2271 return -EAGAIN;
2272
2273 if (count % sizeof(struct rio_event))
2274 return -EINVAL;
2275
2276 ret = wait_event_interruptible(priv->event_rx_wait,
2277 kfifo_len(&priv->event_fifo) != 0);
2278 if (ret)
2279 return ret;
2280
2281 while (ret < count) {
 2282 if (kfifo_to_user(&priv->event_fifo, buf,
 2283 sizeof(struct rio_event), &copied))
 2284 return -EFAULT;
	if (!copied)
		break;	/* fifo drained; return the partial read instead of spinning */
2285 ret += copied;
2286 buf += copied;
2287 }
2288
2289 return ret;
2290}
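/*
 * Sketch of event consumption from user space: poll() routes to
 * mport_cdev_poll() above, and each read() must be a multiple of
 * sizeof(struct rio_event):
 *
 *	struct pollfd pfd = { .fd = fd, .events = POLLIN };
 *	struct rio_event ev;
 *
 *	if (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLIN))
 *		read(fd, &ev, sizeof(ev));
 */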
2291
2292static ssize_t mport_write(struct file *filp, const char __user *buf,
2293 size_t count, loff_t *ppos)
2294{
2295 struct mport_cdev_priv *priv = filp->private_data;
2296 struct rio_mport *mport = priv->md->mport;
2297 struct rio_event event;
2298 int len, ret;
2299
2300 if (!count)
2301 return 0;
2302
2303 if (count % sizeof(event))
2304 return -EINVAL;
2305
2306 len = 0;
2307 while ((count - len) >= (int)sizeof(event)) {
2308 if (copy_from_user(&event, buf, sizeof(event)))
2309 return -EFAULT;
2310
2311 if (event.header != RIO_DOORBELL)
2312 return -EINVAL;
2313
2314 ret = rio_mport_send_doorbell(mport,
 2315 event.u.doorbell.rioid,
2316 event.u.doorbell.payload);
2317 if (ret < 0)
2318 return ret;
2319
2320 len += sizeof(event);
2321 buf += sizeof(event);
2322 }
2323
2324 return len;
2325}
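/*
 * Sketch of sending a doorbell from user space; the write handler
 * above rejects anything but RIO_DOORBELL events ("destid" here is a
 * placeholder for the target device ID):
 *
 *	struct rio_event ev = { .header = RIO_DOORBELL };
 *
 *	ev.u.doorbell.rioid = destid;
 *	ev.u.doorbell.payload = 0x5a5a;
 *	write(fd, &ev, sizeof(ev));
 */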
2326
2327static const struct file_operations mport_fops = {
2328 .owner = THIS_MODULE,
2329 .open = mport_cdev_open,
2330 .release = mport_cdev_release,
2331 .poll = mport_cdev_poll,
2332 .read = mport_read,
2333 .write = mport_write,
2334 .mmap = mport_cdev_mmap,
2335 .fasync = mport_cdev_fasync,
2336 .unlocked_ioctl = mport_cdev_ioctl
2337};
2338
2339/*
2340 * Character device management
2341 */
2342
2343static void mport_device_release(struct device *dev)
2344{
2345 struct mport_dev *md;
2346
2347 rmcd_debug(EXIT, "%s", dev_name(dev));
2348 md = container_of(dev, struct mport_dev, dev);
2349 kfree(md);
2350}
2351
2352/*
2353 * mport_cdev_add() - Create mport_dev from rio_mport
2354 * @mport: RapidIO master port
2355 */
2356static struct mport_dev *mport_cdev_add(struct rio_mport *mport)
2357{
2358 int ret = 0;
2359 struct mport_dev *md;
2360 struct rio_mport_attr attr;
2361
 2362 md = kzalloc(sizeof(*md), GFP_KERNEL);
2363 if (!md) {
 2364 rmcd_error("Unable to allocate a device object");
2365 return NULL;
2366 }
2367
2368 md->mport = mport;
2369 mutex_init(&md->buf_mutex);
2370 mutex_init(&md->file_mutex);
2371 INIT_LIST_HEAD(&md->file_list);
 2372
2373 device_initialize(&md->dev);
2374 md->dev.devt = MKDEV(MAJOR(dev_number), mport->id);
2375 md->dev.class = dev_class;
2376 md->dev.parent = &mport->dev;
2377 md->dev.release = mport_device_release;
2378 dev_set_name(&md->dev, DEV_NAME "%d", mport->id);
2379 atomic_set(&md->active, 1);
2380
2381 cdev_init(&md->cdev, &mport_fops);
2382 md->cdev.owner = THIS_MODULE;
2383
2384 INIT_LIST_HEAD(&md->doorbells);
2385 spin_lock_init(&md->db_lock);
2386 INIT_LIST_HEAD(&md->portwrites);
2387 spin_lock_init(&md->pw_lock);
2388 INIT_LIST_HEAD(&md->mappings);
2389
2390 md->properties.id = mport->id;
2391 md->properties.sys_size = mport->sys_size;
2392 md->properties.hdid = mport->host_deviceid;
2393 md->properties.index = mport->index;
2394
 2395 /* The transfer_mode property will be returned through the mport
 2396 * query interface
 2397 */
 2398#ifdef CONFIG_FSL_RIO /* for now: only on Freescale's SoCs */
2399 md->properties.transfer_mode |= RIO_TRANSFER_MODE_MAPPED;
2400#else
2401 md->properties.transfer_mode |= RIO_TRANSFER_MODE_TRANSFER;
2402#endif
2403
2404 ret = cdev_device_add(&md->cdev, &md->dev);
2405 if (ret) {
2406 rmcd_error("Failed to register mport %d (err=%d)",
2407 mport->id, ret);
2408 goto err_cdev;
2409 }
2410 ret = rio_query_mport(mport, &attr);
2411 if (!ret) {
2412 md->properties.flags = attr.flags;
2413 md->properties.link_speed = attr.link_speed;
2414 md->properties.link_width = attr.link_width;
2415 md->properties.dma_max_sge = attr.dma_max_sge;
2416 md->properties.dma_max_size = attr.dma_max_size;
2417 md->properties.dma_align = attr.dma_align;
2418 md->properties.cap_sys_size = 0;
2419 md->properties.cap_transfer_mode = 0;
2420 md->properties.cap_addr_size = 0;
2421 } else
2422 pr_info(DRV_PREFIX "Failed to obtain info for %s cdev(%d:%d)\n",
2423 mport->name, MAJOR(dev_number), mport->id);
2424
2425 mutex_lock(&mport_devs_lock);
2426 list_add_tail(&md->node, &mport_devs);
2427 mutex_unlock(&mport_devs_lock);
2428
2429 pr_info(DRV_PREFIX "Added %s cdev(%d:%d)\n",
2430 mport->name, MAJOR(dev_number), mport->id);
2431
2432 return md;
2433
2434err_cdev:
 2435 put_device(&md->dev);
2436 return NULL;
2437}
2438
2439/*
2440 * mport_cdev_terminate_dma() - Stop all active DMA data transfers and release
2441 * associated DMA channels.
2442 */
2443static void mport_cdev_terminate_dma(struct mport_dev *md)
2444{
2445#ifdef CONFIG_RAPIDIO_DMA_ENGINE
2446 struct mport_cdev_priv *client;
2447
2448 rmcd_debug(DMA, "%s", dev_name(&md->dev));
2449
2450 mutex_lock(&md->file_mutex);
2451 list_for_each_entry(client, &md->file_list, list) {
2452 if (client->dmach) {
2453 dmaengine_terminate_all(client->dmach);
2454 rio_release_dma(client->dmach);
2455 }
2456 }
2457 mutex_unlock(&md->file_mutex);
2458
2459 if (md->dma_chan) {
2460 dmaengine_terminate_all(md->dma_chan);
2461 rio_release_dma(md->dma_chan);
2462 md->dma_chan = NULL;
2463 }
2464#endif
2465}
2466
2467
2468/*
2469 * mport_cdev_kill_fasync() - Send SIGIO signal to all processes with open
2470 * mport_cdev files.
2471 */
2472static int mport_cdev_kill_fasync(struct mport_dev *md)
2473{
2474 unsigned int files = 0;
2475 struct mport_cdev_priv *client;
2476
2477 mutex_lock(&md->file_mutex);
2478 list_for_each_entry(client, &md->file_list, list) {
2479 if (client->async_queue)
2480 kill_fasync(&client->async_queue, SIGIO, POLL_HUP);
2481 files++;
2482 }
2483 mutex_unlock(&md->file_mutex);
2484 return files;
2485}
2486
2487/*
2488 * mport_cdev_remove() - Remove mport character device
 2489 * @md: mport device to remove
2490 */
2491static void mport_cdev_remove(struct mport_dev *md)
2492{
2493 struct rio_mport_mapping *map, *_map;
2494
2495 rmcd_debug(EXIT, "Remove %s cdev", md->mport->name);
2496 atomic_set(&md->active, 0);
2497 mport_cdev_terminate_dma(md);
2498 rio_del_mport_pw_handler(md->mport, md, rio_mport_pw_handler);
 2499 cdev_device_del(&md->cdev, &md->dev);
2500 mport_cdev_kill_fasync(md);
2501
2502 /* TODO: do we need to give clients some time to close file
2503 * descriptors? Simple wait for XX, or kref?
2504 */
2505
2506 /*
2507 * Release DMA buffers allocated for the mport device.
2508 * Disable associated inbound Rapidio requests mapping if applicable.
2509 */
2510 mutex_lock(&md->buf_mutex);
2511 list_for_each_entry_safe(map, _map, &md->mappings, node) {
2512 kref_put(&map->ref, mport_release_mapping);
2513 }
2514 mutex_unlock(&md->buf_mutex);
2515
2516 if (!list_empty(&md->mappings))
2517 rmcd_warn("WARNING: %s pending mappings on removal",
2518 md->mport->name);
2519
2520 rio_release_inb_dbell(md->mport, 0, 0x0fff);
2521
2522 put_device(&md->dev);
2523}
2524
2525/*
2526 * RIO rio_mport_interface driver
2527 */
2528
2529/*
2530 * mport_add_mport() - Add rio_mport from LDM device struct
2531 * @dev: Linux device model struct
2532 * @class_intf: Linux class_interface
2533 */
2534static int mport_add_mport(struct device *dev,
2535 struct class_interface *class_intf)
2536{
2537 struct rio_mport *mport = NULL;
2538 struct mport_dev *chdev = NULL;
2539
2540 mport = to_rio_mport(dev);
2541 if (!mport)
2542 return -ENODEV;
2543
2544 chdev = mport_cdev_add(mport);
2545 if (!chdev)
2546 return -ENODEV;
2547
2548 return 0;
2549}
2550
2551/*
2552 * mport_remove_mport() - Remove rio_mport from global list
2553 * TODO remove device from global mport_dev list
2554 */
2555static void mport_remove_mport(struct device *dev,
2556 struct class_interface *class_intf)
2557{
2558 struct rio_mport *mport = NULL;
2559 struct mport_dev *chdev;
2560 int found = 0;
2561
2562 mport = to_rio_mport(dev);
2563 rmcd_debug(EXIT, "Remove %s", mport->name);
2564
2565 mutex_lock(&mport_devs_lock);
2566 list_for_each_entry(chdev, &mport_devs, node) {
2567 if (chdev->mport->id == mport->id) {
2568 atomic_set(&chdev->active, 0);
2569 list_del(&chdev->node);
2570 found = 1;
2571 break;
2572 }
2573 }
2574 mutex_unlock(&mport_devs_lock);
2575
2576 if (found)
2577 mport_cdev_remove(chdev);
2578}
2579
2580/* the rio_mport_interface is used to handle local mport devices */
2581static struct class_interface rio_mport_interface __refdata = {
2582 .class = &rio_mport_class,
2583 .add_dev = mport_add_mport,
2584 .remove_dev = mport_remove_mport,
2585};
2586
2587/*
2588 * Linux kernel module
2589 */
2590
2591/*
2592 * mport_init - Driver module loading
2593 */
2594static int __init mport_init(void)
2595{
2596 int ret;
2597
2598 /* Create device class needed by udev */
2599 dev_class = class_create(THIS_MODULE, DRV_NAME);
 2600 if (IS_ERR(dev_class)) {
 2601 rmcd_error("Unable to create " DRV_NAME " class");
 2602 return PTR_ERR(dev_class);
2603 }
2604
2605 ret = alloc_chrdev_region(&dev_number, 0, RIO_MAX_MPORTS, DRV_NAME);
2606 if (ret < 0)
2607 goto err_chr;
2608
2609 rmcd_debug(INIT, "Registered class with major=%d", MAJOR(dev_number));
2610
2611 /* Register to rio_mport_interface */
2612 ret = class_interface_register(&rio_mport_interface);
2613 if (ret) {
2614 rmcd_error("class_interface_register() failed, err=%d", ret);
2615 goto err_cli;
2616 }
2617
2618 return 0;
2619
2620err_cli:
2621 unregister_chrdev_region(dev_number, RIO_MAX_MPORTS);
2622err_chr:
2623 class_destroy(dev_class);
2624 return ret;
2625}
2626
 2627/*
2628 * mport_exit - Driver module unloading
2629 */
2630static void __exit mport_exit(void)
2631{
2632 class_interface_unregister(&rio_mport_interface);
2633 class_destroy(dev_class);
2634 unregister_chrdev_region(dev_number, RIO_MAX_MPORTS);
2635}
2636
2637module_init(mport_init);
2638module_exit(mport_exit);