/*
 * RapidIO mport character device
 *
 * Copyright 2014-2015 Integrated Device Technology, Inc.
 *    Alexandre Bounine <alexandre.bounine@idt.com>
 * Copyright 2014-2015 Prodrive Technologies
 *    Andre van Herk <andre.van.herk@prodrive-technologies.com>
 *    Jerry Jacobs <jerry.jacobs@prodrive-technologies.com>
 * Copyright (C) 2014 Texas Instruments Incorporated
 *    Aurelien Jacquiot <a-jacquiot@ti.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/cdev.h>
#include <linux/ioctl.h>
#include <linux/uaccess.h>
#include <linux/list.h>
#include <linux/fs.h>
#include <linux/err.h>
#include <linux/net.h>
#include <linux/poll.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/kfifo.h>

#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mman.h>

#include <linux/dma-mapping.h>
#ifdef CONFIG_RAPIDIO_DMA_ENGINE
#include <linux/dmaengine.h>
#endif

#include <linux/rio.h>
#include <linux/rio_ids.h>
#include <linux/rio_drv.h>
#include <linux/rio_mport_cdev.h>

#include "../rio.h"

#define DRV_NAME	"rio_mport"
#define DRV_PREFIX	DRV_NAME ": "
#define DEV_NAME	"rio_mport"
#define DRV_VERSION	"1.0.0"

/* Debug output filtering masks */
enum {
	DBG_NONE	= 0,
	DBG_INIT	= BIT(0), /* driver init */
	DBG_EXIT	= BIT(1), /* driver exit */
	DBG_MPORT	= BIT(2), /* mport add/remove */
	DBG_RDEV	= BIT(3), /* RapidIO device add/remove */
	DBG_DMA		= BIT(4), /* DMA transfer messages */
	DBG_MMAP	= BIT(5), /* mapping messages */
	DBG_IBW		= BIT(6), /* inbound window */
	DBG_EVENT	= BIT(7), /* event handling messages */
	DBG_OBW		= BIT(8), /* outbound window messages */
	DBG_DBELL	= BIT(9), /* doorbell messages */
	DBG_ALL		= ~0,
};

#ifdef DEBUG
#define rmcd_debug(level, fmt, arg...)		\
	do {					\
		if (DBG_##level & dbg_level)	\
			pr_debug(DRV_PREFIX "%s: " fmt "\n", __func__, ##arg); \
	} while (0)
#else
#define rmcd_debug(level, fmt, arg...) \
		no_printk(KERN_DEBUG pr_fmt(DRV_PREFIX fmt "\n"), ##arg)
#endif
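
/*
 * Example: rmcd_debug(DMA, "cookie=%d", cookie) produces output only when
 * the driver is built with DEBUG and DBG_DMA is set in the dbg_level
 * module parameter below.
 */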

#define rmcd_warn(fmt, arg...) \
	pr_warn(DRV_PREFIX "%s WARNING " fmt "\n", __func__, ##arg)

#define rmcd_error(fmt, arg...) \
	pr_err(DRV_PREFIX "%s ERROR " fmt "\n", __func__, ##arg)

MODULE_AUTHOR("Jerry Jacobs <jerry.jacobs@prodrive-technologies.com>");
MODULE_AUTHOR("Aurelien Jacquiot <a-jacquiot@ti.com>");
MODULE_AUTHOR("Alexandre Bounine <alexandre.bounine@idt.com>");
MODULE_AUTHOR("Andre van Herk <andre.van.herk@prodrive-technologies.com>");
MODULE_DESCRIPTION("RapidIO mport character device driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

static int dma_timeout = 3000; /* DMA transfer timeout in msec */
module_param(dma_timeout, int, S_IRUGO);
MODULE_PARM_DESC(dma_timeout, "DMA Transfer Timeout in msec (default: 3000)");

#ifdef DEBUG
static u32 dbg_level = DBG_NONE;
module_param(dbg_level, uint, S_IWUSR | S_IWGRP | S_IRUGO);
MODULE_PARM_DESC(dbg_level, "Debugging output level (default 0 = none)");
#endif

/*
 * An internal DMA coherent buffer
 */
struct mport_dma_buf {
	void		*ib_base;
	dma_addr_t	ib_phys;
	u32		ib_size;
	u64		ib_rio_base;
	bool		ib_map;
	struct file	*filp;
};

/*
 * Internal memory mapping structure
 */
enum rio_mport_map_dir {
	MAP_INBOUND,
	MAP_OUTBOUND,
	MAP_DMA,
};

struct rio_mport_mapping {
	struct list_head node;
	struct mport_dev *md;
	enum rio_mport_map_dir dir;
	u16 rioid;
	u64 rio_addr;
	dma_addr_t phys_addr; /* for mmap */
	void *virt_addr; /* kernel address, for dma_free_coherent */
	u64 size;
	struct kref ref; /* refcount of vmas sharing the mapping */
	struct file *filp;
};

struct rio_mport_dma_map {
	int valid;
	u64 length;
	void *vaddr;
	dma_addr_t paddr;
};

#define MPORT_MAX_DMA_BUFS	16
#define MPORT_EVENT_DEPTH	10

/*
 * mport_dev - driver-specific structure that represents an mport device
 * @active: mport device status flag
 * @node: list node to maintain list of registered mports
 * @cdev: character device
 * @dev: associated device object
 * @mport: associated subsystem's master port device object
 * @buf_mutex: lock for buffer handling
 * @file_mutex: lock for open files list
 * @file_list: list of open files on given mport
 * @properties: properties of this mport
 * @doorbells: list of inbound doorbell filters
 * @db_lock: lock for the doorbell filter list
 * @portwrites: queue of inbound portwrites
 * @pw_lock: lock for port write queue
 * @mappings: queue for memory mappings
 * @dma_chan: default DMA channel associated with this device
 * @dma_ref: refcount for the default DMA channel
 * @comp: completion used to synchronize DMA channel release
 */
struct mport_dev {
	atomic_t		active;
	struct list_head	node;
	struct cdev		cdev;
	struct device		dev;
	struct rio_mport	*mport;
	struct mutex		buf_mutex;
	struct mutex		file_mutex;
	struct list_head	file_list;
	struct rio_mport_properties	properties;
	struct list_head	doorbells;
	spinlock_t		db_lock;
	struct list_head	portwrites;
	spinlock_t		pw_lock;
	struct list_head	mappings;
#ifdef CONFIG_RAPIDIO_DMA_ENGINE
	struct dma_chan		*dma_chan;
	struct kref		dma_ref;
	struct completion	comp;
#endif
};

/*
 * mport_cdev_priv - data structure specific to individual file object
 *                   associated with an open device
 * @md: master port character device object
 * @async_queue: asynchronous notification queue
 * @list: file objects tracking list
 * @db_filters: inbound doorbell filters for this descriptor
 * @pw_filters: portwrite filters for this descriptor
 * @event_fifo: event fifo for this descriptor
 * @event_rx_wait: wait queue for this descriptor
 * @fifo_lock: lock for event_fifo
 * @event_mask: event mask for this descriptor
 * @dmach: DMA engine channel allocated for specific file object
 * @async_list: list of uncompleted ASYNC DMA requests
 * @pend_list: list of pending (timed out or interrupted) SYNC requests
 * @req_lock: lock protecting async_list and pend_list
 * @dma_lock: lock serializing DMA channel allocation
 * @dma_ref: refcount of DMA requests using this descriptor's channel
 * @comp: completion signaled when the last channel reference is dropped
 */
struct mport_cdev_priv {
	struct mport_dev	*md;
	struct fasync_struct	*async_queue;
	struct list_head	list;
	struct list_head	db_filters;
	struct list_head	pw_filters;
	struct kfifo		event_fifo;
	wait_queue_head_t	event_rx_wait;
	spinlock_t		fifo_lock;
	u32			event_mask; /* RIO_DOORBELL, RIO_PORTWRITE */
#ifdef CONFIG_RAPIDIO_DMA_ENGINE
	struct dma_chan		*dmach;
	struct list_head	async_list;
	struct list_head	pend_list;
	spinlock_t		req_lock;
	struct mutex		dma_lock;
	struct kref		dma_ref;
	struct completion	comp;
#endif
};

/*
 * rio_mport_pw_filter - structure to describe a portwrite filter
 * @md_node: node in mport device's list
 * @priv_node: node in private file object's list
 * @priv: reference to private data
 * @filter: actual portwrite filter
 */
struct rio_mport_pw_filter {
	struct list_head md_node;
	struct list_head priv_node;
	struct mport_cdev_priv *priv;
	struct rio_pw_filter filter;
};

/*
 * rio_mport_db_filter - structure to describe a doorbell filter
 * @data_node: node in mport device's doorbell list
 * @priv_node: node in private file object's list
 * @priv: reference to private data
 * @filter: actual doorbell filter
 */
struct rio_mport_db_filter {
	struct list_head data_node;
	struct list_head priv_node;
	struct mport_cdev_priv *priv;
	struct rio_doorbell_filter filter;
};

static LIST_HEAD(mport_devs);
static DEFINE_MUTEX(mport_devs_lock);

#if (0) /* used by commented out portion of poll function : FIXME */
static DECLARE_WAIT_QUEUE_HEAD(mport_cdev_wait);
#endif

static struct class *dev_class;
static dev_t dev_number;

static struct workqueue_struct *dma_wq;

static void mport_release_mapping(struct kref *ref);

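/*
 * rio_mport_maint_rd() - read from local or remote RapidIO maintenance space
 * @priv: driver private data
 * @arg: userspace pointer to struct rio_mport_maint_io
 * @local: non-zero to access the local mport registers, 0 to generate
 *         maintenance read requests to the remote device/hop given in @arg
 *
 * Illustrative userspace sketch (field and ioctl names assume the UAPI in
 * <linux/rio_mport_cdev.h>):
 *
 *	struct rio_mport_maint_io io = {
 *		.rioid = destid, .hopcount = hc,
 *		.offset = 0x0,			(device identity CAR)
 *		.length = sizeof(uint32_t),
 *		.buffer = (uintptr_t)&value,
 *	};
 *	ioctl(fd, RIO_MPORT_MAINT_READ_REMOTE, &io);
 */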
static int rio_mport_maint_rd(struct mport_cdev_priv *priv, void __user *arg,
			      int local)
{
	struct rio_mport *mport = priv->md->mport;
	struct rio_mport_maint_io maint_io;
	u32 *buffer;
	u32 offset;
	size_t length;
	int ret, i;

	if (unlikely(copy_from_user(&maint_io, arg, sizeof(maint_io))))
		return -EFAULT;

	if ((maint_io.offset % 4) ||
	    (maint_io.length == 0) || (maint_io.length % 4) ||
	    (maint_io.length + maint_io.offset) > RIO_MAINT_SPACE_SZ)
		return -EINVAL;

	buffer = vmalloc(maint_io.length);
	if (buffer == NULL)
		return -ENOMEM;
	length = maint_io.length/sizeof(u32);
	offset = maint_io.offset;

	for (i = 0; i < length; i++) {
		if (local)
			ret = __rio_local_read_config_32(mport,
				offset, &buffer[i]);
		else
			ret = rio_mport_read_config_32(mport, maint_io.rioid,
				maint_io.hopcount, offset, &buffer[i]);
		if (ret)
			goto out;

		offset += 4;
	}

	if (unlikely(copy_to_user((void __user *)(uintptr_t)maint_io.buffer,
				   buffer, maint_io.length)))
		ret = -EFAULT;
out:
	vfree(buffer);
	return ret;
}

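/*
 * rio_mport_maint_wr() - write to local or remote RapidIO maintenance space
 * @priv: driver private data
 * @arg: userspace pointer to struct rio_mport_maint_io
 * @local: non-zero to access the local mport registers, 0 to generate
 *         maintenance write requests to the remote device/hop given in @arg
 */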
static int rio_mport_maint_wr(struct mport_cdev_priv *priv, void __user *arg,
			      int local)
{
	struct rio_mport *mport = priv->md->mport;
	struct rio_mport_maint_io maint_io;
	u32 *buffer;
	u32 offset;
	size_t length;
	int ret = -EINVAL, i;

	if (unlikely(copy_from_user(&maint_io, arg, sizeof(maint_io))))
		return -EFAULT;

	if ((maint_io.offset % 4) ||
	    (maint_io.length == 0) || (maint_io.length % 4) ||
	    (maint_io.length + maint_io.offset) > RIO_MAINT_SPACE_SZ)
		return -EINVAL;

	buffer = vmalloc(maint_io.length);
	if (buffer == NULL)
		return -ENOMEM;
	length = maint_io.length;

	if (unlikely(copy_from_user(buffer,
			(void __user *)(uintptr_t)maint_io.buffer, length))) {
		ret = -EFAULT;
		goto out;
	}

	offset = maint_io.offset;
	length /= sizeof(u32);

	for (i = 0; i < length; i++) {
		if (local)
			ret = __rio_local_write_config_32(mport,
							  offset, buffer[i]);
		else
			ret = rio_mport_write_config_32(mport, maint_io.rioid,
							maint_io.hopcount,
							offset, buffer[i]);
		if (ret)
			goto out;

		offset += 4;
	}

out:
	vfree(buffer);
	return ret;
}


/*
 * Inbound/outbound memory mapping functions
 */
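/*
 * rio_mport_create_outbound_mapping() - program an outbound window that
 * translates a range of local physical addresses (returned through @paddr)
 * into RapidIO address space starting at @raddr on device @rioid, and add
 * it to the mport device's mapping list. Called with md->buf_mutex held.
 */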
static int
rio_mport_create_outbound_mapping(struct mport_dev *md, struct file *filp,
				  u16 rioid, u64 raddr, u32 size,
				  dma_addr_t *paddr)
{
	struct rio_mport *mport = md->mport;
	struct rio_mport_mapping *map;
	int ret;

	rmcd_debug(OBW, "did=%d ra=0x%llx sz=0x%x", rioid, raddr, size);

	map = kzalloc(sizeof(*map), GFP_KERNEL);
	if (map == NULL)
		return -ENOMEM;

	ret = rio_map_outb_region(mport, rioid, raddr, size, 0, paddr);
	if (ret < 0)
		goto err_map_outb;

	map->dir = MAP_OUTBOUND;
	map->rioid = rioid;
	map->rio_addr = raddr;
	map->size = size;
	map->phys_addr = *paddr;
	map->filp = filp;
	map->md = md;
	kref_init(&map->ref);
	list_add_tail(&map->node, &md->mappings);
	return 0;
err_map_outb:
	kfree(map);
	return ret;
}

static int
rio_mport_get_outbound_mapping(struct mport_dev *md, struct file *filp,
			       u16 rioid, u64 raddr, u32 size,
			       dma_addr_t *paddr)
{
	struct rio_mport_mapping *map;
	int err = -ENOMEM;

	mutex_lock(&md->buf_mutex);
	list_for_each_entry(map, &md->mappings, node) {
		if (map->dir != MAP_OUTBOUND)
			continue;
		if (rioid == map->rioid &&
		    raddr == map->rio_addr && size == map->size) {
			*paddr = map->phys_addr;
			err = 0;
			break;
		} else if (rioid == map->rioid &&
			   raddr < (map->rio_addr + map->size - 1) &&
			   (raddr + size) > map->rio_addr) {
			err = -EBUSY;
			break;
		}
	}

	/* If not found, create new */
	if (err == -ENOMEM)
		err = rio_mport_create_outbound_mapping(md, filp, rioid, raddr,
							size, paddr);
	mutex_unlock(&md->buf_mutex);
	return err;
}

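/*
 * rio_mport_obw_map() - ioctl handler that sets up an outbound window and
 * returns its local physical base address in map.handle, which userspace
 * uses to access the window and later release it via rio_mport_obw_free().
 */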
static int rio_mport_obw_map(struct file *filp, void __user *arg)
{
	struct mport_cdev_priv *priv = filp->private_data;
	struct mport_dev *data = priv->md;
	struct rio_mmap map;
	dma_addr_t paddr;
	int ret;

	if (unlikely(copy_from_user(&map, arg, sizeof(map))))
		return -EFAULT;

	rmcd_debug(OBW, "did=%d ra=0x%llx sz=0x%llx",
		   map.rioid, map.rio_addr, map.length);

	ret = rio_mport_get_outbound_mapping(data, filp, map.rioid,
					     map.rio_addr, map.length, &paddr);
	if (ret < 0) {
		rmcd_error("Failed to set OBW err= %d", ret);
		return ret;
	}

	map.handle = paddr;

	if (unlikely(copy_to_user(arg, &map, sizeof(map))))
		return -EFAULT;
	return 0;
}

/*
 * rio_mport_obw_free() - unmap an OutBound Window from RapidIO address space
 *
 * @filp: file pointer associated with the call
 * @arg: buffer handle returned by the mapping routine
 */
static int rio_mport_obw_free(struct file *filp, void __user *arg)
{
	struct mport_cdev_priv *priv = filp->private_data;
	struct mport_dev *md = priv->md;
	u64 handle;
	struct rio_mport_mapping *map, *_map;

	if (!md->mport->ops->unmap_outb)
		return -EPROTONOSUPPORT;

	if (copy_from_user(&handle, arg, sizeof(handle)))
		return -EFAULT;

	rmcd_debug(OBW, "h=0x%llx", handle);

	mutex_lock(&md->buf_mutex);
	list_for_each_entry_safe(map, _map, &md->mappings, node) {
		if (map->dir == MAP_OUTBOUND && map->phys_addr == handle) {
			if (map->filp == filp) {
				rmcd_debug(OBW, "kref_put h=0x%llx", handle);
				map->filp = NULL;
				kref_put(&map->ref, mport_release_mapping);
			}
			break;
		}
	}
	mutex_unlock(&md->buf_mutex);

	return 0;
}

/*
 * maint_hdid_set() - Set the host Device ID
 * @priv: driver private data
 * @arg: Device Id
 */
static int maint_hdid_set(struct mport_cdev_priv *priv, void __user *arg)
{
	struct mport_dev *md = priv->md;
	u16 hdid;

	if (copy_from_user(&hdid, arg, sizeof(hdid)))
		return -EFAULT;

	md->mport->host_deviceid = hdid;
	md->properties.hdid = hdid;
	rio_local_set_device_id(md->mport, hdid);

	rmcd_debug(MPORT, "Set host device Id to %d", hdid);

	return 0;
}

/*
 * maint_comptag_set() - Set the host Component Tag
 * @priv: driver private data
 * @arg: Component Tag
 */
static int maint_comptag_set(struct mport_cdev_priv *priv, void __user *arg)
{
	struct mport_dev *md = priv->md;
	u32 comptag;

	if (copy_from_user(&comptag, arg, sizeof(comptag)))
		return -EFAULT;

	rio_local_write_config_32(md->mport, RIO_COMPONENT_TAG_CSR, comptag);

	rmcd_debug(MPORT, "Set host Component Tag to %d", comptag);

	return 0;
}

#ifdef CONFIG_RAPIDIO_DMA_ENGINE

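/*
 * DMA transfer support: each request is tracked by a struct mport_dma_req
 * from submission until completion. Fire-and-forget (FAF) requests are
 * freed asynchronously from workqueue context (dma_wq), because their
 * completion callback runs in a context where blocking is not allowed.
 */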
struct mport_dma_req {
	struct list_head node;
	struct file *filp;
	struct mport_cdev_priv *priv;
	enum rio_transfer_sync sync;
	struct sg_table sgt;
	struct page **page_list;
	unsigned int nr_pages;
	struct rio_mport_mapping *map;
	struct dma_chan *dmach;
	enum dma_data_direction dir;
	dma_cookie_t cookie;
	enum dma_status status;
	struct completion req_comp;
};

struct mport_faf_work {
	struct work_struct work;
	struct mport_dma_req *req;
};

static void mport_release_def_dma(struct kref *dma_ref)
{
	struct mport_dev *md =
			container_of(dma_ref, struct mport_dev, dma_ref);

	rmcd_debug(EXIT, "DMA_%d", md->dma_chan->chan_id);
	rio_release_dma(md->dma_chan);
	md->dma_chan = NULL;
}

static void mport_release_dma(struct kref *dma_ref)
{
	struct mport_cdev_priv *priv =
			container_of(dma_ref, struct mport_cdev_priv, dma_ref);

	rmcd_debug(EXIT, "DMA_%d", priv->dmach->chan_id);
	complete(&priv->comp);
}

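/*
 * dma_req_free() - release all resources held by a completed DMA request:
 * unmap and free the SG table, unpin user pages (if any), drop the kernel
 * buffer mapping reference (if any) and the DMA channel reference.
 */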
static void dma_req_free(struct mport_dma_req *req)
{
	struct mport_cdev_priv *priv = req->priv;
	unsigned int i;

	dma_unmap_sg(req->dmach->device->dev,
		     req->sgt.sgl, req->sgt.nents, req->dir);
	sg_free_table(&req->sgt);
	if (req->page_list) {
		for (i = 0; i < req->nr_pages; i++)
			put_page(req->page_list[i]);
		kfree(req->page_list);
	}

	if (req->map) {
		mutex_lock(&req->map->md->buf_mutex);
		kref_put(&req->map->ref, mport_release_mapping);
		mutex_unlock(&req->map->md->buf_mutex);
	}

	kref_put(&priv->dma_ref, mport_release_dma);

	kfree(req);
}

static void dma_xfer_callback(void *param)
{
	struct mport_dma_req *req = (struct mport_dma_req *)param;
	struct mport_cdev_priv *priv = req->priv;

	req->status = dma_async_is_tx_complete(priv->dmach, req->cookie,
					       NULL, NULL);
	complete(&req->req_comp);
}

static void dma_faf_cleanup(struct work_struct *_work)
{
	struct mport_faf_work *work = container_of(_work,
						struct mport_faf_work, work);
	struct mport_dma_req *req = work->req;

	dma_req_free(req);
	kfree(work);
}

static void dma_faf_callback(void *param)
{
	struct mport_dma_req *req = (struct mport_dma_req *)param;
	struct mport_faf_work *work;

	work = kmalloc(sizeof(*work), GFP_ATOMIC);
	if (!work)
		return;

	INIT_WORK(&work->work, dma_faf_cleanup);
	work->req = req;
	queue_work(dma_wq, &work->work);
}

/*
 * prep_dma_xfer() - Configure and send request to DMAengine to prepare DMA
 * transfer object.
 * Returns a pointer to the DMA transaction descriptor allocated by the DMA
 * driver on success, or ERR_PTR/NULL on failure. The caller must check a
 * non-NULL return value with the IS_ERR macro.
 */
static struct dma_async_tx_descriptor
*prep_dma_xfer(struct dma_chan *chan, struct rio_transfer_io *transfer,
	struct sg_table *sgt, int nents, enum dma_transfer_direction dir,
	enum dma_ctrl_flags flags)
{
	struct rio_dma_data tx_data;

	tx_data.sg = sgt->sgl;
	tx_data.sg_len = nents;
	tx_data.rio_addr_u = 0;
	tx_data.rio_addr = transfer->rio_addr;
	if (dir == DMA_MEM_TO_DEV) {
		switch (transfer->method) {
		case RIO_EXCHANGE_NWRITE:
			tx_data.wr_type = RDW_ALL_NWRITE;
			break;
		case RIO_EXCHANGE_NWRITE_R_ALL:
			tx_data.wr_type = RDW_ALL_NWRITE_R;
			break;
		case RIO_EXCHANGE_NWRITE_R:
			tx_data.wr_type = RDW_LAST_NWRITE_R;
			break;
		case RIO_EXCHANGE_DEFAULT:
			tx_data.wr_type = RDW_DEFAULT;
			break;
		default:
			return ERR_PTR(-EINVAL);
		}
	}

	return rio_dma_prep_xfer(chan, transfer->rioid, &tx_data, dir, flags);
}

/* Request DMA channel associated with this mport device.
 * Try to request DMA channel for every new process that opened given
 * mport. If a new DMA channel is not available use default channel
 * which is the first DMA channel opened on mport device.
 */
static int get_dma_channel(struct mport_cdev_priv *priv)
{
	mutex_lock(&priv->dma_lock);
	if (!priv->dmach) {
		priv->dmach = rio_request_mport_dma(priv->md->mport);
		if (!priv->dmach) {
			/* Use default DMA channel if available */
			if (priv->md->dma_chan) {
				priv->dmach = priv->md->dma_chan;
				kref_get(&priv->md->dma_ref);
			} else {
				rmcd_error("Failed to get DMA channel");
				mutex_unlock(&priv->dma_lock);
				return -ENODEV;
			}
		} else if (!priv->md->dma_chan) {
			/* Register default DMA channel if we do not have one */
			priv->md->dma_chan = priv->dmach;
			kref_init(&priv->md->dma_ref);
			rmcd_debug(DMA, "Register DMA_chan %d as default",
				   priv->dmach->chan_id);
		}

		kref_init(&priv->dma_ref);
		init_completion(&priv->comp);
	}

	kref_get(&priv->dma_ref);
	mutex_unlock(&priv->dma_lock);
	return 0;
}

static void put_dma_channel(struct mport_cdev_priv *priv)
{
	kref_put(&priv->dma_ref, mport_release_dma);
}

/*
 * DMA transfer functions
 */
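/*
 * do_dma_request() - submit a prepared request to the DMA engine and handle
 * it according to the synchronization mode: RIO_TRANSFER_SYNC blocks until
 * completion (or timeout/signal), RIO_TRANSFER_ASYNC queues the request on
 * the caller's async_list and returns the DMA cookie, and RIO_TRANSFER_FAF
 * returns immediately, leaving cleanup to the FAF completion callback.
 */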
static int do_dma_request(struct mport_dma_req *req,
			  struct rio_transfer_io *xfer,
			  enum rio_transfer_sync sync, int nents)
{
	struct mport_cdev_priv *priv;
	struct sg_table *sgt;
	struct dma_chan *chan;
	struct dma_async_tx_descriptor *tx;
	dma_cookie_t cookie;
	unsigned long tmo = msecs_to_jiffies(dma_timeout);
	enum dma_transfer_direction dir;
	long wret;
	int ret = 0;

	priv = req->priv;
	sgt = &req->sgt;

	chan = priv->dmach;
	dir = (req->dir == DMA_FROM_DEVICE) ? DMA_DEV_TO_MEM : DMA_MEM_TO_DEV;

	rmcd_debug(DMA, "%s(%d) uses %s for DMA_%s",
		   current->comm, task_pid_nr(current),
		   dev_name(&chan->dev->device),
		   (dir == DMA_DEV_TO_MEM)?"READ":"WRITE");

	/* Initialize DMA transaction request */
	tx = prep_dma_xfer(chan, xfer, sgt, nents, dir,
			   DMA_CTRL_ACK | DMA_PREP_INTERRUPT);

	if (!tx) {
		rmcd_debug(DMA, "prep error for %s A:0x%llx L:0x%llx",
			(dir == DMA_DEV_TO_MEM)?"READ":"WRITE",
			xfer->rio_addr, xfer->length);
		ret = -EIO;
		goto err_out;
	} else if (IS_ERR(tx)) {
		ret = PTR_ERR(tx);
		rmcd_debug(DMA, "prep error %d for %s A:0x%llx L:0x%llx", ret,
			(dir == DMA_DEV_TO_MEM)?"READ":"WRITE",
			xfer->rio_addr, xfer->length);
		goto err_out;
	}

	if (sync == RIO_TRANSFER_FAF)
		tx->callback = dma_faf_callback;
	else
		tx->callback = dma_xfer_callback;
	tx->callback_param = req;

	req->dmach = chan;
	req->sync = sync;
	req->status = DMA_IN_PROGRESS;
	init_completion(&req->req_comp);

	cookie = dmaengine_submit(tx);
	req->cookie = cookie;

	rmcd_debug(DMA, "pid=%d DMA_%s tx_cookie = %d", task_pid_nr(current),
		   (dir == DMA_DEV_TO_MEM)?"READ":"WRITE", cookie);

	if (dma_submit_error(cookie)) {
		rmcd_error("submit err=%d (addr:0x%llx len:0x%llx)",
			   cookie, xfer->rio_addr, xfer->length);
		ret = -EIO;
		goto err_out;
	}

	dma_async_issue_pending(chan);

	if (sync == RIO_TRANSFER_ASYNC) {
		spin_lock(&priv->req_lock);
		list_add_tail(&req->node, &priv->async_list);
		spin_unlock(&priv->req_lock);
		return cookie;
	} else if (sync == RIO_TRANSFER_FAF)
		return 0;

	wret = wait_for_completion_interruptible_timeout(&req->req_comp, tmo);

	if (wret == 0) {
		/* Timeout on wait occurred */
		rmcd_error("%s(%d) timed out waiting for DMA_%s %d",
		       current->comm, task_pid_nr(current),
		       (dir == DMA_DEV_TO_MEM)?"READ":"WRITE", cookie);
		return -ETIMEDOUT;
	} else if (wret == -ERESTARTSYS) {
		/* wait_for_completion was interrupted by a signal but DMA may
		 * be in progress
		 */
		rmcd_error("%s(%d) wait for DMA_%s %d was interrupted",
			current->comm, task_pid_nr(current),
			(dir == DMA_DEV_TO_MEM)?"READ":"WRITE", cookie);
		return -EINTR;
	}

	if (req->status != DMA_COMPLETE) {
		/* DMA transaction completion was signaled with error */
		rmcd_error("%s(%d) DMA_%s %d completed with status %d (ret=%d)",
			current->comm, task_pid_nr(current),
			(dir == DMA_DEV_TO_MEM)?"READ":"WRITE",
			cookie, req->status, ret);
		ret = -EIO;
	}

err_out:
	return ret;
}

/*
 * rio_dma_transfer() - Perform RapidIO DMA data transfer to/from
 *                      the remote RapidIO device
 * @filp: file pointer associated with the call
 * @transfer_mode: DMA transfer mode
 * @sync: synchronization mode
 * @dir: DMA transfer direction (DMA_MEM_TO_DEV = write OR
 *       DMA_DEV_TO_MEM = read)
 * @xfer: data transfer descriptor structure
 */
static int
rio_dma_transfer(struct file *filp, u32 transfer_mode,
		 enum rio_transfer_sync sync, enum dma_data_direction dir,
		 struct rio_transfer_io *xfer)
{
	struct mport_cdev_priv *priv = filp->private_data;
	unsigned long nr_pages = 0;
	struct page **page_list = NULL;
	struct mport_dma_req *req;
	struct mport_dev *md = priv->md;
	struct dma_chan *chan;
	int i, ret;
	int nents;

	if (xfer->length == 0)
		return -EINVAL;
	req = kzalloc(sizeof(*req), GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	ret = get_dma_channel(priv);
	if (ret) {
		kfree(req);
		return ret;
	}

	/*
	 * If parameter loc_addr != NULL, we are transferring data from/to
	 * data buffer allocated in user-space: lock in memory user-space
	 * buffer pages and build an SG table for DMA transfer request
	 *
	 * Otherwise (loc_addr == NULL) contiguous kernel-space buffer is
	 * used for DMA data transfers: build single entry SG table using
	 * offset within the internal buffer specified by handle parameter.
	 */
	if (xfer->loc_addr) {
		unsigned int offset;
		long pinned;

		offset = lower_32_bits(offset_in_page(xfer->loc_addr));
		nr_pages = PAGE_ALIGN(xfer->length + offset) >> PAGE_SHIFT;

		page_list = kmalloc_array(nr_pages,
					  sizeof(*page_list), GFP_KERNEL);
		if (page_list == NULL) {
			ret = -ENOMEM;
			goto err_req;
		}

		pinned = get_user_pages_fast(
				(unsigned long)xfer->loc_addr & PAGE_MASK,
				nr_pages, dir == DMA_FROM_DEVICE, page_list);

		if (pinned != nr_pages) {
			if (pinned < 0) {
				rmcd_error("get_user_pages_fast err=%ld",
					   pinned);
				nr_pages = 0;
			} else
				rmcd_error("pinned %ld out of %ld pages",
					   pinned, nr_pages);
			ret = -EFAULT;
			goto err_pg;
		}

		ret = sg_alloc_table_from_pages(&req->sgt, page_list, nr_pages,
				offset, xfer->length, GFP_KERNEL);
		if (ret) {
			rmcd_error("sg_alloc_table failed with err=%d", ret);
			goto err_pg;
		}

		req->page_list = page_list;
		req->nr_pages = nr_pages;
	} else {
		dma_addr_t baddr;
		struct rio_mport_mapping *map;

		baddr = (dma_addr_t)xfer->handle;

		mutex_lock(&md->buf_mutex);
		list_for_each_entry(map, &md->mappings, node) {
			if (baddr >= map->phys_addr &&
			    baddr < (map->phys_addr + map->size)) {
				kref_get(&map->ref);
				req->map = map;
				break;
			}
		}
		mutex_unlock(&md->buf_mutex);

		if (req->map == NULL) {
			ret = -ENOMEM;
			goto err_req;
		}

		if (xfer->length + xfer->offset > map->size) {
			ret = -EINVAL;
			goto err_req;
		}

		ret = sg_alloc_table(&req->sgt, 1, GFP_KERNEL);
		if (unlikely(ret)) {
			rmcd_error("sg_alloc_table failed for internal buf");
			goto err_req;
		}

		sg_set_buf(req->sgt.sgl,
			   map->virt_addr + (baddr - map->phys_addr) +
				xfer->offset, xfer->length);
	}

	req->dir = dir;
	req->filp = filp;
	req->priv = priv;
	chan = priv->dmach;

	nents = dma_map_sg(chan->device->dev,
			   req->sgt.sgl, req->sgt.nents, dir);
	if (nents == 0) {
		rmcd_error("Failed to map SG list");
		ret = -EFAULT;
		goto err_pg;
	}

	ret = do_dma_request(req, xfer, sync, nents);

	if (ret >= 0) {
		if (sync == RIO_TRANSFER_SYNC)
			goto sync_out;
		return ret; /* return ASYNC cookie */
	}

	if (ret == -ETIMEDOUT || ret == -EINTR) {
		/*
		 * This can happen only in case of SYNC transfer.
		 * Do not free unfinished request structure immediately.
		 * Place it into pending list and deal with it later
		 */
		spin_lock(&priv->req_lock);
		list_add_tail(&req->node, &priv->pend_list);
		spin_unlock(&priv->req_lock);
		return ret;
	}


	rmcd_debug(DMA, "do_dma_request failed with err=%d", ret);
sync_out:
	dma_unmap_sg(chan->device->dev, req->sgt.sgl, req->sgt.nents, dir);
	sg_free_table(&req->sgt);
err_pg:
	if (page_list) {
		for (i = 0; i < nr_pages; i++)
			put_page(page_list[i]);
		kfree(page_list);
	}
err_req:
	if (req->map) {
		mutex_lock(&md->buf_mutex);
		kref_put(&req->map->ref, mport_release_mapping);
		mutex_unlock(&md->buf_mutex);
	}
	put_dma_channel(priv);
	kfree(req);
	return ret;
}

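/*
 * rio_mport_transfer_ioctl() - handle a DMA transfer request from userspace.
 *
 * Illustrative userspace sketch (struct and ioctl names assume the UAPI in
 * <linux/rio_mport_cdev.h>; RIO_TRANSFER is the request code this handler
 * is assumed to be wired to in the full driver):
 *
 *	struct rio_transfer_io xfer = {
 *		.rioid = destid,
 *		.rio_addr = remote_addr,
 *		.loc_addr = (uintptr_t)buf,
 *		.length = len,
 *	};
 *	struct rio_transaction trans = {
 *		.block = (uintptr_t)&xfer, .count = 1,
 *		.transfer_mode = RIO_TRANSFER_MODE_TRANSFER,
 *		.sync = RIO_TRANSFER_SYNC,
 *		.dir = RIO_TRANSFER_DIR_WRITE,
 *	};
 *	ioctl(fd, RIO_TRANSFER, &trans);
 */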
static int rio_mport_transfer_ioctl(struct file *filp, void __user *arg)
{
	struct mport_cdev_priv *priv = filp->private_data;
	struct rio_transaction transaction;
	struct rio_transfer_io *transfer;
	enum dma_data_direction dir;
	int i, ret = 0;

	if (unlikely(copy_from_user(&transaction, arg, sizeof(transaction))))
		return -EFAULT;

	if (transaction.count != 1) /* only single transfer for now */
		return -EINVAL;

	if ((transaction.transfer_mode &
	     priv->md->properties.transfer_mode) == 0)
		return -ENODEV;

	transfer = vmalloc(transaction.count * sizeof(*transfer));
	if (!transfer)
		return -ENOMEM;

	if (unlikely(copy_from_user(transfer,
				    (void __user *)(uintptr_t)transaction.block,
				    transaction.count * sizeof(*transfer)))) {
		ret = -EFAULT;
		goto out_free;
	}

	dir = (transaction.dir == RIO_TRANSFER_DIR_READ) ?
					DMA_FROM_DEVICE : DMA_TO_DEVICE;
	for (i = 0; i < transaction.count && ret == 0; i++)
		ret = rio_dma_transfer(filp, transaction.transfer_mode,
				       transaction.sync, dir, &transfer[i]);

	if (unlikely(copy_to_user((void __user *)(uintptr_t)transaction.block,
				  transfer,
				  transaction.count * sizeof(*transfer))))
		ret = -EFAULT;

out_free:
	vfree(transfer);

	return ret;
}

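/*
 * rio_mport_wait_for_async_dma() - wait for completion of an ASYNC transfer
 * identified by the cookie in the caller-supplied token. On timeout or
 * signal the request is returned to the async queue so that a later wait
 * (or final cleanup) can still reap it.
 */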
static int rio_mport_wait_for_async_dma(struct file *filp, void __user *arg)
{
	struct mport_cdev_priv *priv;
	struct mport_dev *md;
	struct rio_async_tx_wait w_param;
	struct mport_dma_req *req;
	dma_cookie_t cookie;
	unsigned long tmo;
	long wret;
	int found = 0;
	int ret;

	priv = (struct mport_cdev_priv *)filp->private_data;
	md = priv->md;

	if (unlikely(copy_from_user(&w_param, arg, sizeof(w_param))))
		return -EFAULT;

	cookie = w_param.token;
	if (w_param.timeout)
		tmo = msecs_to_jiffies(w_param.timeout);
	else /* Use default DMA timeout */
		tmo = msecs_to_jiffies(dma_timeout);

	spin_lock(&priv->req_lock);
	list_for_each_entry(req, &priv->async_list, node) {
		if (req->cookie == cookie) {
			list_del(&req->node);
			found = 1;
			break;
		}
	}
	spin_unlock(&priv->req_lock);

	if (!found)
		return -EAGAIN;

	wret = wait_for_completion_interruptible_timeout(&req->req_comp, tmo);

	if (wret == 0) {
		/* Timeout on wait occurred */
		rmcd_error("%s(%d) timed out waiting for ASYNC DMA_%s",
		       current->comm, task_pid_nr(current),
		       (req->dir == DMA_FROM_DEVICE)?"READ":"WRITE");
		ret = -ETIMEDOUT;
		goto err_tmo;
	} else if (wret == -ERESTARTSYS) {
		/* wait_for_completion was interrupted by a signal but DMA may
		 * be still in progress
		 */
		rmcd_error("%s(%d) wait for ASYNC DMA_%s was interrupted",
			current->comm, task_pid_nr(current),
			(req->dir == DMA_FROM_DEVICE)?"READ":"WRITE");
		ret = -EINTR;
		goto err_tmo;
	}

	if (req->status != DMA_COMPLETE) {
		/* DMA transaction completion signaled with transfer error */
		rmcd_error("%s(%d) ASYNC DMA_%s completion with status %d",
			current->comm, task_pid_nr(current),
			(req->dir == DMA_FROM_DEVICE)?"READ":"WRITE",
			req->status);
		ret = -EIO;
	} else
		ret = 0;

	if (req->status != DMA_IN_PROGRESS && req->status != DMA_PAUSED)
		dma_req_free(req);

	return ret;

err_tmo:
	/* Return request back into async queue */
	spin_lock(&priv->req_lock);
	list_add_tail(&req->node, &priv->async_list);
	spin_unlock(&priv->req_lock);
	return ret;
}

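/*
 * rio_mport_create_dma_mapping() - allocate a DMA-coherent kernel buffer of
 * @size bytes for use as the local end of DMA transfers, and register it in
 * the mport device's mapping list so its physical address can later be
 * resolved from the handle passed in by userspace.
 */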
static int rio_mport_create_dma_mapping(struct mport_dev *md, struct file *filp,
			u64 size, struct rio_mport_mapping **mapping)
{
	struct rio_mport_mapping *map;

	map = kzalloc(sizeof(*map), GFP_KERNEL);
	if (map == NULL)
		return -ENOMEM;

	map->virt_addr = dma_alloc_coherent(md->mport->dev.parent, size,
					    &map->phys_addr, GFP_KERNEL);
	if (map->virt_addr == NULL) {
		kfree(map);
		return -ENOMEM;
	}

	map->dir = MAP_DMA;
	map->size = size;
	map->filp = filp;
	map->md = md;
	kref_init(&map->ref);
	mutex_lock(&md->buf_mutex);
	list_add_tail(&map->node, &md->mappings);
	mutex_unlock(&md->buf_mutex);
	*mapping = map;

	return 0;
}

static int rio_mport_alloc_dma(struct file *filp, void __user *arg)
{
	struct mport_cdev_priv *priv = filp->private_data;
	struct mport_dev *md = priv->md;
	struct rio_dma_mem map;
	struct rio_mport_mapping *mapping = NULL;
	int ret;

	if (unlikely(copy_from_user(&map, arg, sizeof(map))))
		return -EFAULT;

	ret = rio_mport_create_dma_mapping(md, filp, map.length, &mapping);
	if (ret)
		return ret;

	map.dma_handle = mapping->phys_addr;

	if (unlikely(copy_to_user(arg, &map, sizeof(map)))) {
		mutex_lock(&md->buf_mutex);
		kref_put(&mapping->ref, mport_release_mapping);
		mutex_unlock(&md->buf_mutex);
		return -EFAULT;
	}

	return 0;
}

static int rio_mport_free_dma(struct file *filp, void __user *arg)
{
	struct mport_cdev_priv *priv = filp->private_data;
	struct mport_dev *md = priv->md;
	u64 handle;
	int ret = -EFAULT;
	struct rio_mport_mapping *map, *_map;

	if (copy_from_user(&handle, arg, sizeof(handle)))
		return -EFAULT;
	rmcd_debug(EXIT, "filp=%p", filp);

	mutex_lock(&md->buf_mutex);
	list_for_each_entry_safe(map, _map, &md->mappings, node) {
		if (map->dir == MAP_DMA && map->phys_addr == handle &&
		    map->filp == filp) {
			kref_put(&map->ref, mport_release_mapping);
			ret = 0;
			break;
		}
	}
	mutex_unlock(&md->buf_mutex);

	if (ret == -EFAULT) {
		rmcd_debug(DMA, "ERR no matching mapping");
		return ret;
	}

	return 0;
}
#else
static int rio_mport_transfer_ioctl(struct file *filp, void __user *arg)
{
	return -ENODEV;
}

static int rio_mport_wait_for_async_dma(struct file *filp, void __user *arg)
{
	return -ENODEV;
}

static int rio_mport_alloc_dma(struct file *filp, void __user *arg)
{
	return -ENODEV;
}

static int rio_mport_free_dma(struct file *filp, void __user *arg)
{
	return -ENODEV;
}
#endif /* CONFIG_RAPIDIO_DMA_ENGINE */

/*
 * Inbound/outbound memory mapping functions
 */

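/*
 * rio_mport_create_inbound_mapping() - allocate a DMA-coherent buffer and
 * program an inbound window so that RapidIO accesses to [@raddr, @raddr +
 * @size) land in that buffer. Passing RIO_MAP_ANY_ADDR as @raddr lets the
 * buffer's physical address double as its RapidIO address.
 */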
static int
rio_mport_create_inbound_mapping(struct mport_dev *md, struct file *filp,
				u64 raddr, u64 size,
				struct rio_mport_mapping **mapping)
{
	struct rio_mport *mport = md->mport;
	struct rio_mport_mapping *map;
	int ret;

	/* rio_map_inb_region() accepts u32 size */
	if (size > 0xffffffff)
		return -EINVAL;

	map = kzalloc(sizeof(*map), GFP_KERNEL);
	if (map == NULL)
		return -ENOMEM;

	map->virt_addr = dma_alloc_coherent(mport->dev.parent, size,
					    &map->phys_addr, GFP_KERNEL);
	if (map->virt_addr == NULL) {
		ret = -ENOMEM;
		goto err_dma_alloc;
	}

	if (raddr == RIO_MAP_ANY_ADDR)
		raddr = map->phys_addr;
	ret = rio_map_inb_region(mport, map->phys_addr, raddr, (u32)size, 0);
	if (ret < 0)
		goto err_map_inb;

	map->dir = MAP_INBOUND;
	map->rio_addr = raddr;
	map->size = size;
	map->filp = filp;
	map->md = md;
	kref_init(&map->ref);
	mutex_lock(&md->buf_mutex);
	list_add_tail(&map->node, &md->mappings);
	mutex_unlock(&md->buf_mutex);
	*mapping = map;
	return 0;

err_map_inb:
	dma_free_coherent(mport->dev.parent, size,
			  map->virt_addr, map->phys_addr);
err_dma_alloc:
	kfree(map);
	return ret;
}

static int
rio_mport_get_inbound_mapping(struct mport_dev *md, struct file *filp,
			      u64 raddr, u64 size,
			      struct rio_mport_mapping **mapping)
{
	struct rio_mport_mapping *map;
	int err = -ENOMEM;

	if (raddr == RIO_MAP_ANY_ADDR)
		goto get_new;

	mutex_lock(&md->buf_mutex);
	list_for_each_entry(map, &md->mappings, node) {
		if (map->dir != MAP_INBOUND)
			continue;
		if (raddr == map->rio_addr && size == map->size) {
			/* allow exact match only */
			*mapping = map;
			err = 0;
			break;
		} else if (raddr < (map->rio_addr + map->size - 1) &&
			   (raddr + size) > map->rio_addr) {
			err = -EBUSY;
			break;
		}
	}
	mutex_unlock(&md->buf_mutex);

	if (err != -ENOMEM)
		return err;
get_new:
	/* not found, create new */
	return rio_mport_create_inbound_mapping(md, filp, raddr, size, mapping);
}

static int rio_mport_map_inbound(struct file *filp, void __user *arg)
{
	struct mport_cdev_priv *priv = filp->private_data;
	struct mport_dev *md = priv->md;
	struct rio_mmap map;
	struct rio_mport_mapping *mapping = NULL;
	int ret;

	if (!md->mport->ops->map_inb)
		return -EPROTONOSUPPORT;
	if (unlikely(copy_from_user(&map, arg, sizeof(map))))
		return -EFAULT;

	rmcd_debug(IBW, "%s filp=%p", dev_name(&priv->md->dev), filp);

	ret = rio_mport_get_inbound_mapping(md, filp, map.rio_addr,
					    map.length, &mapping);
	if (ret)
		return ret;

	map.handle = mapping->phys_addr;
	map.rio_addr = mapping->rio_addr;

	if (unlikely(copy_to_user(arg, &map, sizeof(map)))) {
		/* Delete mapping if it was created by this request */
		if (ret == 0 && mapping->filp == filp) {
			mutex_lock(&md->buf_mutex);
			kref_put(&mapping->ref, mport_release_mapping);
			mutex_unlock(&md->buf_mutex);
		}
		return -EFAULT;
	}

	return 0;
}

/*
 * rio_mport_inbound_free() - unmap from RapidIO address space and free
 * a previously allocated inbound DMA coherent buffer
 * @filp: file pointer associated with the call
 * @arg: buffer handle returned by the allocation routine
 */
static int rio_mport_inbound_free(struct file *filp, void __user *arg)
{
	struct mport_cdev_priv *priv = filp->private_data;
	struct mport_dev *md = priv->md;
	u64 handle;
	struct rio_mport_mapping *map, *_map;

	rmcd_debug(IBW, "%s filp=%p", dev_name(&priv->md->dev), filp);

	if (!md->mport->ops->unmap_inb)
		return -EPROTONOSUPPORT;

	if (copy_from_user(&handle, arg, sizeof(handle)))
		return -EFAULT;

	mutex_lock(&md->buf_mutex);
	list_for_each_entry_safe(map, _map, &md->mappings, node) {
		if (map->dir == MAP_INBOUND && map->phys_addr == handle) {
			if (map->filp == filp) {
				map->filp = NULL;
				kref_put(&map->ref, mport_release_mapping);
			}
			break;
		}
	}
	mutex_unlock(&md->buf_mutex);

	return 0;
}

/*
 * maint_port_idx_get() - Get the port index of the mport instance
 * @priv: driver private data
 * @arg: port index
 */
static int maint_port_idx_get(struct mport_cdev_priv *priv, void __user *arg)
{
	struct mport_dev *md = priv->md;
	u32 port_idx = md->mport->index;

	rmcd_debug(MPORT, "port_index=%d", port_idx);

	if (copy_to_user(arg, &port_idx, sizeof(port_idx)))
		return -EFAULT;

	return 0;
}

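/*
 * rio_mport_add_event() - queue an inbound event (doorbell or port-write)
 * into the per-descriptor event fifo and wake up any waiting reader.
 * Returns -EACCES if the event type is masked for this descriptor, and
 * -EBUSY if the fifo has overflowed.
 */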
static int rio_mport_add_event(struct mport_cdev_priv *priv,
			       struct rio_event *event)
{
	int overflow;

	if (!(priv->event_mask & event->header))
		return -EACCES;

	spin_lock(&priv->fifo_lock);
	overflow = kfifo_avail(&priv->event_fifo) < sizeof(*event)
		|| kfifo_in(&priv->event_fifo, (unsigned char *)event,
			sizeof(*event)) != sizeof(*event);
	spin_unlock(&priv->fifo_lock);

	wake_up_interruptible(&priv->event_rx_wait);

	if (overflow) {
		dev_warn(&priv->md->dev, DRV_NAME ": event fifo overflow\n");
		return -EBUSY;
	}

	return 0;
}

static void rio_mport_doorbell_handler(struct rio_mport *mport, void *dev_id,
				       u16 src, u16 dst, u16 info)
{
	struct mport_dev *data = dev_id;
	struct mport_cdev_priv *priv;
	struct rio_mport_db_filter *db_filter;
	struct rio_event event;
	int handled;

	event.header = RIO_DOORBELL;
	event.u.doorbell.rioid = src;
	event.u.doorbell.payload = info;

	handled = 0;
	spin_lock(&data->db_lock);
	list_for_each_entry(db_filter, &data->doorbells, data_node) {
		if ((db_filter->filter.rioid == RIO_INVALID_DESTID ||
		     db_filter->filter.rioid == src) &&
		    info >= db_filter->filter.low &&
		    info <= db_filter->filter.high) {
			priv = db_filter->priv;
			rio_mport_add_event(priv, &event);
			handled = 1;
		}
	}
	spin_unlock(&data->db_lock);

	if (!handled)
		dev_warn(&data->dev,
			"%s: spurious DB received from 0x%x, info=0x%04x\n",
			__func__, src, info);
}

static int rio_mport_add_db_filter(struct mport_cdev_priv *priv,
				   void __user *arg)
{
	struct mport_dev *md = priv->md;
	struct rio_mport_db_filter *db_filter;
	struct rio_doorbell_filter filter;
	unsigned long flags;
	int ret;

	if (copy_from_user(&filter, arg, sizeof(filter)))
		return -EFAULT;

	if (filter.low > filter.high)
		return -EINVAL;

	ret = rio_request_inb_dbell(md->mport, md, filter.low, filter.high,
				    rio_mport_doorbell_handler);
	if (ret) {
		rmcd_error("%s failed to register IBDB, err=%d",
			   dev_name(&md->dev), ret);
		return ret;
	}

	db_filter = kzalloc(sizeof(*db_filter), GFP_KERNEL);
	if (db_filter == NULL) {
		rio_release_inb_dbell(md->mport, filter.low, filter.high);
		return -ENOMEM;
	}

	db_filter->filter = filter;
	db_filter->priv = priv;
	spin_lock_irqsave(&md->db_lock, flags);
	list_add_tail(&db_filter->priv_node, &priv->db_filters);
	list_add_tail(&db_filter->data_node, &md->doorbells);
	spin_unlock_irqrestore(&md->db_lock, flags);

	return 0;
}

static void rio_mport_delete_db_filter(struct rio_mport_db_filter *db_filter)
{
	list_del(&db_filter->data_node);
	list_del(&db_filter->priv_node);
	kfree(db_filter);
}

static int rio_mport_remove_db_filter(struct mport_cdev_priv *priv,
				      void __user *arg)
{
	struct rio_mport_db_filter *db_filter;
	struct rio_doorbell_filter filter;
	unsigned long flags;
	int ret = -EINVAL;

	if (copy_from_user(&filter, arg, sizeof(filter)))
		return -EFAULT;

	if (filter.low > filter.high)
		return -EINVAL;

	spin_lock_irqsave(&priv->md->db_lock, flags);
	list_for_each_entry(db_filter, &priv->db_filters, priv_node) {
		if (db_filter->filter.rioid == filter.rioid &&
		    db_filter->filter.low == filter.low &&
		    db_filter->filter.high == filter.high) {
			rio_mport_delete_db_filter(db_filter);
			ret = 0;
			break;
		}
	}
	spin_unlock_irqrestore(&priv->md->db_lock, flags);

	if (!ret)
		rio_release_inb_dbell(priv->md->mport, filter.low, filter.high);

	return ret;
}

static int rio_mport_match_pw(union rio_pw_msg *msg,
			      struct rio_pw_filter *filter)
{
	if ((msg->em.comptag & filter->mask) < filter->low ||
	    (msg->em.comptag & filter->mask) > filter->high)
		return 0;
	return 1;
}

static int rio_mport_pw_handler(struct rio_mport *mport, void *context,
				union rio_pw_msg *msg, int step)
{
	struct mport_dev *md = context;
	struct mport_cdev_priv *priv;
	struct rio_mport_pw_filter *pw_filter;
	struct rio_event event;
	int handled;

	event.header = RIO_PORTWRITE;
	memcpy(event.u.portwrite.payload, msg->raw, RIO_PW_MSG_SIZE);

	handled = 0;
	spin_lock(&md->pw_lock);
	list_for_each_entry(pw_filter, &md->portwrites, md_node) {
		if (rio_mport_match_pw(msg, &pw_filter->filter)) {
			priv = pw_filter->priv;
			rio_mport_add_event(priv, &event);
			handled = 1;
		}
	}
	spin_unlock(&md->pw_lock);

	if (!handled) {
		printk_ratelimited(KERN_WARNING DRV_NAME
			": mport%d received spurious PW from 0x%08x\n",
			mport->id, msg->em.comptag);
	}

	return 0;
}

static int rio_mport_add_pw_filter(struct mport_cdev_priv *priv,
				   void __user *arg)
{
	struct mport_dev *md = priv->md;
	struct rio_mport_pw_filter *pw_filter;
	struct rio_pw_filter filter;
	unsigned long flags;
	int hadd = 0;

	if (copy_from_user(&filter, arg, sizeof(filter)))
		return -EFAULT;

	pw_filter = kzalloc(sizeof(*pw_filter), GFP_KERNEL);
	if (pw_filter == NULL)
		return -ENOMEM;

	pw_filter->filter = filter;
	pw_filter->priv = priv;
	spin_lock_irqsave(&md->pw_lock, flags);
	if (list_empty(&md->portwrites))
		hadd = 1;
	list_add_tail(&pw_filter->priv_node, &priv->pw_filters);
	list_add_tail(&pw_filter->md_node, &md->portwrites);
	spin_unlock_irqrestore(&md->pw_lock, flags);

	if (hadd) {
		int ret;

		ret = rio_add_mport_pw_handler(md->mport, md,
					       rio_mport_pw_handler);
		if (ret) {
			dev_err(&md->dev,
				"%s: failed to add IB_PW handler, err=%d\n",
				__func__, ret);
			return ret;
		}
		rio_pw_enable(md->mport, 1);
	}

	return 0;
}

static void rio_mport_delete_pw_filter(struct rio_mport_pw_filter *pw_filter)
{
	list_del(&pw_filter->md_node);
	list_del(&pw_filter->priv_node);
	kfree(pw_filter);
}

static int rio_mport_match_pw_filter(struct rio_pw_filter *a,
				     struct rio_pw_filter *b)
{
	if ((a->mask == b->mask) && (a->low == b->low) && (a->high == b->high))
		return 1;
	return 0;
}

static int rio_mport_remove_pw_filter(struct mport_cdev_priv *priv,
				      void __user *arg)
{
	struct mport_dev *md = priv->md;
	struct rio_mport_pw_filter *pw_filter;
	struct rio_pw_filter filter;
	unsigned long flags;
	int ret = -EINVAL;
	int hdel = 0;

	if (copy_from_user(&filter, arg, sizeof(filter)))
		return -EFAULT;

	spin_lock_irqsave(&md->pw_lock, flags);
	list_for_each_entry(pw_filter, &priv->pw_filters, priv_node) {
		if (rio_mport_match_pw_filter(&pw_filter->filter, &filter)) {
			rio_mport_delete_pw_filter(pw_filter);
			ret = 0;
			break;
		}
	}

	if (list_empty(&md->portwrites))
		hdel = 1;
	spin_unlock_irqrestore(&md->pw_lock, flags);

	if (hdel) {
		rio_del_mport_pw_handler(md->mport, priv->md,
					 rio_mport_pw_handler);
		rio_pw_enable(md->mport, 0);
	}

	return ret;
}

1691 | /* | |
1692 | * rio_release_dev - release routine for kernel RIO device object | |
1693 | * @dev: kernel device object associated with a RIO device structure | |
1694 | * | |
1695 | * Frees the RIO device struct associated with the given kernel | |
1696 | * device object. | |
1697 | */ | |
1698 | static void rio_release_dev(struct device *dev) | |
1699 | { | |
1700 | struct rio_dev *rdev; | |
1701 | ||
1702 | rdev = to_rio_dev(dev); | |
1703 | pr_info(DRV_PREFIX "%s: %s\n", __func__, rio_name(rdev)); | |
1704 | kfree(rdev); | |
1705 | } | |
1706 | ||
1707 | ||
1708 | static void rio_release_net(struct device *dev) | |
1709 | { | |
1710 | struct rio_net *net; | |
1711 | ||
1712 | net = to_rio_net(dev); | |
1713 | rmcd_debug(RDEV, "net_%d", net->id); | |
1714 | kfree(net); | |
1715 | } | |
1716 | ||
1717 | ||
1718 | /* | |
1719 | * rio_mport_add_riodev - creates a kernel RIO device object | |
1720 | * | |
1721 | * Allocates a RIO device data structure and initializes required fields based | |
1722 | * on the device's configuration space contents. | |
1723 | * If the device has switch capabilities, then a switch specific portion is | |
1724 | * allocated and configured. | |
1725 | */ | |
1726 | static int rio_mport_add_riodev(struct mport_cdev_priv *priv, | |
1727 | void __user *arg) | |
1728 | { | |
1729 | struct mport_dev *md = priv->md; | |
1730 | struct rio_rdev_info dev_info; | |
1731 | struct rio_dev *rdev; | |
1732 | struct rio_switch *rswitch = NULL; | |
1733 | struct rio_mport *mport; | |
1734 | size_t size; | |
1735 | u32 rval; | |
1736 | u32 swpinfo = 0; | |
1737 | u16 destid; | |
1738 | u8 hopcount; | |
1739 | int err; | |
1740 | ||
1741 | if (copy_from_user(&dev_info, arg, sizeof(dev_info))) | |
1742 | return -EFAULT; | |
1743 | ||
1744 | rmcd_debug(RDEV, "name:%s ct:0x%x did:0x%x hc:0x%x", dev_info.name, | |
1745 | dev_info.comptag, dev_info.destid, dev_info.hopcount); | |
1746 | ||
1747 | if (bus_find_device_by_name(&rio_bus_type, NULL, dev_info.name)) { | |
1748 | rmcd_debug(RDEV, "device %s already exists", dev_info.name); | |
1749 | return -EEXIST; | |
1750 | } | |
1751 | ||
4e1016da | 1752 | size = sizeof(*rdev); |
e8de3701 | 1753 | mport = md->mport; |
4e1016da AB | 1754 | destid = dev_info.destid;
1755 | hopcount = dev_info.hopcount; | |
e8de3701 AB | 1756 |
1757 | if (rio_mport_read_config_32(mport, destid, hopcount, | |
1758 | RIO_PEF_CAR, &rval)) | |
1759 | return -EIO; | |
1760 | ||
1761 | if (rval & RIO_PEF_SWITCH) { | |
1762 | rio_mport_read_config_32(mport, destid, hopcount, | |
1763 | RIO_SWP_INFO_CAR, &swpinfo); | |
1764 | size += (RIO_GET_TOTAL_PORTS(swpinfo) * | |
1765 | sizeof(rswitch->nextdev[0])) + sizeof(*rswitch); | |
1766 | } | |
1767 | ||
1768 | rdev = kzalloc(size, GFP_KERNEL); | |
1769 | if (rdev == NULL) | |
1770 | return -ENOMEM; | |
1771 | ||
1772 | if (mport->net == NULL) { | |
1773 | struct rio_net *net; | |
1774 | ||
1775 | net = rio_alloc_net(mport); | |
1776 | if (!net) { | |
1777 | err = -ENOMEM; | |
1778 | rmcd_debug(RDEV, "failed to allocate net object"); | |
1779 | goto cleanup; | |
1780 | } | |
1781 | ||
1782 | net->id = mport->id; | |
1783 | net->hport = mport; | |
1784 | dev_set_name(&net->dev, "rnet_%d", net->id); | |
1785 | net->dev.parent = &mport->dev; | |
1786 | net->dev.release = rio_release_net; | |
1787 | err = rio_add_net(net); | |
1788 | if (err) { | |
1789 | rmcd_debug(RDEV, "failed to register net, err=%d", err); | |
1790 | kfree(net); | |
1791 | goto cleanup; | |
1792 | } | |
1793 | } | |
1794 | ||
1795 | rdev->net = mport->net; | |
1796 | rdev->pef = rval; | |
1797 | rdev->swpinfo = swpinfo; | |
1798 | rio_mport_read_config_32(mport, destid, hopcount, | |
1799 | RIO_DEV_ID_CAR, &rval); | |
1800 | rdev->did = rval >> 16; | |
1801 | rdev->vid = rval & 0xffff; | |
1802 | rio_mport_read_config_32(mport, destid, hopcount, RIO_DEV_INFO_CAR, | |
1803 | &rdev->device_rev); | |
1804 | rio_mport_read_config_32(mport, destid, hopcount, RIO_ASM_ID_CAR, | |
1805 | &rval); | |
1806 | rdev->asm_did = rval >> 16; | |
1807 | rdev->asm_vid = rval & 0xffff; | |
1808 | rio_mport_read_config_32(mport, destid, hopcount, RIO_ASM_INFO_CAR, | |
1809 | &rval); | |
1810 | rdev->asm_rev = rval >> 16; | |
1811 | ||
1812 | if (rdev->pef & RIO_PEF_EXT_FEATURES) { | |
1813 | rdev->efptr = rval & 0xffff; | |
1814 | rdev->phys_efptr = rio_mport_get_physefb(mport, 0, destid, | |
1ae842de | 1815 | hopcount, &rdev->phys_rmap); |
e8de3701 AB | 1816 |
1817 | rdev->em_efptr = rio_mport_get_feature(mport, 0, destid, | |
1818 | hopcount, RIO_EFB_ERR_MGMNT); | |
1819 | } | |
1820 | ||
1821 | rio_mport_read_config_32(mport, destid, hopcount, RIO_SRC_OPS_CAR, | |
1822 | &rdev->src_ops); | |
1823 | rio_mport_read_config_32(mport, destid, hopcount, RIO_DST_OPS_CAR, | |
1824 | &rdev->dst_ops); | |
1825 | ||
1826 | rdev->comp_tag = dev_info.comptag; | |
1827 | rdev->destid = destid; | |
1828 | /* hopcount is stored as specified by the caller, regardless of EP or SW */ | |
1829 | rdev->hopcount = hopcount; | |
1830 | ||
1831 | if (rdev->pef & RIO_PEF_SWITCH) { | |
1832 | rswitch = rdev->rswitch; | |
1833 | rswitch->route_table = NULL; | |
1834 | } | |
1835 | ||
1836 | if (strlen(dev_info.name)) | |
1837 | dev_set_name(&rdev->dev, "%s", dev_info.name); | |
1838 | else if (rdev->pef & RIO_PEF_SWITCH) | |
1839 | dev_set_name(&rdev->dev, "%02x:s:%04x", mport->id, | |
1840 | rdev->comp_tag & RIO_CTAG_UDEVID); | |
1841 | else | |
1842 | dev_set_name(&rdev->dev, "%02x:e:%04x", mport->id, | |
1843 | rdev->comp_tag & RIO_CTAG_UDEVID); | |
1844 | ||
1845 | INIT_LIST_HEAD(&rdev->net_list); | |
1846 | rdev->dev.parent = &mport->net->dev; | |
1847 | rio_attach_device(rdev); | |
1848 | rdev->dev.release = rio_release_dev; | |
1849 | ||
1850 | if (rdev->dst_ops & RIO_DST_OPS_DOORBELL) | |
1851 | rio_init_dbell_res(&rdev->riores[RIO_DOORBELL_RESOURCE], | |
1852 | 0, 0xffff); | |
1853 | err = rio_add_device(rdev); | |
1854 | if (err) | |
1855 | goto cleanup; | |
1856 | rio_dev_get(rdev); | |
1857 | ||
1858 | return 0; | |
1859 | cleanup: | |
1860 | kfree(rdev); | |
1861 | return err; | |
1862 | } | |
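/*
 * Userspace sketch (editorial addition): registering an enumerated device
 * with RIO_DEV_ADD. Only fields referenced by this function are shown; the
 * destid/hopcount values are placeholders. RIO_DEV_DEL accepts the same
 * structure for removal (see below).
 *
 *	struct rio_rdev_info info = { 0 };
 *
 *	info.comptag  = 0x00010001;
 *	info.destid   = 0x0001;		// placeholder destination ID
 *	info.hopcount = 0xff;		// placeholder hop count
 *	info.name[0]  = '\0';		// empty name -> auto "xx:e:xxxx"
 *	ioctl(fd, RIO_DEV_ADD, &info);
 */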
1863 | ||
1864 | static int rio_mport_del_riodev(struct mport_cdev_priv *priv, void __user *arg) | |
1865 | { | |
1866 | struct rio_rdev_info dev_info; | |
1867 | struct rio_dev *rdev = NULL; | |
1868 | struct device *dev; | |
1869 | struct rio_mport *mport; | |
1870 | struct rio_net *net; | |
1871 | ||
1872 | if (copy_from_user(&dev_info, arg, sizeof(dev_info))) | |
1873 | return -EFAULT; | |
1874 | ||
1875 | mport = priv->md->mport; | |
1876 | ||
1877 | /* If device name is specified, removal by name has priority */ | |
1878 | if (strlen(dev_info.name)) { | |
1879 | dev = bus_find_device_by_name(&rio_bus_type, NULL, | |
1880 | dev_info.name); | |
1881 | if (dev) | |
1882 | rdev = to_rio_dev(dev); | |
1883 | } else { | |
1884 | do { | |
1885 | rdev = rio_get_comptag(dev_info.comptag, rdev); | |
1886 | if (rdev && rdev->dev.parent == &mport->net->dev && | |
4e1016da AB | 1887 | rdev->destid == dev_info.destid &&
1888 | rdev->hopcount == dev_info.hopcount) | |
e8de3701 AB | 1889 | break;
1890 | } while (rdev); | |
1891 | } | |
1892 | ||
1893 | if (!rdev) { | |
1894 | rmcd_debug(RDEV, | |
1895 | "device name:%s ct:0x%x did:0x%x hc:0x%x not found", | |
1896 | dev_info.name, dev_info.comptag, dev_info.destid, | |
1897 | dev_info.hopcount); | |
1898 | return -ENODEV; | |
1899 | } | |
1900 | ||
1901 | net = rdev->net; | |
1902 | rio_dev_put(rdev); | |
1903 | rio_del_device(rdev, RIO_DEVICE_SHUTDOWN); | |
1904 | ||
1905 | if (list_empty(&net->devices)) { | |
1906 | rio_free_net(net); | |
1907 | mport->net = NULL; | |
1908 | } | |
1909 | ||
1910 | return 0; | |
1911 | } | |
1912 | ||
1913 | /* | |
1914 | * Mport cdev management | |
1915 | */ | |
1916 | ||
1917 | /* | |
1918 | * mport_cdev_open() - Open character device (mport) | |
1919 | */ | |
1920 | static int mport_cdev_open(struct inode *inode, struct file *filp) | |
1921 | { | |
1922 | int ret; | |
1923 | int minor = iminor(inode); | |
1924 | struct mport_dev *chdev; | |
1925 | struct mport_cdev_priv *priv; | |
1926 | ||
1927 | /* Test for valid device */ | |
1928 | if (minor >= RIO_MAX_MPORTS) { | |
1929 | rmcd_error("Invalid minor device number"); | |
1930 | return -EINVAL; | |
1931 | } | |
1932 | ||
1933 | chdev = container_of(inode->i_cdev, struct mport_dev, cdev); | |
1934 | ||
1935 | rmcd_debug(INIT, "%s filp=%p", dev_name(&chdev->dev), filp); | |
1936 | ||
1937 | if (atomic_read(&chdev->active) == 0) | |
1938 | return -ENODEV; | |
1939 | ||
1940 | get_device(&chdev->dev); | |
1941 | ||
1942 | priv = kzalloc(sizeof(*priv), GFP_KERNEL); | |
1943 | if (!priv) { | |
1944 | put_device(&chdev->dev); | |
1945 | return -ENOMEM; | |
1946 | } | |
1947 | ||
1948 | priv->md = chdev; | |
1949 | ||
1950 | mutex_lock(&chdev->file_mutex); | |
1951 | list_add_tail(&priv->list, &chdev->file_list); | |
1952 | mutex_unlock(&chdev->file_mutex); | |
1953 | ||
1954 | INIT_LIST_HEAD(&priv->db_filters); | |
1955 | INIT_LIST_HEAD(&priv->pw_filters); | |
1956 | spin_lock_init(&priv->fifo_lock); | |
1957 | init_waitqueue_head(&priv->event_rx_wait); | |
1958 | ret = kfifo_alloc(&priv->event_fifo, | |
1959 | sizeof(struct rio_event) * MPORT_EVENT_DEPTH, | |
1960 | GFP_KERNEL); | |
1961 | if (ret < 0) { | |
1962 | dev_err(&chdev->dev, DRV_NAME ": kfifo_alloc failed\n"); | |
1963 | ret = -ENOMEM; | |
1964 | goto err_fifo; | |
1965 | } | |
1966 | ||
1967 | #ifdef CONFIG_RAPIDIO_DMA_ENGINE | |
1968 | INIT_LIST_HEAD(&priv->async_list); | |
1969 | INIT_LIST_HEAD(&priv->pend_list); | |
1970 | spin_lock_init(&priv->req_lock); | |
1971 | mutex_init(&priv->dma_lock); | |
1972 | #endif | |
1973 | ||
1974 | filp->private_data = priv; | |
1975 | goto out; | |
1976 | err_fifo: | |
1977 | kfree(priv); | |
1978 | out: | |
1979 | return ret; | |
1980 | } | |
1981 | ||
1982 | static int mport_cdev_fasync(int fd, struct file *filp, int mode) | |
1983 | { | |
1984 | struct mport_cdev_priv *priv = filp->private_data; | |
1985 | ||
1986 | return fasync_helper(fd, filp, mode, &priv->async_queue); | |
1987 | } | |
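/*
 * Userspace sketch (editorial addition): requesting SIGIO delivery, which
 * is serviced by the fasync_helper() call above and raised from
 * mport_cdev_kill_fasync() when the device goes away.
 *
 *	fcntl(fd, F_SETOWN, getpid());
 *	fcntl(fd, F_SETFL, fcntl(fd, F_GETFL) | FASYNC);
 */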
1988 | ||
1989 | #ifdef CONFIG_RAPIDIO_DMA_ENGINE | |
1990 | static void mport_cdev_release_dma(struct file *filp) | |
1991 | { | |
1992 | struct mport_cdev_priv *priv = filp->private_data; | |
1993 | struct mport_dev *md; | |
1994 | struct mport_dma_req *req, *req_next; | |
1995 | unsigned long tmo = msecs_to_jiffies(dma_timeout); | |
1996 | long wret; | |
1997 | LIST_HEAD(list); | |
1998 | ||
1999 | rmcd_debug(EXIT, "from filp=%p %s(%d)", | |
2000 | filp, current->comm, task_pid_nr(current)); | |
2001 | ||
2002 | if (!priv->dmach) { | |
2003 | rmcd_debug(EXIT, "No DMA channel for filp=%p", filp); | |
2004 | return; | |
2005 | } | |
2006 | ||
2007 | md = priv->md; | |
2008 | ||
2009 | flush_workqueue(dma_wq); | |
2010 | ||
2011 | spin_lock(&priv->req_lock); | |
2012 | if (!list_empty(&priv->async_list)) { | |
2013 | rmcd_debug(EXIT, "async list not empty filp=%p %s(%d)", | |
2014 | filp, current->comm, task_pid_nr(current)); | |
2015 | list_splice_init(&priv->async_list, &list); | |
2016 | } | |
2017 | spin_unlock(&priv->req_lock); | |
2018 | ||
2019 | if (!list_empty(&list)) { | |
2020 | rmcd_debug(EXIT, "temp list not empty"); | |
2021 | list_for_each_entry_safe(req, req_next, &list, node) { | |
2022 | rmcd_debug(EXIT, "free req->filp=%p cookie=%d compl=%s", | |
2023 | req->filp, req->cookie, | |
2024 | completion_done(&req->req_comp) ? "yes" : "no"); | |
2025 | list_del(&req->node); | |
2026 | dma_req_free(req); | |
2027 | } | |
2028 | } | |
2029 | ||
2030 | if (!list_empty(&priv->pend_list)) { | |
2031 | rmcd_debug(EXIT, "Free pending DMA requests for filp=%p %s(%d)", | |
2032 | filp, current->comm, task_pid_nr(current)); | |
2033 | list_for_each_entry_safe(req, | |
2034 | req_next, &priv->pend_list, node) { | |
2035 | rmcd_debug(EXIT, "free req->filp=%p cookie=%d compl=%s", | |
2036 | req->filp, req->cookie, | |
2037 | completion_done(&req->req_comp) ? "yes" : "no"); | |
2038 | list_del(&req->node); | |
2039 | dma_req_free(req); | |
2040 | } | |
2041 | } | |
2042 | ||
2043 | put_dma_channel(priv); | |
2044 | wret = wait_for_completion_interruptible_timeout(&priv->comp, tmo); | |
2045 | ||
2046 | if (wret <= 0) { | |
2047 | rmcd_error("%s(%d) failed waiting for DMA release err=%ld", | |
2048 | current->comm, task_pid_nr(current), wret); | |
2049 | } | |
2050 | ||
2051 | spin_lock(&priv->req_lock); | |
2052 | ||
2053 | if (!list_empty(&priv->pend_list)) { | |
2054 | rmcd_debug(EXIT, "ATTN: pending DMA requests, filp=%p %s(%d)", | |
2055 | filp, current->comm, task_pid_nr(current)); | |
2056 | } | |
2057 | ||
2058 | spin_unlock(&priv->req_lock); | |
2059 | ||
2060 | if (priv->dmach != priv->md->dma_chan) { | |
2061 | rmcd_debug(EXIT, "Release DMA channel for filp=%p %s(%d)", | |
2062 | filp, current->comm, task_pid_nr(current)); | |
2063 | rio_release_dma(priv->dmach); | |
2064 | } else { | |
2065 | rmcd_debug(EXIT, "Adjust default DMA channel refcount"); | |
2066 | kref_put(&md->dma_ref, mport_release_def_dma); | |
2067 | } | |
2068 | ||
2069 | priv->dmach = NULL; | |
2070 | } | |
2071 | #else | |
2072 | #define mport_cdev_release_dma(filp) do {} while (0) | |
2073 | #endif | |
2074 | ||
2075 | /* | |
2076 | * mport_cdev_release() - Release character device | |
2077 | */ | |
2078 | static int mport_cdev_release(struct inode *inode, struct file *filp) | |
2079 | { | |
2080 | struct mport_cdev_priv *priv = filp->private_data; | |
2081 | struct mport_dev *chdev; | |
2082 | struct rio_mport_pw_filter *pw_filter, *pw_filter_next; | |
2083 | struct rio_mport_db_filter *db_filter, *db_filter_next; | |
2084 | struct rio_mport_mapping *map, *_map; | |
2085 | unsigned long flags; | |
2086 | ||
2087 | rmcd_debug(EXIT, "%s filp=%p", dev_name(&priv->md->dev), filp); | |
2088 | ||
2089 | chdev = priv->md; | |
2090 | mport_cdev_release_dma(filp); | |
2091 | ||
2092 | priv->event_mask = 0; | |
2093 | ||
2094 | spin_lock_irqsave(&chdev->pw_lock, flags); | |
2095 | if (!list_empty(&priv->pw_filters)) { | |
2096 | list_for_each_entry_safe(pw_filter, pw_filter_next, | |
2097 | &priv->pw_filters, priv_node) | |
2098 | rio_mport_delete_pw_filter(pw_filter); | |
2099 | } | |
2100 | spin_unlock_irqrestore(&chdev->pw_lock, flags); | |
2101 | ||
2102 | spin_lock_irqsave(&chdev->db_lock, flags); | |
2103 | list_for_each_entry_safe(db_filter, db_filter_next, | |
2104 | &priv->db_filters, priv_node) { | |
2105 | rio_mport_delete_db_filter(db_filter); | |
2106 | } | |
2107 | spin_unlock_irqrestore(&chdev->db_lock, flags); | |
2108 | ||
2109 | kfifo_free(&priv->event_fifo); | |
2110 | ||
2111 | mutex_lock(&chdev->buf_mutex); | |
2112 | list_for_each_entry_safe(map, _map, &chdev->mappings, node) { | |
2113 | if (map->filp == filp) { | |
2114 | rmcd_debug(EXIT, "release mapping %p filp=%p", | |
2115 | map->virt_addr, filp); | |
2116 | kref_put(&map->ref, mport_release_mapping); | |
2117 | } | |
2118 | } | |
2119 | mutex_unlock(&chdev->buf_mutex); | |
2120 | ||
2121 | mport_cdev_fasync(-1, filp, 0); | |
2122 | filp->private_data = NULL; | |
2123 | mutex_lock(&chdev->file_mutex); | |
2124 | list_del(&priv->list); | |
2125 | mutex_unlock(&chdev->file_mutex); | |
2126 | put_device(&chdev->dev); | |
2127 | kfree(priv); | |
2128 | return 0; | |
2129 | } | |
2130 | ||
2131 | /* | |
2132 | * mport_cdev_ioctl() - IOCTLs for character device | |
2133 | */ | |
2134 | static long mport_cdev_ioctl(struct file *filp, | |
2135 | unsigned int cmd, unsigned long arg) | |
2136 | { | |
2137 | int err = -EINVAL; | |
2138 | struct mport_cdev_priv *data = filp->private_data; | |
2139 | struct mport_dev *md = data->md; | |
2140 | ||
2141 | if (atomic_read(&md->active) == 0) | |
2142 | return -ENODEV; | |
2143 | ||
2144 | switch (cmd) { | |
2145 | case RIO_MPORT_MAINT_READ_LOCAL: | |
2146 | return rio_mport_maint_rd(data, (void __user *)arg, 1); | |
2147 | case RIO_MPORT_MAINT_WRITE_LOCAL: | |
2148 | return rio_mport_maint_wr(data, (void __user *)arg, 1); | |
2149 | case RIO_MPORT_MAINT_READ_REMOTE: | |
2150 | return rio_mport_maint_rd(data, (void __user *)arg, 0); | |
2151 | case RIO_MPORT_MAINT_WRITE_REMOTE: | |
2152 | return rio_mport_maint_wr(data, (void __user *)arg, 0); | |
2153 | case RIO_MPORT_MAINT_HDID_SET: | |
2154 | return maint_hdid_set(data, (void __user *)arg); | |
2155 | case RIO_MPORT_MAINT_COMPTAG_SET: | |
2156 | return maint_comptag_set(data, (void __user *)arg); | |
2157 | case RIO_MPORT_MAINT_PORT_IDX_GET: | |
2158 | return maint_port_idx_get(data, (void __user *)arg); | |
2159 | case RIO_MPORT_GET_PROPERTIES: | |
2160 | md->properties.hdid = md->mport->host_deviceid; | |
4e1016da AB | 2161 | if (copy_to_user((void __user *)arg, &(md->properties),
2162 | sizeof(md->properties))) | |
e8de3701 AB | 2163 | return -EFAULT;
2164 | return 0; | |
2165 | case RIO_ENABLE_DOORBELL_RANGE: | |
2166 | return rio_mport_add_db_filter(data, (void __user *)arg); | |
2167 | case RIO_DISABLE_DOORBELL_RANGE: | |
2168 | return rio_mport_remove_db_filter(data, (void __user *)arg); | |
2169 | case RIO_ENABLE_PORTWRITE_RANGE: | |
2170 | return rio_mport_add_pw_filter(data, (void __user *)arg); | |
2171 | case RIO_DISABLE_PORTWRITE_RANGE: | |
2172 | return rio_mport_remove_pw_filter(data, (void __user *)arg); | |
2173 | case RIO_SET_EVENT_MASK: | |
4e1016da | 2174 | data->event_mask = (u32)arg; |
e8de3701 AB | 2175 | return 0;
2176 | case RIO_GET_EVENT_MASK: | |
2177 | if (copy_to_user((void __user *)arg, &data->event_mask, | |
4e1016da | 2178 | sizeof(u32))) |
e8de3701 AB | 2179 | return -EFAULT;
2180 | return 0; | |
2181 | case RIO_MAP_OUTBOUND: | |
2182 | return rio_mport_obw_map(filp, (void __user *)arg); | |
2183 | case RIO_MAP_INBOUND: | |
2184 | return rio_mport_map_inbound(filp, (void __user *)arg); | |
2185 | case RIO_UNMAP_OUTBOUND: | |
2186 | return rio_mport_obw_free(filp, (void __user *)arg); | |
2187 | case RIO_UNMAP_INBOUND: | |
2188 | return rio_mport_inbound_free(filp, (void __user *)arg); | |
2189 | case RIO_ALLOC_DMA: | |
2190 | return rio_mport_alloc_dma(filp, (void __user *)arg); | |
2191 | case RIO_FREE_DMA: | |
2192 | return rio_mport_free_dma(filp, (void __user *)arg); | |
2193 | case RIO_WAIT_FOR_ASYNC: | |
2194 | return rio_mport_wait_for_async_dma(filp, (void __user *)arg); | |
2195 | case RIO_TRANSFER: | |
2196 | return rio_mport_transfer_ioctl(filp, (void __user *)arg); | |
2197 | case RIO_DEV_ADD: | |
2198 | return rio_mport_add_riodev(data, (void __user *)arg); | |
2199 | case RIO_DEV_DEL: | |
2200 | return rio_mport_del_riodev(data, (void __user *)arg); | |
2201 | default: | |
2202 | break; | |
2203 | } | |
2204 | ||
2205 | return err; | |
2206 | } | |
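/*
 * Userspace sketch (editorial addition): querying mport properties through
 * the dispatch above. The structure name is assumed from the uapi header
 * <linux/rio_mport_cdev.h>; only fields populated by this driver are shown.
 *
 *	struct rio_mport_properties props;
 *
 *	if (ioctl(fd, RIO_MPORT_GET_PROPERTIES, &props) == 0)
 *		printf("mport %u: hdid %u, link_speed %u, link_width %u\n",
 *		       props.id, props.hdid, props.link_speed,
 *		       props.link_width);
 */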
2207 | ||
2208 | /* | |
2209 | * mport_release_mapping - free mapping resources and info structure | |
2210 | * @ref: a pointer to the kref within struct rio_mport_mapping | |
2211 | * | |
2212 | * NOTE: Shall be called while holding buf_mutex. | |
2213 | */ | |
2214 | static void mport_release_mapping(struct kref *ref) | |
2215 | { | |
2216 | struct rio_mport_mapping *map = | |
2217 | container_of(ref, struct rio_mport_mapping, ref); | |
2218 | struct rio_mport *mport = map->md->mport; | |
2219 | ||
2220 | rmcd_debug(MMAP, "type %d mapping @ %p (phys = %pad) for %s", | |
2221 | map->dir, map->virt_addr, | |
2222 | &map->phys_addr, mport->name); | |
2223 | ||
2224 | list_del(&map->node); | |
2225 | ||
2226 | switch (map->dir) { | |
2227 | case MAP_INBOUND: | |
2228 | rio_unmap_inb_region(mport, map->phys_addr); | |
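2228+| /* fall through - an inbound window is backed by a coherent DMA buffer that is freed below */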
2229 | case MAP_DMA: | |
2230 | dma_free_coherent(mport->dev.parent, map->size, | |
2231 | map->virt_addr, map->phys_addr); | |
2232 | break; | |
2233 | case MAP_OUTBOUND: | |
2234 | rio_unmap_outb_region(mport, map->rioid, map->rio_addr); | |
2235 | break; | |
2236 | } | |
2237 | kfree(map); | |
2238 | } | |
2239 | ||
2240 | static void mport_mm_open(struct vm_area_struct *vma) | |
2241 | { | |
2242 | struct rio_mport_mapping *map = vma->vm_private_data; | |
2243 | ||
ea87b8e1 | 2244 | rmcd_debug(MMAP, "%pad", &map->phys_addr); |
e8de3701 AB | 2245 | kref_get(&map->ref);
2246 | } | |
2247 | ||
2248 | static void mport_mm_close(struct vm_area_struct *vma) | |
2249 | { | |
2250 | struct rio_mport_mapping *map = vma->vm_private_data; | |
2251 | ||
ea87b8e1 | 2252 | rmcd_debug(MMAP, "%pad", &map->phys_addr); |
e8de3701 AB | 2253 | mutex_lock(&map->md->buf_mutex);
2254 | kref_put(&map->ref, mport_release_mapping); | |
2255 | mutex_unlock(&map->md->buf_mutex); | |
2256 | } | |
2257 | ||
2258 | static const struct vm_operations_struct vm_ops = { | |
2259 | .open = mport_mm_open, | |
2260 | .close = mport_mm_close, | |
2261 | }; | |
2262 | ||
2263 | static int mport_cdev_mmap(struct file *filp, struct vm_area_struct *vma) | |
2264 | { | |
2265 | struct mport_cdev_priv *priv = filp->private_data; | |
2266 | struct mport_dev *md; | |
2267 | size_t size = vma->vm_end - vma->vm_start; | |
2268 | dma_addr_t baddr; | |
2269 | unsigned long offset; | |
2270 | int found = 0, ret; | |
2271 | struct rio_mport_mapping *map; | |
2272 | ||
2273 | rmcd_debug(MMAP, "0x%x bytes at offset 0x%lx", | |
2274 | (unsigned int)size, vma->vm_pgoff); | |
2275 | ||
2276 | md = priv->md; | |
2277 | baddr = ((dma_addr_t)vma->vm_pgoff << PAGE_SHIFT); | |
2278 | ||
2279 | mutex_lock(&md->buf_mutex); | |
2280 | list_for_each_entry(map, &md->mappings, node) { | |
2281 | if (baddr >= map->phys_addr && | |
2282 | baddr < (map->phys_addr + map->size)) { | |
2283 | found = 1; | |
2284 | break; | |
2285 | } | |
2286 | } | |
2287 | mutex_unlock(&md->buf_mutex); | |
2288 | ||
2289 | if (!found) | |
2290 | return -ENOMEM; | |
2291 | ||
2292 | offset = baddr - map->phys_addr; | |
2293 | ||
2294 | if (size + offset > map->size) | |
2295 | return -EINVAL; | |
2296 | ||
2297 | vma->vm_pgoff = offset >> PAGE_SHIFT; | |
2298 | rmcd_debug(MMAP, "MMAP adjusted offset = 0x%lx", vma->vm_pgoff); | |
2299 | ||
2300 | if (map->dir == MAP_INBOUND || map->dir == MAP_DMA) | |
2301 | ret = dma_mmap_coherent(md->mport->dev.parent, vma, | |
2302 | map->virt_addr, map->phys_addr, map->size); | |
2303 | else if (map->dir == MAP_OUTBOUND) { | |
2304 | vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); | |
2305 | ret = vm_iomap_memory(vma, map->phys_addr, map->size); | |
2306 | } else { | |
2307 | rmcd_error("Attempt to mmap unsupported mapping type"); | |
2308 | ret = -EIO; | |
2309 | } | |
2310 | ||
2311 | if (!ret) { | |
2312 | vma->vm_private_data = map; | |
2313 | vma->vm_ops = &vm_ops; | |
2314 | mport_mm_open(vma); | |
2315 | } else { | |
2316 | rmcd_error("MMAP exit with err=%d", ret); | |
2317 | } | |
2318 | ||
2319 | return ret; | |
2320 | } | |
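/*
 * Userspace sketch (editorial addition): mmap'ing a buffer previously
 * obtained with RIO_ALLOC_DMA. The field names of struct rio_dma_mem are
 * assumed from the uapi header; the handle returned by the allocation is
 * the physical address that the handler above looks up in md->mappings.
 *
 *	struct rio_dma_mem mem = { .length = 0x10000 };
 *
 *	ioctl(fd, RIO_ALLOC_DMA, &mem);
 *	void *buf = mmap(NULL, mem.length, PROT_READ | PROT_WRITE,
 *			 MAP_SHARED, fd, mem.dma_handle);
 */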
2321 | ||
afc9a42b | 2322 | static __poll_t mport_cdev_poll(struct file *filp, poll_table *wait) |
e8de3701 AB | 2323 | {
2324 | struct mport_cdev_priv *priv = filp->private_data; | |
2325 | ||
2326 | poll_wait(filp, &priv->event_rx_wait, wait); | |
2327 | if (kfifo_len(&priv->event_fifo)) | |
a9a08845 | 2328 | return EPOLLIN | EPOLLRDNORM; |
e8de3701 AB | 2329 |
2330 | return 0; | |
2331 | } | |
2332 | ||
2333 | static ssize_t mport_read(struct file *filp, char __user *buf, size_t count, | |
2334 | loff_t *ppos) | |
2335 | { | |
2336 | struct mport_cdev_priv *priv = filp->private_data; | |
2337 | int copied; | |
2338 | ssize_t ret; | |
2339 | ||
2340 | if (!count) | |
2341 | return 0; | |
2342 | ||
2343 | if (kfifo_is_empty(&priv->event_fifo) && | |
2344 | (filp->f_flags & O_NONBLOCK)) | |
2345 | return -EAGAIN; | |
2346 | ||
2347 | if (count % sizeof(struct rio_event)) | |
2348 | return -EINVAL; | |
2349 | ||
2350 | ret = wait_event_interruptible(priv->event_rx_wait, | |
2351 | kfifo_len(&priv->event_fifo) != 0); | |
2352 | if (ret) | |
2353 | return ret; | |
2354 | ||
2355 | while (ret < count) { | |
2356 | if (kfifo_to_user(&priv->event_fifo, buf, | |
2357 | sizeof(struct rio_event), &copied)) | |
2358 | return -EFAULT; | |
2359 | ret += copied; | |
2360 | buf += copied; | |
2361 | } | |
2362 | ||
2363 | return ret; | |
2364 | } | |
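/*
 * Userspace sketch (editorial addition): a blocking event loop built on
 * the poll and read implementations above. Reads must be a multiple of
 * sizeof(struct rio_event); handle_doorbell() is hypothetical.
 *
 *	struct rio_event ev;
 *	struct pollfd pfd = { .fd = fd, .events = POLLIN };
 *
 *	while (poll(&pfd, 1, -1) > 0) {
 *		if (read(fd, &ev, sizeof(ev)) != sizeof(ev))
 *			break;
 *		if (ev.header == RIO_DOORBELL)
 *			handle_doorbell(ev.u.doorbell.rioid,
 *					ev.u.doorbell.payload);
 *	}
 */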
2365 | ||
2366 | static ssize_t mport_write(struct file *filp, const char __user *buf, | |
2367 | size_t count, loff_t *ppos) | |
2368 | { | |
2369 | struct mport_cdev_priv *priv = filp->private_data; | |
2370 | struct rio_mport *mport = priv->md->mport; | |
2371 | struct rio_event event; | |
2372 | int len, ret; | |
2373 | ||
2374 | if (!count) | |
2375 | return 0; | |
2376 | ||
2377 | if (count % sizeof(event)) | |
2378 | return -EINVAL; | |
2379 | ||
2380 | len = 0; | |
2381 | while ((count - len) >= (int)sizeof(event)) { | |
2382 | if (copy_from_user(&event, buf, sizeof(event))) | |
2383 | return -EFAULT; | |
2384 | ||
2385 | if (event.header != RIO_DOORBELL) | |
2386 | return -EINVAL; | |
2387 | ||
2388 | ret = rio_mport_send_doorbell(mport, | |
4e1016da | 2389 | event.u.doorbell.rioid, |
e8de3701 AB | 2390 | event.u.doorbell.payload);
2391 | if (ret < 0) | |
2392 | return ret; | |
2393 | ||
2394 | len += sizeof(event); | |
2395 | buf += sizeof(event); | |
2396 | } | |
2397 | ||
2398 | return len; | |
2399 | } | |
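/*
 * Userspace sketch (editorial addition): sending a doorbell by writing a
 * RIO_DOORBELL event, mirroring the validation in mport_write() above.
 *
 *	struct rio_event ev = { 0 };
 *
 *	ev.header = RIO_DOORBELL;
 *	ev.u.doorbell.rioid = 0x0001;	// destination ID, placeholder
 *	ev.u.doorbell.payload = 0x5a5a;
 *	write(fd, &ev, sizeof(ev));
 */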
2400 | ||
2401 | static const struct file_operations mport_fops = { | |
2402 | .owner = THIS_MODULE, | |
2403 | .open = mport_cdev_open, | |
2404 | .release = mport_cdev_release, | |
2405 | .poll = mport_cdev_poll, | |
2406 | .read = mport_read, | |
2407 | .write = mport_write, | |
2408 | .mmap = mport_cdev_mmap, | |
2409 | .fasync = mport_cdev_fasync, | |
2410 | .unlocked_ioctl = mport_cdev_ioctl | |
2411 | }; | |
2412 | ||
2413 | /* | |
2414 | * Character device management | |
2415 | */ | |
2416 | ||
2417 | static void mport_device_release(struct device *dev) | |
2418 | { | |
2419 | struct mport_dev *md; | |
2420 | ||
2421 | rmcd_debug(EXIT, "%s", dev_name(dev)); | |
2422 | md = container_of(dev, struct mport_dev, dev); | |
2423 | kfree(md); | |
2424 | } | |
2425 | ||
2426 | /* | |
2427 | * mport_cdev_add() - Create mport_dev from rio_mport | |
2428 | * @mport: RapidIO master port | |
2429 | */ | |
2430 | static struct mport_dev *mport_cdev_add(struct rio_mport *mport) | |
2431 | { | |
2432 | int ret = 0; | |
2433 | struct mport_dev *md; | |
2434 | struct rio_mport_attr attr; | |
2435 | ||
4e1016da | 2436 | md = kzalloc(sizeof(*md), GFP_KERNEL); |
e8de3701 AB | 2437 | if (!md) {
2438 | rmcd_error("Unable allocate a device object"); | |
2439 | return NULL; | |
2440 | } | |
2441 | ||
2442 | md->mport = mport; | |
2443 | mutex_init(&md->buf_mutex); | |
2444 | mutex_init(&md->file_mutex); | |
2445 | INIT_LIST_HEAD(&md->file_list); | |
e8de3701 | 2446 | |
dbef390d LG | 2447 | device_initialize(&md->dev);
2448 | md->dev.devt = MKDEV(MAJOR(dev_number), mport->id); | |
e8de3701 AB | 2449 | md->dev.class = dev_class;
2450 | md->dev.parent = &mport->dev; | |
2451 | md->dev.release = mport_device_release; | |
2452 | dev_set_name(&md->dev, DEV_NAME "%d", mport->id); | |
2453 | atomic_set(&md->active, 1); | |
2454 | ||
dbef390d LG | 2455 | cdev_init(&md->cdev, &mport_fops);
2456 | md->cdev.owner = THIS_MODULE; | |
2457 | ||
2458 | ret = cdev_device_add(&md->cdev, &md->dev); | |
e8de3701 AB | 2459 | if (ret) {
2460 | rmcd_error("Failed to register mport %d (err=%d)", | |
2461 | mport->id, ret); | |
2462 | goto err_cdev; | |
2463 | } | |
2464 | ||
e8de3701 AB | 2465 | INIT_LIST_HEAD(&md->doorbells);
2466 | spin_lock_init(&md->db_lock); | |
2467 | INIT_LIST_HEAD(&md->portwrites); | |
2468 | spin_lock_init(&md->pw_lock); | |
2469 | INIT_LIST_HEAD(&md->mappings); | |
2470 | ||
2471 | md->properties.id = mport->id; | |
2472 | md->properties.sys_size = mport->sys_size; | |
2473 | md->properties.hdid = mport->host_deviceid; | |
2474 | md->properties.index = mport->index; | |
2475 | ||
2476 | /* The transfer_mode property will be returned through the mport | |
2477 | * query interface. | |
2478 | */ | |
4e1016da | 2479 | #ifdef CONFIG_FSL_RIO /* for now: only on Freescale's SoCs */ |
e8de3701 AB | 2480 | md->properties.transfer_mode |= RIO_TRANSFER_MODE_MAPPED;
2481 | #else | |
2482 | md->properties.transfer_mode |= RIO_TRANSFER_MODE_TRANSFER; | |
2483 | #endif | |
2484 | ret = rio_query_mport(mport, &attr); | |
2485 | if (!ret) { | |
2486 | md->properties.flags = attr.flags; | |
2487 | md->properties.link_speed = attr.link_speed; | |
2488 | md->properties.link_width = attr.link_width; | |
2489 | md->properties.dma_max_sge = attr.dma_max_sge; | |
2490 | md->properties.dma_max_size = attr.dma_max_size; | |
2491 | md->properties.dma_align = attr.dma_align; | |
2492 | md->properties.cap_sys_size = 0; | |
2493 | md->properties.cap_transfer_mode = 0; | |
2494 | md->properties.cap_addr_size = 0; | |
2495 | } else | |
2496 | pr_info(DRV_PREFIX "Failed to obtain info for %s cdev(%d:%d)\n", | |
2497 | mport->name, MAJOR(dev_number), mport->id); | |
2498 | ||
2499 | mutex_lock(&mport_devs_lock); | |
2500 | list_add_tail(&md->node, &mport_devs); | |
2501 | mutex_unlock(&mport_devs_lock); | |
2502 | ||
2503 | pr_info(DRV_PREFIX "Added %s cdev(%d:%d)\n", | |
2504 | mport->name, MAJOR(dev_number), mport->id); | |
2505 | ||
2506 | return md; | |
2507 | ||
2508 | err_cdev: | |
dbef390d | 2509 | put_device(&md->dev); |
e8de3701 AB | 2510 | return NULL;
2511 | } | |
2512 | ||
2513 | /* | |
2514 | * mport_cdev_terminate_dma() - Stop all active DMA data transfers and release | |
2515 | * associated DMA channels. | |
2516 | */ | |
2517 | static void mport_cdev_terminate_dma(struct mport_dev *md) | |
2518 | { | |
2519 | #ifdef CONFIG_RAPIDIO_DMA_ENGINE | |
2520 | struct mport_cdev_priv *client; | |
2521 | ||
2522 | rmcd_debug(DMA, "%s", dev_name(&md->dev)); | |
2523 | ||
2524 | mutex_lock(&md->file_mutex); | |
2525 | list_for_each_entry(client, &md->file_list, list) { | |
2526 | if (client->dmach) { | |
2527 | dmaengine_terminate_all(client->dmach); | |
2528 | rio_release_dma(client->dmach); | |
2529 | } | |
2530 | } | |
2531 | mutex_unlock(&md->file_mutex); | |
2532 | ||
2533 | if (md->dma_chan) { | |
2534 | dmaengine_terminate_all(md->dma_chan); | |
2535 | rio_release_dma(md->dma_chan); | |
2536 | md->dma_chan = NULL; | |
2537 | } | |
2538 | #endif | |
2539 | } | |
2540 | ||
2541 | ||
2542 | /* | |
2543 | * mport_cdev_kill_fasync() - Send SIGIO signal to all processes with open | |
2544 | * mport_cdev files. | |
2545 | */ | |
2546 | static int mport_cdev_kill_fasync(struct mport_dev *md) | |
2547 | { | |
2548 | unsigned int files = 0; | |
2549 | struct mport_cdev_priv *client; | |
2550 | ||
2551 | mutex_lock(&md->file_mutex); | |
2552 | list_for_each_entry(client, &md->file_list, list) { | |
2553 | if (client->async_queue) | |
2554 | kill_fasync(&client->async_queue, SIGIO, POLL_HUP); | |
2555 | files++; | |
2556 | } | |
2557 | mutex_unlock(&md->file_mutex); | |
2558 | return files; | |
2559 | } | |
2560 | ||
2561 | /* | |
2562 | * mport_cdev_remove() - Remove mport character device | |
2564 | * @md: mport device structure to remove | |
2564 | */ | |
2565 | static void mport_cdev_remove(struct mport_dev *md) | |
2566 | { | |
2567 | struct rio_mport_mapping *map, *_map; | |
2568 | ||
2569 | rmcd_debug(EXIT, "Remove %s cdev", md->mport->name); | |
2570 | atomic_set(&md->active, 0); | |
2571 | mport_cdev_terminate_dma(md); | |
2572 | rio_del_mport_pw_handler(md->mport, md, rio_mport_pw_handler); | |
dbef390d | 2573 | cdev_device_del(&md->cdev, &md->dev); |
e8de3701 AB | 2574 | mport_cdev_kill_fasync(md);
2575 | ||
2576 | flush_workqueue(dma_wq); | |
2577 | ||
2578 | /* TODO: do we need to give clients some time to close file | |
2579 | * descriptors? Simple wait for XX, or kref? | |
2580 | */ | |
2581 | ||
2582 | /* | |
2583 | * Release DMA buffers allocated for the mport device. | |
2584 | * Disable associated inbound RapidIO request mapping if applicable. | |
2585 | */ | |
2586 | mutex_lock(&md->buf_mutex); | |
2587 | list_for_each_entry_safe(map, _map, &md->mappings, node) { | |
2588 | kref_put(&map->ref, mport_release_mapping); | |
2589 | } | |
2590 | mutex_unlock(&md->buf_mutex); | |
2591 | ||
2592 | if (!list_empty(&md->mappings)) | |
2593 | rmcd_warn("WARNING: %s pending mappings on removal", | |
2594 | md->mport->name); | |
2595 | ||
2596 | rio_release_inb_dbell(md->mport, 0, 0x0fff); | |
2597 | ||
e8de3701 AB | 2598 | put_device(&md->dev);
2599 | } | |
2600 | ||
2601 | /* | |
2602 | * RIO rio_mport_interface driver | |
2603 | */ | |
2604 | ||
2605 | /* | |
2606 | * mport_add_mport() - Add rio_mport from LDM device struct | |
2607 | * @dev: Linux device model struct | |
2608 | * @class_intf: Linux class_interface | |
2609 | */ | |
2610 | static int mport_add_mport(struct device *dev, | |
2611 | struct class_interface *class_intf) | |
2612 | { | |
2613 | struct rio_mport *mport = NULL; | |
2614 | struct mport_dev *chdev = NULL; | |
2615 | ||
2616 | mport = to_rio_mport(dev); | |
2617 | if (!mport) | |
2618 | return -ENODEV; | |
2619 | ||
2620 | chdev = mport_cdev_add(mport); | |
2621 | if (!chdev) | |
2622 | return -ENODEV; | |
2623 | ||
2624 | return 0; | |
2625 | } | |
2626 | ||
2627 | /* | |
2628 | * mport_remove_mport() - Remove rio_mport from global list | |
2629 | * TODO remove device from global mport_dev list | |
2630 | */ | |
2631 | static void mport_remove_mport(struct device *dev, | |
2632 | struct class_interface *class_intf) | |
2633 | { | |
2634 | struct rio_mport *mport = NULL; | |
2635 | struct mport_dev *chdev; | |
2636 | int found = 0; | |
2637 | ||
2638 | mport = to_rio_mport(dev); | |
2639 | rmcd_debug(EXIT, "Remove %s", mport->name); | |
2640 | ||
2641 | mutex_lock(&mport_devs_lock); | |
2642 | list_for_each_entry(chdev, &mport_devs, node) { | |
2643 | if (chdev->mport->id == mport->id) { | |
2644 | atomic_set(&chdev->active, 0); | |
2645 | list_del(&chdev->node); | |
2646 | found = 1; | |
2647 | break; | |
2648 | } | |
2649 | } | |
2650 | mutex_unlock(&mport_devs_lock); | |
2651 | ||
2652 | if (found) | |
2653 | mport_cdev_remove(chdev); | |
2654 | } | |
2655 | ||
2656 | /* the rio_mport_interface is used to handle local mport devices */ | |
2657 | static struct class_interface rio_mport_interface __refdata = { | |
2658 | .class = &rio_mport_class, | |
2659 | .add_dev = mport_add_mport, | |
2660 | .remove_dev = mport_remove_mport, | |
2661 | }; | |
2662 | ||
2663 | /* | |
2664 | * Linux kernel module | |
2665 | */ | |
2666 | ||
2667 | /* | |
2668 | * mport_init - Driver module loading | |
2669 | */ | |
2670 | static int __init mport_init(void) | |
2671 | { | |
2672 | int ret; | |
2673 | ||
2674 | /* Create device class needed by udev */ | |
2675 | dev_class = class_create(THIS_MODULE, DRV_NAME); | |
99f23c2c | 2676 | if (IS_ERR(dev_class)) { |
e8de3701 | 2677 | rmcd_error("Unable to create " DRV_NAME " class"); |
99f23c2c | 2678 | return PTR_ERR(dev_class); |
e8de3701 AB | 2679 | }
2680 | ||
2681 | ret = alloc_chrdev_region(&dev_number, 0, RIO_MAX_MPORTS, DRV_NAME); | |
2682 | if (ret < 0) | |
2683 | goto err_chr; | |
2684 | ||
2685 | rmcd_debug(INIT, "Registered class with major=%d", MAJOR(dev_number)); | |
2686 | ||
2687 | /* Register to rio_mport_interface */ | |
2688 | ret = class_interface_register(&rio_mport_interface); | |
2689 | if (ret) { | |
2690 | rmcd_error("class_interface_register() failed, err=%d", ret); | |
2691 | goto err_cli; | |
2692 | } | |
2693 | ||
2694 | dma_wq = create_singlethread_workqueue("dma_wq"); | |
2695 | if (!dma_wq) { | |
2696 | rmcd_error("failed to create DMA work queue"); | |
2697 | ret = -ENOMEM; | |
2698 | goto err_wq; | |
2699 | } | |
2700 | ||
2701 | return 0; | |
2702 | ||
2703 | err_wq: | |
2704 | class_interface_unregister(&rio_mport_interface); | |
2705 | err_cli: | |
2706 | unregister_chrdev_region(dev_number, RIO_MAX_MPORTS); | |
2707 | err_chr: | |
2708 | class_destroy(dev_class); | |
2709 | return ret; | |
2710 | } | |
2711 | ||
2712 | /* | |
2713 | * mport_exit - Driver module unloading | |
2714 | */ | |
2715 | static void __exit mport_exit(void) | |
2716 | { | |
2717 | class_interface_unregister(&rio_mport_interface); | |
2718 | class_destroy(dev_class); | |
2719 | unregister_chrdev_region(dev_number, RIO_MAX_MPORTS); | |
2720 | destroy_workqueue(dma_wq); | |
2721 | } | |
2722 | ||
2723 | module_init(mport_init); | |
2724 | module_exit(mport_exit); |