staging: xillybus: Removed unnecessary error message

drivers/staging/xillybus/xillybus_core.c (linux-2.6-block.git)
/*
 * linux/drivers/misc/xillybus_core.c
 *
 * Copyright 2011 Xillybus Ltd, http://xillybus.com
 *
 * Driver for the Xillybus FPGA/host framework.
 *
 * This driver interfaces with a special IP core in an FPGA, setting up
 * a pipe between a hardware FIFO in the programmable logic and a device
 * file in the host. The number of such pipes and their attributes are
 * set up on the logic. This driver detects these automatically and
 * creates the device files accordingly.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 */

#include <linux/list.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/io.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/cdev.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
#include <linux/crc32.h>
#include <linux/poll.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include "xillybus.h"

MODULE_DESCRIPTION("Xillybus core functions");
MODULE_AUTHOR("Eli Billauer, Xillybus Ltd.");
MODULE_VERSION("1.07");
MODULE_ALIAS("xillybus_core");
MODULE_LICENSE("GPL v2");

/* General timeout is 100 ms, rx timeout is 10 ms */
#define XILLY_RX_TIMEOUT (10*HZ/1000)
#define XILLY_TIMEOUT (100*HZ/1000)

#define fpga_msg_ctrl_reg              0x0008
#define fpga_dma_control_reg           0x0020
#define fpga_dma_bufno_reg             0x0024
#define fpga_dma_bufaddr_lowaddr_reg   0x0028
#define fpga_dma_bufaddr_highaddr_reg  0x002c
#define fpga_buf_ctrl_reg              0x0030
#define fpga_buf_offset_reg            0x0034
#define fpga_endian_reg                0x0040

#define XILLYMSG_OPCODE_RELEASEBUF 1
#define XILLYMSG_OPCODE_QUIESCEACK 2
#define XILLYMSG_OPCODE_FIFOEOF 3
#define XILLYMSG_OPCODE_FATAL_ERROR 4
#define XILLYMSG_OPCODE_NONEMPTY 5

static const char xillyname[] = "xillybus";

static struct class *xillybus_class;

/*
 * ep_list_lock is the last lock to be taken; no other lock requests are
 * allowed while holding it. It merely protects list_of_endpoints, and not
 * the endpoints listed in it.
 */

static LIST_HEAD(list_of_endpoints);
static struct mutex ep_list_lock;
static struct workqueue_struct *xillybus_wq;

/*
 * Locking scheme: Mutexes protect invocations of character device methods.
 * If both locks are taken, wr_mutex is taken first, rd_mutex second.
 *
 * wr_spinlock protects wr_*_buf_idx, wr_empty, wr_sleepy, wr_ready and the
 * buffers' end_offset fields against changes made by the IRQ handler (and
 * in theory, other file request handlers, but the mutex handles that).
 * Nothing else. The spinlocks are held for short direct memory
 * manipulations. Needless to say, no mutex locking is allowed when a
 * spinlock is held.
 *
 * rd_spinlock does the same with rd_*_buf_idx, rd_empty and end_offset.
 *
 * register_mutex is endpoint-specific, and is held when non-atomic
 * register operations are performed. wr_mutex and rd_mutex may be
 * held when register_mutex is taken, but none of the spinlocks. Note that
 * register_mutex doesn't protect against sporadic buf_ctrl_reg writes
 * which are unrelated to buf_offset_reg, since they are harmless.
 *
 * Blocking on the wait queues is allowed with mutexes held, but not with
 * spinlocks.
 *
 * Only interruptible blocking is allowed on mutexes and wait queues.
 *
 * All in all, the locking order goes (with skips allowed, of course):
 * wr_mutex -> rd_mutex -> register_mutex -> wr_spinlock -> rd_spinlock
 */
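
/*
 * Editor's illustrative sketch (not part of the driver, hence under
 * "#if 0"): a hypothetical helper that touches both directions of a
 * channel while respecting the locking order documented above. The
 * function itself is made up; the fields are the real ones.
 */
#if 0
static int example_lock_order(struct xilly_channel *channel)
{
	unsigned long flags;
	int rc;

	rc = mutex_lock_interruptible(&channel->wr_mutex);	/* 1st */
	if (rc)
		return rc;
	rc = mutex_lock_interruptible(&channel->rd_mutex);	/* 2nd */
	if (rc)
		goto unlock_wr;

	mutex_lock(&channel->endpoint->register_mutex);		/* 3rd */

	spin_lock_irqsave(&channel->wr_spinlock, flags);	/* 4th */
	/* Only short, non-blocking manipulation of wr_* state here */
	spin_unlock_irqrestore(&channel->wr_spinlock, flags);

	mutex_unlock(&channel->endpoint->register_mutex);
	mutex_unlock(&channel->rd_mutex);
unlock_wr:
	mutex_unlock(&channel->wr_mutex);
	return rc;
}
#endif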

static void malformed_message(struct xilly_endpoint *endpoint, u32 *buf)
{
	int opcode;
	int msg_channel, msg_bufno, msg_data, msg_dir;

	opcode = (buf[0] >> 24) & 0xff;
	msg_dir = buf[0] & 1;
	msg_channel = (buf[0] >> 1) & 0x7ff;
	msg_bufno = (buf[0] >> 12) & 0x3ff;
	msg_data = buf[1] & 0xfffffff;

	dev_warn(endpoint->dev,
		 "Malformed message (skipping): opcode=%d, channel=%03x, dir=%d, bufno=%03x, data=%07x\n",
		 opcode, msg_channel, msg_dir, msg_bufno, msg_data);
}
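
/*
 * Editor's note (derived from the bit manipulation above and in the ISR):
 * each message from the FPGA occupies two 32-bit words, laid out as
 *
 *   buf[0]:  bits 31:24  opcode
 *            bit  22     "last message" flag (tested in the ISR)
 *            bits 21:12  buffer number
 *            bits 11:1   channel number
 *            bit  0      direction (1 = write channel, FPGA to host)
 *
 *   buf[1]:  bits 31:28  message counter (sequence check in the ISR)
 *            bits 27:0   data (e.g. an end offset or the IDT length)
 */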

/*
 * xillybus_isr assumes the interrupt is allocated exclusively to it,
 * which is the natural case with MSI and several other hardware-oriented
 * interrupts. Sharing is not allowed.
 */

irqreturn_t xillybus_isr(int irq, void *data)
{
	struct xilly_endpoint *ep = data;
	u32 *buf;
	unsigned int buf_size;
	int i;
	int opcode;
	unsigned int msg_channel, msg_bufno, msg_data, msg_dir;
	struct xilly_channel *channel;

	buf = ep->msgbuf_addr;
	buf_size = ep->msg_buf_size/sizeof(u32);

	ep->ephw->hw_sync_sgl_for_cpu(ep,
				      ep->msgbuf_dma_addr,
				      ep->msg_buf_size,
				      DMA_FROM_DEVICE);

	for (i = 0; i < buf_size; i += 2)
		if (((buf[i+1] >> 28) & 0xf) != ep->msg_counter) {
			malformed_message(ep, &buf[i]);
			dev_warn(ep->dev,
				 "Sending a NACK on counter %x (instead of %x) on entry %d\n",
				 ((buf[i+1] >> 28) & 0xf),
				 ep->msg_counter,
				 i/2);

			if (++ep->failed_messages > 10) {
				dev_err(ep->dev,
					"Lost sync with interrupt messages. Stopping.\n");
			} else {
				ep->ephw->hw_sync_sgl_for_device(
					ep,
					ep->msgbuf_dma_addr,
					ep->msg_buf_size,
					DMA_FROM_DEVICE);

				iowrite32(0x01, /* Message NACK */
					  ep->registers + fpga_msg_ctrl_reg);
			}
			return IRQ_HANDLED;
		} else if (buf[i] & (1 << 22)) /* Last message */
			break;

	if (i >= buf_size) {
		dev_err(ep->dev, "Bad interrupt message. Stopping.\n");
		return IRQ_HANDLED;
	}

	buf_size = i;

	for (i = 0; i <= buf_size; i += 2) { /* Scan through messages */
		opcode = (buf[i] >> 24) & 0xff;

		msg_dir = buf[i] & 1;
		msg_channel = (buf[i] >> 1) & 0x7ff;
		msg_bufno = (buf[i] >> 12) & 0x3ff;
		msg_data = buf[i+1] & 0xfffffff;

		switch (opcode) {
		case XILLYMSG_OPCODE_RELEASEBUF:
			if ((msg_channel > ep->num_channels) ||
			    (msg_channel == 0)) {
				malformed_message(ep, &buf[i]);
				break;
			}

			channel = ep->channels[msg_channel];

			if (msg_dir) { /* Write channel */
				if (msg_bufno >= channel->num_wr_buffers) {
					malformed_message(ep, &buf[i]);
					break;
				}
				spin_lock(&channel->wr_spinlock);
				channel->wr_buffers[msg_bufno]->end_offset =
					msg_data;
				channel->wr_fpga_buf_idx = msg_bufno;
				channel->wr_empty = 0;
				channel->wr_sleepy = 0;
				spin_unlock(&channel->wr_spinlock);

				wake_up_interruptible(&channel->wr_wait);
			} else {
				/* Read channel */

				if (msg_bufno >= channel->num_rd_buffers) {
					malformed_message(ep, &buf[i]);
					break;
				}

				spin_lock(&channel->rd_spinlock);
				channel->rd_fpga_buf_idx = msg_bufno;
				channel->rd_full = 0;
				spin_unlock(&channel->rd_spinlock);

				wake_up_interruptible(&channel->rd_wait);
				if (!channel->rd_synchronous)
					queue_delayed_work(
						xillybus_wq,
						&channel->rd_workitem,
						XILLY_RX_TIMEOUT);
			}

			break;
		case XILLYMSG_OPCODE_NONEMPTY:
			if ((msg_channel > ep->num_channels) ||
			    (msg_channel == 0) || (!msg_dir) ||
			    !ep->channels[msg_channel]->wr_supports_nonempty) {
				malformed_message(ep, &buf[i]);
				break;
			}

			channel = ep->channels[msg_channel];

			if (msg_bufno >= channel->num_wr_buffers) {
				malformed_message(ep, &buf[i]);
				break;
			}
			spin_lock(&channel->wr_spinlock);
			if (msg_bufno == channel->wr_host_buf_idx)
				channel->wr_ready = 1;
			spin_unlock(&channel->wr_spinlock);

			wake_up_interruptible(&channel->wr_ready_wait);

			break;
		case XILLYMSG_OPCODE_QUIESCEACK:
			ep->idtlen = msg_data;
			wake_up_interruptible(&ep->ep_wait);

			break;
		case XILLYMSG_OPCODE_FIFOEOF:
			if ((msg_channel > ep->num_channels) ||
			    (msg_channel == 0) || (!msg_dir) ||
			    !ep->channels[msg_channel]->num_wr_buffers) {
				malformed_message(ep, &buf[i]);
				break;
			}
			channel = ep->channels[msg_channel];
			spin_lock(&channel->wr_spinlock);
			channel->wr_eof = msg_bufno;
			channel->wr_sleepy = 0;

			channel->wr_hangup = channel->wr_empty &&
				(channel->wr_host_buf_idx == msg_bufno);

			spin_unlock(&channel->wr_spinlock);

			wake_up_interruptible(&channel->wr_wait);

			break;
		case XILLYMSG_OPCODE_FATAL_ERROR:
			ep->fatal_error = 1;
			wake_up_interruptible(&ep->ep_wait); /* For select() */
			dev_err(ep->dev,
				"FPGA reported a fatal error. This means that the low-level communication with the device has failed. This hardware problem is most likely unrelated to Xillybus (neither kernel module nor FPGA core), but reports are still welcome. All I/O is aborted.\n");
			break;
		default:
			malformed_message(ep, &buf[i]);
			break;
		}
	}

	ep->ephw->hw_sync_sgl_for_device(ep,
					 ep->msgbuf_dma_addr,
					 ep->msg_buf_size,
					 DMA_FROM_DEVICE);

	ep->msg_counter = (ep->msg_counter + 1) & 0xf;
	ep->failed_messages = 0;
	iowrite32(0x03, ep->registers + fpga_msg_ctrl_reg); /* Message ACK */

	return IRQ_HANDLED;
}
EXPORT_SYMBOL(xillybus_isr);
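
/*
 * Editor's illustrative sketch (not part of this file): xillybus_isr is
 * exported so the bus-specific glue (PCIe or OF) can register it. Since
 * sharing is not allowed, a hookup would plausibly look like the
 * following, with MSI enabled beforehand; the function name is an
 * assumption for illustration.
 */
#if 0
static int example_register_irq(struct pci_dev *pdev,
				struct xilly_endpoint *endpoint)
{
	/* No IRQF_SHARED, per the comment preceding xillybus_isr */
	return devm_request_irq(&pdev->dev, pdev->irq, xillybus_isr, 0,
				xillyname, endpoint);
}
#endif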

/*
 * A few trivial memory management functions.
 * NOTE: These functions are used only on probe and remove, and therefore
 * no locks are applied!
 */

static void xillybus_autoflush(struct work_struct *work);

struct xilly_alloc_state {
	void *salami;
	int left_of_salami;
	int nbuffer;
	enum dma_data_direction direction;
	u32 regdirection;
};

static int xilly_get_dma_buffers(struct xilly_endpoint *ep,
				 struct xilly_alloc_state *s,
				 struct xilly_buffer **buffers,
				 int bufnum, int bytebufsize)
{
	int i, rc;
	dma_addr_t dma_addr;
	struct device *dev = ep->dev;
	struct xilly_buffer *this_buffer = NULL; /* Init to silence warning */

	if (buffers) { /* Not the message buffer */
		this_buffer = devm_kcalloc(dev, bufnum,
					   sizeof(struct xilly_buffer),
					   GFP_KERNEL);

		if (!this_buffer)
			return -ENOMEM;
	}

	for (i = 0; i < bufnum; i++) {
		/*
		 * Buffers are expected in descending size order, so there
		 * is either enough space for this buffer or none at all.
		 */

		if ((s->left_of_salami < bytebufsize) &&
		    (s->left_of_salami > 0)) {
			dev_err(ep->dev,
				"Corrupt buffer allocation in IDT. Aborting.\n");
			return -ENODEV;
		}

		if (s->left_of_salami == 0) {
			int allocorder, allocsize;

			allocsize = PAGE_SIZE;
			allocorder = 0;
			while (bytebufsize > allocsize) {
				allocsize *= 2;
				allocorder++;
			}

			s->salami = (void *) devm_get_free_pages(
				dev,
				GFP_KERNEL | __GFP_DMA32 | __GFP_ZERO,
				allocorder);

			if (!s->salami)
				return -ENOMEM;
			s->left_of_salami = allocsize;
		}

		rc = ep->ephw->map_single(ep, s->salami,
					  bytebufsize, s->direction,
					  &dma_addr);

		if (rc)
			return rc;

		iowrite32((u32) (dma_addr & 0xffffffff),
			  ep->registers + fpga_dma_bufaddr_lowaddr_reg);
		iowrite32(((u32) ((((u64) dma_addr) >> 32) & 0xffffffff)),
			  ep->registers + fpga_dma_bufaddr_highaddr_reg);

		if (buffers) { /* Not the message buffer */
			this_buffer->addr = s->salami;
			this_buffer->dma_addr = dma_addr;
			buffers[i] = this_buffer++;

			iowrite32(s->regdirection | s->nbuffer++,
				  ep->registers + fpga_dma_bufno_reg);
		} else {
			ep->msgbuf_addr = s->salami;
			ep->msgbuf_dma_addr = dma_addr;
			ep->msg_buf_size = bytebufsize;

			iowrite32(s->regdirection,
				  ep->registers + fpga_dma_bufno_reg);
		}

		s->left_of_salami -= bytebufsize;
		s->salami += bytebufsize;
	}
	return 0; /* Success */
}
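
/*
 * Editor's note on the "salami" scheme above (a worked example, derived
 * from the code): buffers are carved off a page-order allocation until it
 * is used up, then a fresh one is obtained. With PAGE_SIZE = 4096 and
 * eight 1024-byte buffers, the first iteration allocates one 4096-byte
 * "salami" (order 0) and slices buffers 0-3 from it; the fifth iteration
 * finds left_of_salami == 0, allocates a second page, and slices buffers
 * 4-7. Since buffer sizes arrive in descending order, a nonzero remainder
 * smaller than the next buffer can only mean a corrupt IDT.
 */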

static int xilly_setupchannels(struct xilly_endpoint *ep,
			       unsigned char *chandesc,
			       int entries)
{
	struct device *dev = ep->dev;
	int i, entry, rc;
	struct xilly_channel *channel;
	int channelnum, bufnum, bufsize, format, is_writebuf;
	int bytebufsize;
	int synchronous, allowpartial, exclusive_open, seekable;
	int supports_nonempty;
	int msg_buf_done = 0;

	struct xilly_alloc_state rd_alloc = {
		.salami = NULL,
		.left_of_salami = 0,
		.nbuffer = 1,
		.direction = DMA_TO_DEVICE,
		.regdirection = 0,
	};

	struct xilly_alloc_state wr_alloc = {
		.salami = NULL,
		.left_of_salami = 0,
		.nbuffer = 1,
		.direction = DMA_FROM_DEVICE,
		.regdirection = 0x80000000,
	};

	channel = devm_kcalloc(dev, ep->num_channels,
			       sizeof(struct xilly_channel), GFP_KERNEL);

	if (!channel)
		return -ENOMEM;

	ep->channels = devm_kcalloc(dev, ep->num_channels + 1,
				    sizeof(struct xilly_channel *),
				    GFP_KERNEL);

	if (!ep->channels)
		return -ENOMEM;

	ep->channels[0] = NULL; /* Channel 0 is message buf. */

	/* Initialize all channels with defaults */

	for (i = 1; i <= ep->num_channels; i++) {
		channel->wr_buffers = NULL;
		channel->rd_buffers = NULL;
		channel->num_wr_buffers = 0;
		channel->num_rd_buffers = 0;
		channel->wr_fpga_buf_idx = -1;
		channel->wr_host_buf_idx = 0;
		channel->wr_host_buf_pos = 0;
		channel->wr_empty = 1;
		channel->wr_ready = 0;
		channel->wr_sleepy = 1;
		channel->rd_fpga_buf_idx = 0;
		channel->rd_host_buf_idx = 0;
		channel->rd_host_buf_pos = 0;
		channel->rd_full = 0;
		channel->wr_ref_count = 0;
		channel->rd_ref_count = 0;

		spin_lock_init(&channel->wr_spinlock);
		spin_lock_init(&channel->rd_spinlock);
		mutex_init(&channel->wr_mutex);
		mutex_init(&channel->rd_mutex);
		init_waitqueue_head(&channel->rd_wait);
		init_waitqueue_head(&channel->wr_wait);
		init_waitqueue_head(&channel->wr_ready_wait);

		INIT_DELAYED_WORK(&channel->rd_workitem, xillybus_autoflush);

		channel->endpoint = ep;
		channel->chan_num = i;

		channel->log2_element_size = 0;

		ep->channels[i] = channel++;
	}

	for (entry = 0; entry < entries; entry++, chandesc += 4) {
		struct xilly_buffer **buffers = NULL;

		is_writebuf = chandesc[0] & 0x01;
		channelnum = (chandesc[0] >> 1) | ((chandesc[1] & 0x0f) << 7);
		format = (chandesc[1] >> 4) & 0x03;
		allowpartial = (chandesc[1] >> 6) & 0x01;
		synchronous = (chandesc[1] >> 7) & 0x01;
		bufsize = 1 << (chandesc[2] & 0x1f);
		bufnum = 1 << (chandesc[3] & 0x0f);
		exclusive_open = (chandesc[2] >> 7) & 0x01;
		seekable = (chandesc[2] >> 6) & 0x01;
		supports_nonempty = (chandesc[2] >> 5) & 0x01;
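		/*
		 * Editor's note (derived from the unpacking above): each
		 * 4-byte channel descriptor is laid out as
		 *
		 *   byte 0: bit 0 is_writebuf, bits 7:1 channelnum (low)
		 *   byte 1: bits 3:0 channelnum (high), bits 5:4 format,
		 *           bit 6 allowpartial, bit 7 synchronous
		 *   byte 2: bits 4:0 log2(bufsize in elements),
		 *           bit 5 supports_nonempty, bit 6 seekable,
		 *           bit 7 exclusive_open
		 *   byte 3: bits 3:0 log2(number of buffers)
		 */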

		if ((channelnum > ep->num_channels) ||
		    ((channelnum == 0) && !is_writebuf)) {
			dev_err(ep->dev,
				"IDT requests channel out of range. Aborting.\n");
			return -ENODEV;
		}

		channel = ep->channels[channelnum]; /* NULL for msg channel */

		if (!is_writebuf || channelnum > 0) {
			channel->log2_element_size = ((format > 2) ?
						      2 : format);

			bytebufsize = channel->rd_buf_size = bufsize *
				(1 << channel->log2_element_size);

			buffers = devm_kcalloc(dev, bufnum,
					       sizeof(struct xilly_buffer *),
					       GFP_KERNEL);

			if (!buffers)
				return -ENOMEM;
		} else {
			bytebufsize = bufsize << 2;
		}

		if (!is_writebuf) {
			channel->num_rd_buffers = bufnum;
			channel->rd_allow_partial = allowpartial;
			channel->rd_synchronous = synchronous;
			channel->rd_exclusive_open = exclusive_open;
			channel->seekable = seekable;

			channel->rd_buffers = buffers;
			rc = xilly_get_dma_buffers(ep, &rd_alloc, buffers,
						   bufnum, bytebufsize);
		} else if (channelnum > 0) {
			channel->num_wr_buffers = bufnum;

			channel->seekable = seekable;
			channel->wr_supports_nonempty = supports_nonempty;

			channel->wr_allow_partial = allowpartial;
			channel->wr_synchronous = synchronous;
			channel->wr_exclusive_open = exclusive_open;

			channel->wr_buffers = buffers;
			rc = xilly_get_dma_buffers(ep, &wr_alloc, buffers,
						   bufnum, bytebufsize);
		} else {
			rc = xilly_get_dma_buffers(ep, &wr_alloc, NULL,
						   bufnum, bytebufsize);
			msg_buf_done++;
		}

		if (rc)
			return -ENOMEM;
	}

	if (!msg_buf_done) {
		dev_err(ep->dev,
			"Corrupt IDT: No message buffer. Aborting.\n");
		return -ENODEV;
	}
	return 0;
}

static void xilly_scan_idt(struct xilly_endpoint *endpoint,
			   struct xilly_idt_handle *idt_handle)
{
	int count = 0;
	unsigned char *idt = endpoint->channels[1]->wr_buffers[0]->addr;
	unsigned char *end_of_idt = idt + endpoint->idtlen - 4;
	unsigned char *scan;
	int len;

	scan = idt;
	idt_handle->idt = idt;

	scan++; /* Skip version number */

	while ((scan <= end_of_idt) && *scan) {
		while ((scan <= end_of_idt) && *scan++)
			/* Do nothing, just scan through the string */;
		count++;
	}

	scan++;

	if (scan > end_of_idt) {
		dev_err(endpoint->dev,
			"IDT device name list overflow. Aborting.\n");
		idt_handle->chandesc = NULL;
		return;
	}
	idt_handle->chandesc = scan;

	len = endpoint->idtlen - (3 + ((int) (scan - idt)));

	if (len & 0x03) {
		idt_handle->chandesc = NULL;

		dev_err(endpoint->dev,
			"Corrupt IDT device name list. Aborting.\n");
	}

	idt_handle->entries = len >> 2;

	endpoint->num_channels = count;
}
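
/*
 * Editor's note (inferred from xilly_scan_idt and xilly_obtain_idt): the
 * IDT blob sent by the FPGA is laid out as a version byte, followed by a
 * list of NUL-terminated device name strings ending with an empty string,
 * followed by the 4-byte channel descriptors that xilly_setupchannels()
 * parses, and finally a CRC chosen so that crc32_le() over the whole blob
 * (idtlen + 1 bytes) evaluates to zero.
 */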

static int xilly_obtain_idt(struct xilly_endpoint *endpoint)
{
	int rc = 0;
	struct xilly_channel *channel;
	unsigned char *version;

	channel = endpoint->channels[1]; /* This should be generated ad-hoc */

	channel->wr_sleepy = 1;

	iowrite32(1 |
		  (3 << 24), /* Opcode 3 for channel 0 = Send IDT */
		  endpoint->registers + fpga_buf_ctrl_reg);

	wait_event_interruptible_timeout(channel->wr_wait,
					 (!channel->wr_sleepy),
					 XILLY_TIMEOUT);

	if (channel->wr_sleepy) {
		dev_err(endpoint->dev, "Failed to obtain IDT. Aborting.\n");

		if (endpoint->fatal_error)
			return -EIO;

		rc = -ENODEV;
		return rc;
	}

	endpoint->ephw->hw_sync_sgl_for_cpu(
		channel->endpoint,
		channel->wr_buffers[0]->dma_addr,
		channel->wr_buf_size,
		DMA_FROM_DEVICE);

	if (channel->wr_buffers[0]->end_offset != endpoint->idtlen) {
		dev_err(endpoint->dev,
			"IDT length mismatch (%d != %d). Aborting.\n",
			channel->wr_buffers[0]->end_offset, endpoint->idtlen);
		rc = -ENODEV;
		return rc;
	}

	if (crc32_le(~0, channel->wr_buffers[0]->addr,
		     endpoint->idtlen+1) != 0) {
		dev_err(endpoint->dev, "IDT failed CRC check. Aborting.\n");
		rc = -ENODEV;
		return rc;
	}

	version = channel->wr_buffers[0]->addr;

	/* Check version number. Accept anything below 0x82 for now. */
	if (*version > 0x82) {
		dev_err(endpoint->dev,
			"No support for IDT version 0x%02x. Maybe the xillybus driver needs an upgrade. Aborting.\n",
			(int) *version);
		rc = -ENODEV;
		return rc;
	}

	return 0; /* Success */
}

static ssize_t xillybus_read(struct file *filp, char __user *userbuf,
			     size_t count, loff_t *f_pos)
{
	ssize_t rc;
	unsigned long flags;
	int bytes_done = 0;
	int no_time_left = 0;
	long deadline, left_to_sleep;
	struct xilly_channel *channel = filp->private_data;

	int empty, reached_eof, exhausted, ready;
	/* Initializations are there only to silence warnings */

	int howmany = 0, bufpos = 0, bufidx = 0, bufferdone = 0;
	int waiting_bufidx;

	if (channel->endpoint->fatal_error)
		return -EIO;

	deadline = jiffies + 1 + XILLY_RX_TIMEOUT;

	rc = mutex_lock_interruptible(&channel->wr_mutex);

	if (rc)
		return rc;

	rc = 0; /* Just to be clear about it. Compiler optimizes this out */

	while (1) { /* Note that we may drop mutex within this loop */
		int bytes_to_do = count - bytes_done;

		spin_lock_irqsave(&channel->wr_spinlock, flags);

		empty = channel->wr_empty;
		ready = !empty || channel->wr_ready;

		if (!empty) {
			bufidx = channel->wr_host_buf_idx;
			bufpos = channel->wr_host_buf_pos;
			howmany = ((channel->wr_buffers[bufidx]->end_offset
				    + 1) << channel->log2_element_size)
				- bufpos;

			/* Update wr_host_* to its post-operation state */
			if (howmany > bytes_to_do) {
				bufferdone = 0;

				howmany = bytes_to_do;
				channel->wr_host_buf_pos += howmany;
			} else {
				bufferdone = 1;

				channel->wr_host_buf_pos = 0;

				if (bufidx == channel->wr_fpga_buf_idx) {
					channel->wr_empty = 1;
					channel->wr_sleepy = 1;
					channel->wr_ready = 0;
				}

				if (bufidx >= (channel->num_wr_buffers - 1))
					channel->wr_host_buf_idx = 0;
				else
					channel->wr_host_buf_idx++;
			}
		}

		/*
		 * Marking our situation after the possible changes above,
		 * for use after releasing the spinlock.
		 *
		 * empty = empty before change
		 * exhausted = empty after possible change
		 */

		reached_eof = channel->wr_empty &&
			(channel->wr_host_buf_idx == channel->wr_eof);
		channel->wr_hangup = reached_eof;
		exhausted = channel->wr_empty;
		waiting_bufidx = channel->wr_host_buf_idx;

		spin_unlock_irqrestore(&channel->wr_spinlock, flags);

		if (!empty) { /* Go on, now without the spinlock */
			if (bufpos == 0) /* Position zero means it's virgin */
				channel->endpoint->ephw->hw_sync_sgl_for_cpu(
					channel->endpoint,
					channel->wr_buffers[bufidx]->dma_addr,
					channel->wr_buf_size,
					DMA_FROM_DEVICE);

			if (copy_to_user(
				    userbuf,
				    channel->wr_buffers[bufidx]->addr
				    + bufpos, howmany))
				rc = -EFAULT;

			userbuf += howmany;
			bytes_done += howmany;

			if (bufferdone) {
				channel->endpoint->ephw->
					hw_sync_sgl_for_device(
						channel->endpoint,
						channel->wr_buffers[bufidx]->
						dma_addr,
						channel->wr_buf_size,
						DMA_FROM_DEVICE);

				/*
				 * Tell FPGA the buffer is done with. It's an
				 * atomic operation to the FPGA, so what
				 * happens with other channels doesn't matter,
				 * and the certain channel is protected with
				 * the channel-specific mutex.
				 */

				iowrite32(1 | (channel->chan_num << 1)
					  | (bufidx << 12),
					  channel->endpoint->registers +
					  fpga_buf_ctrl_reg);
			}

			if (rc) {
				mutex_unlock(&channel->wr_mutex);
				return rc;
			}
		}

		/* This includes a zero-count return = EOF */
		if ((bytes_done >= count) || reached_eof)
			break;

		if (!exhausted)
			continue; /* More in RAM buffer(s)? Just go on. */

		if ((bytes_done > 0) &&
		    (no_time_left ||
		     (channel->wr_synchronous && channel->wr_allow_partial)))
			break;

		/*
		 * Nonblocking read: The "ready" flag tells us that the FPGA
		 * has data to send. In non-blocking mode, if it isn't on,
		 * just return. But if there is, we jump directly to the point
		 * where we ask for the FPGA to send all it has, and wait
		 * until that data arrives. So in a sense, we *do* block in
		 * nonblocking mode, but only for a very short time.
		 */

		if (!no_time_left && (filp->f_flags & O_NONBLOCK)) {
			if (bytes_done > 0)
				break;

			if (ready)
				goto desperate;

			bytes_done = -EAGAIN;
			break;
		}

		if (!no_time_left || (bytes_done > 0)) {
			/*
			 * Note that in case of an element-misaligned read
			 * request, offsetlimit will include the last element,
			 * which will be partially read from.
			 */
			int offsetlimit = ((count - bytes_done) - 1) >>
				channel->log2_element_size;
			int buf_elements = channel->wr_buf_size >>
				channel->log2_element_size;

			/*
			 * In synchronous mode, always send an offset limit.
			 * Just don't send a value too big.
			 */

			if (channel->wr_synchronous) {
				/* Don't request more than one buffer */
				if (channel->wr_allow_partial &&
				    (offsetlimit >= buf_elements))
					offsetlimit = buf_elements - 1;

				/* Don't request more than all buffers */
				if (!channel->wr_allow_partial &&
				    (offsetlimit >=
				     (buf_elements * channel->num_wr_buffers)))
					offsetlimit = buf_elements *
						channel->num_wr_buffers - 1;
			}

			/*
			 * In asynchronous mode, force early flush of a buffer
			 * only if that will allow returning a full count. The
			 * "offsetlimit < ( ... )" rather than "<=" excludes
			 * requesting a full buffer, which would obviously
			 * cause a buffer transmission anyhow.
			 */

			if (channel->wr_synchronous ||
			    (offsetlimit < (buf_elements - 1))) {
				mutex_lock(&channel->endpoint->register_mutex);

				iowrite32(offsetlimit,
					  channel->endpoint->registers +
					  fpga_buf_offset_reg);

				iowrite32(1 | (channel->chan_num << 1) |
					  (2 << 24) | /* 2 = offset limit */
					  (waiting_bufidx << 12),
					  channel->endpoint->registers +
					  fpga_buf_ctrl_reg);

				mutex_unlock(&channel->endpoint->
					     register_mutex);
			}
		}

		/*
		 * If partial completion is disallowed, there is no point in
		 * timeout sleeping. Neither if no_time_left is set and
		 * there's no data.
		 */

		if (!channel->wr_allow_partial ||
		    (no_time_left && (bytes_done == 0))) {
			/*
			 * This do-loop will run more than once if another
			 * thread reasserted wr_sleepy before we got the mutex
			 * back, so we try again.
			 */

			do {
				mutex_unlock(&channel->wr_mutex);

				if (wait_event_interruptible(
					    channel->wr_wait,
					    (!channel->wr_sleepy)))
					goto interrupted;

				if (mutex_lock_interruptible(
					    &channel->wr_mutex))
					goto interrupted;
			} while (channel->wr_sleepy);

			continue;

interrupted: /* Mutex is not held if got here */
			if (channel->endpoint->fatal_error)
				return -EIO;
			if (bytes_done)
				return bytes_done;
			if (filp->f_flags & O_NONBLOCK)
				return -EAGAIN; /* Don't admit snoozing */
			return -EINTR;
		}

		left_to_sleep = deadline - ((long) jiffies);

		/*
		 * If our time is out, skip the waiting. We may miss wr_sleepy
		 * being deasserted but hey, almost missing the train is like
		 * missing it.
		 */

		if (left_to_sleep > 0) {
			left_to_sleep =
				wait_event_interruptible_timeout(
					channel->wr_wait,
					(!channel->wr_sleepy),
					left_to_sleep);

			if (!channel->wr_sleepy)
				continue;

			if (left_to_sleep < 0) { /* Interrupt */
				mutex_unlock(&channel->wr_mutex);
				if (channel->endpoint->fatal_error)
					return -EIO;
				if (bytes_done)
					return bytes_done;
				return -EINTR;
			}
		}

desperate:
		no_time_left = 1; /* We're out of sleeping time. Desperate! */

		if (bytes_done == 0) {
			/*
			 * Reaching here means that we allow partial return,
			 * that we've run out of time, and that we have
			 * nothing to return.
			 * So tell the FPGA to send anything it has or gets.
			 */

			iowrite32(1 | (channel->chan_num << 1) |
				  (3 << 24) | /* Opcode 3, flush it all! */
				  (waiting_bufidx << 12),
				  channel->endpoint->registers +
				  fpga_buf_ctrl_reg);
		}

		/*
		 * Formally speaking, we should block for data at this point.
		 * But to keep the code cleaner, we'll just finish the loop,
		 * make the unlikely check for data, and then block at the
		 * usual place.
		 */
	}

	mutex_unlock(&channel->wr_mutex);

	if (channel->endpoint->fatal_error)
		return -EIO;

	return bytes_done;
}

/*
 * The timeout argument takes values as follows:
 *  >0 : Flush with timeout
 * ==0 : Flush, and wait indefinitely for the flush to complete
 *  <0 : Autoflush: Flush only if there's a single buffer occupied
 */

static int xillybus_myflush(struct xilly_channel *channel, long timeout)
{
	int rc = 0;
	unsigned long flags;

	int end_offset_plus1;
	int bufidx, bufidx_minus1;
	int i;
	int empty;
	int new_rd_host_buf_pos;

	if (channel->endpoint->fatal_error)
		return -EIO;
	rc = mutex_lock_interruptible(&channel->rd_mutex);

	if (rc)
		return rc;

	/*
	 * Don't flush a closed channel. This can happen when the work queued
	 * autoflush thread fires off after the file has closed. This is not
	 * an error, just something to dismiss.
	 */

	if (!channel->rd_ref_count)
		goto done;

	bufidx = channel->rd_host_buf_idx;

	bufidx_minus1 = (bufidx == 0) ? channel->num_rd_buffers - 1 : bufidx-1;

	end_offset_plus1 = channel->rd_host_buf_pos >>
		channel->log2_element_size;

	new_rd_host_buf_pos = channel->rd_host_buf_pos -
		(end_offset_plus1 << channel->log2_element_size);

	/* Submit the current buffer if it's nonempty */
	if (end_offset_plus1) {
		unsigned char *tail = channel->rd_buffers[bufidx]->addr +
			(end_offset_plus1 << channel->log2_element_size);

		/* Copy unflushed data, so we can put it in next buffer */
		for (i = 0; i < new_rd_host_buf_pos; i++)
			channel->rd_leftovers[i] = *tail++;

		spin_lock_irqsave(&channel->rd_spinlock, flags);

		/* Autoflush only if a single buffer is occupied */

		if ((timeout < 0) &&
		    (channel->rd_full ||
		     (bufidx_minus1 != channel->rd_fpga_buf_idx))) {
			spin_unlock_irqrestore(&channel->rd_spinlock, flags);
			/*
			 * A new work item may be queued by the ISR exactly
			 * now, since the execution of a work item allows the
			 * queuing of a new one while it's running.
			 */
			goto done;
		}

		/* The 4th element is never needed for data, so it's a flag */
		channel->rd_leftovers[3] = (new_rd_host_buf_pos != 0);

		/* Set up rd_full to reflect a certain moment's state */

		if (bufidx == channel->rd_fpga_buf_idx)
			channel->rd_full = 1;
		spin_unlock_irqrestore(&channel->rd_spinlock, flags);

		if (bufidx >= (channel->num_rd_buffers - 1))
			channel->rd_host_buf_idx = 0;
		else
			channel->rd_host_buf_idx++;

		channel->endpoint->ephw->hw_sync_sgl_for_device(
			channel->endpoint,
			channel->rd_buffers[bufidx]->dma_addr,
			channel->rd_buf_size,
			DMA_TO_DEVICE);

		mutex_lock(&channel->endpoint->register_mutex);

		iowrite32(end_offset_plus1 - 1,
			  channel->endpoint->registers + fpga_buf_offset_reg);

		iowrite32((channel->chan_num << 1) | /* Channel ID */
			  (2 << 24) |  /* Opcode 2, submit buffer */
			  (bufidx << 12),
			  channel->endpoint->registers + fpga_buf_ctrl_reg);

		mutex_unlock(&channel->endpoint->register_mutex);
	} else if (bufidx == 0)
		bufidx = channel->num_rd_buffers - 1;
	else
		bufidx--;

	channel->rd_host_buf_pos = new_rd_host_buf_pos;

	if (timeout < 0)
		goto done; /* Autoflush */

	/*
	 * bufidx is now the last buffer written to (or equal to
	 * rd_fpga_buf_idx if buffer was never written to), and
	 * channel->rd_host_buf_idx the one after it.
	 *
	 * If bufidx == channel->rd_fpga_buf_idx we're either empty or full.
	 */

	rc = 0;

	while (1) { /* Loop waiting for draining of buffers */
		spin_lock_irqsave(&channel->rd_spinlock, flags);

		if (bufidx != channel->rd_fpga_buf_idx)
			channel->rd_full = 1; /*
					       * Not really full,
					       * but needs waiting.
					       */

		empty = !channel->rd_full;

		spin_unlock_irqrestore(&channel->rd_spinlock, flags);

		if (empty)
			break;

		/*
		 * Indefinite sleep with mutex taken. With data waiting for
		 * flushing, the user should not be surprised if open() for
		 * write sleeps.
		 */
		if (timeout == 0)
			wait_event_interruptible(channel->rd_wait,
						 (!channel->rd_full));

		else if (wait_event_interruptible_timeout(
				 channel->rd_wait,
				 (!channel->rd_full),
				 timeout) == 0) {
			dev_warn(channel->endpoint->dev,
				 "Timed out while flushing. Output data may be lost.\n");

			rc = -ETIMEDOUT;
			break;
		}

		if (channel->rd_full) {
			rc = -EINTR;
			break;
		}
	}

done:
	mutex_unlock(&channel->rd_mutex);

	if (channel->endpoint->fatal_error)
		return -EIO;

	return rc;
}

static int xillybus_flush(struct file *filp, fl_owner_t id)
{
	if (!(filp->f_mode & FMODE_WRITE))
		return 0;

	return xillybus_myflush(filp->private_data, HZ); /* 1 second timeout */
}

static void xillybus_autoflush(struct work_struct *work)
{
	struct delayed_work *workitem = container_of(
		work, struct delayed_work, work);
	struct xilly_channel *channel = container_of(
		workitem, struct xilly_channel, rd_workitem);
	int rc;

	rc = xillybus_myflush(channel, -1);

	if (rc == -EINTR)
		dev_warn(channel->endpoint->dev,
			 "Autoflush failed because work queue thread got a signal.\n");
	else if (rc)
		dev_err(channel->endpoint->dev,
			"Autoflush failed under weird circumstances.\n");
}

static ssize_t xillybus_write(struct file *filp, const char __user *userbuf,
			      size_t count, loff_t *f_pos)
{
	ssize_t rc;
	unsigned long flags;
	int bytes_done = 0;
	struct xilly_channel *channel = filp->private_data;

	int full, exhausted;
	/* Initializations are there only to silence warnings */

	int howmany = 0, bufpos = 0, bufidx = 0, bufferdone = 0;
	int end_offset_plus1 = 0;

	if (channel->endpoint->fatal_error)
		return -EIO;

	rc = mutex_lock_interruptible(&channel->rd_mutex);

	if (rc)
		return rc;

	rc = 0; /* Just to be clear about it. Compiler optimizes this out */

	while (1) {
		int bytes_to_do = count - bytes_done;

		spin_lock_irqsave(&channel->rd_spinlock, flags);

		full = channel->rd_full;

		if (!full) {
			bufidx = channel->rd_host_buf_idx;
			bufpos = channel->rd_host_buf_pos;
			howmany = channel->rd_buf_size - bufpos;

			/*
			 * Update rd_host_* to its state after this operation.
			 * count=0 means committing the buffer immediately,
			 * which is like flushing, but doesn't necessarily
			 * block.
			 */

			if ((howmany > bytes_to_do) &&
			    (count ||
			     ((bufpos >> channel->log2_element_size) == 0))) {
				bufferdone = 0;

				howmany = bytes_to_do;
				channel->rd_host_buf_pos += howmany;
			} else {
				bufferdone = 1;

				if (count) {
					end_offset_plus1 =
						channel->rd_buf_size >>
						channel->log2_element_size;
					channel->rd_host_buf_pos = 0;
				} else {
					unsigned char *tail;
					int i;

					end_offset_plus1 = bufpos >>
						channel->log2_element_size;

					channel->rd_host_buf_pos -=
						end_offset_plus1 <<
						channel->log2_element_size;

					tail = channel->
						rd_buffers[bufidx]->addr +
						(end_offset_plus1 <<
						 channel->log2_element_size);

					for (i = 0;
					     i < channel->rd_host_buf_pos;
					     i++)
						channel->rd_leftovers[i] =
							*tail++;
				}

				if (bufidx == channel->rd_fpga_buf_idx)
					channel->rd_full = 1;

				if (bufidx >= (channel->num_rd_buffers - 1))
					channel->rd_host_buf_idx = 0;
				else
					channel->rd_host_buf_idx++;
			}
		}

		/*
		 * Marking our situation after the possible changes above,
		 * for use after releasing the spinlock.
		 *
		 * full = full before change
		 * exhausted = full after possible change
		 */

		exhausted = channel->rd_full;

		spin_unlock_irqrestore(&channel->rd_spinlock, flags);

		if (!full) { /* Go on, now without the spinlock */
			unsigned char *head =
				channel->rd_buffers[bufidx]->addr;
			int i;

			if ((bufpos == 0) || /* Zero means it's virgin */
			    (channel->rd_leftovers[3] != 0)) {
				channel->endpoint->ephw->hw_sync_sgl_for_cpu(
					channel->endpoint,
					channel->rd_buffers[bufidx]->dma_addr,
					channel->rd_buf_size,
					DMA_TO_DEVICE);

				/* Virgin, but leftovers are due */
				for (i = 0; i < bufpos; i++)
					*head++ = channel->rd_leftovers[i];

				channel->rd_leftovers[3] = 0; /* Clear flag */
			}

			if (copy_from_user(
				    channel->rd_buffers[bufidx]->addr + bufpos,
				    userbuf, howmany))
				rc = -EFAULT;

			userbuf += howmany;
			bytes_done += howmany;

			if (bufferdone) {
				channel->endpoint->ephw->
					hw_sync_sgl_for_device(
						channel->endpoint,
						channel->rd_buffers[bufidx]->
						dma_addr,
						channel->rd_buf_size,
						DMA_TO_DEVICE);

				mutex_lock(&channel->endpoint->register_mutex);

				iowrite32(end_offset_plus1 - 1,
					  channel->endpoint->registers +
					  fpga_buf_offset_reg);

				iowrite32((channel->chan_num << 1) |
					  (2 << 24) | /* 2 = submit buffer */
					  (bufidx << 12),
					  channel->endpoint->registers +
					  fpga_buf_ctrl_reg);

				mutex_unlock(&channel->endpoint->
					     register_mutex);

				channel->rd_leftovers[3] =
					(channel->rd_host_buf_pos != 0);
			}

			if (rc) {
				mutex_unlock(&channel->rd_mutex);

				if (channel->endpoint->fatal_error)
					return -EIO;

				if (!channel->rd_synchronous)
					queue_delayed_work(
						xillybus_wq,
						&channel->rd_workitem,
						XILLY_RX_TIMEOUT);

				return rc;
			}
		}

		if (bytes_done >= count)
			break;

		if (!exhausted)
			continue; /* If there's more space, just go on */

		if ((bytes_done > 0) && channel->rd_allow_partial)
			break;

		/*
		 * Indefinite sleep with mutex taken. With data waiting for
		 * flushing, user should not be surprised if open() for write
		 * sleeps.
		 */

		if (filp->f_flags & O_NONBLOCK) {
			bytes_done = -EAGAIN;
			break;
		}

		wait_event_interruptible(channel->rd_wait,
					 (!channel->rd_full));

		if (channel->rd_full) {
			mutex_unlock(&channel->rd_mutex);

			if (channel->endpoint->fatal_error)
				return -EIO;

			if (bytes_done)
				return bytes_done;
			return -EINTR;
		}
	}

	mutex_unlock(&channel->rd_mutex);

	if (!channel->rd_synchronous)
		queue_delayed_work(xillybus_wq,
				   &channel->rd_workitem,
				   XILLY_RX_TIMEOUT);

	if ((channel->rd_synchronous) && (bytes_done > 0)) {
		rc = xillybus_myflush(filp->private_data, 0); /* No timeout */

		if (rc && (rc != -EINTR))
			return rc;
	}

	if (channel->endpoint->fatal_error)
		return -EIO;

	return bytes_done;
}

static int xillybus_open(struct inode *inode, struct file *filp)
{
	int rc = 0;
	unsigned long flags;
	int minor = iminor(inode);
	int major = imajor(inode);
	struct xilly_endpoint *ep_iter, *endpoint = NULL;
	struct xilly_channel *channel;

	mutex_lock(&ep_list_lock);

	list_for_each_entry(ep_iter, &list_of_endpoints, ep_list) {
		if ((ep_iter->major == major) &&
		    (minor >= ep_iter->lowest_minor) &&
		    (minor < (ep_iter->lowest_minor +
			      ep_iter->num_channels))) {
			endpoint = ep_iter;
			break;
		}
	}
	mutex_unlock(&ep_list_lock);

	if (!endpoint) {
		pr_err("xillybus: open() failed to find a device for major=%d and minor=%d\n",
		       major, minor);
		return -ENODEV;
	}

	if (endpoint->fatal_error)
		return -EIO;

	channel = endpoint->channels[1 + minor - endpoint->lowest_minor];
	filp->private_data = channel;

	/*
	 * It gets complicated because:
	 * 1. We don't want to take a mutex we don't have to
	 * 2. We don't want to open one direction if the other will fail.
	 */

	if ((filp->f_mode & FMODE_READ) && (!channel->num_wr_buffers))
		return -ENODEV;

	if ((filp->f_mode & FMODE_WRITE) && (!channel->num_rd_buffers))
		return -ENODEV;

	if ((filp->f_mode & FMODE_READ) && (filp->f_flags & O_NONBLOCK) &&
	    (channel->wr_synchronous || !channel->wr_allow_partial ||
	     !channel->wr_supports_nonempty)) {
		dev_err(endpoint->dev,
			"open() failed: O_NONBLOCK not allowed for read on this device\n");
		return -ENODEV;
	}

	if ((filp->f_mode & FMODE_WRITE) && (filp->f_flags & O_NONBLOCK) &&
	    (channel->rd_synchronous || !channel->rd_allow_partial)) {
		dev_err(endpoint->dev,
			"open() failed: O_NONBLOCK not allowed for write on this device\n");
		return -ENODEV;
	}

	/*
	 * Note: open() may block on getting mutexes despite O_NONBLOCK.
	 * This shouldn't occur normally, since multiple open of the same
	 * file descriptor is almost always prohibited anyhow
	 * (*_exclusive_open is normally set in real-life systems).
	 */

	if (filp->f_mode & FMODE_READ) {
		rc = mutex_lock_interruptible(&channel->wr_mutex);
		if (rc)
			return rc;
	}

	if (filp->f_mode & FMODE_WRITE) {
		rc = mutex_lock_interruptible(&channel->rd_mutex);
		if (rc)
			goto unlock_wr;
	}

	if ((filp->f_mode & FMODE_READ) &&
	    (channel->wr_ref_count != 0) &&
	    (channel->wr_exclusive_open)) {
		rc = -EBUSY;
		goto unlock;
	}

	if ((filp->f_mode & FMODE_WRITE) &&
	    (channel->rd_ref_count != 0) &&
	    (channel->rd_exclusive_open)) {
		rc = -EBUSY;
		goto unlock;
	}

	if (filp->f_mode & FMODE_READ) {
		if (channel->wr_ref_count == 0) { /* First open of file */
			/* Move the host to first buffer */
			spin_lock_irqsave(&channel->wr_spinlock, flags);
			channel->wr_host_buf_idx = 0;
			channel->wr_host_buf_pos = 0;
			channel->wr_fpga_buf_idx = -1;
			channel->wr_empty = 1;
			channel->wr_ready = 0;
			channel->wr_sleepy = 1;
			channel->wr_eof = -1;
			channel->wr_hangup = 0;

			spin_unlock_irqrestore(&channel->wr_spinlock, flags);

			iowrite32(1 | (channel->chan_num << 1) |
				  (4 << 24) |  /* Opcode 4, open channel */
				  ((channel->wr_synchronous & 1) << 23),
				  channel->endpoint->registers +
				  fpga_buf_ctrl_reg);
		}

		channel->wr_ref_count++;
	}

	if (filp->f_mode & FMODE_WRITE) {
		if (channel->rd_ref_count == 0) { /* First open of file */
			/* Move the host to first buffer */
			spin_lock_irqsave(&channel->rd_spinlock, flags);
			channel->rd_host_buf_idx = 0;
			channel->rd_host_buf_pos = 0;
			channel->rd_leftovers[3] = 0; /* No leftovers. */
			channel->rd_fpga_buf_idx = channel->num_rd_buffers - 1;
			channel->rd_full = 0;

			spin_unlock_irqrestore(&channel->rd_spinlock, flags);

			iowrite32((channel->chan_num << 1) |
				  (4 << 24),   /* Opcode 4, open channel */
				  channel->endpoint->registers +
				  fpga_buf_ctrl_reg);
		}

		channel->rd_ref_count++;
	}

unlock:
	if (filp->f_mode & FMODE_WRITE)
		mutex_unlock(&channel->rd_mutex);
unlock_wr:
	if (filp->f_mode & FMODE_READ)
		mutex_unlock(&channel->wr_mutex);

	if (!rc && (!channel->seekable))
		return nonseekable_open(inode, filp);

	return rc;
}

static int xillybus_release(struct inode *inode, struct file *filp)
{
	unsigned long flags;
	struct xilly_channel *channel = filp->private_data;

	int buf_idx;
	int eof;

	if (channel->endpoint->fatal_error)
		return -EIO;

	if (filp->f_mode & FMODE_WRITE) {
		mutex_lock(&channel->rd_mutex);

		channel->rd_ref_count--;

		if (channel->rd_ref_count == 0) {
			/*
			 * We rely on the kernel calling flush()
			 * before we get here.
			 */

			iowrite32((channel->chan_num << 1) | /* Channel ID */
				  (5 << 24),  /* Opcode 5, close channel */
				  channel->endpoint->registers +
				  fpga_buf_ctrl_reg);
		}
		mutex_unlock(&channel->rd_mutex);
	}

	if (filp->f_mode & FMODE_READ) {
		mutex_lock(&channel->wr_mutex);

		channel->wr_ref_count--;

		if (channel->wr_ref_count == 0) {
			iowrite32(1 | (channel->chan_num << 1) |
				  (5 << 24),  /* Opcode 5, close channel */
				  channel->endpoint->registers +
				  fpga_buf_ctrl_reg);

			/*
			 * This is crazily cautious: We make sure not only
			 * that we got an EOF (be it because we closed the
			 * channel or because of a user's EOF), but also that
			 * it's one beyond the last buffer arrived, so we
			 * have no leftover buffers pending before wrapping
			 * up (which can only happen in asynchronous
			 * channels, BTW).
			 */

			while (1) {
				spin_lock_irqsave(&channel->wr_spinlock,
						  flags);
				buf_idx = channel->wr_fpga_buf_idx;
				eof = channel->wr_eof;
				channel->wr_sleepy = 1;
				spin_unlock_irqrestore(&channel->wr_spinlock,
						       flags);

				/*
				 * Check if eof points at the buffer after
				 * the last one the FPGA submitted. Note that
				 * no EOF is marked by negative eof.
				 */

				buf_idx++;
				if (buf_idx == channel->num_wr_buffers)
					buf_idx = 0;

				if (buf_idx == eof)
					break;

				/*
				 * Steal an extra 100 ms if awakened by an
				 * interrupt. This is a simple workaround for
				 * an interrupt pending when entering, which
				 * would otherwise result in declaring the
				 * hardware non-responsive.
				 */

				if (wait_event_interruptible(
					    channel->wr_wait,
					    (!channel->wr_sleepy)))
					msleep(100);

				if (channel->wr_sleepy) {
					mutex_unlock(&channel->wr_mutex);
					dev_warn(channel->endpoint->dev,
						 "Hardware failed to respond to close command, therefore left in messy state.\n");
					return -EINTR;
				}
			}
		}

		mutex_unlock(&channel->wr_mutex);
	}

	return 0;
}

static loff_t xillybus_llseek(struct file *filp, loff_t offset, int whence)
{
	struct xilly_channel *channel = filp->private_data;
	loff_t pos = filp->f_pos;
	int rc = 0;

	/*
	 * Take both mutexes not allowing interrupts, since it seems like
	 * common applications don't expect an -EINTR here. Besides, multiple
	 * access to a single file descriptor on seekable devices is a mess
	 * anyhow.
	 */

	if (channel->endpoint->fatal_error)
		return -EIO;

	mutex_lock(&channel->wr_mutex);
	mutex_lock(&channel->rd_mutex);

	switch (whence) {
	case SEEK_SET:
		pos = offset;
		break;
	case SEEK_CUR:
		pos += offset;
		break;
	case SEEK_END:
		pos = offset; /* Going to the end => to the beginning */
		break;
	default:
		rc = -EINVAL;
		goto end;
	}

	/* In any case, we must finish on an element boundary */
	if (pos & ((1 << channel->log2_element_size) - 1)) {
		rc = -EINVAL;
		goto end;
	}

	mutex_lock(&channel->endpoint->register_mutex);

	iowrite32(pos >> channel->log2_element_size,
		  channel->endpoint->registers + fpga_buf_offset_reg);

	iowrite32((channel->chan_num << 1) |
		  (6 << 24),  /* Opcode 6, set address */
		  channel->endpoint->registers + fpga_buf_ctrl_reg);

	mutex_unlock(&channel->endpoint->register_mutex);

end:
	mutex_unlock(&channel->rd_mutex);
	mutex_unlock(&channel->wr_mutex);

	if (rc) /* Return error after releasing mutexes */
		return rc;

	filp->f_pos = pos;

	/*
	 * Since seekable devices are allowed only when the channel is
	 * synchronous, we assume that there is no data pending in either
	 * direction (which holds true as long as no concurrent access on the
	 * file descriptor takes place).
	 * The only thing we may need to throw away is leftovers from partial
	 * write() flush.
	 */

	channel->rd_leftovers[3] = 0;

	return pos;
}

static unsigned int xillybus_poll(struct file *filp, poll_table *wait)
{
	struct xilly_channel *channel = filp->private_data;
	unsigned int mask = 0;
	unsigned long flags;

	poll_wait(filp, &channel->endpoint->ep_wait, wait);

	/*
	 * poll() won't play ball regarding read() channels which aren't
	 * asynchronous and don't support the nonempty message. Allowing
	 * that would create situations where data has been delivered at
	 * the FPGA, and users expect select() to wake up, which it may
	 * not.
	 */

	if (!channel->wr_synchronous && channel->wr_supports_nonempty) {
		poll_wait(filp, &channel->wr_wait, wait);
		poll_wait(filp, &channel->wr_ready_wait, wait);

		spin_lock_irqsave(&channel->wr_spinlock, flags);
		if (!channel->wr_empty || channel->wr_ready)
			mask |= POLLIN | POLLRDNORM;

		if (channel->wr_hangup)
			/*
			 * Not POLLHUP, because its behavior is in the
			 * mist, and POLLIN does what we want: Wake up
			 * the read file descriptor so it sees EOF.
			 */
			mask |= POLLIN | POLLRDNORM;
		spin_unlock_irqrestore(&channel->wr_spinlock, flags);
	}

	/*
	 * If partial data write is disallowed on a write() channel,
	 * it's pointless to ever signal OK to write, because it could
	 * block despite some space being available.
	 */

	if (channel->rd_allow_partial) {
		poll_wait(filp, &channel->rd_wait, wait);

		spin_lock_irqsave(&channel->rd_spinlock, flags);
		if (!channel->rd_full)
			mask |= POLLOUT | POLLWRNORM;
		spin_unlock_irqrestore(&channel->rd_spinlock, flags);
	}

	if (channel->endpoint->fatal_error)
		mask |= POLLERR;

	return mask;
}

static const struct file_operations xillybus_fops = {
	.owner = THIS_MODULE,
	.read = xillybus_read,
	.write = xillybus_write,
	.open = xillybus_open,
	.flush = xillybus_flush,
	.release = xillybus_release,
	.llseek = xillybus_llseek,
	.poll = xillybus_poll,
};

static int xillybus_init_chrdev(struct xilly_endpoint *endpoint,
				const unsigned char *idt)
{
	int rc;
	dev_t dev;
	int devnum, i, minor, major;
	char devname[48];
	struct device *device;

	rc = alloc_chrdev_region(&dev, 0, /* minor start */
				 endpoint->num_channels,
				 xillyname);

	if (rc) {
		dev_warn(endpoint->dev, "Failed to obtain major/minors\n");
		goto error1;
	}

	endpoint->major = major = MAJOR(dev);
	endpoint->lowest_minor = minor = MINOR(dev);

	cdev_init(&endpoint->cdev, &xillybus_fops);
	endpoint->cdev.owner = endpoint->ephw->owner;
	rc = cdev_add(&endpoint->cdev, MKDEV(major, minor),
		      endpoint->num_channels);
	if (rc) {
		dev_warn(endpoint->dev, "Failed to add cdev. Aborting.\n");
		goto error2;
	}

	idt++;

	for (i = minor, devnum = 0;
	     devnum < endpoint->num_channels;
	     devnum++, i++) {
		snprintf(devname, sizeof(devname)-1, "xillybus_%s", idt);

		devname[sizeof(devname)-1] = 0; /* Should never matter */

		while (*idt++)
			/* Skip to next name in the IDT */;

		device = device_create(xillybus_class,
				       NULL,
				       MKDEV(major, i),
				       NULL,
				       "%s", devname);

		if (IS_ERR(device)) {
			dev_warn(endpoint->dev,
				 "Failed to create %s device. Aborting.\n",
				 devname);
			rc = -ENODEV; /* Was silently 0 here; fixed */
			goto error3;
		}
	}

	dev_info(endpoint->dev, "Created %d device files.\n",
		 endpoint->num_channels);
	return 0; /* succeed */

error3:
	devnum--; i--;
	for (; devnum >= 0; devnum--, i--)
		device_destroy(xillybus_class, MKDEV(major, i));

	cdev_del(&endpoint->cdev);
error2:
	unregister_chrdev_region(MKDEV(major, minor), endpoint->num_channels);
error1:
	return rc;
}

static void xillybus_cleanup_chrdev(struct xilly_endpoint *endpoint)
{
	int minor;

	for (minor = endpoint->lowest_minor;
	     minor < (endpoint->lowest_minor + endpoint->num_channels);
	     minor++)
		device_destroy(xillybus_class, MKDEV(endpoint->major, minor));
	cdev_del(&endpoint->cdev);
	unregister_chrdev_region(MKDEV(endpoint->major,
				       endpoint->lowest_minor),
				 endpoint->num_channels);

	dev_info(endpoint->dev, "Removed %d device files.\n",
		 endpoint->num_channels);
}

struct xilly_endpoint *xillybus_init_endpoint(struct pci_dev *pdev,
					      struct device *dev,
					      struct xilly_endpoint_hardware
					      *ephw)
{
	struct xilly_endpoint *endpoint;

	endpoint = devm_kzalloc(dev, sizeof(*endpoint), GFP_KERNEL);
	if (!endpoint)
		return NULL;

	endpoint->pdev = pdev;
	endpoint->dev = dev;
	endpoint->ephw = ephw;
	endpoint->msg_counter = 0x0b;
	endpoint->failed_messages = 0;
	endpoint->fatal_error = 0;

	init_waitqueue_head(&endpoint->ep_wait);
	mutex_init(&endpoint->register_mutex);

	return endpoint;
}
EXPORT_SYMBOL(xillybus_init_endpoint);

static int xilly_quiesce(struct xilly_endpoint *endpoint)
{
	endpoint->idtlen = -1;

	iowrite32((u32) (endpoint->dma_using_dac & 0x0001),
		  endpoint->registers + fpga_dma_control_reg);

	wait_event_interruptible_timeout(endpoint->ep_wait,
					 (endpoint->idtlen >= 0),
					 XILLY_TIMEOUT);

	if (endpoint->idtlen < 0) {
		dev_err(endpoint->dev,
			"Failed to quiesce the device on exit.\n");
		return -ENODEV;
	}
	return 0; /* Success */
}

int xillybus_endpoint_discovery(struct xilly_endpoint *endpoint)
{
	int rc = 0;

	void *bootstrap_resources;
	int idtbuffersize = (1 << PAGE_SHIFT);
	struct device *dev = endpoint->dev;

	/*
	 * The bogus IDT is used during bootstrap for allocating the initial
	 * message buffer, and then the message buffer and space for the IDT
	 * itself. The initial message buffer is of a single page's size, but
	 * it's soon replaced with a more modest one (and memory is freed).
	 */

	unsigned char bogus_idt[8] = { 1, 224, (PAGE_SHIFT)-2, 0,
				       3, 192, PAGE_SHIFT, 0 };
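
	/*
	 * Editor's note (a worked decode, using the chandesc format that
	 * xilly_setupchannels() parses): the first entry, {1, 224,
	 * PAGE_SHIFT-2, 0}, describes write-direction channel 0, i.e. the
	 * message buffer: synchronous, 2^(PAGE_SHIFT-2) four-byte words =
	 * one page, one buffer. The second, {3, 192, PAGE_SHIFT, 0}, gives
	 * write channel 1 a single page-sized buffer of one-byte elements,
	 * used to receive the IDT itself.
	 */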
	struct xilly_idt_handle idt_handle;

	/*
	 * Writing the value 0x00000001 to the endianness register signals
	 * which endianness this processor is using, so the FPGA can swap
	 * words as necessary.
	 */

	iowrite32(1, endpoint->registers + fpga_endian_reg);

	/* Bootstrap phase I: Allocate temporary message buffer */

	bootstrap_resources = devres_open_group(dev, NULL, GFP_KERNEL);
	if (!bootstrap_resources)
		return -ENOMEM;

	endpoint->num_channels = 0;

	rc = xilly_setupchannels(endpoint, bogus_idt, 1);

	if (rc)
		return rc;

	/* Clear the message subsystem (and counter in particular) */
	iowrite32(0x04, endpoint->registers + fpga_msg_ctrl_reg);

	endpoint->idtlen = -1;

	/*
	 * Set DMA 32/64 bit mode, quiesce the device (?!) and get IDT
	 * buffer size.
	 */
	iowrite32((u32) (endpoint->dma_using_dac & 0x0001),
		  endpoint->registers + fpga_dma_control_reg);

	wait_event_interruptible_timeout(endpoint->ep_wait,
					 (endpoint->idtlen >= 0),
					 XILLY_TIMEOUT);

	if (endpoint->idtlen < 0) {
		dev_err(endpoint->dev, "No response from FPGA. Aborting.\n");
		return -ENODEV;
	}

	/* Enable DMA */
	iowrite32((u32) (0x0002 | (endpoint->dma_using_dac & 0x0001)),
		  endpoint->registers + fpga_dma_control_reg);

	/* Bootstrap phase II: Allocate buffer for IDT and obtain it */
	while (endpoint->idtlen >= idtbuffersize) {
		idtbuffersize *= 2;
		bogus_idt[6]++;
	}

	endpoint->num_channels = 1;

	rc = xilly_setupchannels(endpoint, bogus_idt, 2);

	if (rc)
		goto failed_idt;

	rc = xilly_obtain_idt(endpoint);

	if (rc)
		goto failed_idt;

	xilly_scan_idt(endpoint, &idt_handle);

	if (!idt_handle.chandesc) {
		rc = -ENODEV;
		goto failed_idt;
	}

	devres_close_group(dev, bootstrap_resources);

	/* Bootstrap phase III: Allocate buffers according to IDT */

	rc = xilly_setupchannels(endpoint,
				 idt_handle.chandesc,
				 idt_handle.entries);

	if (rc)
		goto failed_idt;

	/*
	 * endpoint is now completely configured. We put it on the list
	 * available to open() before registering the char device(s).
	 */

	mutex_lock(&ep_list_lock);
	list_add_tail(&endpoint->ep_list, &list_of_endpoints);
	mutex_unlock(&ep_list_lock);

	rc = xillybus_init_chrdev(endpoint, idt_handle.idt);

	if (rc)
		goto failed_chrdevs;

	devres_release_group(dev, bootstrap_resources);

	return 0;

failed_chrdevs:
	mutex_lock(&ep_list_lock);
	list_del(&endpoint->ep_list);
	mutex_unlock(&ep_list_lock);

failed_idt:
	xilly_quiesce(endpoint);
	flush_workqueue(xillybus_wq);

	return rc;
}
EXPORT_SYMBOL(xillybus_endpoint_discovery);
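
/*
 * Editor's illustrative sketch (not part of this file): the two exported
 * entry points above are intended for a bus-specific module (PCIe or
 * device-tree glue). A probe function would plausibly look like the
 * following; the function name and the example_ephw ops structure are
 * assumptions for illustration.
 */
#if 0
static int example_probe(struct pci_dev *pdev,
			 const struct pci_device_id *ent)
{
	struct xilly_endpoint *endpoint;
	int rc;

	endpoint = xillybus_init_endpoint(pdev, &pdev->dev, &example_ephw);
	if (!endpoint)
		return -ENOMEM;

	/*
	 * ... map BAR 0 into endpoint->registers, set the DMA mask
	 * (endpoint->dma_using_dac), and register xillybus_isr as the
	 * MSI handler here ...
	 */

	rc = xillybus_endpoint_discovery(endpoint);
	if (rc)
		return rc;

	pci_set_drvdata(pdev, endpoint);
	return 0;
}
#endif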

void xillybus_endpoint_remove(struct xilly_endpoint *endpoint)
{
	xillybus_cleanup_chrdev(endpoint);

	mutex_lock(&ep_list_lock);
	list_del(&endpoint->ep_list);
	mutex_unlock(&ep_list_lock);

	xilly_quiesce(endpoint);

	/*
	 * Flushing is done upon endpoint release to prevent access to memory
	 * just about to be released. This makes the quiesce complete.
	 */
	flush_workqueue(xillybus_wq);
}
EXPORT_SYMBOL(xillybus_endpoint_remove);

static int __init xillybus_init(void)
{
	int rc = 0; /* Was used undeclared; declaration restored */

	mutex_init(&ep_list_lock);

	xillybus_class = class_create(THIS_MODULE, xillyname);
	if (IS_ERR(xillybus_class))
		return PTR_ERR(xillybus_class);

	xillybus_wq = alloc_workqueue(xillyname, 0, 0);
	if (!xillybus_wq) {
		class_destroy(xillybus_class);
		rc = -ENOMEM;
	}

	return rc;
}

static void __exit xillybus_exit(void)
{
	/* flush_workqueue() was called for each endpoint released */
	destroy_workqueue(xillybus_wq);

	class_destroy(xillybus_class);
}

module_init(xillybus_init);
module_exit(xillybus_exit);