// SPDX-License-Identifier: GPL-2.0+
/*
 * comedi_buf.c
 *
 * COMEDI - Linux Control and Measurement Device Interface
 * Copyright (C) 1997-2000 David A. Schleef <ds@schleef.org>
 * Copyright (C) 2002 Frank Mori Hess <fmhess@users.sourceforge.net>
 */

#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/comedi/comedidev.h>

#include "comedi_internal.h"

#ifdef PAGE_KERNEL_NOCACHE
#define COMEDI_PAGE_PROTECTION PAGE_KERNEL_NOCACHE
#else
#define COMEDI_PAGE_PROTECTION PAGE_KERNEL
#endif

static void comedi_buf_map_kref_release(struct kref *kref)
{
	struct comedi_buf_map *bm =
		container_of(kref, struct comedi_buf_map, refcount);
	struct comedi_buf_page *buf;
	unsigned int i;

	if (bm->page_list) {
		if (bm->dma_dir != DMA_NONE) {
			/*
			 * DMA buffer was allocated as a single block.
			 * Address is in page_list[0].
			 */
			buf = &bm->page_list[0];
			dma_free_coherent(bm->dma_hw_dev,
					  PAGE_SIZE * bm->n_pages,
					  buf->virt_addr, buf->dma_addr);
		} else {
			for (i = 0; i < bm->n_pages; i++) {
				buf = &bm->page_list[i];
				ClearPageReserved(virt_to_page(buf->virt_addr));
				free_page((unsigned long)buf->virt_addr);
			}
		}
		vfree(bm->page_list);
	}
	if (bm->dma_dir != DMA_NONE)
		put_device(bm->dma_hw_dev);
	kfree(bm);
}

static void __comedi_buf_free(struct comedi_device *dev,
			      struct comedi_subdevice *s)
{
	struct comedi_async *async = s->async;
	struct comedi_buf_map *bm;
	unsigned long flags;

	if (async->prealloc_buf) {
		if (s->async_dma_dir == DMA_NONE)
			vunmap(async->prealloc_buf);
		async->prealloc_buf = NULL;
		async->prealloc_bufsz = 0;
	}

	spin_lock_irqsave(&s->spin_lock, flags);
	bm = async->buf_map;
	async->buf_map = NULL;
	spin_unlock_irqrestore(&s->spin_lock, flags);
	comedi_buf_map_put(bm);
}

static struct comedi_buf_map *
comedi_buf_map_alloc(struct comedi_device *dev, enum dma_data_direction dma_dir,
		     unsigned int n_pages)
{
	struct comedi_buf_map *bm;
	struct comedi_buf_page *buf;
	unsigned int i;

	bm = kzalloc(sizeof(*bm), GFP_KERNEL);
	if (!bm)
		return NULL;

	kref_init(&bm->refcount);
	bm->dma_dir = dma_dir;
	if (bm->dma_dir != DMA_NONE) {
		/* Need ref to hardware device to free buffer later. */
		bm->dma_hw_dev = get_device(dev->hw_dev);
	}

	bm->page_list = vzalloc(sizeof(*buf) * n_pages);
	if (!bm->page_list)
		goto err;

	if (bm->dma_dir != DMA_NONE) {
		void *virt_addr;
		dma_addr_t dma_addr;

		/*
		 * Currently, the DMA buffer needs to be allocated as a
		 * single block so that it can be mmap()'ed.
		 */
		virt_addr = dma_alloc_coherent(bm->dma_hw_dev,
					       PAGE_SIZE * n_pages, &dma_addr,
					       GFP_KERNEL);
		if (!virt_addr)
			goto err;

		for (i = 0; i < n_pages; i++) {
			buf = &bm->page_list[i];
			buf->virt_addr = virt_addr + (i << PAGE_SHIFT);
			buf->dma_addr = dma_addr + (i << PAGE_SHIFT);
		}

		bm->n_pages = i;
	} else {
		for (i = 0; i < n_pages; i++) {
			buf = &bm->page_list[i];
			buf->virt_addr = (void *)get_zeroed_page(GFP_KERNEL);
			if (!buf->virt_addr)
				break;

			SetPageReserved(virt_to_page(buf->virt_addr));
		}

		bm->n_pages = i;
		if (i < n_pages)
			goto err;
	}

	return bm;

err:
	comedi_buf_map_put(bm);
	return NULL;
}

static void __comedi_buf_alloc(struct comedi_device *dev,
			       struct comedi_subdevice *s,
			       unsigned int n_pages)
{
	struct comedi_async *async = s->async;
	struct page **pages = NULL;
	struct comedi_buf_map *bm;
	struct comedi_buf_page *buf;
	unsigned long flags;
	unsigned int i;

	if (!IS_ENABLED(CONFIG_HAS_DMA) && s->async_dma_dir != DMA_NONE) {
		dev_err(dev->class_dev,
			"dma buffer allocation not supported\n");
		return;
	}

	bm = comedi_buf_map_alloc(dev, s->async_dma_dir, n_pages);
	if (!bm)
		return;

	spin_lock_irqsave(&s->spin_lock, flags);
	async->buf_map = bm;
	spin_unlock_irqrestore(&s->spin_lock, flags);

	if (bm->dma_dir != DMA_NONE) {
		/*
		 * DMA buffer was allocated as a single block.
		 * Address is in page_list[0].
		 */
		buf = &bm->page_list[0];
		async->prealloc_buf = buf->virt_addr;
	} else {
		pages = vmalloc(sizeof(struct page *) * n_pages);
		if (!pages)
			return;

		for (i = 0; i < n_pages; i++) {
			buf = &bm->page_list[i];
			pages[i] = virt_to_page(buf->virt_addr);
		}

		/* vmap the pages to prealloc_buf */
		async->prealloc_buf = vmap(pages, n_pages, VM_MAP,
					   COMEDI_PAGE_PROTECTION);

		vfree(pages);
	}
}

void comedi_buf_map_get(struct comedi_buf_map *bm)
{
	if (bm)
		kref_get(&bm->refcount);
}

int comedi_buf_map_put(struct comedi_buf_map *bm)
{
	if (bm)
		return kref_put(&bm->refcount, comedi_buf_map_kref_release);
	return 1;
}

/* helper for "access" vm operation */
int comedi_buf_map_access(struct comedi_buf_map *bm, unsigned long offset,
			  void *buf, int len, int write)
{
	unsigned int pgoff = offset_in_page(offset);
	unsigned long pg = offset >> PAGE_SHIFT;
	int done = 0;

	while (done < len && pg < bm->n_pages) {
		int l = min_t(int, len - done, PAGE_SIZE - pgoff);
		void *b = bm->page_list[pg].virt_addr + pgoff;

		if (write)
			memcpy(b, buf, l);
		else
			memcpy(buf, b, l);
		buf += l;
		done += l;
		pg++;
		pgoff = 0;
	}
	return done;
}

/* returns s->async->buf_map and increments its kref refcount */
struct comedi_buf_map *
comedi_buf_map_from_subdev_get(struct comedi_subdevice *s)
{
	struct comedi_async *async = s->async;
	struct comedi_buf_map *bm = NULL;
	unsigned long flags;

	if (!async)
		return NULL;

	spin_lock_irqsave(&s->spin_lock, flags);
	bm = async->buf_map;
	/* only want it if buffer pages allocated */
	if (bm && bm->n_pages)
		comedi_buf_map_get(bm);
	else
		bm = NULL;
	spin_unlock_irqrestore(&s->spin_lock, flags);

	return bm;
}
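
/*
 * Illustrative sketch (not part of the original file): a typical caller,
 * for instance the core's mmap handling, pairs the "get" above with a put
 * once it is done with the page list.  The local variable name is
 * hypothetical.
 *
 *	struct comedi_buf_map *bm = comedi_buf_map_from_subdev_get(s);
 *
 *	if (bm) {
 *		// ... use bm->page_list and bm->n_pages ...
 *		comedi_buf_map_put(bm);
 *	}
 */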

bool comedi_buf_is_mmapped(struct comedi_subdevice *s)
{
	struct comedi_buf_map *bm = s->async->buf_map;

	return bm && (kref_read(&bm->refcount) > 1);
}

int comedi_buf_alloc(struct comedi_device *dev, struct comedi_subdevice *s,
		     unsigned long new_size)
{
	struct comedi_async *async = s->async;

	lockdep_assert_held(&dev->mutex);

	/* Round up new_size to multiple of PAGE_SIZE */
	new_size = (new_size + PAGE_SIZE - 1) & PAGE_MASK;

	/* if no change is required, do nothing */
	if (async->prealloc_buf && async->prealloc_bufsz == new_size)
		return 0;

	/* deallocate old buffer */
	__comedi_buf_free(dev, s);

	/* allocate new buffer */
	if (new_size) {
		unsigned int n_pages = new_size >> PAGE_SHIFT;

		__comedi_buf_alloc(dev, s, n_pages);

		if (!async->prealloc_buf) {
			/* allocation failed */
			__comedi_buf_free(dev, s);
			return -ENOMEM;
		}
	}
	async->prealloc_bufsz = new_size;

	return 0;
}
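
/*
 * Illustrative sketch (an assumption, not from the original file): the core
 * resizes the acquisition buffer with dev->mutex held, e.g. when handling a
 * buffer-size request from user space.  The 64 KiB figure is only an example
 * value; it is rounded up to a whole number of pages above.
 *
 *	mutex_lock(&dev->mutex);
 *	retval = comedi_buf_alloc(dev, s, 65536);
 *	mutex_unlock(&dev->mutex);
 */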

void comedi_buf_reset(struct comedi_subdevice *s)
{
	struct comedi_async *async = s->async;

	async->buf_write_alloc_count = 0;
	async->buf_write_count = 0;
	async->buf_read_alloc_count = 0;
	async->buf_read_count = 0;

	async->buf_write_ptr = 0;
	async->buf_read_ptr = 0;

	async->cur_chan = 0;
	async->scans_done = 0;
	async->scan_progress = 0;
	async->munge_chan = 0;
	async->munge_count = 0;
	async->munge_ptr = 0;

	async->events = 0;
}

static unsigned int comedi_buf_write_n_unalloc(struct comedi_subdevice *s)
{
	struct comedi_async *async = s->async;
	unsigned int free_end = async->buf_read_count + async->prealloc_bufsz;

	return free_end - async->buf_write_alloc_count;
}

unsigned int comedi_buf_write_n_available(struct comedi_subdevice *s)
{
	struct comedi_async *async = s->async;
	unsigned int free_end = async->buf_read_count + async->prealloc_bufsz;

	return free_end - async->buf_write_count;
}

/**
 * comedi_buf_write_alloc() - Reserve buffer space for writing
 * @s: COMEDI subdevice.
 * @nbytes: Maximum space to reserve in bytes.
 *
 * Reserve up to @nbytes bytes of space to be written in the COMEDI acquisition
 * data buffer associated with the subdevice. The amount reserved is limited
 * by the space available.
 *
 * Return: The amount of space reserved in bytes.
 */
unsigned int comedi_buf_write_alloc(struct comedi_subdevice *s,
				    unsigned int nbytes)
{
	struct comedi_async *async = s->async;
	unsigned int unalloc = comedi_buf_write_n_unalloc(s);

	if (nbytes > unalloc)
		nbytes = unalloc;

	async->buf_write_alloc_count += nbytes;

	/*
	 * ensure the async buffer 'counts' are read and updated
	 * before we write data to the write-alloc'ed buffer space
	 */
	smp_mb();

	return nbytes;
}
EXPORT_SYMBOL_GPL(comedi_buf_write_alloc);

/*
 * munging is applied to data by core as it passes between user
 * and kernel space
 */
static unsigned int comedi_buf_munge(struct comedi_subdevice *s,
				     unsigned int num_bytes)
{
	struct comedi_async *async = s->async;
	unsigned int count = 0;
	const unsigned int num_sample_bytes = comedi_bytes_per_sample(s);

	if (!s->munge || (async->cmd.flags & CMDF_RAWDATA)) {
		async->munge_count += num_bytes;
		return num_bytes;
	}

	/* don't munge partial samples */
	num_bytes -= num_bytes % num_sample_bytes;
	while (count < num_bytes) {
		int block_size = num_bytes - count;
		unsigned int buf_end;

		buf_end = async->prealloc_bufsz - async->munge_ptr;
		if (block_size > buf_end)
			block_size = buf_end;

		s->munge(s->device, s,
			 async->prealloc_buf + async->munge_ptr,
			 block_size, async->munge_chan);

		/*
		 * ensure data is munged in buffer before the
		 * async buffer munge_count is incremented
		 */
		smp_wmb();

		async->munge_chan += block_size / num_sample_bytes;
		async->munge_chan %= async->cmd.chanlist_len;
		async->munge_count += block_size;
		async->munge_ptr += block_size;
		async->munge_ptr %= async->prealloc_bufsz;
		count += block_size;
	}

	return count;
}

unsigned int comedi_buf_write_n_allocated(struct comedi_subdevice *s)
{
	struct comedi_async *async = s->async;

	return async->buf_write_alloc_count - async->buf_write_count;
}

/**
 * comedi_buf_write_free() - Free buffer space after it is written
 * @s: COMEDI subdevice.
 * @nbytes: Maximum space to free in bytes.
 *
 * Free up to @nbytes bytes of space previously reserved for writing in the
 * COMEDI acquisition data buffer associated with the subdevice. The amount of
 * space freed is limited to the amount that was reserved. The freed space is
 * assumed to have been filled with sample data by the writer.
 *
 * If the samples in the freed space need to be "munged", do so here. The
 * freed space becomes available for allocation by the reader.
 *
 * Return: The amount of space freed in bytes.
 */
unsigned int comedi_buf_write_free(struct comedi_subdevice *s,
				   unsigned int nbytes)
{
	struct comedi_async *async = s->async;
	unsigned int allocated = comedi_buf_write_n_allocated(s);

	if (nbytes > allocated)
		nbytes = allocated;

	async->buf_write_count += nbytes;
	async->buf_write_ptr += nbytes;
	comedi_buf_munge(s, async->buf_write_count - async->munge_count);
	if (async->buf_write_ptr >= async->prealloc_bufsz)
		async->buf_write_ptr %= async->prealloc_bufsz;

	return nbytes;
}
EXPORT_SYMBOL_GPL(comedi_buf_write_free);
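
/*
 * Illustrative sketch (an assumption, not from the original file): a driver
 * that fills the buffer directly, e.g. from a DMA completion handler, pairs
 * the reserve/commit calls around its copy.  The variable names are
 * hypothetical; comedi_buf_write_samples() below wraps the same sequence for
 * the common case.
 *
 *	unsigned int n = comedi_buf_write_alloc(s, nbytes_from_hardware);
 *
 *	// copy up to n bytes into the buffer, starting at
 *	// s->async->buf_write_ptr and respecting the wrap at
 *	// s->async->prealloc_bufsz
 *	comedi_buf_write_free(s, n);
 *	if (n < nbytes_from_hardware)
 *		s->async->events |= COMEDI_CB_OVERFLOW;
 */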

/**
 * comedi_buf_read_n_available() - Determine amount of readable buffer space
 * @s: COMEDI subdevice.
 *
 * Determine the amount of readable buffer space in the COMEDI acquisition data
 * buffer associated with the subdevice. The readable buffer space is that
 * which has been freed by the writer and "munged" to the sample data format
 * expected by COMEDI if necessary.
 *
 * Return: The amount of readable buffer space.
 */
unsigned int comedi_buf_read_n_available(struct comedi_subdevice *s)
{
	struct comedi_async *async = s->async;
	unsigned int num_bytes;

	if (!async)
		return 0;

	num_bytes = async->munge_count - async->buf_read_count;

	/*
	 * ensure the async buffer 'counts' are read before we
	 * attempt to read data from the buffer
	 */
	smp_rmb();

	return num_bytes;
}
EXPORT_SYMBOL_GPL(comedi_buf_read_n_available);

/**
 * comedi_buf_read_alloc() - Reserve buffer space for reading
 * @s: COMEDI subdevice.
 * @nbytes: Maximum space to reserve in bytes.
 *
 * Reserve up to @nbytes bytes of previously written and "munged" buffer space
 * for reading in the COMEDI acquisition data buffer associated with the
 * subdevice. The amount reserved is limited to the space available. The
 * reader can read from the reserved space and then free it. A reader is also
 * allowed to read from the space before reserving it as long as it determines
 * the amount of readable data available, but the space needs to be marked as
 * reserved before it can be freed.
 *
 * Return: The amount of space reserved in bytes.
 */
unsigned int comedi_buf_read_alloc(struct comedi_subdevice *s,
				   unsigned int nbytes)
{
	struct comedi_async *async = s->async;
	unsigned int available;

	available = async->munge_count - async->buf_read_alloc_count;
	if (nbytes > available)
		nbytes = available;

	async->buf_read_alloc_count += nbytes;

	/*
	 * ensure the async buffer 'counts' are read before we
	 * attempt to read data from the read-alloc'ed buffer space
	 */
	smp_rmb();

	return nbytes;
}
EXPORT_SYMBOL_GPL(comedi_buf_read_alloc);

static unsigned int comedi_buf_read_n_allocated(struct comedi_async *async)
{
	return async->buf_read_alloc_count - async->buf_read_count;
}

/**
 * comedi_buf_read_free() - Free buffer space after it has been read
 * @s: COMEDI subdevice.
 * @nbytes: Maximum space to free in bytes.
 *
 * Free up to @nbytes bytes of buffer space previously reserved for reading in
 * the COMEDI acquisition data buffer associated with the subdevice. The
 * amount of space freed is limited to the amount that was reserved.
 *
 * The freed space becomes available for allocation by the writer.
 *
 * Return: The amount of space freed in bytes.
 */
unsigned int comedi_buf_read_free(struct comedi_subdevice *s,
				  unsigned int nbytes)
{
	struct comedi_async *async = s->async;
	unsigned int allocated;

	/*
	 * ensure data has been read out of buffer before
	 * the async read count is incremented
	 */
	smp_mb();

	allocated = comedi_buf_read_n_allocated(async);
	if (nbytes > allocated)
		nbytes = allocated;

	async->buf_read_count += nbytes;
	async->buf_read_ptr += nbytes;
	async->buf_read_ptr %= async->prealloc_bufsz;
	return nbytes;
}
EXPORT_SYMBOL_GPL(comedi_buf_read_free);
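
/*
 * Illustrative sketch (an assumption, not from the original file): a consumer
 * that drains the buffer directly, e.g. an analog-output driver feeding its
 * FIFO, pairs the reserve/free calls around its copy.  The variable name is
 * hypothetical; comedi_buf_read_samples() below wraps the same sequence.
 *
 *	unsigned int n = comedi_buf_read_n_available(s);
 *
 *	n = comedi_buf_read_alloc(s, n);
 *	// copy n bytes out of the buffer, starting at
 *	// s->async->buf_read_ptr and respecting the wrap at
 *	// s->async->prealloc_bufsz
 *	comedi_buf_read_free(s, n);
 */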

static void comedi_buf_memcpy_to(struct comedi_subdevice *s,
				 const void *data, unsigned int num_bytes)
{
	struct comedi_async *async = s->async;
	unsigned int write_ptr = async->buf_write_ptr;

	while (num_bytes) {
		unsigned int block_size;

		if (write_ptr + num_bytes > async->prealloc_bufsz)
			block_size = async->prealloc_bufsz - write_ptr;
		else
			block_size = num_bytes;

		memcpy(async->prealloc_buf + write_ptr, data, block_size);

		data += block_size;
		num_bytes -= block_size;

		write_ptr = 0;
	}
}

static void comedi_buf_memcpy_from(struct comedi_subdevice *s,
				   void *dest, unsigned int nbytes)
{
	void *src;
	struct comedi_async *async = s->async;
	unsigned int read_ptr = async->buf_read_ptr;

	while (nbytes) {
		unsigned int block_size;

		src = async->prealloc_buf + read_ptr;

		if (nbytes >= async->prealloc_bufsz - read_ptr)
			block_size = async->prealloc_bufsz - read_ptr;
		else
			block_size = nbytes;

		memcpy(dest, src, block_size);
		nbytes -= block_size;
		dest += block_size;
		read_ptr = 0;
	}
}

/**
 * comedi_buf_write_samples() - Write sample data to COMEDI buffer
 * @s: COMEDI subdevice.
 * @data: Pointer to source samples.
 * @nsamples: Number of samples to write.
 *
 * Write up to @nsamples samples to the COMEDI acquisition data buffer
 * associated with the subdevice, mark it as written and update the
 * acquisition scan progress. If there is not enough room for the specified
 * number of samples, the number of samples written is limited to the number
 * that will fit and the %COMEDI_CB_OVERFLOW event flag is set to cause the
 * acquisition to terminate with an overrun error. Set the %COMEDI_CB_BLOCK
 * event flag if any samples are written to cause waiting tasks to be woken
 * when the event flags are processed.
 *
 * Return: The amount of data written in bytes.
 */
unsigned int comedi_buf_write_samples(struct comedi_subdevice *s,
				      const void *data, unsigned int nsamples)
{
	unsigned int max_samples;
	unsigned int nbytes;

	/*
	 * Make sure there is enough room in the buffer for all the samples.
	 * If not, clamp the nsamples to the number that will fit, flag the
	 * buffer overrun and add the samples that fit.
	 */
	max_samples = comedi_bytes_to_samples(s, comedi_buf_write_n_unalloc(s));
	if (nsamples > max_samples) {
		dev_warn(s->device->class_dev, "buffer overrun\n");
		s->async->events |= COMEDI_CB_OVERFLOW;
		nsamples = max_samples;
	}

	if (nsamples == 0)
		return 0;

	nbytes = comedi_buf_write_alloc(s,
					comedi_samples_to_bytes(s, nsamples));
	comedi_buf_memcpy_to(s, data, nbytes);
	comedi_buf_write_free(s, nbytes);
	comedi_inc_scan_progress(s, nbytes);
	s->async->events |= COMEDI_CB_BLOCK;

	return nbytes;
}
EXPORT_SYMBOL_GPL(comedi_buf_write_samples);
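
/*
 * Illustrative sketch (an assumption, not from the original file): an
 * analog-input driver's interrupt handler typically pushes each sample it
 * reads from hardware and then lets the core process the event flags.  The
 * hardware-access helper below is hypothetical.
 *
 *	unsigned short val = read_sample_from_hardware(dev);
 *
 *	comedi_buf_write_samples(s, &val, 1);
 *	comedi_handle_events(dev, s);
 */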

/**
 * comedi_buf_read_samples() - Read sample data from COMEDI buffer
 * @s: COMEDI subdevice.
 * @data: Pointer to destination.
 * @nsamples: Maximum number of samples to read.
 *
 * Read up to @nsamples samples from the COMEDI acquisition data buffer
 * associated with the subdevice, mark it as read and update the acquisition
 * scan progress. Limit the number of samples read to the number available.
 * Set the %COMEDI_CB_BLOCK event flag if any samples are read to cause waiting
 * tasks to be woken when the event flags are processed.
 *
 * Return: The amount of data read in bytes.
 */
unsigned int comedi_buf_read_samples(struct comedi_subdevice *s,
				     void *data, unsigned int nsamples)
{
	unsigned int max_samples;
	unsigned int nbytes;

	/* clamp nsamples to the number of full samples available */
	max_samples = comedi_bytes_to_samples(s,
					      comedi_buf_read_n_available(s));
	if (nsamples > max_samples)
		nsamples = max_samples;

	if (nsamples == 0)
		return 0;

	nbytes = comedi_buf_read_alloc(s,
				       comedi_samples_to_bytes(s, nsamples));
	comedi_buf_memcpy_from(s, data, nbytes);
	comedi_buf_read_free(s, nbytes);
	comedi_inc_scan_progress(s, nbytes);
	s->async->events |= COMEDI_CB_BLOCK;

	return nbytes;
}
EXPORT_SYMBOL_GPL(comedi_buf_read_samples);
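
/*
 * Illustrative sketch (an assumption, not from the original file): an
 * analog-output driver's interrupt handler typically pulls the next sample
 * to send to hardware and then lets the core process the event flags.  The
 * hardware-access helper below is hypothetical.
 *
 *	unsigned short val;
 *
 *	if (comedi_buf_read_samples(s, &val, 1))
 *		write_sample_to_hardware(dev, val);
 *	comedi_handle_events(dev, s);
 */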