1 // SPDX-License-Identifier: GPL-2.0-or-later
3 * Digital Audio (PCM) abstract layer
4 * Copyright (c) by Jaroslav Kysela <perex@perex.cz>
5 * Abramo Bagnara <abramo@alsa-project.org>
8 #include <linux/slab.h>
9 #include <linux/sched/signal.h>
10 #include <linux/time.h>
11 #include <linux/math64.h>
12 #include <linux/export.h>
13 #include <sound/core.h>
14 #include <sound/control.h>
15 #include <sound/tlv.h>
16 #include <sound/info.h>
17 #include <sound/pcm.h>
18 #include <sound/pcm_params.h>
19 #include <sound/timer.h>
21 #include "pcm_local.h"
23 #ifdef CONFIG_SND_PCM_XRUN_DEBUG
24 #define CREATE_TRACE_POINTS
25 #include "pcm_trace.h"
27 #define trace_hwptr(substream, pos, in_interrupt)
28 #define trace_xrun(substream)
29 #define trace_hw_ptr_error(substream, reason)
30 #define trace_applptr(substream, prev, curr)
33 static int fill_silence_frames(struct snd_pcm_substream *substream,
34 snd_pcm_uframes_t off, snd_pcm_uframes_t frames);
37 * fill ring buffer with silence
38 * runtime->silence_start: starting pointer to silence area
39 * runtime->silence_filled: size filled with silence
40 * runtime->silence_threshold: threshold from application
41 * runtime->silence_size: maximal size from application
43 * when runtime->silence_size >= runtime->boundary - fill processed area with silence immediately
45 void snd_pcm_playback_silence(struct snd_pcm_substream *substream)
47 struct snd_pcm_runtime *runtime = substream->runtime;
48 snd_pcm_uframes_t appl_ptr = READ_ONCE(runtime->control->appl_ptr);
49 snd_pcm_sframes_t added, hw_avail, frames;
50 snd_pcm_uframes_t noise_dist, ofs, transfer;
53 added = appl_ptr - runtime->silence_start;
56 added += runtime->boundary;
57 if (added < runtime->silence_filled)
58 runtime->silence_filled -= added;
60 runtime->silence_filled = 0;
61 runtime->silence_start = appl_ptr;
64 // This will "legitimately" turn negative on underrun, and will be mangled
65 // into a huge number by the boundary crossing handling. The initial state
66 // might also be not quite sane. The code below MUST account for these cases.
67 hw_avail = appl_ptr - runtime->status->hw_ptr;
69 hw_avail += runtime->boundary;
71 noise_dist = hw_avail + runtime->silence_filled;
72 if (runtime->silence_size < runtime->boundary) {
73 frames = runtime->silence_threshold - noise_dist;
76 if (frames > runtime->silence_size)
77 frames = runtime->silence_size;
79 frames = runtime->buffer_size - noise_dist;
84 if (snd_BUG_ON(frames > runtime->buffer_size))
86 ofs = (runtime->silence_start + runtime->silence_filled) % runtime->buffer_size;
88 transfer = ofs + frames > runtime->buffer_size ? runtime->buffer_size - ofs : frames;
89 err = fill_silence_frames(substream, ofs, transfer);
91 runtime->silence_filled += transfer;
95 snd_pcm_dma_buffer_sync(substream, SNDRV_DMA_SYNC_DEVICE);
98 #ifdef CONFIG_SND_DEBUG
99 void snd_pcm_debug_name(struct snd_pcm_substream *substream,
100 char *name, size_t len)
102 snprintf(name, len, "pcmC%dD%d%c:%d",
103 substream->pcm->card->number,
104 substream->pcm->device,
105 substream->stream ? 'c' : 'p',
108 EXPORT_SYMBOL(snd_pcm_debug_name);
111 #define XRUN_DEBUG_BASIC (1<<0)
112 #define XRUN_DEBUG_STACK (1<<1) /* dump also stack */
113 #define XRUN_DEBUG_JIFFIESCHECK (1<<2) /* do jiffies check */
115 #ifdef CONFIG_SND_PCM_XRUN_DEBUG
117 #define xrun_debug(substream, mask) \
118 ((substream)->pstr->xrun_debug & (mask))
120 #define xrun_debug(substream, mask) 0
123 #define dump_stack_on_xrun(substream) do { \
124 if (xrun_debug(substream, XRUN_DEBUG_STACK)) \
128 /* call with stream lock held */
129 void __snd_pcm_xrun(struct snd_pcm_substream *substream)
131 struct snd_pcm_runtime *runtime = substream->runtime;
133 trace_xrun(substream);
134 if (runtime->tstamp_mode == SNDRV_PCM_TSTAMP_ENABLE) {
135 struct timespec64 tstamp;
137 snd_pcm_gettime(runtime, &tstamp);
138 runtime->status->tstamp.tv_sec = tstamp.tv_sec;
139 runtime->status->tstamp.tv_nsec = tstamp.tv_nsec;
141 snd_pcm_stop(substream, SNDRV_PCM_STATE_XRUN);
142 if (xrun_debug(substream, XRUN_DEBUG_BASIC)) {
144 snd_pcm_debug_name(substream, name, sizeof(name));
145 pcm_warn(substream->pcm, "XRUN: %s\n", name);
146 dump_stack_on_xrun(substream);
150 #ifdef CONFIG_SND_PCM_XRUN_DEBUG
151 #define hw_ptr_error(substream, in_interrupt, reason, fmt, args...) \
153 trace_hw_ptr_error(substream, reason); \
154 if (xrun_debug(substream, XRUN_DEBUG_BASIC)) { \
155 pr_err_ratelimited("ALSA: PCM: [%c] " reason ": " fmt, \
156 (in_interrupt) ? 'Q' : 'P', ##args); \
157 dump_stack_on_xrun(substream); \
161 #else /* ! CONFIG_SND_PCM_XRUN_DEBUG */
163 #define hw_ptr_error(substream, fmt, args...) do { } while (0)
167 int snd_pcm_update_state(struct snd_pcm_substream *substream,
168 struct snd_pcm_runtime *runtime)
170 snd_pcm_uframes_t avail;
172 avail = snd_pcm_avail(substream);
173 if (avail > runtime->avail_max)
174 runtime->avail_max = avail;
175 if (runtime->state == SNDRV_PCM_STATE_DRAINING) {
176 if (avail >= runtime->buffer_size) {
177 snd_pcm_drain_done(substream);
181 if (avail >= runtime->stop_threshold) {
182 __snd_pcm_xrun(substream);
186 if (runtime->twake) {
187 if (avail >= runtime->twake)
188 wake_up(&runtime->tsleep);
189 } else if (avail >= runtime->control->avail_min)
190 wake_up(&runtime->sleep);
194 static void update_audio_tstamp(struct snd_pcm_substream *substream,
195 struct timespec64 *curr_tstamp,
196 struct timespec64 *audio_tstamp)
198 struct snd_pcm_runtime *runtime = substream->runtime;
199 u64 audio_frames, audio_nsecs;
200 struct timespec64 driver_tstamp;
202 if (runtime->tstamp_mode != SNDRV_PCM_TSTAMP_ENABLE)
205 if (!(substream->ops->get_time_info) ||
206 (runtime->audio_tstamp_report.actual_type ==
207 SNDRV_PCM_AUDIO_TSTAMP_TYPE_DEFAULT)) {
210 * provide audio timestamp derived from pointer position
211 * add delay only if requested
214 audio_frames = runtime->hw_ptr_wrap + runtime->status->hw_ptr;
216 if (runtime->audio_tstamp_config.report_delay) {
217 if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
218 audio_frames -= runtime->delay;
220 audio_frames += runtime->delay;
222 audio_nsecs = div_u64(audio_frames * 1000000000LL,
224 *audio_tstamp = ns_to_timespec64(audio_nsecs);
227 if (runtime->status->audio_tstamp.tv_sec != audio_tstamp->tv_sec ||
228 runtime->status->audio_tstamp.tv_nsec != audio_tstamp->tv_nsec) {
229 runtime->status->audio_tstamp.tv_sec = audio_tstamp->tv_sec;
230 runtime->status->audio_tstamp.tv_nsec = audio_tstamp->tv_nsec;
231 runtime->status->tstamp.tv_sec = curr_tstamp->tv_sec;
232 runtime->status->tstamp.tv_nsec = curr_tstamp->tv_nsec;
237 * re-take a driver timestamp to let apps detect if the reference tstamp
238 * read by low-level hardware was provided with a delay
240 snd_pcm_gettime(substream->runtime, &driver_tstamp);
241 runtime->driver_tstamp = driver_tstamp;
/*
 * snd_pcm_update_hw_ptr0 - read the driver's pointer callback, sanity-check
 * the new position, detect xruns and suspicious jumps, and update the
 * hw_ptr/period-interrupt bookkeeping and timestamps.
 * @in_interrupt: non-zero when invoked from the period-elapsed IRQ path.
 *
 * NOTE(review): this listing appears to have lines dropped by extraction
 * (unbalanced braces, missing else branches and returns); the comments
 * below describe only the logic that is visible here.
 */
244 static int snd_pcm_update_hw_ptr0(struct snd_pcm_substream *substream,
245 unsigned int in_interrupt)
247 struct snd_pcm_runtime *runtime = substream->runtime;
248 snd_pcm_uframes_t pos;
249 snd_pcm_uframes_t old_hw_ptr, new_hw_ptr, hw_base;
250 snd_pcm_sframes_t hdelta, delta;
251 unsigned long jdelta;
252 unsigned long curr_jiffies;
253 struct timespec64 curr_tstamp;
254 struct timespec64 audio_tstamp;
255 int crossed_boundary = 0;
257 old_hw_ptr = runtime->status->hw_ptr;
260 * group pointer, time and jiffies reads to allow for more
261 * accurate correlations/corrections.
262 * The values are stored at the end of this routine after
263 * corrections for hw_ptr position
265 pos = substream->ops->pointer(substream);
266 curr_jiffies = jiffies;
267 if (runtime->tstamp_mode == SNDRV_PCM_TSTAMP_ENABLE) {
268 if ((substream->ops->get_time_info) &&
269 (runtime->audio_tstamp_config.type_requested != SNDRV_PCM_AUDIO_TSTAMP_TYPE_DEFAULT)) {
270 substream->ops->get_time_info(substream, &curr_tstamp,
272 &runtime->audio_tstamp_config,
273 &runtime->audio_tstamp_report);
275 /* re-test in case tstamp type is not supported in hardware and was demoted to DEFAULT */
276 if (runtime->audio_tstamp_report.actual_type == SNDRV_PCM_AUDIO_TSTAMP_TYPE_DEFAULT)
277 snd_pcm_gettime(runtime, &curr_tstamp);
279 snd_pcm_gettime(runtime, &curr_tstamp);
/* the driver signals an xrun via the special SNDRV_PCM_POS_XRUN position */
282 if (pos == SNDRV_PCM_POS_XRUN) {
283 __snd_pcm_xrun(substream);
/* a position past the buffer size is a driver bug; log (ratelimited) */
286 if (pos >= runtime->buffer_size) {
287 if (printk_ratelimit()) {
289 snd_pcm_debug_name(substream, name, sizeof(name));
290 pcm_err(substream->pcm,
291 "invalid position: %s, pos = %ld, buffer size = %ld, period size = %ld\n",
292 name, pos, runtime->buffer_size,
293 runtime->period_size);
297 pos -= pos % runtime->min_align;
298 trace_hwptr(substream, pos, in_interrupt);
299 hw_base = runtime->hw_ptr_base;
300 new_hw_ptr = hw_base + pos;
302 /* we know that one period was processed */
303 /* delta = "expected next hw_ptr" for in_interrupt != 0 */
304 delta = runtime->hw_ptr_interrupt + runtime->period_size;
305 if (delta > new_hw_ptr) {
306 /* check for double acknowledged interrupts */
307 hdelta = curr_jiffies - runtime->hw_ptr_jiffies;
308 if (hdelta > runtime->hw_ptr_buffer_jiffies/2 + 1) {
309 hw_base += runtime->buffer_size;
310 if (hw_base >= runtime->boundary) {
314 new_hw_ptr = hw_base + pos;
319 /* new_hw_ptr might be lower than old_hw_ptr in case when */
320 /* pointer crosses the end of the ring buffer */
321 if (new_hw_ptr < old_hw_ptr) {
322 hw_base += runtime->buffer_size;
323 if (hw_base >= runtime->boundary) {
327 new_hw_ptr = hw_base + pos;
330 delta = new_hw_ptr - old_hw_ptr;
332 delta += runtime->boundary;
/* without period IRQs, xruns can only be inferred from elapsed jiffies */
334 if (runtime->no_period_wakeup) {
335 snd_pcm_sframes_t xrun_threshold;
337 * Without regular period interrupts, we have to check
338 * the elapsed time to detect xruns.
340 jdelta = curr_jiffies - runtime->hw_ptr_jiffies;
341 if (jdelta < runtime->hw_ptr_buffer_jiffies / 2)
343 hdelta = jdelta - delta * HZ / runtime->rate;
344 xrun_threshold = runtime->hw_ptr_buffer_jiffies / 2 + 1;
345 while (hdelta > xrun_threshold) {
346 delta += runtime->buffer_size;
347 hw_base += runtime->buffer_size;
348 if (hw_base >= runtime->boundary) {
352 new_hw_ptr = hw_base + pos;
353 hdelta -= runtime->hw_ptr_buffer_jiffies;
358 /* something must be really wrong */
359 if (delta >= runtime->buffer_size + runtime->period_size) {
360 hw_ptr_error(substream, in_interrupt, "Unexpected hw_ptr",
361 "(stream=%i, pos=%ld, new_hw_ptr=%ld, old_hw_ptr=%ld)\n",
362 substream->stream, (long)pos,
363 (long)new_hw_ptr, (long)old_hw_ptr);
367 /* Do jiffies check only in xrun_debug mode */
368 if (!xrun_debug(substream, XRUN_DEBUG_JIFFIESCHECK))
369 goto no_jiffies_check;
371 /* Skip the jiffies check for hardwares with BATCH flag.
372 * Such hardware usually just increases the position at each IRQ,
373 * thus it can't give any strange position.
375 if (runtime->hw.info & SNDRV_PCM_INFO_BATCH)
376 goto no_jiffies_check;
378 if (hdelta < runtime->delay)
379 goto no_jiffies_check;
380 hdelta -= runtime->delay;
381 jdelta = curr_jiffies - runtime->hw_ptr_jiffies;
382 if (((hdelta * HZ) / runtime->rate) > jdelta + HZ/100) {
384 (((runtime->period_size * HZ) / runtime->rate)
386 /* move new_hw_ptr according jiffies not pos variable */
387 new_hw_ptr = old_hw_ptr;
389 /* use loop to avoid checks for delta overflows */
390 /* the delta value is small or zero in most cases */
392 new_hw_ptr += runtime->period_size;
393 if (new_hw_ptr >= runtime->boundary) {
394 new_hw_ptr -= runtime->boundary;
399 /* align hw_base to buffer_size */
400 hw_ptr_error(substream, in_interrupt, "hw_ptr skipping",
401 "(pos=%ld, delta=%ld, period=%ld, jdelta=%lu/%lu/%lu, hw_ptr=%ld/%ld)\n",
402 (long)pos, (long)hdelta,
403 (long)runtime->period_size, jdelta,
404 ((hdelta * HZ) / runtime->rate), hw_base,
405 (unsigned long)old_hw_ptr,
406 (unsigned long)new_hw_ptr);
407 /* reset values to proper state */
409 hw_base = new_hw_ptr - (new_hw_ptr % runtime->buffer_size);
412 if (delta > runtime->period_size + runtime->period_size / 2) {
413 hw_ptr_error(substream, in_interrupt,
415 "(stream=%i, delta=%ld, new_hw_ptr=%ld, old_hw_ptr=%ld)\n",
416 substream->stream, (long)delta,
/* pointer did not move: just refresh jiffies and timestamps */
422 if (runtime->status->hw_ptr == new_hw_ptr) {
423 runtime->hw_ptr_jiffies = curr_jiffies;
424 update_audio_tstamp(substream, &curr_tstamp, &audio_tstamp);
/* advance hw_ptr_interrupt by whole periods, wrapping at the boundary */
429 delta = new_hw_ptr - runtime->hw_ptr_interrupt;
431 delta += runtime->boundary;
432 delta -= (snd_pcm_uframes_t)delta % runtime->period_size;
433 runtime->hw_ptr_interrupt += delta;
434 if (runtime->hw_ptr_interrupt >= runtime->boundary)
435 runtime->hw_ptr_interrupt -= runtime->boundary;
437 runtime->hw_ptr_base = hw_base;
438 runtime->status->hw_ptr = new_hw_ptr;
439 runtime->hw_ptr_jiffies = curr_jiffies;
440 if (crossed_boundary) {
441 snd_BUG_ON(crossed_boundary != 1);
442 runtime->hw_ptr_wrap += runtime->boundary;
/* keep the playback silence area filled behind the new hw pointer */
445 if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK &&
446 runtime->silence_size > 0)
447 snd_pcm_playback_silence(substream);
449 update_audio_tstamp(substream, &curr_tstamp, &audio_tstamp);
451 return snd_pcm_update_state(substream, runtime);
/* CAUTION: call it with irq disabled */
int snd_pcm_update_hw_ptr(struct snd_pcm_substream *substream)
{
	/* non-interrupt variant of the hw_ptr update */
	return snd_pcm_update_hw_ptr0(substream, 0);
}
461 * snd_pcm_set_ops - set the PCM operators
462 * @pcm: the pcm instance
463 * @direction: stream direction, SNDRV_PCM_STREAM_XXX
464 * @ops: the operator table
466 * Sets the given PCM operators to the pcm instance.
468 void snd_pcm_set_ops(struct snd_pcm *pcm, int direction,
469 const struct snd_pcm_ops *ops)
471 struct snd_pcm_str *stream = &pcm->streams[direction];
472 struct snd_pcm_substream *substream;
474 for (substream = stream->substream; substream != NULL; substream = substream->next)
475 substream->ops = ops;
477 EXPORT_SYMBOL(snd_pcm_set_ops);
480 * snd_pcm_set_sync - set the PCM sync id
481 * @substream: the pcm substream
483 * Sets the PCM sync identifier for the card.
485 void snd_pcm_set_sync(struct snd_pcm_substream *substream)
487 struct snd_pcm_runtime *runtime = substream->runtime;
489 runtime->sync.id32[0] = substream->pcm->card->number;
490 runtime->sync.id32[1] = -1;
491 runtime->sync.id32[2] = -1;
492 runtime->sync.id32[3] = -1;
494 EXPORT_SYMBOL(snd_pcm_set_sync);
497 * Standard ioctl routine
/* unsigned division returning quotient and remainder; UINT_MAX on b == 0 */
static inline unsigned int div32(unsigned int a, unsigned int b,
				 unsigned int *r)
{
	if (b == 0) {
		*r = 0;
		return UINT_MAX;
	}
	*r = a % b;
	return a / b;
}
/* floor division; saturates to UINT_MAX on division by zero */
static inline unsigned int div_down(unsigned int a, unsigned int b)
{
	if (b == 0)
		return UINT_MAX;
	return a / b;
}
/* ceiling division; saturates to UINT_MAX on division by zero */
static inline unsigned int div_up(unsigned int a, unsigned int b)
{
	unsigned int q;

	if (b == 0)
		return UINT_MAX;
	/* round the quotient up when there is a remainder */
	q = a / b;
	if (a % b)
		++q;
	return q;
}
/* saturating unsigned multiply: returns UINT_MAX on overflow */
static inline unsigned int mul(unsigned int a, unsigned int b)
{
	if (a == 0)
		return 0;
	/* overflow test; a != 0 here, so this equals div_down(UINT_MAX, a) < b */
	if (UINT_MAX / a < b)
		return UINT_MAX;
	return a * b;
}
539 static inline unsigned int muldiv32(unsigned int a, unsigned int b,
540 unsigned int c, unsigned int *r)
542 u_int64_t n = (u_int64_t) a * b;
547 n = div_u64_rem(n, c, r);
556 * snd_interval_refine - refine the interval value of configurator
557 * @i: the interval value to refine
558 * @v: the interval value to refer to
560 * Refines the interval value with the reference value.
561 * The interval is changed to the range satisfying both intervals.
562 * The interval status (min, max, integer, etc.) are evaluated.
564 * Return: Positive if the value is changed, zero if it's not changed, or a
565 * negative error code.
567 int snd_interval_refine(struct snd_interval *i, const struct snd_interval *v)
570 if (snd_BUG_ON(snd_interval_empty(i)))
572 if (i->min < v->min) {
574 i->openmin = v->openmin;
576 } else if (i->min == v->min && !i->openmin && v->openmin) {
580 if (i->max > v->max) {
582 i->openmax = v->openmax;
584 } else if (i->max == v->max && !i->openmax && v->openmax) {
588 if (!i->integer && v->integer) {
601 } else if (!i->openmin && !i->openmax && i->min == i->max)
603 if (snd_interval_checkempty(i)) {
604 snd_interval_none(i);
609 EXPORT_SYMBOL(snd_interval_refine);
611 static int snd_interval_refine_first(struct snd_interval *i)
613 const unsigned int last_max = i->max;
615 if (snd_BUG_ON(snd_interval_empty(i)))
617 if (snd_interval_single(i))
622 /* only exclude max value if also excluded before refine */
623 i->openmax = (i->openmax && i->max >= last_max);
627 static int snd_interval_refine_last(struct snd_interval *i)
629 const unsigned int last_min = i->min;
631 if (snd_BUG_ON(snd_interval_empty(i)))
633 if (snd_interval_single(i))
638 /* only exclude min value if also excluded before refine */
639 i->openmin = (i->openmin && i->min <= last_min);
643 void snd_interval_mul(const struct snd_interval *a, const struct snd_interval *b, struct snd_interval *c)
645 if (a->empty || b->empty) {
646 snd_interval_none(c);
650 c->min = mul(a->min, b->min);
651 c->openmin = (a->openmin || b->openmin);
652 c->max = mul(a->max, b->max);
653 c->openmax = (a->openmax || b->openmax);
654 c->integer = (a->integer && b->integer);
658 * snd_interval_div - refine the interval value with division
665 * The result is stored in @c.
667 void snd_interval_div(const struct snd_interval *a, const struct snd_interval *b, struct snd_interval *c)
670 if (a->empty || b->empty) {
671 snd_interval_none(c);
675 c->min = div32(a->min, b->max, &r);
676 c->openmin = (r || a->openmin || b->openmax);
678 c->max = div32(a->max, b->min, &r);
683 c->openmax = (a->openmax || b->openmin);
692 * snd_interval_muldivk - refine the interval value
695 * @k: divisor (as integer)
700 * The result is stored in @c.
702 void snd_interval_muldivk(const struct snd_interval *a, const struct snd_interval *b,
703 unsigned int k, struct snd_interval *c)
706 if (a->empty || b->empty) {
707 snd_interval_none(c);
711 c->min = muldiv32(a->min, b->min, k, &r);
712 c->openmin = (r || a->openmin || b->openmin);
713 c->max = muldiv32(a->max, b->max, k, &r);
718 c->openmax = (a->openmax || b->openmax);
723 * snd_interval_mulkdiv - refine the interval value
725 * @k: dividend 2 (as integer)
731 * The result is stored in @c.
733 void snd_interval_mulkdiv(const struct snd_interval *a, unsigned int k,
734 const struct snd_interval *b, struct snd_interval *c)
737 if (a->empty || b->empty) {
738 snd_interval_none(c);
742 c->min = muldiv32(a->min, k, b->max, &r);
743 c->openmin = (r || a->openmin || b->openmax);
745 c->max = muldiv32(a->max, k, b->min, &r);
750 c->openmax = (a->openmax || b->openmin);
762 * snd_interval_ratnum - refine the interval value
763 * @i: interval to refine
764 * @rats_count: number of ratnum_t
765 * @rats: ratnum_t array
766 * @nump: pointer to store the resultant numerator
767 * @denp: pointer to store the resultant denominator
769 * Return: Positive if the value is changed, zero if it's not changed, or a
770 * negative error code.
/*
 * Two-pass search: pass 1 finds the best num/den ratio bounding i->min
 * from below, pass 2 the best ratio bounding i->max from above; the
 * refined interval is then intersected into @i, and when it collapses to
 * a single value the winning numerator/denominator are reported.
 *
 * NOTE(review): this listing has lines dropped by extraction (missing
 * declarations, continue statements and else branches); comments describe
 * the visible logic only.
 */
772 int snd_interval_ratnum(struct snd_interval *i,
773 unsigned int rats_count, const struct snd_ratnum *rats,
774 unsigned int *nump, unsigned int *denp)
776 unsigned int best_num, best_den;
779 struct snd_interval t;
781 unsigned int result_num, result_den;
/* pass 1: pick the representable rate closest to (and bounding) i->min */
784 best_num = best_den = best_diff = 0;
785 for (k = 0; k < rats_count; ++k) {
786 unsigned int num = rats[k].num;
788 unsigned int q = i->min;
792 den = div_up(num, q);
793 if (den < rats[k].den_min)
795 if (den > rats[k].den_max)
796 den = rats[k].den_max;
/* snap the denominator onto the den_step grid */
799 r = (den - rats[k].den_min) % rats[k].den_step;
803 diff = num - q * den;
/* cross-multiplied comparison keeps the smallest relative error */
807 diff * best_den < best_diff * den) {
817 t.min = div_down(best_num, best_den);
818 t.openmin = !!(best_num % best_den);
/* remember the pass-1 winner before rerunning against the max bound */
820 result_num = best_num;
821 result_diff = best_diff;
822 result_den = best_den;
/* pass 2: pick the representable rate closest to (and bounding) i->max */
823 best_num = best_den = best_diff = 0;
824 for (k = 0; k < rats_count; ++k) {
825 unsigned int num = rats[k].num;
827 unsigned int q = i->max;
833 den = div_down(num, q);
834 if (den > rats[k].den_max)
836 if (den < rats[k].den_min)
837 den = rats[k].den_min;
840 r = (den - rats[k].den_min) % rats[k].den_step;
842 den += rats[k].den_step - r;
844 diff = q * den - num;
848 diff * best_den < best_diff * den) {
858 t.max = div_up(best_num, best_den);
859 t.openmax = !!(best_num % best_den);
861 err = snd_interval_refine(i, &t);
/* single value left: report the better of the two pass winners */
865 if (snd_interval_single(i)) {
866 if (best_diff * result_den < result_diff * best_den) {
867 result_num = best_num;
868 result_den = best_den;
877 EXPORT_SYMBOL(snd_interval_ratnum);
880 * snd_interval_ratden - refine the interval value
881 * @i: interval to refine
882 * @rats_count: number of struct ratden
883 * @rats: struct ratden array
884 * @nump: pointer to store the resultant numerator
885 * @denp: pointer to store the resultant denominator
887 * Return: Positive if the value is changed, zero if it's not changed, or a
888 * negative error code.
/*
 * Mirror of snd_interval_ratnum() for fixed denominators: two-pass search
 * over num/den candidates bounding i->min from below and i->max from
 * above, then intersect the result into @i.
 *
 * NOTE(review): this listing has lines dropped by extraction; comments
 * describe the visible logic only.
 */
890 static int snd_interval_ratden(struct snd_interval *i,
891 unsigned int rats_count,
892 const struct snd_ratden *rats,
893 unsigned int *nump, unsigned int *denp)
895 unsigned int best_num, best_diff, best_den;
897 struct snd_interval t;
/* pass 1: best numerator bounding i->min from below */
900 best_num = best_den = best_diff = 0;
901 for (k = 0; k < rats_count; ++k) {
903 unsigned int den = rats[k].den;
904 unsigned int q = i->min;
907 if (num > rats[k].num_max)
/* NOTE(review): clamping a too-small num to num_max (not num_min) looks
 * asymmetric with the max-side pass below — confirm against upstream */
909 if (num < rats[k].num_min)
910 num = rats[k].num_max;
/* snap the numerator onto the num_step grid */
913 r = (num - rats[k].num_min) % rats[k].num_step;
915 num += rats[k].num_step - r;
917 diff = num - q * den;
/* cross-multiplied comparison keeps the smallest relative error */
919 diff * best_den < best_diff * den) {
929 t.min = div_down(best_num, best_den);
930 t.openmin = !!(best_num % best_den);
/* pass 2: best numerator bounding i->max from above */
932 best_num = best_den = best_diff = 0;
933 for (k = 0; k < rats_count; ++k) {
935 unsigned int den = rats[k].den;
936 unsigned int q = i->max;
939 if (num < rats[k].num_min)
941 if (num > rats[k].num_max)
942 num = rats[k].num_max;
945 r = (num - rats[k].num_min) % rats[k].num_step;
949 diff = q * den - num;
951 diff * best_den < best_diff * den) {
961 t.max = div_up(best_num, best_den);
962 t.openmax = !!(best_num % best_den);
964 err = snd_interval_refine(i, &t);
/* single value left: report the chosen numerator/denominator */
968 if (snd_interval_single(i)) {
978 * snd_interval_list - refine the interval value from the list
979 * @i: the interval value to refine
980 * @count: the number of elements in the list
981 * @list: the value list
982 * @mask: the bit-mask to evaluate
984 * Refines the interval value from the list.
985 * When mask is non-zero, only the elements corresponding to bit 1 are
988 * Return: Positive if the value is changed, zero if it's not changed, or a
989 * negative error code.
991 int snd_interval_list(struct snd_interval *i, unsigned int count,
992 const unsigned int *list, unsigned int mask)
995 struct snd_interval list_range;
1001 snd_interval_any(&list_range);
1002 list_range.min = UINT_MAX;
1004 for (k = 0; k < count; k++) {
1005 if (mask && !(mask & (1 << k)))
1007 if (!snd_interval_test(i, list[k]))
1009 list_range.min = min(list_range.min, list[k]);
1010 list_range.max = max(list_range.max, list[k]);
1012 return snd_interval_refine(i, &list_range);
1014 EXPORT_SYMBOL(snd_interval_list);
1017 * snd_interval_ranges - refine the interval value from the list of ranges
1018 * @i: the interval value to refine
1019 * @count: the number of elements in the list of ranges
1020 * @ranges: the ranges list
1021 * @mask: the bit-mask to evaluate
1023 * Refines the interval value from the list of ranges.
1024 * When mask is non-zero, only the elements corresponding to bit 1 are
1027 * Return: Positive if the value is changed, zero if it's not changed, or a
1028 * negative error code.
1030 int snd_interval_ranges(struct snd_interval *i, unsigned int count,
1031 const struct snd_interval *ranges, unsigned int mask)
1034 struct snd_interval range_union;
1035 struct snd_interval range;
1038 snd_interval_none(i);
1041 snd_interval_any(&range_union);
1042 range_union.min = UINT_MAX;
1043 range_union.max = 0;
1044 for (k = 0; k < count; k++) {
1045 if (mask && !(mask & (1 << k)))
1047 snd_interval_copy(&range, &ranges[k]);
1048 if (snd_interval_refine(&range, i) < 0)
1050 if (snd_interval_empty(&range))
1053 if (range.min < range_union.min) {
1054 range_union.min = range.min;
1055 range_union.openmin = 1;
1057 if (range.min == range_union.min && !range.openmin)
1058 range_union.openmin = 0;
1059 if (range.max > range_union.max) {
1060 range_union.max = range.max;
1061 range_union.openmax = 1;
1063 if (range.max == range_union.max && !range.openmax)
1064 range_union.openmax = 0;
1066 return snd_interval_refine(i, &range_union);
1068 EXPORT_SYMBOL(snd_interval_ranges);
1070 static int snd_interval_step(struct snd_interval *i, unsigned int step)
1075 if (n != 0 || i->openmin) {
1081 if (n != 0 || i->openmax) {
1086 if (snd_interval_checkempty(i)) {
1093 /* Info constraints helpers */
1096 * snd_pcm_hw_rule_add - add the hw-constraint rule
1097 * @runtime: the pcm runtime instance
1098 * @cond: condition bits
1099 * @var: the variable to evaluate
1100 * @func: the evaluation function
1101 * @private: the private data pointer passed to function
1102 * @dep: the dependent variables
1104 * Return: Zero if successful, or a negative error code on failure.
1106 int snd_pcm_hw_rule_add(struct snd_pcm_runtime *runtime, unsigned int cond,
1108 snd_pcm_hw_rule_func_t func, void *private,
1111 struct snd_pcm_hw_constraints *constrs = &runtime->hw_constraints;
1112 struct snd_pcm_hw_rule *c;
1115 va_start(args, dep);
1116 if (constrs->rules_num >= constrs->rules_all) {
1117 struct snd_pcm_hw_rule *new;
1118 unsigned int new_rules = constrs->rules_all + 16;
1119 new = krealloc_array(constrs->rules, new_rules,
1120 sizeof(*c), GFP_KERNEL);
1125 constrs->rules = new;
1126 constrs->rules_all = new_rules;
1128 c = &constrs->rules[constrs->rules_num];
1132 c->private = private;
1135 if (snd_BUG_ON(k >= ARRAY_SIZE(c->deps))) {
1142 dep = va_arg(args, int);
1144 constrs->rules_num++;
1148 EXPORT_SYMBOL(snd_pcm_hw_rule_add);
1151 * snd_pcm_hw_constraint_mask - apply the given bitmap mask constraint
1152 * @runtime: PCM runtime instance
1153 * @var: hw_params variable to apply the mask
1154 * @mask: the bitmap mask
1156 * Apply the constraint of the given bitmap mask to a 32-bit mask parameter.
1158 * Return: Zero if successful, or a negative error code on failure.
1160 int snd_pcm_hw_constraint_mask(struct snd_pcm_runtime *runtime, snd_pcm_hw_param_t var,
1163 struct snd_pcm_hw_constraints *constrs = &runtime->hw_constraints;
1164 struct snd_mask *maskp = constrs_mask(constrs, var);
1165 *maskp->bits &= mask;
1166 memset(maskp->bits + 1, 0, (SNDRV_MASK_MAX-32) / 8); /* clear rest */
1167 if (*maskp->bits == 0)
1173 * snd_pcm_hw_constraint_mask64 - apply the given bitmap mask constraint
1174 * @runtime: PCM runtime instance
1175 * @var: hw_params variable to apply the mask
1176 * @mask: the 64bit bitmap mask
1178 * Apply the constraint of the given bitmap mask to a 64-bit mask parameter.
1180 * Return: Zero if successful, or a negative error code on failure.
1182 int snd_pcm_hw_constraint_mask64(struct snd_pcm_runtime *runtime, snd_pcm_hw_param_t var,
1185 struct snd_pcm_hw_constraints *constrs = &runtime->hw_constraints;
1186 struct snd_mask *maskp = constrs_mask(constrs, var);
1187 maskp->bits[0] &= (u_int32_t)mask;
1188 maskp->bits[1] &= (u_int32_t)(mask >> 32);
1189 memset(maskp->bits + 2, 0, (SNDRV_MASK_MAX-64) / 8); /* clear rest */
1190 if (! maskp->bits[0] && ! maskp->bits[1])
1194 EXPORT_SYMBOL(snd_pcm_hw_constraint_mask64);
1197 * snd_pcm_hw_constraint_integer - apply an integer constraint to an interval
1198 * @runtime: PCM runtime instance
1199 * @var: hw_params variable to apply the integer constraint
1201 * Apply the constraint of integer to an interval parameter.
1203 * Return: Positive if the value is changed, zero if it's not changed, or a
1204 * negative error code.
1206 int snd_pcm_hw_constraint_integer(struct snd_pcm_runtime *runtime, snd_pcm_hw_param_t var)
1208 struct snd_pcm_hw_constraints *constrs = &runtime->hw_constraints;
1209 return snd_interval_setinteger(constrs_interval(constrs, var));
1211 EXPORT_SYMBOL(snd_pcm_hw_constraint_integer);
1214 * snd_pcm_hw_constraint_minmax - apply a min/max range constraint to an interval
1215 * @runtime: PCM runtime instance
1216 * @var: hw_params variable to apply the range
1217 * @min: the minimal value
1218 * @max: the maximal value
1220 * Apply the min/max range constraint to an interval parameter.
1222 * Return: Positive if the value is changed, zero if it's not changed, or a
1223 * negative error code.
1225 int snd_pcm_hw_constraint_minmax(struct snd_pcm_runtime *runtime, snd_pcm_hw_param_t var,
1226 unsigned int min, unsigned int max)
1228 struct snd_pcm_hw_constraints *constrs = &runtime->hw_constraints;
1229 struct snd_interval t;
1232 t.openmin = t.openmax = 0;
1234 return snd_interval_refine(constrs_interval(constrs, var), &t);
1236 EXPORT_SYMBOL(snd_pcm_hw_constraint_minmax);
1238 static int snd_pcm_hw_rule_list(struct snd_pcm_hw_params *params,
1239 struct snd_pcm_hw_rule *rule)
1241 struct snd_pcm_hw_constraint_list *list = rule->private;
1242 return snd_interval_list(hw_param_interval(params, rule->var), list->count, list->list, list->mask);
1247 * snd_pcm_hw_constraint_list - apply a list of constraints to a parameter
1248 * @runtime: PCM runtime instance
1249 * @cond: condition bits
1250 * @var: hw_params variable to apply the list constraint
1253 * Apply the list of constraints to an interval parameter.
1255 * Return: Zero if successful, or a negative error code on failure.
1257 int snd_pcm_hw_constraint_list(struct snd_pcm_runtime *runtime,
1259 snd_pcm_hw_param_t var,
1260 const struct snd_pcm_hw_constraint_list *l)
1262 return snd_pcm_hw_rule_add(runtime, cond, var,
1263 snd_pcm_hw_rule_list, (void *)l,
1266 EXPORT_SYMBOL(snd_pcm_hw_constraint_list);
1268 static int snd_pcm_hw_rule_ranges(struct snd_pcm_hw_params *params,
1269 struct snd_pcm_hw_rule *rule)
1271 struct snd_pcm_hw_constraint_ranges *r = rule->private;
1272 return snd_interval_ranges(hw_param_interval(params, rule->var),
1273 r->count, r->ranges, r->mask);
1278 * snd_pcm_hw_constraint_ranges - apply list of range constraints to a parameter
1279 * @runtime: PCM runtime instance
1280 * @cond: condition bits
1281 * @var: hw_params variable to apply the list of range constraints
1284 * Apply the list of range constraints to an interval parameter.
1286 * Return: Zero if successful, or a negative error code on failure.
1288 int snd_pcm_hw_constraint_ranges(struct snd_pcm_runtime *runtime,
1290 snd_pcm_hw_param_t var,
1291 const struct snd_pcm_hw_constraint_ranges *r)
1293 return snd_pcm_hw_rule_add(runtime, cond, var,
1294 snd_pcm_hw_rule_ranges, (void *)r,
1297 EXPORT_SYMBOL(snd_pcm_hw_constraint_ranges);
1299 static int snd_pcm_hw_rule_ratnums(struct snd_pcm_hw_params *params,
1300 struct snd_pcm_hw_rule *rule)
1302 const struct snd_pcm_hw_constraint_ratnums *r = rule->private;
1303 unsigned int num = 0, den = 0;
1305 err = snd_interval_ratnum(hw_param_interval(params, rule->var),
1306 r->nrats, r->rats, &num, &den);
1307 if (err >= 0 && den && rule->var == SNDRV_PCM_HW_PARAM_RATE) {
1308 params->rate_num = num;
1309 params->rate_den = den;
1315 * snd_pcm_hw_constraint_ratnums - apply ratnums constraint to a parameter
1316 * @runtime: PCM runtime instance
1317 * @cond: condition bits
1318 * @var: hw_params variable to apply the ratnums constraint
1319 * @r: struct snd_ratnums constraints
1321 * Return: Zero if successful, or a negative error code on failure.
1323 int snd_pcm_hw_constraint_ratnums(struct snd_pcm_runtime *runtime,
1325 snd_pcm_hw_param_t var,
1326 const struct snd_pcm_hw_constraint_ratnums *r)
1328 return snd_pcm_hw_rule_add(runtime, cond, var,
1329 snd_pcm_hw_rule_ratnums, (void *)r,
1332 EXPORT_SYMBOL(snd_pcm_hw_constraint_ratnums);
1334 static int snd_pcm_hw_rule_ratdens(struct snd_pcm_hw_params *params,
1335 struct snd_pcm_hw_rule *rule)
1337 const struct snd_pcm_hw_constraint_ratdens *r = rule->private;
1338 unsigned int num = 0, den = 0;
1339 int err = snd_interval_ratden(hw_param_interval(params, rule->var),
1340 r->nrats, r->rats, &num, &den);
1341 if (err >= 0 && den && rule->var == SNDRV_PCM_HW_PARAM_RATE) {
1342 params->rate_num = num;
1343 params->rate_den = den;
/**
 * snd_pcm_hw_constraint_ratdens - apply ratdens constraint to a parameter
 * @runtime: PCM runtime instance
 * @cond: condition bits
 * @var: hw_params variable to apply the ratdens constraint
 * @r: struct snd_ratdens constraints
 *
 * Return: Zero if successful, or a negative error code on failure.
 */
int snd_pcm_hw_constraint_ratdens(struct snd_pcm_runtime *runtime,
snd_pcm_hw_param_t var,
const struct snd_pcm_hw_constraint_ratdens *r)
/* const cast is safe: snd_pcm_hw_rule_ratdens() only reads @r */
return snd_pcm_hw_rule_add(runtime, cond, var,
snd_pcm_hw_rule_ratdens, (void *)r,
EXPORT_SYMBOL(snd_pcm_hw_constraint_ratdens);
/* Rule callback for msbits: rule->private packs (msbits << 16) | width.
 * Only acts once SAMPLE_BITS has collapsed to a single value; then, if
 * that value equals @width (or @width == 0 and the value exceeds
 * @msbits), clamp params->msbits — min_not_zero() keeps an already-set
 * smaller msbits.
 */
static int snd_pcm_hw_rule_msbits(struct snd_pcm_hw_params *params,
struct snd_pcm_hw_rule *rule)
unsigned int l = (unsigned long) rule->private;
int width = l & 0xffff;
unsigned int msbits = l >> 16;
const struct snd_interval *i =
hw_param_interval_c(params, SNDRV_PCM_HW_PARAM_SAMPLE_BITS);
if (!snd_interval_single(i))
if ((snd_interval_value(i) == width) ||
(width == 0 && snd_interval_value(i) > msbits))
params->msbits = min_not_zero(params->msbits, msbits);
/**
 * snd_pcm_hw_constraint_msbits - add a hw constraint msbits rule
 * @runtime: PCM runtime instance
 * @cond: condition bits
 * @width: sample bits width
 * @msbits: msbits width
 *
 * This constraint will set the number of most significant bits (msbits) if a
 * sample format with the specified width has been selected. If width is set to 0
 * the msbits will be set for any sample format with a width larger than the
 *
 * Return: Zero if successful, or a negative error code on failure.
 */
int snd_pcm_hw_constraint_msbits(struct snd_pcm_runtime *runtime,
unsigned int msbits)
/* pack both values into one pointer-sized private word for the rule */
unsigned long l = (msbits << 16) | width;
return snd_pcm_hw_rule_add(runtime, cond, -1,
snd_pcm_hw_rule_msbits,
SNDRV_PCM_HW_PARAM_SAMPLE_BITS, -1);
EXPORT_SYMBOL(snd_pcm_hw_constraint_msbits);
/* Rule callback: constrain rule->var to multiples of the step value
 * carried in rule->private.
 */
static int snd_pcm_hw_rule_step(struct snd_pcm_hw_params *params,
struct snd_pcm_hw_rule *rule)
unsigned long step = (unsigned long) rule->private;
return snd_interval_step(hw_param_interval(params, rule->var), step);
/**
 * snd_pcm_hw_constraint_step - add a hw constraint step rule
 * @runtime: PCM runtime instance
 * @cond: condition bits
 * @var: hw_params variable to apply the step constraint
 *
 * Return: Zero if successful, or a negative error code on failure.
 */
int snd_pcm_hw_constraint_step(struct snd_pcm_runtime *runtime,
snd_pcm_hw_param_t var,
/* step value is smuggled through the rule's private pointer */
return snd_pcm_hw_rule_add(runtime, cond, var,
snd_pcm_hw_rule_step, (void *) step,
EXPORT_SYMBOL(snd_pcm_hw_constraint_step);
/* Rule callback: restrict rule->var to powers of two (2^0 .. 2^30).
 * 1<<31 is omitted since it would overflow a signed int.
 */
static int snd_pcm_hw_rule_pow2(struct snd_pcm_hw_params *params, struct snd_pcm_hw_rule *rule)
static const unsigned int pow2_sizes[] = {
1<<0, 1<<1, 1<<2, 1<<3, 1<<4, 1<<5, 1<<6, 1<<7,
1<<8, 1<<9, 1<<10, 1<<11, 1<<12, 1<<13, 1<<14, 1<<15,
1<<16, 1<<17, 1<<18, 1<<19, 1<<20, 1<<21, 1<<22, 1<<23,
1<<24, 1<<25, 1<<26, 1<<27, 1<<28, 1<<29, 1<<30
return snd_interval_list(hw_param_interval(params, rule->var),
ARRAY_SIZE(pow2_sizes), pow2_sizes, 0);
/**
 * snd_pcm_hw_constraint_pow2 - add a hw constraint power-of-2 rule
 * @runtime: PCM runtime instance
 * @cond: condition bits
 * @var: hw_params variable to apply the power-of-2 constraint
 *
 * Return: Zero if successful, or a negative error code on failure.
 */
int snd_pcm_hw_constraint_pow2(struct snd_pcm_runtime *runtime,
snd_pcm_hw_param_t var)
return snd_pcm_hw_rule_add(runtime, cond, var,
snd_pcm_hw_rule_pow2, NULL,
EXPORT_SYMBOL(snd_pcm_hw_constraint_pow2);
/* Rule callback: collapse the RATE interval to the single base rate
 * (hardware's native rate) carried in rule->private.
 */
static int snd_pcm_hw_rule_noresample_func(struct snd_pcm_hw_params *params,
struct snd_pcm_hw_rule *rule)
unsigned int base_rate = (unsigned int)(uintptr_t)rule->private;
struct snd_interval *rate;
rate = hw_param_interval(params, SNDRV_PCM_HW_PARAM_RATE);
return snd_interval_list(rate, 1, &base_rate, 0);
/**
 * snd_pcm_hw_rule_noresample - add a rule to allow disabling hw resampling
 * @runtime: PCM runtime instance
 * @base_rate: the rate at which the hardware does not resample
 *
 * The rule is gated by SNDRV_PCM_HW_PARAMS_NORESAMPLE, i.e. it only
 * applies when the application requests no resampling.
 *
 * Return: Zero if successful, or a negative error code on failure.
 */
int snd_pcm_hw_rule_noresample(struct snd_pcm_runtime *runtime,
unsigned int base_rate)
return snd_pcm_hw_rule_add(runtime, SNDRV_PCM_HW_PARAMS_NORESAMPLE,
SNDRV_PCM_HW_PARAM_RATE,
snd_pcm_hw_rule_noresample_func,
(void *)(uintptr_t)base_rate,
SNDRV_PCM_HW_PARAM_RATE, -1);
EXPORT_SYMBOL(snd_pcm_hw_rule_noresample);
/* Reset one hw_params variable to its full (unconstrained) range and
 * mark it both changed (cmask) and to-be-refined (rmask).
 */
static void _snd_pcm_hw_param_any(struct snd_pcm_hw_params *params,
snd_pcm_hw_param_t var)
if (hw_is_mask(var)) {
snd_mask_any(hw_param_mask(params, var));
params->cmask |= 1 << var;
params->rmask |= 1 << var;
if (hw_is_interval(var)) {
snd_interval_any(hw_param_interval(params, var));
params->cmask |= 1 << var;
params->rmask |= 1 << var;
/* Initialize the whole hw_params container: zero it, then widen every
 * mask and interval parameter to its full range.
 */
void _snd_pcm_hw_params_any(struct snd_pcm_hw_params *params)
memset(params, 0, sizeof(*params));
for (k = SNDRV_PCM_HW_PARAM_FIRST_MASK; k <= SNDRV_PCM_HW_PARAM_LAST_MASK; k++)
_snd_pcm_hw_param_any(params, k);
for (k = SNDRV_PCM_HW_PARAM_FIRST_INTERVAL; k <= SNDRV_PCM_HW_PARAM_LAST_INTERVAL; k++)
_snd_pcm_hw_param_any(params, k);
EXPORT_SYMBOL(_snd_pcm_hw_params_any);
/**
 * snd_pcm_hw_param_value - return @params field @var value
 * @params: the hw_params instance
 * @var: parameter to retrieve
 * @dir: pointer to the direction (-1,0,1) or %NULL
 *
 * Return: The value for field @var if it's fixed in configuration space
 * defined by @params. -%EINVAL otherwise.
 */
int snd_pcm_hw_param_value(const struct snd_pcm_hw_params *params,
snd_pcm_hw_param_t var, int *dir)
if (hw_is_mask(var)) {
const struct snd_mask *mask = hw_param_mask_c(params, var);
/* a value is only meaningful once the mask has a single bit set */
if (!snd_mask_single(mask))
return snd_mask_value(mask);
if (hw_is_interval(var)) {
const struct snd_interval *i = hw_param_interval_c(params, var);
/* likewise the interval must have collapsed to one point */
if (!snd_interval_single(i))
return snd_interval_value(i);
EXPORT_SYMBOL(snd_pcm_hw_param_value);
/* Force one hw_params variable to the empty set (no valid value) and
 * flag it in cmask/rmask so a later refine pass reports the conflict.
 */
void _snd_pcm_hw_param_setempty(struct snd_pcm_hw_params *params,
snd_pcm_hw_param_t var)
if (hw_is_mask(var)) {
snd_mask_none(hw_param_mask(params, var));
params->cmask |= 1 << var;
params->rmask |= 1 << var;
} else if (hw_is_interval(var)) {
snd_interval_none(hw_param_interval(params, var));
params->cmask |= 1 << var;
params->rmask |= 1 << var;
EXPORT_SYMBOL(_snd_pcm_hw_param_setempty);
/* Narrow @var down to its minimum value; on change, mark it in
 * cmask/rmask so dependent rules get re-evaluated.
 */
static int _snd_pcm_hw_param_first(struct snd_pcm_hw_params *params,
snd_pcm_hw_param_t var)
if (hw_is_mask(var))
changed = snd_mask_refine_first(hw_param_mask(params, var));
else if (hw_is_interval(var))
changed = snd_interval_refine_first(hw_param_interval(params, var));
params->cmask |= 1 << var;
params->rmask |= 1 << var;
/**
 * snd_pcm_hw_param_first - refine config space and return minimum value
 * @pcm: PCM instance
 * @params: the hw_params instance
 * @var: parameter to retrieve
 * @dir: pointer to the direction (-1,0,1) or %NULL
 *
 * Inside configuration space defined by @params remove from @var all
 * values > minimum. Reduce configuration space accordingly.
 *
 * Return: The minimum, or a negative error code on failure.
 */
int snd_pcm_hw_param_first(struct snd_pcm_substream *pcm,
struct snd_pcm_hw_params *params,
snd_pcm_hw_param_t var, int *dir)
int changed = _snd_pcm_hw_param_first(params, var);
/* re-refine the whole space only if something actually changed */
if (params->rmask) {
int err = snd_pcm_hw_refine(pcm, params);
return snd_pcm_hw_param_value(params, var, dir);
EXPORT_SYMBOL(snd_pcm_hw_param_first);
/* Narrow @var up to its maximum value; on change, mark it in
 * cmask/rmask so dependent rules get re-evaluated.
 */
static int _snd_pcm_hw_param_last(struct snd_pcm_hw_params *params,
snd_pcm_hw_param_t var)
if (hw_is_mask(var))
changed = snd_mask_refine_last(hw_param_mask(params, var));
else if (hw_is_interval(var))
changed = snd_interval_refine_last(hw_param_interval(params, var));
params->cmask |= 1 << var;
params->rmask |= 1 << var;
/**
 * snd_pcm_hw_param_last - refine config space and return maximum value
 * @pcm: PCM instance
 * @params: the hw_params instance
 * @var: parameter to retrieve
 * @dir: pointer to the direction (-1,0,1) or %NULL
 *
 * Inside configuration space defined by @params remove from @var all
 * values < maximum. Reduce configuration space accordingly.
 *
 * Return: The maximum, or a negative error code on failure.
 */
int snd_pcm_hw_param_last(struct snd_pcm_substream *pcm,
struct snd_pcm_hw_params *params,
snd_pcm_hw_param_t var, int *dir)
int changed = _snd_pcm_hw_param_last(params, var);
/* re-refine the whole space only if something actually changed */
if (params->rmask) {
int err = snd_pcm_hw_refine(pcm, params);
return snd_pcm_hw_param_value(params, var, dir);
EXPORT_SYMBOL(snd_pcm_hw_param_last);
/* SNDRV_PCM_IOCTL1_RESET handler: under the stream lock, fold hw_ptr
 * back into the buffer when the stream is running and the pointer
 * update succeeds; otherwise restart hw_ptr bookkeeping from zero.
 */
static int snd_pcm_lib_ioctl_reset(struct snd_pcm_substream *substream,
struct snd_pcm_runtime *runtime = substream->runtime;
unsigned long flags;
snd_pcm_stream_lock_irqsave(substream, flags);
if (snd_pcm_running(substream) &&
snd_pcm_update_hw_ptr(substream) >= 0)
runtime->status->hw_ptr %= runtime->buffer_size;
runtime->status->hw_ptr = 0;
runtime->hw_ptr_wrap = 0;
snd_pcm_stream_unlock_irqrestore(substream, flags);
1687 static int snd_pcm_lib_ioctl_channel_info(struct snd_pcm_substream *substream,
1690 struct snd_pcm_channel_info *info = arg;
1691 struct snd_pcm_runtime *runtime = substream->runtime;
1693 if (!(runtime->info & SNDRV_PCM_INFO_MMAP)) {
1697 width = snd_pcm_format_physical_width(runtime->format);
1701 switch (runtime->access) {
1702 case SNDRV_PCM_ACCESS_MMAP_INTERLEAVED:
1703 case SNDRV_PCM_ACCESS_RW_INTERLEAVED:
1704 info->first = info->channel * width;
1705 info->step = runtime->channels * width;
1707 case SNDRV_PCM_ACCESS_MMAP_NONINTERLEAVED:
1708 case SNDRV_PCM_ACCESS_RW_NONINTERLEAVED:
1710 size_t size = runtime->dma_bytes / runtime->channels;
1711 info->first = info->channel * size * 8;
1722 static int snd_pcm_lib_ioctl_fifo_size(struct snd_pcm_substream *substream,
1725 struct snd_pcm_hw_params *params = arg;
1726 snd_pcm_format_t format;
1730 params->fifo_size = substream->runtime->hw.fifo_size;
1731 if (!(substream->runtime->hw.info & SNDRV_PCM_INFO_FIFO_IN_FRAMES)) {
1732 format = params_format(params);
1733 channels = params_channels(params);
1734 frame_size = snd_pcm_format_size(format, channels);
1736 params->fifo_size /= frame_size;
/**
 * snd_pcm_lib_ioctl - a generic PCM ioctl callback
 * @substream: the pcm substream instance
 * @cmd: ioctl command
 * @arg: ioctl argument
 *
 * Processes the generic ioctl commands for PCM.
 * Can be passed as the ioctl callback for PCM ops.
 *
 * Return: Zero if successful, or a negative error code on failure.
 */
int snd_pcm_lib_ioctl(struct snd_pcm_substream *substream,
unsigned int cmd, void *arg)
case SNDRV_PCM_IOCTL1_RESET:
return snd_pcm_lib_ioctl_reset(substream, arg);
case SNDRV_PCM_IOCTL1_CHANNEL_INFO:
return snd_pcm_lib_ioctl_channel_info(substream, arg);
case SNDRV_PCM_IOCTL1_FIFO_SIZE:
return snd_pcm_lib_ioctl_fifo_size(substream, arg);
EXPORT_SYMBOL(snd_pcm_lib_ioctl);
/**
 * snd_pcm_period_elapsed_under_stream_lock() - update the status of runtime for the next period
 * under acquired lock of PCM substream.
 * @substream: the instance of pcm substream.
 *
 * This function is called when the batch of audio data frames as the same size as the period of
 * buffer is already processed in audio data transmission.
 *
 * The call of function updates the status of runtime with the latest position of audio data
 * transmission, checks overrun and underrun over buffer, awaken user processes from waiting for
 * available audio data frames, sampling audio timestamp, and performs stop or drain the PCM
 * substream according to configured threshold.
 *
 * The function is intended to use for the case that PCM driver operates audio data frames under
 * acquired lock of PCM substream; e.g. in callback of any operation of &snd_pcm_ops in process
 * context. In any interrupt context, it's preferable to use ``snd_pcm_period_elapsed()`` instead
 * since lock of PCM substream should be acquired in advance.
 *
 * Developer should pay enough attention that some callbacks in &snd_pcm_ops are done by the call of
 *
 * - .pointer - to retrieve current position of audio data transmission by frame count or XRUN state.
 * - .trigger - with SNDRV_PCM_TRIGGER_STOP at XRUN or DRAINING state.
 * - .get_time_info - to retrieve audio time stamp if needed.
 *
 * Even if more than one period has elapsed since the last call, you have to call this only once.
 */
void snd_pcm_period_elapsed_under_stream_lock(struct snd_pcm_substream *substream)
struct snd_pcm_runtime *runtime;
if (PCM_RUNTIME_CHECK(substream))
runtime = substream->runtime;
if (!snd_pcm_running(substream) ||
snd_pcm_update_hw_ptr0(substream, 1) < 0)
#ifdef CONFIG_SND_PCM_TIMER
if (substream->timer_running)
snd_timer_interrupt(substream->timer, 1);
/* wake up poll()/fasync waiters on new data/space */
snd_kill_fasync(runtime->fasync, SIGIO, POLL_IN);
EXPORT_SYMBOL(snd_pcm_period_elapsed_under_stream_lock);
/**
 * snd_pcm_period_elapsed() - update the status of runtime for the next period by acquiring lock of
 * @substream: the instance of PCM substream.
 *
 * This function is mostly similar to ``snd_pcm_period_elapsed_under_stream_lock()`` except for
 * acquiring lock of PCM substream voluntarily.
 *
 * It's typically called by any type of IRQ handler when hardware IRQ occurs to notify event that
 * the batch of audio data frames as the same size as the period of buffer is already processed in
 * audio data transmission.
 */
void snd_pcm_period_elapsed(struct snd_pcm_substream *substream)
unsigned long flags;
if (snd_BUG_ON(!substream))
snd_pcm_stream_lock_irqsave(substream, flags);
snd_pcm_period_elapsed_under_stream_lock(substream);
snd_pcm_stream_unlock_irqrestore(substream, flags);
EXPORT_SYMBOL(snd_pcm_period_elapsed);
/*
 * Wait until avail_min data becomes available
 * Returns a negative error code if any error occurs during operation.
 * The available space is stored on availp. When err = 0 and avail = 0
 * on the capture stream, it indicates the stream is in DRAINING state.
 *
 * Called with the stream lock held; the lock is dropped around
 * schedule_timeout() and re-acquired afterwards.
 */
static int wait_for_avail(struct snd_pcm_substream *substream,
snd_pcm_uframes_t *availp)
struct snd_pcm_runtime *runtime = substream->runtime;
int is_playback = substream->stream == SNDRV_PCM_STREAM_PLAYBACK;
wait_queue_entry_t wait;
snd_pcm_uframes_t avail = 0;
long wait_time, tout;
init_waitqueue_entry(&wait, current);
set_current_state(TASK_INTERRUPTIBLE);
add_wait_queue(&runtime->tsleep, &wait);
/* with no period wakeups there is nothing to bound the wait */
if (runtime->no_period_wakeup)
wait_time = MAX_SCHEDULE_TIMEOUT;
/* use wait time from substream if available */
if (substream->wait_time) {
wait_time = substream->wait_time;
if (runtime->rate) {
/* at least one full buffer's worth of time, plus 10% margin */
long t = runtime->buffer_size * 1100 / runtime->rate;
wait_time = max(t, wait_time);
wait_time = msecs_to_jiffies(wait_time);
if (signal_pending(current)) {
/*
 * We need to check if space became available already
 * (and thus the wakeup happened already) first to close
 * the race of space already having become available.
 * This check must happen after been added to the waitqueue
 * and having current state be INTERRUPTIBLE.
 */
avail = snd_pcm_avail(substream);
if (avail >= runtime->twake)
snd_pcm_stream_unlock_irq(substream);
tout = schedule_timeout(wait_time);
snd_pcm_stream_lock_irq(substream);
set_current_state(TASK_INTERRUPTIBLE);
switch (runtime->state) {
case SNDRV_PCM_STATE_SUSPENDED:
case SNDRV_PCM_STATE_XRUN:
case SNDRV_PCM_STATE_DRAINING:
avail = 0; /* indicate draining */
case SNDRV_PCM_STATE_OPEN:
case SNDRV_PCM_STATE_SETUP:
case SNDRV_PCM_STATE_DISCONNECTED:
case SNDRV_PCM_STATE_PAUSED:
pcm_dbg(substream->pcm,
"%s timeout (DMA or IRQ trouble?)\n",
is_playback ? "playback write" : "capture read");
set_current_state(TASK_RUNNING);
remove_wait_queue(&runtime->tsleep, &wait);
/* Low-level transfer op: move @bytes at @hwoff of @channel to/from @buf. */
typedef int (*pcm_transfer_f)(struct snd_pcm_substream *substream,
int channel, unsigned long hwoff,
void *buf, unsigned long bytes);
/* Frame-level copy strategy (interleaved vs. non-interleaved) that
 * converts frame offsets/counts and invokes a pcm_transfer_f. */
typedef int (*pcm_copy_f)(struct snd_pcm_substream *, snd_pcm_uframes_t, void *,
snd_pcm_uframes_t, snd_pcm_uframes_t, pcm_transfer_f);
/* calculate the target DMA-buffer position to be written/read;
 * for non-interleaved layouts each channel owns an equal contiguous
 * slice of dma_bytes (channel 0 uses offset 0, i.e. interleaved too).
 */
static void *get_dma_ptr(struct snd_pcm_runtime *runtime,
int channel, unsigned long hwoff)
return runtime->dma_area + hwoff +
channel * (runtime->dma_bytes / runtime->channels);
1950 /* default copy_user ops for write; used for both interleaved and non- modes */
1951 static int default_write_copy(struct snd_pcm_substream *substream,
1952 int channel, unsigned long hwoff,
1953 void *buf, unsigned long bytes)
1955 if (copy_from_user(get_dma_ptr(substream->runtime, channel, hwoff),
1956 (void __user *)buf, bytes))
1961 /* default copy_kernel ops for write */
1962 static int default_write_copy_kernel(struct snd_pcm_substream *substream,
1963 int channel, unsigned long hwoff,
1964 void *buf, unsigned long bytes)
1966 memcpy(get_dma_ptr(substream->runtime, channel, hwoff), buf, bytes);
/* fill silence instead of copy data; called as a transfer helper
 * from __snd_pcm_lib_write() or directly from noninterleaved_copy() when
 * a NULL buffer is passed.  Delegates to the driver's fill_silence op
 * when provided, else writes the format's silence pattern in-place.
 */
static int fill_silence(struct snd_pcm_substream *substream, int channel,
unsigned long hwoff, void *buf, unsigned long bytes)
struct snd_pcm_runtime *runtime = substream->runtime;
/* silencing only makes sense for playback */
if (substream->stream != SNDRV_PCM_STREAM_PLAYBACK)
if (substream->ops->fill_silence)
return substream->ops->fill_silence(substream, channel,
snd_pcm_format_set_silence(runtime->format,
get_dma_ptr(runtime, channel, hwoff),
bytes_to_samples(runtime, bytes));
/* default copy_user ops for read; used for both interleaved and non- modes.
 * Mirror of default_write_copy(): DMA buffer -> user space.
 */
static int default_read_copy(struct snd_pcm_substream *substream,
int channel, unsigned long hwoff,
void *buf, unsigned long bytes)
if (copy_to_user((void __user *)buf,
get_dma_ptr(substream->runtime, channel, hwoff),
2003 /* default copy_kernel ops for read */
2004 static int default_read_copy_kernel(struct snd_pcm_substream *substream,
2005 int channel, unsigned long hwoff,
2006 void *buf, unsigned long bytes)
2008 memcpy(buf, get_dma_ptr(substream->runtime, channel, hwoff), bytes);
/* call transfer function with the converted pointers and sizes;
 * for interleaved mode, it's one shot for all samples: frame offsets
 * and counts are converted to byte units and channel 0 is passed,
 * which addresses the whole frame in the interleaved layout.
 */
static int interleaved_copy(struct snd_pcm_substream *substream,
snd_pcm_uframes_t hwoff, void *data,
snd_pcm_uframes_t off,
snd_pcm_uframes_t frames,
pcm_transfer_f transfer)
struct snd_pcm_runtime *runtime = substream->runtime;
/* convert to bytes */
hwoff = frames_to_bytes(runtime, hwoff);
off = frames_to_bytes(runtime, off);
frames = frames_to_bytes(runtime, frames);
return transfer(substream, 0, hwoff, data + off, frames);
/* call transfer function with the converted pointers and sizes for each
 * non-interleaved channel; when buffer is NULL, silencing instead of copying.
 * @data is treated as an array of per-channel buffer pointers (bufs);
 * a NULL entry silences that single channel.
 */
static int noninterleaved_copy(struct snd_pcm_substream *substream,
snd_pcm_uframes_t hwoff, void *data,
snd_pcm_uframes_t off,
snd_pcm_uframes_t frames,
pcm_transfer_f transfer)
struct snd_pcm_runtime *runtime = substream->runtime;
int channels = runtime->channels;
/* convert to bytes; note that it's not frames_to_bytes() here.
 * in non-interleaved mode, we copy for each channel, thus
 * each copy is n_samples bytes x channels = whole frames.
 */
off = samples_to_bytes(runtime, off);
frames = samples_to_bytes(runtime, frames);
hwoff = samples_to_bytes(runtime, hwoff);
for (c = 0; c < channels; ++c, ++bufs) {
if (!data || !*bufs)
err = fill_silence(substream, c, hwoff, NULL, frames);
err = transfer(substream, c, hwoff, *bufs + off,
/* fill silence on the given buffer position;
 * called from snd_pcm_playback_silence().
 * Dispatches on access mode; NULL data makes both copy helpers fall
 * through to fill_silence() for every channel.
 */
static int fill_silence_frames(struct snd_pcm_substream *substream,
snd_pcm_uframes_t off, snd_pcm_uframes_t frames)
if (substream->runtime->access == SNDRV_PCM_ACCESS_RW_INTERLEAVED ||
substream->runtime->access == SNDRV_PCM_ACCESS_MMAP_INTERLEAVED)
return interleaved_copy(substream, off, NULL, 0, frames,
return noninterleaved_copy(substream, off, NULL, 0, frames,
2078 /* sanity-check for read/write methods */
2079 static int pcm_sanity_check(struct snd_pcm_substream *substream)
2081 struct snd_pcm_runtime *runtime;
2082 if (PCM_RUNTIME_CHECK(substream))
2084 runtime = substream->runtime;
2085 if (snd_BUG_ON(!substream->ops->copy_user && !runtime->dma_area))
2087 if (runtime->state == SNDRV_PCM_STATE_OPEN)
/* map the current runtime state to 0 (transferable) or a negative errno
 * for states in which the buffer must not be accessed.
 */
static int pcm_accessible_state(struct snd_pcm_runtime *runtime)
switch (runtime->state) {
case SNDRV_PCM_STATE_PREPARED:
case SNDRV_PCM_STATE_RUNNING:
case SNDRV_PCM_STATE_PAUSED:
case SNDRV_PCM_STATE_XRUN:
case SNDRV_PCM_STATE_SUSPENDED:
/* update to the given appl_ptr and call ack callback if needed;
 * when an error is returned, take back to the original value.
 * Rejects out-of-boundary values and, for NO_REWINDS hardware, any
 * move that is not a forward step within one buffer_size.
 */
int pcm_lib_apply_appl_ptr(struct snd_pcm_substream *substream,
snd_pcm_uframes_t appl_ptr)
struct snd_pcm_runtime *runtime = substream->runtime;
snd_pcm_uframes_t old_appl_ptr = runtime->control->appl_ptr;
snd_pcm_sframes_t diff;
if (old_appl_ptr == appl_ptr)
if (appl_ptr >= runtime->boundary)
/*
 * check if a rewind is requested by the application
 */
if (substream->runtime->info & SNDRV_PCM_INFO_NO_REWINDS) {
diff = appl_ptr - old_appl_ptr;
if (diff > runtime->buffer_size)
/* negative diff: account for boundary wrap of a forward move */
if (runtime->boundary + diff > runtime->buffer_size)
runtime->control->appl_ptr = appl_ptr;
if (substream->ops->ack) {
ret = substream->ops->ack(substream);
/* ack failed: roll the pointer back to its previous value */
runtime->control->appl_ptr = old_appl_ptr;
__snd_pcm_xrun(substream);
trace_applptr(substream, old_appl_ptr, appl_ptr);
/* the common loop for read/write data
 *
 * Shared backend of the PCM read/write syscalls and in-kernel helpers.
 * Selects a copy strategy (interleaved vs. non-interleaved) and a
 * per-chunk transfer op (user copy, kernel copy, or silence fill),
 * then loops: wait for avail (unless nonblocking), copy one contiguous
 * chunk with the stream lock dropped, advance appl_ptr, and auto-start
 * the stream once the start_threshold is reached.
 * Returns frames transferred, or a negative errno if nothing moved.
 */
snd_pcm_sframes_t __snd_pcm_lib_xfer(struct snd_pcm_substream *substream,
void *data, bool interleaved,
snd_pcm_uframes_t size, bool in_kernel)
struct snd_pcm_runtime *runtime = substream->runtime;
snd_pcm_uframes_t xfer = 0;
snd_pcm_uframes_t offset = 0;
snd_pcm_uframes_t avail;
pcm_transfer_f transfer;
err = pcm_sanity_check(substream);
is_playback = substream->stream == SNDRV_PCM_STREAM_PLAYBACK;
/* validate that the claimed layout matches the configured access mode */
if (runtime->access != SNDRV_PCM_ACCESS_RW_INTERLEAVED &&
runtime->channels > 1)
writer = interleaved_copy;
if (runtime->access != SNDRV_PCM_ACCESS_RW_NONINTERLEAVED)
writer = noninterleaved_copy;
/* NULL data means "write silence" (playback only) */
transfer = fill_silence;
} else if (in_kernel) {
if (substream->ops->copy_kernel)
transfer = substream->ops->copy_kernel;
transfer = is_playback ?
default_write_copy_kernel : default_read_copy_kernel;
if (substream->ops->copy_user)
transfer = (pcm_transfer_f)substream->ops->copy_user;
transfer = is_playback ?
default_write_copy : default_read_copy;
nonblock = !!(substream->f_flags & O_NONBLOCK);
snd_pcm_stream_lock_irq(substream);
err = pcm_accessible_state(runtime);
runtime->twake = runtime->control->avail_min ? : 1;
if (runtime->state == SNDRV_PCM_STATE_RUNNING)
snd_pcm_update_hw_ptr(substream);
/*
 * If size < start_threshold, wait indefinitely. Another
 * thread may start capture
 */
runtime->state == SNDRV_PCM_STATE_PREPARED &&
size >= runtime->start_threshold) {
err = snd_pcm_start(substream);
avail = snd_pcm_avail(substream);
snd_pcm_uframes_t frames, appl_ptr, appl_ofs;
snd_pcm_uframes_t cont;
runtime->state == SNDRV_PCM_STATE_DRAINING) {
snd_pcm_stop(substream, SNDRV_PCM_STATE_SETUP);
runtime->twake = min_t(snd_pcm_uframes_t, size,
runtime->control->avail_min ? : 1);
err = wait_for_avail(substream, &avail);
continue; /* draining */
frames = size > avail ? avail : size;
appl_ptr = READ_ONCE(runtime->control->appl_ptr);
appl_ofs = appl_ptr % runtime->buffer_size;
/* limit the chunk so it doesn't wrap around the ring buffer */
cont = runtime->buffer_size - appl_ofs;
if (snd_BUG_ON(!frames)) {
if (!atomic_inc_unless_negative(&runtime->buffer_accessing)) {
/* drop the lock for the (possibly faulting/sleeping) copy */
snd_pcm_stream_unlock_irq(substream);
snd_pcm_dma_buffer_sync(substream, SNDRV_DMA_SYNC_CPU);
err = writer(substream, appl_ofs, data, offset, frames,
snd_pcm_dma_buffer_sync(substream, SNDRV_DMA_SYNC_DEVICE);
snd_pcm_stream_lock_irq(substream);
atomic_dec(&runtime->buffer_accessing);
/* state may have changed while the lock was dropped */
err = pcm_accessible_state(runtime);
if (appl_ptr >= runtime->boundary)
appl_ptr -= runtime->boundary;
err = pcm_lib_apply_appl_ptr(substream, appl_ptr);
runtime->state == SNDRV_PCM_STATE_PREPARED &&
snd_pcm_playback_hw_avail(runtime) >= (snd_pcm_sframes_t)runtime->start_threshold) {
err = snd_pcm_start(substream);
if (xfer > 0 && err >= 0)
snd_pcm_update_state(substream, runtime);
snd_pcm_stream_unlock_irq(substream);
/* partial success wins over a late error */
return xfer > 0 ? (snd_pcm_sframes_t)xfer : err;
EXPORT_SYMBOL(__snd_pcm_lib_xfer);
/*
 * standard channel mapping helpers
 */
/* default channel maps for multi-channel playbacks, up to 8 channels;
 * ordering: front pair, rear pair, center/LFE, then side pair.
 */
const struct snd_pcm_chmap_elem snd_pcm_std_chmaps[] = {
.map = { SNDRV_CHMAP_MONO } },
.map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR } },
.map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR,
SNDRV_CHMAP_RL, SNDRV_CHMAP_RR } },
.map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR,
SNDRV_CHMAP_RL, SNDRV_CHMAP_RR,
SNDRV_CHMAP_FC, SNDRV_CHMAP_LFE } },
.map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR,
SNDRV_CHMAP_RL, SNDRV_CHMAP_RR,
SNDRV_CHMAP_FC, SNDRV_CHMAP_LFE,
SNDRV_CHMAP_SL, SNDRV_CHMAP_SR } },
EXPORT_SYMBOL_GPL(snd_pcm_std_chmaps);
/* alternative channel maps with CLFE <-> surround swapped for 6/8 channels;
 * identical to snd_pcm_std_chmaps except center/LFE precede the rears.
 */
const struct snd_pcm_chmap_elem snd_pcm_alt_chmaps[] = {
.map = { SNDRV_CHMAP_MONO } },
.map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR } },
.map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR,
SNDRV_CHMAP_RL, SNDRV_CHMAP_RR } },
.map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR,
SNDRV_CHMAP_FC, SNDRV_CHMAP_LFE,
SNDRV_CHMAP_RL, SNDRV_CHMAP_RR } },
.map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR,
SNDRV_CHMAP_FC, SNDRV_CHMAP_LFE,
SNDRV_CHMAP_RL, SNDRV_CHMAP_RR,
SNDRV_CHMAP_SL, SNDRV_CHMAP_SR } },
EXPORT_SYMBOL_GPL(snd_pcm_alt_chmaps);
/* true if a map with @ch channels is usable for this chmap info:
 * within max_channels and allowed by channel_mask (0 mask = allow all).
 */
static bool valid_chmap_channels(const struct snd_pcm_chmap *info, int ch)
if (ch > info->max_channels)
return !info->channel_mask || (info->channel_mask & (1U << ch));
/* info callback for the channel-map ctl element: an integer array of
 * max_channels entries, each in [0, SNDRV_CHMAP_LAST].
 */
static int pcm_chmap_ctl_info(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_info *uinfo)
struct snd_pcm_chmap *info = snd_kcontrol_chip(kcontrol);
uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
uinfo->count = info->max_channels;
uinfo->value.integer.min = 0;
uinfo->value.integer.max = SNDRV_CHMAP_LAST;
/* get callback for channel map ctl element
 * stores the channel position firstly matching with the current channels.
 * With no runtime (stream not open) the zeroed value array is returned.
 */
static int pcm_chmap_ctl_get(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
struct snd_pcm_chmap *info = snd_kcontrol_chip(kcontrol);
unsigned int idx = snd_ctl_get_ioffidx(kcontrol, &ucontrol->id);
struct snd_pcm_substream *substream;
const struct snd_pcm_chmap_elem *map;
substream = snd_pcm_chmap_substream(info, idx);
memset(ucontrol->value.integer.value, 0,
sizeof(long) * info->max_channels);
if (!substream->runtime)
return 0; /* no channels set */
for (map = info->chmap; map->channels; map++) {
if (map->channels == substream->runtime->channels &&
valid_chmap_channels(info, map->channels)) {
for (i = 0; i < map->channels; i++)
ucontrol->value.integer.value[i] = map->map[i];
/* tlv callback for channel map ctl element
 * expands the pre-defined channel maps in a form of TLV:
 * a CONTAINER header followed by one CHMAP_FIXED entry per valid map.
 * @size is checked before each write; @count accumulates payload bytes
 * and is written back into the container header at the end.
 */
static int pcm_chmap_ctl_tlv(struct snd_kcontrol *kcontrol, int op_flag,
unsigned int size, unsigned int __user *tlv)
struct snd_pcm_chmap *info = snd_kcontrol_chip(kcontrol);
const struct snd_pcm_chmap_elem *map;
unsigned int __user *dst;
if (put_user(SNDRV_CTL_TLVT_CONTAINER, tlv))
for (map = info->chmap; map->channels; map++) {
int chs_bytes = map->channels * 4;
if (!valid_chmap_channels(info, map->channels))
if (put_user(SNDRV_CTL_TLVT_CHMAP_FIXED, dst) ||
put_user(chs_bytes, dst + 1))
if (size < chs_bytes)
for (c = 0; c < map->channels; c++) {
if (put_user(map->map[c], dst))
if (put_user(count, tlv + 1))
/* private_free callback: unlink the kctl from the PCM stream so it is
 * not referenced after the control is destroyed.
 */
static void pcm_chmap_ctl_private_free(struct snd_kcontrol *kcontrol)
struct snd_pcm_chmap *info = snd_kcontrol_chip(kcontrol);
info->pcm->streams[info->stream].chmap_kctl = NULL;
/**
 * snd_pcm_add_chmap_ctls - create channel-mapping control elements
 * @pcm: the assigned PCM instance
 * @stream: stream direction
 * @chmap: channel map elements (for query)
 * @max_channels: the max number of channels for the stream
 * @private_value: the value passed to each kcontrol's private_value field
 * @info_ret: store struct snd_pcm_chmap instance if non-NULL
 *
 * Create channel-mapping control elements assigned to the given PCM stream(s).
 * Return: Zero if successful, or a negative error value.
 */
int snd_pcm_add_chmap_ctls(struct snd_pcm *pcm, int stream,
const struct snd_pcm_chmap_elem *chmap,
unsigned long private_value,
struct snd_pcm_chmap **info_ret)
struct snd_pcm_chmap *info;
struct snd_kcontrol_new knew = {
.iface = SNDRV_CTL_ELEM_IFACE_PCM,
.access = SNDRV_CTL_ELEM_ACCESS_READ |
SNDRV_CTL_ELEM_ACCESS_TLV_READ |
SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK,
.info = pcm_chmap_ctl_info,
.get = pcm_chmap_ctl_get,
.tlv.c = pcm_chmap_ctl_tlv,
/* refuse double registration for the same stream direction */
if (WARN_ON(pcm->streams[stream].chmap_kctl))
info = kzalloc(sizeof(*info), GFP_KERNEL);
info->stream = stream;
info->chmap = chmap;
info->max_channels = max_channels;
if (stream == SNDRV_PCM_STREAM_PLAYBACK)
knew.name = "Playback Channel Map";
knew.name = "Capture Channel Map";
knew.device = pcm->device;
/* one ctl element instance per substream */
knew.count = pcm->streams[stream].substream_count;
knew.private_value = private_value;
info->kctl = snd_ctl_new1(&knew, info);
info->kctl->private_free = pcm_chmap_ctl_private_free;
err = snd_ctl_add(pcm->card, info->kctl);
pcm->streams[stream].chmap_kctl = info->kctl;
EXPORT_SYMBOL_GPL(snd_pcm_add_chmap_ctls);