// [linux-2.6-block.git] sound/firewire/amdtp-stream.c
/*
 * Audio and Music Data Transmission Protocol (IEC 61883-6) streams
 * with Common Isochronous Packet (IEC 61883-1) headers
 *
 * Copyright (c) Clemens Ladisch <clemens@ladisch.de>
 * Licensed under the terms of the GNU General Public License, version 2.
 */

#include <linux/device.h>
#include <linux/err.h>
#include <linux/firewire.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>
#include "amdtp-stream.h"

#define TICKS_PER_CYCLE		3072
#define CYCLES_PER_SECOND	8000
#define TICKS_PER_SECOND	(TICKS_PER_CYCLE * CYCLES_PER_SECOND)

/* Always support Linux tracing subsystem. */
#define CREATE_TRACE_POINTS
#include "amdtp-stream-trace.h"

#define TRANSFER_DELAY_TICKS	0x2e00 /* 479.17 microseconds */

/* isochronous header parameters */
#define ISO_DATA_LENGTH_SHIFT	16
#define TAG_NO_CIP_HEADER	0
#define TAG_CIP			1

/* common isochronous packet header parameters */
#define CIP_EOH_SHIFT		31
#define CIP_EOH			(1u << CIP_EOH_SHIFT)
#define CIP_EOH_MASK		0x80000000
#define CIP_SID_SHIFT		24
#define CIP_SID_MASK		0x3f000000
#define CIP_DBS_MASK		0x00ff0000
#define CIP_DBS_SHIFT		16
#define CIP_SPH_MASK		0x00000400
#define CIP_SPH_SHIFT		10
#define CIP_DBC_MASK		0x000000ff
#define CIP_FMT_SHIFT		24
#define CIP_FMT_MASK		0x3f000000
#define CIP_FDF_MASK		0x00ff0000
#define CIP_FDF_SHIFT		16
#define CIP_SYT_MASK		0x0000ffff
#define CIP_SYT_NO_INFO		0xffff

/* Audio and Music transfer protocol specific parameters */
#define CIP_FMT_AM		0x10
#define AMDTP_FDF_NO_DATA	0xff

/* TODO: make these configurable */
#define INTERRUPT_INTERVAL	16
#define QUEUE_LENGTH		48

// For iso header, tstamp and two CIP header quadlets.
#define IR_CTX_HEADER_SIZE_CIP		16
// For iso header and tstamp.
#define IR_CTX_HEADER_SIZE_NO_CIP	8
#define HEADER_TSTAMP_MASK	0x0000ffff

static void pcm_period_tasklet(unsigned long data);

/**
 * amdtp_stream_init - initialize an AMDTP stream structure
 * @s: the AMDTP stream to initialize
 * @unit: the target of the stream
 * @dir: the direction of the stream
 * @flags: the packet transmission method to use
 * @fmt: the value of the fmt field in the CIP header
 * @process_data_blocks: callback handler to process data blocks
 * @protocol_size: the size to allocate for protocol-specific data
 */
int amdtp_stream_init(struct amdtp_stream *s, struct fw_unit *unit,
		      enum amdtp_stream_direction dir, enum cip_flags flags,
		      unsigned int fmt,
		      amdtp_stream_process_data_blocks_t process_data_blocks,
		      unsigned int protocol_size)
{
	if (process_data_blocks == NULL)
		return -EINVAL;

	s->protocol = kzalloc(protocol_size, GFP_KERNEL);
	if (!s->protocol)
		return -ENOMEM;

	s->unit = unit;
	s->direction = dir;
	s->flags = flags;
	s->context = ERR_PTR(-1);
	mutex_init(&s->mutex);
	tasklet_init(&s->period_tasklet, pcm_period_tasklet, (unsigned long)s);
	s->packet_index = 0;

	init_waitqueue_head(&s->callback_wait);
	s->callbacked = false;

	s->fmt = fmt;
	s->process_data_blocks = process_data_blocks;

	return 0;
}
EXPORT_SYMBOL(amdtp_stream_init);

/**
 * amdtp_stream_destroy - free stream resources
 * @s: the AMDTP stream to destroy
 */
void amdtp_stream_destroy(struct amdtp_stream *s)
{
	/* Not initialized. */
	if (s->protocol == NULL)
		return;

	WARN_ON(amdtp_stream_running(s));
	kfree(s->protocol);
	mutex_destroy(&s->mutex);
}
EXPORT_SYMBOL(amdtp_stream_destroy);

const unsigned int amdtp_syt_intervals[CIP_SFC_COUNT] = {
	[CIP_SFC_32000]  =  8,
	[CIP_SFC_44100]  =  8,
	[CIP_SFC_48000]  =  8,
	[CIP_SFC_88200]  = 16,
	[CIP_SFC_96000]  = 16,
	[CIP_SFC_176400] = 32,
	[CIP_SFC_192000] = 32,
};
EXPORT_SYMBOL(amdtp_syt_intervals);

const unsigned int amdtp_rate_table[CIP_SFC_COUNT] = {
	[CIP_SFC_32000]  =  32000,
	[CIP_SFC_44100]  =  44100,
	[CIP_SFC_48000]  =  48000,
	[CIP_SFC_88200]  =  88200,
	[CIP_SFC_96000]  =  96000,
	[CIP_SFC_176400] = 176400,
	[CIP_SFC_192000] = 192000,
};
EXPORT_SYMBOL(amdtp_rate_table);

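// Round the PCM period/buffer size to a common multiple of the SYT_INTERVALs
// of all sampling rates still allowed by the runtime, so that period
// boundaries coincide with packet boundaries in blocking mode.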
static int apply_constraint_to_size(struct snd_pcm_hw_params *params,
				    struct snd_pcm_hw_rule *rule)
{
	struct snd_interval *s = hw_param_interval(params, rule->var);
	const struct snd_interval *r =
		hw_param_interval_c(params, SNDRV_PCM_HW_PARAM_RATE);
	struct snd_interval t = {0};
	unsigned int step = 0;
	int i;

	for (i = 0; i < CIP_SFC_COUNT; ++i) {
		if (snd_interval_test(r, amdtp_rate_table[i]))
			step = max(step, amdtp_syt_intervals[i]);
	}

	t.min = roundup(s->min, step);
	t.max = rounddown(s->max, step);
	t.integer = 1;

	return snd_interval_refine(s, &t);
}

/**
 * amdtp_stream_add_pcm_hw_constraints - add hw constraints for PCM substream
 * @s: the AMDTP stream, which must be initialized.
 * @runtime: the PCM substream runtime
 */
int amdtp_stream_add_pcm_hw_constraints(struct amdtp_stream *s,
					struct snd_pcm_runtime *runtime)
{
	struct snd_pcm_hardware *hw = &runtime->hw;
	int err;

	hw->info = SNDRV_PCM_INFO_BATCH |
		   SNDRV_PCM_INFO_BLOCK_TRANSFER |
		   SNDRV_PCM_INFO_INTERLEAVED |
		   SNDRV_PCM_INFO_JOINT_DUPLEX |
		   SNDRV_PCM_INFO_MMAP |
		   SNDRV_PCM_INFO_MMAP_VALID;

	/* SNDRV_PCM_INFO_BATCH */
	hw->periods_min = 2;
	hw->periods_max = UINT_MAX;

	/* bytes for a frame */
	hw->period_bytes_min = 4 * hw->channels_max;

	/* Just to prevent from allocating too many pages. */
	hw->period_bytes_max = hw->period_bytes_min * 2048;
	hw->buffer_bytes_max = hw->period_bytes_max * hw->periods_min;

	/*
	 * Currently firewire-lib processes 16 packets in one software
	 * interrupt callback. This equals 2 msec, but the actual interval
	 * between interrupts has jitter.
	 * Additionally, even if a constraint is added to fit the period size
	 * to 2 msec, the calculated frames per period don't correspond to
	 * exactly 2 msec, depending on the sampling rate.
	 * Anyway, the interval to call snd_pcm_period_elapsed() cannot be
	 * 2 msec. Here let us use 5 msec for a safe period interrupt.
	 */
	err = snd_pcm_hw_constraint_minmax(runtime,
					   SNDRV_PCM_HW_PARAM_PERIOD_TIME,
					   5000, UINT_MAX);
	if (err < 0)
		goto end;

	/* A non-blocking stream has no more constraints. */
	if (!(s->flags & CIP_BLOCKING))
		goto end;

	/*
	 * One AMDTP packet can include several frames. In blocking mode, the
	 * number equals SYT_INTERVAL, so it is 8, 16 or 32, depending on the
	 * sampling rate. For an accurate period interrupt, it's preferable to
	 * align period/buffer sizes to the current SYT_INTERVAL.
	 */
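	/*
	 * For example, at 48.0 kHz the SYT_INTERVAL is 8, so the rules below
	 * round period and buffer sizes to multiples of 8 frames.
	 */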
	err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_PERIOD_SIZE,
				  apply_constraint_to_size, NULL,
				  SNDRV_PCM_HW_PARAM_PERIOD_SIZE,
				  SNDRV_PCM_HW_PARAM_RATE, -1);
	if (err < 0)
		goto end;
	err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_BUFFER_SIZE,
				  apply_constraint_to_size, NULL,
				  SNDRV_PCM_HW_PARAM_BUFFER_SIZE,
				  SNDRV_PCM_HW_PARAM_RATE, -1);
	if (err < 0)
		goto end;
end:
	return err;
}
EXPORT_SYMBOL(amdtp_stream_add_pcm_hw_constraints);

/**
 * amdtp_stream_set_parameters - set stream parameters
 * @s: the AMDTP stream to configure
 * @rate: the sample rate
 * @data_block_quadlets: the size of a data block in quadlets
 *
 * The parameters must be set before the stream is started, and must not be
 * changed while the stream is running.
 */
int amdtp_stream_set_parameters(struct amdtp_stream *s, unsigned int rate,
				unsigned int data_block_quadlets)
{
	unsigned int sfc;

	for (sfc = 0; sfc < ARRAY_SIZE(amdtp_rate_table); ++sfc) {
		if (amdtp_rate_table[sfc] == rate)
			break;
	}
	if (sfc == ARRAY_SIZE(amdtp_rate_table))
		return -EINVAL;

	s->sfc = sfc;
	s->data_block_quadlets = data_block_quadlets;
	s->syt_interval = amdtp_syt_intervals[sfc];

	// Default buffering in the device.
	if (s->direction == AMDTP_OUT_STREAM) {
		s->ctx_data.rx.transfer_delay =
					TRANSFER_DELAY_TICKS - TICKS_PER_CYCLE;

		if (s->flags & CIP_BLOCKING) {
			// Additional buffering needed to adjust for no-data
			// packets.
			s->ctx_data.rx.transfer_delay +=
				TICKS_PER_SECOND * s->syt_interval / rate;
		}
	}

	return 0;
}
EXPORT_SYMBOL(amdtp_stream_set_parameters);

/**
 * amdtp_stream_get_max_payload - get the stream's packet size
 * @s: the AMDTP stream
 *
 * This function must not be called before the stream has been configured
 * with amdtp_stream_set_parameters().
 */
unsigned int amdtp_stream_get_max_payload(struct amdtp_stream *s)
{
	unsigned int multiplier = 1;
	unsigned int cip_header_size = 0;

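	// CIP_JUMBO_PAYLOAD: some devices can transfer more data blocks per
	// packet than the nominal SYT_INTERVAL, so reserve five times the
	// nominal payload for them.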
	if (s->flags & CIP_JUMBO_PAYLOAD)
		multiplier = 5;
	if (!(s->flags & CIP_NO_HEADER))
		cip_header_size = sizeof(__be32) * 2;

	return cip_header_size +
		s->syt_interval * s->data_block_quadlets * sizeof(__be32) * multiplier;
}
EXPORT_SYMBOL(amdtp_stream_get_max_payload);

/**
 * amdtp_stream_pcm_prepare - prepare PCM device for running
 * @s: the AMDTP stream
 *
 * This function should be called from the PCM device's .prepare callback.
 */
void amdtp_stream_pcm_prepare(struct amdtp_stream *s)
{
	tasklet_kill(&s->period_tasklet);
	s->pcm_buffer_pointer = 0;
	s->pcm_period_pointer = 0;
}
EXPORT_SYMBOL(amdtp_stream_pcm_prepare);

static unsigned int calculate_data_blocks(struct amdtp_stream *s,
					  unsigned int syt)
{
	unsigned int phase, data_blocks;

	/* Blocking mode. */
	if (s->flags & CIP_BLOCKING) {
		/* This module generates an empty packet for 'no data'. */
		if (syt == CIP_SYT_NO_INFO)
			data_blocks = 0;
		else
			data_blocks = s->syt_interval;
	/* Non-blocking mode. */
	} else {
		if (!cip_sfc_is_base_44100(s->sfc)) {
			// Sample_rate / 8000 is an integer, and precomputed.
			data_blocks = s->ctx_data.rx.data_block_state;
		} else {
			phase = s->ctx_data.rx.data_block_state;

			/*
			 * This calculates the number of data blocks per packet
			 * so that
			 * 1) the overall rate is correct and exactly
			 *    synchronized to the bus clock, and
			 * 2) packets with a rounded-up number of blocks occur
			 *    as early as possible in the sequence (to prevent
			 *    underruns of the device's buffer).
			 */
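			/*
			 * For example, at 44.1 kHz this distributes 441 data
			 * blocks over a sequence of 80 packets, since
			 * 44100 samples per second / 8000 packets per second
			 * is 5.5125 blocks per packet on average.
			 */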
			if (s->sfc == CIP_SFC_44100)
				/* 6 6 5 6 5 6 5 ... */
				data_blocks = 5 + ((phase & 1) ^
						   (phase == 0 || phase >= 40));
			else
				/* 12 11 11 11 11 ... or 23 22 22 22 22 ... */
				data_blocks = 11 * (s->sfc >> 1) + (phase == 0);
			if (++phase >= (80 >> (s->sfc >> 1)))
				phase = 0;
			s->ctx_data.rx.data_block_state = phase;
		}
	}

	return data_blocks;
}

static unsigned int calculate_syt(struct amdtp_stream *s,
				  unsigned int cycle)
{
	unsigned int syt_offset, phase, index, syt;

	if (s->ctx_data.rx.last_syt_offset < TICKS_PER_CYCLE) {
		if (!cip_sfc_is_base_44100(s->sfc))
			syt_offset = s->ctx_data.rx.last_syt_offset +
				     s->ctx_data.rx.syt_offset_state;
		else {
			/*
			 * The time, in ticks, of the n'th SYT_INTERVAL sample is:
			 *   n * SYT_INTERVAL * 24576000 / sample_rate
			 * Modulo TICKS_PER_CYCLE, the difference between successive
			 * elements is about 1386.23. Rounding the results of this
			 * formula to the SYT precision results in a sequence of
			 * differences that begins with:
			 *   1386 1386 1387 1386 1386 1386 1387 1386 1386 1386 1387 ...
			 * This code generates _exactly_ the same sequence.
			 */
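			/*
			 * For CIP_SFC_44100, SYT_INTERVAL is 8 samples, so
			 * successive timestamps advance by
			 * 8 * 24576000 / 44100 = 4458.23 ticks, i.e. 1386.23
			 * modulo TICKS_PER_CYCLE (3072).
			 */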
			phase = s->ctx_data.rx.syt_offset_state;
			index = phase % 13;
			syt_offset = s->ctx_data.rx.last_syt_offset;
			syt_offset += 1386 + ((index && !(index & 3)) ||
					      phase == 146);
			if (++phase >= 147)
				phase = 0;
			s->ctx_data.rx.syt_offset_state = phase;
		}
	} else
		syt_offset = s->ctx_data.rx.last_syt_offset - TICKS_PER_CYCLE;
	s->ctx_data.rx.last_syt_offset = syt_offset;

	if (syt_offset < TICKS_PER_CYCLE) {
		syt_offset += s->ctx_data.rx.transfer_delay;
		syt = (cycle + syt_offset / TICKS_PER_CYCLE) << 12;
		syt += syt_offset % TICKS_PER_CYCLE;

		return syt & CIP_SYT_MASK;
	} else {
		return CIP_SYT_NO_INFO;
	}
}

static void update_pcm_pointers(struct amdtp_stream *s,
				struct snd_pcm_substream *pcm,
				unsigned int frames)
{
	unsigned int ptr;

	ptr = s->pcm_buffer_pointer + frames;
	if (ptr >= pcm->runtime->buffer_size)
		ptr -= pcm->runtime->buffer_size;
	WRITE_ONCE(s->pcm_buffer_pointer, ptr);

	s->pcm_period_pointer += frames;
	if (s->pcm_period_pointer >= pcm->runtime->period_size) {
		s->pcm_period_pointer -= pcm->runtime->period_size;
		tasklet_hi_schedule(&s->period_tasklet);
	}
}

static void pcm_period_tasklet(unsigned long data)
{
	struct amdtp_stream *s = (void *)data;
	struct snd_pcm_substream *pcm = READ_ONCE(s->pcm);

	if (pcm)
		snd_pcm_period_elapsed(pcm);
}

static int queue_packet(struct amdtp_stream *s, unsigned int payload_length)
{
	struct fw_iso_packet p = {0};
	int err = 0;

	if (IS_ERR(s->context))
		goto end;

	p.interrupt = IS_ALIGNED(s->packet_index + 1, INTERRUPT_INTERVAL);
	p.tag = s->tag;

	if (s->direction == AMDTP_IN_STREAM) {
		// Queue one packet for IR context.
		p.header_length = s->ctx_data.tx.ctx_header_size;
	} else {
		// No header for this packet.
		p.header_length = 0;
	}

	if (payload_length > 0)
		p.payload_length = payload_length;
	else
		p.skip = true;
	err = fw_iso_context_queue(s->context, &p, &s->buffer.iso_buffer,
				   s->buffer.packets[s->packet_index].offset);
	if (err < 0) {
		dev_err(&s->unit->device, "queueing error: %d\n", err);
		goto end;
	}

	if (++s->packet_index >= QUEUE_LENGTH)
		s->packet_index = 0;
end:
	return err;
}

static inline int queue_out_packet(struct amdtp_stream *s,
				   unsigned int payload_length)
{
	return queue_packet(s, payload_length);
}

static inline int queue_in_packet(struct amdtp_stream *s)
{
	return queue_packet(s, s->ctx_data.tx.max_ctx_payload_length);
}

static int handle_out_packet(struct amdtp_stream *s, unsigned int cycle,
			     const __be32 *ctx_header, __be32 *buffer,
			     unsigned int index)
{
	unsigned int syt;
	unsigned int data_blocks;
	unsigned int pcm_frames;
	unsigned int payload_length;
	struct snd_pcm_substream *pcm;

	syt = calculate_syt(s, cycle);
	data_blocks = calculate_data_blocks(s, syt);
	pcm_frames = s->process_data_blocks(s, buffer + 2, data_blocks, &syt);

	if (s->flags & CIP_DBC_IS_END_EVENT)
		s->data_block_counter =
				(s->data_block_counter + data_blocks) & 0xff;

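	// First CIP quadlet: SID, DBS, SPH and DBC. Second CIP quadlet: EOH,
	// FMT, FDF and SYT.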
	buffer[0] = cpu_to_be32(READ_ONCE(s->source_node_id_field) |
				(s->data_block_quadlets << CIP_DBS_SHIFT) |
				((s->sph << CIP_SPH_SHIFT) & CIP_SPH_MASK) |
				s->data_block_counter);
	buffer[1] = cpu_to_be32(CIP_EOH |
				((s->fmt << CIP_FMT_SHIFT) & CIP_FMT_MASK) |
				((s->ctx_data.rx.fdf << CIP_FDF_SHIFT) & CIP_FDF_MASK) |
				(syt & CIP_SYT_MASK));

	if (!(s->flags & CIP_DBC_IS_END_EVENT))
		s->data_block_counter =
				(s->data_block_counter + data_blocks) & 0xff;
	payload_length = 8 + data_blocks * 4 * s->data_block_quadlets;

	trace_amdtp_packet(s, cycle, buffer, payload_length, data_blocks, index);

	if (queue_out_packet(s, payload_length) < 0)
		return -EIO;

	pcm = READ_ONCE(s->pcm);
	if (pcm && pcm_frames > 0)
		update_pcm_pointers(s, pcm, pcm_frames);

	/* No need to return the number of handled data blocks. */
	return 0;
}

static int handle_out_packet_without_header(struct amdtp_stream *s,
			unsigned int cycle, const __be32 *ctx_header,
			__be32 *buffer, unsigned int index)
{
	unsigned int syt;
	unsigned int data_blocks;
	unsigned int pcm_frames;
	unsigned int payload_length;
	struct snd_pcm_substream *pcm;

	syt = calculate_syt(s, cycle);
	data_blocks = calculate_data_blocks(s, syt);
	pcm_frames = s->process_data_blocks(s, buffer, data_blocks, &syt);
	s->data_block_counter = (s->data_block_counter + data_blocks) & 0xff;

	payload_length = data_blocks * 4 * s->data_block_quadlets;

	trace_amdtp_packet(s, cycle, NULL, payload_length, data_blocks, index);

	if (queue_out_packet(s, payload_length) < 0)
		return -EIO;

	pcm = READ_ONCE(s->pcm);
	if (pcm && pcm_frames > 0)
		update_pcm_pointers(s, pcm, pcm_frames);

	/* No need to return the number of handled data blocks. */
	return 0;
}

static int check_cip_header(struct amdtp_stream *s, const __be32 *buf,
			    unsigned int payload_length,
			    unsigned int *data_blocks, unsigned int *syt)
{
	u32 cip_header[2];
	unsigned int sph;
	unsigned int fmt;
	unsigned int fdf;
	unsigned int data_block_counter;
	bool lost;

	cip_header[0] = be32_to_cpu(buf[0]);
	cip_header[1] = be32_to_cpu(buf[1]);

	/*
	 * This module supports 'Two-quadlet CIP header with SYT field'.
	 * For convenience, also check whether the FMT field is AM824 or not.
	 */
	if ((((cip_header[0] & CIP_EOH_MASK) == CIP_EOH) ||
	     ((cip_header[1] & CIP_EOH_MASK) != CIP_EOH)) &&
	    (!(s->flags & CIP_HEADER_WITHOUT_EOH))) {
		dev_info_ratelimited(&s->unit->device,
				     "Invalid CIP header for AMDTP: %08X:%08X\n",
				     cip_header[0], cip_header[1]);
		return -EAGAIN;
	}

	/* Check whether the protocol is valid or not. */
	sph = (cip_header[0] & CIP_SPH_MASK) >> CIP_SPH_SHIFT;
	fmt = (cip_header[1] & CIP_FMT_MASK) >> CIP_FMT_SHIFT;
	if (sph != s->sph || fmt != s->fmt) {
		dev_info_ratelimited(&s->unit->device,
				     "Detect unexpected protocol: %08x %08x\n",
				     cip_header[0], cip_header[1]);
		return -EAGAIN;
	}

	/* Calculate data blocks */
	fdf = (cip_header[1] & CIP_FDF_MASK) >> CIP_FDF_SHIFT;
	if (payload_length < sizeof(__be32) * 2 ||
	    (fmt == CIP_FMT_AM && fdf == AMDTP_FDF_NO_DATA)) {
		*data_blocks = 0;
	} else {
		unsigned int data_block_quadlets =
				(cip_header[0] & CIP_DBS_MASK) >> CIP_DBS_SHIFT;
		/* avoid division by zero */
		if (data_block_quadlets == 0) {
			dev_err(&s->unit->device,
				"Detect invalid value in dbs field: %08X\n",
				cip_header[0]);
			return -EPROTO;
		}
		if (s->flags & CIP_WRONG_DBS)
			data_block_quadlets = s->data_block_quadlets;

		*data_blocks = (payload_length / sizeof(__be32) - 2) /
							data_block_quadlets;
	}

	/* Check data block counter continuity */
	data_block_counter = cip_header[0] & CIP_DBC_MASK;
	if (*data_blocks == 0 && (s->flags & CIP_EMPTY_HAS_WRONG_DBC) &&
	    s->data_block_counter != UINT_MAX)
		data_block_counter = s->data_block_counter;

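	/*
	 * Without CIP_DBC_IS_END_EVENT, s->data_block_counter already holds
	 * the DBC expected for this packet. With it, the DBC labels the last
	 * data block of a packet, so the expected value advances by the number
	 * of data blocks in the current packet (or by the fixed dbc_interval
	 * quirk for devices that need it).
	 */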
	if (((s->flags & CIP_SKIP_DBC_ZERO_CHECK) &&
	     data_block_counter == s->ctx_data.tx.first_dbc) ||
	    s->data_block_counter == UINT_MAX) {
		lost = false;
	} else if (!(s->flags & CIP_DBC_IS_END_EVENT)) {
		lost = data_block_counter != s->data_block_counter;
	} else {
		unsigned int dbc_interval;

		if (*data_blocks > 0 && s->ctx_data.tx.dbc_interval > 0)
			dbc_interval = s->ctx_data.tx.dbc_interval;
		else
			dbc_interval = *data_blocks;

		lost = data_block_counter !=
		       ((s->data_block_counter + dbc_interval) & 0xff);
	}

	if (lost) {
		dev_err(&s->unit->device,
			"Detect discontinuity of CIP: %02X %02X\n",
			s->data_block_counter, data_block_counter);
		return -EIO;
	}

	*syt = cip_header[1] & CIP_SYT_MASK;

	if (s->flags & CIP_DBC_IS_END_EVENT) {
		s->data_block_counter = data_block_counter;
	} else {
		s->data_block_counter =
				(data_block_counter + *data_blocks) & 0xff;
	}

	return 0;
}

static int handle_in_packet(struct amdtp_stream *s, unsigned int cycle,
			    const __be32 *ctx_header, __be32 *buffer,
			    unsigned int index)
{
	unsigned int payload_length;
	const __be32 *cip_header;
	unsigned int syt;
	unsigned int data_blocks;
	struct snd_pcm_substream *pcm;
	unsigned int pcm_frames;
	int err;

	payload_length = be32_to_cpu(ctx_header[0]) >> ISO_DATA_LENGTH_SHIFT;
	if (payload_length > s->ctx_data.tx.ctx_header_size +
					s->ctx_data.tx.max_ctx_payload_length) {
		dev_err(&s->unit->device,
			"Detect jumbo payload: %04x %04x\n",
			payload_length, s->ctx_data.tx.max_ctx_payload_length);
		return -EIO;
	}

	if (!(s->flags & CIP_NO_HEADER)) {
		cip_header = ctx_header + 2;
		err = check_cip_header(s, cip_header, payload_length,
				       &data_blocks, &syt);
		if (err < 0) {
			if (err != -EAGAIN)
				return err;
			pcm_frames = 0;
			goto end;
		}
	} else {
		cip_header = NULL;
		data_blocks = payload_length / 4 / s->data_block_quadlets;
		syt = 0;
	}

	trace_amdtp_packet(s, cycle, cip_header, payload_length, data_blocks,
			   index);

	pcm_frames = s->process_data_blocks(s, buffer, data_blocks, &syt);
end:
	if (queue_in_packet(s) < 0)
		return -EIO;

	pcm = READ_ONCE(s->pcm);
	if (pcm && pcm_frames > 0)
		update_pcm_pointers(s, pcm, pcm_frames);

	return 0;
}

// In the CYCLE_TIMER register of IEEE 1394, 7 bits are used to represent the
// second. On the other hand, in the DMA descriptors of 1394 OHCI, 3 bits are
// used to represent it. Thus, via the Linux firewire subsystem, we can get the
// lower 3 bits of the second.
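// The 16 bit tstamp therefore packs 3 bits of second (bits 15-13) and 13 bits
// of cycle (bits 12-0), so the computed cycle count is in the range [0, 64000).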
static inline u32 compute_cycle_count(__be32 ctx_header_tstamp)
{
	u32 tstamp = be32_to_cpu(ctx_header_tstamp) & HEADER_TSTAMP_MASK;
	return (((tstamp >> 13) & 0x07) * 8000) + (tstamp & 0x1fff);
}

static inline u32 increment_cycle_count(u32 cycle, unsigned int addend)
{
	cycle += addend;
	if (cycle >= 8 * CYCLES_PER_SECOND)
		cycle -= 8 * CYCLES_PER_SECOND;
	return cycle;
}

// Align to the actual cycle count for the packet which is going to be
// scheduled. This module queues packets for as many isochronous cycles as
// QUEUE_LENGTH, therefore it's OK to just increment the cycle by QUEUE_LENGTH
// to get the cycle in which the packet is going to be scheduled.
static inline u32 compute_it_cycle(const __be32 ctx_header_tstamp)
{
	u32 cycle = compute_cycle_count(ctx_header_tstamp);
	return increment_cycle_count(cycle, QUEUE_LENGTH);
}

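// Stop queueing further packets and report an XRUN to the PCM substream when
// a fatal error occurs during packet processing.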
static inline void cancel_stream(struct amdtp_stream *s)
{
	s->packet_index = -1;
	if (in_interrupt())
		amdtp_stream_pcm_abort(s);
	WRITE_ONCE(s->pcm_buffer_pointer, SNDRV_PCM_POS_XRUN);
}

static void out_stream_callback(struct fw_iso_context *context, u32 tstamp,
				size_t header_length, void *header,
				void *private_data)
{
	struct amdtp_stream *s = private_data;
	const __be32 *ctx_header = header;
	unsigned int i, packets = header_length / sizeof(*ctx_header);

	if (s->packet_index < 0)
		return;

	for (i = 0; i < packets; ++i) {
		u32 cycle;
		__be32 *buffer;

		cycle = compute_it_cycle(*ctx_header);
		buffer = s->buffer.packets[s->packet_index].buffer;

		if (s->handle_packet(s, cycle, ctx_header, buffer, i) < 0) {
			cancel_stream(s);
			return;
		}

		++ctx_header;
	}

	fw_iso_context_queue_flush(s->context);
}

static void in_stream_callback(struct fw_iso_context *context, u32 tstamp,
			       size_t header_length, void *header,
			       void *private_data)
{
	struct amdtp_stream *s = private_data;
	unsigned int i, packets;
	__be32 *ctx_header = header;

	if (s->packet_index < 0)
		return;

	// The number of packets in buffer.
	packets = header_length / s->ctx_data.tx.ctx_header_size;

	for (i = 0; i < packets; i++) {
		u32 cycle;
		__be32 *buffer;

		cycle = compute_cycle_count(ctx_header[1]);
		buffer = s->buffer.packets[s->packet_index].buffer;

		if (handle_in_packet(s, cycle, ctx_header, buffer, i) < 0)
			break;

		ctx_header += s->ctx_data.tx.ctx_header_size / sizeof(*ctx_header);
	}

	/* Queueing error or invalid payload detected. */
	if (i < packets) {
		cancel_stream(s);
		return;
	}

	fw_iso_context_queue_flush(s->context);
}

/* This callback function is executed only one time. */
static void amdtp_stream_first_callback(struct fw_iso_context *context,
					u32 tstamp, size_t header_length,
					void *header, void *private_data)
{
	struct amdtp_stream *s = private_data;
	const __be32 *ctx_header = header;
	u32 cycle;

	/*
	 * For an in-stream, the first packet has arrived.
	 * For an out-stream, we are prepared to transmit the first packet.
	 */
	s->callbacked = true;
	wake_up(&s->callback_wait);

	if (s->direction == AMDTP_IN_STREAM) {
		cycle = compute_cycle_count(ctx_header[1]);

		context->callback.sc = in_stream_callback;
	} else {
		cycle = compute_it_cycle(*ctx_header);

		context->callback.sc = out_stream_callback;
		if (s->flags & CIP_NO_HEADER)
			s->handle_packet = handle_out_packet_without_header;
		else
			s->handle_packet = handle_out_packet;
	}

	s->start_cycle = cycle;

	context->callback.sc(context, tstamp, header_length, header, s);
}

/**
 * amdtp_stream_start - start transferring packets
 * @s: the AMDTP stream to start
 * @channel: the isochronous channel on the bus
 * @speed: firewire speed code
 *
 * The stream cannot be started until it has been configured with
 * amdtp_stream_set_parameters() and it must be started before any PCM or MIDI
 * device can be started.
 */
int amdtp_stream_start(struct amdtp_stream *s, int channel, int speed)
{
	static const struct {
		unsigned int data_block;
		unsigned int syt_offset;
	} *entry, initial_state[] = {
		[CIP_SFC_32000]  = {  4, 3072 },
		[CIP_SFC_48000]  = {  6, 1024 },
		[CIP_SFC_96000]  = { 12, 1024 },
		[CIP_SFC_192000] = { 24, 1024 },
		[CIP_SFC_44100]  = {  0,   67 },
		[CIP_SFC_88200]  = {  0,   67 },
		[CIP_SFC_176400] = {  0,   67 },
	};
	unsigned int ctx_header_size;
	unsigned int max_ctx_payload_size;
	enum dma_data_direction dir;
	int type, tag, err;

	mutex_lock(&s->mutex);

	if (WARN_ON(amdtp_stream_running(s) ||
		    (s->data_block_quadlets < 1))) {
		err = -EBADFD;
		goto err_unlock;
	}

	if (s->direction == AMDTP_IN_STREAM) {
		s->data_block_counter = UINT_MAX;
	} else {
		entry = &initial_state[s->sfc];

		s->data_block_counter = 0;
		s->ctx_data.rx.data_block_state = entry->data_block;
		s->ctx_data.rx.syt_offset_state = entry->syt_offset;
		s->ctx_data.rx.last_syt_offset = TICKS_PER_CYCLE;
	}

	/* initialize packet buffer */
	if (s->direction == AMDTP_IN_STREAM) {
		dir = DMA_FROM_DEVICE;
		type = FW_ISO_CONTEXT_RECEIVE;
		if (!(s->flags & CIP_NO_HEADER))
			ctx_header_size = IR_CTX_HEADER_SIZE_CIP;
		else
			ctx_header_size = IR_CTX_HEADER_SIZE_NO_CIP;
	} else {
		dir = DMA_TO_DEVICE;
		type = FW_ISO_CONTEXT_TRANSMIT;
		ctx_header_size = 0;	// No effect for IT context.
	}

	max_ctx_payload_size = amdtp_stream_get_max_payload(s) -
			       ctx_header_size;

	err = iso_packets_buffer_init(&s->buffer, s->unit, QUEUE_LENGTH,
				      max_ctx_payload_size, dir);
	if (err < 0)
		goto err_unlock;

	s->context = fw_iso_context_create(fw_parent_device(s->unit)->card,
					   type, channel, speed, ctx_header_size,
					   amdtp_stream_first_callback, s);
	if (IS_ERR(s->context)) {
		err = PTR_ERR(s->context);
		if (err == -EBUSY)
			dev_err(&s->unit->device,
				"no free stream on this controller\n");
		goto err_buffer;
	}

	amdtp_stream_update(s);

	if (s->direction == AMDTP_IN_STREAM) {
		s->ctx_data.tx.max_ctx_payload_length = max_ctx_payload_size;
		s->ctx_data.tx.ctx_header_size = ctx_header_size;
	}

	if (s->flags & CIP_NO_HEADER)
		s->tag = TAG_NO_CIP_HEADER;
	else
		s->tag = TAG_CIP;

	s->packet_index = 0;
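	/*
	 * Queue skip packets for an out-stream, or receive packets for an
	 * in-stream, for every slot of the ring so that each isochronous
	 * cycle has something queued before the context starts.
	 */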
	do {
		if (s->direction == AMDTP_IN_STREAM)
			err = queue_in_packet(s);
		else
			err = queue_out_packet(s, 0);
		if (err < 0)
			goto err_context;
	} while (s->packet_index > 0);

	/* NOTE: TAG1 matches CIP. This just affects the in-stream. */
	tag = FW_ISO_CONTEXT_MATCH_TAG1;
	if ((s->flags & CIP_EMPTY_WITH_TAG0) || (s->flags & CIP_NO_HEADER))
		tag |= FW_ISO_CONTEXT_MATCH_TAG0;

	s->callbacked = false;
	err = fw_iso_context_start(s->context, -1, 0, tag);
	if (err < 0)
		goto err_context;

	mutex_unlock(&s->mutex);

	return 0;

err_context:
	fw_iso_context_destroy(s->context);
	s->context = ERR_PTR(-1);
err_buffer:
	iso_packets_buffer_destroy(&s->buffer, s->unit);
err_unlock:
	mutex_unlock(&s->mutex);

	return err;
}
EXPORT_SYMBOL(amdtp_stream_start);

/**
 * amdtp_stream_pcm_pointer - get the PCM buffer position
 * @s: the AMDTP stream that transports the PCM data
 *
 * Returns the current buffer position, in frames.
 */
unsigned long amdtp_stream_pcm_pointer(struct amdtp_stream *s)
{
	/*
	 * This function is called in software IRQ context of period_tasklet or
	 * process context.
	 *
	 * When the software IRQ context was scheduled by the software IRQ
	 * context of the IR/IT contexts, queued packets were already handled.
	 * Therefore, there is no need to flush the queue in the buffer anymore.
	 *
	 * When the process context reaches here, some packets will already be
	 * queued in the buffer. These packets should be handled immediately
	 * to keep better granularity of the PCM pointer.
	 *
	 * Later, the process context sometimes schedules the software IRQ
	 * context of the period_tasklet. Then, there is no need to flush the
	 * queue, for the same reason as described for the IR/IT contexts.
	 */
	if (!in_interrupt() && amdtp_stream_running(s))
		fw_iso_context_flush_completions(s->context);

	return READ_ONCE(s->pcm_buffer_pointer);
}
EXPORT_SYMBOL(amdtp_stream_pcm_pointer);

/**
 * amdtp_stream_pcm_ack - acknowledge queued PCM frames
 * @s: the AMDTP stream that transfers the PCM frames
 *
 * Returns zero always.
 */
int amdtp_stream_pcm_ack(struct amdtp_stream *s)
{
	/*
	 * Process isochronous packets for recent isochronous cycles to handle
	 * queued PCM frames.
	 */
	if (amdtp_stream_running(s))
		fw_iso_context_flush_completions(s->context);

	return 0;
}
EXPORT_SYMBOL(amdtp_stream_pcm_ack);

/**
 * amdtp_stream_update - update the stream after a bus reset
 * @s: the AMDTP stream
 */
void amdtp_stream_update(struct amdtp_stream *s)
{
	/* Precomputing. */
	WRITE_ONCE(s->source_node_id_field,
		   (fw_parent_device(s->unit)->card->node_id << CIP_SID_SHIFT) & CIP_SID_MASK);
}
EXPORT_SYMBOL(amdtp_stream_update);

/**
 * amdtp_stream_stop - stop sending packets
 * @s: the AMDTP stream to stop
 *
 * All PCM and MIDI devices of the stream must be stopped before the stream
 * itself can be stopped.
 */
void amdtp_stream_stop(struct amdtp_stream *s)
{
	mutex_lock(&s->mutex);

	if (!amdtp_stream_running(s)) {
		mutex_unlock(&s->mutex);
		return;
	}

	tasklet_kill(&s->period_tasklet);
	fw_iso_context_stop(s->context);
	fw_iso_context_destroy(s->context);
	s->context = ERR_PTR(-1);
	iso_packets_buffer_destroy(&s->buffer, s->unit);

	s->callbacked = false;

	mutex_unlock(&s->mutex);
}
EXPORT_SYMBOL(amdtp_stream_stop);

/**
 * amdtp_stream_pcm_abort - abort the running PCM device
 * @s: the AMDTP stream about to be stopped
 *
 * If the isochronous stream needs to be stopped asynchronously, call this
 * function first to stop the PCM device.
 */
void amdtp_stream_pcm_abort(struct amdtp_stream *s)
{
	struct snd_pcm_substream *pcm;

	pcm = READ_ONCE(s->pcm);
	if (pcm)
		snd_pcm_stop_xrun(pcm);
}
EXPORT_SYMBOL(amdtp_stream_pcm_abort);