/*
 * Audio and Music Data Transmission Protocol (IEC 61883-6) streams
 * with Common Isochronous Packet (IEC 61883-1) headers
 *
 * Copyright (c) Clemens Ladisch <clemens@ladisch.de>
 * Licensed under the terms of the GNU General Public License, version 2.
 */

#include <linux/device.h>
#include <linux/err.h>
#include <linux/firewire.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>
#include "amdtp-stream.h"

#define TICKS_PER_CYCLE		3072
#define CYCLES_PER_SECOND	8000
#define TICKS_PER_SECOND	(TICKS_PER_CYCLE * CYCLES_PER_SECOND)

/* Always support Linux tracing subsystem. */
#define CREATE_TRACE_POINTS
#include "amdtp-stream-trace.h"

#define TRANSFER_DELAY_TICKS	0x2e00 /* 479.17 microseconds */
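/* 0x2e00 is 11776 ticks; at 24.576 MHz (TICKS_PER_SECOND) this is ~479.17 us. */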

/* isochronous header parameters */
#define ISO_DATA_LENGTH_SHIFT	16
#define TAG_NO_CIP_HEADER	0
#define TAG_CIP			1

/* common isochronous packet header parameters */
#define CIP_EOH_SHIFT		31
#define CIP_EOH			(1u << CIP_EOH_SHIFT)
#define CIP_EOH_MASK		0x80000000
#define CIP_SID_SHIFT		24
#define CIP_SID_MASK		0x3f000000
#define CIP_DBS_MASK		0x00ff0000
#define CIP_DBS_SHIFT		16
#define CIP_SPH_MASK		0x00000400
#define CIP_SPH_SHIFT		10
#define CIP_DBC_MASK		0x000000ff
#define CIP_FMT_SHIFT		24
#define CIP_FMT_MASK		0x3f000000
#define CIP_FDF_MASK		0x00ff0000
#define CIP_FDF_SHIFT		16
#define CIP_SYT_MASK		0x0000ffff
#define CIP_SYT_NO_INFO		0xffff

/* Audio and Music transfer protocol specific parameters */
#define CIP_FMT_AM		0x10
#define AMDTP_FDF_NO_DATA	0xff

/* TODO: make these configurable */
#define INTERRUPT_INTERVAL	16
#define QUEUE_LENGTH		48

// For iso header, tstamp and 2 CIP headers.
#define IR_CTX_HEADER_SIZE_CIP		16
// For iso header and tstamp.
#define IR_CTX_HEADER_SIZE_NO_CIP	8
#define HEADER_TSTAMP_MASK		0x0000ffff

#define IT_PKT_HEADER_SIZE_CIP		8 // For 2 CIP headers.
#define IT_PKT_HEADER_SIZE_NO_CIP	0 // Nothing.

static void pcm_period_tasklet(unsigned long data);

/**
 * amdtp_stream_init - initialize an AMDTP stream structure
 * @s: the AMDTP stream to initialize
 * @unit: the target of the stream
 * @dir: the direction of the stream
 * @flags: the packet transmission method to use
 * @fmt: the value of the fmt field in the CIP header
 * @process_data_blocks: callback handler to process data blocks
 * @protocol_size: the size of protocol-specific data to allocate
 */
int amdtp_stream_init(struct amdtp_stream *s, struct fw_unit *unit,
		      enum amdtp_stream_direction dir, enum cip_flags flags,
		      unsigned int fmt,
		      amdtp_stream_process_data_blocks_t process_data_blocks,
		      unsigned int protocol_size)
{
	if (process_data_blocks == NULL)
		return -EINVAL;

	s->protocol = kzalloc(protocol_size, GFP_KERNEL);
	if (!s->protocol)
		return -ENOMEM;

	s->unit = unit;
	s->direction = dir;
	s->flags = flags;
	s->context = ERR_PTR(-1);
	mutex_init(&s->mutex);
	tasklet_init(&s->period_tasklet, pcm_period_tasklet, (unsigned long)s);
	s->packet_index = 0;

	init_waitqueue_head(&s->callback_wait);
	s->callbacked = false;

	s->fmt = fmt;
	s->process_data_blocks = process_data_blocks;

	return 0;
}
EXPORT_SYMBOL(amdtp_stream_init);

/**
 * amdtp_stream_destroy - free stream resources
 * @s: the AMDTP stream to destroy
 */
void amdtp_stream_destroy(struct amdtp_stream *s)
{
	/* Not initialized. */
	if (s->protocol == NULL)
		return;

	WARN_ON(amdtp_stream_running(s));
	kfree(s->protocol);
	mutex_destroy(&s->mutex);
}
EXPORT_SYMBOL(amdtp_stream_destroy);

const unsigned int amdtp_syt_intervals[CIP_SFC_COUNT] = {
	[CIP_SFC_32000]  = 8,
	[CIP_SFC_44100]  = 8,
	[CIP_SFC_48000]  = 8,
	[CIP_SFC_88200]  = 16,
	[CIP_SFC_96000]  = 16,
	[CIP_SFC_176400] = 32,
	[CIP_SFC_192000] = 32,
};
EXPORT_SYMBOL(amdtp_syt_intervals);

const unsigned int amdtp_rate_table[CIP_SFC_COUNT] = {
	[CIP_SFC_32000]  = 32000,
	[CIP_SFC_44100]  = 44100,
	[CIP_SFC_48000]  = 48000,
	[CIP_SFC_88200]  = 88200,
	[CIP_SFC_96000]  = 96000,
	[CIP_SFC_176400] = 176400,
	[CIP_SFC_192000] = 192000,
};
EXPORT_SYMBOL(amdtp_rate_table);

static int apply_constraint_to_size(struct snd_pcm_hw_params *params,
				    struct snd_pcm_hw_rule *rule)
{
	struct snd_interval *s = hw_param_interval(params, rule->var);
	const struct snd_interval *r =
		hw_param_interval_c(params, SNDRV_PCM_HW_PARAM_RATE);
	struct snd_interval t = {0};
	unsigned int step = 0;
	int i;

	for (i = 0; i < CIP_SFC_COUNT; ++i) {
		if (snd_interval_test(r, amdtp_rate_table[i]))
			step = max(step, amdtp_syt_intervals[i]);
	}

	t.min = roundup(s->min, step);
	t.max = rounddown(s->max, step);
	t.integer = 1;

	return snd_interval_refine(s, &t);
}
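/*
 * Illustrative example: if the rate interval still allows both 48000 and
 * 96000, step becomes 16, so a requested period size of [100, 1000] frames
 * is refined to [112, 992].
 */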

/**
 * amdtp_stream_add_pcm_hw_constraints - add hw constraints for PCM substream
 * @s: the AMDTP stream, which must be initialized.
 * @runtime: the PCM substream runtime
 */
int amdtp_stream_add_pcm_hw_constraints(struct amdtp_stream *s,
					struct snd_pcm_runtime *runtime)
{
	struct snd_pcm_hardware *hw = &runtime->hw;
	int err;

	hw->info = SNDRV_PCM_INFO_BATCH |
		   SNDRV_PCM_INFO_BLOCK_TRANSFER |
		   SNDRV_PCM_INFO_INTERLEAVED |
		   SNDRV_PCM_INFO_JOINT_DUPLEX |
		   SNDRV_PCM_INFO_MMAP |
		   SNDRV_PCM_INFO_MMAP_VALID;

	/* SNDRV_PCM_INFO_BATCH */
	hw->periods_min = 2;
	hw->periods_max = UINT_MAX;

	/* bytes for a frame */
	hw->period_bytes_min = 4 * hw->channels_max;

	/* Just to prevent allocating too many pages. */
	hw->period_bytes_max = hw->period_bytes_min * 2048;
	hw->buffer_bytes_max = hw->period_bytes_max * hw->periods_min;

	/*
	 * Currently firewire-lib processes 16 packets in one software
	 * interrupt callback. This equals 2 msec, but the actual interval
	 * of the interrupts has jitter.
	 * Additionally, even if a constraint is added to fit the period size
	 * to 2 msec, the calculated frames per period do not correspond to
	 * exactly 2 msec for every sampling rate.
	 * Anyway, the interval to call snd_pcm_period_elapsed() cannot be
	 * 2 msec. Here let us use 5 msec for a safe period interrupt.
	 */
	err = snd_pcm_hw_constraint_minmax(runtime,
					   SNDRV_PCM_HW_PARAM_PERIOD_TIME,
					   5000, UINT_MAX);
	if (err < 0)
		goto end;

	/* A non-blocking stream has no more constraints. */
	if (!(s->flags & CIP_BLOCKING))
		goto end;

	/*
	 * One AMDTP packet can include several frames. In blocking mode, the
	 * number equals SYT_INTERVAL, i.e. 8, 16 or 32, depending on the
	 * sampling rate. For accurate period interrupts, it's preferable to
	 * align period/buffer sizes to the current SYT_INTERVAL.
	 */
	err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_PERIOD_SIZE,
				  apply_constraint_to_size, NULL,
				  SNDRV_PCM_HW_PARAM_PERIOD_SIZE,
				  SNDRV_PCM_HW_PARAM_RATE, -1);
	if (err < 0)
		goto end;
	err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_BUFFER_SIZE,
				  apply_constraint_to_size, NULL,
				  SNDRV_PCM_HW_PARAM_BUFFER_SIZE,
				  SNDRV_PCM_HW_PARAM_RATE, -1);
	if (err < 0)
		goto end;
end:
	return err;
}
EXPORT_SYMBOL(amdtp_stream_add_pcm_hw_constraints);

/**
 * amdtp_stream_set_parameters - set stream parameters
 * @s: the AMDTP stream to configure
 * @rate: the sample rate
 * @data_block_quadlets: the size of a data block in quadlets
 *
 * The parameters must be set before the stream is started, and must not be
 * changed while the stream is running.
 */
int amdtp_stream_set_parameters(struct amdtp_stream *s, unsigned int rate,
				unsigned int data_block_quadlets)
{
	unsigned int sfc;

	for (sfc = 0; sfc < ARRAY_SIZE(amdtp_rate_table); ++sfc) {
		if (amdtp_rate_table[sfc] == rate)
			break;
	}
	if (sfc == ARRAY_SIZE(amdtp_rate_table))
		return -EINVAL;

	s->sfc = sfc;
	s->data_block_quadlets = data_block_quadlets;
	s->syt_interval = amdtp_syt_intervals[sfc];

	// Default buffering in the device.
	if (s->direction == AMDTP_OUT_STREAM) {
		s->ctx_data.rx.transfer_delay =
					TRANSFER_DELAY_TICKS - TICKS_PER_CYCLE;

		if (s->flags & CIP_BLOCKING) {
			// Additional buffering needed to adjust for no-data
			// packets.
			s->ctx_data.rx.transfer_delay +=
				TICKS_PER_SECOND * s->syt_interval / rate;
		}
	}

	return 0;
}
EXPORT_SYMBOL(amdtp_stream_set_parameters);

/**
 * amdtp_stream_get_max_payload - get the stream's packet size
 * @s: the AMDTP stream
 *
 * This function must not be called before the stream has been configured
 * with amdtp_stream_set_parameters().
 */
unsigned int amdtp_stream_get_max_payload(struct amdtp_stream *s)
{
	unsigned int multiplier = 1;
	unsigned int cip_header_size = 0;

	if (s->flags & CIP_JUMBO_PAYLOAD)
		multiplier = 5;
	if (!(s->flags & CIP_NO_HEADER))
		cip_header_size = sizeof(__be32) * 2;

	return cip_header_size +
	       s->syt_interval * s->data_block_quadlets * sizeof(__be32) * multiplier;
}
EXPORT_SYMBOL(amdtp_stream_get_max_payload);

/**
 * amdtp_stream_pcm_prepare - prepare PCM device for running
 * @s: the AMDTP stream
 *
 * This function should be called from the PCM device's .prepare callback.
 */
void amdtp_stream_pcm_prepare(struct amdtp_stream *s)
{
	tasklet_kill(&s->period_tasklet);
	s->pcm_buffer_pointer = 0;
	s->pcm_period_pointer = 0;
}
EXPORT_SYMBOL(amdtp_stream_pcm_prepare);

static unsigned int calculate_data_blocks(struct amdtp_stream *s,
					  unsigned int syt)
{
	unsigned int phase, data_blocks;

	/* Blocking mode. */
	if (s->flags & CIP_BLOCKING) {
		/* This module generates an empty packet for 'no data'. */
		if (syt == CIP_SYT_NO_INFO)
			data_blocks = 0;
		else
			data_blocks = s->syt_interval;
	/* Non-blocking mode. */
	} else {
		if (!cip_sfc_is_base_44100(s->sfc)) {
			// Sample_rate / 8000 is an integer, and precomputed.
			data_blocks = s->ctx_data.rx.data_block_state;
		} else {
			phase = s->ctx_data.rx.data_block_state;

			/*
			 * This calculates the number of data blocks per packet so that
			 * 1) the overall rate is correct and exactly synchronized to
			 *    the bus clock, and
			 * 2) packets with a rounded-up number of blocks occur as early
			 *    as possible in the sequence (to prevent underruns of the
			 *    device's buffer).
			 */
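			/*
			 * Illustrative example: at 44.1 kHz, 44100 / 8000 =
			 * 5.5125 blocks per cycle on average, so each 80-cycle
			 * phase period carries 41 packets of 6 blocks and 39
			 * packets of 5 blocks (41 * 6 + 39 * 5 = 441 =
			 * 44100 * 80 / 8000).
			 */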
			if (s->sfc == CIP_SFC_44100)
				/* 6 6 5 6 5 6 5 ... */
				data_blocks = 5 + ((phase & 1) ^
						   (phase == 0 || phase >= 40));
			else
				/* 12 11 11 11 11 ... or 23 22 22 22 22 ... */
				data_blocks = 11 * (s->sfc >> 1) + (phase == 0);
			if (++phase >= (80 >> (s->sfc >> 1)))
				phase = 0;
			s->ctx_data.rx.data_block_state = phase;
		}
	}

	return data_blocks;
}

static unsigned int calculate_syt(struct amdtp_stream *s,
				  unsigned int cycle)
{
	unsigned int syt_offset, phase, index, syt;

	if (s->ctx_data.rx.last_syt_offset < TICKS_PER_CYCLE) {
		if (!cip_sfc_is_base_44100(s->sfc))
			syt_offset = s->ctx_data.rx.last_syt_offset +
				     s->ctx_data.rx.syt_offset_state;
		else {
			/*
			 * The time, in ticks, of the n'th SYT_INTERVAL sample is:
			 *    n * SYT_INTERVAL * 24576000 / sample_rate
			 * Modulo TICKS_PER_CYCLE, the difference between successive
			 * elements is about 1386.23.  Rounding the results of this
			 * formula to the SYT precision results in a sequence of
			 * differences that begins with:
			 *    1386 1386 1387 1386 1386 1386 1387 1386 1386 1386 1387 ...
			 * This code generates _exactly_ the same sequence.
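			 *
			 * (For reference: the pattern repeats every 147 packets,
			 * since 147 * 8 * 24576000 / 44100 = 655360 ticks
			 * exactly; one period consists of 113 increments of
			 * 1386 and 34 increments of 1387.)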
			 */
			phase = s->ctx_data.rx.syt_offset_state;
			index = phase % 13;
			syt_offset = s->ctx_data.rx.last_syt_offset;
			syt_offset += 1386 + ((index && !(index & 3)) ||
					      phase == 146);
			if (++phase >= 147)
				phase = 0;
			s->ctx_data.rx.syt_offset_state = phase;
		}
	} else
		syt_offset = s->ctx_data.rx.last_syt_offset - TICKS_PER_CYCLE;
	s->ctx_data.rx.last_syt_offset = syt_offset;

	if (syt_offset < TICKS_PER_CYCLE) {
		syt_offset += s->ctx_data.rx.transfer_delay;
		syt = (cycle + syt_offset / TICKS_PER_CYCLE) << 12;
		syt += syt_offset % TICKS_PER_CYCLE;

		return syt & CIP_SYT_MASK;
	} else {
		return CIP_SYT_NO_INFO;
	}
}

static void update_pcm_pointers(struct amdtp_stream *s,
				struct snd_pcm_substream *pcm,
				unsigned int frames)
{
	unsigned int ptr;

	ptr = s->pcm_buffer_pointer + frames;
	if (ptr >= pcm->runtime->buffer_size)
		ptr -= pcm->runtime->buffer_size;
	WRITE_ONCE(s->pcm_buffer_pointer, ptr);

	s->pcm_period_pointer += frames;
	if (s->pcm_period_pointer >= pcm->runtime->period_size) {
		s->pcm_period_pointer -= pcm->runtime->period_size;
		tasklet_hi_schedule(&s->period_tasklet);
	}
}

static void pcm_period_tasklet(unsigned long data)
{
	struct amdtp_stream *s = (void *)data;
	struct snd_pcm_substream *pcm = READ_ONCE(s->pcm);

	if (pcm)
		snd_pcm_period_elapsed(pcm);
}

static int queue_packet(struct amdtp_stream *s, struct fw_iso_packet *params)
{
	int err;

	params->interrupt = IS_ALIGNED(s->packet_index + 1, INTERRUPT_INTERVAL);
	params->tag = s->tag;
	params->sy = 0;

	err = fw_iso_context_queue(s->context, params, &s->buffer.iso_buffer,
				   s->buffer.packets[s->packet_index].offset);
	if (err < 0) {
		dev_err(&s->unit->device, "queueing error: %d\n", err);
		goto end;
	}

	if (++s->packet_index >= QUEUE_LENGTH)
		s->packet_index = 0;
end:
	return err;
}

static inline int queue_out_packet(struct amdtp_stream *s,
				   struct fw_iso_packet *params)
{
	params->skip =
		!!(params->header_length == 0 && params->payload_length == 0);
	return queue_packet(s, params);
}

static inline int queue_in_packet(struct amdtp_stream *s,
				  struct fw_iso_packet *params)
{
	// Queue one packet for IR context.
	params->header_length = s->ctx_data.tx.ctx_header_size;
	params->payload_length = s->ctx_data.tx.max_ctx_payload_length;
	params->skip = false;
	return queue_packet(s, params);
}

static void generate_cip_header(struct amdtp_stream *s, __be32 cip_header[2],
				unsigned int syt)
{
	cip_header[0] = cpu_to_be32(READ_ONCE(s->source_node_id_field) |
				(s->data_block_quadlets << CIP_DBS_SHIFT) |
				((s->sph << CIP_SPH_SHIFT) & CIP_SPH_MASK) |
				s->data_block_counter);
	cip_header[1] = cpu_to_be32(CIP_EOH |
				((s->fmt << CIP_FMT_SHIFT) & CIP_FMT_MASK) |
				((s->ctx_data.rx.fdf << CIP_FDF_SHIFT) & CIP_FDF_MASK) |
				(syt & CIP_SYT_MASK));
}
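/*
 * Sketch of the two CIP quadlets built above, based only on the shift/mask
 * definitions at the top of this file (bits not listed are left zero):
 *
 *   quadlet 0: SID [29:24], DBS [23:16], SPH [10], DBC [7:0]
 *   quadlet 1: EOH [31], FMT [29:24], FDF [23:16], SYT [15:0]
 */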

static void build_it_pkt_header(struct amdtp_stream *s, unsigned int cycle,
				struct fw_iso_packet *params,
				unsigned int data_blocks, unsigned int syt,
				unsigned int index)
{
	__be32 *cip_header;

	if (s->flags & CIP_DBC_IS_END_EVENT) {
		s->data_block_counter =
			(s->data_block_counter + data_blocks) & 0xff;
	}

	if (!(s->flags & CIP_NO_HEADER)) {
		cip_header = (__be32 *)params->header;
		generate_cip_header(s, cip_header, syt);
		params->header_length = 2 * sizeof(__be32);
	} else {
		cip_header = NULL;
	}

	if (!(s->flags & CIP_DBC_IS_END_EVENT)) {
		s->data_block_counter =
			(s->data_block_counter + data_blocks) & 0xff;
	}

	params->payload_length =
		data_blocks * sizeof(__be32) * s->data_block_quadlets;

	trace_amdtp_packet(s, cycle, cip_header, params->payload_length,
			   data_blocks, index);
}


static int check_cip_header(struct amdtp_stream *s, const __be32 *buf,
			    unsigned int payload_length,
			    unsigned int *data_blocks, unsigned int *syt)
{
	u32 cip_header[2];
	unsigned int sph;
	unsigned int fmt;
	unsigned int fdf;
	unsigned int data_block_counter;
	bool lost;

	cip_header[0] = be32_to_cpu(buf[0]);
	cip_header[1] = be32_to_cpu(buf[1]);

	/*
	 * This module supports 'Two-quadlet CIP header with SYT field'.
	 * For convenience, also check whether the FMT field is AM824 or not.
	 */
	if ((((cip_header[0] & CIP_EOH_MASK) == CIP_EOH) ||
	     ((cip_header[1] & CIP_EOH_MASK) != CIP_EOH)) &&
	    (!(s->flags & CIP_HEADER_WITHOUT_EOH))) {
		dev_info_ratelimited(&s->unit->device,
			"Invalid CIP header for AMDTP: %08X:%08X\n",
			cip_header[0], cip_header[1]);
		return -EAGAIN;
	}

	/* Check whether the protocol is valid or not. */
	sph = (cip_header[0] & CIP_SPH_MASK) >> CIP_SPH_SHIFT;
	fmt = (cip_header[1] & CIP_FMT_MASK) >> CIP_FMT_SHIFT;
	if (sph != s->sph || fmt != s->fmt) {
		dev_info_ratelimited(&s->unit->device,
				     "Detect unexpected protocol: %08x %08x\n",
				     cip_header[0], cip_header[1]);
		return -EAGAIN;
	}

	/* Calculate data blocks */
	fdf = (cip_header[1] & CIP_FDF_MASK) >> CIP_FDF_SHIFT;
	if (payload_length < sizeof(__be32) * 2 ||
	    (fmt == CIP_FMT_AM && fdf == AMDTP_FDF_NO_DATA)) {
		*data_blocks = 0;
	} else {
		unsigned int data_block_quadlets =
				(cip_header[0] & CIP_DBS_MASK) >> CIP_DBS_SHIFT;
		/* avoid division by zero */
		if (data_block_quadlets == 0) {
			dev_err(&s->unit->device,
				"Detect invalid value in dbs field: %08X\n",
				cip_header[0]);
			return -EPROTO;
		}
		if (s->flags & CIP_WRONG_DBS)
			data_block_quadlets = s->data_block_quadlets;

		*data_blocks = (payload_length / sizeof(__be32) - 2) /
							data_block_quadlets;
	}

	/* Check data block counter continuity */
	data_block_counter = cip_header[0] & CIP_DBC_MASK;
	if (*data_blocks == 0 && (s->flags & CIP_EMPTY_HAS_WRONG_DBC) &&
	    s->data_block_counter != UINT_MAX)
		data_block_counter = s->data_block_counter;

	if (((s->flags & CIP_SKIP_DBC_ZERO_CHECK) &&
	     data_block_counter == s->ctx_data.tx.first_dbc) ||
	    s->data_block_counter == UINT_MAX) {
		lost = false;
	} else if (!(s->flags & CIP_DBC_IS_END_EVENT)) {
		lost = data_block_counter != s->data_block_counter;
	} else {
		unsigned int dbc_interval;

		if (*data_blocks > 0 && s->ctx_data.tx.dbc_interval > 0)
			dbc_interval = s->ctx_data.tx.dbc_interval;
		else
			dbc_interval = *data_blocks;

		lost = data_block_counter !=
		       ((s->data_block_counter + dbc_interval) & 0xff);
	}

	if (lost) {
		dev_err(&s->unit->device,
			"Detect discontinuity of CIP: %02X %02X\n",
			s->data_block_counter, data_block_counter);
		return -EIO;
	}

	*syt = cip_header[1] & CIP_SYT_MASK;

	if (s->flags & CIP_DBC_IS_END_EVENT) {
		s->data_block_counter = data_block_counter;
	} else {
		s->data_block_counter =
				(data_block_counter + *data_blocks) & 0xff;
	}

	return 0;
}

static int parse_ir_ctx_header(struct amdtp_stream *s, unsigned int cycle,
			       const __be32 *ctx_header,
			       unsigned int *payload_length,
			       unsigned int *data_blocks,
			       unsigned int *syt, unsigned int index)
{
	const __be32 *cip_header;
	int err;

	*payload_length = be32_to_cpu(ctx_header[0]) >> ISO_DATA_LENGTH_SHIFT;
	if (*payload_length > s->ctx_data.tx.ctx_header_size +
					s->ctx_data.tx.max_ctx_payload_length) {
		dev_err(&s->unit->device,
			"Detect jumbo payload: %04x %04x\n",
			*payload_length, s->ctx_data.tx.max_ctx_payload_length);
		return -EIO;
	}

	if (!(s->flags & CIP_NO_HEADER)) {
		cip_header = ctx_header + 2;
		err = check_cip_header(s, cip_header, *payload_length,
				       data_blocks, syt);
		if (err < 0)
			return err;
	} else {
		cip_header = NULL;
		*data_blocks = *payload_length / sizeof(__be32) /
			       s->data_block_quadlets;
		*syt = 0;
		s->data_block_counter =
				(s->data_block_counter + *data_blocks) & 0xff;
	}

	trace_amdtp_packet(s, cycle, cip_header, *payload_length, *data_blocks,
			   index);

	return 0;
}

// In the CYCLE_TIMER register of IEEE 1394, 7 bits are used to represent the
// second. On the other hand, in the DMA descriptors of 1394 OHCI, only 3 bits
// are used to represent it. Thus, via the Linux FireWire subsystem, we get
// just those 3 bits of the second.
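// For example (illustrative value): a tstamp of 0x3b2f has second = 1 and
// cycle = 0x1b2f (6959), so the computed cycle count is 1 * 8000 + 6959 = 14959.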
static inline u32 compute_cycle_count(__be32 ctx_header_tstamp)
{
	u32 tstamp = be32_to_cpu(ctx_header_tstamp) & HEADER_TSTAMP_MASK;
	return (((tstamp >> 13) & 0x07) * 8000) + (tstamp & 0x1fff);
}

static inline u32 increment_cycle_count(u32 cycle, unsigned int addend)
{
	cycle += addend;
	if (cycle >= 8 * CYCLES_PER_SECOND)
		cycle -= 8 * CYCLES_PER_SECOND;
	return cycle;
}

// Align to the actual cycle count of the packet which is going to be scheduled.
// This module queues packets to the same number of isochronous cycles as
// QUEUE_LENGTH, therefore it's OK to just increment the cycle by QUEUE_LENGTH
// to get the scheduled cycle.
static inline u32 compute_it_cycle(const __be32 ctx_header_tstamp)
{
	u32 cycle = compute_cycle_count(ctx_header_tstamp);
	return increment_cycle_count(cycle, QUEUE_LENGTH);
}

static inline void cancel_stream(struct amdtp_stream *s)
{
	s->packet_index = -1;
	if (in_interrupt())
		amdtp_stream_pcm_abort(s);
	WRITE_ONCE(s->pcm_buffer_pointer, SNDRV_PCM_POS_XRUN);
}

static void out_stream_callback(struct fw_iso_context *context, u32 tstamp,
				size_t header_length, void *header,
				void *private_data)
{
	struct amdtp_stream *s = private_data;
	const __be32 *ctx_header = header;
	unsigned int i, packets = header_length / sizeof(*ctx_header);

	if (s->packet_index < 0)
		return;

	for (i = 0; i < packets; ++i) {
		u32 cycle;
		unsigned int syt;
		unsigned int data_block;
		__be32 *buffer;
		unsigned int pcm_frames;
		struct {
			struct fw_iso_packet params;
			__be32 header[IT_PKT_HEADER_SIZE_CIP / sizeof(__be32)];
		} template = { {0}, {0} };
		struct snd_pcm_substream *pcm;

		cycle = compute_it_cycle(*ctx_header);
		syt = calculate_syt(s, cycle);
		data_block = calculate_data_blocks(s, syt);
		buffer = s->buffer.packets[s->packet_index].buffer;
		pcm_frames = s->process_data_blocks(s, buffer, data_block, &syt);

		build_it_pkt_header(s, cycle, &template.params, data_block, syt,
				    i);

		if (queue_out_packet(s, &template.params) < 0) {
			cancel_stream(s);
			return;
		}

		pcm = READ_ONCE(s->pcm);
		if (pcm && pcm_frames > 0)
			update_pcm_pointers(s, pcm, pcm_frames);

		++ctx_header;
	}

	fw_iso_context_queue_flush(s->context);
}

static void in_stream_callback(struct fw_iso_context *context, u32 tstamp,
			       size_t header_length, void *header,
			       void *private_data)
{
	struct amdtp_stream *s = private_data;
	unsigned int i, packets;
	__be32 *ctx_header = header;

	if (s->packet_index < 0)
		return;

	// The number of packets in the buffer.
	packets = header_length / s->ctx_data.tx.ctx_header_size;

	for (i = 0; i < packets; i++) {
		u32 cycle;
		unsigned int payload_length;
		unsigned int data_block;
		unsigned int syt;
		__be32 *buffer;
		unsigned int pcm_frames = 0;
		struct fw_iso_packet params = {0};
		struct snd_pcm_substream *pcm;
		int err;

		cycle = compute_cycle_count(ctx_header[1]);
		err = parse_ir_ctx_header(s, cycle, ctx_header, &payload_length,
					  &data_block, &syt, i);
		if (err < 0 && err != -EAGAIN)
			break;
		if (err >= 0) {
			buffer = s->buffer.packets[s->packet_index].buffer;
			pcm_frames = s->process_data_blocks(s, buffer,
							    data_block, &syt);
		}

		if (queue_in_packet(s, &params) < 0)
			break;

		pcm = READ_ONCE(s->pcm);
		if (pcm && pcm_frames > 0)
			update_pcm_pointers(s, pcm, pcm_frames);

		ctx_header += s->ctx_data.tx.ctx_header_size / sizeof(*ctx_header);
	}

	/* A queueing error occurred or an invalid payload was detected. */
	if (i < packets) {
		cancel_stream(s);
		return;
	}

	fw_iso_context_queue_flush(s->context);
}

/* this is executed one time */
static void amdtp_stream_first_callback(struct fw_iso_context *context,
					u32 tstamp, size_t header_length,
					void *header, void *private_data)
{
	struct amdtp_stream *s = private_data;
	const __be32 *ctx_header = header;
	u32 cycle;

	/*
	 * For an in-stream, the first packet has arrived.
	 * For an out-stream, we are prepared to transmit the first packet.
	 */
	s->callbacked = true;
	wake_up(&s->callback_wait);

	if (s->direction == AMDTP_IN_STREAM) {
		cycle = compute_cycle_count(ctx_header[1]);

		context->callback.sc = in_stream_callback;
	} else {
		cycle = compute_it_cycle(*ctx_header);

		context->callback.sc = out_stream_callback;
	}

	s->start_cycle = cycle;

	context->callback.sc(context, tstamp, header_length, header, s);
}

/**
 * amdtp_stream_start - start transferring packets
 * @s: the AMDTP stream to start
 * @channel: the isochronous channel on the bus
 * @speed: firewire speed code
 *
 * The stream cannot be started until it has been configured with
 * amdtp_stream_set_parameters() and it must be started before any PCM or MIDI
 * device can be started.
 */
int amdtp_stream_start(struct amdtp_stream *s, int channel, int speed)
{
	static const struct {
		unsigned int data_block;
		unsigned int syt_offset;
	} *entry, initial_state[] = {
		[CIP_SFC_32000]  = {  4, 3072 },
		[CIP_SFC_48000]  = {  6, 1024 },
		[CIP_SFC_96000]  = { 12, 1024 },
		[CIP_SFC_192000] = { 24, 1024 },
		[CIP_SFC_44100]  = {  0,   67 },
		[CIP_SFC_88200]  = {  0,   67 },
		[CIP_SFC_176400] = {  0,   67 },
	};
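	/*
	 * Note: for rates not based on 44.1 kHz the data_block seed is simply
	 * sample_rate / 8000 (4, 6, 12 or 24 blocks per cycle); for the
	 * 44.1 kHz family it seeds the phase counter used by
	 * calculate_data_blocks(), since the per-cycle block count there is
	 * not an integer.
	 */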
	unsigned int ctx_header_size;
	unsigned int max_ctx_payload_size;
	enum dma_data_direction dir;
	int type, tag, err;

	mutex_lock(&s->mutex);

	if (WARN_ON(amdtp_stream_running(s) ||
		    (s->data_block_quadlets < 1))) {
		err = -EBADFD;
		goto err_unlock;
	}

	if (s->direction == AMDTP_IN_STREAM) {
		s->data_block_counter = UINT_MAX;
	} else {
		entry = &initial_state[s->sfc];

		s->data_block_counter = 0;
		s->ctx_data.rx.data_block_state = entry->data_block;
		s->ctx_data.rx.syt_offset_state = entry->syt_offset;
		s->ctx_data.rx.last_syt_offset = TICKS_PER_CYCLE;
	}

	/* initialize packet buffer */
	if (s->direction == AMDTP_IN_STREAM) {
		dir = DMA_FROM_DEVICE;
		type = FW_ISO_CONTEXT_RECEIVE;
		if (!(s->flags & CIP_NO_HEADER))
			ctx_header_size = IR_CTX_HEADER_SIZE_CIP;
		else
			ctx_header_size = IR_CTX_HEADER_SIZE_NO_CIP;

		max_ctx_payload_size = amdtp_stream_get_max_payload(s) -
				       ctx_header_size;
	} else {
		dir = DMA_TO_DEVICE;
		type = FW_ISO_CONTEXT_TRANSMIT;
		ctx_header_size = 0;	// No effect for IT context.

		max_ctx_payload_size = amdtp_stream_get_max_payload(s);
		if (!(s->flags & CIP_NO_HEADER))
			max_ctx_payload_size -= IT_PKT_HEADER_SIZE_CIP;
	}

	err = iso_packets_buffer_init(&s->buffer, s->unit, QUEUE_LENGTH,
				      max_ctx_payload_size, dir);
	if (err < 0)
		goto err_unlock;

	s->context = fw_iso_context_create(fw_parent_device(s->unit)->card,
					   type, channel, speed, ctx_header_size,
					   amdtp_stream_first_callback, s);
	if (IS_ERR(s->context)) {
		err = PTR_ERR(s->context);
		if (err == -EBUSY)
			dev_err(&s->unit->device,
				"no free stream on this controller\n");
		goto err_buffer;
	}

	amdtp_stream_update(s);

	if (s->direction == AMDTP_IN_STREAM) {
		s->ctx_data.tx.max_ctx_payload_length = max_ctx_payload_size;
		s->ctx_data.tx.ctx_header_size = ctx_header_size;
	}

	if (s->flags & CIP_NO_HEADER)
		s->tag = TAG_NO_CIP_HEADER;
	else
		s->tag = TAG_CIP;

	s->packet_index = 0;
	do {
		struct fw_iso_packet params;
		if (s->direction == AMDTP_IN_STREAM) {
			err = queue_in_packet(s, &params);
		} else {
			params.header_length = 0;
			params.payload_length = 0;
			err = queue_out_packet(s, &params);
		}
		if (err < 0)
			goto err_context;
	} while (s->packet_index > 0);

	/* NOTE: TAG1 matches CIP. This just affects the in-stream. */
	tag = FW_ISO_CONTEXT_MATCH_TAG1;
	if ((s->flags & CIP_EMPTY_WITH_TAG0) || (s->flags & CIP_NO_HEADER))
		tag |= FW_ISO_CONTEXT_MATCH_TAG0;

	s->callbacked = false;
	err = fw_iso_context_start(s->context, -1, 0, tag);
	if (err < 0)
		goto err_context;

	mutex_unlock(&s->mutex);

	return 0;

err_context:
	fw_iso_context_destroy(s->context);
	s->context = ERR_PTR(-1);
err_buffer:
	iso_packets_buffer_destroy(&s->buffer, s->unit);
err_unlock:
	mutex_unlock(&s->mutex);

	return err;
}
EXPORT_SYMBOL(amdtp_stream_start);
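/*
 * A minimal sketch (not taken from any particular driver) of how a unit
 * driver is expected to use the API exported above; the process_blocks
 * callback, the protocol struct and the parameter values are hypothetical:
 *
 *	err = amdtp_stream_init(&s, unit, AMDTP_OUT_STREAM, CIP_BLOCKING,
 *				CIP_FMT_AM, process_blocks,
 *				sizeof(struct my_protocol));
 *	err = amdtp_stream_set_parameters(&s, 48000, data_block_quadlets);
 *	err = amdtp_stream_start(&s, channel, speed);
 *	...
 *	amdtp_stream_stop(&s);
 *	amdtp_stream_destroy(&s);
 */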

/**
 * amdtp_stream_pcm_pointer - get the PCM buffer position
 * @s: the AMDTP stream that transports the PCM data
 *
 * Returns the current buffer position, in frames.
 */
unsigned long amdtp_stream_pcm_pointer(struct amdtp_stream *s)
{
	/*
	 * This function is called in software IRQ context of period_tasklet or
	 * process context.
	 *
	 * When the software IRQ context was scheduled by the software IRQ
	 * context of the IR/IT contexts, queued packets were already handled.
	 * Therefore, there is no need to flush the queue in the buffer anymore.
	 *
	 * When the process context reaches here, some packets will already be
	 * queued in the buffer. These packets should be handled immediately
	 * to keep better granularity of the PCM pointer.
	 *
	 * Later, the process context will sometimes schedule the software IRQ
	 * context of the period_tasklet. Then there is no need to flush the
	 * queue, for the same reason as described for the IR/IT contexts.
	 */
	if (!in_interrupt() && amdtp_stream_running(s))
		fw_iso_context_flush_completions(s->context);

	return READ_ONCE(s->pcm_buffer_pointer);
}
EXPORT_SYMBOL(amdtp_stream_pcm_pointer);

/**
 * amdtp_stream_pcm_ack - acknowledge queued PCM frames
 * @s: the AMDTP stream that transfers the PCM frames
 *
 * Returns zero always.
 */
int amdtp_stream_pcm_ack(struct amdtp_stream *s)
{
	/*
	 * Process isochronous packets for recent isochronous cycle to handle
	 * queued PCM frames.
	 */
	if (amdtp_stream_running(s))
		fw_iso_context_flush_completions(s->context);

	return 0;
}
EXPORT_SYMBOL(amdtp_stream_pcm_ack);

/**
 * amdtp_stream_update - update the stream after a bus reset
 * @s: the AMDTP stream
 */
void amdtp_stream_update(struct amdtp_stream *s)
{
	/* Precomputing. */
	WRITE_ONCE(s->source_node_id_field,
		   (fw_parent_device(s->unit)->card->node_id << CIP_SID_SHIFT) & CIP_SID_MASK);
}
EXPORT_SYMBOL(amdtp_stream_update);

/**
 * amdtp_stream_stop - stop sending packets
 * @s: the AMDTP stream to stop
 *
 * All PCM and MIDI devices of the stream must be stopped before the stream
 * itself can be stopped.
 */
void amdtp_stream_stop(struct amdtp_stream *s)
{
	mutex_lock(&s->mutex);

	if (!amdtp_stream_running(s)) {
		mutex_unlock(&s->mutex);
		return;
	}

	tasklet_kill(&s->period_tasklet);
	fw_iso_context_stop(s->context);
	fw_iso_context_destroy(s->context);
	s->context = ERR_PTR(-1);
	iso_packets_buffer_destroy(&s->buffer, s->unit);

	s->callbacked = false;

	mutex_unlock(&s->mutex);
}
EXPORT_SYMBOL(amdtp_stream_stop);

/**
 * amdtp_stream_pcm_abort - abort the running PCM device
 * @s: the AMDTP stream about to be stopped
 *
 * If the isochronous stream needs to be stopped asynchronously, call this
 * function first to stop the PCM device.
 */
void amdtp_stream_pcm_abort(struct amdtp_stream *s)
{
	struct snd_pcm_substream *pcm;

	pcm = READ_ONCE(s->pcm);
	if (pcm)
		snd_pcm_stop_xrun(pcm);
}
EXPORT_SYMBOL(amdtp_stream_pcm_abort);