// SPDX-License-Identifier: GPL-2.0-only
/*
 * Audio and Music Data Transmission Protocol (IEC 61883-6) streams
 * with Common Isochronous Packet (IEC 61883-1) headers
 *
 * Copyright (c) Clemens Ladisch <clemens@ladisch.de>
 */
9 #include <linux/device.h>
10 #include <linux/err.h>
11 #include <linux/firewire.h>
12 #include <linux/firewire-constants.h>
13 #include <linux/module.h>
14 #include <linux/slab.h>
15 #include <sound/pcm.h>
16 #include <sound/pcm_params.h>
17 #include "amdtp-stream.h"
19 #define TICKS_PER_CYCLE 3072
20 #define CYCLES_PER_SECOND 8000
21 #define TICKS_PER_SECOND (TICKS_PER_CYCLE * CYCLES_PER_SECOND)
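// The IEEE 1394 cycle timer advances at 24.576 MHz: 3072 ticks per 125 usec cycle and
// 8000 cycles per second, so TICKS_PER_SECOND amounts to 24576000.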
23 #define OHCI_SECOND_MODULUS 8
25 /* Always support Linux tracing subsystem. */
26 #define CREATE_TRACE_POINTS
27 #include "amdtp-stream-trace.h"
29 #define TRANSFER_DELAY_TICKS 0x2e00 /* 479.17 microseconds */
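// 0x2e00 is 11776 ticks; at 24576000 ticks per second this is roughly 479.17 usec, i.e.
// slightly less than 4 isochronous cycles.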
31 /* isochronous header parameters */
32 #define ISO_DATA_LENGTH_SHIFT 16
#define TAG_NO_CIP_HEADER	0
#define TAG_CIP			1
// Common Isochronous Packet (CIP) header parameters. Use a two-quadlet CIP header when supported.
37 #define CIP_HEADER_QUADLETS 2
38 #define CIP_EOH_SHIFT 31
39 #define CIP_EOH (1u << CIP_EOH_SHIFT)
40 #define CIP_EOH_MASK 0x80000000
41 #define CIP_SID_SHIFT 24
42 #define CIP_SID_MASK 0x3f000000
43 #define CIP_DBS_MASK 0x00ff0000
44 #define CIP_DBS_SHIFT 16
45 #define CIP_SPH_MASK 0x00000400
46 #define CIP_SPH_SHIFT 10
47 #define CIP_DBC_MASK 0x000000ff
48 #define CIP_FMT_SHIFT 24
49 #define CIP_FMT_MASK 0x3f000000
50 #define CIP_FDF_MASK 0x00ff0000
51 #define CIP_FDF_SHIFT 16
52 #define CIP_FDF_NO_DATA 0xff
53 #define CIP_SYT_MASK 0x0000ffff
54 #define CIP_SYT_NO_INFO 0xffff
55 #define CIP_SYT_CYCLE_MODULUS 16
56 #define CIP_NO_DATA ((CIP_FDF_NO_DATA << CIP_FDF_SHIFT) | CIP_SYT_NO_INFO)
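// The SYT field packs the low 4 bits of the target cycle (hence the modulus of 16) into its
// upper nibble and a 12-bit tick offset within that cycle into its lower bits; FDF 0xff with
// SYT 0xffff marks a packet which transfers no event.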
58 #define CIP_HEADER_SIZE (sizeof(__be32) * CIP_HEADER_QUADLETS)
60 /* Audio and Music transfer protocol specific parameters */
61 #define CIP_FMT_AM 0x10
62 #define AMDTP_FDF_NO_DATA 0xff
64 // For iso header and tstamp.
65 #define IR_CTX_HEADER_DEFAULT_QUADLETS 2
67 #define IR_CTX_HEADER_SIZE_NO_CIP (sizeof(__be32) * IR_CTX_HEADER_DEFAULT_QUADLETS)
68 // Add two quadlets CIP header.
69 #define IR_CTX_HEADER_SIZE_CIP (IR_CTX_HEADER_SIZE_NO_CIP + CIP_HEADER_SIZE)
70 #define HEADER_TSTAMP_MASK 0x0000ffff
72 #define IT_PKT_HEADER_SIZE_CIP CIP_HEADER_SIZE
73 #define IT_PKT_HEADER_SIZE_NO_CIP 0 // Nothing.
// The initial firmware of OXFW970 can postpone packet transmission while finishing an
// asynchronous transaction. This module accepts at most 5 skipped cycles to avoid buffer
// overrun. If the actual device skips more than that, this module stops the packet streaming.
78 #define IR_JUMBO_PAYLOAD_MAX_SKIP_CYCLES 5
80 static void pcm_period_work(struct work_struct *work);
83 * amdtp_stream_init - initialize an AMDTP stream structure
84 * @s: the AMDTP stream to initialize
85 * @unit: the target of the stream
86 * @dir: the direction of stream
 * @flags: the details of the streaming protocol, consisting of cip_flags enumeration constants
88 * @fmt: the value of fmt field in CIP header
89 * @process_ctx_payloads: callback handler to process payloads of isoc context
 * @protocol_size: the size to newly allocate for protocol-specific data
92 int amdtp_stream_init(struct amdtp_stream *s, struct fw_unit *unit,
93 enum amdtp_stream_direction dir, unsigned int flags,
		      unsigned int fmt,
		      amdtp_stream_process_ctx_payloads_t process_ctx_payloads,
96 unsigned int protocol_size)
{
	if (process_ctx_payloads == NULL)
		return -EINVAL;

	s->protocol = kzalloc(protocol_size, GFP_KERNEL);
	if (!s->protocol)
		return -ENOMEM;
108 s->context = ERR_PTR(-1);
109 mutex_init(&s->mutex);
110 INIT_WORK(&s->period_work, pcm_period_work);
113 init_waitqueue_head(&s->ready_wait);
	s->fmt = fmt;
	s->process_ctx_payloads = process_ctx_payloads;

	return 0;
}
120 EXPORT_SYMBOL(amdtp_stream_init);
123 * amdtp_stream_destroy - free stream resources
124 * @s: the AMDTP stream to destroy
126 void amdtp_stream_destroy(struct amdtp_stream *s)
128 /* Not initialized. */
	if (s->protocol == NULL)
		return;

	WARN_ON(amdtp_stream_running(s));
	kfree(s->protocol);
	mutex_destroy(&s->mutex);
}
136 EXPORT_SYMBOL(amdtp_stream_destroy);
const unsigned int amdtp_syt_intervals[CIP_SFC_COUNT] = {
	[CIP_SFC_32000]  =  8,
	[CIP_SFC_44100]  =  8,
	[CIP_SFC_48000]  =  8,
	[CIP_SFC_88200]  = 16,
143 [CIP_SFC_96000] = 16,
144 [CIP_SFC_176400] = 32,
145 [CIP_SFC_192000] = 32,
147 EXPORT_SYMBOL(amdtp_syt_intervals);
149 const unsigned int amdtp_rate_table[CIP_SFC_COUNT] = {
150 [CIP_SFC_32000] = 32000,
151 [CIP_SFC_44100] = 44100,
152 [CIP_SFC_48000] = 48000,
153 [CIP_SFC_88200] = 88200,
154 [CIP_SFC_96000] = 96000,
155 [CIP_SFC_176400] = 176400,
156 [CIP_SFC_192000] = 192000,
158 EXPORT_SYMBOL(amdtp_rate_table);
160 static int apply_constraint_to_size(struct snd_pcm_hw_params *params,
161 struct snd_pcm_hw_rule *rule)
163 struct snd_interval *s = hw_param_interval(params, rule->var);
164 const struct snd_interval *r =
165 hw_param_interval_c(params, SNDRV_PCM_HW_PARAM_RATE);
166 struct snd_interval t = {0};
	unsigned int step = 0;
	int i;
170 for (i = 0; i < CIP_SFC_COUNT; ++i) {
171 if (snd_interval_test(r, amdtp_rate_table[i]))
			step = max(step, amdtp_syt_intervals[i]);
	}
175 t.min = roundup(s->min, step);
	t.max = rounddown(s->max, step);
	t.integer = 1;
179 return snd_interval_refine(s, &t);
183 * amdtp_stream_add_pcm_hw_constraints - add hw constraints for PCM substream
184 * @s: the AMDTP stream, which must be initialized.
185 * @runtime: the PCM substream runtime
187 int amdtp_stream_add_pcm_hw_constraints(struct amdtp_stream *s,
188 struct snd_pcm_runtime *runtime)
190 struct snd_pcm_hardware *hw = &runtime->hw;
191 unsigned int ctx_header_size;
	unsigned int maximum_usec_per_period;
	int err;
195 hw->info = SNDRV_PCM_INFO_BLOCK_TRANSFER |
196 SNDRV_PCM_INFO_INTERLEAVED |
197 SNDRV_PCM_INFO_JOINT_DUPLEX |
198 SNDRV_PCM_INFO_MMAP |
199 SNDRV_PCM_INFO_MMAP_VALID |
200 SNDRV_PCM_INFO_NO_PERIOD_WAKEUP;
	hw->periods_min = 2;
	hw->periods_max = UINT_MAX;
205 /* bytes for a frame */
206 hw->period_bytes_min = 4 * hw->channels_max;
	/* Just to prevent from allocating too many pages. */
209 hw->period_bytes_max = hw->period_bytes_min * 2048;
210 hw->buffer_bytes_max = hw->period_bytes_max * hw->periods_min;
	// The Linux driver for 1394 OHCI controllers voluntarily flushes an isoc
	// context when the total size of accumulated context headers reaches
	// PAGE_SIZE. This kicks work for the isoc context and brings its
	// callback in the middle of scheduled interrupts.
	// Although AMDTP streams in the same domain use the same events per
	// IRQ, use the largest size of context header between IT/IR contexts.
	// Here, the size of the context header in the IR context is used for
	// both contexts.
220 if (!(s->flags & CIP_NO_HEADER))
		ctx_header_size = IR_CTX_HEADER_SIZE_CIP;
	else
		ctx_header_size = IR_CTX_HEADER_SIZE_NO_CIP;
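	// For instance, with 4 KiB pages and the 16-byte IR context header (CIP case), the
	// context is flushed after 4096 / 16 = 256 packets, i.e. 256 * 125 usec = 32 msec;
	// the period time must not exceed this bound (figures assume a PAGE_SIZE of 4096).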
224 maximum_usec_per_period = USEC_PER_SEC * PAGE_SIZE /
225 CYCLES_PER_SECOND / ctx_header_size;
	// In IEC 61883-6, one isoc packet can transfer events up to the value
	// of the syt interval. This comes from the interval of the isoc cycle. As a
	// 1394 OHCI controller can generate a hardware IRQ per isoc packet, the
	// interval is 125 usec.
	// However, there are two modes of transmission in IEC 61883-6: blocking
	// and non-blocking. In blocking mode, the sequence of isoc packets
	// includes 'empty' or 'NODATA' packets which include no event. In
	// non-blocking mode, the number of events per packet is variable up to
	// the syt interval.
	// Due to the above protocol design, the minimum PCM frames per
	// interrupt should be double the value of the syt interval, thus it is
	// 250 usec.
239 err = snd_pcm_hw_constraint_minmax(runtime,
240 SNDRV_PCM_HW_PARAM_PERIOD_TIME,
					   250, maximum_usec_per_period);
	if (err < 0)
		goto end;
245 /* Non-Blocking stream has no more constraints */
	if (!(s->flags & CIP_BLOCKING))
		goto end;
	/*
	 * One AMDTP packet can include some frames. In blocking mode, the
	 * number equals the SYT_INTERVAL. So the number is 8, 16 or 32,
	 * depending on its sampling rate. For accurate period interrupts, it's
	 * preferable to align period/buffer sizes to the current SYT_INTERVAL.
	 */
255 err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_PERIOD_SIZE,
256 apply_constraint_to_size, NULL,
257 SNDRV_PCM_HW_PARAM_PERIOD_SIZE,
				  SNDRV_PCM_HW_PARAM_RATE, -1);
	if (err < 0)
		goto end;
261 err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_BUFFER_SIZE,
262 apply_constraint_to_size, NULL,
263 SNDRV_PCM_HW_PARAM_BUFFER_SIZE,
				  SNDRV_PCM_HW_PARAM_RATE, -1);
end:
	return err;
}
270 EXPORT_SYMBOL(amdtp_stream_add_pcm_hw_constraints);
273 * amdtp_stream_set_parameters - set stream parameters
274 * @s: the AMDTP stream to configure
275 * @rate: the sample rate
276 * @data_block_quadlets: the size of a data block in quadlet unit
278 * The parameters must be set before the stream is started, and must not be
279 * changed while the stream is running.
281 int amdtp_stream_set_parameters(struct amdtp_stream *s, unsigned int rate,
282 unsigned int data_block_quadlets)
286 for (sfc = 0; sfc < ARRAY_SIZE(amdtp_rate_table); ++sfc) {
		if (amdtp_rate_table[sfc] == rate)
			break;
	}
	if (sfc == ARRAY_SIZE(amdtp_rate_table))
		return -EINVAL;

	s->sfc = sfc;
294 s->data_block_quadlets = data_block_quadlets;
295 s->syt_interval = amdtp_syt_intervals[sfc];
297 // default buffering in the device.
298 s->transfer_delay = TRANSFER_DELAY_TICKS - TICKS_PER_CYCLE;
300 // additional buffering needed to adjust for no-data packets.
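	// In blocking mode a no-data packet still occupies its cycle, so up to one syt
	// interval of events can accumulate in the device; e.g. at 32 kHz this adds
	// 24576000 * 8 / 32000 = 6144 ticks, i.e. two cycles.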
301 if (s->flags & CIP_BLOCKING)
		s->transfer_delay += TICKS_PER_SECOND * s->syt_interval / rate;

	return 0;
}
306 EXPORT_SYMBOL(amdtp_stream_set_parameters);
// The CIP header is processed as part of the context header, apart from the context payload.
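// The result is the worst case payload per packet: syt_interval events of data_block_quadlets
// quadlets each, e.g. 16 events * 10 quadlets * 4 bytes = 640 bytes (example figures only),
// further multiplied when CIP_JUMBO_PAYLOAD lets several skipped cycles arrive at once.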
309 static int amdtp_stream_get_max_ctx_payload_size(struct amdtp_stream *s)
311 unsigned int multiplier;
313 if (s->flags & CIP_JUMBO_PAYLOAD)
		multiplier = IR_JUMBO_PAYLOAD_MAX_SKIP_CYCLES;
	else
		multiplier = 1;
318 return s->syt_interval * s->data_block_quadlets * sizeof(__be32) * multiplier;
322 * amdtp_stream_get_max_payload - get the stream's packet size
323 * @s: the AMDTP stream
325 * This function must not be called before the stream has been configured
326 * with amdtp_stream_set_parameters().
328 unsigned int amdtp_stream_get_max_payload(struct amdtp_stream *s)
330 unsigned int cip_header_size;
332 if (!(s->flags & CIP_NO_HEADER))
		cip_header_size = CIP_HEADER_SIZE;
	else
		cip_header_size = 0;
337 return cip_header_size + amdtp_stream_get_max_ctx_payload_size(s);
339 EXPORT_SYMBOL(amdtp_stream_get_max_payload);
342 * amdtp_stream_pcm_prepare - prepare PCM device for running
343 * @s: the AMDTP stream
345 * This function should be called from the PCM device's .prepare callback.
347 void amdtp_stream_pcm_prepare(struct amdtp_stream *s)
349 cancel_work_sync(&s->period_work);
350 s->pcm_buffer_pointer = 0;
351 s->pcm_period_pointer = 0;
353 EXPORT_SYMBOL(amdtp_stream_pcm_prepare);
355 static void pool_blocking_data_blocks(struct amdtp_stream *s, struct seq_desc *descs,
356 const unsigned int seq_size, unsigned int seq_tail,
359 const unsigned int syt_interval = s->syt_interval;
362 for (i = 0; i < count; ++i) {
363 struct seq_desc *desc = descs + seq_tail;
365 if (desc->syt_offset != CIP_SYT_NO_INFO)
			desc->data_blocks = syt_interval;
		else
			desc->data_blocks = 0;
370 seq_tail = (seq_tail + 1) % seq_size;
374 static void pool_ideal_nonblocking_data_blocks(struct amdtp_stream *s, struct seq_desc *descs,
375 const unsigned int seq_size, unsigned int seq_tail,
378 const enum cip_sfc sfc = s->sfc;
379 unsigned int state = s->ctx_data.rx.data_block_state;
382 for (i = 0; i < count; ++i) {
383 struct seq_desc *desc = descs + seq_tail;
385 if (!cip_sfc_is_base_44100(sfc)) {
386 // Sample_rate / 8000 is an integer, and precomputed.
			desc->data_blocks = state;
		} else {
			unsigned int phase = state;
392 * This calculates the number of data blocks per packet so that
393 * 1) the overall rate is correct and exactly synchronized to
395 * 2) packets with a rounded-up number of blocks occur as early
396 * as possible in the sequence (to prevent underruns of the
399 if (sfc == CIP_SFC_44100)
400 /* 6 6 5 6 5 6 5 ... */
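				/* (Over each 80-packet cycle: 41 packets of 6 and 39 packets
				 * of 5, i.e. 441 events = 44100 / 8000 * 80.) */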
401 desc->data_blocks = 5 + ((phase & 1) ^ (phase == 0 || phase >= 40));
			else
				/* 12 11 11 11 11 ... or 23 22 22 22 22 ... */
				desc->data_blocks = 11 * (sfc >> 1) + (phase == 0);
			if (++phase >= (80 >> (sfc >> 1)))
				phase = 0;
			state = phase;
		}
410 seq_tail = (seq_tail + 1) % seq_size;
413 s->ctx_data.rx.data_block_state = state;
416 static unsigned int calculate_syt_offset(unsigned int *last_syt_offset,
417 unsigned int *syt_offset_state, enum cip_sfc sfc)
419 unsigned int syt_offset;
421 if (*last_syt_offset < TICKS_PER_CYCLE) {
422 if (!cip_sfc_is_base_44100(sfc))
			syt_offset = *last_syt_offset + *syt_offset_state;
		else {
		/*
426 * The time, in ticks, of the n'th SYT_INTERVAL sample is:
427 * n * SYT_INTERVAL * 24576000 / sample_rate
428 * Modulo TICKS_PER_CYCLE, the difference between successive
429 * elements is about 1386.23. Rounding the results of this
430 * formula to the SYT precision results in a sequence of
431 * differences that begins with:
432 * 1386 1386 1387 1386 1386 1386 1387 1386 1386 1386 1387 ...
		 * This code generates _exactly_ the same sequence.
		 */
435 unsigned int phase = *syt_offset_state;
436 unsigned int index = phase % 13;
438 syt_offset = *last_syt_offset;
			syt_offset += 1386 + ((index && !(index & 3)) ||
					      phase == 146);
			if (++phase >= 147)
				phase = 0;
			*syt_offset_state = phase;
		}
	} else
		syt_offset = *last_syt_offset - TICKS_PER_CYCLE;
447 *last_syt_offset = syt_offset;
449 if (syt_offset >= TICKS_PER_CYCLE)
		syt_offset = CIP_SYT_NO_INFO;

	return syt_offset;
}
455 static void pool_ideal_syt_offsets(struct amdtp_stream *s, struct seq_desc *descs,
456 const unsigned int seq_size, unsigned int seq_tail,
459 const enum cip_sfc sfc = s->sfc;
460 unsigned int last = s->ctx_data.rx.last_syt_offset;
461 unsigned int state = s->ctx_data.rx.syt_offset_state;
464 for (i = 0; i < count; ++i) {
465 struct seq_desc *desc = descs + seq_tail;
467 desc->syt_offset = calculate_syt_offset(&last, &state, sfc);
469 seq_tail = (seq_tail + 1) % seq_size;
472 s->ctx_data.rx.last_syt_offset = last;
473 s->ctx_data.rx.syt_offset_state = state;
476 static unsigned int compute_syt_offset(unsigned int syt, unsigned int cycle,
477 unsigned int transfer_delay)
479 unsigned int cycle_lo = (cycle % CYCLES_PER_SECOND) & 0x0f;
480 unsigned int syt_cycle_lo = (syt & 0xf000) >> 12;
481 unsigned int syt_offset;
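	// The SYT field encodes the low 4 bits of the intended cycle in bits 12-15 and the
	// offset within that cycle, in ticks, in bits 0-11.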
484 if (syt_cycle_lo < cycle_lo)
485 syt_cycle_lo += CIP_SYT_CYCLE_MODULUS;
486 syt_cycle_lo -= cycle_lo;
	// Subtract transfer delay so that the synchronization offset is not so large
	// at transmission.
490 syt_offset = syt_cycle_lo * TICKS_PER_CYCLE + (syt & 0x0fff);
491 if (syt_offset < transfer_delay)
492 syt_offset += CIP_SYT_CYCLE_MODULUS * TICKS_PER_CYCLE;
494 return syt_offset - transfer_delay;
// Both the producer and the consumer of the queue run on the same clock of the IEEE 1394 bus.
// Additionally, the sequence of tx packets is strictly checked for any discontinuity before
// entries are filled into the queue. Therefore the calculation is safe even if it looks
// fragile with respect to overrun.
static unsigned int calculate_cached_cycle_count(struct amdtp_stream *s, unsigned int head)
{
	const unsigned int cache_size = s->ctx_data.tx.cache.size;
	unsigned int cycles = s->ctx_data.tx.cache.tail;

	if (cycles < head)
		cycles += cache_size;
	cycles -= head;

	return cycles;
}
513 static void cache_seq(struct amdtp_stream *s, const struct pkt_desc *descs, unsigned int desc_count)
515 const unsigned int transfer_delay = s->transfer_delay;
516 const unsigned int cache_size = s->ctx_data.tx.cache.size;
517 struct seq_desc *cache = s->ctx_data.tx.cache.descs;
518 unsigned int cache_tail = s->ctx_data.tx.cache.tail;
519 bool aware_syt = !(s->flags & CIP_UNAWARE_SYT);
522 for (i = 0; i < desc_count; ++i) {
523 struct seq_desc *dst = cache + cache_tail;
524 const struct pkt_desc *src = descs + i;
526 if (aware_syt && src->syt != CIP_SYT_NO_INFO)
			dst->syt_offset = compute_syt_offset(src->syt, src->cycle, transfer_delay);
		else
			dst->syt_offset = CIP_SYT_NO_INFO;
530 dst->data_blocks = src->data_blocks;
532 cache_tail = (cache_tail + 1) % cache_size;
535 s->ctx_data.tx.cache.tail = cache_tail;
538 static void pool_ideal_seq_descs(struct amdtp_stream *s, unsigned int count)
540 struct seq_desc *descs = s->ctx_data.rx.seq.descs;
541 unsigned int seq_tail = s->ctx_data.rx.seq.tail;
542 const unsigned int seq_size = s->ctx_data.rx.seq.size;
544 pool_ideal_syt_offsets(s, descs, seq_size, seq_tail, count);
546 if (s->flags & CIP_BLOCKING)
		pool_blocking_data_blocks(s, descs, seq_size, seq_tail, count);
	else
		pool_ideal_nonblocking_data_blocks(s, descs, seq_size, seq_tail, count);
551 s->ctx_data.rx.seq.tail = (seq_tail + count) % seq_size;
554 static void pool_replayed_seq(struct amdtp_stream *s, unsigned int count)
556 struct amdtp_stream *target = s->ctx_data.rx.replay_target;
557 const struct seq_desc *cache = target->ctx_data.tx.cache.descs;
558 const unsigned int cache_size = target->ctx_data.tx.cache.size;
559 unsigned int cache_head = s->ctx_data.rx.cache_head;
560 struct seq_desc *descs = s->ctx_data.rx.seq.descs;
561 const unsigned int seq_size = s->ctx_data.rx.seq.size;
562 unsigned int seq_tail = s->ctx_data.rx.seq.tail;
565 for (i = 0; i < count; ++i) {
566 descs[seq_tail] = cache[cache_head];
567 seq_tail = (seq_tail + 1) % seq_size;
568 cache_head = (cache_head + 1) % cache_size;
571 s->ctx_data.rx.seq.tail = seq_tail;
572 s->ctx_data.rx.cache_head = cache_head;
575 static void pool_seq_descs(struct amdtp_stream *s, unsigned int count)
577 struct amdtp_domain *d = s->domain;
579 if (!d->replay.enable || !s->ctx_data.rx.replay_target) {
		pool_ideal_seq_descs(s, count);
	} else {
		if (!d->replay.on_the_fly) {
			pool_replayed_seq(s, count);
		} else {
			struct amdtp_stream *tx = s->ctx_data.rx.replay_target;
586 const unsigned int cache_size = tx->ctx_data.tx.cache.size;
587 const unsigned int cache_head = s->ctx_data.rx.cache_head;
588 unsigned int cached_cycles = calculate_cached_cycle_count(tx, cache_head);
590 if (cached_cycles > count && cached_cycles > cache_size / 2)
				pool_replayed_seq(s, count);
			else
				pool_ideal_seq_descs(s, count);
		}
	}
}
598 static void update_pcm_pointers(struct amdtp_stream *s,
				struct snd_pcm_substream *pcm,
				unsigned int frames)
{
	unsigned int ptr;
604 ptr = s->pcm_buffer_pointer + frames;
605 if (ptr >= pcm->runtime->buffer_size)
606 ptr -= pcm->runtime->buffer_size;
607 WRITE_ONCE(s->pcm_buffer_pointer, ptr);
609 s->pcm_period_pointer += frames;
610 if (s->pcm_period_pointer >= pcm->runtime->period_size) {
611 s->pcm_period_pointer -= pcm->runtime->period_size;
613 // The program in user process should periodically check the status of intermediate
614 // buffer associated to PCM substream to process PCM frames in the buffer, instead
615 // of receiving notification of period elapsed by poll wait.
616 if (!pcm->runtime->no_period_wakeup)
617 queue_work(system_highpri_wq, &s->period_work);
621 static void pcm_period_work(struct work_struct *work)
{
	struct amdtp_stream *s = container_of(work, struct amdtp_stream,
					      period_work);
	struct snd_pcm_substream *pcm = READ_ONCE(s->pcm);

	if (pcm)
		snd_pcm_period_elapsed(pcm);
}
631 static int queue_packet(struct amdtp_stream *s, struct fw_iso_packet *params,
			bool sched_irq)
{
	int err;

	params->interrupt = sched_irq;
637 params->tag = s->tag;
640 err = fw_iso_context_queue(s->context, params, &s->buffer.iso_buffer,
641 s->buffer.packets[s->packet_index].offset);
	if (err < 0) {
		dev_err(&s->unit->device, "queueing error: %d\n", err);
		goto end;
	}

	if (++s->packet_index >= s->queue_size)
		s->packet_index = 0;
end:
	return err;
}
653 static inline int queue_out_packet(struct amdtp_stream *s,
654 struct fw_iso_packet *params, bool sched_irq)
{
	params->skip =
		!!(params->header_length == 0 && params->payload_length == 0);
658 return queue_packet(s, params, sched_irq);
661 static inline int queue_in_packet(struct amdtp_stream *s,
662 struct fw_iso_packet *params)
664 // Queue one packet for IR context.
665 params->header_length = s->ctx_data.tx.ctx_header_size;
666 params->payload_length = s->ctx_data.tx.max_ctx_payload_length;
667 params->skip = false;
668 return queue_packet(s, params, false);
671 static void generate_cip_header(struct amdtp_stream *s, __be32 cip_header[2],
672 unsigned int data_block_counter, unsigned int syt)
674 cip_header[0] = cpu_to_be32(READ_ONCE(s->source_node_id_field) |
675 (s->data_block_quadlets << CIP_DBS_SHIFT) |
				((s->sph << CIP_SPH_SHIFT) & CIP_SPH_MASK) |
				data_block_counter);
678 cip_header[1] = cpu_to_be32(CIP_EOH |
679 ((s->fmt << CIP_FMT_SHIFT) & CIP_FMT_MASK) |
680 ((s->ctx_data.rx.fdf << CIP_FDF_SHIFT) & CIP_FDF_MASK) |
681 (syt & CIP_SYT_MASK));
684 static void build_it_pkt_header(struct amdtp_stream *s, unsigned int cycle,
685 struct fw_iso_packet *params, unsigned int header_length,
686 unsigned int data_blocks,
687 unsigned int data_block_counter,
688 unsigned int syt, unsigned int index)
	unsigned int payload_length;
	__be32 *cip_header;
693 payload_length = data_blocks * sizeof(__be32) * s->data_block_quadlets;
694 params->payload_length = payload_length;
696 if (header_length > 0) {
697 cip_header = (__be32 *)params->header;
698 generate_cip_header(s, cip_header, data_block_counter, syt);
		params->header_length = header_length;
	} else {
		cip_header = NULL;
	}
704 trace_amdtp_packet(s, cycle, cip_header, payload_length + header_length, data_blocks,
705 data_block_counter, s->packet_index, index);
708 static int check_cip_header(struct amdtp_stream *s, const __be32 *buf,
709 unsigned int payload_length,
710 unsigned int *data_blocks,
			    unsigned int *data_block_counter, unsigned int *syt)
{
	u32 cip_header[2];
	unsigned int sph;
	unsigned int fmt;
	unsigned int fdf;
	unsigned int dbc;
	bool lost;
720 cip_header[0] = be32_to_cpu(buf[0]);
721 cip_header[1] = be32_to_cpu(buf[1]);
	/*
	 * This module supports 'Two-quadlet CIP header with SYT field'.
	 * For convenience, also check whether the FMT field is AM824 or not.
	 */
727 if ((((cip_header[0] & CIP_EOH_MASK) == CIP_EOH) ||
728 ((cip_header[1] & CIP_EOH_MASK) != CIP_EOH)) &&
729 (!(s->flags & CIP_HEADER_WITHOUT_EOH))) {
730 dev_info_ratelimited(&s->unit->device,
731 "Invalid CIP header for AMDTP: %08X:%08X\n",
				     cip_header[0], cip_header[1]);
		return -EAGAIN;
	}
736 /* Check valid protocol or not. */
737 sph = (cip_header[0] & CIP_SPH_MASK) >> CIP_SPH_SHIFT;
738 fmt = (cip_header[1] & CIP_FMT_MASK) >> CIP_FMT_SHIFT;
739 if (sph != s->sph || fmt != s->fmt) {
740 dev_info_ratelimited(&s->unit->device,
741 "Detect unexpected protocol: %08x %08x\n",
				     cip_header[0], cip_header[1]);
		return -EAGAIN;
	}
746 /* Calculate data blocks */
747 fdf = (cip_header[1] & CIP_FDF_MASK) >> CIP_FDF_SHIFT;
	if (payload_length == 0 || (fmt == CIP_FMT_AM && fdf == AMDTP_FDF_NO_DATA)) {
		*data_blocks = 0;
	} else {
751 unsigned int data_block_quadlets =
752 (cip_header[0] & CIP_DBS_MASK) >> CIP_DBS_SHIFT;
753 /* avoid division by zero */
754 if (data_block_quadlets == 0) {
755 dev_err(&s->unit->device,
756 "Detect invalid value in dbs field: %08X\n",
760 if (s->flags & CIP_WRONG_DBS)
761 data_block_quadlets = s->data_block_quadlets;
763 *data_blocks = payload_length / sizeof(__be32) / data_block_quadlets;
766 /* Check data block counter continuity */
767 dbc = cip_header[0] & CIP_DBC_MASK;
768 if (*data_blocks == 0 && (s->flags & CIP_EMPTY_HAS_WRONG_DBC) &&
769 *data_block_counter != UINT_MAX)
770 dbc = *data_block_counter;
772 if ((dbc == 0x00 && (s->flags & CIP_SKIP_DBC_ZERO_CHECK)) ||
	    *data_block_counter == UINT_MAX) {
		lost = false;
775 } else if (!(s->flags & CIP_DBC_IS_END_EVENT)) {
776 lost = dbc != *data_block_counter;
778 unsigned int dbc_interval;
780 if (*data_blocks > 0 && s->ctx_data.tx.dbc_interval > 0)
			dbc_interval = s->ctx_data.tx.dbc_interval;
		else
			dbc_interval = *data_blocks;
		lost = dbc != ((*data_block_counter + dbc_interval) & 0xff);
	}

	if (lost) {
		dev_err(&s->unit->device,
			"Detect discontinuity of CIP: %02X %02X\n",
			*data_block_counter, dbc);
		return -EIO;
	}
795 *data_block_counter = dbc;
797 if (!(s->flags & CIP_UNAWARE_SYT))
		*syt = cip_header[1] & CIP_SYT_MASK;

	return 0;
}
803 static int parse_ir_ctx_header(struct amdtp_stream *s, unsigned int cycle,
804 const __be32 *ctx_header,
805 unsigned int *data_blocks,
806 unsigned int *data_block_counter,
807 unsigned int *syt, unsigned int packet_index, unsigned int index)
809 unsigned int payload_length;
810 const __be32 *cip_header;
811 unsigned int cip_header_size;
813 payload_length = be32_to_cpu(ctx_header[0]) >> ISO_DATA_LENGTH_SHIFT;
815 if (!(s->flags & CIP_NO_HEADER))
		cip_header_size = CIP_HEADER_SIZE;
	else
		cip_header_size = 0;
820 if (payload_length > cip_header_size + s->ctx_data.tx.max_ctx_payload_length) {
821 dev_err(&s->unit->device,
822 "Detect jumbo payload: %04x %04x\n",
			payload_length, cip_header_size + s->ctx_data.tx.max_ctx_payload_length);
		return -EIO;
	}
827 if (cip_header_size > 0) {
828 if (payload_length >= cip_header_size) {
			int err;

			cip_header = ctx_header + IR_CTX_HEADER_DEFAULT_QUADLETS;
			err = check_cip_header(s, cip_header, payload_length - cip_header_size,
					       data_blocks, data_block_counter, syt);
			if (err < 0)
				return err;
		} else {
			// Handle the cycle so that an empty packet arrives.
			cip_header = NULL;
			*data_blocks = 0;
			*syt = 0;
		}
	} else {
		cip_header = NULL;
		*data_blocks = payload_length / sizeof(__be32) / s->data_block_quadlets;
		*syt = 0;
	}
847 if (*data_block_counter == UINT_MAX)
848 *data_block_counter = 0;
851 trace_amdtp_packet(s, cycle, cip_header, payload_length, *data_blocks,
			   *data_block_counter, packet_index, index);

	return 0;
}
// In the CYCLE_TIMER register of IEEE 1394, 7 bits are used to represent the second. On the
// other hand, in the DMA descriptors of 1394 OHCI, 3 bits are used to represent it. Thus, via
// the Linux FireWire subsystem, we can get only those 3 bits for the second.
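// The 16-bit tstamp therefore packs a 3-bit secondCount into bits 13-15 and a 13-bit
// cycleCount (0-7999) into bits 0-12, yielding a cycle count modulo 8 * 8000.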
860 static inline u32 compute_ohci_cycle_count(__be32 ctx_header_tstamp)
{
	u32 tstamp = be32_to_cpu(ctx_header_tstamp) & HEADER_TSTAMP_MASK;

	return (((tstamp >> 13) & 0x07) * 8000) + (tstamp & 0x1fff);
}
866 static inline u32 increment_ohci_cycle_count(u32 cycle, unsigned int addend)
{
	cycle += addend;
	if (cycle >= OHCI_SECOND_MODULUS * CYCLES_PER_SECOND)
		cycle -= OHCI_SECOND_MODULUS * CYCLES_PER_SECOND;

	return cycle;
}
874 static int compare_ohci_cycle_count(u32 lval, u32 rval)
{
	if (lval == rval)
		return 0;
	else if (lval < rval && rval - lval < OHCI_SECOND_MODULUS * CYCLES_PER_SECOND / 2)
		return -1;
	else
		return 1;
}
// Align to the actual cycle count for the packet which is going to be scheduled.
// This module queues the same number of isochronous cycles as the queue size to
// skip isochronous cycles, therefore it's OK to just increment the cycle by the
// queue size for the scheduled cycle.
888 static inline u32 compute_ohci_it_cycle(const __be32 ctx_header_tstamp,
889 unsigned int queue_size)
{
	u32 cycle = compute_ohci_cycle_count(ctx_header_tstamp);

	return increment_ohci_cycle_count(cycle, queue_size);
}
895 static int generate_device_pkt_descs(struct amdtp_stream *s,
896 struct pkt_desc *descs,
897 const __be32 *ctx_header,
898 unsigned int packets,
899 unsigned int *desc_count)
901 unsigned int next_cycle = s->next_cycle;
902 unsigned int dbc = s->data_block_counter;
903 unsigned int packet_index = s->packet_index;
904 unsigned int queue_size = s->queue_size;
909 for (i = 0; i < packets; ++i) {
910 struct pkt_desc *desc = descs + *desc_count;
		unsigned int cycle;
		unsigned int data_blocks;
		unsigned int syt;
		bool lost;
916 cycle = compute_ohci_cycle_count(ctx_header[1]);
917 lost = (next_cycle != cycle);
919 if (s->flags & CIP_NO_HEADER) {
			// Fireface skips transmission just for an isoc cycle corresponding
			// to an empty packet.
922 unsigned int prev_cycle = next_cycle;
924 next_cycle = increment_ohci_cycle_count(next_cycle, 1);
925 lost = (next_cycle != cycle);
				// Prepare a description for the skipped cycle for
				// sequence replay.
929 desc->cycle = prev_cycle;
931 desc->data_blocks = 0;
932 desc->data_block_counter = dbc;
933 desc->ctx_payload = NULL;
937 } else if (s->flags & CIP_JUMBO_PAYLOAD) {
			// OXFW970 skips transmission for several isoc cycles during an
			// asynchronous transaction. The sequence replay is impossible for
			// this reason.
941 unsigned int safe_cycle = increment_ohci_cycle_count(next_cycle,
942 IR_JUMBO_PAYLOAD_MAX_SKIP_CYCLES);
943 lost = (compare_ohci_cycle_count(safe_cycle, cycle) > 0);
		}

		if (lost) {
			dev_err(&s->unit->device, "Detect discontinuity of cycle: %d %d\n",
				next_cycle, cycle);
			return -EIO;
		}
		err = parse_ir_ctx_header(s, cycle, ctx_header, &data_blocks, &dbc, &syt,
					  packet_index, i);
		if (err < 0)
			return err;

		desc->cycle = cycle;
		desc->syt = syt;
959 desc->data_blocks = data_blocks;
960 desc->data_block_counter = dbc;
961 desc->ctx_payload = s->buffer.packets[packet_index].buffer;
963 if (!(s->flags & CIP_DBC_IS_END_EVENT))
964 dbc = (dbc + desc->data_blocks) & 0xff;
966 next_cycle = increment_ohci_cycle_count(next_cycle, 1);
968 ctx_header += s->ctx_data.tx.ctx_header_size / sizeof(*ctx_header);
969 packet_index = (packet_index + 1) % queue_size;
972 s->next_cycle = next_cycle;
	s->data_block_counter = dbc;

	return 0;
}
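// Convert a syt offset in ticks relative to the given cycle into the 16-bit SYT field: the
// low 4 bits of the target cycle go to bits 12-15, the remaining ticks to bits 0-11.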
978 static unsigned int compute_syt(unsigned int syt_offset, unsigned int cycle,
979 unsigned int transfer_delay)
{
	unsigned int syt;

	syt_offset += transfer_delay;
984 syt = ((cycle + syt_offset / TICKS_PER_CYCLE) << 12) |
985 (syt_offset % TICKS_PER_CYCLE);
986 return syt & CIP_SYT_MASK;
989 static void generate_pkt_descs(struct amdtp_stream *s, const __be32 *ctx_header, unsigned int packets)
991 struct pkt_desc *descs = s->pkt_descs;
992 const struct seq_desc *seq_descs = s->ctx_data.rx.seq.descs;
993 const unsigned int seq_size = s->ctx_data.rx.seq.size;
994 unsigned int dbc = s->data_block_counter;
995 unsigned int seq_head = s->ctx_data.rx.seq.head;
996 bool aware_syt = !(s->flags & CIP_UNAWARE_SYT);
999 for (i = 0; i < packets; ++i) {
1000 struct pkt_desc *desc = descs + i;
1001 unsigned int index = (s->packet_index + i) % s->queue_size;
1002 const struct seq_desc *seq = seq_descs + seq_head;
1004 desc->cycle = compute_ohci_it_cycle(*ctx_header, s->queue_size);
1006 if (aware_syt && seq->syt_offset != CIP_SYT_NO_INFO)
1007 desc->syt = compute_syt(seq->syt_offset, desc->cycle, s->transfer_delay);
1009 desc->syt = CIP_SYT_NO_INFO;
1011 desc->data_blocks = seq->data_blocks;
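		// With CIP_DBC_IS_END_EVENT the DBC field labels the last data block of the
		// packet, so it is incremented before being stored; otherwise it labels the
		// first data block and is incremented afterwards.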
1013 if (s->flags & CIP_DBC_IS_END_EVENT)
1014 dbc = (dbc + desc->data_blocks) & 0xff;
1016 desc->data_block_counter = dbc;
1018 if (!(s->flags & CIP_DBC_IS_END_EVENT))
1019 dbc = (dbc + desc->data_blocks) & 0xff;
1021 desc->ctx_payload = s->buffer.packets[index].buffer;
		seq_head = (seq_head + 1) % seq_size;

		++ctx_header;
	}
1028 s->data_block_counter = dbc;
1029 s->ctx_data.rx.seq.head = seq_head;
1032 static inline void cancel_stream(struct amdtp_stream *s)
1034 s->packet_index = -1;
1035 if (current_work() == &s->period_work)
1036 amdtp_stream_pcm_abort(s);
1037 WRITE_ONCE(s->pcm_buffer_pointer, SNDRV_PCM_POS_XRUN);
1040 static void process_ctx_payloads(struct amdtp_stream *s,
1041 const struct pkt_desc *descs,
1042 unsigned int packets)
1044 struct snd_pcm_substream *pcm;
1045 unsigned int pcm_frames;
1047 pcm = READ_ONCE(s->pcm);
1048 pcm_frames = s->process_ctx_payloads(s, descs, packets, pcm);
	if (pcm)
		update_pcm_pointers(s, pcm, pcm_frames);
}
1053 static void process_rx_packets(struct fw_iso_context *context, u32 tstamp, size_t header_length,
1054 void *header, void *private_data)
1056 struct amdtp_stream *s = private_data;
1057 const struct amdtp_domain *d = s->domain;
1058 const __be32 *ctx_header = header;
1059 const unsigned int events_per_period = d->events_per_period;
1060 unsigned int event_count = s->ctx_data.rx.event_count;
1061 unsigned int pkt_header_length;
1062 unsigned int packets;
	if (s->packet_index < 0)
		return;
1069 // Calculate the number of packets in buffer and check XRUN.
1070 packets = header_length / sizeof(*ctx_header);
1072 pool_seq_descs(s, packets);
1074 generate_pkt_descs(s, ctx_header, packets);
1076 process_ctx_payloads(s, s->pkt_descs, packets);
	if (!(s->flags & CIP_NO_HEADER))
		pkt_header_length = IT_PKT_HEADER_SIZE_CIP;
	else
		pkt_header_length = 0;
1083 if (s == d->irq_target) {
		// In NO_PERIOD_WAKEUP mode, the packets for all IT/IR contexts are processed by
		// tasks of the user process operating the ALSA PCM character device via calls to
		// ioctl(2), instead of by the scheduled hardware IRQ of an IT context.
1087 struct snd_pcm_substream *pcm = READ_ONCE(s->pcm);
1088 need_hw_irq = !pcm || !pcm->runtime->no_period_wakeup;
1090 need_hw_irq = false;
1093 for (i = 0; i < packets; ++i) {
1094 const struct pkt_desc *desc = s->pkt_descs + i;
		struct {
			struct fw_iso_packet params;
1097 __be32 header[CIP_HEADER_QUADLETS];
1098 } template = { {0}, {0} };
1099 bool sched_irq = false;
1101 build_it_pkt_header(s, desc->cycle, &template.params, pkt_header_length,
1102 desc->data_blocks, desc->data_block_counter,
1105 if (s == s->domain->irq_target) {
1106 event_count += desc->data_blocks;
1107 if (event_count >= events_per_period) {
1108 event_count -= events_per_period;
1109 sched_irq = need_hw_irq;
		if (queue_out_packet(s, &template.params, sched_irq) < 0) {
			cancel_stream(s);
			return;
		}
	}
1119 s->ctx_data.rx.event_count = event_count;
1122 static void skip_rx_packets(struct fw_iso_context *context, u32 tstamp, size_t header_length,
1123 void *header, void *private_data)
1125 struct amdtp_stream *s = private_data;
1126 struct amdtp_domain *d = s->domain;
1127 const __be32 *ctx_header = header;
1128 unsigned int packets;
	if (s->packet_index < 0)
		return;
1135 packets = header_length / sizeof(*ctx_header);
1137 cycle = compute_ohci_it_cycle(ctx_header[packets - 1], s->queue_size);
1138 s->next_cycle = increment_ohci_cycle_count(cycle, 1);
1140 for (i = 0; i < packets; ++i) {
1141 struct fw_iso_packet params = {
1143 .payload_length = 0,
1145 bool sched_irq = (s == d->irq_target && i == packets - 1);
		if (queue_out_packet(s, &params, sched_irq) < 0) {
			cancel_stream(s);
			return;
		}
	}
}
1154 static void irq_target_callback(struct fw_iso_context *context, u32 tstamp, size_t header_length,
1155 void *header, void *private_data);
1157 static void process_rx_packets_intermediately(struct fw_iso_context *context, u32 tstamp,
1158 size_t header_length, void *header, void *private_data)
1160 struct amdtp_stream *s = private_data;
1161 struct amdtp_domain *d = s->domain;
1162 __be32 *ctx_header = header;
1163 const unsigned int queue_size = s->queue_size;
1164 unsigned int packets;
1165 unsigned int offset;
	if (s->packet_index < 0)
		return;
1170 packets = header_length / sizeof(*ctx_header);
1173 while (offset < packets) {
1174 unsigned int cycle = compute_ohci_it_cycle(ctx_header[offset], queue_size);
1176 if (compare_ohci_cycle_count(cycle, d->processing_cycle.rx_start) >= 0)
1183 unsigned int length = sizeof(*ctx_header) * offset;
1185 skip_rx_packets(context, tstamp, length, ctx_header, private_data);
		if (amdtp_streaming_error(s))
			return;
1189 ctx_header += offset;
1190 header_length -= length;
1193 if (offset < packets) {
1194 s->ready_processing = true;
1195 wake_up(&s->ready_wait);
1197 process_rx_packets(context, tstamp, header_length, ctx_header, private_data);
		if (amdtp_streaming_error(s))
			return;
1201 if (s == d->irq_target)
1202 s->context->callback.sc = irq_target_callback;
		else
			s->context->callback.sc = process_rx_packets;
	}
}
1208 static void process_tx_packets(struct fw_iso_context *context, u32 tstamp, size_t header_length,
1209 void *header, void *private_data)
1211 struct amdtp_stream *s = private_data;
1212 __be32 *ctx_header = header;
1213 unsigned int packets;
1214 unsigned int desc_count;
	if (s->packet_index < 0)
		return;
1221 // Calculate the number of packets in buffer and check XRUN.
1222 packets = header_length / s->ctx_data.tx.ctx_header_size;
1225 err = generate_device_pkt_descs(s, s->pkt_descs, ctx_header, packets, &desc_count);
1227 if (err != -EAGAIN) {
1232 struct amdtp_domain *d = s->domain;
1234 process_ctx_payloads(s, s->pkt_descs, desc_count);
1236 if (d->replay.enable)
1237 cache_seq(s, s->pkt_descs, desc_count);
1240 for (i = 0; i < packets; ++i) {
1241 struct fw_iso_packet params = {0};
		if (queue_in_packet(s, &params) < 0) {
			cancel_stream(s);
			return;
		}
	}
}
1250 static void drop_tx_packets(struct fw_iso_context *context, u32 tstamp, size_t header_length,
1251 void *header, void *private_data)
1253 struct amdtp_stream *s = private_data;
1254 const __be32 *ctx_header = header;
1255 unsigned int packets;
	if (s->packet_index < 0)
		return;
1262 packets = header_length / s->ctx_data.tx.ctx_header_size;
1264 ctx_header += (packets - 1) * s->ctx_data.tx.ctx_header_size / sizeof(*ctx_header);
1265 cycle = compute_ohci_cycle_count(ctx_header[1]);
1266 s->next_cycle = increment_ohci_cycle_count(cycle, 1);
1268 for (i = 0; i < packets; ++i) {
1269 struct fw_iso_packet params = {0};
		if (queue_in_packet(s, &params) < 0) {
			cancel_stream(s);
			return;
		}
	}
}
1278 static void process_tx_packets_intermediately(struct fw_iso_context *context, u32 tstamp,
1279 size_t header_length, void *header, void *private_data)
1281 struct amdtp_stream *s = private_data;
1282 struct amdtp_domain *d = s->domain;
1284 unsigned int packets;
1285 unsigned int offset;
	if (s->packet_index < 0)
		return;
1290 packets = header_length / s->ctx_data.tx.ctx_header_size;
1293 ctx_header = header;
1294 while (offset < packets) {
1295 unsigned int cycle = compute_ohci_cycle_count(ctx_header[1]);
1297 if (compare_ohci_cycle_count(cycle, d->processing_cycle.tx_start) >= 0)
1300 ctx_header += s->ctx_data.tx.ctx_header_size / sizeof(__be32);
1304 ctx_header = header;
1307 size_t length = s->ctx_data.tx.ctx_header_size * offset;
1309 drop_tx_packets(context, tstamp, length, ctx_header, s);
		if (amdtp_streaming_error(s))
			return;
1313 ctx_header += length / sizeof(*ctx_header);
1314 header_length -= length;
1317 if (offset < packets) {
1318 s->ready_processing = true;
1319 wake_up(&s->ready_wait);
1321 process_tx_packets(context, tstamp, header_length, ctx_header, s);
		if (amdtp_streaming_error(s))
			return;
1325 context->callback.sc = process_tx_packets;
1329 static void drop_tx_packets_initially(struct fw_iso_context *context, u32 tstamp,
1330 size_t header_length, void *header, void *private_data)
1332 struct amdtp_stream *s = private_data;
1333 struct amdtp_domain *d = s->domain;
1336 unsigned int events;
	if (s->packet_index < 0)
		return;
1342 count = header_length / s->ctx_data.tx.ctx_header_size;
1344 // Attempt to detect any event in the batch of packets.
	events = 0;
	ctx_header = header;
1347 for (i = 0; i < count; ++i) {
1348 unsigned int payload_quads =
1349 (be32_to_cpu(*ctx_header) >> ISO_DATA_LENGTH_SHIFT) / sizeof(__be32);
1350 unsigned int data_blocks;
1352 if (s->flags & CIP_NO_HEADER) {
1353 data_blocks = payload_quads / s->data_block_quadlets;
1355 __be32 *cip_headers = ctx_header + IR_CTX_HEADER_DEFAULT_QUADLETS;
1357 if (payload_quads < CIP_HEADER_QUADLETS) {
1360 payload_quads -= CIP_HEADER_QUADLETS;
1362 if (s->flags & CIP_UNAWARE_SYT) {
1363 data_blocks = payload_quads / s->data_block_quadlets;
1365 u32 cip1 = be32_to_cpu(cip_headers[1]);
				// A NODATA packet can include data blocks, but they are
				// not available as events.
				if ((cip1 & CIP_NO_DATA) == CIP_NO_DATA)
					data_blocks = 0;
				else
					data_blocks = payload_quads / s->data_block_quadlets;
1377 events += data_blocks;
1379 ctx_header += s->ctx_data.tx.ctx_header_size / sizeof(__be32);
1382 drop_tx_packets(context, tstamp, header_length, header, s);
	if (events > 0)
		s->ctx_data.tx.event_starts = true;
1387 // Decide the cycle count to begin processing content of packet in IR contexts.
1389 unsigned int stream_count = 0;
1390 unsigned int event_starts_count = 0;
1391 unsigned int cycle = UINT_MAX;
1393 list_for_each_entry(s, &d->streams, list) {
			if (s->direction == AMDTP_IN_STREAM) {
				++stream_count;
1396 if (s->ctx_data.tx.event_starts)
					++event_starts_count;
			}
		}
1401 if (stream_count == event_starts_count) {
1402 unsigned int next_cycle;
1404 list_for_each_entry(s, &d->streams, list) {
				if (s->direction != AMDTP_IN_STREAM)
					continue;
1408 next_cycle = increment_ohci_cycle_count(s->next_cycle,
1409 d->processing_cycle.tx_init_skip);
1410 if (cycle == UINT_MAX ||
				    compare_ohci_cycle_count(next_cycle, cycle) > 0)
					cycle = next_cycle;

				s->context->callback.sc = process_tx_packets_intermediately;
			}
			d->processing_cycle.tx_start = cycle;
		}
	}
}
1422 static void process_ctxs_in_domain(struct amdtp_domain *d)
1424 struct amdtp_stream *s;
1426 list_for_each_entry(s, &d->streams, list) {
1427 if (s != d->irq_target && amdtp_stream_running(s))
1428 fw_iso_context_flush_completions(s->context);
		if (amdtp_streaming_error(s))
			goto error;
	}

	return;
error:
1436 if (amdtp_stream_running(d->irq_target))
1437 cancel_stream(d->irq_target);
1439 list_for_each_entry(s, &d->streams, list) {
		if (amdtp_stream_running(s))
			cancel_stream(s);
	}
}
1445 static void irq_target_callback(struct fw_iso_context *context, u32 tstamp, size_t header_length,
1446 void *header, void *private_data)
1448 struct amdtp_stream *s = private_data;
1449 struct amdtp_domain *d = s->domain;
1451 process_rx_packets(context, tstamp, header_length, header, private_data);
1452 process_ctxs_in_domain(d);
1455 static void irq_target_callback_intermediately(struct fw_iso_context *context, u32 tstamp,
1456 size_t header_length, void *header, void *private_data)
1458 struct amdtp_stream *s = private_data;
1459 struct amdtp_domain *d = s->domain;
1461 process_rx_packets_intermediately(context, tstamp, header_length, header, private_data);
1462 process_ctxs_in_domain(d);
1465 static void irq_target_callback_skip(struct fw_iso_context *context, u32 tstamp,
1466 size_t header_length, void *header, void *private_data)
1468 struct amdtp_stream *s = private_data;
1469 struct amdtp_domain *d = s->domain;
1470 bool ready_to_start;
1472 skip_rx_packets(context, tstamp, header_length, header, private_data);
1473 process_ctxs_in_domain(d);
1475 if (d->replay.enable && !d->replay.on_the_fly) {
1476 unsigned int rx_count = 0;
1477 unsigned int rx_ready_count = 0;
1478 struct amdtp_stream *rx;
1480 list_for_each_entry(rx, &d->streams, list) {
1481 struct amdtp_stream *tx;
1482 unsigned int cached_cycles;
			if (rx->direction != AMDTP_OUT_STREAM)
				continue;
			++rx_count;
1488 tx = rx->ctx_data.rx.replay_target;
1489 cached_cycles = calculate_cached_cycle_count(tx, 0);
			if (cached_cycles > tx->ctx_data.tx.cache.size / 2)
				++rx_ready_count;
		}
		ready_to_start = (rx_count == rx_ready_count);
	} else {
		ready_to_start = true;
	}
1499 // Decide the cycle count to begin processing content of packet in IT contexts. All of IT
1500 // contexts are expected to start and get callback when reaching here.
1501 if (ready_to_start) {
1502 unsigned int cycle = s->next_cycle;
1503 list_for_each_entry(s, &d->streams, list) {
			if (s->direction != AMDTP_OUT_STREAM)
				continue;
1507 if (compare_ohci_cycle_count(s->next_cycle, cycle) > 0)
1508 cycle = s->next_cycle;
1510 if (s == d->irq_target)
				s->context->callback.sc = irq_target_callback_intermediately;
			else
				s->context->callback.sc = process_rx_packets_intermediately;
		}
		d->processing_cycle.rx_start = cycle;
	}
}
// This is executed only once. For an in-stream, the first packet has arrived. For an
// out-stream, the module is prepared to transmit the first packet.
1522 static void amdtp_stream_first_callback(struct fw_iso_context *context,
1523 u32 tstamp, size_t header_length,
1524 void *header, void *private_data)
1526 struct amdtp_stream *s = private_data;
1527 struct amdtp_domain *d = s->domain;
1529 if (s->direction == AMDTP_IN_STREAM) {
		context->callback.sc = drop_tx_packets_initially;
	} else {
		if (s == d->irq_target)
			context->callback.sc = irq_target_callback_skip;
		else
			context->callback.sc = skip_rx_packets;
	}
1538 context->callback.sc(context, tstamp, header_length, header, s);
1542 * amdtp_stream_start - start transferring packets
1543 * @s: the AMDTP stream to start
1544 * @channel: the isochronous channel on the bus
1545 * @speed: firewire speed code
1546 * @queue_size: The number of packets in the queue.
1547 * @idle_irq_interval: the interval to queue packet during initial state.
1549 * The stream cannot be started until it has been configured with
1550 * amdtp_stream_set_parameters() and it must be started before any PCM or MIDI
1551 * device can be started.
1553 static int amdtp_stream_start(struct amdtp_stream *s, int channel, int speed,
1554 unsigned int queue_size, unsigned int idle_irq_interval)
1556 bool is_irq_target = (s == s->domain->irq_target);
1557 unsigned int ctx_header_size;
1558 unsigned int max_ctx_payload_size;
	enum dma_data_direction dir;
	int type, tag, err;
1562 mutex_lock(&s->mutex);
1564 if (WARN_ON(amdtp_stream_running(s) ||
		    (s->data_block_quadlets < 1))) {
		err = -EBADFD;
		goto err_unlock;
	}
1570 if (s->direction == AMDTP_IN_STREAM) {
1571 // NOTE: IT context should be used for constant IRQ.
		if (is_irq_target) {
			err = -EINVAL;
			goto err_unlock;
		}

		s->data_block_counter = UINT_MAX;
	} else {
		s->data_block_counter = 0;
	}
1582 // initialize packet buffer.
1583 if (s->direction == AMDTP_IN_STREAM) {
1584 dir = DMA_FROM_DEVICE;
1585 type = FW_ISO_CONTEXT_RECEIVE;
1586 if (!(s->flags & CIP_NO_HEADER))
1587 ctx_header_size = IR_CTX_HEADER_SIZE_CIP;
1589 ctx_header_size = IR_CTX_HEADER_SIZE_NO_CIP;
1591 dir = DMA_TO_DEVICE;
1592 type = FW_ISO_CONTEXT_TRANSMIT;
1593 ctx_header_size = 0; // No effect for IT context.
1595 max_ctx_payload_size = amdtp_stream_get_max_ctx_payload_size(s);
	err = iso_packets_buffer_init(&s->buffer, s->unit, queue_size, max_ctx_payload_size, dir);
	if (err < 0)
		goto err_unlock;
1600 s->queue_size = queue_size;
1602 s->context = fw_iso_context_create(fw_parent_device(s->unit)->card,
1603 type, channel, speed, ctx_header_size,
1604 amdtp_stream_first_callback, s);
1605 if (IS_ERR(s->context)) {
1606 err = PTR_ERR(s->context);
		if (err == -EBUSY)
			dev_err(&s->unit->device,
				"no free stream on this controller\n");
		goto err_buffer;
	}
1613 amdtp_stream_update(s);
1615 if (s->direction == AMDTP_IN_STREAM) {
1616 s->ctx_data.tx.max_ctx_payload_length = max_ctx_payload_size;
1617 s->ctx_data.tx.ctx_header_size = ctx_header_size;
1618 s->ctx_data.tx.event_starts = false;
1620 if (s->domain->replay.enable) {
			// struct fw_iso_context.drop_overflow_headers is false, therefore it's
			// possible to cache more than expected.
1623 s->ctx_data.tx.cache.size = max_t(unsigned int, s->syt_interval * 2,
1624 queue_size * 3 / 2);
1625 s->ctx_data.tx.cache.tail = 0;
1626 s->ctx_data.tx.cache.descs = kcalloc(s->ctx_data.tx.cache.size,
1627 sizeof(*s->ctx_data.tx.cache.descs), GFP_KERNEL);
			if (!s->ctx_data.tx.cache.descs) {
				err = -ENOMEM;
				goto err_context;
			}
		}
	} else {
1634 static const struct {
1635 unsigned int data_block;
1636 unsigned int syt_offset;
1637 } *entry, initial_state[] = {
1638 [CIP_SFC_32000] = { 4, 3072 },
1639 [CIP_SFC_48000] = { 6, 1024 },
1640 [CIP_SFC_96000] = { 12, 1024 },
1641 [CIP_SFC_192000] = { 24, 1024 },
1642 [CIP_SFC_44100] = { 0, 67 },
1643 [CIP_SFC_88200] = { 0, 67 },
1644 [CIP_SFC_176400] = { 0, 67 },
1647 s->ctx_data.rx.seq.descs = kcalloc(queue_size, sizeof(*s->ctx_data.rx.seq.descs), GFP_KERNEL);
		if (!s->ctx_data.rx.seq.descs) {
			err = -ENOMEM;
			goto err_context;
		}
1652 s->ctx_data.rx.seq.size = queue_size;
1653 s->ctx_data.rx.seq.tail = 0;
1654 s->ctx_data.rx.seq.head = 0;
1656 entry = &initial_state[s->sfc];
1657 s->ctx_data.rx.data_block_state = entry->data_block;
1658 s->ctx_data.rx.syt_offset_state = entry->syt_offset;
1659 s->ctx_data.rx.last_syt_offset = TICKS_PER_CYCLE;
1661 s->ctx_data.rx.event_count = 0;
1664 if (s->flags & CIP_NO_HEADER)
		s->tag = TAG_NO_CIP_HEADER;
	else
		s->tag = TAG_CIP;
	s->pkt_descs = kcalloc(s->queue_size, sizeof(*s->pkt_descs),
			       GFP_KERNEL);
	if (!s->pkt_descs) {
		err = -ENOMEM;
		goto err_context;
	}
	s->packet_index = 0;
	do {
1678 struct fw_iso_packet params;
1680 if (s->direction == AMDTP_IN_STREAM) {
			err = queue_in_packet(s, &params);
		} else {
1683 bool sched_irq = false;
1685 params.header_length = 0;
1686 params.payload_length = 0;
1688 if (is_irq_target) {
				sched_irq = !((s->packet_index + 1) %
					      idle_irq_interval);
			}

			err = queue_out_packet(s, &params, sched_irq);
		}
		if (err < 0)
			goto err_pkt_descs;
1697 } while (s->packet_index > 0);
	/* NOTE: TAG1 matches CIP. This only affects the IR context. */
1700 tag = FW_ISO_CONTEXT_MATCH_TAG1;
1701 if ((s->flags & CIP_EMPTY_WITH_TAG0) || (s->flags & CIP_NO_HEADER))
1702 tag |= FW_ISO_CONTEXT_MATCH_TAG0;
1704 s->ready_processing = false;
	err = fw_iso_context_start(s->context, -1, 0, tag);
	if (err < 0)
		goto err_pkt_descs;
	mutex_unlock(&s->mutex);

	return 0;
err_pkt_descs:
	kfree(s->pkt_descs);
err_context:
1715 if (s->direction == AMDTP_OUT_STREAM) {
		kfree(s->ctx_data.rx.seq.descs);
	} else {
1718 if (s->domain->replay.enable)
			kfree(s->ctx_data.tx.cache.descs);
	}
1721 fw_iso_context_destroy(s->context);
	s->context = ERR_PTR(-1);
err_buffer:
	iso_packets_buffer_destroy(&s->buffer, s->unit);
err_unlock:
	mutex_unlock(&s->mutex);

	return err;
}
1732 * amdtp_domain_stream_pcm_pointer - get the PCM buffer position
1733 * @d: the AMDTP domain.
1734 * @s: the AMDTP stream that transports the PCM data
1736 * Returns the current buffer position, in frames.
1738 unsigned long amdtp_domain_stream_pcm_pointer(struct amdtp_domain *d,
1739 struct amdtp_stream *s)
1741 struct amdtp_stream *irq_target = d->irq_target;
1743 if (irq_target && amdtp_stream_running(irq_target)) {
1744 // This function is called in software IRQ context of
1745 // period_work or process context.
		// When the software IRQ context was scheduled by the software IRQ
		// context of the IT contexts, queued packets were already handled.
		// Therefore, there is no need to flush the queue in the buffer here.
		//
		// When the process context reaches here, some packets will already
		// be queued in the buffer. These packets should be handled
		// immediately to keep better granularity of the PCM pointer.
		//
		// Later, the process context sometimes schedules the software IRQ
		// context of the period_work. Then there is no need to flush the
		// queue, for the same reason as described above.
1758 if (current_work() != &s->period_work)
1759 fw_iso_context_flush_completions(irq_target->context);
1762 return READ_ONCE(s->pcm_buffer_pointer);
1764 EXPORT_SYMBOL_GPL(amdtp_domain_stream_pcm_pointer);
1767 * amdtp_domain_stream_pcm_ack - acknowledge queued PCM frames
1768 * @d: the AMDTP domain.
1769 * @s: the AMDTP stream that transfers the PCM frames
1771 * Returns zero always.
1773 int amdtp_domain_stream_pcm_ack(struct amdtp_domain *d, struct amdtp_stream *s)
1775 struct amdtp_stream *irq_target = d->irq_target;
1777 // Process isochronous packets for recent isochronous cycle to handle
1778 // queued PCM frames.
1779 if (irq_target && amdtp_stream_running(irq_target))
		fw_iso_context_flush_completions(irq_target->context);

	return 0;
}
1784 EXPORT_SYMBOL_GPL(amdtp_domain_stream_pcm_ack);
1787 * amdtp_stream_update - update the stream after a bus reset
1788 * @s: the AMDTP stream
1790 void amdtp_stream_update(struct amdtp_stream *s)
1793 WRITE_ONCE(s->source_node_id_field,
1794 (fw_parent_device(s->unit)->card->node_id << CIP_SID_SHIFT) & CIP_SID_MASK);
1796 EXPORT_SYMBOL(amdtp_stream_update);
1799 * amdtp_stream_stop - stop sending packets
1800 * @s: the AMDTP stream to stop
1802 * All PCM and MIDI devices of the stream must be stopped before the stream
1803 * itself can be stopped.
1805 static void amdtp_stream_stop(struct amdtp_stream *s)
1807 mutex_lock(&s->mutex);
1809 if (!amdtp_stream_running(s)) {
		mutex_unlock(&s->mutex);
		return;
	}
1814 cancel_work_sync(&s->period_work);
1815 fw_iso_context_stop(s->context);
1816 fw_iso_context_destroy(s->context);
1817 s->context = ERR_PTR(-1);
1818 iso_packets_buffer_destroy(&s->buffer, s->unit);
1819 kfree(s->pkt_descs);
1821 if (s->direction == AMDTP_OUT_STREAM) {
		kfree(s->ctx_data.rx.seq.descs);
	} else {
1824 if (s->domain->replay.enable)
			kfree(s->ctx_data.tx.cache.descs);
	}
1828 mutex_unlock(&s->mutex);
1832 * amdtp_stream_pcm_abort - abort the running PCM device
1833 * @s: the AMDTP stream about to be stopped
1835 * If the isochronous stream needs to be stopped asynchronously, call this
1836 * function first to stop the PCM device.
1838 void amdtp_stream_pcm_abort(struct amdtp_stream *s)
1840 struct snd_pcm_substream *pcm;
1842 pcm = READ_ONCE(s->pcm);
	if (pcm)
		snd_pcm_stop_xrun(pcm);
}
1846 EXPORT_SYMBOL(amdtp_stream_pcm_abort);
1849 * amdtp_domain_init - initialize an AMDTP domain structure
1850 * @d: the AMDTP domain to initialize.
1852 int amdtp_domain_init(struct amdtp_domain *d)
1854 INIT_LIST_HEAD(&d->streams);
	d->events_per_period = 0;

	return 0;
}
1860 EXPORT_SYMBOL_GPL(amdtp_domain_init);
1863 * amdtp_domain_destroy - destroy an AMDTP domain structure
1864 * @d: the AMDTP domain to destroy.
1866 void amdtp_domain_destroy(struct amdtp_domain *d)
1868 // At present nothing to do.
1871 EXPORT_SYMBOL_GPL(amdtp_domain_destroy);
1874 * amdtp_domain_add_stream - register isoc context into the domain.
1875 * @d: the AMDTP domain.
1876 * @s: the AMDTP stream.
1877 * @channel: the isochronous channel on the bus.
1878 * @speed: firewire speed code.
1880 int amdtp_domain_add_stream(struct amdtp_domain *d, struct amdtp_stream *s,
1881 int channel, int speed)
1883 struct amdtp_stream *tmp;
	list_for_each_entry(tmp, &d->streams, list) {
		if (s == tmp)
			return -EBUSY;
	}
1890 list_add(&s->list, &d->streams);
	s->channel = channel;
	s->speed = speed;
	s->domain = d;

	return 0;
}
1898 EXPORT_SYMBOL_GPL(amdtp_domain_add_stream);
1900 // Make the reference from rx stream to tx stream for sequence replay. When the number of tx streams
1901 // is less than the number of rx streams, the first tx stream is selected.
1902 static int make_association(struct amdtp_domain *d)
1904 unsigned int dst_index = 0;
1905 struct amdtp_stream *rx;
1907 // Make association to replay target.
1908 list_for_each_entry(rx, &d->streams, list) {
1909 if (rx->direction == AMDTP_OUT_STREAM) {
1910 unsigned int src_index = 0;
1911 struct amdtp_stream *tx = NULL;
1912 struct amdtp_stream *s;
1914 list_for_each_entry(s, &d->streams, list) {
1915 if (s->direction == AMDTP_IN_STREAM) {
1916 if (dst_index == src_index) {
1925 // Select the first entry.
1926 list_for_each_entry(s, &d->streams, list) {
1927 if (s->direction == AMDTP_IN_STREAM) {
1932 // No target is available to replay sequence.
1937 rx->ctx_data.rx.replay_target = tx;
1938 rx->ctx_data.rx.cache_head = 0;
1948 * amdtp_domain_start - start sending packets for isoc context in the domain.
1949 * @d: the AMDTP domain.
 * @tx_init_skip_cycles: the number of cycles to skip processing packets at the initial stage of IR
 *			 contexts.
 * @replay_seq: whether to replay the sequence of packet in IR context for the sequence of packet in
 *		IT context.
 * @replay_on_the_fly: transfer rx packets according to nominal frequency, then begin to replay
 *		       according to arrival of events in tx packets.
 */
1957 int amdtp_domain_start(struct amdtp_domain *d, unsigned int tx_init_skip_cycles, bool replay_seq,
1958 bool replay_on_the_fly)
1960 unsigned int events_per_buffer = d->events_per_buffer;
1961 unsigned int events_per_period = d->events_per_period;
1962 unsigned int queue_size;
1963 struct amdtp_stream *s;
	err = make_association(d);
	if (err < 0)
		return err;
1971 d->replay.enable = replay_seq;
1972 d->replay.on_the_fly = replay_on_the_fly;
1974 // Select an IT context as IRQ target.
1975 list_for_each_entry(s, &d->streams, list) {
		if (s->direction == AMDTP_OUT_STREAM)
			break;
	}
	d->irq_target = s;
1983 d->processing_cycle.tx_init_skip = tx_init_skip_cycles;
	// This is the case where AMDTP streams in the domain run just for a MIDI
	// substream. Use the number of events equivalent to 10 msec as the
	// interval of hardware IRQ.
1988 if (events_per_period == 0)
1989 events_per_period = amdtp_rate_table[d->irq_target->sfc] / 100;
1990 if (events_per_buffer == 0)
1991 events_per_buffer = events_per_period * 3;
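	// For instance, when both are left at zero for a 48 kHz stream: events_per_period
	// becomes 480, events_per_buffer 1440, and the queue below spans
	// DIV_ROUND_UP(8000 * 1440, 48000) = 240 packets.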
1993 queue_size = DIV_ROUND_UP(CYCLES_PER_SECOND * events_per_buffer,
1994 amdtp_rate_table[d->irq_target->sfc]);
1996 list_for_each_entry(s, &d->streams, list) {
1997 unsigned int idle_irq_interval = 0;
1999 if (s->direction == AMDTP_OUT_STREAM && s == d->irq_target) {
2000 idle_irq_interval = DIV_ROUND_UP(CYCLES_PER_SECOND * events_per_period,
2001 amdtp_rate_table[d->irq_target->sfc]);
		// Start immediately, but the DMA context actually starts several hundred cycles later.
		err = amdtp_stream_start(s, s->channel, s->speed, queue_size, idle_irq_interval);
		if (err < 0)
			goto error;
	}

	return 0;
error:
2012 list_for_each_entry(s, &d->streams, list)
		amdtp_stream_stop(s);

	return err;
}
2016 EXPORT_SYMBOL_GPL(amdtp_domain_start);
2019 * amdtp_domain_stop - stop sending packets for isoc context in the same domain.
2020 * @d: the AMDTP domain to which the isoc contexts belong.
2022 void amdtp_domain_stop(struct amdtp_domain *d)
2024 struct amdtp_stream *s, *next;
	if (d->irq_target)
		amdtp_stream_stop(d->irq_target);
	list_for_each_entry_safe(s, next, &d->streams, list) {
		list_del(&s->list);
2032 if (s != d->irq_target)
			amdtp_stream_stop(s);
	}
2036 d->events_per_period = 0;
	d->irq_target = NULL;
}
2039 EXPORT_SYMBOL_GPL(amdtp_domain_stop);