1 // SPDX-License-Identifier: GPL-2.0-only
3 * Audio and Music Data Transmission Protocol (IEC 61883-6) streams
4 * with Common Isochronous Packet (IEC 61883-1) headers
6 * Copyright (c) Clemens Ladisch <clemens@ladisch.de>
9 #include <linux/device.h>
10 #include <linux/err.h>
11 #include <linux/firewire.h>
12 #include <linux/firewire-constants.h>
13 #include <linux/module.h>
14 #include <linux/slab.h>
15 #include <sound/pcm.h>
16 #include <sound/pcm_params.h>
17 #include "amdtp-stream.h"
19 #define TICKS_PER_CYCLE 3072
20 #define CYCLES_PER_SECOND 8000
21 #define TICKS_PER_SECOND (TICKS_PER_CYCLE * CYCLES_PER_SECOND)
23 #define OHCI_SECOND_MODULUS 8
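// The cycle timer of IEEE 1394 runs at 24.576 MHz (TICKS_PER_SECOND), so one
// isochronous cycle lasts 3072 / 24576000 sec = 125 usec. The timestamp of
// 1394 OHCI carries only 3 bits for the second field, hence a modulus of 8.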
25 /* Always support Linux tracing subsystem. */
26 #define CREATE_TRACE_POINTS
27 #include "amdtp-stream-trace.h"
29 #define TRANSFER_DELAY_TICKS 0x2e00 /* 479.17 microseconds */
31 /* isochronous header parameters */
32 #define ISO_DATA_LENGTH_SHIFT 16
33 #define TAG_NO_CIP_HEADER 0
36 // Common Isochronous Packet (CIP) header parameters. Use a two-quadlet CIP header when supported.
37 #define CIP_HEADER_QUADLETS 2
38 #define CIP_EOH_SHIFT 31
39 #define CIP_EOH (1u << CIP_EOH_SHIFT)
40 #define CIP_EOH_MASK 0x80000000
41 #define CIP_SID_SHIFT 24
42 #define CIP_SID_MASK 0x3f000000
43 #define CIP_DBS_MASK 0x00ff0000
44 #define CIP_DBS_SHIFT 16
45 #define CIP_SPH_MASK 0x00000400
46 #define CIP_SPH_SHIFT 10
47 #define CIP_DBC_MASK 0x000000ff
48 #define CIP_FMT_SHIFT 24
49 #define CIP_FMT_MASK 0x3f000000
50 #define CIP_FDF_MASK 0x00ff0000
51 #define CIP_FDF_SHIFT 16
52 #define CIP_FDF_NO_DATA 0xff
53 #define CIP_SYT_MASK 0x0000ffff
54 #define CIP_SYT_NO_INFO 0xffff
55 #define CIP_SYT_CYCLE_MODULUS 16
56 #define CIP_NO_DATA ((CIP_FDF_NO_DATA << CIP_FDF_SHIFT) | CIP_SYT_NO_INFO)
58 #define CIP_HEADER_SIZE (sizeof(__be32) * CIP_HEADER_QUADLETS)
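// For reference, the two-quadlet CIP header layout encoded by the masks and
// shifts above (per IEC 61883-1):
//   quadlet 0: EOH(1) | ... | SID(6) | DBS(8) | ... | SPH(1) | ... | DBC(8)
//   quadlet 1: EOH(1) | ... | FMT(6) | FDF(8) | SYT(16)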
60 /* Audio and Music transfer protocol specific parameters */
61 #define CIP_FMT_AM 0x10
62 #define AMDTP_FDF_NO_DATA 0xff
64 // For iso header and tstamp.
65 #define IR_CTX_HEADER_DEFAULT_QUADLETS 2
67 #define IR_CTX_HEADER_SIZE_NO_CIP (sizeof(__be32) * IR_CTX_HEADER_DEFAULT_QUADLETS)
68 // Add two quadlets CIP header.
69 #define IR_CTX_HEADER_SIZE_CIP (IR_CTX_HEADER_SIZE_NO_CIP + CIP_HEADER_SIZE)
70 #define HEADER_TSTAMP_MASK 0x0000ffff
72 #define IT_PKT_HEADER_SIZE_CIP CIP_HEADER_SIZE
73 #define IT_PKT_HEADER_SIZE_NO_CIP 0 // Nothing.
75 // The initial firmware of OXFW970 can postpone packet transmission while finishing an
76 // asynchronous transaction. This module tolerates up to 5 skipped cycles to avoid buffer
77 // overrun. If the actual device skips more, this module stops the packet streaming.
78 #define IR_JUMBO_PAYLOAD_MAX_SKIP_CYCLES 5
80 static void pcm_period_work(struct work_struct *work);
83 * amdtp_stream_init - initialize an AMDTP stream structure
84 * @s: the AMDTP stream to initialize
85 * @unit: the target of the stream
86 * @dir: the direction of stream
87 * @flags: the details of the streaming protocol, consisting of cip_flags enumeration constants.
88 * @fmt: the value of fmt field in CIP header
89 * @process_ctx_payloads: callback handler to process payloads of isoc context
90 * @protocol_size: the size of the protocol-specific data to newly allocate
92 int amdtp_stream_init(struct amdtp_stream *s, struct fw_unit *unit,
93 enum amdtp_stream_direction dir, unsigned int flags,
95 amdtp_stream_process_ctx_payloads_t process_ctx_payloads,
96 unsigned int protocol_size)
98 if (process_ctx_payloads == NULL)
101 s->protocol = kzalloc(protocol_size, GFP_KERNEL);
108 s->context = ERR_PTR(-1);
109 mutex_init(&s->mutex);
110 INIT_WORK(&s->period_work, pcm_period_work);
113 init_waitqueue_head(&s->ready_wait);
116 s->process_ctx_payloads = process_ctx_payloads;
120 EXPORT_SYMBOL(amdtp_stream_init);
123 * amdtp_stream_destroy - free stream resources
124 * @s: the AMDTP stream to destroy
126 void amdtp_stream_destroy(struct amdtp_stream *s)
128 /* Not initialized. */
129 if (s->protocol == NULL)
132 WARN_ON(amdtp_stream_running(s));
134 mutex_destroy(&s->mutex);
136 EXPORT_SYMBOL(amdtp_stream_destroy);
138 const unsigned int amdtp_syt_intervals[CIP_SFC_COUNT] = {
142 [CIP_SFC_88200] = 16,
143 [CIP_SFC_96000] = 16,
144 [CIP_SFC_176400] = 32,
145 [CIP_SFC_192000] = 32,
147 EXPORT_SYMBOL(amdtp_syt_intervals);
149 const unsigned int amdtp_rate_table[CIP_SFC_COUNT] = {
150 [CIP_SFC_32000] = 32000,
151 [CIP_SFC_44100] = 44100,
152 [CIP_SFC_48000] = 48000,
153 [CIP_SFC_88200] = 88200,
154 [CIP_SFC_96000] = 96000,
155 [CIP_SFC_176400] = 176400,
156 [CIP_SFC_192000] = 192000,
158 EXPORT_SYMBOL(amdtp_rate_table);
160 static int apply_constraint_to_size(struct snd_pcm_hw_params *params,
161 struct snd_pcm_hw_rule *rule)
163 struct snd_interval *s = hw_param_interval(params, rule->var);
164 const struct snd_interval *r =
165 hw_param_interval_c(params, SNDRV_PCM_HW_PARAM_RATE);
166 struct snd_interval t = {0};
167 unsigned int step = 0;
170 for (i = 0; i < CIP_SFC_COUNT; ++i) {
171 if (snd_interval_test(r, amdtp_rate_table[i]))
172 step = max(step, amdtp_syt_intervals[i]);
178 t.min = roundup(s->min, step);
179 t.max = rounddown(s->max, step);
182 return snd_interval_refine(s, &t);
186 * amdtp_stream_add_pcm_hw_constraints - add hw constraints for PCM substream
187 * @s: the AMDTP stream, which must be initialized.
188 * @runtime: the PCM substream runtime
190 int amdtp_stream_add_pcm_hw_constraints(struct amdtp_stream *s,
191 struct snd_pcm_runtime *runtime)
193 struct snd_pcm_hardware *hw = &runtime->hw;
194 unsigned int ctx_header_size;
195 unsigned int maximum_usec_per_period;
198 hw->info = SNDRV_PCM_INFO_BLOCK_TRANSFER |
199 SNDRV_PCM_INFO_INTERLEAVED |
200 SNDRV_PCM_INFO_JOINT_DUPLEX |
201 SNDRV_PCM_INFO_MMAP |
202 SNDRV_PCM_INFO_MMAP_VALID |
203 SNDRV_PCM_INFO_NO_PERIOD_WAKEUP;
206 hw->periods_max = UINT_MAX;
208 /* bytes for a frame */
209 hw->period_bytes_min = 4 * hw->channels_max;
211 /* Just to prevent from allocating too many pages. */
212 hw->period_bytes_max = hw->period_bytes_min * 2048;
213 hw->buffer_bytes_max = hw->period_bytes_max * hw->periods_min;
215 // The Linux driver for the 1394 OHCI controller voluntarily flushes an isoc
216 // context when the total size of accumulated context headers reaches
217 // PAGE_SIZE. This kicks the work for the isoc context and brings a
218 // callback in the middle of scheduled interrupts.
219 // Although AMDTP streams in the same domain use the same events per
220 // IRQ, use the largest size of context header between IT/IR contexts.
221 // Here, the value of the context header size for the IR context is used for both contexts.
223 if (!(s->flags & CIP_NO_HEADER))
224 ctx_header_size = IR_CTX_HEADER_SIZE_CIP;
226 ctx_header_size = IR_CTX_HEADER_SIZE_NO_CIP;
227 maximum_usec_per_period = USEC_PER_SEC * PAGE_SIZE /
228 CYCLES_PER_SECOND / ctx_header_size;
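// For example, assuming PAGE_SIZE is 4096 bytes and the 16 byte IR context
// header with CIP (IR_CTX_HEADER_SIZE_CIP): 4096 / 16 = 256 packets are
// accumulated per flush, so the maximum period is 256 cycles * 125 usec =
// 32000 usec.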
230 // In IEC 61883-6, one isoc packet can transfer events up to the value
231 // of the syt interval. This comes from the interval of the isoc cycle. As the 1394
232 // OHCI controller can generate a hardware IRQ per isoc packet, the
233 // interval is 125 usec.
234 // However, there are two ways of transmission in IEC 61883-6; blocking
235 // and non-blocking modes. In blocking mode, the sequence of isoc packets
236 // includes 'empty' or 'NODATA' packets which include no event. In
237 // non-blocking mode, the number of events per packet is variable up to the syt interval.
239 // Due to the above protocol design, the minimum PCM frames per
240 // interrupt should be double the value of the syt interval, thus it is 250 usec.
242 err = snd_pcm_hw_constraint_minmax(runtime,
243 SNDRV_PCM_HW_PARAM_PERIOD_TIME,
244 250, maximum_usec_per_period);
248 /* Non-Blocking stream has no more constraints */
249 if (!(s->flags & CIP_BLOCKING))
253 * One AMDTP packet can include some frames. In blocking mode, the
254 * number equals SYT_INTERVAL. So the number is 8, 16 or 32,
255 * depending on its sampling rate. For an accurate period interrupt, it's
256 * preferable to align period/buffer sizes to the current SYT_INTERVAL.
258 err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_PERIOD_SIZE,
259 apply_constraint_to_size, NULL,
260 SNDRV_PCM_HW_PARAM_PERIOD_SIZE,
261 SNDRV_PCM_HW_PARAM_RATE, -1);
264 err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_BUFFER_SIZE,
265 apply_constraint_to_size, NULL,
266 SNDRV_PCM_HW_PARAM_BUFFER_SIZE,
267 SNDRV_PCM_HW_PARAM_RATE, -1);
273 EXPORT_SYMBOL(amdtp_stream_add_pcm_hw_constraints);
276 * amdtp_stream_set_parameters - set stream parameters
277 * @s: the AMDTP stream to configure
278 * @rate: the sample rate
279 * @data_block_quadlets: the size of a data block in quadlet unit
280 * @pcm_frame_multiplier: the multiplier to compute the number of PCM frames by the number of AMDTP events
283 * The parameters must be set before the stream is started, and must not be
284 * changed while the stream is running.
286 int amdtp_stream_set_parameters(struct amdtp_stream *s, unsigned int rate,
287 unsigned int data_block_quadlets, unsigned int pcm_frame_multiplier)
291 for (sfc = 0; sfc < ARRAY_SIZE(amdtp_rate_table); ++sfc) {
292 if (amdtp_rate_table[sfc] == rate)
295 if (sfc == ARRAY_SIZE(amdtp_rate_table))
299 s->data_block_quadlets = data_block_quadlets;
300 s->syt_interval = amdtp_syt_intervals[sfc];
302 // default buffering in the device.
303 s->transfer_delay = TRANSFER_DELAY_TICKS - TICKS_PER_CYCLE;
305 // additional buffering needed to adjust for no-data packets.
306 if (s->flags & CIP_BLOCKING)
307 s->transfer_delay += TICKS_PER_SECOND * s->syt_interval / rate;
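// For example, at 48000 Hz in blocking mode: 11776 (0x2e00) - 3072 +
// 24576000 * 16 / 48000 = 8704 + 8192 = 16896 ticks, i.e. roughly 687.5 usec.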
309 s->pcm_frame_multiplier = pcm_frame_multiplier;
313 EXPORT_SYMBOL(amdtp_stream_set_parameters);
315 // The CIP header is processed in the context header, apart from the context payload.
316 static int amdtp_stream_get_max_ctx_payload_size(struct amdtp_stream *s)
318 unsigned int multiplier;
320 if (s->flags & CIP_JUMBO_PAYLOAD)
321 multiplier = IR_JUMBO_PAYLOAD_MAX_SKIP_CYCLES;
325 return s->syt_interval * s->data_block_quadlets * sizeof(__be32) * multiplier;
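// For example, without CIP_JUMBO_PAYLOAD at 48000 Hz (syt_interval of 16) and
// a hypothetical data_block_quadlets of 10, the maximum context payload is
// 16 * 10 * 4 = 640 bytes.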
329 * amdtp_stream_get_max_payload - get the stream's packet size
330 * @s: the AMDTP stream
332 * This function must not be called before the stream has been configured
333 * with amdtp_stream_set_parameters().
335 unsigned int amdtp_stream_get_max_payload(struct amdtp_stream *s)
337 unsigned int cip_header_size;
339 if (!(s->flags & CIP_NO_HEADER))
340 cip_header_size = CIP_HEADER_SIZE;
344 return cip_header_size + amdtp_stream_get_max_ctx_payload_size(s);
346 EXPORT_SYMBOL(amdtp_stream_get_max_payload);
349 * amdtp_stream_pcm_prepare - prepare PCM device for running
350 * @s: the AMDTP stream
352 * This function should be called from the PCM device's .prepare callback.
354 void amdtp_stream_pcm_prepare(struct amdtp_stream *s)
356 cancel_work_sync(&s->period_work);
357 s->pcm_buffer_pointer = 0;
358 s->pcm_period_pointer = 0;
360 EXPORT_SYMBOL(amdtp_stream_pcm_prepare);
362 #define prev_packet_desc(s, desc) \
363 list_prev_entry_circular(desc, &s->packet_descs_list, link)
365 static void pool_blocking_data_blocks(struct amdtp_stream *s, struct seq_desc *descs,
366 unsigned int size, unsigned int pos, unsigned int count)
368 const unsigned int syt_interval = s->syt_interval;
371 for (i = 0; i < count; ++i) {
372 struct seq_desc *desc = descs + pos;
374 if (desc->syt_offset != CIP_SYT_NO_INFO)
375 desc->data_blocks = syt_interval;
377 desc->data_blocks = 0;
379 pos = (pos + 1) % size;
383 static void pool_ideal_nonblocking_data_blocks(struct amdtp_stream *s, struct seq_desc *descs,
384 unsigned int size, unsigned int pos,
387 const enum cip_sfc sfc = s->sfc;
388 unsigned int state = s->ctx_data.rx.data_block_state;
391 for (i = 0; i < count; ++i) {
392 struct seq_desc *desc = descs + pos;
394 if (!cip_sfc_is_base_44100(sfc)) {
395 // Sample_rate / 8000 is an integer, and precomputed.
396 desc->data_blocks = state;
398 unsigned int phase = state;
401 * This calculates the number of data blocks per packet so that
402 * 1) the overall rate is correct and exactly synchronized to
404 * 2) packets with a rounded-up number of blocks occur as early
405 * as possible in the sequence (to prevent underruns of the
408 if (sfc == CIP_SFC_44100)
409 /* 6 6 5 6 5 6 5 ... */
410 desc->data_blocks = 5 + ((phase & 1) ^ (phase == 0 || phase >= 40));
412 /* 12 11 11 11 11 ... or 23 22 22 22 22 ... */
413 desc->data_blocks = 11 * (sfc >> 1) + (phase == 0);
414 if (++phase >= (80 >> (sfc >> 1)))
419 pos = (pos + 1) % size;
422 s->ctx_data.rx.data_block_state = state;
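// As a sanity check of the sequence above at 44.1 kHz: the phase wraps every
// 80 cycles and the pattern sums to 441 data blocks, which equals
// 80 cycles * 44100 events per second / 8000 cycles per second exactly.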
425 static unsigned int calculate_syt_offset(unsigned int *last_syt_offset,
426 unsigned int *syt_offset_state, enum cip_sfc sfc)
428 unsigned int syt_offset;
430 if (*last_syt_offset < TICKS_PER_CYCLE) {
431 if (!cip_sfc_is_base_44100(sfc))
432 syt_offset = *last_syt_offset + *syt_offset_state;
435 * The time, in ticks, of the n'th SYT_INTERVAL sample is:
436 * n * SYT_INTERVAL * 24576000 / sample_rate
437 * Modulo TICKS_PER_CYCLE, the difference between successive
438 * elements is about 1386.23. Rounding the results of this
439 * formula to the SYT precision results in a sequence of
440 * differences that begins with:
441 * 1386 1386 1387 1386 1386 1386 1387 1386 1386 1386 1387 ...
442 * This code generates _exactly_ the same sequence.
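 * For example, at 44.1 kHz the SYT_INTERVAL is 8, so the interval between
 * timestamped samples is 8 * 24576000 / 44100 = 4458.23 ticks; modulo the
 * 3072 ticks of one cycle, this leaves the 1386.23 tick difference above.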
444 unsigned int phase = *syt_offset_state;
445 unsigned int index = phase % 13;
447 syt_offset = *last_syt_offset;
448 syt_offset += 1386 + ((index && !(index & 3)) ||
452 *syt_offset_state = phase;
455 syt_offset = *last_syt_offset - TICKS_PER_CYCLE;
456 *last_syt_offset = syt_offset;
458 if (syt_offset >= TICKS_PER_CYCLE)
459 syt_offset = CIP_SYT_NO_INFO;
464 static void pool_ideal_syt_offsets(struct amdtp_stream *s, struct seq_desc *descs,
465 unsigned int size, unsigned int pos, unsigned int count)
467 const enum cip_sfc sfc = s->sfc;
468 unsigned int last = s->ctx_data.rx.last_syt_offset;
469 unsigned int state = s->ctx_data.rx.syt_offset_state;
472 for (i = 0; i < count; ++i) {
473 struct seq_desc *desc = descs + pos;
475 desc->syt_offset = calculate_syt_offset(&last, &state, sfc);
477 pos = (pos + 1) % size;
480 s->ctx_data.rx.last_syt_offset = last;
481 s->ctx_data.rx.syt_offset_state = state;
484 static unsigned int compute_syt_offset(unsigned int syt, unsigned int cycle,
485 unsigned int transfer_delay)
487 unsigned int cycle_lo = (cycle % CYCLES_PER_SECOND) & 0x0f;
488 unsigned int syt_cycle_lo = (syt & 0xf000) >> 12;
489 unsigned int syt_offset;
492 if (syt_cycle_lo < cycle_lo)
493 syt_cycle_lo += CIP_SYT_CYCLE_MODULUS;
494 syt_cycle_lo -= cycle_lo;
496 // Subtract transfer delay so that the synchronization offset is not so large
498 syt_offset = syt_cycle_lo * TICKS_PER_CYCLE + (syt & 0x0fff);
499 if (syt_offset < transfer_delay)
500 syt_offset += CIP_SYT_CYCLE_MODULUS * TICKS_PER_CYCLE;
502 return syt_offset - transfer_delay;
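// For example, with syt = 0x9100 received in cycle 4 and a transfer delay of
// 11776 (0x2e00) ticks: the cycle distance is 9 - 4 = 5, so the raw offset is
// 5 * 3072 + 0x100 = 15616 ticks, and 15616 - 11776 = 3840 ticks remain.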
505 // Both the producer and consumer of the queue run on the same clock of the IEEE 1394 bus.
506 // Additionally, the sequence of tx packets is strictly checked against any discontinuity
507 // before filling entries in the queue, so the calculation is safe even if it looks fragile.
509 static unsigned int calculate_cached_cycle_count(struct amdtp_stream *s, unsigned int head)
511 const unsigned int cache_size = s->ctx_data.tx.cache.size;
512 unsigned int cycles = s->ctx_data.tx.cache.pos;
515 cycles += cache_size;
521 static void cache_seq(struct amdtp_stream *s, const struct pkt_desc *src, unsigned int desc_count)
523 const unsigned int transfer_delay = s->transfer_delay;
524 const unsigned int cache_size = s->ctx_data.tx.cache.size;
525 struct seq_desc *cache = s->ctx_data.tx.cache.descs;
526 unsigned int cache_pos = s->ctx_data.tx.cache.pos;
527 bool aware_syt = !(s->flags & CIP_UNAWARE_SYT);
530 for (i = 0; i < desc_count; ++i) {
531 struct seq_desc *dst = cache + cache_pos;
533 if (aware_syt && src->syt != CIP_SYT_NO_INFO)
534 dst->syt_offset = compute_syt_offset(src->syt, src->cycle, transfer_delay);
536 dst->syt_offset = CIP_SYT_NO_INFO;
537 dst->data_blocks = src->data_blocks;
539 cache_pos = (cache_pos + 1) % cache_size;
540 src = amdtp_stream_next_packet_desc(s, src);
543 s->ctx_data.tx.cache.pos = cache_pos;
546 static void pool_ideal_seq_descs(struct amdtp_stream *s, struct seq_desc *descs, unsigned int size,
547 unsigned int pos, unsigned int count)
549 pool_ideal_syt_offsets(s, descs, size, pos, count);
551 if (s->flags & CIP_BLOCKING)
552 pool_blocking_data_blocks(s, descs, size, pos, count);
554 pool_ideal_nonblocking_data_blocks(s, descs, size, pos, count);
557 static void pool_replayed_seq(struct amdtp_stream *s, struct seq_desc *descs, unsigned int size,
558 unsigned int pos, unsigned int count)
560 struct amdtp_stream *target = s->ctx_data.rx.replay_target;
561 const struct seq_desc *cache = target->ctx_data.tx.cache.descs;
562 const unsigned int cache_size = target->ctx_data.tx.cache.size;
563 unsigned int cache_pos = s->ctx_data.rx.cache_pos;
566 for (i = 0; i < count; ++i) {
567 descs[pos] = cache[cache_pos];
568 cache_pos = (cache_pos + 1) % cache_size;
569 pos = (pos + 1) % size;
572 s->ctx_data.rx.cache_pos = cache_pos;
575 static void pool_seq_descs(struct amdtp_stream *s, struct seq_desc *descs, unsigned int size,
576 unsigned int pos, unsigned int count)
578 struct amdtp_domain *d = s->domain;
579 void (*pool_seq_descs)(struct amdtp_stream *s, struct seq_desc *descs, unsigned int size,
580 unsigned int pos, unsigned int count);
582 if (!d->replay.enable || !s->ctx_data.rx.replay_target) {
583 pool_seq_descs = pool_ideal_seq_descs;
585 if (!d->replay.on_the_fly) {
586 pool_seq_descs = pool_replayed_seq;
588 struct amdtp_stream *tx = s->ctx_data.rx.replay_target;
589 const unsigned int cache_size = tx->ctx_data.tx.cache.size;
590 const unsigned int cache_pos = s->ctx_data.rx.cache_pos;
591 unsigned int cached_cycles = calculate_cached_cycle_count(tx, cache_pos);
593 if (cached_cycles > count && cached_cycles > cache_size / 2)
594 pool_seq_descs = pool_replayed_seq;
596 pool_seq_descs = pool_ideal_seq_descs;
600 pool_seq_descs(s, descs, size, pos, count);
603 static void update_pcm_pointers(struct amdtp_stream *s,
604 struct snd_pcm_substream *pcm,
609 ptr = s->pcm_buffer_pointer + frames;
610 if (ptr >= pcm->runtime->buffer_size)
611 ptr -= pcm->runtime->buffer_size;
612 WRITE_ONCE(s->pcm_buffer_pointer, ptr);
614 s->pcm_period_pointer += frames;
615 if (s->pcm_period_pointer >= pcm->runtime->period_size) {
616 s->pcm_period_pointer -= pcm->runtime->period_size;
618 // The program in the user process should periodically check the status of the intermediate
619 // buffer associated with the PCM substream to process PCM frames in the buffer, instead
620 // of receiving notification of period elapsed by poll wait.
622 // Use another work item for the period elapsed event to prevent the following AB/BA deadlock:
626 // ================================= =================================
627 // A.work item (process) pcm ioctl (process)
629 // process_rx_packets() B.PCM stream lock
630 // process_tx_packets() v
631 // v callbacks in snd_pcm_ops
632 // update_pcm_pointers() v
633 // snd_pcm_elapsed() fw_iso_context_flush_completions()
634 // snd_pcm_stream_lock_irqsave() disable_work_sync()
636 // wait until release of B wait until A exits
637 if (!pcm->runtime->no_period_wakeup)
638 queue_work(system_highpri_wq, &s->period_work);
642 static void pcm_period_work(struct work_struct *work)
644 struct amdtp_stream *s = container_of(work, struct amdtp_stream,
646 struct snd_pcm_substream *pcm = READ_ONCE(s->pcm);
649 snd_pcm_period_elapsed(pcm);
652 static int queue_packet(struct amdtp_stream *s, struct fw_iso_packet *params,
657 params->interrupt = sched_irq;
658 params->tag = s->tag;
661 err = fw_iso_context_queue(s->context, params, &s->buffer.iso_buffer,
662 s->buffer.packets[s->packet_index].offset);
664 dev_err(&s->unit->device, "queueing error: %d\n", err);
668 if (++s->packet_index >= s->queue_size)
674 static inline int queue_out_packet(struct amdtp_stream *s,
675 struct fw_iso_packet *params, bool sched_irq)
678 params->skip = !!(params->header_length == 0 && params->payload_length == 0);
679 return queue_packet(s, params, sched_irq);
682 static inline int queue_in_packet(struct amdtp_stream *s,
683 struct fw_iso_packet *params)
685 // Queue one packet for IR context.
686 params->header_length = s->ctx_data.tx.ctx_header_size;
687 params->payload_length = s->ctx_data.tx.max_ctx_payload_length;
688 params->skip = false;
689 return queue_packet(s, params, false);
692 static void generate_cip_header(struct amdtp_stream *s, __be32 cip_header[2],
693 unsigned int data_block_counter, unsigned int syt)
695 cip_header[0] = cpu_to_be32(READ_ONCE(s->source_node_id_field) |
696 (s->data_block_quadlets << CIP_DBS_SHIFT) |
697 ((s->sph << CIP_SPH_SHIFT) & CIP_SPH_MASK) |
699 cip_header[1] = cpu_to_be32(CIP_EOH |
700 ((s->fmt << CIP_FMT_SHIFT) & CIP_FMT_MASK) |
701 ((s->ctx_data.rx.fdf << CIP_FDF_SHIFT) & CIP_FDF_MASK) |
702 (syt & CIP_SYT_MASK));
705 static void build_it_pkt_header(struct amdtp_stream *s, unsigned int cycle,
706 struct fw_iso_packet *params, unsigned int header_length,
707 unsigned int data_blocks,
708 unsigned int data_block_counter,
709 unsigned int syt, unsigned int index, u32 curr_cycle_time)
711 unsigned int payload_length;
714 payload_length = data_blocks * sizeof(__be32) * s->data_block_quadlets;
715 params->payload_length = payload_length;
717 if (header_length > 0) {
718 cip_header = (__be32 *)params->header;
719 generate_cip_header(s, cip_header, data_block_counter, syt);
720 params->header_length = header_length;
725 trace_amdtp_packet(s, cycle, cip_header, payload_length + header_length, data_blocks,
726 data_block_counter, s->packet_index, index, curr_cycle_time);
729 static int check_cip_header(struct amdtp_stream *s, const __be32 *buf,
730 unsigned int payload_length,
731 unsigned int *data_blocks,
732 unsigned int *data_block_counter, unsigned int *syt)
741 cip_header[0] = be32_to_cpu(buf[0]);
742 cip_header[1] = be32_to_cpu(buf[1]);
745 * This module supports 'Two-quadlet CIP header with SYT field'.
746 * For convenience, also check FMT field is AM824 or not.
748 if ((((cip_header[0] & CIP_EOH_MASK) == CIP_EOH) ||
749 ((cip_header[1] & CIP_EOH_MASK) != CIP_EOH)) &&
750 (!(s->flags & CIP_HEADER_WITHOUT_EOH))) {
751 dev_info_ratelimited(&s->unit->device,
752 "Invalid CIP header for AMDTP: %08X:%08X\n",
753 cip_header[0], cip_header[1]);
757 /* Check valid protocol or not. */
758 sph = (cip_header[0] & CIP_SPH_MASK) >> CIP_SPH_SHIFT;
759 fmt = (cip_header[1] & CIP_FMT_MASK) >> CIP_FMT_SHIFT;
760 if (sph != s->sph || fmt != s->fmt) {
761 dev_info_ratelimited(&s->unit->device,
762 "Detect unexpected protocol: %08x %08x\n",
763 cip_header[0], cip_header[1]);
767 /* Calculate data blocks */
768 fdf = (cip_header[1] & CIP_FDF_MASK) >> CIP_FDF_SHIFT;
769 if (payload_length == 0 || (fmt == CIP_FMT_AM && fdf == AMDTP_FDF_NO_DATA)) {
772 unsigned int data_block_quadlets =
773 (cip_header[0] & CIP_DBS_MASK) >> CIP_DBS_SHIFT;
774 /* avoid division by zero */
775 if (data_block_quadlets == 0) {
776 dev_err(&s->unit->device,
777 "Detect invalid value in dbs field: %08X\n",
781 if (s->flags & CIP_WRONG_DBS)
782 data_block_quadlets = s->data_block_quadlets;
784 *data_blocks = payload_length / sizeof(__be32) / data_block_quadlets;
787 /* Check data block counter continuity */
788 dbc = cip_header[0] & CIP_DBC_MASK;
789 if (*data_blocks == 0 && (s->flags & CIP_EMPTY_HAS_WRONG_DBC) &&
790 *data_block_counter != UINT_MAX)
791 dbc = *data_block_counter;
793 if ((dbc == 0x00 && (s->flags & CIP_SKIP_DBC_ZERO_CHECK)) ||
794 *data_block_counter == UINT_MAX) {
796 } else if (!(s->flags & CIP_DBC_IS_END_EVENT)) {
797 lost = dbc != *data_block_counter;
799 unsigned int dbc_interval;
801 if (!(s->flags & CIP_DBC_IS_PAYLOAD_QUADLETS)) {
802 if (*data_blocks > 0 && s->ctx_data.tx.dbc_interval > 0)
803 dbc_interval = s->ctx_data.tx.dbc_interval;
805 dbc_interval = *data_blocks;
807 dbc_interval = payload_length / sizeof(__be32);
810 lost = dbc != ((*data_block_counter + dbc_interval) & 0xff);
814 dev_err(&s->unit->device,
815 "Detect discontinuity of CIP: %02X %02X\n",
816 *data_block_counter, dbc);
820 *data_block_counter = dbc;
822 if (!(s->flags & CIP_UNAWARE_SYT))
823 *syt = cip_header[1] & CIP_SYT_MASK;
828 static int parse_ir_ctx_header(struct amdtp_stream *s, unsigned int cycle,
829 const __be32 *ctx_header,
830 unsigned int *data_blocks,
831 unsigned int *data_block_counter,
832 unsigned int *syt, unsigned int packet_index, unsigned int index,
835 unsigned int payload_length;
836 const __be32 *cip_header;
837 unsigned int cip_header_size;
839 payload_length = be32_to_cpu(ctx_header[0]) >> ISO_DATA_LENGTH_SHIFT;
841 if (!(s->flags & CIP_NO_HEADER))
842 cip_header_size = CIP_HEADER_SIZE;
846 if (payload_length > cip_header_size + s->ctx_data.tx.max_ctx_payload_length) {
847 dev_err(&s->unit->device,
848 "Detect jumbo payload: %04x %04x\n",
849 payload_length, cip_header_size + s->ctx_data.tx.max_ctx_payload_length);
853 if (cip_header_size > 0) {
854 if (payload_length >= cip_header_size) {
857 cip_header = ctx_header + IR_CTX_HEADER_DEFAULT_QUADLETS;
858 err = check_cip_header(s, cip_header, payload_length - cip_header_size,
859 data_blocks, data_block_counter, syt);
863 // Handle the cycle as if an empty packet arrived.
870 *data_blocks = payload_length / sizeof(__be32) / s->data_block_quadlets;
873 if (*data_block_counter == UINT_MAX)
874 *data_block_counter = 0;
877 trace_amdtp_packet(s, cycle, cip_header, payload_length, *data_blocks,
878 *data_block_counter, packet_index, index, curr_cycle_time);
883 // In the CYCLE_TIMER register of IEEE 1394, 7 bits are used to represent the second. On
884 // the other hand, in the DMA descriptors of 1394 OHCI, 3 bits are used to represent it.
885 // Thus, via the Linux firewire subsystem, we can get the 3 bits for the second.
886 static inline u32 compute_ohci_iso_ctx_cycle_count(u32 tstamp)
888 return (((tstamp >> 13) & 0x07) * CYCLES_PER_SECOND) + (tstamp & 0x1fff);
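// For example, a tstamp of 0x2f00 has second = 1 and cycle = 0xf00 (3840),
// which decodes to a cycle count of 1 * 8000 + 3840 = 11840.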
891 static inline u32 compute_ohci_cycle_count(__be32 ctx_header_tstamp)
893 u32 tstamp = be32_to_cpu(ctx_header_tstamp) & HEADER_TSTAMP_MASK;
894 return compute_ohci_iso_ctx_cycle_count(tstamp);
897 static inline u32 increment_ohci_cycle_count(u32 cycle, unsigned int addend)
900 if (cycle >= OHCI_SECOND_MODULUS * CYCLES_PER_SECOND)
901 cycle -= OHCI_SECOND_MODULUS * CYCLES_PER_SECOND;
905 static inline u32 decrement_ohci_cycle_count(u32 minuend, u32 subtrahend)
907 if (minuend < subtrahend)
908 minuend += OHCI_SECOND_MODULUS * CYCLES_PER_SECOND;
910 return minuend - subtrahend;
913 static int compare_ohci_cycle_count(u32 lval, u32 rval)
917 else if (lval < rval && rval - lval < OHCI_SECOND_MODULUS * CYCLES_PER_SECOND / 2)
923 // Align to the actual cycle count for the packet which is going to be scheduled.
924 // This module queued the same number of isochronous cycles as the size of the queue
925 // to skip isochronous cycles, therefore it's OK to just increment the cycle by
926 // the size of the queue for the scheduled cycle.
927 static inline u32 compute_ohci_it_cycle(const __be32 ctx_header_tstamp,
928 unsigned int queue_size)
930 u32 cycle = compute_ohci_cycle_count(ctx_header_tstamp);
931 return increment_ohci_cycle_count(cycle, queue_size);
934 static int generate_tx_packet_descs(struct amdtp_stream *s, struct pkt_desc *desc,
935 const __be32 *ctx_header, unsigned int packet_count,
936 unsigned int *desc_count)
938 unsigned int next_cycle = s->next_cycle;
939 unsigned int dbc = s->data_block_counter;
940 unsigned int packet_index = s->packet_index;
941 unsigned int queue_size = s->queue_size;
942 u32 curr_cycle_time = 0;
946 if (trace_amdtp_packet_enabled())
947 (void)fw_card_read_cycle_time(fw_parent_device(s->unit)->card, &curr_cycle_time);
950 for (i = 0; i < packet_count; ++i) {
953 unsigned int data_blocks;
956 cycle = compute_ohci_cycle_count(ctx_header[1]);
957 lost = (next_cycle != cycle);
959 if (s->flags & CIP_NO_HEADER) {
960 // Fireface skips transmission just for an isoc cycle corresponding to an empty packet.
962 unsigned int prev_cycle = next_cycle;
964 next_cycle = increment_ohci_cycle_count(next_cycle, 1);
965 lost = (next_cycle != cycle);
967 // Prepare a description for the skipped cycle for sequence replay.
969 desc->cycle = prev_cycle;
971 desc->data_blocks = 0;
972 desc->data_block_counter = dbc;
973 desc->ctx_payload = NULL;
974 desc = amdtp_stream_next_packet_desc(s, desc);
977 } else if (s->flags & CIP_JUMBO_PAYLOAD) {
978 // OXFW970 skips transmission for several isoc cycles during an
979 // asynchronous transaction. The sequence replay is impossible for this reason.
981 unsigned int safe_cycle = increment_ohci_cycle_count(next_cycle,
982 IR_JUMBO_PAYLOAD_MAX_SKIP_CYCLES);
983 lost = (compare_ohci_cycle_count(safe_cycle, cycle) < 0);
986 dev_err(&s->unit->device, "Detect discontinuity of cycle: %d %d\n",
992 err = parse_ir_ctx_header(s, cycle, ctx_header, &data_blocks, &dbc, &syt,
993 packet_index, i, curr_cycle_time);
999 desc->data_blocks = data_blocks;
1000 desc->data_block_counter = dbc;
1001 desc->ctx_payload = s->buffer.packets[packet_index].buffer;
1003 if (!(s->flags & CIP_DBC_IS_END_EVENT))
1004 dbc = (dbc + desc->data_blocks) & 0xff;
1006 next_cycle = increment_ohci_cycle_count(next_cycle, 1);
1007 desc = amdtp_stream_next_packet_desc(s, desc);
1009 ctx_header += s->ctx_data.tx.ctx_header_size / sizeof(*ctx_header);
1010 packet_index = (packet_index + 1) % queue_size;
1013 s->next_cycle = next_cycle;
1014 s->data_block_counter = dbc;
1019 static unsigned int compute_syt(unsigned int syt_offset, unsigned int cycle,
1020 unsigned int transfer_delay)
1024 syt_offset += transfer_delay;
1025 syt = ((cycle + syt_offset / TICKS_PER_CYCLE) << 12) |
1026 (syt_offset % TICKS_PER_CYCLE);
1027 return syt & CIP_SYT_MASK;
1030 static void generate_rx_packet_descs(struct amdtp_stream *s, struct pkt_desc *desc,
1031 const __be32 *ctx_header, unsigned int packet_count)
1033 struct seq_desc *seq_descs = s->ctx_data.rx.seq.descs;
1034 unsigned int seq_size = s->ctx_data.rx.seq.size;
1035 unsigned int seq_pos = s->ctx_data.rx.seq.pos;
1036 unsigned int dbc = s->data_block_counter;
1037 bool aware_syt = !(s->flags & CIP_UNAWARE_SYT);
1040 pool_seq_descs(s, seq_descs, seq_size, seq_pos, packet_count);
1042 for (i = 0; i < packet_count; ++i) {
1043 unsigned int index = (s->packet_index + i) % s->queue_size;
1044 const struct seq_desc *seq = seq_descs + seq_pos;
1046 desc->cycle = compute_ohci_it_cycle(*ctx_header, s->queue_size);
1048 if (aware_syt && seq->syt_offset != CIP_SYT_NO_INFO)
1049 desc->syt = compute_syt(seq->syt_offset, desc->cycle, s->transfer_delay);
1051 desc->syt = CIP_SYT_NO_INFO;
1053 desc->data_blocks = seq->data_blocks;
1055 if (s->flags & CIP_DBC_IS_END_EVENT)
1056 dbc = (dbc + desc->data_blocks) & 0xff;
1058 desc->data_block_counter = dbc;
1060 if (!(s->flags & CIP_DBC_IS_END_EVENT))
1061 dbc = (dbc + desc->data_blocks) & 0xff;
1063 desc->ctx_payload = s->buffer.packets[index].buffer;
1065 seq_pos = (seq_pos + 1) % seq_size;
1066 desc = amdtp_stream_next_packet_desc(s, desc);
1071 s->data_block_counter = dbc;
1072 s->ctx_data.rx.seq.pos = seq_pos;
1075 static inline void cancel_stream(struct amdtp_stream *s)
1077 struct work_struct *work = current_work();
1079 s->packet_index = -1;
1081 // Detect work items for any isochronous context. The work item for pcm_period_work()
1082 // should be avoided since the call of snd_pcm_period_elapsed() can reach via
1083 // snd_pcm_ops.pointer() while acquiring the PCM stream (group) lock, causing a deadlock at
1084 // snd_pcm_stop_xrun().
1085 if (work && work != &s->period_work)
1086 amdtp_stream_pcm_abort(s);
1087 WRITE_ONCE(s->pcm_buffer_pointer, SNDRV_PCM_POS_XRUN);
1090 static snd_pcm_sframes_t compute_pcm_extra_delay(struct amdtp_stream *s,
1091 const struct pkt_desc *desc, unsigned int count)
1093 unsigned int data_block_count = 0;
1103 // Forward to the latest record.
1104 for (i = 0; i < count - 1; ++i)
1105 desc = amdtp_stream_next_packet_desc(s, desc);
1106 latest_cycle = desc->cycle;
1108 err = fw_card_read_cycle_time(fw_parent_device(s->unit)->card, &cycle_time);
1112 // Compute cycle count with lower 3 bits of second field and cycle field like timestamp
1113 // format of 1394 OHCI isochronous context.
1114 curr_cycle = compute_ohci_iso_ctx_cycle_count((cycle_time >> 12) & 0x0000ffff);
1116 if (s->direction == AMDTP_IN_STREAM) {
1117 // NOTE: The AMDTP packet descriptor should be for the past isochronous cycle since
1118 // it corresponds to an arrived isochronous packet.
1119 if (compare_ohci_cycle_count(latest_cycle, curr_cycle) > 0)
1121 cycle_gap = decrement_ohci_cycle_count(curr_cycle, latest_cycle);
1123 // NOTE: estimate delay by recent history of arrived AMDTP packets. The estimated
1124 // value is expected to correspond to a few packets (0-2) since the packet which arrived
1125 // at the most recent isochronous cycle has already been processed.
1126 for (i = 0; i < cycle_gap; ++i) {
1127 desc = amdtp_stream_next_packet_desc(s, desc);
1128 data_block_count += desc->data_blocks;
1131 // NOTE: The AMDTP packet descriptor should be for the future isochronous cycle
1132 // since it was already scheduled.
1133 if (compare_ohci_cycle_count(latest_cycle, curr_cycle) < 0)
1135 cycle_gap = decrement_ohci_cycle_count(latest_cycle, curr_cycle);
1137 // NOTE: use history of scheduled packets.
1138 for (i = 0; i < cycle_gap; ++i) {
1139 data_block_count += desc->data_blocks;
1140 desc = prev_packet_desc(s, desc);
1144 return data_block_count * s->pcm_frame_multiplier;
1147 static void process_ctx_payloads(struct amdtp_stream *s,
1148 const struct pkt_desc *desc,
1151 struct snd_pcm_substream *pcm;
1154 pcm = READ_ONCE(s->pcm);
1155 s->process_ctx_payloads(s, desc, count, pcm);
1158 unsigned int data_block_count = 0;
1160 pcm->runtime->delay = compute_pcm_extra_delay(s, desc, count);
1162 for (i = 0; i < count; ++i) {
1163 data_block_count += desc->data_blocks;
1164 desc = amdtp_stream_next_packet_desc(s, desc);
1167 update_pcm_pointers(s, pcm, data_block_count * s->pcm_frame_multiplier);
1171 static void process_rx_packets(struct fw_iso_context *context, u32 tstamp, size_t header_length,
1172 void *header, void *private_data)
1174 struct amdtp_stream *s = private_data;
1175 const struct amdtp_domain *d = s->domain;
1176 const __be32 *ctx_header = header;
1177 const unsigned int events_per_period = d->events_per_period;
1178 unsigned int event_count = s->ctx_data.rx.event_count;
1179 struct pkt_desc *desc = s->packet_descs_cursor;
1180 unsigned int pkt_header_length;
1181 unsigned int packets;
1182 u32 curr_cycle_time;
1186 if (s->packet_index < 0)
1189 // Calculate the number of packets in buffer and check XRUN.
1190 packets = header_length / sizeof(*ctx_header);
1192 generate_rx_packet_descs(s, desc, ctx_header, packets);
1194 process_ctx_payloads(s, desc, packets);
1196 if (!(s->flags & CIP_NO_HEADER))
1197 pkt_header_length = IT_PKT_HEADER_SIZE_CIP;
1199 pkt_header_length = 0;
1201 if (s == d->irq_target) {
1202 // In NO_PERIOD_WAKEUP mode, the packets for all IT/IR contexts are processed by
1203 // tasks of the user process operating the ALSA PCM character device via ioctl(2)
1204 // requests, instead of by the scheduled hardware IRQ of an IT context.
1205 struct snd_pcm_substream *pcm = READ_ONCE(s->pcm);
1206 need_hw_irq = !pcm || !pcm->runtime->no_period_wakeup;
1208 need_hw_irq = false;
1211 if (trace_amdtp_packet_enabled())
1212 (void)fw_card_read_cycle_time(fw_parent_device(s->unit)->card, &curr_cycle_time);
1214 for (i = 0; i < packets; ++i) {
1215 DEFINE_RAW_FLEX(struct fw_iso_packet, template, header, CIP_HEADER_QUADLETS);
1216 bool sched_irq = false;
1218 build_it_pkt_header(s, desc->cycle, template, pkt_header_length,
1219 desc->data_blocks, desc->data_block_counter,
1220 desc->syt, i, curr_cycle_time);
1222 if (s == s->domain->irq_target) {
1223 event_count += desc->data_blocks;
1224 if (event_count >= events_per_period) {
1225 event_count -= events_per_period;
1226 sched_irq = need_hw_irq;
1230 if (queue_out_packet(s, template, sched_irq) < 0) {
1235 desc = amdtp_stream_next_packet_desc(s, desc);
1238 s->ctx_data.rx.event_count = event_count;
1239 s->packet_descs_cursor = desc;
1242 static void skip_rx_packets(struct fw_iso_context *context, u32 tstamp, size_t header_length,
1243 void *header, void *private_data)
1245 struct amdtp_stream *s = private_data;
1246 struct amdtp_domain *d = s->domain;
1247 const __be32 *ctx_header = header;
1248 unsigned int packets;
1252 if (s->packet_index < 0)
1255 packets = header_length / sizeof(*ctx_header);
1257 cycle = compute_ohci_it_cycle(ctx_header[packets - 1], s->queue_size);
1258 s->next_cycle = increment_ohci_cycle_count(cycle, 1);
1260 for (i = 0; i < packets; ++i) {
1261 struct fw_iso_packet params = {
1263 .payload_length = 0,
1265 bool sched_irq = (s == d->irq_target && i == packets - 1);
1267 if (queue_out_packet(s, ¶ms, sched_irq) < 0) {
1274 static void irq_target_callback(struct fw_iso_context *context, u32 tstamp, size_t header_length,
1275 void *header, void *private_data);
1277 static void process_rx_packets_intermediately(struct fw_iso_context *context, u32 tstamp,
1278 size_t header_length, void *header, void *private_data)
1280 struct amdtp_stream *s = private_data;
1281 struct amdtp_domain *d = s->domain;
1282 __be32 *ctx_header = header;
1283 const unsigned int queue_size = s->queue_size;
1284 unsigned int packets;
1285 unsigned int offset;
1287 if (s->packet_index < 0)
1290 packets = header_length / sizeof(*ctx_header);
1293 while (offset < packets) {
1294 unsigned int cycle = compute_ohci_it_cycle(ctx_header[offset], queue_size);
1296 if (compare_ohci_cycle_count(cycle, d->processing_cycle.rx_start) >= 0)
1303 unsigned int length = sizeof(*ctx_header) * offset;
1305 skip_rx_packets(context, tstamp, length, ctx_header, private_data);
1306 if (amdtp_streaming_error(s))
1309 ctx_header += offset;
1310 header_length -= length;
1313 if (offset < packets) {
1314 s->ready_processing = true;
1315 wake_up(&s->ready_wait);
1317 if (d->replay.enable)
1318 s->ctx_data.rx.cache_pos = 0;
1320 process_rx_packets(context, tstamp, header_length, ctx_header, private_data);
1321 if (amdtp_streaming_error(s))
1324 if (s == d->irq_target)
1325 s->context->callback.sc = irq_target_callback;
1327 s->context->callback.sc = process_rx_packets;
1331 static void process_tx_packets(struct fw_iso_context *context, u32 tstamp, size_t header_length,
1332 void *header, void *private_data)
1334 struct amdtp_stream *s = private_data;
1335 __be32 *ctx_header = header;
1336 struct pkt_desc *desc = s->packet_descs_cursor;
1337 unsigned int packet_count;
1338 unsigned int desc_count;
1342 if (s->packet_index < 0)
1345 // Calculate the number of packets in buffer and check XRUN.
1346 packet_count = header_length / s->ctx_data.tx.ctx_header_size;
1349 err = generate_tx_packet_descs(s, desc, ctx_header, packet_count, &desc_count);
1351 if (err != -EAGAIN) {
1356 struct amdtp_domain *d = s->domain;
1358 process_ctx_payloads(s, desc, desc_count);
1360 if (d->replay.enable)
1361 cache_seq(s, desc, desc_count);
1363 for (i = 0; i < desc_count; ++i)
1364 desc = amdtp_stream_next_packet_desc(s, desc);
1365 s->packet_descs_cursor = desc;
1368 for (i = 0; i < packet_count; ++i) {
1369 struct fw_iso_packet params = {0};
1371 if (queue_in_packet(s, ¶ms) < 0) {
1378 static void drop_tx_packets(struct fw_iso_context *context, u32 tstamp, size_t header_length,
1379 void *header, void *private_data)
1381 struct amdtp_stream *s = private_data;
1382 const __be32 *ctx_header = header;
1383 unsigned int packets;
1387 if (s->packet_index < 0)
1390 packets = header_length / s->ctx_data.tx.ctx_header_size;
1392 ctx_header += (packets - 1) * s->ctx_data.tx.ctx_header_size / sizeof(*ctx_header);
1393 cycle = compute_ohci_cycle_count(ctx_header[1]);
1394 s->next_cycle = increment_ohci_cycle_count(cycle, 1);
1396 for (i = 0; i < packets; ++i) {
1397 struct fw_iso_packet params = {0};
1399 if (queue_in_packet(s, ¶ms) < 0) {
1406 static void process_tx_packets_intermediately(struct fw_iso_context *context, u32 tstamp,
1407 size_t header_length, void *header, void *private_data)
1409 struct amdtp_stream *s = private_data;
1410 struct amdtp_domain *d = s->domain;
1412 unsigned int packets;
1413 unsigned int offset;
1415 if (s->packet_index < 0)
1418 packets = header_length / s->ctx_data.tx.ctx_header_size;
1421 ctx_header = header;
1422 while (offset < packets) {
1423 unsigned int cycle = compute_ohci_cycle_count(ctx_header[1]);
1425 if (compare_ohci_cycle_count(cycle, d->processing_cycle.tx_start) >= 0)
1428 ctx_header += s->ctx_data.tx.ctx_header_size / sizeof(__be32);
1432 ctx_header = header;
1435 size_t length = s->ctx_data.tx.ctx_header_size * offset;
1437 drop_tx_packets(context, tstamp, length, ctx_header, s);
1438 if (amdtp_streaming_error(s))
1441 ctx_header += length / sizeof(*ctx_header);
1442 header_length -= length;
1445 if (offset < packets) {
1446 s->ready_processing = true;
1447 wake_up(&s->ready_wait);
1449 process_tx_packets(context, tstamp, header_length, ctx_header, s);
1450 if (amdtp_streaming_error(s))
1453 context->callback.sc = process_tx_packets;
1457 static void drop_tx_packets_initially(struct fw_iso_context *context, u32 tstamp,
1458 size_t header_length, void *header, void *private_data)
1460 struct amdtp_stream *s = private_data;
1461 struct amdtp_domain *d = s->domain;
1464 unsigned int events;
1467 if (s->packet_index < 0)
1470 count = header_length / s->ctx_data.tx.ctx_header_size;
1472 // Attempt to detect any event in the batch of packets.
1474 ctx_header = header;
1475 for (i = 0; i < count; ++i) {
1476 unsigned int payload_quads =
1477 (be32_to_cpu(*ctx_header) >> ISO_DATA_LENGTH_SHIFT) / sizeof(__be32);
1478 unsigned int data_blocks;
1480 if (s->flags & CIP_NO_HEADER) {
1481 data_blocks = payload_quads / s->data_block_quadlets;
1483 __be32 *cip_headers = ctx_header + IR_CTX_HEADER_DEFAULT_QUADLETS;
1485 if (payload_quads < CIP_HEADER_QUADLETS) {
1488 payload_quads -= CIP_HEADER_QUADLETS;
1490 if (s->flags & CIP_UNAWARE_SYT) {
1491 data_blocks = payload_quads / s->data_block_quadlets;
1493 u32 cip1 = be32_to_cpu(cip_headers[1]);
1495 // A NODATA packet can include data blocks, but they are
1496 // not available as events.
1497 if ((cip1 & CIP_NO_DATA) == CIP_NO_DATA)
1500 data_blocks = payload_quads / s->data_block_quadlets;
1505 events += data_blocks;
1507 ctx_header += s->ctx_data.tx.ctx_header_size / sizeof(__be32);
1510 drop_tx_packets(context, tstamp, header_length, header, s);
1513 s->ctx_data.tx.event_starts = true;
1515 // Decide the cycle count to begin processing content of packet in IR contexts.
1517 unsigned int stream_count = 0;
1518 unsigned int event_starts_count = 0;
1519 unsigned int cycle = UINT_MAX;
1521 list_for_each_entry(s, &d->streams, list) {
1522 if (s->direction == AMDTP_IN_STREAM) {
1524 if (s->ctx_data.tx.event_starts)
1525 ++event_starts_count;
1529 if (stream_count == event_starts_count) {
1530 unsigned int next_cycle;
1532 list_for_each_entry(s, &d->streams, list) {
1533 if (s->direction != AMDTP_IN_STREAM)
1536 next_cycle = increment_ohci_cycle_count(s->next_cycle,
1537 d->processing_cycle.tx_init_skip);
1538 if (cycle == UINT_MAX ||
1539 compare_ohci_cycle_count(next_cycle, cycle) > 0)
1542 s->context->callback.sc = process_tx_packets_intermediately;
1545 d->processing_cycle.tx_start = cycle;
1550 static void process_ctxs_in_domain(struct amdtp_domain *d)
1552 struct amdtp_stream *s;
1554 list_for_each_entry(s, &d->streams, list) {
1555 if (s != d->irq_target && amdtp_stream_running(s))
1556 fw_iso_context_flush_completions(s->context);
1558 if (amdtp_streaming_error(s))
1564 if (amdtp_stream_running(d->irq_target))
1565 cancel_stream(d->irq_target);
1567 list_for_each_entry(s, &d->streams, list) {
1568 if (amdtp_stream_running(s))
1573 static void irq_target_callback(struct fw_iso_context *context, u32 tstamp, size_t header_length,
1574 void *header, void *private_data)
1576 struct amdtp_stream *s = private_data;
1577 struct amdtp_domain *d = s->domain;
1579 process_rx_packets(context, tstamp, header_length, header, private_data);
1580 process_ctxs_in_domain(d);
1583 static void irq_target_callback_intermediately(struct fw_iso_context *context, u32 tstamp,
1584 size_t header_length, void *header, void *private_data)
1586 struct amdtp_stream *s = private_data;
1587 struct amdtp_domain *d = s->domain;
1589 process_rx_packets_intermediately(context, tstamp, header_length, header, private_data);
1590 process_ctxs_in_domain(d);
1593 static void irq_target_callback_skip(struct fw_iso_context *context, u32 tstamp,
1594 size_t header_length, void *header, void *private_data)
1596 struct amdtp_stream *s = private_data;
1597 struct amdtp_domain *d = s->domain;
1598 bool ready_to_start;
1600 skip_rx_packets(context, tstamp, header_length, header, private_data);
1601 process_ctxs_in_domain(d);
1603 if (d->replay.enable && !d->replay.on_the_fly) {
1604 unsigned int rx_count = 0;
1605 unsigned int rx_ready_count = 0;
1606 struct amdtp_stream *rx;
1608 list_for_each_entry(rx, &d->streams, list) {
1609 struct amdtp_stream *tx;
1610 unsigned int cached_cycles;
1612 if (rx->direction != AMDTP_OUT_STREAM)
1616 tx = rx->ctx_data.rx.replay_target;
1617 cached_cycles = calculate_cached_cycle_count(tx, 0);
1618 if (cached_cycles > tx->ctx_data.tx.cache.size / 2)
1622 ready_to_start = (rx_count == rx_ready_count);
1624 ready_to_start = true;
1627 // Decide the cycle count to begin processing content of packet in IT contexts. All of IT
1628 // contexts are expected to start and get callback when reaching here.
1629 if (ready_to_start) {
1630 unsigned int cycle = s->next_cycle;
1631 list_for_each_entry(s, &d->streams, list) {
1632 if (s->direction != AMDTP_OUT_STREAM)
1635 if (compare_ohci_cycle_count(s->next_cycle, cycle) > 0)
1636 cycle = s->next_cycle;
1638 if (s == d->irq_target)
1639 s->context->callback.sc = irq_target_callback_intermediately;
1641 s->context->callback.sc = process_rx_packets_intermediately;
1644 d->processing_cycle.rx_start = cycle;
1648 // This is executed one time. For an in-stream, the first packet has arrived. For an
1649 // out-stream, it is prepared to transmit the first packet.
1650 static void amdtp_stream_first_callback(struct fw_iso_context *context,
1651 u32 tstamp, size_t header_length,
1652 void *header, void *private_data)
1654 struct amdtp_stream *s = private_data;
1655 struct amdtp_domain *d = s->domain;
1657 if (s->direction == AMDTP_IN_STREAM) {
1658 context->callback.sc = drop_tx_packets_initially;
1660 if (s == d->irq_target)
1661 context->callback.sc = irq_target_callback_skip;
1663 context->callback.sc = skip_rx_packets;
1666 context->callback.sc(context, tstamp, header_length, header, s);
1670 * amdtp_stream_start - start transferring packets
1671 * @s: the AMDTP stream to start
1672 * @channel: the isochronous channel on the bus
1673 * @speed: firewire speed code
1674 * @queue_size: The number of packets in the queue.
1675 * @idle_irq_interval: the interval to queue packets during the initial state.
1677 * The stream cannot be started until it has been configured with
1678 * amdtp_stream_set_parameters() and it must be started before any PCM or MIDI
1679 * device can be started.
1681 static int amdtp_stream_start(struct amdtp_stream *s, int channel, int speed,
1682 unsigned int queue_size, unsigned int idle_irq_interval)
1684 bool is_irq_target = (s == s->domain->irq_target);
1685 unsigned int ctx_header_size;
1686 unsigned int max_ctx_payload_size;
1687 enum dma_data_direction dir;
1688 struct pkt_desc *descs;
1689 int i, type, tag, err;
1691 mutex_lock(&s->mutex);
1693 if (WARN_ON(amdtp_stream_running(s) ||
1694 (s->data_block_quadlets < 1))) {
1699 if (s->direction == AMDTP_IN_STREAM) {
1700 // NOTE: IT context should be used for constant IRQ.
1701 if (is_irq_target) {
1706 s->data_block_counter = UINT_MAX;
1708 s->data_block_counter = 0;
1711 // initialize packet buffer.
1712 if (s->direction == AMDTP_IN_STREAM) {
1713 dir = DMA_FROM_DEVICE;
1714 type = FW_ISO_CONTEXT_RECEIVE;
1715 if (!(s->flags & CIP_NO_HEADER))
1716 ctx_header_size = IR_CTX_HEADER_SIZE_CIP;
1718 ctx_header_size = IR_CTX_HEADER_SIZE_NO_CIP;
1720 dir = DMA_TO_DEVICE;
1721 type = FW_ISO_CONTEXT_TRANSMIT;
1722 ctx_header_size = 0; // No effect for IT context.
1724 max_ctx_payload_size = amdtp_stream_get_max_ctx_payload_size(s);
1726 err = iso_packets_buffer_init(&s->buffer, s->unit, queue_size, max_ctx_payload_size, dir);
1729 s->queue_size = queue_size;
1731 s->context = fw_iso_context_create(fw_parent_device(s->unit)->card,
1732 type, channel, speed, ctx_header_size,
1733 amdtp_stream_first_callback, s);
1734 if (IS_ERR(s->context)) {
1735 err = PTR_ERR(s->context);
1737 dev_err(&s->unit->device,
1738 "no free stream on this controller\n");
1742 amdtp_stream_update(s);
1744 if (s->direction == AMDTP_IN_STREAM) {
1745 s->ctx_data.tx.max_ctx_payload_length = max_ctx_payload_size;
1746 s->ctx_data.tx.ctx_header_size = ctx_header_size;
1747 s->ctx_data.tx.event_starts = false;
1749 if (s->domain->replay.enable) {
1750 // struct fw_iso_context.drop_overflow_headers is false, therefore it's
1751 // possible to cache unexpectedly many entries.
1752 s->ctx_data.tx.cache.size = max_t(unsigned int, s->syt_interval * 2,
1753 queue_size * 3 / 2);
1754 s->ctx_data.tx.cache.pos = 0;
1755 s->ctx_data.tx.cache.descs = kcalloc(s->ctx_data.tx.cache.size,
1756 sizeof(*s->ctx_data.tx.cache.descs), GFP_KERNEL);
1757 if (!s->ctx_data.tx.cache.descs) {
1763 static const struct {
1764 unsigned int data_block;
1765 unsigned int syt_offset;
1766 } *entry, initial_state[] = {
1767 [CIP_SFC_32000] = { 4, 3072 },
1768 [CIP_SFC_48000] = { 6, 1024 },
1769 [CIP_SFC_96000] = { 12, 1024 },
1770 [CIP_SFC_192000] = { 24, 1024 },
1771 [CIP_SFC_44100] = { 0, 67 },
1772 [CIP_SFC_88200] = { 0, 67 },
1773 [CIP_SFC_176400] = { 0, 67 },
1776 s->ctx_data.rx.seq.descs = kcalloc(queue_size, sizeof(*s->ctx_data.rx.seq.descs), GFP_KERNEL);
1777 if (!s->ctx_data.rx.seq.descs) {
1781 s->ctx_data.rx.seq.size = queue_size;
1782 s->ctx_data.rx.seq.pos = 0;
1784 entry = &initial_state[s->sfc];
1785 s->ctx_data.rx.data_block_state = entry->data_block;
1786 s->ctx_data.rx.syt_offset_state = entry->syt_offset;
1787 s->ctx_data.rx.last_syt_offset = TICKS_PER_CYCLE;
1789 s->ctx_data.rx.event_count = 0;
1792 if (s->flags & CIP_NO_HEADER)
1793 s->tag = TAG_NO_CIP_HEADER;
1797 // NOTE: When operating without hardIRQ/softIRQ, applications tend to call ioctl requests for
1798 // the runtime of the PCM substream at an interval equivalent to the size of the PCM buffer. This
1799 // could wrap around the queue of AMDTP packet descriptors with a small loss of history. To be
1800 // safe, keep 8 more elements in the queue, equivalent to 1 ms.
1801 descs = kcalloc(s->queue_size + 8, sizeof(*descs), GFP_KERNEL);
1806 s->packet_descs = descs;
1808 INIT_LIST_HEAD(&s->packet_descs_list);
1809 for (i = 0; i < s->queue_size; ++i) {
1810 INIT_LIST_HEAD(&descs->link);
1811 list_add_tail(&descs->link, &s->packet_descs_list);
1814 s->packet_descs_cursor = list_first_entry(&s->packet_descs_list, struct pkt_desc, link);
1816 s->packet_index = 0;
1818 struct fw_iso_packet params;
1820 if (s->direction == AMDTP_IN_STREAM) {
1821 err = queue_in_packet(s, ¶ms);
1823 bool sched_irq = false;
1825 params.header_length = 0;
1826 params.payload_length = 0;
1828 if (is_irq_target) {
1829 sched_irq = !((s->packet_index + 1) %
1833 err = queue_out_packet(s, ¶ms, sched_irq);
1837 } while (s->packet_index > 0);
1839 /* NOTE: TAG1 matches CIP. This just affects the in-stream. */
1840 tag = FW_ISO_CONTEXT_MATCH_TAG1;
1841 if ((s->flags & CIP_EMPTY_WITH_TAG0) || (s->flags & CIP_NO_HEADER))
1842 tag |= FW_ISO_CONTEXT_MATCH_TAG0;
1844 s->ready_processing = false;
1845 err = fw_iso_context_start(s->context, -1, 0, tag);
1849 mutex_unlock(&s->mutex);
1853 kfree(s->packet_descs);
1854 s->packet_descs = NULL;
1856 if (s->direction == AMDTP_OUT_STREAM) {
1857 kfree(s->ctx_data.rx.seq.descs);
1859 if (s->domain->replay.enable)
1860 kfree(s->ctx_data.tx.cache.descs);
1862 fw_iso_context_destroy(s->context);
1863 s->context = ERR_PTR(-1);
1865 iso_packets_buffer_destroy(&s->buffer, s->unit);
1867 mutex_unlock(&s->mutex);
1873 * amdtp_domain_stream_pcm_pointer - get the PCM buffer position
1874 * @d: the AMDTP domain.
1875 * @s: the AMDTP stream that transports the PCM data
1877 * Returns the current buffer position, in frames.
1879 unsigned long amdtp_domain_stream_pcm_pointer(struct amdtp_domain *d,
1880 struct amdtp_stream *s)
1882 struct amdtp_stream *irq_target = d->irq_target;
1884 if (irq_target && amdtp_stream_running(irq_target)) {
1885 // The work item to call snd_pcm_period_elapsed() can reach here by the call of
1886 // snd_pcm_ops.pointer(), however fewer packets would be available then. Therefore
1887 // the following call is just for user process contexts.
1888 if (current_work() != &s->period_work)
1889 fw_iso_context_flush_completions(irq_target->context);
1892 return READ_ONCE(s->pcm_buffer_pointer);
1894 EXPORT_SYMBOL_GPL(amdtp_domain_stream_pcm_pointer);
1897 * amdtp_domain_stream_pcm_ack - acknowledge queued PCM frames
1898 * @d: the AMDTP domain.
1899 * @s: the AMDTP stream that transfers the PCM frames
1901 * Returns zero always.
1903 int amdtp_domain_stream_pcm_ack(struct amdtp_domain *d, struct amdtp_stream *s)
1905 struct amdtp_stream *irq_target = d->irq_target;
1907 // Process isochronous packets for recent isochronous cycle to handle
1908 // queued PCM frames.
1909 if (irq_target && amdtp_stream_running(irq_target))
1910 fw_iso_context_flush_completions(irq_target->context);
1914 EXPORT_SYMBOL_GPL(amdtp_domain_stream_pcm_ack);
1917 * amdtp_stream_update - update the stream after a bus reset
1918 * @s: the AMDTP stream
1920 void amdtp_stream_update(struct amdtp_stream *s)
1923 WRITE_ONCE(s->source_node_id_field,
1924 (fw_parent_device(s->unit)->card->node_id << CIP_SID_SHIFT) & CIP_SID_MASK);
1926 EXPORT_SYMBOL(amdtp_stream_update);
1929 * amdtp_stream_stop - stop sending packets
1930 * @s: the AMDTP stream to stop
1932 * All PCM and MIDI devices of the stream must be stopped before the stream
1933 * itself can be stopped.
1935 static void amdtp_stream_stop(struct amdtp_stream *s)
1937 mutex_lock(&s->mutex);
1939 if (!amdtp_stream_running(s)) {
1940 mutex_unlock(&s->mutex);
1944 cancel_work_sync(&s->period_work);
1945 fw_iso_context_stop(s->context);
1946 fw_iso_context_destroy(s->context);
1947 s->context = ERR_PTR(-1);
1948 iso_packets_buffer_destroy(&s->buffer, s->unit);
1949 kfree(s->packet_descs);
1950 s->packet_descs = NULL;
1952 if (s->direction == AMDTP_OUT_STREAM) {
1953 kfree(s->ctx_data.rx.seq.descs);
1955 if (s->domain->replay.enable)
1956 kfree(s->ctx_data.tx.cache.descs);
1959 mutex_unlock(&s->mutex);
1963 * amdtp_stream_pcm_abort - abort the running PCM device
1964 * @s: the AMDTP stream about to be stopped
1966 * If the isochronous stream needs to be stopped asynchronously, call this
1967 * function first to stop the PCM device.
1969 void amdtp_stream_pcm_abort(struct amdtp_stream *s)
1971 struct snd_pcm_substream *pcm;
1973 pcm = READ_ONCE(s->pcm);
1975 snd_pcm_stop_xrun(pcm);
1977 EXPORT_SYMBOL(amdtp_stream_pcm_abort);
1980 * amdtp_domain_init - initialize an AMDTP domain structure
1981 * @d: the AMDTP domain to initialize.
1983 int amdtp_domain_init(struct amdtp_domain *d)
1985 INIT_LIST_HEAD(&d->streams);
1987 d->events_per_period = 0;
1991 EXPORT_SYMBOL_GPL(amdtp_domain_init);
1994 * amdtp_domain_destroy - destroy an AMDTP domain structure
1995 * @d: the AMDTP domain to destroy.
1997 void amdtp_domain_destroy(struct amdtp_domain *d)
1999 // At present nothing to do.
2002 EXPORT_SYMBOL_GPL(amdtp_domain_destroy);
2005 * amdtp_domain_add_stream - register isoc context into the domain.
2006 * @d: the AMDTP domain.
2007 * @s: the AMDTP stream.
2008 * @channel: the isochronous channel on the bus.
2009 * @speed: firewire speed code.
2011 int amdtp_domain_add_stream(struct amdtp_domain *d, struct amdtp_stream *s,
2012 int channel, int speed)
2014 struct amdtp_stream *tmp;
2016 list_for_each_entry(tmp, &d->streams, list) {
2021 list_add(&s->list, &d->streams);
2023 s->channel = channel;
2029 EXPORT_SYMBOL_GPL(amdtp_domain_add_stream);
2031 // Make the reference from rx stream to tx stream for sequence replay. When the number of tx streams
2032 // is less than the number of rx streams, the first tx stream is selected.
2033 static int make_association(struct amdtp_domain *d)
2035 unsigned int dst_index = 0;
2036 struct amdtp_stream *rx;
2038 // Make association to replay target.
2039 list_for_each_entry(rx, &d->streams, list) {
2040 if (rx->direction == AMDTP_OUT_STREAM) {
2041 unsigned int src_index = 0;
2042 struct amdtp_stream *tx = NULL;
2043 struct amdtp_stream *s;
2045 list_for_each_entry(s, &d->streams, list) {
2046 if (s->direction == AMDTP_IN_STREAM) {
2047 if (dst_index == src_index) {
2056 // Select the first entry.
2057 list_for_each_entry(s, &d->streams, list) {
2058 if (s->direction == AMDTP_IN_STREAM) {
2063 // No target is available to replay sequence.
2068 rx->ctx_data.rx.replay_target = tx;
2078 * amdtp_domain_start - start sending packets for isoc context in the domain.
2079 * @d: the AMDTP domain.
2080 * @tx_init_skip_cycles: the number of cycles to skip processing packets at the initial stage of IR contexts
2082 * @replay_seq: whether to replay the sequence of packets in the IR context for the sequence of packets in the IT context
2084 * @replay_on_the_fly: transfer rx packets according to the nominal frequency, then begin to replay
2085 * according to the arrival of events in tx packets.
2087 int amdtp_domain_start(struct amdtp_domain *d, unsigned int tx_init_skip_cycles, bool replay_seq,
2088 bool replay_on_the_fly)
2090 unsigned int events_per_buffer = d->events_per_buffer;
2091 unsigned int events_per_period = d->events_per_period;
2092 unsigned int queue_size;
2093 struct amdtp_stream *s;
2098 err = make_association(d);
2102 d->replay.enable = replay_seq;
2103 d->replay.on_the_fly = replay_on_the_fly;
2105 // Select an IT context as IRQ target.
2106 list_for_each_entry(s, &d->streams, list) {
2107 if (s->direction == AMDTP_OUT_STREAM) {
2116 d->processing_cycle.tx_init_skip = tx_init_skip_cycles;
2118 // This is the case that AMDTP streams in the domain run just for a MIDI
2119 // substream. Use the number of events equivalent to 10 msec as the
2120 // interval of hardware IRQ.
2121 if (events_per_period == 0)
2122 events_per_period = amdtp_rate_table[d->irq_target->sfc] / 100;
2123 if (events_per_buffer == 0)
2124 events_per_buffer = events_per_period * 3;
2126 queue_size = DIV_ROUND_UP(CYCLES_PER_SECOND * events_per_buffer,
2127 amdtp_rate_table[d->irq_target->sfc]);
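// For example, with the defaults above at 48000 Hz, events_per_period becomes
// 480 (10 msec) and events_per_buffer 1440, so queue_size is
// DIV_ROUND_UP(8000 * 1440, 48000) = 240 cycles (30 msec).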
2129 list_for_each_entry(s, &d->streams, list) {
2130 unsigned int idle_irq_interval = 0;
2132 if (s->direction == AMDTP_OUT_STREAM && s == d->irq_target) {
2133 idle_irq_interval = DIV_ROUND_UP(CYCLES_PER_SECOND * events_per_period,
2134 amdtp_rate_table[d->irq_target->sfc]);
2137 // Starts immediately, but the DMA context actually starts several hundred cycles later.
2138 err = amdtp_stream_start(s, s->channel, s->speed, queue_size, idle_irq_interval);
2145 list_for_each_entry(s, &d->streams, list)
2146 amdtp_stream_stop(s);
2149 EXPORT_SYMBOL_GPL(amdtp_domain_start);
2152 * amdtp_domain_stop - stop sending packets for isoc context in the same domain.
2153 * @d: the AMDTP domain to which the isoc contexts belong.
2155 void amdtp_domain_stop(struct amdtp_domain *d)
2157 struct amdtp_stream *s, *next;
2160 amdtp_stream_stop(d->irq_target);
2162 list_for_each_entry_safe(s, next, &d->streams, list) {
2165 if (s != d->irq_target)
2166 amdtp_stream_stop(s);
2169 d->events_per_period = 0;
2170 d->irq_target = NULL;
2172 EXPORT_SYMBOL_GPL(amdtp_domain_stop);