Commit | Line | Data |
---|---|---|
da607e19 | 1 | // SPDX-License-Identifier: GPL-2.0-only |
31ef9134 CL |
2 | /* |
3 | * Audio and Music Data Transmission Protocol (IEC 61883-6) streams | |
4 | * with Common Isochronous Packet (IEC 61883-1) headers | |
5 | * | |
6 | * Copyright (c) Clemens Ladisch <clemens@ladisch.de> | |
31ef9134 CL |
7 | */ |
8 | ||
9 | #include <linux/device.h> | |
10 | #include <linux/err.h> | |
11 | #include <linux/firewire.h> | |
acfedcbe | 12 | #include <linux/firewire-constants.h> |
31ef9134 CL |
13 | #include <linux/module.h> |
14 | #include <linux/slab.h> | |
15 | #include <sound/pcm.h> | |
7b2d99fa | 16 | #include <sound/pcm_params.h> |
d67c46b9 | 17 | #include "amdtp-stream.h" |
31ef9134 CL |
18 | |
19 | #define TICKS_PER_CYCLE 3072 | |
20 | #define CYCLES_PER_SECOND 8000 | |
21 | #define TICKS_PER_SECOND (TICKS_PER_CYCLE * CYCLES_PER_SECOND) | |
22 | ||
3e106f4f | 23 | #define OHCI_SECOND_MODULUS 8 |
10aa8e4a | 24 | |
0c95c1d6 TS |
25 | /* Always support Linux tracing subsystem. */ |
26 | #define CREATE_TRACE_POINTS | |
27 | #include "amdtp-stream-trace.h" | |
28 | ||
ca5b5050 | 29 | #define TRANSFER_DELAY_TICKS 0x2e00 /* 479.17 microseconds */ |
31ef9134 | 30 | |
b445db44 TS |
31 | /* isochronous header parameters */ |
32 | #define ISO_DATA_LENGTH_SHIFT 16 | |
3b196c39 | 33 | #define TAG_NO_CIP_HEADER 0 |
31ef9134 CL |
34 | #define TAG_CIP 1 |
35 | ||
b445db44 | 36 | /* common isochronous packet header parameters */ |
9a2820c1 TS |
37 | #define CIP_EOH_SHIFT 31 |
38 | #define CIP_EOH (1u << CIP_EOH_SHIFT) | |
b445db44 | 39 | #define CIP_EOH_MASK 0x80000000 |
9a2820c1 TS |
40 | #define CIP_SID_SHIFT 24 |
41 | #define CIP_SID_MASK 0x3f000000 | |
42 | #define CIP_DBS_MASK 0x00ff0000 | |
43 | #define CIP_DBS_SHIFT 16 | |
9863874f TS |
44 | #define CIP_SPH_MASK 0x00000400 |
45 | #define CIP_SPH_SHIFT 10 | |
9a2820c1 TS |
46 | #define CIP_DBC_MASK 0x000000ff |
47 | #define CIP_FMT_SHIFT 24 | |
b445db44 | 48 | #define CIP_FMT_MASK 0x3f000000 |
9a2820c1 TS |
49 | #define CIP_FDF_MASK 0x00ff0000 |
50 | #define CIP_FDF_SHIFT 16 | |
b445db44 TS |
51 | #define CIP_SYT_MASK 0x0000ffff |
52 | #define CIP_SYT_NO_INFO 0xffff | |
b445db44 | 53 | |
51c29fd2 | 54 | /* Audio and Music transfer protocol specific parameters */ |
414ba022 | 55 | #define CIP_FMT_AM 0x10 |
2b3fc456 | 56 | #define AMDTP_FDF_NO_DATA 0xff |
31ef9134 | 57 | |
f11453c7 TS |
58 | // For iso header, tstamp and 2 CIP header. |
59 | #define IR_CTX_HEADER_SIZE_CIP 16 | |
60 | // For iso header and tstamp. | |
61 | #define IR_CTX_HEADER_SIZE_NO_CIP 8 | |
cc4f8e91 | 62 | #define HEADER_TSTAMP_MASK 0x0000ffff |
4b7da117 | 63 | |
b18f0cfa TS |
64 | #define IT_PKT_HEADER_SIZE_CIP 8 // For 2 CIP header. |
65 | #define IT_PKT_HEADER_SIZE_NO_CIP 0 // Nothing. | |
66 | ||
6a3ce97d TS |
67 | // The initial firmware of OXFW970 can postpone transmission of packet during finishing |
68 | // asynchronous transaction. This module accepts 5 cycles to skip as maximum to avoid buffer | |
69 | // overrun. Actual device can skip more, then this module stops the packet streaming. | |
70 | #define IR_JUMBO_PAYLOAD_MAX_SKIP_CYCLES 5 | |
71 | ||
2b3d2987 | 72 | static void pcm_period_work(struct work_struct *work); |
76fb8789 | 73 | |
31ef9134 | 74 | /** |
be4a2894 TS |
75 | * amdtp_stream_init - initialize an AMDTP stream structure |
76 | * @s: the AMDTP stream to initialize | |
31ef9134 | 77 | * @unit: the target of the stream |
3ff7e8f0 | 78 | * @dir: the direction of stream |
ffe66bbe | 79 | * @flags: the details of the streaming protocol consist of cip_flags enumeration-constants. |
5955815e | 80 | * @fmt: the value of fmt field in CIP header |
9a738ad1 | 81 | * @process_ctx_payloads: callback handler to process payloads of isoc context |
df075fee | 82 | * @protocol_size: the size to allocate newly for protocol |
31ef9134 | 83 | */ |
int amdtp_stream_init(struct amdtp_stream *s, struct fw_unit *unit,
                      enum amdtp_stream_direction dir, unsigned int flags,
                      unsigned int fmt,
                      amdtp_stream_process_ctx_payloads_t process_ctx_payloads,
                      unsigned int protocol_size)
{
        /* A payload-processing callback is mandatory for any stream. */
        if (process_ctx_payloads == NULL)
                return -EINVAL;

        /* Protocol-specific data is owned by the stream and released in
         * amdtp_stream_destroy(); zero-filled so callers start clean. */
        s->protocol = kzalloc(protocol_size, GFP_KERNEL);
        if (!s->protocol)
                return -ENOMEM;

        s->unit = unit;
        s->direction = dir;
        s->flags = flags;
        /* Sentinel: no isochronous context is allocated yet. */
        s->context = ERR_PTR(-1);
        mutex_init(&s->mutex);
        INIT_WORK(&s->period_work, pcm_period_work);
        s->packet_index = 0;

        /* Used to wait for the first isoc callback after starting. */
        init_waitqueue_head(&s->callback_wait);
        s->callbacked = false;

        s->fmt = fmt;
        s->process_ctx_payloads = process_ctx_payloads;

        /* Negative value appears to mean "no fixed SYT overrides the
         * computed one" for IT contexts — confirm against the rx path. */
        if (dir == AMDTP_OUT_STREAM)
                s->ctx_data.rx.syt_override = -1;

        return 0;
}
EXPORT_SYMBOL(amdtp_stream_init);
31ef9134 CL |
117 | |
118 | /** | |
be4a2894 TS |
119 | * amdtp_stream_destroy - free stream resources |
120 | * @s: the AMDTP stream to destroy | |
31ef9134 | 121 | */ |
be4a2894 | 122 | void amdtp_stream_destroy(struct amdtp_stream *s) |
31ef9134 | 123 | { |
44c376b9 TS |
124 | /* Not initialized. */ |
125 | if (s->protocol == NULL) | |
126 | return; | |
127 | ||
be4a2894 | 128 | WARN_ON(amdtp_stream_running(s)); |
df075fee | 129 | kfree(s->protocol); |
31ef9134 | 130 | mutex_destroy(&s->mutex); |
31ef9134 | 131 | } |
be4a2894 | 132 | EXPORT_SYMBOL(amdtp_stream_destroy); |
31ef9134 | 133 | |
/* Events per isochronous packet (SYT_INTERVAL) per sampling frequency code. */
const unsigned int amdtp_syt_intervals[CIP_SFC_COUNT] = {
        [CIP_SFC_32000]  =  8,
        [CIP_SFC_44100]  =  8,
        [CIP_SFC_48000]  =  8,
        [CIP_SFC_88200]  = 16,
        [CIP_SFC_96000]  = 16,
        [CIP_SFC_176400] = 32,
        [CIP_SFC_192000] = 32,
};
EXPORT_SYMBOL(amdtp_syt_intervals);
144 | ||
/* Nominal sampling rate in Hz for each sampling frequency code (SFC). */
const unsigned int amdtp_rate_table[CIP_SFC_COUNT] = {
        [CIP_SFC_32000]  =  32000,
        [CIP_SFC_44100]  =  44100,
        [CIP_SFC_48000]  =  48000,
        [CIP_SFC_88200]  =  88200,
        [CIP_SFC_96000]  =  96000,
        [CIP_SFC_176400] = 176400,
        [CIP_SFC_192000] = 192000,
};
EXPORT_SYMBOL(amdtp_rate_table);
155 | ||
59502295 TS |
156 | static int apply_constraint_to_size(struct snd_pcm_hw_params *params, |
157 | struct snd_pcm_hw_rule *rule) | |
158 | { | |
159 | struct snd_interval *s = hw_param_interval(params, rule->var); | |
160 | const struct snd_interval *r = | |
161 | hw_param_interval_c(params, SNDRV_PCM_HW_PARAM_RATE); | |
826b5de9 TS |
162 | struct snd_interval t = {0}; |
163 | unsigned int step = 0; | |
59502295 TS |
164 | int i; |
165 | ||
166 | for (i = 0; i < CIP_SFC_COUNT; ++i) { | |
826b5de9 TS |
167 | if (snd_interval_test(r, amdtp_rate_table[i])) |
168 | step = max(step, amdtp_syt_intervals[i]); | |
59502295 TS |
169 | } |
170 | ||
826b5de9 TS |
171 | t.min = roundup(s->min, step); |
172 | t.max = rounddown(s->max, step); | |
173 | t.integer = 1; | |
59502295 TS |
174 | |
175 | return snd_interval_refine(s, &t); | |
176 | } | |
177 | ||
7b2d99fa TS |
178 | /** |
179 | * amdtp_stream_add_pcm_hw_constraints - add hw constraints for PCM substream | |
180 | * @s: the AMDTP stream, which must be initialized. | |
181 | * @runtime: the PCM substream runtime | |
182 | */ | |
int amdtp_stream_add_pcm_hw_constraints(struct amdtp_stream *s,
                                        struct snd_pcm_runtime *runtime)
{
        struct snd_pcm_hardware *hw = &runtime->hw;
        unsigned int ctx_header_size;
        unsigned int maximum_usec_per_period;
        int err;

        /* Capabilities common to all AMDTP-backed PCM substreams. */
        hw->info = SNDRV_PCM_INFO_BATCH |
                   SNDRV_PCM_INFO_BLOCK_TRANSFER |
                   SNDRV_PCM_INFO_INTERLEAVED |
                   SNDRV_PCM_INFO_JOINT_DUPLEX |
                   SNDRV_PCM_INFO_MMAP |
                   SNDRV_PCM_INFO_MMAP_VALID;

        /* SNDRV_PCM_INFO_BATCH */
        hw->periods_min = 2;
        hw->periods_max = UINT_MAX;

        /* bytes for a frame */
        hw->period_bytes_min = 4 * hw->channels_max;

        /* Just to prevent from allocating much pages. */
        hw->period_bytes_max = hw->period_bytes_min * 2048;
        hw->buffer_bytes_max = hw->period_bytes_max * hw->periods_min;

        // Linux driver for 1394 OHCI controller voluntarily flushes isoc
        // context when total size of accumulated context header reaches
        // PAGE_SIZE. This kicks work for the isoc context and brings
        // callback in the middle of scheduled interrupts.
        // Although AMDTP streams in the same domain use the same events per
        // IRQ, use the largest size of context header between IT/IR contexts.
        // Here, use the value of context header in IR context is for both
        // contexts.
        if (!(s->flags & CIP_NO_HEADER))
                ctx_header_size = IR_CTX_HEADER_SIZE_CIP;
        else
                ctx_header_size = IR_CTX_HEADER_SIZE_NO_CIP;
        maximum_usec_per_period = USEC_PER_SEC * PAGE_SIZE /
                                  CYCLES_PER_SECOND / ctx_header_size;

        // In IEC 61883-6, one isoc packet can transfer events up to the value
        // of syt interval. This comes from the interval of isoc cycle. As 1394
        // OHCI controller can generate hardware IRQ per isoc packet, the
        // interval is 125 usec.
        // However, there are two ways of transmission in IEC 61883-6; blocking
        // and non-blocking modes. In blocking mode, the sequence of isoc packet
        // includes 'empty' or 'NODATA' packets which include no event. In
        // non-blocking mode, the number of events per packet is variable up to
        // the syt interval.
        // Due to the above protocol design, the minimum PCM frames per
        // interrupt should be double of the value of syt interval, thus it is
        // 250 usec.
        err = snd_pcm_hw_constraint_minmax(runtime,
                                           SNDRV_PCM_HW_PARAM_PERIOD_TIME,
                                           250, maximum_usec_per_period);
        if (err < 0)
                goto end;

        /* Non-Blocking stream has no more constraints */
        if (!(s->flags & CIP_BLOCKING))
                goto end;

        /*
         * One AMDTP packet can include some frames. In blocking mode, the
         * number equals to SYT_INTERVAL. So the number is 8, 16 or 32,
         * depending on its sampling rate. For accurate period interrupt, it's
         * preferrable to align period/buffer sizes to current SYT_INTERVAL.
         */
        err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_PERIOD_SIZE,
                                  apply_constraint_to_size, NULL,
                                  SNDRV_PCM_HW_PARAM_PERIOD_SIZE,
                                  SNDRV_PCM_HW_PARAM_RATE, -1);
        if (err < 0)
                goto end;
        err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_BUFFER_SIZE,
                                  apply_constraint_to_size, NULL,
                                  SNDRV_PCM_HW_PARAM_BUFFER_SIZE,
                                  SNDRV_PCM_HW_PARAM_RATE, -1);
        if (err < 0)
                goto end;
end:
        return err;
}
EXPORT_SYMBOL(amdtp_stream_add_pcm_hw_constraints);
268 | ||
31ef9134 | 269 | /** |
be4a2894 TS |
270 | * amdtp_stream_set_parameters - set stream parameters |
271 | * @s: the AMDTP stream to configure | |
31ef9134 | 272 | * @rate: the sample rate |
df075fee | 273 | * @data_block_quadlets: the size of a data block in quadlet unit |
31ef9134 | 274 | * |
a7304e3b | 275 | * The parameters must be set before the stream is started, and must not be |
31ef9134 CL |
276 | * changed while the stream is running. |
277 | */ | |
df075fee TS |
278 | int amdtp_stream_set_parameters(struct amdtp_stream *s, unsigned int rate, |
279 | unsigned int data_block_quadlets) | |
31ef9134 | 280 | { |
df075fee | 281 | unsigned int sfc; |
31ef9134 | 282 | |
547e631c | 283 | for (sfc = 0; sfc < ARRAY_SIZE(amdtp_rate_table); ++sfc) { |
1017abed | 284 | if (amdtp_rate_table[sfc] == rate) |
547e631c TS |
285 | break; |
286 | } | |
287 | if (sfc == ARRAY_SIZE(amdtp_rate_table)) | |
288 | return -EINVAL; | |
e84d15f6 | 289 | |
e84d15f6 | 290 | s->sfc = sfc; |
df075fee | 291 | s->data_block_quadlets = data_block_quadlets; |
a7304e3b | 292 | s->syt_interval = amdtp_syt_intervals[sfc]; |
e84d15f6 | 293 | |
d3d10a4a TS |
294 | // default buffering in the device. |
295 | if (s->direction == AMDTP_OUT_STREAM) { | |
296 | s->ctx_data.rx.transfer_delay = | |
297 | TRANSFER_DELAY_TICKS - TICKS_PER_CYCLE; | |
298 | ||
299 | if (s->flags & CIP_BLOCKING) { | |
300 | // additional buffering needed to adjust for no-data | |
301 | // packets. | |
302 | s->ctx_data.rx.transfer_delay += | |
303 | TICKS_PER_SECOND * s->syt_interval / rate; | |
304 | } | |
305 | } | |
77d2a8a4 | 306 | |
547e631c | 307 | return 0; |
31ef9134 | 308 | } |
be4a2894 | 309 | EXPORT_SYMBOL(amdtp_stream_set_parameters); |
31ef9134 CL |
310 | |
311 | /** | |
be4a2894 TS |
312 | * amdtp_stream_get_max_payload - get the stream's packet size |
313 | * @s: the AMDTP stream | |
31ef9134 CL |
314 | * |
315 | * This function must not be called before the stream has been configured | |
be4a2894 | 316 | * with amdtp_stream_set_parameters(). |
31ef9134 | 317 | */ |
be4a2894 | 318 | unsigned int amdtp_stream_get_max_payload(struct amdtp_stream *s) |
31ef9134 | 319 | { |
a2064710 | 320 | unsigned int multiplier = 1; |
07ea238c | 321 | unsigned int cip_header_size = 0; |
a2064710 TS |
322 | |
323 | if (s->flags & CIP_JUMBO_PAYLOAD) | |
6a3ce97d | 324 | multiplier = IR_JUMBO_PAYLOAD_MAX_SKIP_CYCLES; |
3b196c39 | 325 | if (!(s->flags & CIP_NO_HEADER)) |
07ea238c | 326 | cip_header_size = sizeof(__be32) * 2; |
a2064710 | 327 | |
07ea238c TS |
328 | return cip_header_size + |
329 | s->syt_interval * s->data_block_quadlets * sizeof(__be32) * multiplier; | |
31ef9134 | 330 | } |
be4a2894 | 331 | EXPORT_SYMBOL(amdtp_stream_get_max_payload); |
31ef9134 | 332 | |
76fb8789 | 333 | /** |
be4a2894 TS |
334 | * amdtp_stream_pcm_prepare - prepare PCM device for running |
335 | * @s: the AMDTP stream | |
76fb8789 CL |
336 | * |
337 | * This function should be called from the PCM device's .prepare callback. | |
338 | */ | |
be4a2894 | 339 | void amdtp_stream_pcm_prepare(struct amdtp_stream *s) |
76fb8789 | 340 | { |
2b3d2987 | 341 | cancel_work_sync(&s->period_work); |
76fb8789 CL |
342 | s->pcm_buffer_pointer = 0; |
343 | s->pcm_period_pointer = 0; | |
344 | } | |
be4a2894 | 345 | EXPORT_SYMBOL(amdtp_stream_pcm_prepare); |
76fb8789 | 346 | |
274fc355 TS |
/*
 * Compute the number of data blocks for the next packet and advance the
 * per-stream sequence state (*data_block_state).
 * - Blocking mode: either 0 (NO_INFO packet) or exactly syt_interval.
 * - Non-blocking, non-44.1k-based: constant, precomputed rate/8000.
 * - Non-blocking, 44.1k-based: a repeating phase pattern that averages to
 *   the exact rate over the pattern period.
 */
static unsigned int calculate_data_blocks(unsigned int *data_block_state,
                                          bool is_blocking, bool is_no_info,
                                          unsigned int syt_interval, enum cip_sfc sfc)
{
        unsigned int data_blocks;

        /* Blocking mode. */
        if (is_blocking) {
                /* This module generate empty packet for 'no data'. */
                if (is_no_info)
                        data_blocks = 0;
                else
                        data_blocks = syt_interval;
        /* Non-blocking mode. */
        } else {
                if (!cip_sfc_is_base_44100(sfc)) {
                        // Sample_rate / 8000 is an integer, and precomputed.
                        data_blocks = *data_block_state;
                } else {
                        unsigned int phase = *data_block_state;

                        /*
                         * This calculates the number of data blocks per packet so that
                         * 1) the overall rate is correct and exactly synchronized to
                         *    the bus clock, and
                         * 2) packets with a rounded-up number of blocks occur as early
                         *    as possible in the sequence (to prevent underruns of the
                         *    device's buffer).
                         */
                        if (sfc == CIP_SFC_44100)
                                /* 6 6 5 6 5 6 5 ... */
                                data_blocks = 5 + ((phase & 1) ^
                                                   (phase == 0 || phase >= 40));
                        else
                                /* 12 11 11 11 11 ... or 23 22 22 22 22 ... */
                                data_blocks = 11 * (sfc >> 1) + (phase == 0);
                        /* Pattern period: 80 for 44.1k, 40 for 88.2k, 20 for 176.4k. */
                        if (++phase >= (80 >> (sfc >> 1)))
                                phase = 0;
                        *data_block_state = phase;
                }
        }

        return data_blocks;
}
391 | ||
816d8482 TS |
/*
 * Compute the SYT offset (in ticks within a cycle) for the next packet and
 * advance the per-stream state. Returns CIP_SYT_NO_INFO when the offset
 * carries into the following cycle, i.e. this packet has no timing info.
 */
static unsigned int calculate_syt_offset(unsigned int *last_syt_offset,
                                         unsigned int *syt_offset_state, enum cip_sfc sfc)
{
        unsigned int syt_offset;

        if (*last_syt_offset < TICKS_PER_CYCLE) {
                if (!cip_sfc_is_base_44100(sfc))
                        /* Constant per-packet increment, precomputed in state. */
                        syt_offset = *last_syt_offset + *syt_offset_state;
                else {
                        /*
                         * The time, in ticks, of the n'th SYT_INTERVAL sample is:
                         *   n * SYT_INTERVAL * 24576000 / sample_rate
                         * Modulo TICKS_PER_CYCLE, the difference between successive
                         * elements is about 1386.23. Rounding the results of this
                         * formula to the SYT precision results in a sequence of
                         * differences that begins with:
                         *   1386 1386 1387 1386 1386 1386 1387 1386 1386 1386 1387 ...
                         * This code generates _exactly_ the same sequence.
                         */
                        unsigned int phase = *syt_offset_state;
                        unsigned int index = phase % 13;

                        syt_offset = *last_syt_offset;
                        syt_offset += 1386 + ((index && !(index & 3)) ||
                                              phase == 146);
                        if (++phase >= 147)
                                phase = 0;
                        *syt_offset_state = phase;
                }
        } else
                /* Previous offset overflowed into this cycle; consume the carry. */
                syt_offset = *last_syt_offset - TICKS_PER_CYCLE;
        *last_syt_offset = syt_offset;

        /* An offset beyond this cycle means "no timing info" for this packet. */
        if (syt_offset >= TICKS_PER_CYCLE)
                syt_offset = CIP_SYT_NO_INFO;

        return syt_offset;
}
430 | ||
4b7da117 TS |
431 | static void update_pcm_pointers(struct amdtp_stream *s, |
432 | struct snd_pcm_substream *pcm, | |
433 | unsigned int frames) | |
65845f29 TS |
434 | { |
435 | unsigned int ptr; | |
436 | ||
4b7da117 TS |
437 | ptr = s->pcm_buffer_pointer + frames; |
438 | if (ptr >= pcm->runtime->buffer_size) | |
439 | ptr -= pcm->runtime->buffer_size; | |
6aa7de05 | 440 | WRITE_ONCE(s->pcm_buffer_pointer, ptr); |
4b7da117 TS |
441 | |
442 | s->pcm_period_pointer += frames; | |
443 | if (s->pcm_period_pointer >= pcm->runtime->period_size) { | |
444 | s->pcm_period_pointer -= pcm->runtime->period_size; | |
2b3d2987 | 445 | queue_work(system_highpri_wq, &s->period_work); |
4b7da117 TS |
446 | } |
447 | } | |
448 | ||
2b3d2987 | 449 | static void pcm_period_work(struct work_struct *work) |
4b7da117 | 450 | { |
2b3d2987 TI |
451 | struct amdtp_stream *s = container_of(work, struct amdtp_stream, |
452 | period_work); | |
6aa7de05 | 453 | struct snd_pcm_substream *pcm = READ_ONCE(s->pcm); |
4b7da117 TS |
454 | |
455 | if (pcm) | |
456 | snd_pcm_period_elapsed(pcm); | |
457 | } | |
458 | ||
e229853d TS |
459 | static int queue_packet(struct amdtp_stream *s, struct fw_iso_packet *params, |
460 | bool sched_irq) | |
4b7da117 | 461 | { |
6007bf54 | 462 | int err; |
df9160b9 | 463 | |
e229853d | 464 | params->interrupt = sched_irq; |
6007bf54 TS |
465 | params->tag = s->tag; |
466 | params->sy = 0; | |
df9160b9 | 467 | |
6007bf54 | 468 | err = fw_iso_context_queue(s->context, params, &s->buffer.iso_buffer, |
4b7da117 TS |
469 | s->buffer.packets[s->packet_index].offset); |
470 | if (err < 0) { | |
471 | dev_err(&s->unit->device, "queueing error: %d\n", err); | |
472 | goto end; | |
473 | } | |
474 | ||
a0e02331 | 475 | if (++s->packet_index >= s->queue_size) |
4b7da117 TS |
476 | s->packet_index = 0; |
477 | end: | |
478 | return err; | |
479 | } | |
480 | ||
481 | static inline int queue_out_packet(struct amdtp_stream *s, | |
e229853d | 482 | struct fw_iso_packet *params, bool sched_irq) |
4b7da117 | 483 | { |
b18f0cfa TS |
484 | params->skip = |
485 | !!(params->header_length == 0 && params->payload_length == 0); | |
e229853d | 486 | return queue_packet(s, params, sched_irq); |
4b7da117 TS |
487 | } |
488 | ||
6007bf54 | 489 | static inline int queue_in_packet(struct amdtp_stream *s, |
60dd4929 | 490 | struct fw_iso_packet *params) |
2b3fc456 | 491 | { |
6007bf54 TS |
492 | // Queue one packet for IR context. |
493 | params->header_length = s->ctx_data.tx.ctx_header_size; | |
494 | params->payload_length = s->ctx_data.tx.max_ctx_payload_length; | |
495 | params->skip = false; | |
60dd4929 | 496 | return queue_packet(s, params, false); |
2b3fc456 TS |
497 | } |
498 | ||
/*
 * Fill the two big-endian CIP header quadlets for an outgoing packet:
 * quadlet 0 carries SID, DBS, SPH and DBC; quadlet 1 carries EOH, FMT,
 * FDF and the SYT field.
 */
static void generate_cip_header(struct amdtp_stream *s, __be32 cip_header[2],
                                unsigned int data_block_counter, unsigned int syt)
{
        cip_header[0] = cpu_to_be32(READ_ONCE(s->source_node_id_field) |
                                (s->data_block_quadlets << CIP_DBS_SHIFT) |
                                ((s->sph << CIP_SPH_SHIFT) & CIP_SPH_MASK) |
                                data_block_counter);
        cip_header[1] = cpu_to_be32(CIP_EOH |
                                ((s->fmt << CIP_FMT_SHIFT) & CIP_FMT_MASK) |
                                ((s->ctx_data.rx.fdf << CIP_FDF_SHIFT) & CIP_FDF_MASK) |
                                (syt & CIP_SYT_MASK));
}
511 | ||
6bc1a269 TS |
512 | static void build_it_pkt_header(struct amdtp_stream *s, unsigned int cycle, |
513 | struct fw_iso_packet *params, | |
860d798c TS |
514 | unsigned int data_blocks, |
515 | unsigned int data_block_counter, | |
516 | unsigned int syt, unsigned int index) | |
31ef9134 | 517 | { |
0ebf3ceb | 518 | unsigned int payload_length; |
16be4589 | 519 | __be32 *cip_header; |
20e44577 | 520 | |
0ebf3ceb TS |
521 | payload_length = data_blocks * sizeof(__be32) * s->data_block_quadlets; |
522 | params->payload_length = payload_length; | |
523 | ||
b18f0cfa | 524 | if (!(s->flags & CIP_NO_HEADER)) { |
6bc1a269 | 525 | cip_header = (__be32 *)params->header; |
860d798c | 526 | generate_cip_header(s, cip_header, data_block_counter, syt); |
6bc1a269 | 527 | params->header_length = 2 * sizeof(__be32); |
0ebf3ceb | 528 | payload_length += params->header_length; |
b18f0cfa TS |
529 | } else { |
530 | cip_header = NULL; | |
531 | } | |
31ef9134 | 532 | |
213fa989 | 533 | trace_amdtp_packet(s, cycle, cip_header, payload_length, data_blocks, |
814b4312 | 534 | data_block_counter, s->packet_index, index); |
3b196c39 TS |
535 | } |
536 | ||
e335425b TS |
/*
 * Parse and validate a two-quadlet CIP header of a received packet.
 * On success, stores the block count in *data_blocks, the dbc field in
 * *data_block_counter and the raw SYT field in *syt.
 * Returns 0 on success, -EAGAIN for a header that should be skipped,
 * -EPROTO for an invalid dbs field, -EIO on dbc discontinuity.
 */
static int check_cip_header(struct amdtp_stream *s, const __be32 *buf,
                            unsigned int payload_length,
                            unsigned int *data_blocks,
                            unsigned int *data_block_counter, unsigned int *syt)
{
        u32 cip_header[2];
        unsigned int sph;
        unsigned int fmt;
        unsigned int fdf;
        unsigned int dbc;
        bool lost;

        cip_header[0] = be32_to_cpu(buf[0]);
        cip_header[1] = be32_to_cpu(buf[1]);

        /*
         * This module supports 'Two-quadlet CIP header with SYT field'.
         * For convenience, also check FMT field is AM824 or not.
         */
        if ((((cip_header[0] & CIP_EOH_MASK) == CIP_EOH) ||
             ((cip_header[1] & CIP_EOH_MASK) != CIP_EOH)) &&
            (!(s->flags & CIP_HEADER_WITHOUT_EOH))) {
                dev_info_ratelimited(&s->unit->device,
                                     "Invalid CIP header for AMDTP: %08X:%08X\n",
                                     cip_header[0], cip_header[1]);
                return -EAGAIN;
        }

        /* Check valid protocol or not. */
        sph = (cip_header[0] & CIP_SPH_MASK) >> CIP_SPH_SHIFT;
        fmt = (cip_header[1] & CIP_FMT_MASK) >> CIP_FMT_SHIFT;
        if (sph != s->sph || fmt != s->fmt) {
                dev_info_ratelimited(&s->unit->device,
                                     "Detect unexpected protocol: %08x %08x\n",
                                     cip_header[0], cip_header[1]);
                return -EAGAIN;
        }

        /* Calculate data blocks */
        fdf = (cip_header[1] & CIP_FDF_MASK) >> CIP_FDF_SHIFT;
        if (payload_length == 0 || (fmt == CIP_FMT_AM && fdf == AMDTP_FDF_NO_DATA)) {
                *data_blocks = 0;
        } else {
                unsigned int data_block_quadlets =
                        (cip_header[0] & CIP_DBS_MASK) >> CIP_DBS_SHIFT;
                /* avoid division by zero */
                if (data_block_quadlets == 0) {
                        dev_err(&s->unit->device,
                                "Detect invalid value in dbs field: %08X\n",
                                cip_header[0]);
                        return -EPROTO;
                }
                /* Some devices report a wrong dbs; trust our own value then. */
                if (s->flags & CIP_WRONG_DBS)
                        data_block_quadlets = s->data_block_quadlets;

                *data_blocks = payload_length / sizeof(__be32) / data_block_quadlets;
        }

        /* Check data block counter continuity */
        dbc = cip_header[0] & CIP_DBC_MASK;
        /* Some devices put a bogus dbc into empty packets; reuse ours. */
        if (*data_blocks == 0 && (s->flags & CIP_EMPTY_HAS_WRONG_DBC) &&
            *data_block_counter != UINT_MAX)
                dbc = *data_block_counter;

        /* UINT_MAX marks the first packet; no continuity check possible. */
        if ((dbc == 0x00 && (s->flags & CIP_SKIP_DBC_ZERO_CHECK)) ||
            *data_block_counter == UINT_MAX) {
                lost = false;
        } else if (!(s->flags & CIP_DBC_IS_END_EVENT)) {
                /* dbc counts the first event of this packet. */
                lost = dbc != *data_block_counter;
        } else {
                /* dbc counts the end of this packet; expect an increment. */
                unsigned int dbc_interval;

                if (*data_blocks > 0 && s->ctx_data.tx.dbc_interval > 0)
                        dbc_interval = s->ctx_data.tx.dbc_interval;
                else
                        dbc_interval = *data_blocks;

                /* The dbc field wraps at 8 bits. */
                lost = dbc != ((*data_block_counter + dbc_interval) & 0xff);
        }

        if (lost) {
                dev_err(&s->unit->device,
                        "Detect discontinuity of CIP: %02X %02X\n",
                        *data_block_counter, dbc);
                return -EIO;
        }

        *data_block_counter = dbc;

        *syt = cip_header[1] & CIP_SYT_MASK;

        return 0;
}
630 | ||
98e3e43b TS |
631 | static int parse_ir_ctx_header(struct amdtp_stream *s, unsigned int cycle, |
632 | const __be32 *ctx_header, | |
a35463d1 TS |
633 | unsigned int *data_blocks, |
634 | unsigned int *data_block_counter, | |
814b4312 | 635 | unsigned int *syt, unsigned int packet_index, unsigned int index) |
e335425b | 636 | { |
ebd2a647 | 637 | unsigned int payload_length; |
f11453c7 | 638 | const __be32 *cip_header; |
395f41e2 | 639 | unsigned int cip_header_size; |
e335425b TS |
640 | int err; |
641 | ||
ebd2a647 | 642 | payload_length = be32_to_cpu(ctx_header[0]) >> ISO_DATA_LENGTH_SHIFT; |
395f41e2 TS |
643 | |
644 | if (!(s->flags & CIP_NO_HEADER)) | |
645 | cip_header_size = 8; | |
646 | else | |
647 | cip_header_size = 0; | |
648 | ||
ebd2a647 | 649 | if (payload_length > cip_header_size + s->ctx_data.tx.max_ctx_payload_length) { |
e335425b TS |
650 | dev_err(&s->unit->device, |
651 | "Detect jumbo payload: %04x %04x\n", | |
ebd2a647 | 652 | payload_length, cip_header_size + s->ctx_data.tx.max_ctx_payload_length); |
e335425b TS |
653 | return -EIO; |
654 | } | |
655 | ||
395f41e2 | 656 | if (cip_header_size > 0) { |
ebd2a647 | 657 | if (payload_length >= cip_header_size) { |
c09010ee | 658 | cip_header = ctx_header + 2; |
4fd18787 TS |
659 | err = check_cip_header(s, cip_header, payload_length - cip_header_size, |
660 | data_blocks, data_block_counter, syt); | |
c09010ee TS |
661 | if (err < 0) |
662 | return err; | |
663 | } else { | |
664 | // Handle the cycle so that empty packet arrives. | |
665 | cip_header = NULL; | |
666 | *data_blocks = 0; | |
667 | *syt = 0; | |
668 | } | |
947b437e TS |
669 | } else { |
670 | cip_header = NULL; | |
76864868 | 671 | err = 0; |
ebd2a647 | 672 | *data_blocks = payload_length / sizeof(__be32) / s->data_block_quadlets; |
98e3e43b | 673 | *syt = 0; |
7fbf9096 | 674 | |
a35463d1 TS |
675 | if (*data_block_counter == UINT_MAX) |
676 | *data_block_counter = 0; | |
e335425b TS |
677 | } |
678 | ||
ebd2a647 | 679 | trace_amdtp_packet(s, cycle, cip_header, payload_length, *data_blocks, |
814b4312 | 680 | *data_block_counter, packet_index, index); |
e335425b | 681 | |
76864868 | 682 | return err; |
2b3fc456 TS |
683 | } |
684 | ||
26cd1e58 TS |
685 | // In CYCLE_TIMER register of IEEE 1394, 7 bits are used to represent second. On |
686 | // the other hand, in DMA descriptors of 1394 OHCI, 3 bits are used to represent | |
687 | // it. Thus, via Linux firewire subsystem, we can get the 3 bits for second. | |
3e106f4f | 688 | static inline u32 compute_ohci_cycle_count(__be32 ctx_header_tstamp) |
73fc7f08 | 689 | { |
26cd1e58 | 690 | u32 tstamp = be32_to_cpu(ctx_header_tstamp) & HEADER_TSTAMP_MASK; |
73fc7f08 TS |
691 | return (((tstamp >> 13) & 0x07) * 8000) + (tstamp & 0x1fff); |
692 | } | |
693 | ||
3e106f4f | 694 | static inline u32 increment_ohci_cycle_count(u32 cycle, unsigned int addend) |
73fc7f08 TS |
695 | { |
696 | cycle += addend; | |
3e106f4f TS |
697 | if (cycle >= OHCI_SECOND_MODULUS * CYCLES_PER_SECOND) |
698 | cycle -= OHCI_SECOND_MODULUS * CYCLES_PER_SECOND; | |
73fc7f08 TS |
699 | return cycle; |
700 | } | |
701 | ||
705794c5 TS |
702 | static int compare_ohci_cycle_count(u32 lval, u32 rval) |
703 | { | |
704 | if (lval == rval) | |
705 | return 0; | |
706 | else if (lval < rval && rval - lval < OHCI_SECOND_MODULUS * CYCLES_PER_SECOND / 2) | |
707 | return -1; | |
708 | else | |
709 | return 1; | |
710 | } | |
711 | ||
26cd1e58 | 712 | // Align to actual cycle count for the packet which is going to be scheduled. |
a0e02331 TS |
713 | // This module queued the same number of isochronous cycle as the size of queue |
714 | // to kip isochronous cycle, therefore it's OK to just increment the cycle by | |
715 | // the size of queue for scheduled cycle. | |
3e106f4f TS |
716 | static inline u32 compute_ohci_it_cycle(const __be32 ctx_header_tstamp, |
717 | unsigned int queue_size) | |
26cd1e58 | 718 | { |
3e106f4f TS |
719 | u32 cycle = compute_ohci_cycle_count(ctx_header_tstamp); |
720 | return increment_ohci_cycle_count(cycle, queue_size); | |
26cd1e58 TS |
721 | } |
722 | ||
// Build packet descriptors for 'packets' isochronous packets received in an
// IR context, checking cycle continuity against s->next_cycle. On success
// *desc_count holds the number of descriptors written (it can exceed
// 'packets' by the synthesized entries for skipped cycles in the
// CIP_NO_HEADER case). Returns 0, -EIO on an unrecoverable cycle
// discontinuity, or a negative error from parse_ir_ctx_header().
static int generate_device_pkt_descs(struct amdtp_stream *s,
				     struct pkt_desc *descs,
				     const __be32 *ctx_header,
				     unsigned int packets,
				     unsigned int *desc_count)
{
	unsigned int next_cycle = s->next_cycle;
	unsigned int dbc = s->data_block_counter;
	unsigned int packet_index = s->packet_index;
	unsigned int queue_size = s->queue_size;
	int i;
	int err;

	*desc_count = 0;
	for (i = 0; i < packets; ++i) {
		struct pkt_desc *desc = descs + *desc_count;
		unsigned int cycle;
		bool lost;
		unsigned int data_blocks;
		unsigned int syt;

		// The second quadlet of the IR context header carries the
		// timestamp of the received packet.
		cycle = compute_ohci_cycle_count(ctx_header[1]);
		lost = (next_cycle != cycle);
		if (lost) {
			if (s->flags & CIP_NO_HEADER) {
				// Fireface skips transmission just for an isoc cycle corresponding
				// to empty packet.
				unsigned int prev_cycle = next_cycle;

				next_cycle = increment_ohci_cycle_count(next_cycle, 1);
				lost = (next_cycle != cycle);
				if (!lost) {
					// Prepare a description for the skipped cycle for
					// sequence replay.
					desc->cycle = prev_cycle;
					desc->syt = 0;
					desc->data_blocks = 0;
					desc->data_block_counter = dbc;
					desc->ctx_payload = NULL;
					++desc;
					++(*desc_count);
				}
			} else if (s->flags & CIP_JUMBO_PAYLOAD) {
				// OXFW970 skips transmission for several isoc cycles during
				// asynchronous transaction. The sequence replay is impossible due
				// to the reason.
				unsigned int safe_cycle = increment_ohci_cycle_count(next_cycle,
								IR_JUMBO_PAYLOAD_MAX_SKIP_CYCLES);
				// Tolerate the gap as long as it stays within the
				// allowed skip window.
				lost = (compare_ohci_cycle_count(safe_cycle, cycle) > 0);
			}
			if (lost) {
				dev_err(&s->unit->device, "Detect discontinuity of cycle: %d %d\n",
					next_cycle, cycle);
				return -EIO;
			}
		}

		// parse_ir_ctx_header() may also update dbc from the CIP header.
		err = parse_ir_ctx_header(s, cycle, ctx_header, &data_blocks, &dbc, &syt,
					  packet_index, i);
		if (err < 0)
			return err;

		desc->cycle = cycle;
		desc->syt = syt;
		desc->data_blocks = data_blocks;
		desc->data_block_counter = dbc;
		desc->ctx_payload = s->buffer.packets[packet_index].buffer;

		// When DBC marks the beginning of events, advance it here for
		// the next packet; otherwise the parsed value is kept as-is.
		if (!(s->flags & CIP_DBC_IS_END_EVENT))
			dbc = (dbc + desc->data_blocks) & 0xff;

		next_cycle = increment_ohci_cycle_count(next_cycle, 1);
		++(*desc_count);
		ctx_header += s->ctx_data.tx.ctx_header_size / sizeof(*ctx_header);
		packet_index = (packet_index + 1) % queue_size;
	}

	// Commit the advanced state back to the stream.
	s->next_cycle = next_cycle;
	s->data_block_counter = dbc;

	return 0;
}
805 | ||
83cfb5c5 TS |
806 | static unsigned int compute_syt(unsigned int syt_offset, unsigned int cycle, |
807 | unsigned int transfer_delay) | |
808 | { | |
809 | unsigned int syt; | |
810 | ||
811 | syt_offset += transfer_delay; | |
812 | syt = ((cycle + syt_offset / TICKS_PER_CYCLE) << 12) | | |
813 | (syt_offset % TICKS_PER_CYCLE); | |
814 | return syt & CIP_SYT_MASK; | |
815 | } | |
816 | ||
// Build packet descriptors for 'packets' outgoing (IT context) packets from
// the domain-pooled sequence descriptors, computing per-packet cycle, SYT and
// data block counter, and advancing s->data_block_counter and the rx sequence
// index accordingly.
static void generate_pkt_descs(struct amdtp_stream *s, struct pkt_desc *descs,
			       const __be32 *ctx_header, unsigned int packets,
			       const struct seq_desc *seq_descs,
			       unsigned int seq_size)
{
	unsigned int dbc = s->data_block_counter;
	unsigned int seq_index = s->ctx_data.rx.seq_index;
	int i;

	for (i = 0; i < packets; ++i) {
		struct pkt_desc *desc = descs + i;
		unsigned int index = (s->packet_index + i) % s->queue_size;
		const struct seq_desc *seq = seq_descs + seq_index;
		unsigned int syt;

		desc->cycle = compute_ohci_it_cycle(*ctx_header, s->queue_size);

		syt = seq->syt_offset;
		// CIP_SYT_NO_INFO is passed through unmodified.
		if (syt != CIP_SYT_NO_INFO) {
			syt = compute_syt(syt, desc->cycle,
					  s->ctx_data.rx.transfer_delay);
		}
		desc->syt = syt;
		desc->data_blocks = seq->data_blocks;

		// DBC semantics differ per device: with CIP_DBC_IS_END_EVENT
		// the counter is advanced before being recorded in the
		// descriptor, otherwise after. Keep this ordering intact.
		if (s->flags & CIP_DBC_IS_END_EVENT)
			dbc = (dbc + desc->data_blocks) & 0xff;

		desc->data_block_counter = dbc;

		if (!(s->flags & CIP_DBC_IS_END_EVENT))
			dbc = (dbc + desc->data_blocks) & 0xff;

		desc->ctx_payload = s->buffer.packets[index].buffer;

		seq_index = (seq_index + 1) % seq_size;

		++ctx_header;
	}

	s->data_block_counter = dbc;
	s->ctx_data.rx.seq_index = seq_index;
}
860 | ||
// Mark the stream as stopped due to a streaming error: an invalid
// packet_index signals the error state, and the PCM pointer is set to the
// XRUN sentinel. When called from the stream's own period work, abort the
// PCM substream immediately as well.
static inline void cancel_stream(struct amdtp_stream *s)
{
	s->packet_index = -1;
	if (current_work() == &s->period_work)
		amdtp_stream_pcm_abort(s);
	WRITE_ONCE(s->pcm_buffer_pointer, SNDRV_PCM_POS_XRUN);
}
868 | ||
// Hand the packet descriptors to the stream's data-block processing layer
// and, when a PCM substream is attached, advance its buffer pointer by the
// number of PCM frames the callback reports.
static void process_ctx_payloads(struct amdtp_stream *s,
				 const struct pkt_desc *descs,
				 unsigned int packets)
{
	struct snd_pcm_substream *pcm;
	unsigned int pcm_frames;

	// READ_ONCE pairs with concurrent attach/detach of the substream.
	pcm = READ_ONCE(s->pcm);
	pcm_frames = s->process_ctx_payloads(s, descs, packets, pcm);
	if (pcm)
		update_pcm_pointers(s, pcm, pcm_frames);
}
881 | ||
// Completion callback for an IT context: generate descriptors for the
// completed cycles, process their payloads, then queue the same number of
// fresh packets. A hardware IRQ is requested once per period when this
// stream is the domain's IRQ target.
static void out_stream_callback(struct fw_iso_context *context, u32 tstamp,
				size_t header_length, void *header,
				void *private_data)
{
	struct amdtp_stream *s = private_data;
	const struct amdtp_domain *d = s->domain;
	const __be32 *ctx_header = header;
	unsigned int events_per_period = d->events_per_period;
	unsigned int event_count = s->ctx_data.rx.event_count;
	unsigned int packets;
	int i;

	// A negative packet_index means a previous error cancelled the stream.
	if (s->packet_index < 0)
		return;

	// Calculate the number of packets in buffer and check XRUN.
	packets = header_length / sizeof(*ctx_header);

	generate_pkt_descs(s, s->pkt_descs, ctx_header, packets, d->seq.descs,
			   d->seq.size);

	process_ctx_payloads(s, s->pkt_descs, packets);

	for (i = 0; i < packets; ++i) {
		const struct pkt_desc *desc = s->pkt_descs + i;
		unsigned int syt;
		// Room for the CIP header follows the packet parameters;
		// build_it_pkt_header() fills both.
		struct {
			struct fw_iso_packet params;
			__be32 header[IT_PKT_HEADER_SIZE_CIP / sizeof(__be32)];
		} template = { {0}, {0} };
		bool sched_irq = false;

		// A negative syt_override means "use the computed SYT".
		if (s->ctx_data.rx.syt_override < 0)
			syt = desc->syt;
		else
			syt = s->ctx_data.rx.syt_override;

		build_it_pkt_header(s, desc->cycle, &template.params,
				    desc->data_blocks, desc->data_block_counter,
				    syt, i);

		// Only the IRQ target accounts events toward period IRQs.
		if (s == s->domain->irq_target) {
			event_count += desc->data_blocks;
			if (event_count >= events_per_period) {
				event_count -= events_per_period;
				sched_irq = true;
			}
		}

		if (queue_out_packet(s, &template.params, sched_irq) < 0) {
			cancel_stream(s);
			return;
		}
	}

	s->ctx_data.rx.event_count = event_count;
}
939 | ||
// Completion callback for an IR context: parse the completed packets into
// descriptors, process their payloads, and re-queue one packet per completed
// cycle. -EAGAIN from descriptor generation is treated as a transient
// condition: payload processing is skipped but the queue is still refilled.
static void in_stream_callback(struct fw_iso_context *context, u32 tstamp,
			       size_t header_length, void *header,
			       void *private_data)
{
	struct amdtp_stream *s = private_data;
	__be32 *ctx_header = header;
	unsigned int packets;
	unsigned int desc_count;
	int i;
	int err;

	// A negative packet_index means a previous error cancelled the stream.
	if (s->packet_index < 0)
		return;

	// Calculate the number of packets in buffer and check XRUN.
	packets = header_length / s->ctx_data.tx.ctx_header_size;

	desc_count = 0;
	err = generate_device_pkt_descs(s, s->pkt_descs, ctx_header, packets, &desc_count);
	if (err < 0) {
		if (err != -EAGAIN) {
			cancel_stream(s);
			return;
		}
	} else {
		process_ctx_payloads(s, s->pkt_descs, desc_count);
	}

	for (i = 0; i < packets; ++i) {
		struct fw_iso_packet params = {0};

		if (queue_in_packet(s, &params) < 0) {
			cancel_stream(s);
			return;
		}
	}
}
977 | ||
// Ensure the domain's ring of sequence descriptors holds at least 'packets'
// entries ahead of the slowest consumer. The minimum number of already
// pooled, unconsumed entries across all OUT streams is computed first; new
// entries are then appended at the tail until the requested depth is reached.
static void pool_ideal_seq_descs(struct amdtp_domain *d, unsigned int packets)
{
	struct amdtp_stream *irq_target = d->irq_target;
	unsigned int seq_tail = d->seq.tail;
	unsigned int seq_size = d->seq.size;
	unsigned int min_avail;
	struct amdtp_stream *s;

	min_avail = d->seq.size;
	list_for_each_entry(s, &d->streams, list) {
		unsigned int seq_index;
		unsigned int avail;

		// IN streams do not consume sequence descriptors.
		if (s->direction == AMDTP_IN_STREAM)
			continue;

		// Distance from this stream's read index to the ring tail,
		// accounting for wrap-around.
		seq_index = s->ctx_data.rx.seq_index;
		avail = d->seq.tail;
		if (seq_index > avail)
			avail += d->seq.size;
		avail -= seq_index;

		if (avail < min_avail)
			min_avail = avail;
	}

	while (min_avail < packets) {
		struct seq_desc *desc = d->seq.descs + seq_tail;

		desc->syt_offset = calculate_syt_offset(&d->last_syt_offset,
					&d->syt_offset_state, irq_target->sfc);
		desc->data_blocks = calculate_data_blocks(&d->data_block_state,
				!!(irq_target->flags & CIP_BLOCKING),
				desc->syt_offset == CIP_SYT_NO_INFO,
				irq_target->syt_interval, irq_target->sfc);

		++seq_tail;
		seq_tail %= seq_size;

		++min_avail;
	}

	d->seq.tail = seq_tail;
}
1022 | ||
// Completion callback installed on the domain's IRQ-target IT context. It
// pools sequence descriptors for the upcoming cycles, runs the normal OUT
// processing for itself, then flushes the completions of every other running
// stream in the domain. Any streaming error cancels all streams.
static void irq_target_callback(struct fw_iso_context *context, u32 tstamp,
				size_t header_length, void *header,
				void *private_data)
{
	struct amdtp_stream *irq_target = private_data;
	struct amdtp_domain *d = irq_target->domain;
	unsigned int packets = header_length / sizeof(__be32);
	struct amdtp_stream *s;

	// Record enough entries with extra 3 cycles at least.
	pool_ideal_seq_descs(d, packets + 3);

	out_stream_callback(context, tstamp, header_length, header, irq_target);
	if (amdtp_streaming_error(irq_target))
		goto error;

	list_for_each_entry(s, &d->streams, list) {
		if (s != irq_target && amdtp_stream_running(s)) {
			fw_iso_context_flush_completions(s->context);
			if (amdtp_streaming_error(s))
				goto error;
		}
	}

	return;
error:
	if (amdtp_stream_running(irq_target))
		cancel_stream(irq_target);

	list_for_each_entry(s, &d->streams, list) {
		if (amdtp_stream_running(s))
			cancel_stream(s);
	}
}
1057 | ||
// this is executed one time.
// First completion callback of a context: wakes waiters, records the start
// cycle, installs the appropriate steady-state callback (IR, IT, or IRQ
// target) and chains into it for the packets already completed.
static void amdtp_stream_first_callback(struct fw_iso_context *context,
					u32 tstamp, size_t header_length,
					void *header, void *private_data)
{
	struct amdtp_stream *s = private_data;
	const __be32 *ctx_header = header;
	u32 cycle;

	/*
	 * For in-stream, first packet has come.
	 * For out-stream, prepared to transmit first packet
	 */
	s->callbacked = true;
	wake_up(&s->callback_wait);

	if (s->direction == AMDTP_IN_STREAM) {
		cycle = compute_ohci_cycle_count(ctx_header[1]);
		s->next_cycle = cycle;

		context->callback.sc = in_stream_callback;
	} else {
		cycle = compute_ohci_it_cycle(*ctx_header, s->queue_size);

		// The domain's IRQ target gets the callback which drives all
		// other streams in the domain.
		if (s == s->domain->irq_target)
			context->callback.sc = irq_target_callback;
		else
			context->callback.sc = out_stream_callback;
	}

	s->start_cycle = cycle;

	// Process this very batch with the newly installed callback.
	context->callback.sc(context, tstamp, header_length, header, s);
}
1092 | ||
/**
 * amdtp_stream_start - start transferring packets
 * @s: the AMDTP stream to start
 * @channel: the isochronous channel on the bus
 * @speed: firewire speed code
 * @start_cycle: the isochronous cycle to start the context. Start immediately
 *		 if negative value is given.
 * @queue_size: The number of packets in the queue.
 * @idle_irq_interval: the interval to queue packet during initial state.
 *
 * The stream cannot be started until it has been configured with
 * amdtp_stream_set_parameters() and it must be started before any PCM or MIDI
 * device can be started.
 *
 * Return: 0 on success, or a negative error code. On failure all resources
 * allocated by this function are released again.
 */
static int amdtp_stream_start(struct amdtp_stream *s, int channel, int speed,
			      int start_cycle, unsigned int queue_size,
			      unsigned int idle_irq_interval)
{
	bool is_irq_target = (s == s->domain->irq_target);
	unsigned int ctx_header_size;
	unsigned int max_ctx_payload_size;
	enum dma_data_direction dir;
	int type, tag, err;

	mutex_lock(&s->mutex);

	if (WARN_ON(amdtp_stream_running(s) ||
		    (s->data_block_quadlets < 1))) {
		err = -EBADFD;
		goto err_unlock;
	}

	if (s->direction == AMDTP_IN_STREAM) {
		// NOTE: IT context should be used for constant IRQ.
		if (is_irq_target) {
			err = -EINVAL;
			goto err_unlock;
		}

		// UINT_MAX marks the counter as "not yet synchronized to the
		// device's DBC".
		s->data_block_counter = UINT_MAX;
	} else {
		s->data_block_counter = 0;
	}

	// initialize packet buffer.
	max_ctx_payload_size = amdtp_stream_get_max_payload(s);
	if (s->direction == AMDTP_IN_STREAM) {
		dir = DMA_FROM_DEVICE;
		type = FW_ISO_CONTEXT_RECEIVE;
		if (!(s->flags & CIP_NO_HEADER)) {
			// The two CIP header quadlets are delivered in the
			// context header, not in the payload.
			max_ctx_payload_size -= 8;
			ctx_header_size = IR_CTX_HEADER_SIZE_CIP;
		} else {
			ctx_header_size = IR_CTX_HEADER_SIZE_NO_CIP;
		}
	} else {
		dir = DMA_TO_DEVICE;
		type = FW_ISO_CONTEXT_TRANSMIT;
		ctx_header_size = 0;	// No effect for IT context.

		if (!(s->flags & CIP_NO_HEADER))
			max_ctx_payload_size -= IT_PKT_HEADER_SIZE_CIP;
	}

	err = iso_packets_buffer_init(&s->buffer, s->unit, queue_size,
				      max_ctx_payload_size, dir);
	if (err < 0)
		goto err_unlock;
	s->queue_size = queue_size;

	s->context = fw_iso_context_create(fw_parent_device(s->unit)->card,
					   type, channel, speed, ctx_header_size,
					   amdtp_stream_first_callback, s);
	if (IS_ERR(s->context)) {
		err = PTR_ERR(s->context);
		if (err == -EBUSY)
			dev_err(&s->unit->device,
				"no free stream on this controller\n");
		goto err_buffer;
	}

	amdtp_stream_update(s);

	if (s->direction == AMDTP_IN_STREAM) {
		s->ctx_data.tx.max_ctx_payload_length = max_ctx_payload_size;
		s->ctx_data.tx.ctx_header_size = ctx_header_size;
	}

	if (s->flags & CIP_NO_HEADER)
		s->tag = TAG_NO_CIP_HEADER;
	else
		s->tag = TAG_CIP;

	s->pkt_descs = kcalloc(s->queue_size, sizeof(*s->pkt_descs),
			       GFP_KERNEL);
	if (!s->pkt_descs) {
		err = -ENOMEM;
		goto err_context;
	}

	// Pre-fill the whole queue; packet_index wraps back to 0 when full.
	s->packet_index = 0;
	do {
		struct fw_iso_packet params;

		if (s->direction == AMDTP_IN_STREAM) {
			err = queue_in_packet(s, &params);
		} else {
			bool sched_irq = false;

			params.header_length = 0;
			params.payload_length = 0;

			if (is_irq_target) {
				sched_irq = !((s->packet_index + 1) %
					      idle_irq_interval);
			}

			err = queue_out_packet(s, &params, sched_irq);
		}
		if (err < 0)
			goto err_pkt_descs;
	} while (s->packet_index > 0);

	/* NOTE: TAG1 matches CIP. This just affects in stream. */
	tag = FW_ISO_CONTEXT_MATCH_TAG1;
	if ((s->flags & CIP_EMPTY_WITH_TAG0) || (s->flags & CIP_NO_HEADER))
		tag |= FW_ISO_CONTEXT_MATCH_TAG0;

	s->callbacked = false;
	err = fw_iso_context_start(s->context, start_cycle, 0, tag);
	if (err < 0)
		goto err_pkt_descs;

	mutex_unlock(&s->mutex);

	return 0;
err_pkt_descs:
	kfree(s->pkt_descs);
err_context:
	fw_iso_context_destroy(s->context);
	s->context = ERR_PTR(-1);
err_buffer:
	iso_packets_buffer_destroy(&s->buffer, s->unit);
err_unlock:
	mutex_unlock(&s->mutex);

	return err;
}
31ef9134 | 1241 | |
/**
 * amdtp_domain_stream_pcm_pointer - get the PCM buffer position
 * @d: the AMDTP domain.
 * @s: the AMDTP stream that transports the PCM data
 *
 * Returns the current buffer position, in frames.
 */
unsigned long amdtp_domain_stream_pcm_pointer(struct amdtp_domain *d,
					      struct amdtp_stream *s)
{
	struct amdtp_stream *irq_target = d->irq_target;

	if (irq_target && amdtp_stream_running(irq_target)) {
		// This function is called in software IRQ context of
		// period_work or process context.
		//
		// When the software IRQ context was scheduled by software IRQ
		// context of IT contexts, queued packets were already handled.
		// Therefore, no need to flush the queue in buffer furthermore.
		//
		// When the process context reach here, some packets will be
		// already queued in the buffer. These packets should be handled
		// immediately to keep better granularity of PCM pointer.
		//
		// Later, the process context will sometimes schedules software
		// IRQ context of the period_work. Then, no need to flush the
		// queue by the same reason as described in the above
		if (current_work() != &s->period_work) {
			// Queued packet should be processed without any kernel
			// preemption to keep latency against bus cycle.
			preempt_disable();
			fw_iso_context_flush_completions(irq_target->context);
			preempt_enable();
		}
	}

	// READ_ONCE pairs with the updates done in the completion callbacks.
	return READ_ONCE(s->pcm_buffer_pointer);
}
f890f9a0 | 1280 | EXPORT_SYMBOL_GPL(amdtp_domain_stream_pcm_pointer); |
e9148ddd | 1281 | |
/**
 * amdtp_domain_stream_pcm_ack - acknowledge queued PCM frames
 * @d: the AMDTP domain.
 * @s: the AMDTP stream that transfers the PCM frames
 *
 * Returns zero always.
 */
int amdtp_domain_stream_pcm_ack(struct amdtp_domain *d, struct amdtp_stream *s)
{
	struct amdtp_stream *irq_target = d->irq_target;

	// Process isochronous packets for recent isochronous cycle to handle
	// queued PCM frames.
	if (irq_target && amdtp_stream_running(irq_target)) {
		// Queued packet should be processed without any kernel
		// preemption to keep latency against bus cycle.
		preempt_disable();
		fw_iso_context_flush_completions(irq_target->context);
		preempt_enable();
	}

	return 0;
}
e6dcc92f | 1305 | EXPORT_SYMBOL_GPL(amdtp_domain_stream_pcm_ack); |
875becf8 | 1306 | |
/**
 * amdtp_stream_update - update the stream after a bus reset
 * @s: the AMDTP stream
 *
 * Refreshes the cached SID field of outgoing CIP headers from the card's
 * (possibly changed) node ID.
 */
void amdtp_stream_update(struct amdtp_stream *s)
{
	/* Precomputing. */
	WRITE_ONCE(s->source_node_id_field,
		   (fw_parent_device(s->unit)->card->node_id << CIP_SID_SHIFT) & CIP_SID_MASK);
}
be4a2894 | 1317 | EXPORT_SYMBOL(amdtp_stream_update); |
31ef9134 CL |
1318 | |
/**
 * amdtp_stream_stop - stop sending packets
 * @s: the AMDTP stream to stop
 *
 * All PCM and MIDI devices of the stream must be stopped before the stream
 * itself can be stopped.
 */
static void amdtp_stream_stop(struct amdtp_stream *s)
{
	mutex_lock(&s->mutex);

	// Stopping a stream which never started is a no-op.
	if (!amdtp_stream_running(s)) {
		mutex_unlock(&s->mutex);
		return;
	}

	// Make sure no period work runs concurrently with teardown.
	cancel_work_sync(&s->period_work);
	fw_iso_context_stop(s->context);
	fw_iso_context_destroy(s->context);
	s->context = ERR_PTR(-1);
	iso_packets_buffer_destroy(&s->buffer, s->unit);
	kfree(s->pkt_descs);

	s->callbacked = false;

	mutex_unlock(&s->mutex);
}
31ef9134 CL |
1346 | |
/**
 * amdtp_stream_pcm_abort - abort the running PCM device
 * @s: the AMDTP stream about to be stopped
 *
 * If the isochronous stream needs to be stopped asynchronously, call this
 * function first to stop the PCM device.
 */
void amdtp_stream_pcm_abort(struct amdtp_stream *s)
{
	struct snd_pcm_substream *pcm;

	// READ_ONCE pairs with concurrent attach/detach of the substream.
	pcm = READ_ONCE(s->pcm);
	if (pcm)
		snd_pcm_stop_xrun(pcm);
}
be4a2894 | 1362 | EXPORT_SYMBOL(amdtp_stream_pcm_abort); |
3ec3d7a3 TS |
1363 | |
/**
 * amdtp_domain_init - initialize an AMDTP domain structure
 * @d: the AMDTP domain to initialize.
 *
 * Returns zero always; the return type is kept for the exported API.
 */
int amdtp_domain_init(struct amdtp_domain *d)
{
	INIT_LIST_HEAD(&d->streams);

	d->events_per_period = 0;

	// The sequence descriptor ring is allocated in amdtp_domain_start().
	d->seq.descs = NULL;

	return 0;
}
1378 | EXPORT_SYMBOL_GPL(amdtp_domain_init); | |
1379 | ||
/**
 * amdtp_domain_destroy - destroy an AMDTP domain structure
 * @d: the AMDTP domain to destroy.
 *
 * Counterpart of amdtp_domain_init(). The function body is intentionally
 * empty for now; it is kept so callers have a stable teardown hook.
 */
void amdtp_domain_destroy(struct amdtp_domain *d)
{
	// At present nothing to do. The redundant bare 'return;' was removed:
	// a trailing return statement in a void function is not useful.
}
1389 | EXPORT_SYMBOL_GPL(amdtp_domain_destroy); | |
6261f90b | 1390 | |
157a53ee TS |
/**
 * amdtp_domain_add_stream - register isoc context into the domain.
 * @d: the AMDTP domain.
 * @s: the AMDTP stream.
 * @channel: the isochronous channel on the bus.
 * @speed: firewire speed code.
 *
 * Returns 0 on success, -EBUSY when the stream is already in the domain.
 */
int amdtp_domain_add_stream(struct amdtp_domain *d, struct amdtp_stream *s,
			    int channel, int speed)
{
	struct amdtp_stream *tmp;

	// Reject duplicate registration of the same stream.
	list_for_each_entry(tmp, &d->streams, list) {
		if (s == tmp)
			return -EBUSY;
	}

	list_add(&s->list, &d->streams);

	s->channel = channel;
	s->speed = speed;
	s->domain = d;

	return 0;
}
1416 | EXPORT_SYMBOL_GPL(amdtp_domain_add_stream); | |
1417 | ||
// Read the CSR CYCLE_TIME register of the local 1394 OHCI controller and
// store its cycle count portion (upper 20 bits of the register) in
// *cur_cycle. Returns 0 on success or -EIO when the transaction fails.
static int get_current_cycle_time(struct fw_card *fw_card, int *cur_cycle)
{
	int generation;
	int rcode;
	__be32 reg;
	u32 data;

	// This is a request to local 1394 OHCI controller and expected to
	// complete without any event waiting.
	generation = fw_card->generation;
	smp_rmb(); // node_id vs. generation.
	rcode = fw_run_transaction(fw_card, TCODE_READ_QUADLET_REQUEST,
				   fw_card->node_id, generation, SCODE_100,
				   CSR_REGISTER_BASE + CSR_CYCLE_TIME,
				   &reg, sizeof(reg));
	if (rcode != RCODE_COMPLETE)
		return -EIO;

	data = be32_to_cpu(reg);
	// Drop the 12-bit cycle offset; keep second + cycle count.
	*cur_cycle = data >> 12;

	return 0;
}
1441 | ||
9b4702b0 TS |
1442 | /** |
1443 | * amdtp_domain_start - start sending packets for isoc context in the domain. | |
1444 | * @d: the AMDTP domain. | |
acfedcbe | 1445 | * @ir_delay_cycle: the cycle delay to start all IR contexts. |
9b4702b0 | 1446 | */ |
acfedcbe | 1447 | int amdtp_domain_start(struct amdtp_domain *d, unsigned int ir_delay_cycle) |
9b4702b0 | 1448 | { |
1a4be183 TS |
1449 | static const struct { |
1450 | unsigned int data_block; | |
1451 | unsigned int syt_offset; | |
1452 | } *entry, initial_state[] = { | |
1453 | [CIP_SFC_32000] = { 4, 3072 }, | |
1454 | [CIP_SFC_48000] = { 6, 1024 }, | |
1455 | [CIP_SFC_96000] = { 12, 1024 }, | |
1456 | [CIP_SFC_192000] = { 24, 1024 }, | |
1457 | [CIP_SFC_44100] = { 0, 67 }, | |
1458 | [CIP_SFC_88200] = { 0, 67 }, | |
1459 | [CIP_SFC_176400] = { 0, 67 }, | |
1460 | }; | |
af86b0b1 TS |
1461 | unsigned int events_per_buffer = d->events_per_buffer; |
1462 | unsigned int events_per_period = d->events_per_period; | |
1463 | unsigned int idle_irq_interval; | |
1464 | unsigned int queue_size; | |
9b4702b0 | 1465 | struct amdtp_stream *s; |
acfedcbe TS |
1466 | int cycle; |
1467 | int err; | |
9b4702b0 | 1468 | |
60dd4929 | 1469 | // Select an IT context as IRQ target. |
9b4702b0 | 1470 | list_for_each_entry(s, &d->streams, list) { |
60dd4929 | 1471 | if (s->direction == AMDTP_OUT_STREAM) |
9b4702b0 TS |
1472 | break; |
1473 | } | |
60dd4929 TS |
1474 | if (!s) |
1475 | return -ENXIO; | |
1476 | d->irq_target = s; | |
9b4702b0 | 1477 | |
af86b0b1 TS |
1478 | // This is a case that AMDTP streams in domain run just for MIDI |
1479 | // substream. Use the number of events equivalent to 10 msec as | |
1480 | // interval of hardware IRQ. | |
1481 | if (events_per_period == 0) | |
1482 | events_per_period = amdtp_rate_table[d->irq_target->sfc] / 100; | |
1483 | if (events_per_buffer == 0) | |
1484 | events_per_buffer = events_per_period * 3; | |
1485 | ||
1486 | queue_size = DIV_ROUND_UP(CYCLES_PER_SECOND * events_per_buffer, | |
1487 | amdtp_rate_table[d->irq_target->sfc]); | |
1488 | ||
d32872f3 TS |
1489 | d->seq.descs = kcalloc(queue_size, sizeof(*d->seq.descs), GFP_KERNEL); |
1490 | if (!d->seq.descs) | |
25babf29 | 1491 | return -ENOMEM; |
d32872f3 TS |
1492 | d->seq.size = queue_size; |
1493 | d->seq.tail = 0; | |
25babf29 | 1494 | |
1a4be183 TS |
1495 | entry = &initial_state[s->sfc]; |
1496 | d->data_block_state = entry->data_block; | |
1497 | d->syt_offset_state = entry->syt_offset; | |
1498 | d->last_syt_offset = TICKS_PER_CYCLE; | |
1499 | ||
acfedcbe TS |
1500 | if (ir_delay_cycle > 0) { |
1501 | struct fw_card *fw_card = fw_parent_device(s->unit)->card; | |
1502 | ||
1503 | err = get_current_cycle_time(fw_card, &cycle); | |
1504 | if (err < 0) | |
25babf29 | 1505 | goto error; |
acfedcbe TS |
1506 | |
1507 | // No need to care overflow in cycle field because of enough | |
1508 | // width. | |
1509 | cycle += ir_delay_cycle; | |
1510 | ||
1511 | // Round up to sec field. | |
1512 | if ((cycle & 0x00001fff) >= CYCLES_PER_SECOND) { | |
1513 | unsigned int sec; | |
1514 | ||
1515 | // The sec field can overflow. | |
1516 | sec = (cycle & 0xffffe000) >> 13; | |
1517 | cycle = (++sec << 13) | | |
1518 | ((cycle & 0x00001fff) / CYCLES_PER_SECOND); | |
1519 | } | |
1520 | ||
1521 | // In OHCI 1394 specification, lower 2 bits are available for | |
1522 | // sec field. | |
1523 | cycle &= 0x00007fff; | |
1524 | } else { | |
1525 | cycle = -1; | |
1526 | } | |
1527 | ||
60dd4929 | 1528 | list_for_each_entry(s, &d->streams, list) { |
acfedcbe TS |
1529 | int cycle_match; |
1530 | ||
1531 | if (s->direction == AMDTP_IN_STREAM) { | |
1532 | cycle_match = cycle; | |
1533 | } else { | |
1534 | // IT context starts immediately. | |
1535 | cycle_match = -1; | |
1a4be183 | 1536 | s->ctx_data.rx.seq_index = 0; |
acfedcbe TS |
1537 | } |
1538 | ||
60dd4929 | 1539 | if (s != d->irq_target) { |
2472cfb3 | 1540 | err = amdtp_stream_start(s, s->channel, s->speed, |
af86b0b1 | 1541 | cycle_match, queue_size, 0); |
60dd4929 TS |
1542 | if (err < 0) |
1543 | goto error; | |
1544 | } | |
9b4702b0 TS |
1545 | } |
1546 | ||
60dd4929 | 1547 | s = d->irq_target; |
af86b0b1 | 1548 | s->ctx_data.rx.event_count = 0; |
1a4be183 | 1549 | s->ctx_data.rx.seq_index = 0; |
af86b0b1 TS |
1550 | |
1551 | idle_irq_interval = DIV_ROUND_UP(CYCLES_PER_SECOND * events_per_period, | |
1552 | amdtp_rate_table[d->irq_target->sfc]); | |
1553 | err = amdtp_stream_start(s, s->channel, s->speed, -1, queue_size, | |
1554 | idle_irq_interval); | |
60dd4929 TS |
1555 | if (err < 0) |
1556 | goto error; | |
1557 | ||
1558 | return 0; | |
1559 | error: | |
1560 | list_for_each_entry(s, &d->streams, list) | |
1561 | amdtp_stream_stop(s); | |
d32872f3 TS |
1562 | kfree(d->seq.descs); |
1563 | d->seq.descs = NULL; | |
9b4702b0 TS |
1564 | return err; |
1565 | } | |
1566 | EXPORT_SYMBOL_GPL(amdtp_domain_start); | |
1567 | ||
6261f90b TS |
/**
 * amdtp_domain_stop - stop sending packets for isoc context in the same domain.
 * @d: the AMDTP domain to which the isoc contexts belong.
 *
 * Stops the IRQ target first so it no longer drives the other contexts,
 * then unregisters and stops every remaining stream and releases the
 * sequence descriptor ring.
 */
void amdtp_domain_stop(struct amdtp_domain *d)
{
	struct amdtp_stream *s, *next;

	if (d->irq_target)
		amdtp_stream_stop(d->irq_target);

	list_for_each_entry_safe(s, next, &d->streams, list) {
		list_del(&s->list);

		// The IRQ target was already stopped above.
		if (s != d->irq_target)
			amdtp_stream_stop(s);
	}

	d->events_per_period = 0;
	d->irq_target = NULL;

	kfree(d->seq.descs);
	d->seq.descs = NULL;
}
1592 | EXPORT_SYMBOL_GPL(amdtp_domain_stop); |