Commit | Line | Data |
---|---|---|
da607e19 | 1 | // SPDX-License-Identifier: GPL-2.0-only |
31ef9134 CL |
2 | /* |
3 | * Audio and Music Data Transmission Protocol (IEC 61883-6) streams | |
4 | * with Common Isochronous Packet (IEC 61883-1) headers | |
5 | * | |
6 | * Copyright (c) Clemens Ladisch <clemens@ladisch.de> | |
31ef9134 CL |
7 | */ |
8 | ||
9 | #include <linux/device.h> | |
10 | #include <linux/err.h> | |
11 | #include <linux/firewire.h> | |
acfedcbe | 12 | #include <linux/firewire-constants.h> |
31ef9134 CL |
13 | #include <linux/module.h> |
14 | #include <linux/slab.h> | |
15 | #include <sound/pcm.h> | |
7b2d99fa | 16 | #include <sound/pcm_params.h> |
d67c46b9 | 17 | #include "amdtp-stream.h" |
31ef9134 CL |
18 | |
19 | #define TICKS_PER_CYCLE 3072 | |
20 | #define CYCLES_PER_SECOND 8000 | |
21 | #define TICKS_PER_SECOND (TICKS_PER_CYCLE * CYCLES_PER_SECOND) | |
22 | ||
3e106f4f | 23 | #define OHCI_SECOND_MODULUS 8 |
10aa8e4a | 24 | |
0c95c1d6 TS |
25 | /* Always support Linux tracing subsystem. */ |
26 | #define CREATE_TRACE_POINTS | |
27 | #include "amdtp-stream-trace.h" | |
28 | ||
ca5b5050 | 29 | #define TRANSFER_DELAY_TICKS 0x2e00 /* 479.17 microseconds */ |
31ef9134 | 30 | |
b445db44 TS |
31 | /* isochronous header parameters */ |
32 | #define ISO_DATA_LENGTH_SHIFT 16 | |
3b196c39 | 33 | #define TAG_NO_CIP_HEADER 0 |
31ef9134 CL |
34 | #define TAG_CIP 1 |
35 | ||
67d92ee7 TS |
36 | // Common Isochronous Packet (CIP) header parameters. Use two quadlets CIP header when supported. |
37 | #define CIP_HEADER_QUADLETS 2 | |
9a2820c1 TS |
38 | #define CIP_EOH_SHIFT 31 |
39 | #define CIP_EOH (1u << CIP_EOH_SHIFT) | |
b445db44 | 40 | #define CIP_EOH_MASK 0x80000000 |
9a2820c1 TS |
41 | #define CIP_SID_SHIFT 24 |
42 | #define CIP_SID_MASK 0x3f000000 | |
43 | #define CIP_DBS_MASK 0x00ff0000 | |
44 | #define CIP_DBS_SHIFT 16 | |
9863874f TS |
45 | #define CIP_SPH_MASK 0x00000400 |
46 | #define CIP_SPH_SHIFT 10 | |
9a2820c1 TS |
47 | #define CIP_DBC_MASK 0x000000ff |
48 | #define CIP_FMT_SHIFT 24 | |
b445db44 | 49 | #define CIP_FMT_MASK 0x3f000000 |
9a2820c1 TS |
50 | #define CIP_FDF_MASK 0x00ff0000 |
51 | #define CIP_FDF_SHIFT 16 | |
b445db44 TS |
52 | #define CIP_SYT_MASK 0x0000ffff |
53 | #define CIP_SYT_NO_INFO 0xffff | |
b445db44 | 54 | |
67d92ee7 TS |
55 | #define CIP_HEADER_SIZE (sizeof(__be32) * CIP_HEADER_QUADLETS) |
56 | ||
51c29fd2 | 57 | /* Audio and Music transfer protocol specific parameters */ |
414ba022 | 58 | #define CIP_FMT_AM 0x10 |
2b3fc456 | 59 | #define AMDTP_FDF_NO_DATA 0xff |
31ef9134 | 60 | |
f11453c7 | 61 | // For iso header and tstamp. |
67d92ee7 TS |
62 | #define IR_CTX_HEADER_DEFAULT_QUADLETS 2 |
63 | // Add nothing. | |
64 | #define IR_CTX_HEADER_SIZE_NO_CIP (sizeof(__be32) * IR_CTX_HEADER_DEFAULT_QUADLETS) | |
65 | // Add two quadlets CIP header. | |
66 | #define IR_CTX_HEADER_SIZE_CIP (IR_CTX_HEADER_SIZE_NO_CIP + CIP_HEADER_SIZE) | |
cc4f8e91 | 67 | #define HEADER_TSTAMP_MASK 0x0000ffff |
4b7da117 | 68 | |
67d92ee7 | 69 | #define IT_PKT_HEADER_SIZE_CIP CIP_HEADER_SIZE |
b18f0cfa TS |
70 | #define IT_PKT_HEADER_SIZE_NO_CIP 0 // Nothing. |
71 | ||
6a3ce97d TS |
72 | // The initial firmware of OXFW970 can postpone transmission of packet during finishing |
73 | // asynchronous transaction. This module accepts 5 cycles to skip as maximum to avoid buffer | |
74 | // overrun. Actual device can skip more, then this module stops the packet streaming. | |
75 | #define IR_JUMBO_PAYLOAD_MAX_SKIP_CYCLES 5 | |
76 | ||
2b3d2987 | 77 | static void pcm_period_work(struct work_struct *work); |
76fb8789 | 78 | |
31ef9134 | 79 | /** |
be4a2894 TS |
80 | * amdtp_stream_init - initialize an AMDTP stream structure |
81 | * @s: the AMDTP stream to initialize | |
31ef9134 | 82 | * @unit: the target of the stream |
3ff7e8f0 | 83 | * @dir: the direction of stream |
ffe66bbe | 84 | * @flags: the details of the streaming protocol consist of cip_flags enumeration-constants. |
5955815e | 85 | * @fmt: the value of fmt field in CIP header |
9a738ad1 | 86 | * @process_ctx_payloads: callback handler to process payloads of isoc context |
df075fee | 87 | * @protocol_size: the size to allocate newly for protocol |
31ef9134 | 88 | */ |
be4a2894 | 89 | int amdtp_stream_init(struct amdtp_stream *s, struct fw_unit *unit, |
ffe66bbe | 90 | enum amdtp_stream_direction dir, unsigned int flags, |
df075fee | 91 | unsigned int fmt, |
9a738ad1 | 92 | amdtp_stream_process_ctx_payloads_t process_ctx_payloads, |
df075fee | 93 | unsigned int protocol_size) |
31ef9134 | 94 | { |
9a738ad1 | 95 | if (process_ctx_payloads == NULL) |
df075fee TS |
96 | return -EINVAL; |
97 | ||
98 | s->protocol = kzalloc(protocol_size, GFP_KERNEL); | |
99 | if (!s->protocol) | |
100 | return -ENOMEM; | |
101 | ||
c6f224dc | 102 | s->unit = unit; |
3ff7e8f0 | 103 | s->direction = dir; |
31ef9134 CL |
104 | s->flags = flags; |
105 | s->context = ERR_PTR(-1); | |
106 | mutex_init(&s->mutex); | |
2b3d2987 | 107 | INIT_WORK(&s->period_work, pcm_period_work); |
ec00f5e4 | 108 | s->packet_index = 0; |
31ef9134 | 109 | |
bdaedca7 | 110 | init_waitqueue_head(&s->ready_wait); |
7b3b0d85 | 111 | s->callbacked = false; |
7b3b0d85 | 112 | |
5955815e | 113 | s->fmt = fmt; |
9a738ad1 | 114 | s->process_ctx_payloads = process_ctx_payloads; |
414ba022 | 115 | |
31ef9134 CL |
116 | return 0; |
117 | } | |
be4a2894 | 118 | EXPORT_SYMBOL(amdtp_stream_init); |
31ef9134 CL |
119 | |
120 | /** | |
be4a2894 TS |
121 | * amdtp_stream_destroy - free stream resources |
122 | * @s: the AMDTP stream to destroy | |
31ef9134 | 123 | */ |
be4a2894 | 124 | void amdtp_stream_destroy(struct amdtp_stream *s) |
31ef9134 | 125 | { |
44c376b9 TS |
126 | /* Not initialized. */ |
127 | if (s->protocol == NULL) | |
128 | return; | |
129 | ||
be4a2894 | 130 | WARN_ON(amdtp_stream_running(s)); |
df075fee | 131 | kfree(s->protocol); |
31ef9134 | 132 | mutex_destroy(&s->mutex); |
31ef9134 | 133 | } |
be4a2894 | 134 | EXPORT_SYMBOL(amdtp_stream_destroy); |
31ef9134 | 135 | |
// Events per isoc packet for blocking transmission, indexed by sampling
// frequency code (SFC) per IEC 61883-6: 8 for the 32-48 kHz tier, 16 for
// 88.2/96 kHz, 32 for 176.4/192 kHz.
const unsigned int amdtp_syt_intervals[CIP_SFC_COUNT] = {
	[CIP_SFC_32000]  = 8,
	[CIP_SFC_44100]  = 8,
	[CIP_SFC_48000]  = 8,
	[CIP_SFC_88200]  = 16,
	[CIP_SFC_96000]  = 16,
	[CIP_SFC_176400] = 32,
	[CIP_SFC_192000] = 32,
};
EXPORT_SYMBOL(amdtp_syt_intervals);
146 | ||
// Nominal sampling rate in Hz for each sampling frequency code (SFC);
// the index doubles as the reverse mapping used by
// amdtp_stream_set_parameters().
const unsigned int amdtp_rate_table[CIP_SFC_COUNT] = {
	[CIP_SFC_32000]  = 32000,
	[CIP_SFC_44100]  = 44100,
	[CIP_SFC_48000]  = 48000,
	[CIP_SFC_88200]  = 88200,
	[CIP_SFC_96000]  = 96000,
	[CIP_SFC_176400] = 176400,
	[CIP_SFC_192000] = 192000,
};
EXPORT_SYMBOL(amdtp_rate_table);
157 | ||
59502295 TS |
158 | static int apply_constraint_to_size(struct snd_pcm_hw_params *params, |
159 | struct snd_pcm_hw_rule *rule) | |
160 | { | |
161 | struct snd_interval *s = hw_param_interval(params, rule->var); | |
162 | const struct snd_interval *r = | |
163 | hw_param_interval_c(params, SNDRV_PCM_HW_PARAM_RATE); | |
826b5de9 TS |
164 | struct snd_interval t = {0}; |
165 | unsigned int step = 0; | |
59502295 TS |
166 | int i; |
167 | ||
168 | for (i = 0; i < CIP_SFC_COUNT; ++i) { | |
826b5de9 TS |
169 | if (snd_interval_test(r, amdtp_rate_table[i])) |
170 | step = max(step, amdtp_syt_intervals[i]); | |
59502295 TS |
171 | } |
172 | ||
826b5de9 TS |
173 | t.min = roundup(s->min, step); |
174 | t.max = rounddown(s->max, step); | |
175 | t.integer = 1; | |
59502295 TS |
176 | |
177 | return snd_interval_refine(s, &t); | |
178 | } | |
179 | ||
/**
 * amdtp_stream_add_pcm_hw_constraints - add hw constraints for PCM substream
 * @s: the AMDTP stream, which must be initialized.
 * @runtime: the PCM substream runtime
 *
 * Returns 0 on success, or a negative error code from the ALSA constraint
 * API.
 */
int amdtp_stream_add_pcm_hw_constraints(struct amdtp_stream *s,
					struct snd_pcm_runtime *runtime)
{
	struct snd_pcm_hardware *hw = &runtime->hw;
	unsigned int ctx_header_size;
	unsigned int maximum_usec_per_period;
	int err;

	hw->info = SNDRV_PCM_INFO_BATCH |
		   SNDRV_PCM_INFO_BLOCK_TRANSFER |
		   SNDRV_PCM_INFO_INTERLEAVED |
		   SNDRV_PCM_INFO_JOINT_DUPLEX |
		   SNDRV_PCM_INFO_MMAP |
		   SNDRV_PCM_INFO_MMAP_VALID;

	/* SNDRV_PCM_INFO_BATCH */
	hw->periods_min = 2;
	hw->periods_max = UINT_MAX;

	/* bytes for a frame */
	hw->period_bytes_min = 4 * hw->channels_max;

	/* Just to prevent from allocating much pages. */
	hw->period_bytes_max = hw->period_bytes_min * 2048;
	hw->buffer_bytes_max = hw->period_bytes_max * hw->periods_min;

	// Linux driver for 1394 OHCI controller voluntarily flushes isoc
	// context when total size of accumulated context header reaches
	// PAGE_SIZE. This kicks work for the isoc context and brings
	// callback in the middle of scheduled interrupts.
	// Although AMDTP streams in the same domain use the same events per
	// IRQ, use the largest size of context header between IT/IR contexts.
	// Here, use the value of context header in IR context is for both
	// contexts.
	if (!(s->flags & CIP_NO_HEADER))
		ctx_header_size = IR_CTX_HEADER_SIZE_CIP;
	else
		ctx_header_size = IR_CTX_HEADER_SIZE_NO_CIP;
	maximum_usec_per_period = USEC_PER_SEC * PAGE_SIZE /
				  CYCLES_PER_SECOND / ctx_header_size;

	// In IEC 61883-6, one isoc packet can transfer events up to the value
	// of syt interval. This comes from the interval of isoc cycle. As 1394
	// OHCI controller can generate hardware IRQ per isoc packet, the
	// interval is 125 usec.
	// However, there are two ways of transmission in IEC 61883-6; blocking
	// and non-blocking modes. In blocking mode, the sequence of isoc packet
	// includes 'empty' or 'NODATA' packets which include no event. In
	// non-blocking mode, the number of events per packet is variable up to
	// the syt interval.
	// Due to the above protocol design, the minimum PCM frames per
	// interrupt should be double of the value of syt interval, thus it is
	// 250 usec.
	err = snd_pcm_hw_constraint_minmax(runtime,
					   SNDRV_PCM_HW_PARAM_PERIOD_TIME,
					   250, maximum_usec_per_period);
	if (err < 0)
		goto end;

	/* Non-Blocking stream has no more constraints */
	if (!(s->flags & CIP_BLOCKING))
		goto end;

	/*
	 * One AMDTP packet can include some frames. In blocking mode, the
	 * number equals to SYT_INTERVAL. So the number is 8, 16 or 32,
	 * depending on its sampling rate. For accurate period interrupt, it's
	 * preferable to align period/buffer sizes to current SYT_INTERVAL.
	 */
	err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_PERIOD_SIZE,
				  apply_constraint_to_size, NULL,
				  SNDRV_PCM_HW_PARAM_PERIOD_SIZE,
				  SNDRV_PCM_HW_PARAM_RATE, -1);
	if (err < 0)
		goto end;
	err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_BUFFER_SIZE,
				  apply_constraint_to_size, NULL,
				  SNDRV_PCM_HW_PARAM_BUFFER_SIZE,
				  SNDRV_PCM_HW_PARAM_RATE, -1);
	if (err < 0)
		goto end;
end:
	return err;
}
EXPORT_SYMBOL(amdtp_stream_add_pcm_hw_constraints);
270 | ||
31ef9134 | 271 | /** |
be4a2894 TS |
272 | * amdtp_stream_set_parameters - set stream parameters |
273 | * @s: the AMDTP stream to configure | |
31ef9134 | 274 | * @rate: the sample rate |
df075fee | 275 | * @data_block_quadlets: the size of a data block in quadlet unit |
31ef9134 | 276 | * |
a7304e3b | 277 | * The parameters must be set before the stream is started, and must not be |
31ef9134 CL |
278 | * changed while the stream is running. |
279 | */ | |
df075fee TS |
280 | int amdtp_stream_set_parameters(struct amdtp_stream *s, unsigned int rate, |
281 | unsigned int data_block_quadlets) | |
31ef9134 | 282 | { |
df075fee | 283 | unsigned int sfc; |
31ef9134 | 284 | |
547e631c | 285 | for (sfc = 0; sfc < ARRAY_SIZE(amdtp_rate_table); ++sfc) { |
1017abed | 286 | if (amdtp_rate_table[sfc] == rate) |
547e631c TS |
287 | break; |
288 | } | |
289 | if (sfc == ARRAY_SIZE(amdtp_rate_table)) | |
290 | return -EINVAL; | |
e84d15f6 | 291 | |
e84d15f6 | 292 | s->sfc = sfc; |
df075fee | 293 | s->data_block_quadlets = data_block_quadlets; |
a7304e3b | 294 | s->syt_interval = amdtp_syt_intervals[sfc]; |
e84d15f6 | 295 | |
d3d10a4a TS |
296 | // default buffering in the device. |
297 | if (s->direction == AMDTP_OUT_STREAM) { | |
298 | s->ctx_data.rx.transfer_delay = | |
299 | TRANSFER_DELAY_TICKS - TICKS_PER_CYCLE; | |
300 | ||
301 | if (s->flags & CIP_BLOCKING) { | |
302 | // additional buffering needed to adjust for no-data | |
303 | // packets. | |
304 | s->ctx_data.rx.transfer_delay += | |
305 | TICKS_PER_SECOND * s->syt_interval / rate; | |
306 | } | |
307 | } | |
77d2a8a4 | 308 | |
547e631c | 309 | return 0; |
31ef9134 | 310 | } |
be4a2894 | 311 | EXPORT_SYMBOL(amdtp_stream_set_parameters); |
31ef9134 | 312 | |
c75f3678 TS |
313 | // The CIP header is processed in context header apart from context payload. |
314 | static int amdtp_stream_get_max_ctx_payload_size(struct amdtp_stream *s) | |
315 | { | |
316 | unsigned int multiplier; | |
317 | ||
318 | if (s->flags & CIP_JUMBO_PAYLOAD) | |
319 | multiplier = IR_JUMBO_PAYLOAD_MAX_SKIP_CYCLES; | |
320 | else | |
321 | multiplier = 1; | |
322 | ||
323 | return s->syt_interval * s->data_block_quadlets * sizeof(__be32) * multiplier; | |
324 | } | |
325 | ||
31ef9134 | 326 | /** |
be4a2894 TS |
327 | * amdtp_stream_get_max_payload - get the stream's packet size |
328 | * @s: the AMDTP stream | |
31ef9134 CL |
329 | * |
330 | * This function must not be called before the stream has been configured | |
be4a2894 | 331 | * with amdtp_stream_set_parameters(). |
31ef9134 | 332 | */ |
be4a2894 | 333 | unsigned int amdtp_stream_get_max_payload(struct amdtp_stream *s) |
31ef9134 | 334 | { |
c75f3678 | 335 | unsigned int cip_header_size; |
a2064710 | 336 | |
3b196c39 | 337 | if (!(s->flags & CIP_NO_HEADER)) |
67d92ee7 | 338 | cip_header_size = CIP_HEADER_SIZE; |
c75f3678 TS |
339 | else |
340 | cip_header_size = 0; | |
a2064710 | 341 | |
c75f3678 | 342 | return cip_header_size + amdtp_stream_get_max_ctx_payload_size(s); |
31ef9134 | 343 | } |
be4a2894 | 344 | EXPORT_SYMBOL(amdtp_stream_get_max_payload); |
31ef9134 | 345 | |
/**
 * amdtp_stream_pcm_prepare - prepare PCM device for running
 * @s: the AMDTP stream
 *
 * This function should be called from the PCM device's .prepare callback.
 */
void amdtp_stream_pcm_prepare(struct amdtp_stream *s)
{
	// Wait for any in-flight period work before resetting the pointers,
	// so a stale work item cannot observe the new positions.
	cancel_work_sync(&s->period_work);
	s->pcm_buffer_pointer = 0;
	s->pcm_period_pointer = 0;
}
EXPORT_SYMBOL(amdtp_stream_pcm_prepare);
76fb8789 | 359 | |
// Compute the number of data blocks for the next packet and advance the
// per-stream sequence state in *data_block_state. In blocking mode the
// answer is all-or-nothing (syt_interval or 0); in non-blocking mode a
// phase counter distributes blocks so the stream rate exactly matches the
// bus clock.
static unsigned int calculate_data_blocks(unsigned int *data_block_state,
					  bool is_blocking, bool is_no_info,
					  unsigned int syt_interval, enum cip_sfc sfc)
{
	unsigned int data_blocks;

	/* Blocking mode. */
	if (is_blocking) {
		/* This module generate empty packet for 'no data'. */
		if (is_no_info)
			data_blocks = 0;
		else
			data_blocks = syt_interval;
	/* Non-blocking mode. */
	} else {
		if (!cip_sfc_is_base_44100(sfc)) {
			// Sample_rate / 8000 is an integer, and precomputed.
			data_blocks = *data_block_state;
		} else {
			unsigned int phase = *data_block_state;

			/*
			 * This calculates the number of data blocks per packet so that
			 * 1) the overall rate is correct and exactly synchronized to
			 *    the bus clock, and
			 * 2) packets with a rounded-up number of blocks occur as early
			 *    as possible in the sequence (to prevent underruns of the
			 *    device's buffer).
			 */
			if (sfc == CIP_SFC_44100)
				/* 6 6 5 6 5 6 5 ... */
				data_blocks = 5 + ((phase & 1) ^
						   (phase == 0 || phase >= 40));
			else
				/* 12 11 11 11 11 ... or 23 22 22 22 22 ... */
				data_blocks = 11 * (sfc >> 1) + (phase == 0);
			// Phase period is 80 for 44.1 kHz, 40 for 88.2 kHz,
			// 20 for 176.4 kHz.
			if (++phase >= (80 >> (sfc >> 1)))
				phase = 0;
			*data_block_state = phase;
		}
	}

	return data_blocks;
}
404 | ||
// Compute the SYT offset (in ticks within the cycle) for the next packet,
// updating *last_syt_offset and the phase kept in *syt_offset_state.
// Returns CIP_SYT_NO_INFO when the presentation time does not fall inside
// this cycle.
static unsigned int calculate_syt_offset(unsigned int *last_syt_offset,
					 unsigned int *syt_offset_state, enum cip_sfc sfc)
{
	unsigned int syt_offset;

	if (*last_syt_offset < TICKS_PER_CYCLE) {
		if (!cip_sfc_is_base_44100(sfc))
			syt_offset = *last_syt_offset + *syt_offset_state;
		else {
			/*
			 * The time, in ticks, of the n'th SYT_INTERVAL sample is:
			 *   n * SYT_INTERVAL * 24576000 / sample_rate
			 * Modulo TICKS_PER_CYCLE, the difference between successive
			 * elements is about 1386.23.  Rounding the results of this
			 * formula to the SYT precision results in a sequence of
			 * differences that begins with:
			 *   1386 1386 1387 1386 1386 1386 1387 1386 1386 1386 1387 ...
			 * This code generates _exactly_ the same sequence.
			 */
			unsigned int phase = *syt_offset_state;
			unsigned int index = phase % 13;

			syt_offset = *last_syt_offset;
			syt_offset += 1386 + ((index && !(index & 3)) ||
					      phase == 146);
			if (++phase >= 147)
				phase = 0;
			*syt_offset_state = phase;
		}
	} else
		// Previous offset already pointed past this cycle; carry the
		// remainder into the next cycle.
		syt_offset = *last_syt_offset - TICKS_PER_CYCLE;
	*last_syt_offset = syt_offset;

	// Offsets beyond one cycle mean no timestamp for this packet.
	if (syt_offset >= TICKS_PER_CYCLE)
		syt_offset = CIP_SYT_NO_INFO;

	return syt_offset;
}
443 | ||
// Fill 'count' entries of the rx sequence ring with ideal descriptors: each
// gets a SYT offset and the matching number of data blocks, advancing the
// shared sequence state and the ring tail.
static void pool_ideal_seq_descs(struct amdtp_stream *s, unsigned int count)
{
	unsigned int seq_tail = s->ctx_data.rx.seq.tail;
	const unsigned int seq_size = s->ctx_data.rx.seq.size;
	const unsigned int syt_interval = s->syt_interval;
	const enum cip_sfc sfc = s->sfc;
	const bool is_blocking = !!(s->flags & CIP_BLOCKING);
	int i;

	for (i = 0; i < count; ++i) {
		struct seq_desc *desc = s->ctx_data.rx.seq.descs + seq_tail;

		desc->syt_offset = calculate_syt_offset(&s->ctx_data.rx.last_syt_offset,
						&s->ctx_data.rx.syt_offset_state, sfc);
		// In blocking mode, a cycle without SYT information becomes
		// an empty (NODATA) packet.
		desc->data_blocks = calculate_data_blocks(&s->ctx_data.rx.data_block_state,
						is_blocking, desc->syt_offset == CIP_SYT_NO_INFO,
						syt_interval, sfc);

		seq_tail = (seq_tail + 1) % seq_size;
	}

	s->ctx_data.rx.seq.tail = seq_tail;
}
467 | ||
4b7da117 TS |
468 | static void update_pcm_pointers(struct amdtp_stream *s, |
469 | struct snd_pcm_substream *pcm, | |
470 | unsigned int frames) | |
65845f29 TS |
471 | { |
472 | unsigned int ptr; | |
473 | ||
4b7da117 TS |
474 | ptr = s->pcm_buffer_pointer + frames; |
475 | if (ptr >= pcm->runtime->buffer_size) | |
476 | ptr -= pcm->runtime->buffer_size; | |
6aa7de05 | 477 | WRITE_ONCE(s->pcm_buffer_pointer, ptr); |
4b7da117 TS |
478 | |
479 | s->pcm_period_pointer += frames; | |
480 | if (s->pcm_period_pointer >= pcm->runtime->period_size) { | |
481 | s->pcm_period_pointer -= pcm->runtime->period_size; | |
2b3d2987 | 482 | queue_work(system_highpri_wq, &s->period_work); |
4b7da117 TS |
483 | } |
484 | } | |
485 | ||
// Workqueue handler: deliver the period-elapsed event to ALSA.
static void pcm_period_work(struct work_struct *work)
{
	struct amdtp_stream *s = container_of(work, struct amdtp_stream,
					      period_work);
	struct snd_pcm_substream *pcm = READ_ONCE(s->pcm);

	// The substream may have been detached after the work was queued.
	if (pcm)
		snd_pcm_period_elapsed(pcm);
}
495 | ||
e229853d TS |
496 | static int queue_packet(struct amdtp_stream *s, struct fw_iso_packet *params, |
497 | bool sched_irq) | |
4b7da117 | 498 | { |
6007bf54 | 499 | int err; |
df9160b9 | 500 | |
e229853d | 501 | params->interrupt = sched_irq; |
6007bf54 TS |
502 | params->tag = s->tag; |
503 | params->sy = 0; | |
df9160b9 | 504 | |
6007bf54 | 505 | err = fw_iso_context_queue(s->context, params, &s->buffer.iso_buffer, |
4b7da117 TS |
506 | s->buffer.packets[s->packet_index].offset); |
507 | if (err < 0) { | |
508 | dev_err(&s->unit->device, "queueing error: %d\n", err); | |
509 | goto end; | |
510 | } | |
511 | ||
a0e02331 | 512 | if (++s->packet_index >= s->queue_size) |
4b7da117 TS |
513 | s->packet_index = 0; |
514 | end: | |
515 | return err; | |
516 | } | |
517 | ||
518 | static inline int queue_out_packet(struct amdtp_stream *s, | |
e229853d | 519 | struct fw_iso_packet *params, bool sched_irq) |
4b7da117 | 520 | { |
b18f0cfa TS |
521 | params->skip = |
522 | !!(params->header_length == 0 && params->payload_length == 0); | |
e229853d | 523 | return queue_packet(s, params, sched_irq); |
4b7da117 TS |
524 | } |
525 | ||
6007bf54 | 526 | static inline int queue_in_packet(struct amdtp_stream *s, |
60dd4929 | 527 | struct fw_iso_packet *params) |
2b3fc456 | 528 | { |
6007bf54 TS |
529 | // Queue one packet for IR context. |
530 | params->header_length = s->ctx_data.tx.ctx_header_size; | |
531 | params->payload_length = s->ctx_data.tx.max_ctx_payload_length; | |
532 | params->skip = false; | |
60dd4929 | 533 | return queue_packet(s, params, false); |
2b3fc456 TS |
534 | } |
535 | ||
// Build the two-quadlet CIP header: SID/DBS/SPH/DBC in the first quadlet,
// EOH/FMT/FDF/SYT in the second.
static void generate_cip_header(struct amdtp_stream *s, __be32 cip_header[2],
				unsigned int data_block_counter, unsigned int syt)
{
	cip_header[0] = cpu_to_be32(READ_ONCE(s->source_node_id_field) |
				    (s->data_block_quadlets << CIP_DBS_SHIFT) |
				    ((s->sph << CIP_SPH_SHIFT) & CIP_SPH_MASK) |
				    data_block_counter);
	cip_header[1] = cpu_to_be32(CIP_EOH |
				    ((s->fmt << CIP_FMT_SHIFT) & CIP_FMT_MASK) |
				    ((s->ctx_data.rx.fdf << CIP_FDF_SHIFT) & CIP_FDF_MASK) |
				    (syt & CIP_SYT_MASK));
}
548 | ||
6bc1a269 | 549 | static void build_it_pkt_header(struct amdtp_stream *s, unsigned int cycle, |
233dbbc7 | 550 | struct fw_iso_packet *params, unsigned int header_length, |
860d798c TS |
551 | unsigned int data_blocks, |
552 | unsigned int data_block_counter, | |
553 | unsigned int syt, unsigned int index) | |
31ef9134 | 554 | { |
0ebf3ceb | 555 | unsigned int payload_length; |
16be4589 | 556 | __be32 *cip_header; |
20e44577 | 557 | |
0ebf3ceb TS |
558 | payload_length = data_blocks * sizeof(__be32) * s->data_block_quadlets; |
559 | params->payload_length = payload_length; | |
560 | ||
233dbbc7 | 561 | if (header_length > 0) { |
6bc1a269 | 562 | cip_header = (__be32 *)params->header; |
860d798c | 563 | generate_cip_header(s, cip_header, data_block_counter, syt); |
233dbbc7 | 564 | params->header_length = header_length; |
b18f0cfa TS |
565 | } else { |
566 | cip_header = NULL; | |
567 | } | |
31ef9134 | 568 | |
233dbbc7 | 569 | trace_amdtp_packet(s, cycle, cip_header, payload_length + header_length, data_blocks, |
814b4312 | 570 | data_block_counter, s->packet_index, index); |
3b196c39 TS |
571 | } |
572 | ||
// Parse and validate the two-quadlet CIP header of a received packet.
// On success, fills *data_blocks, updates *data_block_counter and (unless
// the stream has CIP_UNAWARE_SYT) stores the SYT field into *syt.
// Returns 0 on success, -EAGAIN for a packet to be skipped, -EPROTO for an
// invalid DBS field, -EIO for a DBC discontinuity.
static int check_cip_header(struct amdtp_stream *s, const __be32 *buf,
			    unsigned int payload_length,
			    unsigned int *data_blocks,
			    unsigned int *data_block_counter, unsigned int *syt)
{
	u32 cip_header[2];
	unsigned int sph;
	unsigned int fmt;
	unsigned int fdf;
	unsigned int dbc;
	bool lost;

	cip_header[0] = be32_to_cpu(buf[0]);
	cip_header[1] = be32_to_cpu(buf[1]);

	/*
	 * This module supports 'Two-quadlet CIP header with SYT field'.
	 * For convenience, also check FMT field is AM824 or not.
	 */
	if ((((cip_header[0] & CIP_EOH_MASK) == CIP_EOH) ||
	     ((cip_header[1] & CIP_EOH_MASK) != CIP_EOH)) &&
	    (!(s->flags & CIP_HEADER_WITHOUT_EOH))) {
		dev_info_ratelimited(&s->unit->device,
				     "Invalid CIP header for AMDTP: %08X:%08X\n",
				     cip_header[0], cip_header[1]);
		return -EAGAIN;
	}

	/* Check valid protocol or not. */
	sph = (cip_header[0] & CIP_SPH_MASK) >> CIP_SPH_SHIFT;
	fmt = (cip_header[1] & CIP_FMT_MASK) >> CIP_FMT_SHIFT;
	if (sph != s->sph || fmt != s->fmt) {
		dev_info_ratelimited(&s->unit->device,
				     "Detect unexpected protocol: %08x %08x\n",
				     cip_header[0], cip_header[1]);
		return -EAGAIN;
	}

	/* Calculate data blocks */
	fdf = (cip_header[1] & CIP_FDF_MASK) >> CIP_FDF_SHIFT;
	if (payload_length == 0 || (fmt == CIP_FMT_AM && fdf == AMDTP_FDF_NO_DATA)) {
		*data_blocks = 0;
	} else {
		unsigned int data_block_quadlets =
				(cip_header[0] & CIP_DBS_MASK) >> CIP_DBS_SHIFT;
		/* avoid division by zero */
		if (data_block_quadlets == 0) {
			dev_err(&s->unit->device,
				"Detect invalid value in dbs field: %08X\n",
				cip_header[0]);
			return -EPROTO;
		}
		// Some devices report a wrong DBS; trust the local setting
		// instead.
		if (s->flags & CIP_WRONG_DBS)
			data_block_quadlets = s->data_block_quadlets;

		*data_blocks = payload_length / sizeof(__be32) / data_block_quadlets;
	}

	/* Check data block counter continuity */
	dbc = cip_header[0] & CIP_DBC_MASK;
	// Some devices put a wrong DBC in empty packets; substitute the last
	// known counter once the stream has seen a valid one.
	if (*data_blocks == 0 && (s->flags & CIP_EMPTY_HAS_WRONG_DBC) &&
	    *data_block_counter != UINT_MAX)
		dbc = *data_block_counter;

	// UINT_MAX marks "no counter seen yet"; skip the continuity check
	// then, and optionally whenever dbc wrapped to zero.
	if ((dbc == 0x00 && (s->flags & CIP_SKIP_DBC_ZERO_CHECK)) ||
	    *data_block_counter == UINT_MAX) {
		lost = false;
	} else if (!(s->flags & CIP_DBC_IS_END_EVENT)) {
		lost = dbc != *data_block_counter;
	} else {
		// DBC counts the end of the packet's events; the expected
		// increment is the configured interval or this packet's
		// block count.
		unsigned int dbc_interval;

		if (*data_blocks > 0 && s->ctx_data.tx.dbc_interval > 0)
			dbc_interval = s->ctx_data.tx.dbc_interval;
		else
			dbc_interval = *data_blocks;

		lost = dbc != ((*data_block_counter + dbc_interval) & 0xff);
	}

	if (lost) {
		dev_err(&s->unit->device,
			"Detect discontinuity of CIP: %02X %02X\n",
			*data_block_counter, dbc);
		return -EIO;
	}

	*data_block_counter = dbc;

	if (!(s->flags & CIP_UNAWARE_SYT))
		*syt = cip_header[1] & CIP_SYT_MASK;

	return 0;
}
667 | ||
// Parse one IR context header for a received cycle: extract the payload
// length, validate it against the maximum, and derive data block count,
// data block counter and SYT — via check_cip_header() when the stream
// carries CIP headers, directly otherwise.
// Returns 0 on success or a negative error code (-EIO for jumbo payload,
// or whatever check_cip_header() reported).
static int parse_ir_ctx_header(struct amdtp_stream *s, unsigned int cycle,
			       const __be32 *ctx_header,
			       unsigned int *data_blocks,
			       unsigned int *data_block_counter,
			       unsigned int *syt, unsigned int packet_index, unsigned int index)
{
	unsigned int payload_length;
	const __be32 *cip_header;
	unsigned int cip_header_size;

	payload_length = be32_to_cpu(ctx_header[0]) >> ISO_DATA_LENGTH_SHIFT;

	if (!(s->flags & CIP_NO_HEADER))
		cip_header_size = CIP_HEADER_SIZE;
	else
		cip_header_size = 0;

	if (payload_length > cip_header_size + s->ctx_data.tx.max_ctx_payload_length) {
		dev_err(&s->unit->device,
			"Detect jumbo payload: %04x %04x\n",
			payload_length, cip_header_size + s->ctx_data.tx.max_ctx_payload_length);
		return -EIO;
	}

	if (cip_header_size > 0) {
		if (payload_length >= cip_header_size) {
			int err;

			cip_header = ctx_header + IR_CTX_HEADER_DEFAULT_QUADLETS;
			err = check_cip_header(s, cip_header, payload_length - cip_header_size,
					       data_blocks, data_block_counter, syt);
			if (err < 0)
				return err;
		} else {
			// Handle the cycle so that empty packet arrives.
			cip_header = NULL;
			*data_blocks = 0;
			*syt = 0;
		}
	} else {
		cip_header = NULL;
		*data_blocks = payload_length / sizeof(__be32) / s->data_block_quadlets;
		*syt = 0;

		// First packet seen on a header-less stream starts the
		// counter at zero.
		if (*data_block_counter == UINT_MAX)
			*data_block_counter = 0;
	}

	trace_amdtp_packet(s, cycle, cip_header, payload_length, *data_blocks,
			   *data_block_counter, packet_index, index);

	return 0;
}
721 | ||
26cd1e58 TS |
722 | // In CYCLE_TIMER register of IEEE 1394, 7 bits are used to represent second. On |
723 | // the other hand, in DMA descriptors of 1394 OHCI, 3 bits are used to represent | |
724 | // it. Thus, via Linux firewire subsystem, we can get the 3 bits for second. | |
3e106f4f | 725 | static inline u32 compute_ohci_cycle_count(__be32 ctx_header_tstamp) |
73fc7f08 | 726 | { |
26cd1e58 | 727 | u32 tstamp = be32_to_cpu(ctx_header_tstamp) & HEADER_TSTAMP_MASK; |
73fc7f08 TS |
728 | return (((tstamp >> 13) & 0x07) * 8000) + (tstamp & 0x1fff); |
729 | } | |
730 | ||
3e106f4f | 731 | static inline u32 increment_ohci_cycle_count(u32 cycle, unsigned int addend) |
73fc7f08 TS |
732 | { |
733 | cycle += addend; | |
3e106f4f TS |
734 | if (cycle >= OHCI_SECOND_MODULUS * CYCLES_PER_SECOND) |
735 | cycle -= OHCI_SECOND_MODULUS * CYCLES_PER_SECOND; | |
73fc7f08 TS |
736 | return cycle; |
737 | } | |
738 | ||
705794c5 TS |
739 | static int compare_ohci_cycle_count(u32 lval, u32 rval) |
740 | { | |
741 | if (lval == rval) | |
742 | return 0; | |
743 | else if (lval < rval && rval - lval < OHCI_SECOND_MODULUS * CYCLES_PER_SECOND / 2) | |
744 | return -1; | |
745 | else | |
746 | return 1; | |
747 | } | |
748 | ||
26cd1e58 | 749 | // Align to actual cycle count for the packet which is going to be scheduled. |
a0e02331 TS |
750 | // This module queued the same number of isochronous cycle as the size of queue |
751 | // to kip isochronous cycle, therefore it's OK to just increment the cycle by | |
752 | // the size of queue for scheduled cycle. | |
3e106f4f TS |
753 | static inline u32 compute_ohci_it_cycle(const __be32 ctx_header_tstamp, |
754 | unsigned int queue_size) | |
26cd1e58 | 755 | { |
3e106f4f TS |
756 | u32 cycle = compute_ohci_cycle_count(ctx_header_tstamp); |
757 | return increment_ohci_cycle_count(cycle, queue_size); | |
26cd1e58 TS |
758 | } |
759 | ||
753e7179 TS |
// Build packet descriptors from the context headers delivered by an IR
// context, verifying isochronous cycle continuity against s->next_cycle.
//
// On success, *desc_count holds the number of descriptors produced; for
// CIP_NO_HEADER streams it can exceed 'packets' because a synthetic
// descriptor is inserted for each single skipped cycle. The stream's
// next_cycle and data_block_counter are committed at the end. Returns -EIO
// on an unrecoverable cycle discontinuity, or the error from
// parse_ir_ctx_header().
static int generate_device_pkt_descs(struct amdtp_stream *s,
				     struct pkt_desc *descs,
				     const __be32 *ctx_header,
				     unsigned int packets,
				     unsigned int *desc_count)
{
	// Work on local copies; stream state is only updated on full success.
	unsigned int next_cycle = s->next_cycle;
	unsigned int dbc = s->data_block_counter;
	unsigned int packet_index = s->packet_index;
	unsigned int queue_size = s->queue_size;
	int i;
	int err;

	*desc_count = 0;
	for (i = 0; i < packets; ++i) {
		struct pkt_desc *desc = descs + *desc_count;
		unsigned int cycle;
		bool lost;
		unsigned int data_blocks;
		unsigned int syt;

		// ctx_header[1] carries the timestamp quadlet for IR contexts.
		cycle = compute_ohci_cycle_count(ctx_header[1]);
		lost = (next_cycle != cycle);
		if (lost) {
			if (s->flags & CIP_NO_HEADER) {
				// Fireface skips transmission just for an isoc cycle corresponding
				// to empty packet.
				unsigned int prev_cycle = next_cycle;

				next_cycle = increment_ohci_cycle_count(next_cycle, 1);
				lost = (next_cycle != cycle);
				if (!lost) {
					// Prepare a description for the skipped cycle for
					// sequence replay.
					desc->cycle = prev_cycle;
					desc->syt = 0;
					desc->data_blocks = 0;
					desc->data_block_counter = dbc;
					desc->ctx_payload = NULL;
					++desc;
					++(*desc_count);
				}
			} else if (s->flags & CIP_JUMBO_PAYLOAD) {
				// OXFW970 skips transmission for several isoc cycles during
				// asynchronous transaction. The sequence replay is impossible due
				// to the reason.
				unsigned int safe_cycle = increment_ohci_cycle_count(next_cycle,
								IR_JUMBO_PAYLOAD_MAX_SKIP_CYCLES);
				lost = (compare_ohci_cycle_count(safe_cycle, cycle) > 0);
			}
			if (lost) {
				dev_err(&s->unit->device, "Detect discontinuity of cycle: %d %d\n",
					next_cycle, cycle);
				return -EIO;
			}
		}

		// parse_ir_ctx_header() may update dbc from the CIP header —
		// it receives &dbc.
		err = parse_ir_ctx_header(s, cycle, ctx_header, &data_blocks, &dbc, &syt,
					  packet_index, i);
		if (err < 0)
			return err;

		desc->cycle = cycle;
		desc->syt = syt;
		desc->data_blocks = data_blocks;
		desc->data_block_counter = dbc;
		desc->ctx_payload = s->buffer.packets[packet_index].buffer;

		// For non-end-event streams the counter names the first data
		// block of the NEXT packet, so advance it after recording.
		if (!(s->flags & CIP_DBC_IS_END_EVENT))
			dbc = (dbc + desc->data_blocks) & 0xff;

		next_cycle = increment_ohci_cycle_count(next_cycle, 1);
		++(*desc_count);
		ctx_header += s->ctx_data.tx.ctx_header_size / sizeof(*ctx_header);
		packet_index = (packet_index + 1) % queue_size;
	}

	// Commit the advanced state back to the stream.
	s->next_cycle = next_cycle;
	s->data_block_counter = dbc;

	return 0;
}
842 | ||
83cfb5c5 TS |
843 | static unsigned int compute_syt(unsigned int syt_offset, unsigned int cycle, |
844 | unsigned int transfer_delay) | |
845 | { | |
846 | unsigned int syt; | |
847 | ||
848 | syt_offset += transfer_delay; | |
849 | syt = ((cycle + syt_offset / TICKS_PER_CYCLE) << 12) | | |
850 | (syt_offset % TICKS_PER_CYCLE); | |
851 | return syt & CIP_SYT_MASK; | |
852 | } | |
853 | ||
69efd5c4 TS |
// Build packet descriptors for an IT context from the pooled sequence
// descriptors: compute the scheduled cycle, the SYT field and the data block
// counter for each of 'packets' outgoing packets, then commit the advanced
// dbc and sequence head back to the stream.
static void generate_pkt_descs(struct amdtp_stream *s, struct pkt_desc *descs,
			       const __be32 *ctx_header, unsigned int packets,
			       const struct seq_desc *seq_descs,
			       unsigned int seq_size)
{
	unsigned int dbc = s->data_block_counter;
	unsigned int seq_head = s->ctx_data.rx.seq.head;
	// CIP_UNAWARE_SYT streams always transmit SYT_NO_INFO.
	bool aware_syt = !(s->flags & CIP_UNAWARE_SYT);
	int i;

	for (i = 0; i < packets; ++i) {
		struct pkt_desc *desc = descs + i;
		unsigned int index = (s->packet_index + i) % s->queue_size;
		const struct seq_desc *seq = seq_descs + seq_head;

		desc->cycle = compute_ohci_it_cycle(*ctx_header, s->queue_size);

		if (aware_syt && seq->syt_offset != CIP_SYT_NO_INFO) {
			desc->syt = compute_syt(seq->syt_offset, desc->cycle,
						s->ctx_data.rx.transfer_delay);
		} else {
			desc->syt = CIP_SYT_NO_INFO;
		}

		desc->data_blocks = seq->data_blocks;

		// When dbc marks the end event, advance it BEFORE recording
		// so the counter names the last data block of this packet ...
		if (s->flags & CIP_DBC_IS_END_EVENT)
			dbc = (dbc + desc->data_blocks) & 0xff;

		desc->data_block_counter = dbc;

		// ... otherwise advance it AFTER recording so the counter
		// names the first data block of this packet.
		if (!(s->flags & CIP_DBC_IS_END_EVENT))
			dbc = (dbc + desc->data_blocks) & 0xff;

		desc->ctx_payload = s->buffer.packets[index].buffer;

		seq_head = (seq_head + 1) % seq_size;

		++ctx_header;
	}

	s->data_block_counter = dbc;
	s->ctx_data.rx.seq.head = seq_head;
}
898 | ||
fce9b013 TS |
// Stop the stream after a fatal error. Setting packet_index to -1 makes every
// context callback bail out early; the PCM substream is aborted directly only
// when we are already running in the period work item, otherwise just the
// XRUN position is flagged.
static inline void cancel_stream(struct amdtp_stream *s)
{
	s->packet_index = -1;
	if (current_work() == &s->period_work)
		amdtp_stream_pcm_abort(s);
	WRITE_ONCE(s->pcm_buffer_pointer, SNDRV_PCM_POS_XRUN);
}
906 | ||
0f5cfcb2 TS |
907 | static void process_ctx_payloads(struct amdtp_stream *s, |
908 | const struct pkt_desc *descs, | |
909 | unsigned int packets) | |
31ef9134 | 910 | { |
9a738ad1 TS |
911 | struct snd_pcm_substream *pcm; |
912 | unsigned int pcm_frames; | |
5e2ece0f | 913 | |
9a738ad1 TS |
914 | pcm = READ_ONCE(s->pcm); |
915 | pcm_frames = s->process_ctx_payloads(s, descs, packets, pcm); | |
916 | if (pcm) | |
917 | update_pcm_pointers(s, pcm, pcm_frames); | |
0f5cfcb2 TS |
918 | } |
919 | ||
9b1fcd9b TS |
// Steady-state completion callback for an IT context: pool sequence
// descriptors, build packet descriptors, pass the payloads to the data block
// processing layer, then queue the outgoing packets. When this stream is the
// domain's IRQ target, a hardware IRQ is scheduled each time a PCM period
// worth of events has been transmitted.
static void process_rx_packets(struct fw_iso_context *context, u32 tstamp, size_t header_length,
			       void *header, void *private_data)
{
	struct amdtp_stream *s = private_data;
	const struct amdtp_domain *d = s->domain;
	const __be32 *ctx_header = header;
	const unsigned int events_per_period = d->events_per_period;
	unsigned int event_count = s->ctx_data.rx.event_count;
	unsigned int pkt_header_length;
	unsigned int packets;
	int i;

	// The stream was already cancelled; nothing to do.
	if (s->packet_index < 0)
		return;

	// Calculate the number of packets in buffer and check XRUN.
	packets = header_length / sizeof(*ctx_header);

	pool_ideal_seq_descs(s, packets);

	generate_pkt_descs(s, s->pkt_descs, ctx_header, packets, s->ctx_data.rx.seq.descs,
			   s->ctx_data.rx.seq.size);

	process_ctx_payloads(s, s->pkt_descs, packets);

	if (!(s->flags & CIP_NO_HEADER))
		pkt_header_length = IT_PKT_HEADER_SIZE_CIP;
	else
		pkt_header_length = 0;

	for (i = 0; i < packets; ++i) {
		const struct pkt_desc *desc = s->pkt_descs + i;
		// Packet parameters followed by room for the CIP header quadlets.
		struct {
			struct fw_iso_packet params;
			__be32 header[CIP_HEADER_QUADLETS];
		} template = { {0}, {0} };
		bool sched_irq = false;

		build_it_pkt_header(s, desc->cycle, &template.params, pkt_header_length,
				    desc->data_blocks, desc->data_block_counter,
				    desc->syt, i);

		// Only the IRQ target accumulates events to decide when to
		// request a hardware interrupt.
		if (s == s->domain->irq_target) {
			event_count += desc->data_blocks;
			if (event_count >= events_per_period) {
				event_count -= events_per_period;
				sched_irq = true;
			}
		}

		if (queue_out_packet(s, &template.params, sched_irq) < 0) {
			cancel_stream(s);
			return;
		}
	}

	s->ctx_data.rx.event_count = event_count;
}
978 | ||
9b1fcd9b TS |
// Completion callback for an IT context before the domain has decided the
// cycle to start content processing: queue empty packets only, tracking
// next_cycle from the last timestamp so the start cycle can be computed.
static void skip_rx_packets(struct fw_iso_context *context, u32 tstamp, size_t header_length,
			    void *header, void *private_data)
{
	struct amdtp_stream *s = private_data;
	struct amdtp_domain *d = s->domain;
	const __be32 *ctx_header = header;
	unsigned int packets;
	unsigned int cycle;
	int i;

	if (s->packet_index < 0)
		return;

	packets = header_length / sizeof(*ctx_header);

	// Remember the cycle following the last packet of this batch.
	cycle = compute_ohci_it_cycle(ctx_header[packets - 1], s->queue_size);
	s->next_cycle = increment_ohci_cycle_count(cycle, 1);

	for (i = 0; i < packets; ++i) {
		struct fw_iso_packet params = {
			.header_length = 0,
			.payload_length = 0,
		};
		// Only the final packet of the IRQ target's batch requests an IRQ.
		bool sched_irq = (s == d->irq_target && i == packets - 1);

		if (queue_out_packet(s, &params, sched_irq) < 0) {
			cancel_stream(s);
			return;
		}
	}
}
1010 | ||
1011 | static void irq_target_callback(struct fw_iso_context *context, u32 tstamp, size_t header_length, | |
1012 | void *header, void *private_data); | |
1013 | ||
// Transitional IT callback: split the batch at the domain's agreed rx start
// cycle — packets scheduled for earlier cycles are sent empty, the remainder
// is processed normally — then install the steady-state callback.
static void process_rx_packets_intermediately(struct fw_iso_context *context, u32 tstamp,
					size_t header_length, void *header, void *private_data)
{
	struct amdtp_stream *s = private_data;
	struct amdtp_domain *d = s->domain;
	__be32 *ctx_header = header;
	const unsigned int queue_size = s->queue_size;
	unsigned int packets;
	unsigned int offset;

	if (s->packet_index < 0)
		return;

	packets = header_length / sizeof(*ctx_header);

	// Find the first packet scheduled at or after the start cycle.
	offset = 0;
	while (offset < packets) {
		unsigned int cycle = compute_ohci_it_cycle(ctx_header[offset], queue_size);

		if (compare_ohci_cycle_count(cycle, d->processing_cycle.rx_start) >= 0)
			break;

		++offset;
	}

	// Send empty packets for the cycles before the start cycle.
	if (offset > 0) {
		unsigned int length = sizeof(*ctx_header) * offset;

		skip_rx_packets(context, tstamp, length, ctx_header, private_data);
		if (amdtp_streaming_error(s))
			return;

		ctx_header += offset;
		header_length -= length;
	}

	// Process the remainder and switch to the steady-state callback.
	if (offset < packets) {
		s->ready_processing = true;
		wake_up(&s->ready_wait);

		process_rx_packets(context, tstamp, header_length, ctx_header, private_data);
		if (amdtp_streaming_error(s))
			return;

		if (s == d->irq_target)
			s->context->callback.sc = irq_target_callback;
		else
			s->context->callback.sc = process_rx_packets;
	}
}
1064 | ||
da3623ab TS |
// Steady-state completion callback for an IR context: build descriptors from
// the received context headers, hand the payloads to the data block
// processing layer, then requeue the packets for further reception.
static void process_tx_packets(struct fw_iso_context *context, u32 tstamp, size_t header_length,
			       void *header, void *private_data)
{
	struct amdtp_stream *s = private_data;
	__be32 *ctx_header = header;
	unsigned int packets;
	unsigned int desc_count;
	int i;
	int err;

	if (s->packet_index < 0)
		return;

	// Calculate the number of packets in buffer and check XRUN.
	packets = header_length / s->ctx_data.tx.ctx_header_size;

	desc_count = 0;
	err = generate_device_pkt_descs(s, s->pkt_descs, ctx_header, packets, &desc_count);
	if (err < 0) {
		// -EAGAIN is treated as recoverable: payload processing is
		// skipped for this batch, but the packets are still requeued.
		if (err != -EAGAIN) {
			cancel_stream(s);
			return;
		}
	} else {
		process_ctx_payloads(s, s->pkt_descs, desc_count);
	}

	for (i = 0; i < packets; ++i) {
		struct fw_iso_packet params = {0};

		if (queue_in_packet(s, &params) < 0) {
			cancel_stream(s);
			return;
		}
	}
}
1101 | ||
da3623ab TS |
// IR callback before the domain has decided the cycle to start content
// processing: discard the received payloads, track next_cycle from the last
// packet's timestamp, and requeue the packets.
static void drop_tx_packets(struct fw_iso_context *context, u32 tstamp, size_t header_length,
			    void *header, void *private_data)
{
	struct amdtp_stream *s = private_data;
	const __be32 *ctx_header = header;
	unsigned int packets;
	unsigned int cycle;
	int i;

	if (s->packet_index < 0)
		return;

	packets = header_length / s->ctx_data.tx.ctx_header_size;

	// Step to the header of the last packet; its second quadlet carries
	// the timestamp.
	ctx_header += (packets - 1) * s->ctx_data.tx.ctx_header_size / sizeof(*ctx_header);
	cycle = compute_ohci_cycle_count(ctx_header[1]);
	s->next_cycle = increment_ohci_cycle_count(cycle, 1);

	for (i = 0; i < packets; ++i) {
		struct fw_iso_packet params = {0};

		if (queue_in_packet(s, &params) < 0) {
			cancel_stream(s);
			return;
		}
	}
}
1129 | ||
// Transitional IR callback: split the batch at the domain's agreed tx start
// cycle — packets received for earlier cycles are dropped, the remainder is
// processed normally — then install the steady-state callback.
static void process_tx_packets_intermediately(struct fw_iso_context *context, u32 tstamp,
					size_t header_length, void *header, void *private_data)
{
	struct amdtp_stream *s = private_data;
	struct amdtp_domain *d = s->domain;
	__be32 *ctx_header;
	unsigned int packets;
	unsigned int offset;

	if (s->packet_index < 0)
		return;

	packets = header_length / s->ctx_data.tx.ctx_header_size;

	// Count the packets received before the start cycle.
	offset = 0;
	ctx_header = header;
	while (offset < packets) {
		unsigned int cycle = compute_ohci_cycle_count(ctx_header[1]);

		if (compare_ohci_cycle_count(cycle, d->processing_cycle.tx_start) >= 0)
			break;

		ctx_header += s->ctx_data.tx.ctx_header_size / sizeof(__be32);
		++offset;
	}

	ctx_header = header;

	// Drop the packets received before the start cycle.
	if (offset > 0) {
		size_t length = s->ctx_data.tx.ctx_header_size * offset;

		drop_tx_packets(context, tstamp, length, ctx_header, s);
		if (amdtp_streaming_error(s))
			return;

		ctx_header += length / sizeof(*ctx_header);
		header_length -= length;
	}

	// Process the remainder and switch to the steady-state callback.
	if (offset < packets) {
		s->ready_processing = true;
		wake_up(&s->ready_wait);

		process_tx_packets(context, tstamp, header_length, ctx_header, s);
		if (amdtp_streaming_error(s))
			return;

		context->callback.sc = process_tx_packets;
	}
}
1180 | ||
// Flush queued completions for every running stream in the domain except the
// IRQ target (which has already been processed by its own callback before
// this function is called). On any streaming error, cancel all running
// streams, including the IRQ target itself.
static void process_ctxs_in_domain(struct amdtp_domain *d)
{
	struct amdtp_stream *s;

	list_for_each_entry(s, &d->streams, list) {
		if (s != d->irq_target && amdtp_stream_running(s))
			fw_iso_context_flush_completions(s->context);

		if (amdtp_streaming_error(s))
			goto error;
	}

	return;
error:
	if (amdtp_stream_running(d->irq_target))
		cancel_stream(d->irq_target);

	list_for_each_entry(s, &d->streams, list) {
		if (amdtp_stream_running(s))
			cancel_stream(s);
	}
}
1203 | ||
9b1fcd9b TS |
1204 | static void irq_target_callback(struct fw_iso_context *context, u32 tstamp, size_t header_length, |
1205 | void *header, void *private_data) | |
1206 | { | |
1207 | struct amdtp_stream *s = private_data; | |
1208 | struct amdtp_domain *d = s->domain; | |
9b1fcd9b TS |
1209 | |
1210 | process_rx_packets(context, tstamp, header_length, header, private_data); | |
1211 | process_ctxs_in_domain(d); | |
1212 | } | |
1213 | ||
1214 | static void irq_target_callback_intermediately(struct fw_iso_context *context, u32 tstamp, | |
1215 | size_t header_length, void *header, void *private_data) | |
1216 | { | |
1217 | struct amdtp_stream *s = private_data; | |
1218 | struct amdtp_domain *d = s->domain; | |
9b1fcd9b TS |
1219 | |
1220 | process_rx_packets_intermediately(context, tstamp, header_length, header, private_data); | |
1221 | process_ctxs_in_domain(d); | |
1222 | } | |
1223 | ||
// Initial IT callback for the IRQ target: skip its own packets, flush the
// rest of the domain, then pick the largest next_cycle among all out-streams
// as the cycle at which packet content processing begins and switch every
// out-stream to its transitional callback.
static void irq_target_callback_skip(struct fw_iso_context *context, u32 tstamp,
				     size_t header_length, void *header, void *private_data)
{
	struct amdtp_stream *s = private_data;
	struct amdtp_domain *d = s->domain;
	unsigned int cycle;

	skip_rx_packets(context, tstamp, header_length, header, private_data);
	process_ctxs_in_domain(d);

	// Decide the cycle count to begin processing content of packet in IT contexts. All of IT
	// contexts are expected to start and get callback when reaching here.
	// NOTE: 's' is reused as the list cursor below; its original value
	// (the IRQ target) is captured in 'cycle' first.
	cycle = s->next_cycle;
	list_for_each_entry(s, &d->streams, list) {
		if (s->direction != AMDTP_OUT_STREAM)
			continue;

		if (compare_ohci_cycle_count(s->next_cycle, cycle) > 0)
			cycle = s->next_cycle;

		if (s == d->irq_target)
			s->context->callback.sc = irq_target_callback_intermediately;
		else
			s->context->callback.sc = process_rx_packets_intermediately;
	}

	d->processing_cycle.rx_start = cycle;
}
1252 | ||
// this is executed one time.
//
// First completion callback of every context: record that the context became
// live, install the direction-specific skip/drop callback and invoke it for
// this batch. Once ALL in-streams of the domain have been called back, the
// common tx start cycle is decided as the maximum of their next_cycle values
// (plus the configured initial skip) and the in-streams are switched to their
// transitional callback.
static void amdtp_stream_first_callback(struct fw_iso_context *context,
					u32 tstamp, size_t header_length,
					void *header, void *private_data)
{
	struct amdtp_stream *s = private_data;
	struct amdtp_domain *d = s->domain;
	const __be32 *ctx_header = header;
	u32 cycle;

	// For in-stream, first packet has come.
	// For out-stream, prepared to transmit first packet
	s->callbacked = true;

	if (s->direction == AMDTP_IN_STREAM) {
		cycle = compute_ohci_cycle_count(ctx_header[1]);

		context->callback.sc = drop_tx_packets;
	} else {
		cycle = compute_ohci_it_cycle(*ctx_header, s->queue_size);

		if (s == d->irq_target)
			context->callback.sc = irq_target_callback_skip;
		else
			context->callback.sc = skip_rx_packets;
	}

	// Handle this first batch with the newly installed callback.
	context->callback.sc(context, tstamp, header_length, header, s);

	// Decide the cycle count to begin processing content of packet in IR contexts.
	// NOTE: 's' is reused as the list cursor in the loops below.
	if (s->direction == AMDTP_IN_STREAM) {
		unsigned int stream_count = 0;
		unsigned int callbacked_count = 0;

		list_for_each_entry(s, &d->streams, list) {
			if (s->direction == AMDTP_IN_STREAM) {
				++stream_count;
				if (s->callbacked)
					++callbacked_count;
			}
		}

		if (stream_count == callbacked_count) {
			unsigned int next_cycle;

			list_for_each_entry(s, &d->streams, list) {
				if (s->direction != AMDTP_IN_STREAM)
					continue;

				next_cycle = increment_ohci_cycle_count(s->next_cycle,
								d->processing_cycle.tx_init_skip);
				if (compare_ohci_cycle_count(next_cycle, cycle) > 0)
					cycle = next_cycle;

				s->context->callback.sc = process_tx_packets_intermediately;
			}

			d->processing_cycle.tx_start = cycle;
		}
	}
}
1314 | ||
31ef9134 | 1315 | /** |
be4a2894 TS |
1316 | * amdtp_stream_start - start transferring packets |
1317 | * @s: the AMDTP stream to start | |
31ef9134 CL |
1318 | * @channel: the isochronous channel on the bus |
1319 | * @speed: firewire speed code | |
af86b0b1 TS |
1320 | * @queue_size: The number of packets in the queue. |
1321 | * @idle_irq_interval: the interval to queue packet during initial state. | |
31ef9134 CL |
1322 | * |
1323 | * The stream cannot be started until it has been configured with | |
be4a2894 TS |
1324 | * amdtp_stream_set_parameters() and it must be started before any PCM or MIDI |
1325 | * device can be started. | |
31ef9134 | 1326 | */ |
a0e02331 | 1327 | static int amdtp_stream_start(struct amdtp_stream *s, int channel, int speed, |
bd165079 | 1328 | unsigned int queue_size, unsigned int idle_irq_interval) |
31ef9134 | 1329 | { |
2472cfb3 | 1330 | bool is_irq_target = (s == s->domain->irq_target); |
d3d10a4a | 1331 | unsigned int ctx_header_size; |
f11453c7 | 1332 | unsigned int max_ctx_payload_size; |
2b3fc456 | 1333 | enum dma_data_direction dir; |
7ab56645 | 1334 | int type, tag, err; |
31ef9134 CL |
1335 | |
1336 | mutex_lock(&s->mutex); | |
1337 | ||
be4a2894 | 1338 | if (WARN_ON(amdtp_stream_running(s) || |
4b7da117 | 1339 | (s->data_block_quadlets < 1))) { |
31ef9134 CL |
1340 | err = -EBADFD; |
1341 | goto err_unlock; | |
1342 | } | |
1343 | ||
d3d10a4a | 1344 | if (s->direction == AMDTP_IN_STREAM) { |
60dd4929 TS |
1345 | // NOTE: IT context should be used for constant IRQ. |
1346 | if (is_irq_target) { | |
1347 | err = -EINVAL; | |
1348 | goto err_unlock; | |
1349 | } | |
1350 | ||
b6bc8123 | 1351 | s->data_block_counter = UINT_MAX; |
d3d10a4a | 1352 | } else { |
b6bc8123 | 1353 | s->data_block_counter = 0; |
d3d10a4a | 1354 | } |
31ef9134 | 1355 | |
1be4f21d | 1356 | // initialize packet buffer. |
2b3fc456 TS |
1357 | if (s->direction == AMDTP_IN_STREAM) { |
1358 | dir = DMA_FROM_DEVICE; | |
1359 | type = FW_ISO_CONTEXT_RECEIVE; | |
c75f3678 | 1360 | if (!(s->flags & CIP_NO_HEADER)) |
f11453c7 | 1361 | ctx_header_size = IR_CTX_HEADER_SIZE_CIP; |
c75f3678 | 1362 | else |
f11453c7 | 1363 | ctx_header_size = IR_CTX_HEADER_SIZE_NO_CIP; |
2b3fc456 TS |
1364 | } else { |
1365 | dir = DMA_TO_DEVICE; | |
1366 | type = FW_ISO_CONTEXT_TRANSMIT; | |
df9160b9 | 1367 | ctx_header_size = 0; // No effect for IT context. |
b18f0cfa | 1368 | } |
c75f3678 | 1369 | max_ctx_payload_size = amdtp_stream_get_max_ctx_payload_size(s); |
f11453c7 | 1370 | |
c75f3678 | 1371 | err = iso_packets_buffer_init(&s->buffer, s->unit, queue_size, max_ctx_payload_size, dir); |
31ef9134 CL |
1372 | if (err < 0) |
1373 | goto err_unlock; | |
af86b0b1 | 1374 | s->queue_size = queue_size; |
60dd4929 | 1375 | |
31ef9134 | 1376 | s->context = fw_iso_context_create(fw_parent_device(s->unit)->card, |
d3d10a4a | 1377 | type, channel, speed, ctx_header_size, |
2472cfb3 | 1378 | amdtp_stream_first_callback, s); |
31ef9134 CL |
1379 | if (IS_ERR(s->context)) { |
1380 | err = PTR_ERR(s->context); | |
1381 | if (err == -EBUSY) | |
1382 | dev_err(&s->unit->device, | |
be4a2894 | 1383 | "no free stream on this controller\n"); |
31ef9134 CL |
1384 | goto err_buffer; |
1385 | } | |
1386 | ||
be4a2894 | 1387 | amdtp_stream_update(s); |
31ef9134 | 1388 | |
d3d10a4a | 1389 | if (s->direction == AMDTP_IN_STREAM) { |
f11453c7 | 1390 | s->ctx_data.tx.max_ctx_payload_length = max_ctx_payload_size; |
d3d10a4a | 1391 | s->ctx_data.tx.ctx_header_size = ctx_header_size; |
bd165079 | 1392 | } else { |
6f24bb8a TS |
1393 | static const struct { |
1394 | unsigned int data_block; | |
1395 | unsigned int syt_offset; | |
1396 | } *entry, initial_state[] = { | |
1397 | [CIP_SFC_32000] = { 4, 3072 }, | |
1398 | [CIP_SFC_48000] = { 6, 1024 }, | |
1399 | [CIP_SFC_96000] = { 12, 1024 }, | |
1400 | [CIP_SFC_192000] = { 24, 1024 }, | |
1401 | [CIP_SFC_44100] = { 0, 67 }, | |
1402 | [CIP_SFC_88200] = { 0, 67 }, | |
1403 | [CIP_SFC_176400] = { 0, 67 }, | |
1404 | }; | |
1405 | ||
1406 | s->ctx_data.rx.seq.descs = kcalloc(queue_size, sizeof(*s->ctx_data.rx.seq.descs), GFP_KERNEL); | |
1407 | if (!s->ctx_data.rx.seq.descs) | |
1408 | goto err_context; | |
1409 | s->ctx_data.rx.seq.size = queue_size; | |
1410 | s->ctx_data.rx.seq.tail = 0; | |
1411 | s->ctx_data.rx.seq.head = 0; | |
1412 | ||
1413 | entry = &initial_state[s->sfc]; | |
1414 | s->ctx_data.rx.data_block_state = entry->data_block; | |
1415 | s->ctx_data.rx.syt_offset_state = entry->syt_offset; | |
1416 | s->ctx_data.rx.last_syt_offset = TICKS_PER_CYCLE; | |
1417 | ||
bd165079 | 1418 | s->ctx_data.rx.event_count = 0; |
d3d10a4a | 1419 | } |
52759c09 | 1420 | |
3b196c39 TS |
1421 | if (s->flags & CIP_NO_HEADER) |
1422 | s->tag = TAG_NO_CIP_HEADER; | |
1423 | else | |
1424 | s->tag = TAG_CIP; | |
1425 | ||
a0e02331 | 1426 | s->pkt_descs = kcalloc(s->queue_size, sizeof(*s->pkt_descs), |
04130cf8 TS |
1427 | GFP_KERNEL); |
1428 | if (!s->pkt_descs) { | |
1429 | err = -ENOMEM; | |
1430 | goto err_context; | |
1431 | } | |
1432 | ||
ec00f5e4 | 1433 | s->packet_index = 0; |
4b7da117 | 1434 | do { |
6007bf54 | 1435 | struct fw_iso_packet params; |
e229853d | 1436 | |
b18f0cfa | 1437 | if (s->direction == AMDTP_IN_STREAM) { |
60dd4929 | 1438 | err = queue_in_packet(s, ¶ms); |
b18f0cfa | 1439 | } else { |
60dd4929 TS |
1440 | bool sched_irq = false; |
1441 | ||
b18f0cfa TS |
1442 | params.header_length = 0; |
1443 | params.payload_length = 0; | |
60dd4929 TS |
1444 | |
1445 | if (is_irq_target) { | |
1446 | sched_irq = !((s->packet_index + 1) % | |
1447 | idle_irq_interval); | |
1448 | } | |
1449 | ||
e229853d | 1450 | err = queue_out_packet(s, ¶ms, sched_irq); |
b18f0cfa | 1451 | } |
4b7da117 | 1452 | if (err < 0) |
04130cf8 | 1453 | goto err_pkt_descs; |
4b7da117 | 1454 | } while (s->packet_index > 0); |
31ef9134 | 1455 | |
2b3fc456 | 1456 | /* NOTE: TAG1 matches CIP. This just affects in stream. */ |
7ab56645 | 1457 | tag = FW_ISO_CONTEXT_MATCH_TAG1; |
3b196c39 | 1458 | if ((s->flags & CIP_EMPTY_WITH_TAG0) || (s->flags & CIP_NO_HEADER)) |
7ab56645 TS |
1459 | tag |= FW_ISO_CONTEXT_MATCH_TAG0; |
1460 | ||
7b3b0d85 | 1461 | s->callbacked = false; |
bdaedca7 | 1462 | s->ready_processing = false; |
bd165079 | 1463 | err = fw_iso_context_start(s->context, -1, 0, tag); |
31ef9134 | 1464 | if (err < 0) |
04130cf8 | 1465 | goto err_pkt_descs; |
31ef9134 CL |
1466 | |
1467 | mutex_unlock(&s->mutex); | |
1468 | ||
1469 | return 0; | |
04130cf8 TS |
1470 | err_pkt_descs: |
1471 | kfree(s->pkt_descs); | |
31ef9134 | 1472 | err_context: |
6f24bb8a TS |
1473 | if (s->direction == AMDTP_OUT_STREAM) |
1474 | kfree(s->ctx_data.rx.seq.descs); | |
31ef9134 CL |
1475 | fw_iso_context_destroy(s->context); |
1476 | s->context = ERR_PTR(-1); | |
1477 | err_buffer: | |
1478 | iso_packets_buffer_destroy(&s->buffer, s->unit); | |
1479 | err_unlock: | |
1480 | mutex_unlock(&s->mutex); | |
1481 | ||
1482 | return err; | |
1483 | } | |
31ef9134 | 1484 | |
/**
 * amdtp_domain_stream_pcm_pointer - get the PCM buffer position
 * @d: the AMDTP domain.
 * @s: the AMDTP stream that transports the PCM data
 *
 * Returns the current buffer position, in frames. When called from process
 * context while the domain's IRQ target is running, queued completions are
 * flushed first so the returned position is as fresh as possible.
 */
unsigned long amdtp_domain_stream_pcm_pointer(struct amdtp_domain *d,
					      struct amdtp_stream *s)
{
	struct amdtp_stream *irq_target = d->irq_target;

	if (irq_target && amdtp_stream_running(irq_target)) {
		// This function is called in software IRQ context of
		// period_work or process context.
		//
		// When the software IRQ context was scheduled by software IRQ
		// context of IT contexts, queued packets were already handled.
		// Therefore, no need to flush the queue in buffer furthermore.
		//
		// When the process context reach here, some packets will be
		// already queued in the buffer. These packets should be handled
		// immediately to keep better granularity of PCM pointer.
		//
		// Later, the process context will sometimes schedules software
		// IRQ context of the period_work. Then, no need to flush the
		// queue by the same reason as described in the above
		if (current_work() != &s->period_work) {
			// Queued packet should be processed without any kernel
			// preemption to keep latency against bus cycle.
			preempt_disable();
			fw_iso_context_flush_completions(irq_target->context);
			preempt_enable();
		}
	}

	return READ_ONCE(s->pcm_buffer_pointer);
}
EXPORT_SYMBOL_GPL(amdtp_domain_stream_pcm_pointer);
e9148ddd | 1524 | |
875becf8 | 1525 | /** |
e6dcc92f TS |
1526 | * amdtp_domain_stream_pcm_ack - acknowledge queued PCM frames |
1527 | * @d: the AMDTP domain. | |
875becf8 TS |
1528 | * @s: the AMDTP stream that transfers the PCM frames |
1529 | * | |
1530 | * Returns zero always. | |
1531 | */ | |
e6dcc92f | 1532 | int amdtp_domain_stream_pcm_ack(struct amdtp_domain *d, struct amdtp_stream *s) |
875becf8 | 1533 | { |
e6dcc92f TS |
1534 | struct amdtp_stream *irq_target = d->irq_target; |
1535 | ||
1536 | // Process isochronous packets for recent isochronous cycle to handle | |
1537 | // queued PCM frames. | |
1538 | if (irq_target && amdtp_stream_running(irq_target)) { | |
1539 | // Queued packet should be processed without any kernel | |
1540 | // preemption to keep latency against bus cycle. | |
1541 | preempt_disable(); | |
1542 | fw_iso_context_flush_completions(irq_target->context); | |
1543 | preempt_enable(); | |
1544 | } | |
875becf8 TS |
1545 | |
1546 | return 0; | |
1547 | } | |
e6dcc92f | 1548 | EXPORT_SYMBOL_GPL(amdtp_domain_stream_pcm_ack); |
875becf8 | 1549 | |
31ef9134 | 1550 | /** |
be4a2894 TS |
1551 | * amdtp_stream_update - update the stream after a bus reset |
1552 | * @s: the AMDTP stream | |
31ef9134 | 1553 | */ |
be4a2894 | 1554 | void amdtp_stream_update(struct amdtp_stream *s) |
31ef9134 | 1555 | { |
9a2820c1 | 1556 | /* Precomputing. */ |
6aa7de05 MR |
1557 | WRITE_ONCE(s->source_node_id_field, |
1558 | (fw_parent_device(s->unit)->card->node_id << CIP_SID_SHIFT) & CIP_SID_MASK); | |
31ef9134 | 1559 | } |
be4a2894 | 1560 | EXPORT_SYMBOL(amdtp_stream_update); |
31ef9134 CL |
1561 | |
/**
 * amdtp_stream_stop - stop sending packets
 * @s: the AMDTP stream to stop
 *
 * All PCM and MIDI devices of the stream must be stopped before the stream
 * itself can be stopped.
 */
static void amdtp_stream_stop(struct amdtp_stream *s)
{
	mutex_lock(&s->mutex);

	// Nothing to tear down when the context was never started or was
	// already stopped.
	if (!amdtp_stream_running(s)) {
		mutex_unlock(&s->mutex);
		return;
	}

	// Ensure no completion work runs concurrently with the teardown below,
	// then stop and release the isochronous context.
	cancel_work_sync(&s->period_work);
	fw_iso_context_stop(s->context);
	fw_iso_context_destroy(s->context);
	// Poison the pointer so amdtp_stream_running() reports false from now on.
	s->context = ERR_PTR(-1);
	iso_packets_buffer_destroy(&s->buffer, s->unit);
	kfree(s->pkt_descs);

	// The sequence descriptors are allocated for IT contexts only.
	if (s->direction == AMDTP_OUT_STREAM)
		kfree(s->ctx_data.rx.seq.descs);

	mutex_unlock(&s->mutex);
}
31ef9134 CL |
1590 | |
1591 | /** | |
be4a2894 | 1592 | * amdtp_stream_pcm_abort - abort the running PCM device |
31ef9134 CL |
1593 | * @s: the AMDTP stream about to be stopped |
1594 | * | |
1595 | * If the isochronous stream needs to be stopped asynchronously, call this | |
1596 | * function first to stop the PCM device. | |
1597 | */ | |
be4a2894 | 1598 | void amdtp_stream_pcm_abort(struct amdtp_stream *s) |
31ef9134 CL |
1599 | { |
1600 | struct snd_pcm_substream *pcm; | |
1601 | ||
6aa7de05 | 1602 | pcm = READ_ONCE(s->pcm); |
1fb8510c TI |
1603 | if (pcm) |
1604 | snd_pcm_stop_xrun(pcm); | |
31ef9134 | 1605 | } |
be4a2894 | 1606 | EXPORT_SYMBOL(amdtp_stream_pcm_abort); |
3ec3d7a3 TS |
1607 | |
1608 | /** | |
1609 | * amdtp_domain_init - initialize an AMDTP domain structure | |
1610 | * @d: the AMDTP domain to initialize. | |
1611 | */ | |
1612 | int amdtp_domain_init(struct amdtp_domain *d) | |
1613 | { | |
1614 | INIT_LIST_HEAD(&d->streams); | |
1615 | ||
d68c3123 TS |
1616 | d->events_per_period = 0; |
1617 | ||
3ec3d7a3 TS |
1618 | return 0; |
1619 | } | |
1620 | EXPORT_SYMBOL_GPL(amdtp_domain_init); | |
1621 | ||
/**
 * amdtp_domain_destroy - destroy an AMDTP domain structure
 * @d: the AMDTP domain to destroy.
 */
void amdtp_domain_destroy(struct amdtp_domain *d)
{
	// Nothing to release for now; kept for symmetry with
	// amdtp_domain_init() and for future extension.
}
EXPORT_SYMBOL_GPL(amdtp_domain_destroy);
6261f90b | 1632 | |
157a53ee TS |
1633 | /** |
1634 | * amdtp_domain_add_stream - register isoc context into the domain. | |
1635 | * @d: the AMDTP domain. | |
1636 | * @s: the AMDTP stream. | |
1637 | * @channel: the isochronous channel on the bus. | |
1638 | * @speed: firewire speed code. | |
1639 | */ | |
1640 | int amdtp_domain_add_stream(struct amdtp_domain *d, struct amdtp_stream *s, | |
1641 | int channel, int speed) | |
1642 | { | |
1643 | struct amdtp_stream *tmp; | |
1644 | ||
1645 | list_for_each_entry(tmp, &d->streams, list) { | |
1646 | if (s == tmp) | |
1647 | return -EBUSY; | |
1648 | } | |
1649 | ||
1650 | list_add(&s->list, &d->streams); | |
1651 | ||
1652 | s->channel = channel; | |
1653 | s->speed = speed; | |
2472cfb3 | 1654 | s->domain = d; |
157a53ee TS |
1655 | |
1656 | return 0; | |
1657 | } | |
1658 | EXPORT_SYMBOL_GPL(amdtp_domain_add_stream); | |
1659 | ||
9b4702b0 TS |
1660 | /** |
1661 | * amdtp_domain_start - start sending packets for isoc context in the domain. | |
1662 | * @d: the AMDTP domain. | |
26541cb1 TS |
1663 | * @tx_init_skip_cycles: the number of cycles to skip processing packets at initial stage of IR |
1664 | * contexts. | |
9b4702b0 | 1665 | */ |
26541cb1 | 1666 | int amdtp_domain_start(struct amdtp_domain *d, unsigned int tx_init_skip_cycles) |
9b4702b0 | 1667 | { |
af86b0b1 TS |
1668 | unsigned int events_per_buffer = d->events_per_buffer; |
1669 | unsigned int events_per_period = d->events_per_period; | |
af86b0b1 | 1670 | unsigned int queue_size; |
9b4702b0 | 1671 | struct amdtp_stream *s; |
acfedcbe | 1672 | int err; |
9b4702b0 | 1673 | |
60dd4929 | 1674 | // Select an IT context as IRQ target. |
9b4702b0 | 1675 | list_for_each_entry(s, &d->streams, list) { |
60dd4929 | 1676 | if (s->direction == AMDTP_OUT_STREAM) |
9b4702b0 TS |
1677 | break; |
1678 | } | |
60dd4929 TS |
1679 | if (!s) |
1680 | return -ENXIO; | |
1681 | d->irq_target = s; | |
9b4702b0 | 1682 | |
26541cb1 TS |
1683 | d->processing_cycle.tx_init_skip = tx_init_skip_cycles; |
1684 | ||
af86b0b1 TS |
1685 | // This is a case that AMDTP streams in domain run just for MIDI |
1686 | // substream. Use the number of events equivalent to 10 msec as | |
1687 | // interval of hardware IRQ. | |
1688 | if (events_per_period == 0) | |
1689 | events_per_period = amdtp_rate_table[d->irq_target->sfc] / 100; | |
1690 | if (events_per_buffer == 0) | |
1691 | events_per_buffer = events_per_period * 3; | |
1692 | ||
1693 | queue_size = DIV_ROUND_UP(CYCLES_PER_SECOND * events_per_buffer, | |
1694 | amdtp_rate_table[d->irq_target->sfc]); | |
1695 | ||
60dd4929 | 1696 | list_for_each_entry(s, &d->streams, list) { |
bd165079 | 1697 | unsigned int idle_irq_interval = 0; |
acfedcbe | 1698 | |
bd165079 TS |
1699 | if (s->direction == AMDTP_OUT_STREAM && s == d->irq_target) { |
1700 | idle_irq_interval = DIV_ROUND_UP(CYCLES_PER_SECOND * events_per_period, | |
1701 | amdtp_rate_table[d->irq_target->sfc]); | |
60dd4929 | 1702 | } |
9b4702b0 | 1703 | |
bd165079 TS |
1704 | // Starts immediately but actually DMA context starts several hundred cycles later. |
1705 | err = amdtp_stream_start(s, s->channel, s->speed, queue_size, idle_irq_interval); | |
1706 | if (err < 0) | |
1707 | goto error; | |
1708 | } | |
60dd4929 TS |
1709 | |
1710 | return 0; | |
1711 | error: | |
1712 | list_for_each_entry(s, &d->streams, list) | |
1713 | amdtp_stream_stop(s); | |
9b4702b0 TS |
1714 | return err; |
1715 | } | |
1716 | EXPORT_SYMBOL_GPL(amdtp_domain_start); | |
1717 | ||
6261f90b TS |
/**
 * amdtp_domain_stop - stop sending packets for isoc context in the same domain.
 * @d: the AMDTP domain to which the isoc contexts belong.
 */
void amdtp_domain_stop(struct amdtp_domain *d)
{
	struct amdtp_stream *s, *next;

	// Stop the IRQ target first so that its completion handling no longer
	// flushes the other contexts while they are being torn down.
	if (d->irq_target)
		amdtp_stream_stop(d->irq_target);

	list_for_each_entry_safe(s, next, &d->streams, list) {
		list_del(&s->list);

		// The IRQ target was already stopped above.
		if (s != d->irq_target)
			amdtp_stream_stop(s);
	}

	// Reset domain state so that it can be configured and started again.
	d->events_per_period = 0;
	d->irq_target = NULL;
}
EXPORT_SYMBOL_GPL(amdtp_domain_stop);