The number of packets in the packet buffer has been a fixed value (48)
since the first commit of the ALSA IEC 61883-1/6 packet streaming
engine.
This commit allows the engine to use a variable number of packets in
the buffer. The queue size is calculated from a parameter in the AMDTP
domain structure so that the packets in the buffer are guaranteed to
store the given number of events. Although the value of the parameter
is expected to come from the 'period size' parameter of the PCM
substream, at present 48 is still used.
Signed-off-by: Takashi Sakamoto <o-takashi@sakamocchi.jp>
Link: https://lore.kernel.org/r/20191017155424.885-2-o-takashi@sakamocchi.jp
Signed-off-by: Takashi Iwai <tiwai@suse.de>
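For reference, the sizing rule introduced below can be checked in
isolation. The following is a minimal userspace sketch, not the kernel
code itself: div_round_up() stands in for the kernel's DIV_ROUND_UP()
macro, and the constants CYCLES_PER_SECOND (8000 isochronous cycles per
second on IEEE 1394) and INTERRUPT_INTERVAL are taken from the hunks
below.

#include <stdio.h>

#define CYCLES_PER_SECOND	8000	/* isochronous cycles per second */
#define INTERRUPT_INTERVAL	16

/* Stand-in for the kernel's DIV_ROUND_UP() macro. */
static unsigned int div_round_up(unsigned int n, unsigned int d)
{
	return (n + d - 1) / d;
}

/* Packets needed to buffer the given number of events at a sample rate. */
static unsigned int queue_size(unsigned int events_per_buffer,
			       unsigned int rate)
{
	if (events_per_buffer == 0)
		events_per_buffer = INTERRUPT_INTERVAL * 3;
	return div_round_up(CYCLES_PER_SECOND * events_per_buffer, rate);
}

int main(void)
{
	/* With the default of 48 events at 48.0 kHz, each packet carries
	 * 48000 / 8000 = 6 events on average, so the queue needs
	 * ceil(48 / 6) = 8 packets.
	 */
	printf("queue_size = %u\n", queue_size(0, 48000));
	return 0;
}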
/* TODO: make these configurable */
#define INTERRUPT_INTERVAL 16
// For iso header, tstamp and 2 CIP header.
#define IR_CTX_HEADER_SIZE_CIP 16
- if (++s->packet_index >= QUEUE_LENGTH)
+ if (++s->packet_index >= s->queue_size)
s->packet_index = 0;
end:
return err;
}
// Align to actual cycle count for the packet which is going to be scheduled.
-// This module queued the same number of isochronous cycle as QUEUE_LENGTH to
-// skip isochronous cycle, therefore it's OK to just increment the cycle by
-// QUEUE_LENGTH for scheduled cycle.
-static inline u32 compute_it_cycle(const __be32 ctx_header_tstamp)
+// This module queues the same number of isochronous cycles as the size of
+// the queue to skip isochronous cycles, therefore it's OK to just increment
+// the cycle by the size of the queue for the scheduled cycle.
+static inline u32 compute_it_cycle(const __be32 ctx_header_tstamp,
+ unsigned int queue_size)
{
u32 cycle = compute_cycle_count(ctx_header_tstamp);
- return increment_cycle_count(cycle, QUEUE_LENGTH);
+ return increment_cycle_count(cycle, queue_size);
}
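The scheduling rule in the comment above can be illustrated by the wrap
behaviour of the cycle counter. This is a hedged sketch, assuming
increment_cycle_count() performs a plain modular addition and that the
3-bit second field of the context header tstamp gives the cycle count an
8-second (64000-cycle) span; CYCLE_MODULUS is an illustrative name, not
an identifier from the driver.

#define CYCLES_PER_SECOND	8000
/* Assumed: the 3-bit second field yields an 8-second cycle span. */
#define CYCLE_MODULUS		(8 * CYCLES_PER_SECOND)

/* Sketch of increment_cycle_count() as modular addition. */
static unsigned int increment_cycle_count(unsigned int cycle,
					  unsigned int addend)
{
	return (cycle + addend) % CYCLE_MODULUS;
}

/* e.g. with a queue of 8 packets, a context header stamped at cycle
 * 63997 schedules its packet for cycle (63997 + 8) % 64000 = 5.
 */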
static int generate_device_pkt_descs(struct amdtp_stream *s,
for (i = 0; i < packets; ++i) {
struct pkt_desc *desc = descs + i;
- unsigned int index = (s->packet_index + i) % QUEUE_LENGTH;
+ unsigned int index = (s->packet_index + i) % s->queue_size;
unsigned int cycle;
unsigned int payload_length;
unsigned int data_blocks;
for (i = 0; i < packets; ++i) {
struct pkt_desc *desc = descs + i;
- unsigned int index = (s->packet_index + i) % QUEUE_LENGTH;
+ unsigned int index = (s->packet_index + i) % s->queue_size;
- desc->cycle = compute_it_cycle(*ctx_header);
+ desc->cycle = compute_it_cycle(*ctx_header, s->queue_size);
desc->syt = calculate_syt(s, desc->cycle);
desc->data_blocks = calculate_data_blocks(s, desc->syt);
{
struct amdtp_stream *s = private_data;
const __be32 *ctx_header = header;
- unsigned int packets = header_length / sizeof(*ctx_header);
+ unsigned int packets;
int i;
if (s->packet_index < 0)
return;
+ // Calculate the number of packets in buffer and check XRUN.
+ packets = header_length / sizeof(*ctx_header);
+
generate_ideal_pkt_descs(s, s->pkt_descs, ctx_header, packets);
process_ctx_payloads(s, s->pkt_descs, packets);
if (s->packet_index < 0)
return;
- // The number of packets in buffer.
+ // Calculate the number of packets in buffer and check XRUN.
packets = header_length / s->ctx_data.tx.ctx_header_size;
err = generate_device_pkt_descs(s, s->pkt_descs, ctx_header, packets);
context->callback.sc = in_stream_callback;
} else {
- cycle = compute_it_cycle(*ctx_header);
+ cycle = compute_it_cycle(*ctx_header, s->queue_size);
context->callback.sc = out_stream_callback;
}
* amdtp_stream_set_parameters() and it must be started before any PCM or MIDI
* device can be started.
*/
-static int amdtp_stream_start(struct amdtp_stream *s, int channel, int speed)
+static int amdtp_stream_start(struct amdtp_stream *s, int channel, int speed,
+ struct amdtp_domain *d)
{
static const struct {
unsigned int data_block;
[CIP_SFC_88200] = { 0, 67 },
[CIP_SFC_176400] = { 0, 67 },
};
+ unsigned int events_per_buffer = d->events_per_buffer;
unsigned int ctx_header_size;
unsigned int max_ctx_payload_size;
enum dma_data_direction dir;
max_ctx_payload_size -= IT_PKT_HEADER_SIZE_CIP;
}
- err = iso_packets_buffer_init(&s->buffer, s->unit, QUEUE_LENGTH,
+ if (events_per_buffer == 0)
+ events_per_buffer = INTERRUPT_INTERVAL * 3;
+
+ s->queue_size = DIV_ROUND_UP(CYCLES_PER_SECOND * events_per_buffer,
+ amdtp_rate_table[s->sfc]);
+
+ err = iso_packets_buffer_init(&s->buffer, s->unit, s->queue_size,
max_ctx_payload_size, dir);
if (err < 0)
goto err_unlock;
- s->pkt_descs = kcalloc(INTERRUPT_INTERVAL, sizeof(*s->pkt_descs),
+ s->pkt_descs = kcalloc(s->queue_size, sizeof(*s->pkt_descs),
GFP_KERNEL);
if (!s->pkt_descs) {
err = -ENOMEM;
int err = 0;
list_for_each_entry(s, &d->streams, list) {
- err = amdtp_stream_start(s, s->channel, s->speed);
+ err = amdtp_stream_start(s, s->channel, s->speed, d);
/* For packet processing. */
struct fw_iso_context *context;
struct iso_packets_buffer buffer;
+ unsigned int queue_size;
int packet_index;
struct pkt_desc *pkt_descs;
int tag;
struct list_head streams;
unsigned int events_per_period;
+ unsigned int events_per_buffer;
};
int amdtp_domain_init(struct amdtp_domain *d);
void amdtp_domain_stop(struct amdtp_domain *d);
static inline int amdtp_domain_set_events_per_period(struct amdtp_domain *d,
- unsigned int events_per_period)
+ unsigned int events_per_period,
+ unsigned int events_per_buffer)
{
d->events_per_period = events_per_period;
+ d->events_per_buffer = events_per_buffer;
}
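With the extra parameter, a driver hands both sizes to the domain in a
single call. The following is a hypothetical call site, not taken from
this patch: the helpers params_period_size() and params_buffer_size()
from <sound/pcm_params.h> and the error path are illustrative of how a
driver could forward its PCM configuration.

	/* Hypothetical call site: forward the PCM period and buffer
	 * sizes, in frames, to the AMDTP domain. Passing 0 for
	 * events_per_buffer keeps the engine's default
	 * (INTERRUPT_INTERVAL * 3 events).
	 */
	err = amdtp_domain_set_events_per_period(&bebob->domain,
					params_period_size(hw_params),
					params_buffer_size(hw_params));
	if (err < 0)
		return err;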
err = amdtp_domain_set_events_per_period(&bebob->domain,
if (err < 0) {
cmp_connection_release(&bebob->out_conn);
cmp_connection_release(&bebob->in_conn);
goto error;
err = amdtp_domain_set_events_per_period(&dice->domain,
if (err < 0)
goto error;
}
}
err = amdtp_domain_set_events_per_period(&dg00x->domain,
if (err < 0) {
fw_iso_resources_free(&dg00x->rx_resources);
fw_iso_resources_free(&dg00x->tx_resources);
return err;
err = amdtp_domain_set_events_per_period(&ff->domain,
if (err < 0) {
fw_iso_resources_free(&ff->tx_resources);
fw_iso_resources_free(&ff->rx_resources);
}
err = amdtp_domain_set_events_per_period(&efw->domain,
if (err < 0) {
cmp_connection_release(&efw->in_conn);
cmp_connection_release(&efw->out_conn);
}
err = amdtp_domain_set_events_per_period(&motu->domain,
if (err < 0) {
fw_iso_resources_free(&motu->tx_resources);
fw_iso_resources_free(&motu->rx_resources);
}
err = amdtp_domain_set_events_per_period(&oxfw->domain,
if (err < 0) {
cmp_connection_release(&oxfw->in_conn);
if (oxfw->has_output)
}
err = amdtp_domain_set_events_per_period(&tscm->domain,
if (err < 0) {
fw_iso_resources_free(&tscm->tx_resources);
fw_iso_resources_free(&tscm->rx_resources);