// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *   ALSA sequencer Timing queue handling
 *   Copyright (c) 1998-1999 by Frank van de Pol <fvdpol@coil.demon.nl>
 *
 * MAJOR CHANGES
 *   Nov. 13, 1999	Takashi Iwai <iwai@ww.uni-erlangen.de>
 *     - Queues are allocated dynamically via ioctl.
 *     - When owner client is deleted, all owned queues are deleted, too.
 *     - Owner of unlocked queue is kept unmodified even if it is
 *       manipulated by other clients.
 *     - Owner field in SET_QUEUE_OWNER ioctl must be identical with the
 *       caller client, i.e. changing owner to a third client is not
 *       allowed.
 *
 *   Aug. 30, 2000	Takashi Iwai
 *     - Queues are managed in static array again, but with better way.
 *       The API itself is identical.
 *     - The queue is locked when struct snd_seq_queue pointer is returned via
 *       queueptr().  This pointer *MUST* be released afterward by
 *       queuefree().
 *     - Addition of experimental sync support.
 */
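/*
 * Usage sketch for the locking rule above (illustrative, not part of the
 * original file): every successful queueptr() lookup takes a use_lock
 * reference and must therefore be paired with queuefree().
 *
 *	struct snd_seq_queue *q = queueptr(queueid);
 *
 *	if (!q)
 *		return -EINVAL;
 *	... read or modify the queue ...
 *	queuefree(q);	// drop the reference taken by queueptr()
 */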
#include <linux/init.h>
#include <linux/slab.h>
#include <sound/core.h>

#include "seq_memory.h"
#include "seq_queue.h"
#include "seq_clientmgr.h"
#include "seq_timer.h"
/* list of allocated queues */
static struct snd_seq_queue *queue_list[SNDRV_SEQ_MAX_QUEUES];
static DEFINE_SPINLOCK(queue_list_lock);
/* number of queues allocated */
static int num_queues;

int snd_seq_queue_get_cur_queues(void)
{
	return num_queues;
}

/*----------------------------------------------------------------*/
/* assign queue id and insert to list */
static int queue_list_add(struct snd_seq_queue *q)
{
	int i;

	guard(spinlock_irqsave)(&queue_list_lock);
	for (i = 0; i < SNDRV_SEQ_MAX_QUEUES; i++) {
		if (! queue_list[i]) {
			queue_list[i] = q;
			q->queue = i;
			num_queues++;
			return i;
		}
	}
	return -1;
}

static struct snd_seq_queue *queue_list_remove(int id, int client)
{
	struct snd_seq_queue *q;

	guard(spinlock_irqsave)(&queue_list_lock);
	q = queue_list[id];
	if (q) {
		guard(spinlock)(&q->owner_lock);
		if (q->owner == client) {
			/* found */
			q->klocked = 1;
			queue_list[id] = NULL;
			num_queues--;
			return q;
		}
	}
	return NULL;
}

/*----------------------------------------------------------------*/
/* create new queue (constructor) */
static struct snd_seq_queue *queue_new(int owner, int locked)
{
	struct snd_seq_queue *q;

	q = kzalloc(sizeof(*q), GFP_KERNEL);
	if (!q)
		return NULL;

	spin_lock_init(&q->owner_lock);
	spin_lock_init(&q->check_lock);
	mutex_init(&q->timer_mutex);
	snd_use_lock_init(&q->use_lock);

	q->tickq = snd_seq_prioq_new();
	q->timeq = snd_seq_prioq_new();
	q->timer = snd_seq_timer_new();
	if (q->tickq == NULL || q->timeq == NULL || q->timer == NULL) {
		snd_seq_prioq_delete(&q->tickq);
		snd_seq_prioq_delete(&q->timeq);
		snd_seq_timer_delete(&q->timer);
		kfree(q);
		return NULL;
	}

	q->owner = owner;
	q->locked = locked;
	q->klocked = 0;

	return q;
}
/* delete queue (destructor) */
static void queue_delete(struct snd_seq_queue *q)
{
	/* stop and release the timer */
	mutex_lock(&q->timer_mutex);
	snd_seq_timer_stop(q->timer);
	snd_seq_timer_close(q);
	mutex_unlock(&q->timer_mutex);
	/* wait until access free */
	snd_use_lock_sync(&q->use_lock);
	/* release resources... */
	snd_seq_prioq_delete(&q->tickq);
	snd_seq_prioq_delete(&q->timeq);
	snd_seq_timer_delete(&q->timer);

	kfree(q);
}

/*----------------------------------------------------------------*/
/* delete all existing queues */
void snd_seq_queues_delete(void)
{
	int i;

	/* clear list */
	for (i = 0; i < SNDRV_SEQ_MAX_QUEUES; i++) {
		if (queue_list[i])
			queue_delete(queue_list[i]);
	}
}

static void queue_use(struct snd_seq_queue *queue, int client, int use);
/* allocate a new queue -
 * return pointer to new queue or ERR_PTR(-errno) for error
 * The new queue's use_lock is set to 1. It is the caller's responsibility to
 * call snd_use_lock_free(&q->use_lock) (see the caller sketch below).
 */
struct snd_seq_queue *snd_seq_queue_alloc(int client, int locked, unsigned int info_flags)
{
	struct snd_seq_queue *q;

	q = queue_new(client, locked);
	if (q == NULL)
		return ERR_PTR(-ENOMEM);
	q->info_flags = info_flags;
	queue_use(q, client, 1);
	snd_use_lock_use(&q->use_lock);
	if (queue_list_add(q) < 0) {
		snd_use_lock_free(&q->use_lock);
		queue_delete(q);
		return ERR_PTR(-ENOMEM);
	}
	return q;
}
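/*
 * Caller sketch (illustrative, not part of the original file): the queue
 * returned above still carries the use_lock reference taken in
 * snd_seq_queue_alloc(), so a typical caller copies the assigned queue id
 * and then drops that reference.  The real ioctl handler lives in
 * seq_clientmgr.c; this only restates the contract from the comment above,
 * with hypothetical local names.
 *
 *	q = snd_seq_queue_alloc(client, locked, info->flags);
 *	if (IS_ERR(q))
 *		return PTR_ERR(q);
 *	info->queue = q->queue;
 *	snd_use_lock_free(&q->use_lock);	// release the extra reference
 */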
/* delete a queue - queue must be owned by the client */
int snd_seq_queue_delete(int client, int queueid)
{
	struct snd_seq_queue *q;

	if (queueid < 0 || queueid >= SNDRV_SEQ_MAX_QUEUES)
		return -EINVAL;
	q = queue_list_remove(queueid, client);
	if (q == NULL)
		return -EINVAL;
	queue_delete(q);

	return 0;
}
/* return pointer to queue structure for specified id */
struct snd_seq_queue *queueptr(int queueid)
{
	struct snd_seq_queue *q;

	if (queueid < 0 || queueid >= SNDRV_SEQ_MAX_QUEUES)
		return NULL;
	guard(spinlock_irqsave)(&queue_list_lock);
	q = queue_list[queueid];
	if (q)
		snd_use_lock_use(&q->use_lock);
	return q;
}
/* return the (first) queue matching with the specified name */
struct snd_seq_queue *snd_seq_queue_find_name(char *name)
{
	int i;
	struct snd_seq_queue *q;

	for (i = 0; i < SNDRV_SEQ_MAX_QUEUES; i++) {
		q = queueptr(i);
		if (q) {
			if (strncmp(q->name, name, sizeof(q->name)) == 0)
				return q;	/* returned with use_lock held */
			queuefree(q);
		}
	}
	return NULL;
}

/* -------------------------------------------------------- */
/* limit the number of cells processed in one batch to avoid soft lockups */
#define MAX_CELL_PROCESSES_IN_QUEUE	1000

void snd_seq_check_queue(struct snd_seq_queue *q, int atomic, int hop)
{
	struct snd_seq_event_cell *cell;
	snd_seq_tick_time_t cur_tick;
	snd_seq_real_time_t cur_time;
	int processed = 0;

	if (q == NULL)
		return;

	/* make this function non-reentrant */
	scoped_guard(spinlock_irqsave, &q->check_lock) {
		if (q->check_blocked) {
			q->check_again = 1;
			return;		/* other thread is already checking queues */
		}
		q->check_blocked = 1;
	}

 __again:
	/* Process tick queue... */
	cur_tick = snd_seq_timer_get_cur_tick(q->timer);
	for (;;) {
		cell = snd_seq_prioq_cell_out(q->tickq, &cur_tick);
		if (!cell)
			break;
		snd_seq_dispatch_event(cell, atomic, hop);
		if (++processed >= MAX_CELL_PROCESSES_IN_QUEUE)
			goto out; /* the rest processed at the next batch */
	}

	/* Process time queue... */
	cur_time = snd_seq_timer_get_cur_time(q->timer, false);
	for (;;) {
		cell = snd_seq_prioq_cell_out(q->timeq, &cur_time);
		if (!cell)
			break;
		snd_seq_dispatch_event(cell, atomic, hop);
		if (++processed >= MAX_CELL_PROCESSES_IN_QUEUE)
			goto out; /* the rest processed at the next batch */
	}

 out:
	/* free lock */
	scoped_guard(spinlock_irqsave, &q->check_lock) {
		if (q->check_again) {
			q->check_again = 0;
			if (processed < MAX_CELL_PROCESSES_IN_QUEUE)
				goto __again;	/* pick up newly pending events */
		}
		q->check_blocked = 0;
	}
}
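/*
 * Note on the handshake above (descriptive addition, not from the original
 * file): a concurrent caller that finds check_blocked set only flags
 * check_again and returns; the first caller then loops back to __again
 * before clearing check_blocked, so events that became due in the meantime
 * are still dispatched.  Once MAX_CELL_PROCESSES_IN_QUEUE is reached, the
 * remainder is deferred to the next invocation instead of looping further.
 */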
/* enqueue an event to a single queue */
int snd_seq_enqueue_event(struct snd_seq_event_cell *cell, int atomic, int hop)
{
	int dest, err;
	struct snd_seq_queue *q;

	if (snd_BUG_ON(!cell))
		return -EINVAL;
	dest = cell->event.queue;	/* destination queue */
	q = queueptr(dest);
	if (q == NULL)
		return -EINVAL;
	/* handle relative time stamps, convert them into absolute */
	if ((cell->event.flags & SNDRV_SEQ_TIME_MODE_MASK) == SNDRV_SEQ_TIME_MODE_REL) {
		switch (cell->event.flags & SNDRV_SEQ_TIME_STAMP_MASK) {
		case SNDRV_SEQ_TIME_STAMP_TICK:
			cell->event.time.tick += q->timer->tick.cur_tick;
			break;

		case SNDRV_SEQ_TIME_STAMP_REAL:
			snd_seq_inc_real_time(&cell->event.time.time,
					      &q->timer->cur_time);
			break;
		}
		cell->event.flags &= ~SNDRV_SEQ_TIME_MODE_MASK;
		cell->event.flags |= SNDRV_SEQ_TIME_MODE_ABS;
	}
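	/*
	 * Worked example (illustrative): an event flagged
	 * SNDRV_SEQ_TIME_MODE_REL with time.tick = 96, enqueued while the
	 * queue timer reports tick.cur_tick = 480, leaves the block above
	 * with an absolute stamp of 480 + 96 = 576 ticks.
	 */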
	/* enqueue event in the real-time or midi queue */
	switch (cell->event.flags & SNDRV_SEQ_TIME_STAMP_MASK) {
	case SNDRV_SEQ_TIME_STAMP_TICK:
		err = snd_seq_prioq_cell_in(q->tickq, cell);
		break;

	case SNDRV_SEQ_TIME_STAMP_REAL:
	default:
		err = snd_seq_prioq_cell_in(q->timeq, cell);
		break;
	}

	if (err < 0) {
		queuefree(q); /* unlock */
		return err;
	}

	/* trigger dispatching */
	snd_seq_check_queue(q, atomic, hop);

	queuefree(q); /* unlock */

	return 0;
}

/*----------------------------------------------------------------*/
static inline int check_access(struct snd_seq_queue *q, int client)
{
	return (q->owner == client) || (!q->locked && !q->klocked);
}
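/*
 * Example (illustrative): a queue created with locked=1 by client 128 can
 * only be reconfigured by client 128, while an unlocked queue accepts
 * owner/tempo changes from any client as long as it is not temporarily
 * kernel-locked (klocked) by an ongoing operation.
 */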
/* check if the client has permission to modify queue parameters.
 * if it does, lock the queue
 */
static int queue_access_lock(struct snd_seq_queue *q, int client)
{
	int access_ok;

	guard(spinlock_irqsave)(&q->owner_lock);
	access_ok = check_access(q, client);
	if (access_ok)
		q->klocked = 1;
	return access_ok;
}

/* unlock the queue */
static inline void queue_access_unlock(struct snd_seq_queue *q)
{
	guard(spinlock_irqsave)(&q->owner_lock);
	q->klocked = 0;
}
/* exported - only checking permission */
int snd_seq_queue_check_access(int queueid, int client)
{
	struct snd_seq_queue *q = queueptr(queueid);
	int access_ok;

	if (!q)
		return 0;
	scoped_guard(spinlock_irqsave, &q->owner_lock)
		access_ok = check_access(q, client);
	queuefree(q);
	return access_ok;
}

/*----------------------------------------------------------------*/
/*
 * change queue's owner and permission
 */
int snd_seq_queue_set_owner(int queueid, int client, int locked)
{
	struct snd_seq_queue *q = queueptr(queueid);

	if (q == NULL)
		return -EINVAL;

	if (! queue_access_lock(q, client)) {
		queuefree(q);
		return -EPERM;
	}

	scoped_guard(spinlock_irqsave, &q->owner_lock) {
		q->locked = locked ? 1 : 0;
		q->owner = client;
	}
	queue_access_unlock(q);
	queuefree(q);

	return 0;
}

/*----------------------------------------------------------------*/
/* open timer -
 * q->use mutex should be held before calling this function
 * to avoid conflicts with snd_seq_queue_use()
 */
int snd_seq_queue_timer_open(int queueid)
{
	int result = 0;
	struct snd_seq_queue *queue;
	struct snd_seq_timer *tmr;

	queue = queueptr(queueid);
	if (queue == NULL)
		return -EINVAL;
	tmr = queue->timer;
	result = snd_seq_timer_open(queue);
	if (result < 0) {
		/* fall back to the default timer if the configured one fails */
		snd_seq_timer_defaults(tmr);
		result = snd_seq_timer_open(queue);
	}
	queuefree(queue);
	return result;
}
/* close timer -
 * q->use mutex should be held before calling this function
 */
int snd_seq_queue_timer_close(int queueid)
{
	struct snd_seq_queue *queue;
	int result = 0;

	queue = queueptr(queueid);
	if (queue == NULL)
		return -EINVAL;
	snd_seq_timer_close(queue);
	queuefree(queue);
	return result;
}
/* change queue tempo and ppq */
int snd_seq_queue_timer_set_tempo(int queueid, int client,
				  struct snd_seq_queue_tempo *info)
{
	struct snd_seq_queue *q = queueptr(queueid);
	int result;

	if (q == NULL)
		return -EINVAL;
	if (! queue_access_lock(q, client)) {
		queuefree(q);
		return -EPERM;
	}

	result = snd_seq_timer_set_tempo_ppq(q->timer, info->tempo, info->ppq,
					     info->tempo_base);
	if (result >= 0 && info->skew_base > 0)
		result = snd_seq_timer_set_skew(q->timer, info->skew_value,
						info->skew_base);
	queue_access_unlock(q);
	queuefree(q);
	return result;
}
/* use or unuse this queue */
static void queue_use(struct snd_seq_queue *queue, int client, int use)
{
	if (use) {
		if (!test_and_set_bit(client, queue->clients_bitmap))
			queue->clients++;
	} else {
		if (test_and_clear_bit(client, queue->clients_bitmap))
			queue->clients--;
	}
	if (queue->clients) {
		if (use && queue->clients == 1)
			snd_seq_timer_defaults(queue->timer);
		snd_seq_timer_open(queue);
	} else {
		snd_seq_timer_close(queue);
	}
}
/* use or unuse this queue -
 * if it is the first client, start the timer.
 * if it is no longer used by any client, stop the timer.
 * (see the example sequence after this function)
 */
int snd_seq_queue_use(int queueid, int client, int use)
{
	struct snd_seq_queue *queue;

	queue = queueptr(queueid);
	if (queue == NULL)
		return -EINVAL;
	mutex_lock(&queue->timer_mutex);
	queue_use(queue, client, use);
	mutex_unlock(&queue->timer_mutex);
	queuefree(queue);
	return 0;
}
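/*
 * Example sequence (illustrative, client numbers are made up): when client
 * 128 is the first user and calls snd_seq_queue_use(q, 128, 1), queue_use()
 * resets the timer to its defaults and opens it; a second user (client 129)
 * only bumps the usage count.  When the last user calls with use=0, the
 * timer is closed again.
 */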
/*
 * check if queue is used by the client
 * return negative value if the queue is invalid.
 * return 0 if not used, 1 if used.
 */
int snd_seq_queue_is_used(int queueid, int client)
{
	struct snd_seq_queue *q;
	int result;

	q = queueptr(queueid);
	if (q == NULL)
		return -EINVAL; /* invalid queue */
	result = test_bit(client, q->clients_bitmap) ? 1 : 0;
	queuefree(q);
	return result;
}

/*----------------------------------------------------------------*/
/* final stage notification -
 * remove cells for a client that no longer exists (for non-owned queues)
 * or delete this queue (for owned queues)
 */
void snd_seq_queue_client_leave(int client)
{
	int i;
	struct snd_seq_queue *q;

	/* delete own queues from queue list */
	for (i = 0; i < SNDRV_SEQ_MAX_QUEUES; i++) {
		q = queue_list_remove(i, client);
		if (q)
			queue_delete(q);
	}

	/* remove cells from existing queues -
	 * they are not owned by this client
	 */
	for (i = 0; i < SNDRV_SEQ_MAX_QUEUES; i++) {
		q = queueptr(i);
		if (!q)
			continue;
		if (test_bit(client, q->clients_bitmap)) {
			snd_seq_prioq_leave(q->tickq, client, 0);
			snd_seq_prioq_leave(q->timeq, client, 0);
			snd_seq_queue_use(q->queue, client, 0);
		}
		queuefree(q);
	}
}

/*----------------------------------------------------------------*/
/* remove cells from all queues */
void snd_seq_queue_client_leave_cells(int client)
{
	int i;
	struct snd_seq_queue *q;

	for (i = 0; i < SNDRV_SEQ_MAX_QUEUES; i++) {
		q = queueptr(i);
		if (!q)
			continue;
		snd_seq_prioq_leave(q->tickq, client, 0);
		snd_seq_prioq_leave(q->timeq, client, 0);
		queuefree(q);
	}
}
/* remove cells based on flush criteria */
void snd_seq_queue_remove_cells(int client, struct snd_seq_remove_events *info)
{
	int i;
	struct snd_seq_queue *q;

	for (i = 0; i < SNDRV_SEQ_MAX_QUEUES; i++) {
		q = queueptr(i);
		if (!q)
			continue;
		if (test_bit(client, q->clients_bitmap) &&
		    (! (info->remove_mode & SNDRV_SEQ_REMOVE_DEST) ||
		     q->queue == info->queue)) {
			snd_seq_prioq_remove_events(q->tickq, client, info);
			snd_seq_prioq_remove_events(q->timeq, client, info);
		}
		queuefree(q);
	}
}

/*----------------------------------------------------------------*/
/*
 * send events to all subscribed ports
 */
static void queue_broadcast_event(struct snd_seq_queue *q, struct snd_seq_event *ev,
				  int atomic, int hop)
{
	struct snd_seq_event sev;

	sev = *ev;

	sev.flags = SNDRV_SEQ_TIME_STAMP_TICK|SNDRV_SEQ_TIME_MODE_ABS;
	sev.time.tick = q->timer->tick.cur_tick;
	sev.queue = q->queue;
	sev.data.queue.queue = q->queue;	/* pass the queue id */

	/* broadcast events from Timer port */
	sev.source.client = SNDRV_SEQ_CLIENT_SYSTEM;
	sev.source.port = SNDRV_SEQ_PORT_SYSTEM_TIMER;
	sev.dest.client = SNDRV_SEQ_ADDRESS_SUBSCRIBERS;
	snd_seq_kernel_client_dispatch(SNDRV_SEQ_CLIENT_SYSTEM, &sev, atomic, hop);
}
/*
 * process a received queue-control event.
 * (historically intended to be shared with the experimental seq_sync.c
 * support mentioned in the header.)
 */
static void snd_seq_queue_process_event(struct snd_seq_queue *q,
					struct snd_seq_event *ev,
					int atomic, int hop)
{
	switch (ev->type) {
	case SNDRV_SEQ_EVENT_START:
		snd_seq_prioq_leave(q->tickq, ev->source.client, 1);
		snd_seq_prioq_leave(q->timeq, ev->source.client, 1);
		if (! snd_seq_timer_start(q->timer))
			queue_broadcast_event(q, ev, atomic, hop);
		break;

	case SNDRV_SEQ_EVENT_CONTINUE:
		if (! snd_seq_timer_continue(q->timer))
			queue_broadcast_event(q, ev, atomic, hop);
		break;

	case SNDRV_SEQ_EVENT_STOP:
		snd_seq_timer_stop(q->timer);
		queue_broadcast_event(q, ev, atomic, hop);
		break;

	case SNDRV_SEQ_EVENT_TEMPO:
		snd_seq_timer_set_tempo(q->timer, ev->data.queue.param.value);
		queue_broadcast_event(q, ev, atomic, hop);
		break;

	case SNDRV_SEQ_EVENT_SETPOS_TICK:
		if (snd_seq_timer_set_position_tick(q->timer, ev->data.queue.param.time.tick) == 0) {
			queue_broadcast_event(q, ev, atomic, hop);
		}
		break;

	case SNDRV_SEQ_EVENT_SETPOS_TIME:
		if (snd_seq_timer_set_position_time(q->timer, ev->data.queue.param.time.time) == 0) {
			queue_broadcast_event(q, ev, atomic, hop);
		}
		break;

	case SNDRV_SEQ_EVENT_QUEUE_SKEW:
		if (snd_seq_timer_set_skew(q->timer,
					   ev->data.queue.param.skew.value,
					   ev->data.queue.param.skew.base) == 0) {
			queue_broadcast_event(q, ev, atomic, hop);
		}
		break;
	}
}
/*
 * Queue control via timer control port:
 * this function is exported as the callback of the timer port.
 */
int snd_seq_control_queue(struct snd_seq_event *ev, int atomic, int hop)
{
	struct snd_seq_queue *q;

	if (snd_BUG_ON(!ev))
		return -EINVAL;
	q = queueptr(ev->data.queue.queue);

	if (q == NULL)
		return -EINVAL;

	if (! queue_access_lock(q, ev->source.client)) {
		queuefree(q);
		return -EPERM;
	}

	snd_seq_queue_process_event(q, ev, atomic, hop);

	queue_access_unlock(q);
	queuefree(q);

	return 0;
}
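/*
 * Illustrative sketch (not part of the original file): a kernel client can
 * reach the callback above by dispatching a queue-control event to the
 * System Timer port.  "my_client" and "target_queue" are assumed to be set
 * up elsewhere; the field names follow <sound/asequencer.h>.
 *
 *	struct snd_seq_event ev = {};
 *
 *	ev.type = SNDRV_SEQ_EVENT_START;
 *	ev.queue = SNDRV_SEQ_QUEUE_DIRECT;	// deliver without queueing
 *	ev.data.queue.queue = target_queue;	// the queue to be started
 *	ev.dest.client = SNDRV_SEQ_CLIENT_SYSTEM;
 *	ev.dest.port = SNDRV_SEQ_PORT_SYSTEM_TIMER;
 *	snd_seq_kernel_client_dispatch(my_client, &ev, 0, 0);
 */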
/*----------------------------------------------------------------*/

#ifdef CONFIG_SND_PROC_FS
/* exported to seq_info.c */
void snd_seq_info_queues_read(struct snd_info_entry *entry,
			      struct snd_info_buffer *buffer)
{
	int i, bpm;
	struct snd_seq_queue *q;
	struct snd_seq_timer *tmr;
	bool locked;
	int owner;

	for (i = 0; i < SNDRV_SEQ_MAX_QUEUES; i++) {
		q = queueptr(i);
		if (!q)
			continue;

		tmr = q->timer;
		if (tmr->tempo)
			bpm = (60000 * tmr->tempo_base) / tmr->tempo;
		else
			bpm = 0;

		scoped_guard(spinlock_irq, &q->owner_lock) {
			locked = q->locked;
			owner = q->owner;
		}

		snd_iprintf(buffer, "queue %d: [%s]\n", q->queue, q->name);
		snd_iprintf(buffer, "owned by client    : %d\n", owner);
		snd_iprintf(buffer, "lock status        : %s\n", locked ? "Locked" : "Free");
		snd_iprintf(buffer, "queued time events : %d\n", snd_seq_prioq_avail(q->timeq));
		snd_iprintf(buffer, "queued tick events : %d\n", snd_seq_prioq_avail(q->tickq));
		snd_iprintf(buffer, "timer state        : %s\n", tmr->running ? "Running" : "Stopped");
		snd_iprintf(buffer, "timer PPQ          : %d\n", tmr->ppq);
		snd_iprintf(buffer, "current tempo      : %d\n", tmr->tempo);
		snd_iprintf(buffer, "tempo base         : %d ns\n", tmr->tempo_base);
		snd_iprintf(buffer, "current BPM        : %d\n", bpm);
		snd_iprintf(buffer, "current time       : %d.%09d s\n", tmr->cur_time.tv_sec, tmr->cur_time.tv_nsec);
		snd_iprintf(buffer, "current tick       : %d\n", tmr->tick.cur_tick);
		snd_iprintf(buffer, "\n");
		queuefree(q);
	}
}
#endif /* CONFIG_SND_PROC_FS */
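/*
 * Example of the resulting proc output (illustrative values only), roughly
 * as it appears under /proc/asound/seq/queues for one active queue:
 *
 *	queue 0: [Midi Queue]
 *	owned by client    : 128
 *	lock status        : Free
 *	queued time events : 0
 *	queued tick events : 3
 *	timer state        : Running
 *	timer PPQ          : 96
 *	current tempo      : 500000
 *	tempo base         : 1000 ns
 *	current BPM        : 120
 *	current tick       : 1824
 */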