// SPDX-License-Identifier: GPL-2.0
/* Watch queue and general notification mechanism, built on pipes
 *
 * Copyright (C) 2020 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * See Documentation/core-api/watch_queue.rst
 */
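
/*
 * Userspace view (an illustrative sketch only; see the document referenced
 * above and samples/watch_queue/ for a complete example):
 *
 *	int fds[2];
 *
 *	pipe2(fds, O_NOTIFICATION_PIPE);
 *	ioctl(fds[0], IOC_WATCH_QUEUE_SET_SIZE, 256);
 *	// Optionally restrict what gets delivered:
 *	// ioctl(fds[0], IOC_WATCH_QUEUE_SET_FILTER, &filter);
 *	// ...then attach watches (e.g. keyctl(KEYCTL_WATCH_KEY, ...)) and
 *	// read() notification records from fds[0].
 */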

#define pr_fmt(fmt) "watchq: " fmt
#include <linux/module.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/printk.h>
#include <linux/miscdevice.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/poll.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/file.h>
#include <linux/security.h>
#include <linux/cred.h>
#include <linux/sched/signal.h>
#include <linux/watch_queue.h>
#include <linux/pipe_fs_i.h>

MODULE_DESCRIPTION("Watch queue");
MODULE_AUTHOR("Red Hat, Inc.");
MODULE_LICENSE("GPL");

#define WATCH_QUEUE_NOTE_SIZE 128
#define WATCH_QUEUE_NOTES_PER_PAGE (PAGE_SIZE / WATCH_QUEUE_NOTE_SIZE)
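/* e.g. 4096 / 128 = 32 notes per page on a 4KiB-page architecture. */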

/*
 * This must be called under the RCU read-lock, which makes
 * sure that the wqueue still exists. It can then take the lock,
 * and check that the wqueue hasn't been destroyed, which in
 * turn makes sure that the notification pipe still exists.
 */
static inline bool lock_wqueue(struct watch_queue *wqueue)
{
	spin_lock_bh(&wqueue->lock);
	if (unlikely(wqueue->defunct)) {
		spin_unlock_bh(&wqueue->lock);
		return false;
	}
	return true;
}

static inline void unlock_wqueue(struct watch_queue *wqueue)
{
	spin_unlock_bh(&wqueue->lock);
}
static void watch_queue_pipe_buf_release(struct pipe_inode_info *pipe,
					 struct pipe_buffer *buf)
{
	struct watch_queue *wqueue = (struct watch_queue *)buf->private;
	struct page *page;
	unsigned int bit;

	/* We need to work out which note within the page this refers to, but
	 * the note might have been maximum size, so merely ANDing the offset
	 * off doesn't work. OTOH, the note must've been more than zero size.
	 */
	bit = buf->offset + buf->len;
	if ((bit & (WATCH_QUEUE_NOTE_SIZE - 1)) == 0)
		bit -= WATCH_QUEUE_NOTE_SIZE;
	bit /= WATCH_QUEUE_NOTE_SIZE;
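
	/*
	 * Worked example (note size 128): a 40-byte note in page slot 1 has
	 * buf->offset 128, so bit = 128 + 40 = 168, which is not a multiple
	 * of 128, giving note 168 / 128 = 1.  A full 128-byte note in slot 1
	 * ends at 256, a multiple of 128, so we first step back one note
	 * size: (256 - 128) / 128 = 1.
	 */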

	page = buf->page;
	bit += page->index;

	set_bit(bit, wqueue->notes_bitmap);
	generic_pipe_buf_release(pipe, buf);
}

// No try_steal function => no stealing
#define watch_queue_pipe_buf_try_steal NULL

/* New data written to a pipe may be appended to a buffer with this type. */
static const struct pipe_buf_operations watch_queue_pipe_buf_ops = {
	.release	= watch_queue_pipe_buf_release,
	.try_steal	= watch_queue_pipe_buf_try_steal,
	.get		= generic_pipe_buf_get,
};

/*
 * Post a notification to a watch queue.
 *
 * Must be called with the RCU lock for reading, and the
 * watch_queue lock held, which guarantees that the pipe
 * hasn't been released.
 */
static bool post_one_notification(struct watch_queue *wqueue,
				  struct watch_notification *n)
{
	void *p;
	struct pipe_inode_info *pipe = wqueue->pipe;
	struct pipe_buffer *buf;
	struct page *page;
	unsigned int head, tail, mask, note, offset, len;
	bool done = false;

	if (!pipe)
		return false;

	spin_lock_irq(&pipe->rd_wait.lock);

	mask = pipe->ring_size - 1;
	head = pipe->head;
	tail = pipe->tail;
	if (pipe_full(head, tail, pipe->ring_size))
		goto lost;

	note = find_first_bit(wqueue->notes_bitmap, wqueue->nr_notes);
	if (note >= wqueue->nr_notes)
		goto lost;

	page = wqueue->notes[note / WATCH_QUEUE_NOTES_PER_PAGE];
	offset = (note % WATCH_QUEUE_NOTES_PER_PAGE) * WATCH_QUEUE_NOTE_SIZE;
	get_page(page);
	len = n->info & WATCH_INFO_LENGTH;
	p = kmap_atomic(page);
	memcpy(p + offset, n, len);
	kunmap_atomic(p);

	buf = &pipe->bufs[head & mask];
	buf->page = page;
	buf->private = (unsigned long)wqueue;
	buf->ops = &watch_queue_pipe_buf_ops;
	buf->offset = offset;
	buf->len = len;
	buf->flags = PIPE_BUF_FLAG_WHOLE;
	smp_store_release(&pipe->head, head + 1); /* vs pipe_read() */

	if (!test_and_clear_bit(note, wqueue->notes_bitmap)) {
		spin_unlock_irq(&pipe->rd_wait.lock);
		BUG();
	}
	wake_up_interruptible_sync_poll_locked(&pipe->rd_wait, EPOLLIN | EPOLLRDNORM);
	done = true;

out:
	spin_unlock_irq(&pipe->rd_wait.lock);
	if (done)
		kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
	return done;

lost:
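	/*
	 * The ring is full or no note slot is free: flag the most recently
	 * written buffer so the reader can be told that notifications were
	 * lost at this point.
	 */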
	buf = &pipe->bufs[(head - 1) & mask];
	buf->flags |= PIPE_BUF_FLAG_LOSS;
	goto out;
}

/*
 * Apply filter rules to a notification.
 */
static bool filter_watch_notification(const struct watch_filter *wf,
				      const struct watch_notification *n)
{
	const struct watch_type_filter *wt;
	unsigned int st_bits = sizeof(wt->subtype_filter[0]) * 8;
	unsigned int st_index = n->subtype / st_bits;
	unsigned int st_bit = 1U << (n->subtype % st_bits);
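	/*
	 * e.g. with 32-bit filter words, subtype 9 selects word 9 / 32 = 0
	 * and bit 1 << (9 % 32) = 0x200 within it.
	 */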
	int i;

	if (!test_bit(n->type, wf->type_filter))
		return false;

	for (i = 0; i < wf->nr_filters; i++) {
		wt = &wf->filters[i];
		if (n->type == wt->type &&
		    (wt->subtype_filter[st_index] & st_bit) &&
		    (n->info & wt->info_mask) == wt->info_filter)
			return true;
	}

	return false; /* If there is a filter, the default is to reject. */
}

/**
 * __post_watch_notification - Post an event notification
 * @wlist: The watch list to post the event to.
 * @n: The notification record to post.
 * @cred: The creds of the process that triggered the notification.
 * @id: The ID to match on the watch.
 *
 * Post a notification of an event into a set of watch queues and let the users
 * know.
 *
 * The size of the notification should be set in n->info & WATCH_INFO_LENGTH and
 * should be in units of sizeof(*n).
 */
void __post_watch_notification(struct watch_list *wlist,
			       struct watch_notification *n,
			       const struct cred *cred,
			       u64 id)
{
	const struct watch_filter *wf;
	struct watch_queue *wqueue;
	struct watch *watch;

	if (((n->info & WATCH_INFO_LENGTH) >> WATCH_INFO_LENGTH__SHIFT) == 0) {
		WARN_ON(1);
		return;
	}

	rcu_read_lock();

	hlist_for_each_entry_rcu(watch, &wlist->watchers, list_node) {
		if (watch->id != id)
			continue;
		n->info &= ~WATCH_INFO_ID;
		n->info |= watch->info_id;

		wqueue = rcu_dereference(watch->queue);
		wf = rcu_dereference(wqueue->filter);
		if (wf && !filter_watch_notification(wf, n))
			continue;

		if (security_post_notification(watch->cred, cred, n) < 0)
			continue;

		if (lock_wqueue(wqueue)) {
			post_one_notification(wqueue, n);
			unlock_wqueue(wqueue);
		}
	}

	rcu_read_unlock();
}
EXPORT_SYMBOL(__post_watch_notification);

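/*
 * A minimal sketch of a poster (illustrative only), mirroring the removal
 * notification built in remove_watch_from_object() below and using the
 * NULL-checking post_watch_notification() wrapper from
 * <linux/watch_queue.h>:
 *
 *	struct watch_notification n;
 *
 *	n.type	  = WATCH_TYPE_META;
 *	n.subtype = WATCH_META_REMOVAL_NOTIFICATION;
 *	n.info	  = watch_sizeof(n);
 *	post_watch_notification(wlist, &n, current_cred(), 0);
 */
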
/*
 * Preallocate sufficient pages for the requested number of notifications.
 */
long watch_queue_set_size(struct pipe_inode_info *pipe, unsigned int nr_notes)
{
	struct watch_queue *wqueue = pipe->watch_queue;
	struct page **pages;
	unsigned long *bitmap;
	unsigned long user_bufs;
	int ret, i, nr_pages;

	if (!wqueue)
		return -ENODEV;
	if (wqueue->notes)
		return -EBUSY;

	if (nr_notes < 1 ||
	    nr_notes > 512) /* TODO: choose a better hard limit */
		return -EINVAL;

	nr_pages = (nr_notes + WATCH_QUEUE_NOTES_PER_PAGE - 1);
	nr_pages /= WATCH_QUEUE_NOTES_PER_PAGE;
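	/*
	 * e.g. with 4KiB pages (32 notes/page), nr_notes = 100 gives
	 * nr_pages = 4; nr_notes is then rounded up to 4 * 32 = 128 below.
	 */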
	user_bufs = account_pipe_buffers(pipe->user, pipe->nr_accounted, nr_pages);

	if (nr_pages > pipe->max_usage &&
	    (too_many_pipe_buffers_hard(user_bufs) ||
	     too_many_pipe_buffers_soft(user_bufs)) &&
	    pipe_is_unprivileged_user()) {
		ret = -EPERM;
		goto error;
	}

	nr_notes = nr_pages * WATCH_QUEUE_NOTES_PER_PAGE;
	ret = pipe_resize_ring(pipe, roundup_pow_of_two(nr_notes));
	if (ret < 0)
		goto error;

	ret = -ENOMEM;
	pages = kcalloc(nr_pages, sizeof(struct page *), GFP_KERNEL);
	if (!pages)
		goto error;

	for (i = 0; i < nr_pages; i++) {
		pages[i] = alloc_page(GFP_KERNEL);
		if (!pages[i])
			goto error_p;
		pages[i]->index = i * WATCH_QUEUE_NOTES_PER_PAGE;
	}

	bitmap = bitmap_alloc(nr_notes, GFP_KERNEL);
	if (!bitmap)
		goto error_p;

	bitmap_fill(bitmap, nr_notes);
	wqueue->notes = pages;
	wqueue->notes_bitmap = bitmap;
	wqueue->nr_pages = nr_pages;
	wqueue->nr_notes = nr_notes;
	return 0;

error_p:
	while (--i >= 0)
		__free_page(pages[i]);
	kfree(pages);
error:
	(void) account_pipe_buffers(pipe->user, nr_pages, pipe->nr_accounted);
	return ret;
}

/*
 * Set the filter on a watch queue.
 */
long watch_queue_set_filter(struct pipe_inode_info *pipe,
			    struct watch_notification_filter __user *_filter)
{
	struct watch_notification_type_filter *tf;
	struct watch_notification_filter filter;
	struct watch_type_filter *q;
	struct watch_filter *wfilter;
	struct watch_queue *wqueue = pipe->watch_queue;
	int ret, nr_filter = 0, i;

	if (!wqueue)
		return -ENODEV;

	if (!_filter) {
		/* Remove the old filter */
		wfilter = NULL;
		goto set;
	}

	/* Grab the user's filter specification */
	if (copy_from_user(&filter, _filter, sizeof(filter)) != 0)
		return -EFAULT;
	if (filter.nr_filters == 0 ||
	    filter.nr_filters > 16 ||
	    filter.__reserved != 0)
		return -EINVAL;

	tf = memdup_user(_filter->filters, filter.nr_filters * sizeof(*tf));
	if (IS_ERR(tf))
		return PTR_ERR(tf);

	ret = -EINVAL;
	for (i = 0; i < filter.nr_filters; i++) {
		if ((tf[i].info_filter & ~tf[i].info_mask) ||
		    tf[i].info_mask & WATCH_INFO_LENGTH)
			goto err_filter;
		/* Ignore any unknown types */
		if (tf[i].type >= WATCH_TYPE__NR)
			continue;
		nr_filter++;
	}

	/* Now we need to build the internal filter from only the relevant
	 * user-specified filters.
	 */
	ret = -ENOMEM;
	wfilter = kzalloc(struct_size(wfilter, filters, nr_filter), GFP_KERNEL);
	if (!wfilter)
		goto err_filter;
	wfilter->nr_filters = nr_filter;

	q = wfilter->filters;
	for (i = 0; i < filter.nr_filters; i++) {
		if (tf[i].type >= WATCH_TYPE__NR)
			continue;

		q->type			= tf[i].type;
		q->info_filter		= tf[i].info_filter;
		q->info_mask		= tf[i].info_mask;
		q->subtype_filter[0]	= tf[i].subtype_filter[0];
		__set_bit(q->type, wfilter->type_filter);
		q++;
	}

	kfree(tf);
set:
	pipe_lock(pipe);
	wfilter = rcu_replace_pointer(wqueue->filter, wfilter,
				      lockdep_is_held(&pipe->mutex));
	pipe_unlock(pipe);
	if (wfilter)
		kfree_rcu(wfilter, rcu);
	return 0;

err_filter:
	kfree(tf);
	return ret;
}

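/*
 * Illustrative userspace counterpart (a sketch only; it statically
 * initialises the flexible filters[] array in the same way the programs
 * under samples/watch_queue/ do):
 *
 *	static struct watch_notification_filter filter = {
 *		.nr_filters = 1,
 *		.filters = {
 *			[0] = {
 *				.type = WATCH_TYPE_KEY_NOTIFY,
 *				.subtype_filter[0] = UINT_MAX,
 *			},
 *		},
 *	};
 *
 *	ioctl(fd, IOC_WATCH_QUEUE_SET_FILTER, &filter);
 */
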
static void __put_watch_queue(struct kref *kref)
{
	struct watch_queue *wqueue =
		container_of(kref, struct watch_queue, usage);
	struct watch_filter *wfilter;
	int i;

	for (i = 0; i < wqueue->nr_pages; i++)
		__free_page(wqueue->notes[i]);
	kfree(wqueue->notes);
	bitmap_free(wqueue->notes_bitmap);

	wfilter = rcu_access_pointer(wqueue->filter);
	if (wfilter)
		kfree_rcu(wfilter, rcu);
	kfree_rcu(wqueue, rcu);
}

/**
 * put_watch_queue - Dispose of a ref on a watchqueue.
 * @wqueue: The watch queue to unref.
 */
void put_watch_queue(struct watch_queue *wqueue)
{
	kref_put(&wqueue->usage, __put_watch_queue);
}
EXPORT_SYMBOL(put_watch_queue);

static void free_watch(struct rcu_head *rcu)
{
	struct watch *watch = container_of(rcu, struct watch, rcu);

	put_watch_queue(rcu_access_pointer(watch->queue));
	atomic_dec(&watch->cred->user->nr_watches);
	put_cred(watch->cred);
	kfree(watch);
}

static void __put_watch(struct kref *kref)
{
	struct watch *watch = container_of(kref, struct watch, usage);

	call_rcu(&watch->rcu, free_watch);
}

/*
 * Discard a watch.
 */
static void put_watch(struct watch *watch)
{
	kref_put(&watch->usage, __put_watch);
}

/**
 * init_watch - Initialise a watch
 * @watch: The watch to initialise.
 * @wqueue: The queue to assign.
 *
 * Initialise a watch and set the watch queue.
 */
void init_watch(struct watch *watch, struct watch_queue *wqueue)
{
	kref_init(&watch->usage);
	INIT_HLIST_NODE(&watch->list_node);
	INIT_HLIST_NODE(&watch->queue_node);
	rcu_assign_pointer(watch->queue, wqueue);
}

static int add_one_watch(struct watch *watch, struct watch_list *wlist, struct watch_queue *wqueue)
{
	const struct cred *cred;
	struct watch *w;

	hlist_for_each_entry(w, &wlist->watchers, list_node) {
		struct watch_queue *wq = rcu_access_pointer(w->queue);
		if (wqueue == wq && watch->id == w->id)
			return -EBUSY;
	}

	cred = current_cred();
	if (atomic_inc_return(&cred->user->nr_watches) > task_rlimit(current, RLIMIT_NOFILE)) {
		atomic_dec(&cred->user->nr_watches);
		return -EAGAIN;
	}

	watch->cred = get_cred(cred);
	rcu_assign_pointer(watch->watch_list, wlist);

	kref_get(&wqueue->usage);
	kref_get(&watch->usage);
	hlist_add_head(&watch->queue_node, &wqueue->watches);
	hlist_add_head_rcu(&watch->list_node, &wlist->watchers);
	return 0;
}

/**
 * add_watch_to_object - Add a watch on an object to a watch list
 * @watch: The watch to add
 * @wlist: The watch list to add to
 *
 * @watch->queue must have been set to point to the queue to post notifications
 * to and the watch list of the object to be watched. @watch->cred must also
 * have been set to the appropriate credentials and a ref taken on them.
 *
 * The caller must pin the queue and the list both and must hold the list
 * locked against racing watch additions/removals.
 */
int add_watch_to_object(struct watch *watch, struct watch_list *wlist)
{
	struct watch_queue *wqueue;
	int ret = -ENOENT;

	rcu_read_lock();

	wqueue = rcu_access_pointer(watch->queue);
	if (lock_wqueue(wqueue)) {
		spin_lock(&wlist->lock);
		ret = add_one_watch(watch, wlist, wqueue);
		spin_unlock(&wlist->lock);
		unlock_wqueue(wqueue);
	}

	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL(add_watch_to_object);

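/*
 * A typical registration sequence (a sketch modelled on the keys
 * subsystem; object and watchers_lock are hypothetical names):
 *
 *	watch = kzalloc(sizeof(*watch), GFP_KERNEL);
 *	init_watch(watch, wqueue);
 *	watch->id	= id;
 *	watch->info_id	= (u32)id << WATCH_INFO_ID__SHIFT;
 *	down_write(&object->watchers_lock);
 *	ret = add_watch_to_object(watch, &object->watchers);
 *	up_write(&object->watchers_lock);
 */
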
/**
 * remove_watch_from_object - Remove a watch or all watches from an object.
 * @wlist: The watch list to remove from
 * @wq: The watch queue of interest (ignored if @all is true)
 * @id: The ID of the watch to remove (ignored if @all is true)
 * @all: True to remove all watches
 *
 * Remove a specific watch or all watches from an object. A notification is
 * sent to the watcher to tell them that this happened.
 */
int remove_watch_from_object(struct watch_list *wlist, struct watch_queue *wq,
			     u64 id, bool all)
{
	struct watch_notification_removal n;
	struct watch_queue *wqueue;
	struct watch *watch;
	int ret = -EBADSLT;

	rcu_read_lock();

again:
	spin_lock(&wlist->lock);
	hlist_for_each_entry(watch, &wlist->watchers, list_node) {
		if (all ||
		    (watch->id == id && rcu_access_pointer(watch->queue) == wq))
			goto found;
	}
	spin_unlock(&wlist->lock);
	goto out;

found:
	ret = 0;
	hlist_del_init_rcu(&watch->list_node);
	rcu_assign_pointer(watch->watch_list, NULL);
	spin_unlock(&wlist->lock);

	/* We now own the reference on watch that used to belong to wlist. */

	n.watch.type = WATCH_TYPE_META;
	n.watch.subtype = WATCH_META_REMOVAL_NOTIFICATION;
	n.watch.info = watch->info_id | watch_sizeof(n.watch);
	n.id = id;
	if (id != 0)
		n.watch.info = watch->info_id | watch_sizeof(n);

	wqueue = rcu_dereference(watch->queue);

	if (lock_wqueue(wqueue)) {
		post_one_notification(wqueue, &n.watch);

		if (!hlist_unhashed(&watch->queue_node)) {
			hlist_del_init_rcu(&watch->queue_node);
			put_watch(watch);
		}

		unlock_wqueue(wqueue);
	}

	if (wlist->release_watch) {
		void (*release_watch)(struct watch *);

		release_watch = wlist->release_watch;
		rcu_read_unlock();
		(*release_watch)(watch);
		rcu_read_lock();
	}
	put_watch(watch);

	if (all && !hlist_empty(&wlist->watchers))
		goto again;
out:
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL(remove_watch_from_object);

/*
 * Remove all the watches that are contributory to a queue. This has the
 * potential to race with removal of the watches by the destruction of the
 * objects being watched or with the distribution of notifications.
 */
void watch_queue_clear(struct watch_queue *wqueue)
{
	struct watch_list *wlist;
	struct watch *watch;
	bool release;

	rcu_read_lock();
	spin_lock_bh(&wqueue->lock);

	/* Prevent new notifications from being stored. */
	wqueue->defunct = true;

	while (!hlist_empty(&wqueue->watches)) {
		watch = hlist_entry(wqueue->watches.first, struct watch, queue_node);
		hlist_del_init_rcu(&watch->queue_node);
		/* We now own a ref on the watch. */
		spin_unlock_bh(&wqueue->lock);

		/* We can't do the next bit under the queue lock as we need to
		 * get the list lock - which would cause a deadlock if someone
		 * was removing from the opposite direction at the same time or
		 * posting a notification.
		 */
		wlist = rcu_dereference(watch->watch_list);
		if (wlist) {
			void (*release_watch)(struct watch *);

			spin_lock(&wlist->lock);

			release = !hlist_unhashed(&watch->list_node);
			if (release) {
				hlist_del_init_rcu(&watch->list_node);
				rcu_assign_pointer(watch->watch_list, NULL);

				/* We now own a second ref on the watch. */
			}

			release_watch = wlist->release_watch;
			spin_unlock(&wlist->lock);

			if (release) {
				if (release_watch) {
					rcu_read_unlock();
					/* This might need to call dput(), so
					 * we have to drop all the locks.
					 */
					(*release_watch)(watch);
					rcu_read_lock();
				}
				put_watch(watch);
			}
		}

		put_watch(watch);
		spin_lock_bh(&wqueue->lock);
	}

	spin_unlock_bh(&wqueue->lock);
	rcu_read_unlock();
}

/**
 * get_watch_queue - Get a watch queue from its file descriptor.
 * @fd: The fd to query.
 */
struct watch_queue *get_watch_queue(int fd)
{
	struct pipe_inode_info *pipe;
	struct watch_queue *wqueue = ERR_PTR(-EINVAL);
	struct fd f;

	f = fdget(fd);
	if (f.file) {
		pipe = get_pipe_info(f.file, false);
		if (pipe && pipe->watch_queue) {
			wqueue = pipe->watch_queue;
			kref_get(&wqueue->usage);
		}
		fdput(f);
	}

	return wqueue;
}
EXPORT_SYMBOL(get_watch_queue);

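/*
 * Note for orientation: the pipe code calls watch_queue_init() below when
 * userspace creates the pipe with pipe2(..., O_NOTIFICATION_PIPE).
 */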
/*
 * Initialise a watch queue
 */
int watch_queue_init(struct pipe_inode_info *pipe)
{
	struct watch_queue *wqueue;

	wqueue = kzalloc(sizeof(*wqueue), GFP_KERNEL);
	if (!wqueue)
		return -ENOMEM;

	wqueue->pipe = pipe;
	kref_init(&wqueue->usage);
	spin_lock_init(&wqueue->lock);
	INIT_HLIST_HEAD(&wqueue->watches);

	pipe->watch_queue = wqueue;
	return 0;
}