1 /* cyasblkdev_queue.c - Antioch Linux Block Driver queue source file
2 ## ===========================
3 ## Copyright (C) 2010 Cypress Semiconductor
5 ## This program is free software; you can redistribute it and/or
6 ## modify it under the terms of the GNU General Public License
7 ## as published by the Free Software Foundation; either version 2
8 ## of the License, or (at your option) any later version.
10 ## This program is distributed in the hope that it will be useful,
11 ## but WITHOUT ANY WARRANTY; without even the implied warranty of
12 ## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 ## GNU General Public License for more details.
15 ## You should have received a copy of the GNU General Public License
16 ## along with this program; if not, write to the Free Software
17 ## Foundation, Inc., 51 Franklin Street, Fifth Floor,
18 ## Boston, MA 02110-1301, USA.
19 ## ===========================
23 * Request queue handling for Antioch block device driver.
24 * Based on the mmc queue handling code by Russell King in the
25 * linux 2.6.10 kernel.
29 * linux/drivers/mmc/mmc_queue.c
31 * Copyright (C) 2003 Russell King, All Rights Reserved.
33 * This program is free software; you can redistribute it and/or modify
34 * it under the terms of the GNU General Public License version 2 as
35 * published by the Free Software Foundation.
39 #include <linux/module.h>
40 #include <linux/blkdev.h>
42 #include "cyasblkdev_queue.h"
44 #define CYASBLKDEV_QUEUE_EXIT (1 << 0)
45 #define CYASBLKDEV_QUEUE_SUSPENDED (1 << 1)
46 #define CY_AS_USE_ASYNC_API
50 /* print flags by name */
/*
 * rq_flag_bit_names[]: printable name for each request-flag bit position,
 * indexed by bit number; consumed by verbose_rq_flags() below.
 * NOTE(review): entry order must stay in sync with the kernel's rq_flag_bits
 * enum in include/linux/blkdev.h for the printed names to be accurate —
 * confirm against the kernel version this driver targets.
 */
51 const char *rq_flag_bit_names[] = {
52 "REQ_RW", /* not set, read. set, write */
53 "REQ_FAILFAST", /* no low level driver retries */
54 "REQ_SORTED", /* elevator knows about this request */
55 "REQ_SOFTBARRIER", /* may not be passed by ioscheduler */
56 "REQ_HARDBARRIER", /* may not be passed by drive either */
57 "REQ_FUA", /* forced unit access */
58 "REQ_NOMERGE", /* don't touch this for merging */
59 "REQ_STARTED", /* drive already may have started this one */
60 "REQ_DONTPREP", /* don't call prep for this one */
61 "REQ_QUEUED", /* uses queueing */
62 "REQ_ELVPRIV", /* elevator private data attached */
63 "REQ_FAILED", /* set if the request failed */
64 "REQ_QUIET", /* don't worry about errors */
65 "REQ_PREEMPT", /* set for "ide_preempt" requests */
66 "REQ_ORDERED_COLOR",/* is before or after barrier */
67 "REQ_RW_SYNC", /* request is sync (O_DIRECT) */
68 "REQ_ALLOCED", /* request came from our alloc pool */
69 "REQ_RW_META", /* metadata io request */
70 "REQ_COPY_USER", /* contains copies of user pages */
71 "REQ_NR_BITS", /* stops here */
/*
 * verbose_rq_flags() - debug helper: print the name of every request-flag
 * bit set in @flags via DBGPRN, using rq_flag_bit_names[] above.
 * NOTE(review): the loop scans all 32 bit positions but the name table has
 * fewer entries — confirm the (elided) per-bit test guards the index.
 */
74 void verbose_rq_flags(int flags)
79 for (i = 0; i < 32; i++) {
81 DBGPRN("<1>%s", rq_flag_bit_names[i]);
88 * Prepare a -BLK_DEV request. Essentially, this means passing the
89 * preparation off to the media driver. The media driver will
90 * create request to CyAsDev.
/*
 * Rejects anything that is not a normal filesystem request (REQ_TYPE_FS)
 * or a discard; accepted requests are marked REQ_DONTPREP so the block
 * layer does not call this prep hook again for the same request.
 */
92 static int cyasblkdev_prep_request(
93 struct request_queue *q, struct request *req)
97 /* we only like normal block requests.*/
98 if (req->cmd_type != REQ_TYPE_FS && !(req->cmd_flags & REQ_DISCARD)) {
99 #ifndef WESTBRIDGE_NDEBUG
100 cy_as_hal_print_message("%s:%x bad request received\n",
101 __func__, current->pid);
104 blk_dump_rq_flags(req, "cyasblkdev bad request");
/* mark as prepared so the block layer won't re-invoke prep for this req */
108 req->cmd_flags |= REQ_DONTPREP;
113 /* queue worker thread */
/*
 * cyasblkdev_queue_thread() - kernel thread that drains the request queue.
 * @d: the struct cyasblkdev_queue this thread serves.
 *
 * Loop: under q->queue_lock, fetch the next request (unless the queue is
 * plugged); with no request pending, either exit on CYASBLKDEV_QUEUE_EXIT
 * or sleep on bq->thread_wq until cyasblkdev_request() wakes us; with a
 * request, hand it to bq->issue_fn().  Signals bq->thread_complete both
 * after startup (so cyasblkdev_init_queue() can proceed) and on exit
 * (so cyasblkdev_cleanup_queue() can proceed).
 */
114 static int cyasblkdev_queue_thread(void *d)
116 DECLARE_WAITQUEUE(wait, current);
117 struct cyasblkdev_queue *bq = d;
118 struct request_queue *q = bq->queue;
124 * set iothread to ensure that we aren't put to sleep by
125 * the process freezing. we handle suspension ourselves.
127 daemonize("cyasblkdev_queue_thread");
129 /* signal to queue_init() so it could continue */
130 complete(&bq->thread_complete);
132 down(&bq->thread_sem);
133 add_wait_queue(&bq->thread_wq, &wait);
135 qth_pid = current->pid;
137 #ifndef WESTBRIDGE_NDEBUG
138 cy_as_hal_print_message(
139 "%s:%x started, bq:%p, q:%p\n", __func__, qth_pid, bq, q);
143 struct request *req = NULL;
145 /* the thread wants to be woken up by signals as well */
146 set_current_state(TASK_INTERRUPTIBLE);
148 spin_lock_irq(q->queue_lock);
150 #ifndef WESTBRIDGE_NDEBUG
151 cy_as_hal_print_message(
152 "%s: for bq->queue is null\n", __func__);
156 /* check if queue is plugged */
157 if (!blk_queue_plugged(q)) {
/* pull the next request off the queue while holding queue_lock */
158 bq->req = req = blk_fetch_request(q);
159 #ifndef WESTBRIDGE_NDEBUG
160 cy_as_hal_print_message(
161 "%s: blk_fetch_request:%x\n",
162 __func__, (uint32_t)req);
165 #ifndef WESTBRIDGE_NDEBUG
166 cy_as_hal_print_message(
167 "%s: queue plugged, "
168 "skip blk_fetch()\n", __func__);
172 spin_unlock_irq(q->queue_lock);
174 #ifndef WESTBRIDGE_NDEBUG
175 cy_as_hal_print_message(
176 "%s: checking if request queue is null\n", __func__);
/* no request fetched: exit if asked to, otherwise go back to sleep */
180 if (bq->flags & CYASBLKDEV_QUEUE_EXIT) {
181 #ifndef WESTBRIDGE_NDEBUG
182 cy_as_hal_print_message(
183 "%s:got QUEUE_EXIT flag\n", __func__);
189 #ifndef WESTBRIDGE_NDEBUG
190 cy_as_hal_print_message(
191 "%s: request queue is null, goto sleep, "
192 "thread_sem->count=%d\n",
193 __func__, bq->thread_sem.count);
/* NOTE(review): dropping queue_lock here if still held looks like a
 * workaround — confirm the lock can actually be held at this point. */
194 if (spin_is_locked(q->queue_lock)) {
195 cy_as_hal_print_message("%s: queue_lock "
196 "is locked, need to release\n", __func__);
197 spin_unlock(q->queue_lock);
199 if (spin_is_locked(q->queue_lock))
200 cy_as_hal_print_message(
201 "%s: unlock did not work\n",
204 cy_as_hal_print_message(
205 "%s: checked lock, is not locked\n",
212 /* yields to the next rdytorun proc,
213 * then goes back to sleep*/
215 down(&bq->thread_sem);
217 #ifndef WESTBRIDGE_NDEBUG
218 cy_as_hal_print_message(
219 "%s: wake_up,continue\n",
225 /* new req received, issue it to the driver */
226 set_current_state(TASK_RUNNING);
228 #ifndef WESTBRIDGE_NDEBUG
229 cy_as_hal_print_message(
230 "%s: issued a RQ:%x\n",
231 __func__, (uint32_t)req);
/* hand the request to the media driver's issue callback */
234 bq->issue_fn(bq, req);
236 #ifndef WESTBRIDGE_NDEBUG
237 cy_as_hal_print_message(
238 "%s: bq->issue_fn() returned\n",
245 set_current_state(TASK_RUNNING);
246 remove_wait_queue(&bq->thread_wq, &wait);
/* notify cleanup_queue() that the thread has terminated */
249 complete_and_exit(&bq->thread_complete, 0);
251 #ifndef WESTBRIDGE_NDEBUG
252 cy_as_hal_print_message("%s: is finished\n", __func__);
259 * Generic request handler. it is called for any queue on a
260 * particular host. When the host is not busy, we look for a request
261 * on any queue on this host, and attempt to issue it. This may
262 * not be the queue we were asked to process.
/*
 * Block-layer request_fn callback (installed via blk_init_queue()).
 * Does no work itself: it wakes the worker thread, which fetches and
 * issues requests.  The debug path suggests the wake-up is skipped when
 * bq->req is already in flight — confirm against the elided condition.
 */
264 static void cyasblkdev_request(struct request_queue *q)
266 struct cyasblkdev_queue *bq = q->queuedata;
269 #ifndef WESTBRIDGE_NDEBUG
270 cy_as_hal_print_message(
271 "%s new request on cyasblkdev_queue_t bq:=%x\n",
272 __func__, (uint32_t)bq);
276 #ifndef WESTBRIDGE_NDEBUG
277 cy_as_hal_print_message("%s wake_up(&bq->thread_wq)\n",
281 /* wake up cyasblkdev_queue worker thread*/
282 wake_up(&bq->thread_wq);
284 #ifndef WESTBRIDGE_NDEBUG
285 cy_as_hal_print_message("%s: don't wake Q_thr, bq->req:%x\n",
286 __func__, (uint32_t)bq->req);
292 * cyasblkdev_init_queue - initialise a queue structure.
293 * @bq: cyasblkdev queue
294 * @dev: CyAsDeviceHandle to attach this queue
297 * Initialise a cyasblkdev_request queue.
300 /* MAX NUMBER OF SECTORS PER REQUEST **/
301 #define Q_MAX_SECTORS 128
303 /* MAX NUMBER OF PHYS SEGMENTS (entries in the SG list)*/
/*
 * Creates the block request queue (with cyasblkdev_request as request_fn
 * and @lock as its queue lock), configures prep/bounce/segment limits,
 * then spawns cyasblkdev_queue_thread() and waits for it to signal
 * bq->thread_complete before returning.
 */
306 int cyasblkdev_init_queue(struct cyasblkdev_queue *bq, spinlock_t *lock)
312 /* 1st param is a function that wakes up the queue thread */
313 bq->queue = blk_init_queue(cyasblkdev_request, lock);
317 blk_queue_prep_rq(bq->queue, cyasblkdev_prep_request);
/* BLK_BOUNCE_ANY: no bounce buffering needed for any address */
319 blk_queue_bounce_limit(bq->queue, BLK_BOUNCE_ANY);
320 blk_queue_max_hw_sectors(bq->queue, Q_MAX_SECTORS);
322 /* As of now, we have the HAL/driver support to
323 * merge scattered segments and handle them simultaneously.
324 * so, setting the max_phys_segments to 8. */
325 /*blk_queue_max_phys_segments(bq->queue, Q_MAX_SGS);
326 blk_queue_max_hw_segments(bq->queue, Q_MAX_SGS);*/
327 blk_queue_max_segments(bq->queue, Q_MAX_SGS)
329 /* should be < then HAL can handle */
330 blk_queue_max_segment_size(bq->queue, 512*Q_MAX_SECTORS);
332 bq->queue->queuedata = bq;
335 init_completion(&bq->thread_complete);
336 init_waitqueue_head(&bq->thread_wq);
337 sema_init(&bq->thread_sem, 1);
/* spawn the worker; it completes thread_complete once it has started */
339 ret = kernel_thread(cyasblkdev_queue_thread, bq, CLONE_KERNEL);
341 /* wait until the thread is spawned */
342 wait_for_completion(&bq->thread_complete);
344 /* reinitialize the completion so it can signal thread exit later */
345 init_completion(&bq->thread_complete);
353 EXPORT_SYMBOL(cyasblkdev_init_queue);
355 /*called from blk_put() */
/*
 * Tear down the queue: ask the worker thread to exit (EXIT flag + wake),
 * wait for its complete_and_exit() on thread_complete, then release the
 * block-layer queue.
 */
356 void cyasblkdev_cleanup_queue(struct cyasblkdev_queue *bq)
360 bq->flags |= CYASBLKDEV_QUEUE_EXIT;
361 wake_up(&bq->thread_wq);
362 wait_for_completion(&bq->thread_complete);
364 blk_cleanup_queue(bq->queue);
366 EXPORT_SYMBOL(cyasblkdev_cleanup_queue);
370 * cyasblkdev_queue_suspend - suspend a CyAsBlkDev request queue
371 * @bq: CyAsBlkDev queue to suspend
373 * Stop the block request queue, and wait for our thread to
374 * complete any outstanding requests. This ensures that we
375 * won't suspend while a request is being processed.
377 void cyasblkdev_queue_suspend(struct cyasblkdev_queue *bq)
379 struct request_queue *q = bq->queue;
/* idempotent: only act if not already suspended */
384 if (!(bq->flags & CYASBLKDEV_QUEUE_SUSPENDED)) {
385 bq->flags |= CYASBLKDEV_QUEUE_SUSPENDED;
/* NOTE(review): the queue is stopped inside this locked region —
 * confirm the (elided) blk_stop_queue() call between lock/unlock. */
387 spin_lock_irqsave(q->queue_lock, flags);
389 spin_unlock_irqrestore(q->queue_lock, flags);
/* take the thread semaphore so the worker cannot run while suspended */
391 down(&bq->thread_sem);
394 EXPORT_SYMBOL(cyasblkdev_queue_suspend);
396 /*cyasblkdev_queue_resume - resume a previously suspended
397 * CyAsBlkDev request queue @bq: CyAsBlkDev queue to resume */
398 void cyasblkdev_queue_resume(struct cyasblkdev_queue *bq)
400 struct request_queue *q = bq->queue;
/* idempotent: only act if currently suspended */
405 if (bq->flags & CYASBLKDEV_QUEUE_SUSPENDED) {
406 bq->flags &= ~CYASBLKDEV_QUEUE_SUSPENDED;
/* NOTE(review): the queue is restarted inside this locked region —
 * confirm the (elided) blk_start_queue() call, and that thread_sem
 * is released (up()) to mirror queue_suspend()'s down(). */
410 spin_lock_irqsave(q->queue_lock, flags);
412 spin_unlock_irqrestore(q->queue_lock, flags);
415 EXPORT_SYMBOL(cyasblkdev_queue_resume);