block: Remove annoying "unknown partition table" message
[linux-2.6-block.git] / include / linux / blkdev.h
1da177e4
LT
1#ifndef _LINUX_BLKDEV_H
2#define _LINUX_BLKDEV_H
3
85fd0bc9
RK
4#include <linux/sched.h>
5
f5ff8422
JA
6#ifdef CONFIG_BLOCK
7
1da177e4
LT
8#include <linux/major.h>
9#include <linux/genhd.h>
10#include <linux/list.h>
320ae51f 11#include <linux/llist.h>
1da177e4
LT
12#include <linux/timer.h>
13#include <linux/workqueue.h>
14#include <linux/pagemap.h>
15#include <linux/backing-dev.h>
16#include <linux/wait.h>
17#include <linux/mempool.h>
18#include <linux/bio.h>
1da177e4 19#include <linux/stringify.h>
3e6053d7 20#include <linux/gfp.h>
d351af01 21#include <linux/bsg.h>
c7c22e4d 22#include <linux/smp.h>
548bc8e1 23#include <linux/rcupdate.h>
add703fd 24#include <linux/percpu-refcount.h>
1da177e4
LT
25
26#include <asm/scatterlist.h>
27
de477254 28struct module;
21b2f0c8
CH
29struct scsi_ioctl_command;
30
1da177e4 31struct request_queue;
1da177e4 32struct elevator_queue;
1da177e4 33struct request_pm_state;
2056a782 34struct blk_trace;
3d6392cf
JA
35struct request;
36struct sg_io_hdr;
aa387cc8 37struct bsg_job;
3c798398 38struct blkcg_gq;
7c94e1c1 39struct blk_flush_queue;
1da177e4
LT
40
41#define BLKDEV_MIN_RQ 4
42#define BLKDEV_MAX_RQ 128 /* Default maximum */
43
8bd435b3
TH
44/*
45 * Maximum number of blkcg policies allowed to be registered concurrently.
46 * Defined here to simplify include dependency.
47 */
48#define BLKCG_MAX_POLS 2
49
1da177e4 50struct request;
8ffdc655 51typedef void (rq_end_io_fn)(struct request *, int);
1da177e4 52
5b788ce3
TH
53#define BLK_RL_SYNCFULL (1U << 0)
54#define BLK_RL_ASYNCFULL (1U << 1)
55
1da177e4 56struct request_list {
5b788ce3 57 struct request_queue *q; /* the queue this rl belongs to */
a051661c
TH
58#ifdef CONFIG_BLK_CGROUP
59 struct blkcg_gq *blkg; /* blkg this request pool belongs to */
60#endif
1faa16d2
JA
61 /*
62 * count[], starved[], and wait[] are indexed by
63 * BLK_RW_SYNC/BLK_RW_ASYNC
64 */
8a5ecdd4
TH
65 int count[2];
66 int starved[2];
67 mempool_t *rq_pool;
68 wait_queue_head_t wait[2];
5b788ce3 69 unsigned int flags;
1da177e4
LT
70};
71
4aff5e23
JA
72/*
73 * request command types
74 */
75enum rq_cmd_type_bits {
76 REQ_TYPE_FS = 1, /* fs request */
77 REQ_TYPE_BLOCK_PC, /* scsi command */
78 REQ_TYPE_SENSE, /* sense request */
79 REQ_TYPE_PM_SUSPEND, /* suspend request */
80 REQ_TYPE_PM_RESUME, /* resume request */
81 REQ_TYPE_PM_SHUTDOWN, /* shutdown request */
4aff5e23 82 REQ_TYPE_SPECIAL, /* driver defined type */
4aff5e23
JA
83 /*
 84 * for ATA/ATAPI devices. This really doesn't belong here; ide should
85 * use REQ_TYPE_SPECIAL and use rq->cmd[0] with the range of driver
86 * private REQ_LB opcodes to differentiate what type of request this is
87 */
4aff5e23 88 REQ_TYPE_ATA_TASKFILE,
cea2885a 89 REQ_TYPE_ATA_PC,
4aff5e23
JA
90};
91
1da177e4
LT
92#define BLK_MAX_CDB 16
93
94/*
af76e555
CH
95 * Try to put the fields that are referenced together in the same cacheline.
96 *
97 * If you modify this structure, make sure to update blk_rq_init() and
98 * especially blk_mq_rq_ctx_init() to take care of the added fields.
1da177e4
LT
99 */
100struct request {
6897fc22 101 struct list_head queuelist;
320ae51f
JA
102 union {
103 struct call_single_data csd;
8b4922d3 104 unsigned long fifo_time;
320ae51f 105 };
ff856bad 106
165125e1 107 struct request_queue *q;
320ae51f 108 struct blk_mq_ctx *mq_ctx;
e6a1c874 109
5953316d 110 u64 cmd_flags;
4aff5e23 111 enum rq_cmd_type_bits cmd_type;
242f9dcb 112 unsigned long atomic_flags;
1da177e4 113
181fdde3
RK
114 int cpu;
115
a2dec7b3 116 /* the following two fields are internal, NEVER access directly */
a2dec7b3 117 unsigned int __data_len; /* total data len */
181fdde3 118 sector_t __sector; /* sector cursor */
1da177e4
LT
119
120 struct bio *bio;
121 struct bio *biotail;
122
360f92c2
JA
123 /*
124 * The hash is used inside the scheduler, and killed once the
125 * request reaches the dispatch list. The ipi_list is only used
126 * to queue the request for softirq completion, which is long
127 * after the request has been unhashed (and even removed from
128 * the dispatch list).
129 */
130 union {
131 struct hlist_node hash; /* merge hash */
132 struct list_head ipi_list;
133 };
134
e6a1c874
JA
135 /*
136 * The rb_node is only used inside the io scheduler, requests
137 * are pruned when moved to the dispatch queue. So let the
c186794d 138 * completion_data share space with the rb_node.
e6a1c874
JA
139 */
140 union {
141 struct rb_node rb_node; /* sort/lookup */
c186794d 142 void *completion_data;
e6a1c874 143 };
9817064b 144
ff7d145f 145 /*
7f1dc8a2 146 * Three pointers are available for the IO schedulers; if they need
c186794d
MS
 147 * more they have to allocate them dynamically. Flush requests are
148 * never put on the IO scheduler. So let the flush fields share
a612fddf 149 * space with the elevator data.
ff7d145f 150 */
c186794d 151 union {
a612fddf
TH
152 struct {
153 struct io_cq *icq;
154 void *priv[2];
155 } elv;
156
c186794d
MS
157 struct {
158 unsigned int seq;
159 struct list_head list;
4853abaa 160 rq_end_io_fn *saved_end_io;
c186794d
MS
161 } flush;
162 };
ff7d145f 163
8f34ee75 164 struct gendisk *rq_disk;
09e099d4 165 struct hd_struct *part;
1da177e4 166 unsigned long start_time;
9195291e 167#ifdef CONFIG_BLK_CGROUP
a051661c 168 struct request_list *rl; /* rl this rq is alloced from */
9195291e
DS
169 unsigned long long start_time_ns;
170 unsigned long long io_start_time_ns; /* when passed to hardware */
171#endif
1da177e4
LT
172 /* Number of scatter-gather DMA addr+len pairs after
173 * physical address coalescing is performed.
174 */
175 unsigned short nr_phys_segments;
13f05c8d
MP
176#if defined(CONFIG_BLK_DEV_INTEGRITY)
177 unsigned short nr_integrity_segments;
178#endif
1da177e4 179
8f34ee75
JA
180 unsigned short ioprio;
181
731ec497 182 void *special; /* opaque pointer available for LLD use */
1da177e4 183
cdd60262
JA
184 int tag;
185 int errors;
186
1da177e4
LT
187 /*
188 * when request is used as a packet command carrier
189 */
d7e3c324
FT
190 unsigned char __cmd[BLK_MAX_CDB];
191 unsigned char *cmd;
181fdde3 192 unsigned short cmd_len;
1da177e4 193
7a85f889 194 unsigned int extra_len; /* length of alignment and padding */
1da177e4 195 unsigned int sense_len;
c3a4d78c 196 unsigned int resid_len; /* residual count */
1da177e4
LT
197 void *sense;
198
242f9dcb
JA
199 unsigned long deadline;
200 struct list_head timeout_list;
1da177e4 201 unsigned int timeout;
17e01f21 202 int retries;
1da177e4 203
1da177e4 204 /*
c00895ab 205 * completion callback.
1da177e4
LT
206 */
207 rq_end_io_fn *end_io;
208 void *end_io_data;
abae1fde
FT
209
210 /* for bidi */
211 struct request *next_rq;
1da177e4
LT
212};
213
766ca442
FLVC
214static inline unsigned short req_get_ioprio(struct request *req)
215{
216 return req->ioprio;
217}
218
1da177e4 219/*
4aff5e23 220 * State information carried for REQ_TYPE_PM_SUSPEND and REQ_TYPE_PM_RESUME
1da177e4
LT
221 * requests. Some step values could eventually be made generic.
222 */
223struct request_pm_state
224{
225 /* PM state machine step value, currently driver specific */
226 int pm_step;
227 /* requested PM state value (S1, S2, S3, S4, ...) */
228 u32 pm_state;
229 void* data; /* for driver use */
230};
231
232#include <linux/elevator.h>
233
320ae51f
JA
234struct blk_queue_ctx;
235
165125e1 236typedef void (request_fn_proc) (struct request_queue *q);
5a7bbad2 237typedef void (make_request_fn) (struct request_queue *q, struct bio *bio);
165125e1 238typedef int (prep_rq_fn) (struct request_queue *, struct request *);
28018c24 239typedef void (unprep_rq_fn) (struct request_queue *, struct request *);
1da177e4
LT
240
241struct bio_vec;
cc371e66
AK
242struct bvec_merge_data {
243 struct block_device *bi_bdev;
244 sector_t bi_sector;
245 unsigned bi_size;
246 unsigned long bi_rw;
247};
248typedef int (merge_bvec_fn) (struct request_queue *, struct bvec_merge_data *,
249 struct bio_vec *);
ff856bad 250typedef void (softirq_done_fn)(struct request *);
2fb98e84 251typedef int (dma_drain_needed_fn)(struct request *);
ef9e3fac 252typedef int (lld_busy_fn) (struct request_queue *q);
aa387cc8 253typedef int (bsg_job_fn) (struct bsg_job *);
1da177e4 254
242f9dcb
JA
255enum blk_eh_timer_return {
256 BLK_EH_NOT_HANDLED,
257 BLK_EH_HANDLED,
258 BLK_EH_RESET_TIMER,
259};
260
261typedef enum blk_eh_timer_return (rq_timed_out_fn)(struct request *);
262
1da177e4
LT
263enum blk_queue_state {
264 Queue_down,
265 Queue_up,
266};
267
1da177e4
LT
268struct blk_queue_tag {
269 struct request **tag_index; /* map of busy tags */
270 unsigned long *tag_map; /* bit map of free/busy tags */
1da177e4
LT
271 int busy; /* current depth */
272 int max_depth; /* what we will send to device */
ba025082 273 int real_max_depth; /* what the array can hold */
1da177e4
LT
274 atomic_t refcnt; /* map can be shared */
275};
276
abf54393
FT
277#define BLK_SCSI_MAX_CMDS (256)
278#define BLK_SCSI_CMD_PER_LONG (BLK_SCSI_MAX_CMDS / (sizeof(long) * 8))
279
025146e1
MP
280struct queue_limits {
281 unsigned long bounce_pfn;
282 unsigned long seg_boundary_mask;
283
284 unsigned int max_hw_sectors;
762380ad 285 unsigned int chunk_sectors;
025146e1
MP
286 unsigned int max_sectors;
287 unsigned int max_segment_size;
c72758f3
MP
288 unsigned int physical_block_size;
289 unsigned int alignment_offset;
290 unsigned int io_min;
291 unsigned int io_opt;
67efc925 292 unsigned int max_discard_sectors;
4363ac7c 293 unsigned int max_write_same_sectors;
86b37281
MP
294 unsigned int discard_granularity;
295 unsigned int discard_alignment;
025146e1
MP
296
297 unsigned short logical_block_size;
8a78362c 298 unsigned short max_segments;
13f05c8d 299 unsigned short max_integrity_segments;
025146e1 300
c72758f3 301 unsigned char misaligned;
86b37281 302 unsigned char discard_misaligned;
e692cb66 303 unsigned char cluster;
a934a00a 304 unsigned char discard_zeroes_data;
c78afc62 305 unsigned char raid_partial_stripes_expensive;
025146e1
MP
306};
307
d7b76301 308struct request_queue {
1da177e4
LT
309 /*
310 * Together with queue_head for cacheline sharing
311 */
312 struct list_head queue_head;
313 struct request *last_merge;
b374d18a 314 struct elevator_queue *elevator;
8a5ecdd4
TH
315 int nr_rqs[2]; /* # allocated [a]sync rqs */
316 int nr_rqs_elvpriv; /* # allocated rqs w/ elvpriv */
1da177e4
LT
317
318 /*
a051661c
TH
319 * If blkcg is not used, @q->root_rl serves all requests. If blkcg
320 * is used, root blkg allocates from @q->root_rl and all other
321 * blkgs from their own blkg->rl. Which one to use should be
322 * determined using bio_request_list().
1da177e4 323 */
a051661c 324 struct request_list root_rl;
1da177e4
LT
325
326 request_fn_proc *request_fn;
1da177e4
LT
327 make_request_fn *make_request_fn;
328 prep_rq_fn *prep_rq_fn;
28018c24 329 unprep_rq_fn *unprep_rq_fn;
1da177e4 330 merge_bvec_fn *merge_bvec_fn;
ff856bad 331 softirq_done_fn *softirq_done_fn;
242f9dcb 332 rq_timed_out_fn *rq_timed_out_fn;
2fb98e84 333 dma_drain_needed_fn *dma_drain_needed;
ef9e3fac 334 lld_busy_fn *lld_busy_fn;
1da177e4 335
320ae51f
JA
336 struct blk_mq_ops *mq_ops;
337
338 unsigned int *mq_map;
339
340 /* sw queues */
e6cdb092 341 struct blk_mq_ctx __percpu *queue_ctx;
320ae51f
JA
342 unsigned int nr_queues;
343
344 /* hw dispatch queues */
345 struct blk_mq_hw_ctx **queue_hw_ctx;
346 unsigned int nr_hw_queues;
347
8922e16c
TH
348 /*
349 * Dispatch queue sorting
350 */
1b47f531 351 sector_t end_sector;
8922e16c 352 struct request *boundary_rq;
8922e16c 353
1da177e4 354 /*
3cca6dc1 355 * Delayed queue handling
1da177e4 356 */
3cca6dc1 357 struct delayed_work delay_work;
1da177e4
LT
358
359 struct backing_dev_info backing_dev_info;
360
361 /*
362 * The queue owner gets to use this for whatever they like.
363 * ll_rw_blk doesn't touch it.
364 */
365 void *queuedata;
366
1da177e4 367 /*
d7b76301 368 * various queue flags, see QUEUE_* below
1da177e4 369 */
d7b76301 370 unsigned long queue_flags;
1da177e4 371
a73f730d
TH
372 /*
373 * ida allocated id for this queue. Used to index queues from
374 * ioctx.
375 */
376 int id;
377
1da177e4 378 /*
d7b76301 379 * queue needs bounce pages for pages above this limit
1da177e4 380 */
d7b76301 381 gfp_t bounce_gfp;
1da177e4
LT
382
383 /*
152587de 384 * protects queue structures from reentrancy. ->__queue_lock should
 385 * _never_ be used directly; it is queue private. Always use
386 * ->queue_lock.
1da177e4 387 */
152587de 388 spinlock_t __queue_lock;
1da177e4
LT
389 spinlock_t *queue_lock;
390
391 /*
392 * queue kobject
393 */
394 struct kobject kobj;
395
320ae51f
JA
396 /*
397 * mq queue kobject
398 */
399 struct kobject mq_kobj;
400
47fafbc7 401#ifdef CONFIG_PM
6c954667
LM
402 struct device *dev;
403 int rpm_status;
404 unsigned int nr_pending;
405#endif
406
1da177e4
LT
407 /*
408 * queue settings
409 */
410 unsigned long nr_requests; /* Max # of requests */
411 unsigned int nr_congestion_on;
412 unsigned int nr_congestion_off;
413 unsigned int nr_batching;
414
fa0ccd83 415 unsigned int dma_drain_size;
d7b76301 416 void *dma_drain_buffer;
e3790c7d 417 unsigned int dma_pad_mask;
1da177e4
LT
418 unsigned int dma_alignment;
419
420 struct blk_queue_tag *queue_tags;
6eca9004 421 struct list_head tag_busy_list;
1da177e4 422
15853af9 423 unsigned int nr_sorted;
0a7ae2ff 424 unsigned int in_flight[2];
24faf6f6
BVA
425 /*
426 * Number of active block driver functions for which blk_drain_queue()
427 * must wait. Must be incremented around functions that unlock the
428 * queue_lock internally, e.g. scsi_request_fn().
429 */
430 unsigned int request_fn_active;
1da177e4 431
242f9dcb
JA
432 unsigned int rq_timeout;
433 struct timer_list timeout;
434 struct list_head timeout_list;
435
a612fddf 436 struct list_head icq_list;
4eef3049 437#ifdef CONFIG_BLK_CGROUP
a2b1693b 438 DECLARE_BITMAP (blkcg_pols, BLKCG_MAX_POLS);
3c798398 439 struct blkcg_gq *root_blkg;
03aa264a 440 struct list_head blkg_list;
4eef3049 441#endif
a612fddf 442
025146e1
MP
443 struct queue_limits limits;
444
1da177e4
LT
445 /*
446 * sg stuff
447 */
448 unsigned int sg_timeout;
449 unsigned int sg_reserved_size;
1946089a 450 int node;
6c5c9341 451#ifdef CONFIG_BLK_DEV_IO_TRACE
2056a782 452 struct blk_trace *blk_trace;
6c5c9341 453#endif
1da177e4 454 /*
4913efe4 455 * for flush operations
1da177e4 456 */
4913efe4 457 unsigned int flush_flags;
f3876930 458 unsigned int flush_not_queueable:1;
7c94e1c1 459 struct blk_flush_queue *fq;
483f4afc 460
6fca6a61
CH
461 struct list_head requeue_list;
462 spinlock_t requeue_lock;
463 struct work_struct requeue_work;
464
483f4afc 465 struct mutex sysfs_lock;
d351af01 466
d732580b 467 int bypass_depth;
780db207 468 int mq_freeze_depth;
d732580b 469
d351af01 470#if defined(CONFIG_BLK_DEV_BSG)
aa387cc8
MC
471 bsg_job_fn *bsg_job_fn;
472 int bsg_job_size;
d351af01
FT
473 struct bsg_class_device bsg_dev;
474#endif
e43473b7
VG
475
476#ifdef CONFIG_BLK_DEV_THROTTLING
477 /* Throttle data */
478 struct throtl_data *td;
479#endif
548bc8e1 480 struct rcu_head rcu_head;
320ae51f 481 wait_queue_head_t mq_freeze_wq;
add703fd 482 struct percpu_ref mq_usage_counter;
320ae51f 483 struct list_head all_q_node;
0d2602ca
JA
484
485 struct blk_mq_tag_set *tag_set;
486 struct list_head tag_set_list;
1da177e4
LT
487};
488
1da177e4
LT
489#define QUEUE_FLAG_QUEUED 1 /* uses generic tag queueing */
490#define QUEUE_FLAG_STOPPED 2 /* queue is stopped */
1faa16d2
JA
491#define QUEUE_FLAG_SYNCFULL 3 /* read queue has been filled */
492#define QUEUE_FLAG_ASYNCFULL 4 /* write queue has been filled */
3f3299d5 493#define QUEUE_FLAG_DYING 5 /* queue being torn down */
d732580b 494#define QUEUE_FLAG_BYPASS 6 /* act as dumb FIFO queue */
c21e6beb
JA
495#define QUEUE_FLAG_BIDI 7 /* queue supports bidi requests */
496#define QUEUE_FLAG_NOMERGES 8 /* disable merge attempts */
5757a6d7 497#define QUEUE_FLAG_SAME_COMP 9 /* complete on same CPU-group */
c21e6beb
JA
498#define QUEUE_FLAG_FAIL_IO 10 /* fake timeout */
499#define QUEUE_FLAG_STACKABLE 11 /* supports request stacking */
500#define QUEUE_FLAG_NONROT 12 /* non-rotational device (SSD) */
88e740f1 501#define QUEUE_FLAG_VIRT QUEUE_FLAG_NONROT /* paravirt device */
c21e6beb
JA
502#define QUEUE_FLAG_IO_STAT 13 /* do IO stats */
503#define QUEUE_FLAG_DISCARD 14 /* supports DISCARD */
504#define QUEUE_FLAG_NOXMERGES 15 /* No extended merges */
505#define QUEUE_FLAG_ADD_RANDOM 16 /* Contributes to random pool */
506#define QUEUE_FLAG_SECDISCARD 17 /* supports SECDISCARD */
5757a6d7 507#define QUEUE_FLAG_SAME_FORCE 18 /* force complete on same CPU */
c246e80d 508#define QUEUE_FLAG_DEAD 19 /* queue tear-down finished */
320ae51f 509#define QUEUE_FLAG_INIT_DONE 20 /* queue is initialized */
05f1dd53 510#define QUEUE_FLAG_NO_SG_MERGE 21 /* don't attempt to merge SG segments*/
66cb45aa 511#define QUEUE_FLAG_SG_GAPS 22 /* queue doesn't support SG gaps */
bc58ba94
JA
512
513#define QUEUE_FLAG_DEFAULT ((1 << QUEUE_FLAG_IO_STAT) | \
01e97f6b 514 (1 << QUEUE_FLAG_STACKABLE) | \
e2e1a148
JA
515 (1 << QUEUE_FLAG_SAME_COMP) | \
516 (1 << QUEUE_FLAG_ADD_RANDOM))
797e7dbb 517
94eddfbe
JA
518#define QUEUE_FLAG_MQ_DEFAULT ((1 << QUEUE_FLAG_IO_STAT) | \
519 (1 << QUEUE_FLAG_SAME_COMP))
520
8bcb6c7d 521static inline void queue_lockdep_assert_held(struct request_queue *q)
8f45c1a5 522{
8bcb6c7d
AK
523 if (q->queue_lock)
524 lockdep_assert_held(q->queue_lock);
8f45c1a5
LT
525}
526
75ad23bc
NP
527static inline void queue_flag_set_unlocked(unsigned int flag,
528 struct request_queue *q)
529{
530 __set_bit(flag, &q->queue_flags);
531}
532
e48ec690
JA
533static inline int queue_flag_test_and_clear(unsigned int flag,
534 struct request_queue *q)
535{
8bcb6c7d 536 queue_lockdep_assert_held(q);
e48ec690
JA
537
538 if (test_bit(flag, &q->queue_flags)) {
539 __clear_bit(flag, &q->queue_flags);
540 return 1;
541 }
542
543 return 0;
544}
545
546static inline int queue_flag_test_and_set(unsigned int flag,
547 struct request_queue *q)
548{
8bcb6c7d 549 queue_lockdep_assert_held(q);
e48ec690
JA
550
551 if (!test_bit(flag, &q->queue_flags)) {
552 __set_bit(flag, &q->queue_flags);
553 return 0;
554 }
555
556 return 1;
557}
558
75ad23bc
NP
559static inline void queue_flag_set(unsigned int flag, struct request_queue *q)
560{
8bcb6c7d 561 queue_lockdep_assert_held(q);
75ad23bc
NP
562 __set_bit(flag, &q->queue_flags);
563}
564
565static inline void queue_flag_clear_unlocked(unsigned int flag,
566 struct request_queue *q)
567{
568 __clear_bit(flag, &q->queue_flags);
569}
570
0a7ae2ff
JA
571static inline int queue_in_flight(struct request_queue *q)
572{
573 return q->in_flight[0] + q->in_flight[1];
574}
575
75ad23bc
NP
576static inline void queue_flag_clear(unsigned int flag, struct request_queue *q)
577{
8bcb6c7d 578 queue_lockdep_assert_held(q);
75ad23bc
NP
579 __clear_bit(flag, &q->queue_flags);
580}
581
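/*
 * Illustrative sketch, not part of the original header: the locked
 * queue_flag_set()/queue_flag_clear() helpers above assert that
 * q->queue_lock is held, so a driver would typically toggle flags under
 * the lock like this. The function name is hypothetical.
 */
static inline void example_mark_queue_nonrot(struct request_queue *q)
{
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	queue_flag_set(QUEUE_FLAG_NONROT, q);		/* e.g. an SSD */
	queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, q);	/* SSDs add no entropy */
	spin_unlock_irqrestore(q->queue_lock, flags);
}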
1da177e4
LT
582#define blk_queue_tagged(q) test_bit(QUEUE_FLAG_QUEUED, &(q)->queue_flags)
583#define blk_queue_stopped(q) test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags)
3f3299d5 584#define blk_queue_dying(q) test_bit(QUEUE_FLAG_DYING, &(q)->queue_flags)
c246e80d 585#define blk_queue_dead(q) test_bit(QUEUE_FLAG_DEAD, &(q)->queue_flags)
d732580b 586#define blk_queue_bypass(q) test_bit(QUEUE_FLAG_BYPASS, &(q)->queue_flags)
320ae51f 587#define blk_queue_init_done(q) test_bit(QUEUE_FLAG_INIT_DONE, &(q)->queue_flags)
ac9fafa1 588#define blk_queue_nomerges(q) test_bit(QUEUE_FLAG_NOMERGES, &(q)->queue_flags)
488991e2
AB
589#define blk_queue_noxmerges(q) \
590 test_bit(QUEUE_FLAG_NOXMERGES, &(q)->queue_flags)
a68bbddb 591#define blk_queue_nonrot(q) test_bit(QUEUE_FLAG_NONROT, &(q)->queue_flags)
bc58ba94 592#define blk_queue_io_stat(q) test_bit(QUEUE_FLAG_IO_STAT, &(q)->queue_flags)
e2e1a148 593#define blk_queue_add_random(q) test_bit(QUEUE_FLAG_ADD_RANDOM, &(q)->queue_flags)
4ee5eaf4
KU
594#define blk_queue_stackable(q) \
595 test_bit(QUEUE_FLAG_STACKABLE, &(q)->queue_flags)
c15227de 596#define blk_queue_discard(q) test_bit(QUEUE_FLAG_DISCARD, &(q)->queue_flags)
8d57a98c
AH
597#define blk_queue_secdiscard(q) (blk_queue_discard(q) && \
598 test_bit(QUEUE_FLAG_SECDISCARD, &(q)->queue_flags))
1da177e4 599
33659ebb
CH
600#define blk_noretry_request(rq) \
601 ((rq)->cmd_flags & (REQ_FAILFAST_DEV|REQ_FAILFAST_TRANSPORT| \
602 REQ_FAILFAST_DRIVER))
603
604#define blk_account_rq(rq) \
605 (((rq)->cmd_flags & REQ_STARTED) && \
e2a60da7 606 ((rq)->cmd_type == REQ_TYPE_FS))
33659ebb 607
1da177e4 608#define blk_pm_request(rq) \
33659ebb
CH
609 ((rq)->cmd_type == REQ_TYPE_PM_SUSPEND || \
610 (rq)->cmd_type == REQ_TYPE_PM_RESUME)
1da177e4 611
ab780f1e 612#define blk_rq_cpu_valid(rq) ((rq)->cpu != -1)
abae1fde 613#define blk_bidi_rq(rq) ((rq)->next_rq != NULL)
336cdb40
KU
614/* rq->queuelist of dequeued request must be list_empty() */
615#define blk_queued_rq(rq) (!list_empty(&(rq)->queuelist))
1da177e4
LT
616
617#define list_entry_rq(ptr) list_entry((ptr), struct request, queuelist)
618
5953316d 619#define rq_data_dir(rq) (((rq)->cmd_flags & 1) != 0)
1da177e4 620
49fd524f
JA
621/*
 622 * A driver can handle struct request if it either has an old-style
 623 * request_fn defined or is blk-mq based.
624 */
625static inline bool queue_is_rq_based(struct request_queue *q)
626{
627 return q->request_fn || q->mq_ops;
628}
629
e692cb66
MP
630static inline unsigned int blk_queue_cluster(struct request_queue *q)
631{
632 return q->limits.cluster;
633}
634
9e2585a8 635/*
1faa16d2 636 * We regard a request as sync if it is either a read or a sync write
9e2585a8 637 */
1faa16d2
JA
638static inline bool rw_is_sync(unsigned int rw_flags)
639{
7b6d91da 640 return !(rw_flags & REQ_WRITE) || (rw_flags & REQ_SYNC);
1faa16d2
JA
641}
642
643static inline bool rq_is_sync(struct request *rq)
644{
645 return rw_is_sync(rq->cmd_flags);
646}
647
5b788ce3 648static inline bool blk_rl_full(struct request_list *rl, bool sync)
1da177e4 649{
5b788ce3
TH
650 unsigned int flag = sync ? BLK_RL_SYNCFULL : BLK_RL_ASYNCFULL;
651
652 return rl->flags & flag;
1da177e4
LT
653}
654
5b788ce3 655static inline void blk_set_rl_full(struct request_list *rl, bool sync)
1da177e4 656{
5b788ce3
TH
657 unsigned int flag = sync ? BLK_RL_SYNCFULL : BLK_RL_ASYNCFULL;
658
659 rl->flags |= flag;
1da177e4
LT
660}
661
5b788ce3 662static inline void blk_clear_rl_full(struct request_list *rl, bool sync)
1da177e4 663{
5b788ce3
TH
664 unsigned int flag = sync ? BLK_RL_SYNCFULL : BLK_RL_ASYNCFULL;
665
666 rl->flags &= ~flag;
1da177e4
LT
667}
668
e2a60da7
MP
669static inline bool rq_mergeable(struct request *rq)
670{
671 if (rq->cmd_type != REQ_TYPE_FS)
672 return false;
1da177e4 673
e2a60da7
MP
674 if (rq->cmd_flags & REQ_NOMERGE_FLAGS)
675 return false;
676
677 return true;
678}
1da177e4 679
f31dc1cd
MP
680static inline bool blk_check_merge_flags(unsigned int flags1,
681 unsigned int flags2)
682{
683 if ((flags1 & REQ_DISCARD) != (flags2 & REQ_DISCARD))
684 return false;
685
686 if ((flags1 & REQ_SECURE) != (flags2 & REQ_SECURE))
687 return false;
688
4363ac7c
MP
689 if ((flags1 & REQ_WRITE_SAME) != (flags2 & REQ_WRITE_SAME))
690 return false;
691
f31dc1cd
MP
692 return true;
693}
694
4363ac7c
MP
695static inline bool blk_write_same_mergeable(struct bio *a, struct bio *b)
696{
697 if (bio_data(a) == bio_data(b))
698 return true;
699
700 return false;
701}
702
1da177e4
LT
703/*
704 * q->prep_rq_fn return values
705 */
706#define BLKPREP_OK 0 /* serve it */
707#define BLKPREP_KILL 1 /* fatal error, kill */
708#define BLKPREP_DEFER 2 /* leave on queue */
709
710extern unsigned long blk_max_low_pfn, blk_max_pfn;
711
712/*
713 * standard bounce addresses:
714 *
715 * BLK_BOUNCE_HIGH : bounce all highmem pages
716 * BLK_BOUNCE_ANY : don't bounce anything
717 * BLK_BOUNCE_ISA : bounce pages above ISA DMA boundary
718 */
2472892a
AK
719
720#if BITS_PER_LONG == 32
1da177e4 721#define BLK_BOUNCE_HIGH ((u64)blk_max_low_pfn << PAGE_SHIFT)
2472892a
AK
722#else
723#define BLK_BOUNCE_HIGH -1ULL
724#endif
725#define BLK_BOUNCE_ANY (-1ULL)
bfe17231 726#define BLK_BOUNCE_ISA (DMA_BIT_MASK(24))
1da177e4 727
3d6392cf
JA
728/*
729 * default timeout for SG_IO if none specified
730 */
731#define BLK_DEFAULT_SG_TIMEOUT (60 * HZ)
f2f1fa78 732#define BLK_MIN_SG_TIMEOUT (7 * HZ)
3d6392cf 733
2a7326b5 734#ifdef CONFIG_BOUNCE
1da177e4 735extern int init_emergency_isa_pool(void);
165125e1 736extern void blk_queue_bounce(struct request_queue *q, struct bio **bio);
1da177e4
LT
737#else
738static inline int init_emergency_isa_pool(void)
739{
740 return 0;
741}
165125e1 742static inline void blk_queue_bounce(struct request_queue *q, struct bio **bio)
1da177e4
LT
743{
744}
 745#endif /* CONFIG_BOUNCE */
746
152e283f
FT
747struct rq_map_data {
748 struct page **pages;
749 int page_order;
750 int nr_entries;
56c451f4 751 unsigned long offset;
97ae77a1 752 int null_mapped;
ecb554a8 753 int from_user;
152e283f
FT
754};
755
5705f702 756struct req_iterator {
7988613b 757 struct bvec_iter iter;
5705f702
N
758 struct bio *bio;
759};
760
761/* This should not be used directly - use rq_for_each_segment */
1e428079
JA
762#define for_each_bio(_bio) \
763 for (; _bio; _bio = _bio->bi_next)
5705f702 764#define __rq_for_each_bio(_bio, rq) \
1da177e4
LT
765 if ((rq->bio)) \
766 for (_bio = (rq)->bio; _bio; _bio = _bio->bi_next)
767
5705f702
N
768#define rq_for_each_segment(bvl, _rq, _iter) \
769 __rq_for_each_bio(_iter.bio, _rq) \
7988613b 770 bio_for_each_segment(bvl, _iter.bio, _iter.iter)
5705f702 771
4550dd6c 772#define rq_iter_last(bvec, _iter) \
7988613b 773 (_iter.bio->bi_next == NULL && \
4550dd6c 774 bio_iter_last(bvec, _iter.iter))
5705f702 775
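/*
 * Illustrative sketch, not part of the original header: walking every
 * segment of a request with rq_for_each_segment(). A driver that does
 * its own data copying (rather than DMA mapping) would iterate like
 * this. The function name is hypothetical.
 */
static inline unsigned int example_count_rq_payload(struct request *rq)
{
	struct req_iterator iter;
	struct bio_vec bvec;
	unsigned int bytes = 0;

	rq_for_each_segment(bvec, rq, iter)
		bytes += bvec.bv_len;	/* each bvec is a page/offset/len triple */

	return bytes;
}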
2d4dc890
IL
776#ifndef ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
777# error "You should define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE for your platform"
778#endif
779#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
780extern void rq_flush_dcache_pages(struct request *rq);
781#else
782static inline void rq_flush_dcache_pages(struct request *rq)
783{
784}
785#endif
786
1da177e4
LT
787extern int blk_register_queue(struct gendisk *disk);
788extern void blk_unregister_queue(struct gendisk *disk);
1da177e4 789extern void generic_make_request(struct bio *bio);
2a4aa30c 790extern void blk_rq_init(struct request_queue *q, struct request *rq);
1da177e4 791extern void blk_put_request(struct request *);
165125e1 792extern void __blk_put_request(struct request_queue *, struct request *);
165125e1 793extern struct request *blk_get_request(struct request_queue *, int, gfp_t);
79eb63e9
BH
794extern struct request *blk_make_request(struct request_queue *, struct bio *,
795 gfp_t);
f27b087b 796extern void blk_rq_set_block_pc(struct request *);
165125e1 797extern void blk_requeue_request(struct request_queue *, struct request *);
66ac0280
CH
798extern void blk_add_request_payload(struct request *rq, struct page *page,
799 unsigned int len);
82124d60 800extern int blk_rq_check_limits(struct request_queue *q, struct request *rq);
ef9e3fac 801extern int blk_lld_busy(struct request_queue *q);
b0fd271d
KU
802extern int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
803 struct bio_set *bs, gfp_t gfp_mask,
804 int (*bio_ctr)(struct bio *, struct bio *, void *),
805 void *data);
806extern void blk_rq_unprep_clone(struct request *rq);
82124d60
KU
807extern int blk_insert_cloned_request(struct request_queue *q,
808 struct request *rq);
3cca6dc1 809extern void blk_delay_queue(struct request_queue *, unsigned long);
165125e1 810extern void blk_recount_segments(struct request_queue *, struct bio *);
0bfc96cb 811extern int scsi_verify_blk_ioctl(struct block_device *, unsigned int);
577ebb37
PB
812extern int scsi_cmd_blk_ioctl(struct block_device *, fmode_t,
813 unsigned int, void __user *);
74f3c8af
AV
814extern int scsi_cmd_ioctl(struct request_queue *, struct gendisk *, fmode_t,
815 unsigned int, void __user *);
e915e872
AV
816extern int sg_scsi_ioctl(struct request_queue *, struct gendisk *, fmode_t,
817 struct scsi_ioctl_command __user *);
3fcfab16 818
5a7bbad2 819extern void blk_queue_bio(struct request_queue *q, struct bio *bio);
166e1f90 820
3fcfab16
AM
821/*
 822 * A queue has just exited congestion. Note this in the global counter of
823 * congested queues, and wake up anyone who was waiting for requests to be
824 * put back.
825 */
8aa7e847 826static inline void blk_clear_queue_congested(struct request_queue *q, int sync)
3fcfab16 827{
8aa7e847 828 clear_bdi_congested(&q->backing_dev_info, sync);
3fcfab16
AM
829}
830
831/*
832 * A queue has just entered congestion. Flag that in the queue's VM-visible
 833 * state flags and increment the global counter of congested queues.
834 */
8aa7e847 835static inline void blk_set_queue_congested(struct request_queue *q, int sync)
3fcfab16 836{
8aa7e847 837 set_bdi_congested(&q->backing_dev_info, sync);
3fcfab16
AM
838}
839
165125e1
JA
840extern void blk_start_queue(struct request_queue *q);
841extern void blk_stop_queue(struct request_queue *q);
1da177e4 842extern void blk_sync_queue(struct request_queue *q);
165125e1 843extern void __blk_stop_queue(struct request_queue *q);
24ecfbe2 844extern void __blk_run_queue(struct request_queue *q);
165125e1 845extern void blk_run_queue(struct request_queue *);
c21e6beb 846extern void blk_run_queue_async(struct request_queue *q);
a3bce90e 847extern int blk_rq_map_user(struct request_queue *, struct request *,
152e283f
FT
848 struct rq_map_data *, void __user *, unsigned long,
849 gfp_t);
8e5cfc45 850extern int blk_rq_unmap_user(struct bio *);
165125e1
JA
851extern int blk_rq_map_kern(struct request_queue *, struct request *, void *, unsigned int, gfp_t);
852extern int blk_rq_map_user_iov(struct request_queue *, struct request *,
86d564c8
AV
853 struct rq_map_data *, const struct sg_iovec *,
854 int, unsigned int, gfp_t);
165125e1 855extern int blk_execute_rq(struct request_queue *, struct gendisk *,
994ca9a1 856 struct request *, int);
165125e1 857extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *,
15fc858a 858 struct request *, int, rq_end_io_fn *);
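/*
 * Illustrative sketch, not part of the original header: issuing a simple
 * SCSI command (TEST UNIT READY) through the request layer using
 * blk_get_request(), blk_rq_set_block_pc() and blk_execute_rq(). Error
 * handling is abbreviated and the function name is hypothetical.
 */
static inline int example_test_unit_ready(struct request_queue *q,
					  struct gendisk *disk)
{
	struct request *rq;
	int err;

	rq = blk_get_request(q, READ, GFP_KERNEL);
	if (IS_ERR(rq))
		return PTR_ERR(rq);

	blk_rq_set_block_pc(rq);
	rq->cmd[0] = 0x00;			/* TEST UNIT READY */
	rq->cmd_len = 6;
	rq->timeout = 10 * HZ;

	err = blk_execute_rq(q, disk, rq, 0);	/* blocks until completion */
	blk_put_request(rq);
	return err;
}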
6e39b69e 859
165125e1 860static inline struct request_queue *bdev_get_queue(struct block_device *bdev)
1da177e4 861{
ff9ea323 862 return bdev->bd_disk->queue; /* this is never NULL */
1da177e4
LT
863}
864
5efccd17 865/*
80a761fd
TH
866 * blk_rq_pos() : the current sector
867 * blk_rq_bytes() : bytes left in the entire request
868 * blk_rq_cur_bytes() : bytes left in the current segment
869 * blk_rq_err_bytes() : bytes left till the next error boundary
870 * blk_rq_sectors() : sectors left in the entire request
871 * blk_rq_cur_sectors() : sectors left in the current segment
5efccd17 872 */
5b93629b
TH
873static inline sector_t blk_rq_pos(const struct request *rq)
874{
a2dec7b3 875 return rq->__sector;
2e46e8b2
TH
876}
877
878static inline unsigned int blk_rq_bytes(const struct request *rq)
879{
a2dec7b3 880 return rq->__data_len;
5b93629b
TH
881}
882
2e46e8b2
TH
883static inline int blk_rq_cur_bytes(const struct request *rq)
884{
885 return rq->bio ? bio_cur_bytes(rq->bio) : 0;
886}
5efccd17 887
80a761fd
TH
888extern unsigned int blk_rq_err_bytes(const struct request *rq);
889
5b93629b
TH
890static inline unsigned int blk_rq_sectors(const struct request *rq)
891{
2e46e8b2 892 return blk_rq_bytes(rq) >> 9;
5b93629b
TH
893}
894
895static inline unsigned int blk_rq_cur_sectors(const struct request *rq)
896{
2e46e8b2 897 return blk_rq_cur_bytes(rq) >> 9;
5b93629b
TH
898}
899
f31dc1cd
MP
900static inline unsigned int blk_queue_get_max_sectors(struct request_queue *q,
901 unsigned int cmd_flags)
902{
903 if (unlikely(cmd_flags & REQ_DISCARD))
871dd928 904 return min(q->limits.max_discard_sectors, UINT_MAX >> 9);
f31dc1cd 905
4363ac7c
MP
906 if (unlikely(cmd_flags & REQ_WRITE_SAME))
907 return q->limits.max_write_same_sectors;
908
f31dc1cd
MP
909 return q->limits.max_sectors;
910}
911
762380ad
JA
912/*
 913 * Return the maximum size of a request at a given offset. Only valid for
914 * file system requests.
915 */
916static inline unsigned int blk_max_size_offset(struct request_queue *q,
917 sector_t offset)
918{
919 if (!q->limits.chunk_sectors)
736ed4de 920 return q->limits.max_sectors;
762380ad
JA
921
922 return q->limits.chunk_sectors -
923 (offset & (q->limits.chunk_sectors - 1));
924}
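/*
 * Worked example (added for illustration, not in the original header):
 * with chunk_sectors = 256 and offset = 1224, the offset within the
 * chunk is 1224 & 255 = 200, so blk_max_size_offset() returns
 * 256 - 200 = 56 sectors before the next chunk boundary.
 */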
925
f31dc1cd
MP
926static inline unsigned int blk_rq_get_max_sectors(struct request *rq)
927{
928 struct request_queue *q = rq->q;
929
930 if (unlikely(rq->cmd_type == REQ_TYPE_BLOCK_PC))
931 return q->limits.max_hw_sectors;
932
762380ad
JA
933 if (!q->limits.chunk_sectors)
934 return blk_queue_get_max_sectors(q, rq->cmd_flags);
935
936 return min(blk_max_size_offset(q, blk_rq_pos(rq)),
937 blk_queue_get_max_sectors(q, rq->cmd_flags));
f31dc1cd
MP
938}
939
75afb352
JN
940static inline unsigned int blk_rq_count_bios(struct request *rq)
941{
942 unsigned int nr_bios = 0;
943 struct bio *bio;
944
945 __rq_for_each_bio(bio, rq)
946 nr_bios++;
947
948 return nr_bios;
949}
950
9934c8c0
TH
951/*
952 * Request issue related functions.
953 */
954extern struct request *blk_peek_request(struct request_queue *q);
955extern void blk_start_request(struct request *rq);
956extern struct request *blk_fetch_request(struct request_queue *q);
957
1da177e4 958/*
2e60e022
TH
959 * Request completion related functions.
960 *
961 * blk_update_request() completes given number of bytes and updates
962 * the request without completing it.
963 *
f06d9a2b
TH
964 * blk_end_request() and friends. __blk_end_request() must be called
965 * with the request queue spinlock acquired.
1da177e4
LT
966 *
967 * Several drivers define their own end_request and call
3bcddeac
KU
968 * blk_end_request() for parts of the original function.
969 * This prevents code duplication in drivers.
1da177e4 970 */
2e60e022
TH
971extern bool blk_update_request(struct request *rq, int error,
972 unsigned int nr_bytes);
12120077 973extern void blk_finish_request(struct request *rq, int error);
b1f74493
FT
974extern bool blk_end_request(struct request *rq, int error,
975 unsigned int nr_bytes);
976extern void blk_end_request_all(struct request *rq, int error);
977extern bool blk_end_request_cur(struct request *rq, int error);
80a761fd 978extern bool blk_end_request_err(struct request *rq, int error);
b1f74493
FT
979extern bool __blk_end_request(struct request *rq, int error,
980 unsigned int nr_bytes);
981extern void __blk_end_request_all(struct request *rq, int error);
982extern bool __blk_end_request_cur(struct request *rq, int error);
80a761fd 983extern bool __blk_end_request_err(struct request *rq, int error);
2e60e022 984
ff856bad 985extern void blk_complete_request(struct request *);
242f9dcb
JA
986extern void __blk_complete_request(struct request *);
987extern void blk_abort_request(struct request *);
28018c24 988extern void blk_unprep_request(struct request *);
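/*
 * Illustrative sketch, not part of the original header: the canonical
 * shape of an old-style request_fn, draining the queue with
 * blk_fetch_request() and completing each request in full. request_fn
 * is entered with the queue lock held, so the __blk_end_request_all()
 * variant is used. A real driver would hand the request to hardware
 * instead of ending it immediately; the function name is hypothetical.
 */
static inline void example_request_fn(struct request_queue *q)
{
	struct request *rq;

	while ((rq = blk_fetch_request(q)) != NULL) {
		if (rq->cmd_type != REQ_TYPE_FS) {
			__blk_end_request_all(rq, -EIO);
			continue;
		}
		/* transfer blk_rq_bytes(rq) bytes starting at blk_rq_pos(rq) */
		__blk_end_request_all(rq, 0);
	}
}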
ff856bad 989
1da177e4
LT
990/*
991 * Access functions for manipulating queue properties
992 */
165125e1 993extern struct request_queue *blk_init_queue_node(request_fn_proc *rfn,
1946089a 994 spinlock_t *lock, int node_id);
165125e1 995extern struct request_queue *blk_init_queue(request_fn_proc *, spinlock_t *);
01effb0d
MS
996extern struct request_queue *blk_init_allocated_queue(struct request_queue *,
997 request_fn_proc *, spinlock_t *);
165125e1
JA
998extern void blk_cleanup_queue(struct request_queue *);
999extern void blk_queue_make_request(struct request_queue *, make_request_fn *);
1000extern void blk_queue_bounce_limit(struct request_queue *, u64);
72d4cd9f 1001extern void blk_limits_max_hw_sectors(struct queue_limits *, unsigned int);
086fa5ff 1002extern void blk_queue_max_hw_sectors(struct request_queue *, unsigned int);
762380ad 1003extern void blk_queue_chunk_sectors(struct request_queue *, unsigned int);
8a78362c 1004extern void blk_queue_max_segments(struct request_queue *, unsigned short);
165125e1 1005extern void blk_queue_max_segment_size(struct request_queue *, unsigned int);
67efc925
CH
1006extern void blk_queue_max_discard_sectors(struct request_queue *q,
1007 unsigned int max_discard_sectors);
4363ac7c
MP
1008extern void blk_queue_max_write_same_sectors(struct request_queue *q,
1009 unsigned int max_write_same_sectors);
e1defc4f 1010extern void blk_queue_logical_block_size(struct request_queue *, unsigned short);
892b6f90 1011extern void blk_queue_physical_block_size(struct request_queue *, unsigned int);
c72758f3
MP
1012extern void blk_queue_alignment_offset(struct request_queue *q,
1013 unsigned int alignment);
7c958e32 1014extern void blk_limits_io_min(struct queue_limits *limits, unsigned int min);
c72758f3 1015extern void blk_queue_io_min(struct request_queue *q, unsigned int min);
3c5820c7 1016extern void blk_limits_io_opt(struct queue_limits *limits, unsigned int opt);
c72758f3 1017extern void blk_queue_io_opt(struct request_queue *q, unsigned int opt);
e475bba2 1018extern void blk_set_default_limits(struct queue_limits *lim);
b1bd055d 1019extern void blk_set_stacking_limits(struct queue_limits *lim);
c72758f3
MP
1020extern int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
1021 sector_t offset);
17be8c24
MP
1022extern int bdev_stack_limits(struct queue_limits *t, struct block_device *bdev,
1023 sector_t offset);
c72758f3
MP
1024extern void disk_stack_limits(struct gendisk *disk, struct block_device *bdev,
1025 sector_t offset);
165125e1 1026extern void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b);
e3790c7d 1027extern void blk_queue_dma_pad(struct request_queue *, unsigned int);
27f8221a 1028extern void blk_queue_update_dma_pad(struct request_queue *, unsigned int);
2fb98e84
TH
1029extern int blk_queue_dma_drain(struct request_queue *q,
1030 dma_drain_needed_fn *dma_drain_needed,
1031 void *buf, unsigned int size);
ef9e3fac 1032extern void blk_queue_lld_busy(struct request_queue *q, lld_busy_fn *fn);
165125e1
JA
1033extern void blk_queue_segment_boundary(struct request_queue *, unsigned long);
1034extern void blk_queue_prep_rq(struct request_queue *, prep_rq_fn *pfn);
28018c24 1035extern void blk_queue_unprep_rq(struct request_queue *, unprep_rq_fn *ufn);
165125e1
JA
1036extern void blk_queue_merge_bvec(struct request_queue *, merge_bvec_fn *);
1037extern void blk_queue_dma_alignment(struct request_queue *, int);
11c3e689 1038extern void blk_queue_update_dma_alignment(struct request_queue *, int);
165125e1 1039extern void blk_queue_softirq_done(struct request_queue *, softirq_done_fn *);
242f9dcb
JA
1040extern void blk_queue_rq_timed_out(struct request_queue *, rq_timed_out_fn *);
1041extern void blk_queue_rq_timeout(struct request_queue *, unsigned int);
4913efe4 1042extern void blk_queue_flush(struct request_queue *q, unsigned int flush);
f3876930 1043extern void blk_queue_flush_queueable(struct request_queue *q, bool queueable);
1da177e4 1044extern struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev);
1da177e4 1045
165125e1 1046extern int blk_rq_map_sg(struct request_queue *, struct request *, struct scatterlist *);
85b9f66a
AH
1047extern int blk_bio_map_sg(struct request_queue *q, struct bio *bio,
1048 struct scatterlist *sglist);
1da177e4 1049extern void blk_dump_rq_flags(struct request *, char *);
1da177e4 1050extern long nr_blockdev_pages(void);
1da177e4 1051
09ac46c4 1052bool __must_check blk_get_queue(struct request_queue *);
165125e1
JA
1053struct request_queue *blk_alloc_queue(gfp_t);
1054struct request_queue *blk_alloc_queue_node(gfp_t, int);
1055extern void blk_put_queue(struct request_queue *);
1da177e4 1056
6c954667
LM
1057/*
1058 * block layer runtime pm functions
1059 */
47fafbc7 1060#ifdef CONFIG_PM
6c954667
LM
1061extern void blk_pm_runtime_init(struct request_queue *q, struct device *dev);
1062extern int blk_pre_runtime_suspend(struct request_queue *q);
1063extern void blk_post_runtime_suspend(struct request_queue *q, int err);
1064extern void blk_pre_runtime_resume(struct request_queue *q);
1065extern void blk_post_runtime_resume(struct request_queue *q, int err);
1066#else
1067static inline void blk_pm_runtime_init(struct request_queue *q,
1068 struct device *dev) {}
1069static inline int blk_pre_runtime_suspend(struct request_queue *q)
1070{
1071 return -ENOSYS;
1072}
1073static inline void blk_post_runtime_suspend(struct request_queue *q, int err) {}
1074static inline void blk_pre_runtime_resume(struct request_queue *q) {}
1075static inline void blk_post_runtime_resume(struct request_queue *q, int err) {}
1076#endif
1077
316cc67d 1078/*
75df7136
SJ
1079 * blk_plug permits building a queue of related requests by holding the I/O
1080 * fragments for a short period. This allows merging of sequential requests
 1081 * into a single larger request. As the requests are moved from a per-task list to
 1082 * the device's request_queue in a batch, this improves scalability because
 1083 * contention on the request_queue lock is reduced.
1084 *
1085 * It is ok not to disable preemption when adding the request to the plug list
 1086 * or when attempting a merge, because blk_schedule_flush_plug() will only flush
1087 * the plug list when the task sleeps by itself. For details, please see
1088 * schedule() where blk_schedule_flush_plug() is called.
316cc67d 1089 */
73c10101 1090struct blk_plug {
75df7136 1091 struct list_head list; /* requests */
320ae51f 1092 struct list_head mq_list; /* blk-mq requests */
75df7136 1093 struct list_head cb_list; /* md requires an unplug callback */
73c10101 1094};
55c022bb
SL
1095#define BLK_MAX_REQUEST_COUNT 16
1096
9cbb1750 1097struct blk_plug_cb;
74018dc3 1098typedef void (*blk_plug_cb_fn)(struct blk_plug_cb *, bool);
048c9374
N
1099struct blk_plug_cb {
1100 struct list_head list;
9cbb1750
N
1101 blk_plug_cb_fn callback;
1102 void *data;
048c9374 1103};
9cbb1750
N
1104extern struct blk_plug_cb *blk_check_plugged(blk_plug_cb_fn unplug,
1105 void *data, int size);
73c10101
JA
1106extern void blk_start_plug(struct blk_plug *);
1107extern void blk_finish_plug(struct blk_plug *);
f6603783 1108extern void blk_flush_plug_list(struct blk_plug *, bool);
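/*
 * Illustrative sketch, not part of the original header: batching a burst
 * of bio submissions under a plug so they can be merged and pushed to
 * the driver in one go when the plug is finished. The helper name is
 * hypothetical.
 */
static inline void example_submit_batch(struct bio **bios, int nr)
{
	struct blk_plug plug;
	int i;

	blk_start_plug(&plug);
	for (i = 0; i < nr; i++)
		generic_make_request(bios[i]);	/* held on the per-task plug list */
	blk_finish_plug(&plug);			/* flush the batch to the queue */
}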
73c10101
JA
1109
1110static inline void blk_flush_plug(struct task_struct *tsk)
1111{
1112 struct blk_plug *plug = tsk->plug;
1113
a237c1c5
JA
1114 if (plug)
1115 blk_flush_plug_list(plug, false);
1116}
1117
1118static inline void blk_schedule_flush_plug(struct task_struct *tsk)
1119{
1120 struct blk_plug *plug = tsk->plug;
1121
88b996cd 1122 if (plug)
f6603783 1123 blk_flush_plug_list(plug, true);
73c10101
JA
1124}
1125
1126static inline bool blk_needs_flush_plug(struct task_struct *tsk)
1127{
1128 struct blk_plug *plug = tsk->plug;
1129
320ae51f
JA
1130 return plug &&
1131 (!list_empty(&plug->list) ||
1132 !list_empty(&plug->mq_list) ||
1133 !list_empty(&plug->cb_list));
73c10101
JA
1134}
1135
1da177e4
LT
1136/*
1137 * tag stuff
1138 */
165125e1
JA
1139extern int blk_queue_start_tag(struct request_queue *, struct request *);
1140extern struct request *blk_queue_find_tag(struct request_queue *, int);
1141extern void blk_queue_end_tag(struct request_queue *, struct request *);
1142extern int blk_queue_init_tags(struct request_queue *, int, struct blk_queue_tag *);
1143extern void blk_queue_free_tags(struct request_queue *);
1144extern int blk_queue_resize_tags(struct request_queue *, int);
1145extern void blk_queue_invalidate_tags(struct request_queue *);
492dfb48
JB
1146extern struct blk_queue_tag *blk_init_tags(int);
1147extern void blk_free_tags(struct blk_queue_tag *);
1da177e4 1148
f583f492
DS
1149static inline struct request *blk_map_queue_find_tag(struct blk_queue_tag *bqt,
1150 int tag)
1151{
1152 if (unlikely(bqt == NULL || tag >= bqt->real_max_depth))
1153 return NULL;
1154 return bqt->tag_index[tag];
1155}
dd3932ed
CH
1156
1157#define BLKDEV_DISCARD_SECURE 0x01 /* secure discard */
1158
1159extern int blkdev_issue_flush(struct block_device *, gfp_t, sector_t *);
fbd9b09a
DM
1160extern int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
1161 sector_t nr_sects, gfp_t gfp_mask, unsigned long flags);
4363ac7c
MP
1162extern int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
1163 sector_t nr_sects, gfp_t gfp_mask, struct page *page);
3f14d792 1164extern int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
d93ba7a5 1165 sector_t nr_sects, gfp_t gfp_mask, bool discard);
2cf6d26a
CH
1166static inline int sb_issue_discard(struct super_block *sb, sector_t block,
1167 sector_t nr_blocks, gfp_t gfp_mask, unsigned long flags)
fb2dce86 1168{
2cf6d26a
CH
1169 return blkdev_issue_discard(sb->s_bdev, block << (sb->s_blocksize_bits - 9),
1170 nr_blocks << (sb->s_blocksize_bits - 9),
1171 gfp_mask, flags);
fb2dce86 1172}
e6fa0be6 1173static inline int sb_issue_zeroout(struct super_block *sb, sector_t block,
a107e5a3 1174 sector_t nr_blocks, gfp_t gfp_mask)
e6fa0be6
LC
1175{
1176 return blkdev_issue_zeroout(sb->s_bdev,
1177 block << (sb->s_blocksize_bits - 9),
1178 nr_blocks << (sb->s_blocksize_bits - 9),
d93ba7a5 1179 gfp_mask, true);
e6fa0be6 1180}
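/*
 * Illustrative sketch, not part of the original header: discarding a
 * range of a block device, falling back to explicitly writing zeroes
 * when the queue does not advertise discard support. The function name
 * is hypothetical.
 */
static inline int example_trim_range(struct block_device *bdev,
				     sector_t sector, sector_t nr_sects)
{
	if (blk_queue_discard(bdev_get_queue(bdev)))
		return blkdev_issue_discard(bdev, sector, nr_sects,
					    GFP_KERNEL, 0);

	return blkdev_issue_zeroout(bdev, sector, nr_sects, GFP_KERNEL, false);
}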
1da177e4 1181
018e0446 1182extern int blk_verify_command(unsigned char *cmd, fmode_t has_write_perm);
0b07de85 1183
eb28d31b
MP
1184enum blk_default_limits {
1185 BLK_MAX_SEGMENTS = 128,
1186 BLK_SAFE_MAX_SECTORS = 255,
eb28d31b
MP
1187 BLK_MAX_SEGMENT_SIZE = 65536,
1188 BLK_SEG_BOUNDARY_MASK = 0xFFFFFFFFUL,
1189};
0e435ac2 1190
1da177e4
LT
1191#define blkdev_entry_to_request(entry) list_entry((entry), struct request, queuelist)
1192
ae03bf63
MP
1193static inline unsigned long queue_bounce_pfn(struct request_queue *q)
1194{
025146e1 1195 return q->limits.bounce_pfn;
ae03bf63
MP
1196}
1197
1198static inline unsigned long queue_segment_boundary(struct request_queue *q)
1199{
025146e1 1200 return q->limits.seg_boundary_mask;
ae03bf63
MP
1201}
1202
1203static inline unsigned int queue_max_sectors(struct request_queue *q)
1204{
025146e1 1205 return q->limits.max_sectors;
ae03bf63
MP
1206}
1207
1208static inline unsigned int queue_max_hw_sectors(struct request_queue *q)
1209{
025146e1 1210 return q->limits.max_hw_sectors;
ae03bf63
MP
1211}
1212
8a78362c 1213static inline unsigned short queue_max_segments(struct request_queue *q)
ae03bf63 1214{
8a78362c 1215 return q->limits.max_segments;
ae03bf63
MP
1216}
1217
1218static inline unsigned int queue_max_segment_size(struct request_queue *q)
1219{
025146e1 1220 return q->limits.max_segment_size;
ae03bf63
MP
1221}
1222
e1defc4f 1223static inline unsigned short queue_logical_block_size(struct request_queue *q)
1da177e4
LT
1224{
1225 int retval = 512;
1226
025146e1
MP
1227 if (q && q->limits.logical_block_size)
1228 retval = q->limits.logical_block_size;
1da177e4
LT
1229
1230 return retval;
1231}
1232
e1defc4f 1233static inline unsigned short bdev_logical_block_size(struct block_device *bdev)
1da177e4 1234{
e1defc4f 1235 return queue_logical_block_size(bdev_get_queue(bdev));
1da177e4
LT
1236}
1237
c72758f3
MP
1238static inline unsigned int queue_physical_block_size(struct request_queue *q)
1239{
1240 return q->limits.physical_block_size;
1241}
1242
892b6f90 1243static inline unsigned int bdev_physical_block_size(struct block_device *bdev)
ac481c20
MP
1244{
1245 return queue_physical_block_size(bdev_get_queue(bdev));
1246}
1247
c72758f3
MP
1248static inline unsigned int queue_io_min(struct request_queue *q)
1249{
1250 return q->limits.io_min;
1251}
1252
ac481c20
MP
1253static inline int bdev_io_min(struct block_device *bdev)
1254{
1255 return queue_io_min(bdev_get_queue(bdev));
1256}
1257
c72758f3
MP
1258static inline unsigned int queue_io_opt(struct request_queue *q)
1259{
1260 return q->limits.io_opt;
1261}
1262
ac481c20
MP
1263static inline int bdev_io_opt(struct block_device *bdev)
1264{
1265 return queue_io_opt(bdev_get_queue(bdev));
1266}
1267
c72758f3
MP
1268static inline int queue_alignment_offset(struct request_queue *q)
1269{
ac481c20 1270 if (q->limits.misaligned)
c72758f3
MP
1271 return -1;
1272
ac481c20 1273 return q->limits.alignment_offset;
c72758f3
MP
1274}
1275
e03a72e1 1276static inline int queue_limit_alignment_offset(struct queue_limits *lim, sector_t sector)
81744ee4
MP
1277{
1278 unsigned int granularity = max(lim->physical_block_size, lim->io_min);
b8839b8c 1279 unsigned int alignment = sector_div(sector, granularity >> 9) << 9;
81744ee4 1280
b8839b8c 1281 return (granularity + lim->alignment_offset - alignment) % granularity;
c72758f3
MP
1282}
1283
ac481c20
MP
1284static inline int bdev_alignment_offset(struct block_device *bdev)
1285{
1286 struct request_queue *q = bdev_get_queue(bdev);
1287
1288 if (q->limits.misaligned)
1289 return -1;
1290
1291 if (bdev != bdev->bd_contains)
1292 return bdev->bd_part->alignment_offset;
1293
1294 return q->limits.alignment_offset;
1295}
1296
86b37281
MP
1297static inline int queue_discard_alignment(struct request_queue *q)
1298{
1299 if (q->limits.discard_misaligned)
1300 return -1;
1301
1302 return q->limits.discard_alignment;
1303}
1304
e03a72e1 1305static inline int queue_limit_discard_alignment(struct queue_limits *lim, sector_t sector)
86b37281 1306{
59771079 1307 unsigned int alignment, granularity, offset;
dd3d145d 1308
a934a00a
MP
1309 if (!lim->max_discard_sectors)
1310 return 0;
1311
59771079
LT
1312 /* Why are these in bytes, not sectors? */
1313 alignment = lim->discard_alignment >> 9;
1314 granularity = lim->discard_granularity >> 9;
1315 if (!granularity)
1316 return 0;
1317
1318 /* Offset of the partition start in 'granularity' sectors */
1319 offset = sector_div(sector, granularity);
1320
1321 /* And why do we do this modulus *again* in blkdev_issue_discard()? */
1322 offset = (granularity + alignment - offset) % granularity;
1323
1324 /* Turn it back into bytes, gaah */
1325 return offset << 9;
86b37281
MP
1326}
1327
c6e66634
PB
1328static inline int bdev_discard_alignment(struct block_device *bdev)
1329{
1330 struct request_queue *q = bdev_get_queue(bdev);
1331
1332 if (bdev != bdev->bd_contains)
1333 return bdev->bd_part->discard_alignment;
1334
1335 return q->limits.discard_alignment;
1336}
1337
98262f27
MP
1338static inline unsigned int queue_discard_zeroes_data(struct request_queue *q)
1339{
a934a00a 1340 if (q->limits.max_discard_sectors && q->limits.discard_zeroes_data == 1)
98262f27
MP
1341 return 1;
1342
1343 return 0;
1344}
1345
1346static inline unsigned int bdev_discard_zeroes_data(struct block_device *bdev)
1347{
1348 return queue_discard_zeroes_data(bdev_get_queue(bdev));
1349}
1350
4363ac7c
MP
1351static inline unsigned int bdev_write_same(struct block_device *bdev)
1352{
1353 struct request_queue *q = bdev_get_queue(bdev);
1354
1355 if (q)
1356 return q->limits.max_write_same_sectors;
1357
1358 return 0;
1359}
1360
165125e1 1361static inline int queue_dma_alignment(struct request_queue *q)
1da177e4 1362{
482eb689 1363 return q ? q->dma_alignment : 511;
1da177e4
LT
1364}
1365
14417799 1366static inline int blk_rq_aligned(struct request_queue *q, unsigned long addr,
87904074
FT
1367 unsigned int len)
1368{
1369 unsigned int alignment = queue_dma_alignment(q) | q->dma_pad_mask;
14417799 1370 return !(addr & alignment) && !(len & alignment);
87904074
FT
1371}
1372
1da177e4
LT
1373/* assumes size > 256 */
1374static inline unsigned int blksize_bits(unsigned int size)
1375{
1376 unsigned int bits = 8;
1377 do {
1378 bits++;
1379 size >>= 1;
1380 } while (size > 256);
1381 return bits;
1382}
1383
2befb9e3 1384static inline unsigned int block_size(struct block_device *bdev)
1da177e4
LT
1385{
1386 return bdev->bd_block_size;
1387}
1388
f3876930 1389static inline bool queue_flush_queueable(struct request_queue *q)
1390{
1391 return !q->flush_not_queueable;
1392}
1393
1da177e4
LT
1394typedef struct {struct page *v;} Sector;
1395
1396unsigned char *read_dev_sector(struct block_device *, sector_t, Sector *);
1397
1398static inline void put_dev_sector(Sector p)
1399{
1400 page_cache_release(p.v);
1401}
1402
1403struct work_struct;
59c3d45e
JA
1404int kblockd_schedule_work(struct work_struct *work);
1405int kblockd_schedule_delayed_work(struct delayed_work *dwork, unsigned long delay);
8ab14595 1406int kblockd_schedule_delayed_work_on(int cpu, struct delayed_work *dwork, unsigned long delay);
1da177e4 1407
9195291e 1408#ifdef CONFIG_BLK_CGROUP
28f4197e
JA
1409/*
1410 * This should not be using sched_clock(). A real patch is in progress
 1411 * to fix this up; until that is in place we need to disable preemption
1412 * around sched_clock() in this function and set_io_start_time_ns().
1413 */
9195291e
DS
1414static inline void set_start_time_ns(struct request *req)
1415{
28f4197e 1416 preempt_disable();
9195291e 1417 req->start_time_ns = sched_clock();
28f4197e 1418 preempt_enable();
9195291e
DS
1419}
1420
1421static inline void set_io_start_time_ns(struct request *req)
1422{
28f4197e 1423 preempt_disable();
9195291e 1424 req->io_start_time_ns = sched_clock();
28f4197e 1425 preempt_enable();
9195291e 1426}
84c124da
DS
1427
1428static inline uint64_t rq_start_time_ns(struct request *req)
1429{
1430 return req->start_time_ns;
1431}
1432
1433static inline uint64_t rq_io_start_time_ns(struct request *req)
1434{
1435 return req->io_start_time_ns;
1436}
9195291e
DS
1437#else
1438static inline void set_start_time_ns(struct request *req) {}
1439static inline void set_io_start_time_ns(struct request *req) {}
84c124da
DS
1440static inline uint64_t rq_start_time_ns(struct request *req)
1441{
1442 return 0;
1443}
1444static inline uint64_t rq_io_start_time_ns(struct request *req)
1445{
1446 return 0;
1447}
9195291e
DS
1448#endif
1449
1da177e4
LT
1450#define MODULE_ALIAS_BLOCKDEV(major,minor) \
1451 MODULE_ALIAS("block-major-" __stringify(major) "-" __stringify(minor))
1452#define MODULE_ALIAS_BLOCKDEV_MAJOR(major) \
1453 MODULE_ALIAS("block-major-" __stringify(major) "-*")
1454
7ba1ba12
MP
1455#if defined(CONFIG_BLK_DEV_INTEGRITY)
1456
8288f496
MP
1457enum blk_integrity_flags {
1458 BLK_INTEGRITY_VERIFY = 1 << 0,
1459 BLK_INTEGRITY_GENERATE = 1 << 1,
3aec2f41 1460 BLK_INTEGRITY_DEVICE_CAPABLE = 1 << 2,
aae7df50 1461 BLK_INTEGRITY_IP_CHECKSUM = 1 << 3,
8288f496 1462};
7ba1ba12 1463
18593088 1464struct blk_integrity_iter {
7ba1ba12
MP
1465 void *prot_buf;
1466 void *data_buf;
3be91c4a 1467 sector_t seed;
7ba1ba12 1468 unsigned int data_size;
3be91c4a 1469 unsigned short interval;
7ba1ba12
MP
1470 const char *disk_name;
1471};
1472
18593088 1473typedef int (integrity_processing_fn) (struct blk_integrity_iter *);
7ba1ba12
MP
1474
1475struct blk_integrity {
18593088
MP
1476 integrity_processing_fn *generate_fn;
1477 integrity_processing_fn *verify_fn;
7ba1ba12
MP
1478
1479 unsigned short flags;
1480 unsigned short tuple_size;
3be91c4a 1481 unsigned short interval;
7ba1ba12
MP
1482 unsigned short tag_size;
1483
1484 const char *name;
1485
1486 struct kobject kobj;
1487};
1488
a63a5cf8 1489extern bool blk_integrity_is_initialized(struct gendisk *);
7ba1ba12
MP
1490extern int blk_integrity_register(struct gendisk *, struct blk_integrity *);
1491extern void blk_integrity_unregister(struct gendisk *);
ad7fce93 1492extern int blk_integrity_compare(struct gendisk *, struct gendisk *);
13f05c8d
MP
1493extern int blk_rq_map_integrity_sg(struct request_queue *, struct bio *,
1494 struct scatterlist *);
1495extern int blk_rq_count_integrity_sg(struct request_queue *, struct bio *);
4eaf99be
MP
1496extern bool blk_integrity_merge_rq(struct request_queue *, struct request *,
1497 struct request *);
1498extern bool blk_integrity_merge_bio(struct request_queue *, struct request *,
1499 struct bio *);
7ba1ba12 1500
b04accc4
JA
1501static inline
1502struct blk_integrity *bdev_get_integrity(struct block_device *bdev)
1503{
1504 return bdev->bd_disk->integrity;
1505}
1506
b02739b0
MP
1507static inline struct blk_integrity *blk_get_integrity(struct gendisk *disk)
1508{
1509 return disk->integrity;
1510}
1511
180b2f95 1512static inline bool blk_integrity_rq(struct request *rq)
7ba1ba12 1513{
180b2f95 1514 return rq->cmd_flags & REQ_INTEGRITY;
7ba1ba12
MP
1515}
1516
13f05c8d
MP
1517static inline void blk_queue_max_integrity_segments(struct request_queue *q,
1518 unsigned int segs)
1519{
1520 q->limits.max_integrity_segments = segs;
1521}
1522
1523static inline unsigned short
1524queue_max_integrity_segments(struct request_queue *q)
1525{
1526 return q->limits.max_integrity_segments;
1527}
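/*
 * Illustrative sketch, not part of the original header: how a driver's
 * .c file might describe and register its protection information
 * profile. The callbacks, field values and names are hypothetical.
 */
static int example_generate_pi(struct blk_integrity_iter *iter)
{
	/* fill iter->prot_buf from the iter->data_size bytes in iter->data_buf */
	return 0;
}

static struct blk_integrity example_integrity = {
	.name		= "EXAMPLE-PI",
	.generate_fn	= example_generate_pi,
	.verify_fn	= example_generate_pi,	/* stand-in for a real verifier */
	.tuple_size	= 8,			/* bytes of PI per interval */
	.interval	= 512,			/* protection interval in bytes */
	.flags		= BLK_INTEGRITY_VERIFY | BLK_INTEGRITY_GENERATE,
};

/* registered from the driver's probe path, e.g.:
 *	blk_integrity_register(disk, &example_integrity);
 */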
1528
7ba1ba12
MP
1529#else /* CONFIG_BLK_DEV_INTEGRITY */
1530
fd83240a
SR
1531struct bio;
1532struct block_device;
1533struct gendisk;
1534struct blk_integrity;
1535
1536static inline int blk_integrity_rq(struct request *rq)
1537{
1538 return 0;
1539}
1540static inline int blk_rq_count_integrity_sg(struct request_queue *q,
1541 struct bio *b)
1542{
1543 return 0;
1544}
1545static inline int blk_rq_map_integrity_sg(struct request_queue *q,
1546 struct bio *b,
1547 struct scatterlist *s)
1548{
1549 return 0;
1550}
1551static inline struct blk_integrity *bdev_get_integrity(struct block_device *b)
1552{
61a04e5b 1553 return NULL;
fd83240a
SR
1554}
1555static inline struct blk_integrity *blk_get_integrity(struct gendisk *disk)
1556{
1557 return NULL;
1558}
1559static inline int blk_integrity_compare(struct gendisk *a, struct gendisk *b)
1560{
1561 return 0;
1562}
1563static inline int blk_integrity_register(struct gendisk *d,
1564 struct blk_integrity *b)
1565{
1566 return 0;
1567}
1568static inline void blk_integrity_unregister(struct gendisk *d)
1569{
1570}
1571static inline void blk_queue_max_integrity_segments(struct request_queue *q,
1572 unsigned int segs)
1573{
1574}
1575static inline unsigned short queue_max_integrity_segments(struct request_queue *q)
1576{
1577 return 0;
1578}
4eaf99be
MP
1579static inline bool blk_integrity_merge_rq(struct request_queue *rq,
1580 struct request *r1,
1581 struct request *r2)
fd83240a 1582{
cb1a5ab6 1583 return true;
fd83240a 1584}
4eaf99be
MP
1585static inline bool blk_integrity_merge_bio(struct request_queue *rq,
1586 struct request *r,
1587 struct bio *b)
fd83240a 1588{
cb1a5ab6 1589 return true;
fd83240a
SR
1590}
1591static inline bool blk_integrity_is_initialized(struct gendisk *g)
1592{
1593 return 0;
1594}
7ba1ba12
MP
1595
1596#endif /* CONFIG_BLK_DEV_INTEGRITY */
1597
08f85851 1598struct block_device_operations {
d4430d62 1599 int (*open) (struct block_device *, fmode_t);
db2a144b 1600 void (*release) (struct gendisk *, fmode_t);
47a191fd 1601 int (*rw_page)(struct block_device *, sector_t, struct page *, int rw);
d4430d62
AV
1602 int (*ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
1603 int (*compat_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
dd22f551
MW
1604 long (*direct_access)(struct block_device *, sector_t,
1605 void **, unsigned long *pfn, long size);
77ea887e
TH
1606 unsigned int (*check_events) (struct gendisk *disk,
1607 unsigned int clearing);
1608 /* ->media_changed() is DEPRECATED, use ->check_events() instead */
08f85851 1609 int (*media_changed) (struct gendisk *);
c3e33e04 1610 void (*unlock_native_capacity) (struct gendisk *);
08f85851
AV
1611 int (*revalidate_disk) (struct gendisk *);
1612 int (*getgeo)(struct block_device *, struct hd_geometry *);
b3a27d05
NG
 1613 /* this callback is called with swap_lock held and sometimes also the page table lock */
1614 void (*swap_slot_free_notify) (struct block_device *, unsigned long);
08f85851
AV
1615 struct module *owner;
1616};
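/*
 * Illustrative sketch, not part of the original header: a minimal
 * block_device_operations table as a driver's .c file might define it.
 * All names are hypothetical; THIS_MODULE assumes linux/module.h.
 */
static int example_open(struct block_device *bdev, fmode_t mode)
{
	return 0;
}

static void example_release(struct gendisk *disk, fmode_t mode)
{
}

static const struct block_device_operations example_fops = {
	.owner		= THIS_MODULE,
	.open		= example_open,
	.release	= example_release,
};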
1617
633a08b8
AV
1618extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int,
1619 unsigned long);
47a191fd
MW
1620extern int bdev_read_page(struct block_device *, sector_t, struct page *);
1621extern int bdev_write_page(struct block_device *, sector_t, struct page *,
1622 struct writeback_control *);
dd22f551
MW
1623extern long bdev_direct_access(struct block_device *, sector_t, void **addr,
1624 unsigned long *pfn, long size);
9361401e 1625#else /* CONFIG_BLOCK */
ac13a829
FF
1626
1627struct block_device;
1628
9361401e
DH
1629/*
1630 * stubs for when the block layer is configured out
1631 */
1632#define buffer_heads_over_limit 0
1633
9361401e
DH
1634static inline long nr_blockdev_pages(void)
1635{
1636 return 0;
1637}
1638
1f940bdf
JA
1639struct blk_plug {
1640};
1641
1642static inline void blk_start_plug(struct blk_plug *plug)
73c10101
JA
1643{
1644}
1645
1f940bdf 1646static inline void blk_finish_plug(struct blk_plug *plug)
73c10101
JA
1647{
1648}
1649
1f940bdf 1650static inline void blk_flush_plug(struct task_struct *task)
73c10101
JA
1651{
1652}
1653
a237c1c5
JA
1654static inline void blk_schedule_flush_plug(struct task_struct *task)
1655{
1656}
1657
1658
73c10101
JA
1659static inline bool blk_needs_flush_plug(struct task_struct *tsk)
1660{
1661 return false;
1662}
1663
ac13a829
FF
1664static inline int blkdev_issue_flush(struct block_device *bdev, gfp_t gfp_mask,
1665 sector_t *error_sector)
1666{
1667 return 0;
1668}
1669
9361401e
DH
1670#endif /* CONFIG_BLOCK */
1671
1da177e4 1672#endif