block: Reorder struct bio_set
[linux-block.git] include/linux/bio.h
/*
 * 2.5 block I/O model
 *
 * Copyright (C) 2001 Jens Axboe <axboe@suse.de>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 *
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#ifndef __LINUX_BIO_H
#define __LINUX_BIO_H

#include <linux/highmem.h>
#include <linux/mempool.h>
#include <linux/ioprio.h>
#include <linux/bug.h>

#ifdef CONFIG_BLOCK

#include <asm/io.h>

/* struct bio, bio_vec and BIO_* flags are defined in blk_types.h */
#include <linux/blk_types.h>

#define BIO_DEBUG

#ifdef BIO_DEBUG
#define BIO_BUG_ON	BUG_ON
#else
#define BIO_BUG_ON
#endif

#define BIO_MAX_PAGES		256
#define BIO_MAX_SIZE		(BIO_MAX_PAGES << PAGE_CACHE_SHIFT)
#define BIO_MAX_SECTORS		(BIO_MAX_SIZE >> 9)

/*
 * upper 16 bits of bi_rw define the io priority of this bio
 */
#define BIO_PRIO_SHIFT	(8 * sizeof(unsigned long) - IOPRIO_BITS)
#define bio_prio(bio)	((bio)->bi_rw >> BIO_PRIO_SHIFT)
#define bio_prio_valid(bio)	ioprio_valid(bio_prio(bio))

#define bio_set_prio(bio, prio)		do {			\
	WARN_ON(prio >= (1 << IOPRIO_BITS));			\
	(bio)->bi_rw &= ((1UL << BIO_PRIO_SHIFT) - 1);		\
	(bio)->bi_rw |= ((unsigned long) (prio) << BIO_PRIO_SHIFT);	\
} while (0)

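/*
 * Illustrative sketch (editor's example, not part of the original header):
 * tagging a bio with an explicit best-effort priority before submission,
 * using IOPRIO_PRIO_VALUE() from the <linux/ioprio.h> include above.
 *
 *	bio_set_prio(bio, IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, 4));
 *	if (bio_prio_valid(bio))
 *		submit_bio(bio->bi_rw, bio);
 */
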
/*
 * various member access, note that bio_data should of course not be used
 * on highmem page vectors
 */
#define bio_iovec_idx(bio, idx)	(&((bio)->bi_io_vec[(idx)]))
#define bio_iovec(bio)		bio_iovec_idx((bio), (bio)->bi_idx)
#define bio_page(bio)		bio_iovec((bio))->bv_page
#define bio_offset(bio)		bio_iovec((bio))->bv_offset
#define bio_segments(bio)	((bio)->bi_vcnt - (bio)->bi_idx)
#define bio_sectors(bio)	((bio)->bi_size >> 9)

static inline unsigned int bio_cur_bytes(struct bio *bio)
{
	if (bio->bi_vcnt)
		return bio_iovec(bio)->bv_len;
	else /* dataless requests such as discard */
		return bio->bi_size;
}

static inline void *bio_data(struct bio *bio)
{
	if (bio->bi_vcnt)
		return page_address(bio_page(bio)) + bio_offset(bio);

	return NULL;
}
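
/*
 * Illustrative sketch: peeking at the current segment of a bio whose pages
 * are known to sit in lowmem (bio_data() must not be used on highmem page
 * vectors). 'buf' is a hypothetical caller-provided buffer.
 *
 *	if (bio_has_data(bio) && !PageHighMem(bio_page(bio)))
 *		memcpy(buf, bio_data(bio), bio_cur_bytes(bio));
 */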

static inline int bio_has_allocated_vec(struct bio *bio)
{
	return bio->bi_io_vec && bio->bi_io_vec != bio->bi_inline_vecs;
}

/*
 * will die
 */
#define bio_to_phys(bio)	(page_to_phys(bio_page((bio))) + (unsigned long) bio_offset((bio)))
#define bvec_to_phys(bv)	(page_to_phys((bv)->bv_page) + (unsigned long) (bv)->bv_offset)

/*
 * queues that have highmem support enabled may still need to revert to
 * PIO transfers occasionally and thus map high pages temporarily. For
 * permanent PIO fall back, user is probably better off disabling highmem
 * I/O completely on that queue (see ide-dma for example)
 */
#define __bio_kmap_atomic(bio, idx, kmtype)				\
	(kmap_atomic(bio_iovec_idx((bio), (idx))->bv_page) +	\
		bio_iovec_idx((bio), (idx))->bv_offset)

#define __bio_kunmap_atomic(addr, kmtype) kunmap_atomic(addr)

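/*
 * Illustrative sketch: mapping segment 'i' of a bio for a brief PIO-style
 * copy. The kmtype argument is retained only for compatibility and is
 * ignored, so 0 is passed here; 'buf' is a hypothetical bounce buffer.
 *
 *	char *p = __bio_kmap_atomic(bio, i, 0);
 *
 *	memcpy(buf, p, bio_iovec_idx(bio, i)->bv_len);
 *	__bio_kunmap_atomic(p, 0);
 */
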
/*
 * merge helpers etc
 */

#define __BVEC_END(bio)		bio_iovec_idx((bio), (bio)->bi_vcnt - 1)
#define __BVEC_START(bio)	bio_iovec_idx((bio), (bio)->bi_idx)

/* Default implementation of BIOVEC_PHYS_MERGEABLE */
#define __BIOVEC_PHYS_MERGEABLE(vec1, vec2)	\
	((bvec_to_phys((vec1)) + (vec1)->bv_len) == bvec_to_phys((vec2)))

/*
 * allow arch override, e.g. for virtualized architectures (put in asm/io.h)
 */
#ifndef BIOVEC_PHYS_MERGEABLE
#define BIOVEC_PHYS_MERGEABLE(vec1, vec2)	\
	__BIOVEC_PHYS_MERGEABLE(vec1, vec2)
#endif

#define __BIO_SEG_BOUNDARY(addr1, addr2, mask) \
	(((addr1) | (mask)) == (((addr2) - 1) | (mask)))
#define BIOVEC_SEG_BOUNDARY(q, b1, b2) \
	__BIO_SEG_BOUNDARY(bvec_to_phys((b1)), bvec_to_phys((b2)) + (b2)->bv_len, queue_segment_boundary((q)))
#define BIO_SEG_BOUNDARY(q, b1, b2) \
	BIOVEC_SEG_BOUNDARY((q), __BVEC_END((b1)), __BVEC_START((b2)))

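/*
 * Illustrative sketch: the shape of check a merging path performs before
 * gluing bio 'next' onto bio 'prev' on queue 'q' - the ending and starting
 * bvecs must be physically contiguous and must not straddle the queue's
 * segment boundary. 'allow_merge' is a hypothetical helper.
 *
 *	if (BIOVEC_PHYS_MERGEABLE(__BVEC_END(prev), __BVEC_START(next)) &&
 *	    BIO_SEG_BOUNDARY(q, prev, next))
 *		allow_merge(prev, next);
 */
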
#define bio_io_error(bio) bio_endio((bio), -EIO)

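/*
 * Illustrative sketch: a completion callback with the bio_end_io_t
 * signature, as a driver might pair with bio_endio()/bio_io_error().
 * 'struct my_ctx' and its members are hypothetical.
 *
 *	static void my_end_io(struct bio *bio, int error)
 *	{
 *		struct my_ctx *ctx = bio->bi_private;
 *
 *		if (error)
 *			ctx->error = error;
 *		complete(&ctx->done);
 *		bio_put(bio);
 *	}
 */
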
/*
 * drivers should not use the __ version unless they _really_ want to
 * run through the entire bio and not just pending pieces
 */
#define __bio_for_each_segment(bvl, bio, i, start_idx)			\
	for (bvl = bio_iovec_idx((bio), (start_idx)), i = (start_idx);	\
	     i < (bio)->bi_vcnt;					\
	     bvl++, i++)

#define bio_for_each_segment(bvl, bio, i)				\
	__bio_for_each_segment(bvl, bio, i, (bio)->bi_idx)

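/*
 * Illustrative sketch: summing the lengths of the not-yet-completed
 * segments of a bio (for a data-carrying bio this matches the remaining
 * bi_size).
 *
 *	struct bio_vec *bvec;
 *	unsigned int bytes = 0;
 *	int i;
 *
 *	bio_for_each_segment(bvec, bio, i)
 *		bytes += bvec->bv_len;
 */
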
/*
 * get a reference to a bio, so it won't disappear. the intended use is
 * something like:
 *
 * bio_get(bio);
 * submit_bio(rw, bio);
 * if (bio->bi_flags ...)
 *	do_something
 * bio_put(bio);
 *
 * without the bio_get(), it could potentially complete I/O before submit_bio
 * returns, and the bio would then already be freed memory by the time the
 * if (bio->bi_flags ...) check runs
 */
#define bio_get(bio)	atomic_inc(&(bio)->bi_cnt)

#if defined(CONFIG_BLK_DEV_INTEGRITY)
/*
 * bio integrity payload
 */
struct bio_integrity_payload {
	struct bio		*bip_bio;	/* parent bio */

	sector_t		bip_sector;	/* virtual start sector */

	void			*bip_buf;	/* generated integrity data */
	bio_end_io_t		*bip_end_io;	/* saved I/O completion fn */

	unsigned int		bip_size;	/* size of the integrity buffer */

	unsigned short		bip_slab;	/* slab the bip came from */
	unsigned short		bip_vcnt;	/* # of integrity bio_vecs */
	unsigned short		bip_idx;	/* current bip_vec index */

	struct work_struct	bip_work;	/* I/O completion */
	struct bio_vec		bip_vec[0];	/* embedded bvec array */
};
#endif /* CONFIG_BLK_DEV_INTEGRITY */

/*
 * A bio_pair is used when we need to split a bio.
 * This can only happen for a bio that refers to just one
 * page of data, and in the unusual situation when the
 * page crosses a chunk/device boundary
 *
 * The address of the master bio is stored in bio1.bi_private
 * The address of the pool the pair was allocated from is stored
 * in bio2.bi_private
 */
struct bio_pair {
	struct bio			bio1, bio2;
	struct bio_vec			bv1, bv2;
#if defined(CONFIG_BLK_DEV_INTEGRITY)
	struct bio_integrity_payload	bip1, bip2;
	struct bio_vec			iv1, iv2;
#endif
	atomic_t			cnt;
	int				error;
};
extern struct bio_pair *bio_split(struct bio *bi, int first_sectors);
extern void bio_pair_release(struct bio_pair *dbio);

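/*
 * Illustrative sketch: how a remapping driver might split a single-page
 * bio that crosses a chunk boundary. 'split_sectors' is a hypothetical
 * value computed from the chunk geometry.
 *
 *	struct bio_pair *bp = bio_split(bio, split_sectors);
 *
 *	if (bp) {
 *		generic_make_request(&bp->bio1);
 *		generic_make_request(&bp->bio2);
 *		bio_pair_release(bp);
 *	}
 */
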
extern struct bio_set *bioset_create(unsigned int, unsigned int);
extern void bioset_free(struct bio_set *);

extern struct bio *bio_alloc_bioset(gfp_t, int, struct bio_set *);
extern void bio_put(struct bio *);

extern void __bio_clone(struct bio *, struct bio *);
extern struct bio *bio_clone_bioset(struct bio *, gfp_t, struct bio_set *bs);

extern struct bio_set *fs_bio_set;

static inline struct bio *bio_alloc(gfp_t gfp_mask, unsigned int nr_iovecs)
{
	return bio_alloc_bioset(gfp_mask, nr_iovecs, fs_bio_set);
}

static inline struct bio *bio_clone(struct bio *bio, gfp_t gfp_mask)
{
	return bio_clone_bioset(bio, gfp_mask, fs_bio_set);
}

static inline struct bio *bio_kmalloc(gfp_t gfp_mask, unsigned int nr_iovecs)
{
	return bio_alloc_bioset(gfp_mask, nr_iovecs, NULL);
}

static inline struct bio *bio_clone_kmalloc(struct bio *bio, gfp_t gfp_mask)
{
	return bio_clone_bioset(bio, gfp_mask, NULL);
}

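/*
 * Illustrative sketch: allocating a single-segment bio from fs_bio_set
 * and submitting a page write. 'bdev', 'sector', 'page' and 'my_end_io'
 * are hypothetical caller-supplied values.
 *
 *	struct bio *bio = bio_alloc(GFP_NOIO, 1);
 *
 *	bio->bi_bdev	= bdev;
 *	bio->bi_sector	= sector;
 *	bio->bi_end_io	= my_end_io;
 *	bio_add_page(bio, page, PAGE_SIZE, 0);
 *	submit_bio(WRITE, bio);
 */
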
extern void bio_endio(struct bio *, int);
struct request_queue;
extern int bio_phys_segments(struct request_queue *, struct bio *);

extern void bio_init(struct bio *);
extern void bio_reset(struct bio *);

extern int bio_add_page(struct bio *, struct page *, unsigned int, unsigned int);
extern int bio_add_pc_page(struct request_queue *, struct bio *, struct page *,
			   unsigned int, unsigned int);
extern int bio_get_nr_vecs(struct block_device *);
extern sector_t bio_sector_offset(struct bio *, unsigned short, unsigned int);
extern struct bio *bio_map_user(struct request_queue *, struct block_device *,
				unsigned long, unsigned int, int, gfp_t);
struct sg_iovec;
struct rq_map_data;
extern struct bio *bio_map_user_iov(struct request_queue *,
				    struct block_device *,
				    struct sg_iovec *, int, int, gfp_t);
extern void bio_unmap_user(struct bio *);
extern struct bio *bio_map_kern(struct request_queue *, void *, unsigned int,
				gfp_t);
extern struct bio *bio_copy_kern(struct request_queue *, void *, unsigned int,
				 gfp_t, int);
extern void bio_set_pages_dirty(struct bio *bio);
extern void bio_check_pages_dirty(struct bio *bio);

#ifndef ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
# error	"You should define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE for your platform"
#endif
#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
extern void bio_flush_dcache_pages(struct bio *bi);
#else
static inline void bio_flush_dcache_pages(struct bio *bi)
{
}
#endif

extern struct bio *bio_copy_user(struct request_queue *, struct rq_map_data *,
				 unsigned long, unsigned int, int, gfp_t);
extern struct bio *bio_copy_user_iov(struct request_queue *,
				     struct rq_map_data *, struct sg_iovec *,
				     int, int, gfp_t);
extern int bio_uncopy_user(struct bio *);
void zero_fill_bio(struct bio *bio);
extern struct bio_vec *bvec_alloc_bs(gfp_t, int, unsigned long *, struct bio_set *);
extern void bvec_free_bs(struct bio_set *, struct bio_vec *, unsigned int);
extern unsigned int bvec_nr_vecs(unsigned short idx);

#ifdef CONFIG_BLK_CGROUP
int bio_associate_current(struct bio *bio);
void bio_disassociate_task(struct bio *bio);
#else	/* CONFIG_BLK_CGROUP */
static inline int bio_associate_current(struct bio *bio) { return -ENOENT; }
static inline void bio_disassociate_task(struct bio *bio) { }
#endif	/* CONFIG_BLK_CGROUP */

#ifdef CONFIG_HIGHMEM
/*
 * remember never ever reenable interrupts between a bvec_kmap_irq and
 * bvec_kunmap_irq!
 */
static inline char *bvec_kmap_irq(struct bio_vec *bvec, unsigned long *flags)
{
	unsigned long addr;

	/*
	 * might not be a highmem page, but the preempt/irq count
	 * balancing is a lot nicer this way
	 */
	local_irq_save(*flags);
	addr = (unsigned long) kmap_atomic(bvec->bv_page);

	BUG_ON(addr & ~PAGE_MASK);

	return (char *) addr + bvec->bv_offset;
}

static inline void bvec_kunmap_irq(char *buffer, unsigned long *flags)
{
	unsigned long ptr = (unsigned long) buffer & PAGE_MASK;

	kunmap_atomic((void *) ptr);
	local_irq_restore(*flags);
}

#else
static inline char *bvec_kmap_irq(struct bio_vec *bvec, unsigned long *flags)
{
	return page_address(bvec->bv_page) + bvec->bv_offset;
}

static inline void bvec_kunmap_irq(char *buffer, unsigned long *flags)
{
	*flags = 0;
}
#endif

static inline char *__bio_kmap_irq(struct bio *bio, unsigned short idx,
				   unsigned long *flags)
{
	return bvec_kmap_irq(bio_iovec_idx(bio, idx), flags);
}
#define __bio_kunmap_irq(buf, flags)	bvec_kunmap_irq(buf, flags)

#define bio_kmap_irq(bio, flags) \
	__bio_kmap_irq((bio), (bio)->bi_idx, (flags))
#define bio_kunmap_irq(buf, flags)	__bio_kunmap_irq(buf, flags)

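/*
 * Illustrative sketch: copying the current segment of a bio out through
 * the irq-safe mapping helpers, which work for both highmem and lowmem
 * pages. 'buf' is a hypothetical destination buffer.
 *
 *	unsigned long flags;
 *	char *data = bio_kmap_irq(bio, &flags);
 *
 *	memcpy(buf, data, bio_cur_bytes(bio));
 *	bio_kunmap_irq(data, &flags);
 */
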
/*
 * Check whether this bio carries any data or not. A NULL bio is allowed.
 */
static inline bool bio_has_data(struct bio *bio)
{
	if (bio && bio->bi_vcnt)
		return true;

	return false;
}

static inline bool bio_is_rw(struct bio *bio)
{
	if (!bio_has_data(bio))
		return false;

	if (bio->bi_rw & REQ_WRITE_SAME)
		return false;

	return true;
}

static inline bool bio_mergeable(struct bio *bio)
{
	if (bio->bi_rw & REQ_NOMERGE_FLAGS)
		return false;

	return true;
}

/*
 * BIO list management for use by remapping drivers (e.g. DM or MD) and loop.
 *
 * A bio_list anchors a singly-linked list of bios chained through the bi_next
 * member of the bio.  The bio_list also caches the last list member to allow
 * fast access to the tail.
 */
struct bio_list {
	struct bio *head;
	struct bio *tail;
};

static inline int bio_list_empty(const struct bio_list *bl)
{
	return bl->head == NULL;
}

static inline void bio_list_init(struct bio_list *bl)
{
	bl->head = bl->tail = NULL;
}

#define bio_list_for_each(bio, bl) \
	for (bio = (bl)->head; bio; bio = bio->bi_next)

static inline unsigned bio_list_size(const struct bio_list *bl)
{
	unsigned sz = 0;
	struct bio *bio;

	bio_list_for_each(bio, bl)
		sz++;

	return sz;
}

static inline void bio_list_add(struct bio_list *bl, struct bio *bio)
{
	bio->bi_next = NULL;

	if (bl->tail)
		bl->tail->bi_next = bio;
	else
		bl->head = bio;

	bl->tail = bio;
}

static inline void bio_list_add_head(struct bio_list *bl, struct bio *bio)
{
	bio->bi_next = bl->head;

	bl->head = bio;

	if (!bl->tail)
		bl->tail = bio;
}

static inline void bio_list_merge(struct bio_list *bl, struct bio_list *bl2)
{
	if (!bl2->head)
		return;

	if (bl->tail)
		bl->tail->bi_next = bl2->head;
	else
		bl->head = bl2->head;

	bl->tail = bl2->tail;
}

static inline void bio_list_merge_head(struct bio_list *bl,
				       struct bio_list *bl2)
{
	if (!bl2->head)
		return;

	if (bl->head)
		bl2->tail->bi_next = bl->head;
	else
		bl->tail = bl2->tail;

	bl->head = bl2->head;
}

static inline struct bio *bio_list_peek(struct bio_list *bl)
{
	return bl->head;
}

static inline struct bio *bio_list_pop(struct bio_list *bl)
{
	struct bio *bio = bl->head;

	if (bio) {
		bl->head = bl->head->bi_next;
		if (!bl->head)
			bl->tail = NULL;

		bio->bi_next = NULL;
	}

	return bio;
}

static inline struct bio *bio_list_get(struct bio_list *bl)
{
	struct bio *bio = bl->head;

	bl->head = bl->tail = NULL;

	return bio;
}

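/*
 * Illustrative sketch: deferring bios for later submission, the usual
 * pattern in remapping drivers.
 *
 *	struct bio_list deferred;
 *	struct bio *bio;
 *
 *	bio_list_init(&deferred);
 *	bio_list_add(&deferred, bio);
 *	...
 *	while ((bio = bio_list_pop(&deferred)))
 *		generic_make_request(bio);
 */
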
/*
 * bio_set is used to allow other portions of the IO system to
 * allocate their own private memory pools for bio and iovec structures.
 * These memory pools in turn all allocate from the bio_slab
 * and the bvec_slabs[].
 */
#define BIO_POOL_SIZE 2
#define BIOVEC_NR_POOLS 6
#define BIOVEC_MAX_IDX	(BIOVEC_NR_POOLS - 1)

struct bio_set {
	struct kmem_cache *bio_slab;
	unsigned int front_pad;

	mempool_t *bio_pool;
#if defined(CONFIG_BLK_DEV_INTEGRITY)
	mempool_t *bio_integrity_pool;
#endif
	mempool_t *bvec_pool;
};

struct biovec_slab {
	int nr_vecs;
	char *name;
	struct kmem_cache *slab;
};

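/*
 * Illustrative sketch: a driver creating a private bio_set so its
 * allocations cannot deadlock against fs_bio_set under memory pressure.
 * 'struct my_io' is a hypothetical per-bio context carved out of the
 * front_pad area in front of each bio.
 *
 *	struct bio_set *bs = bioset_create(BIO_POOL_SIZE,
 *					   sizeof(struct my_io));
 *	if (!bs)
 *		return -ENOMEM;
 *	...
 *	bioset_free(bs);
 */
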
/*
 * a small number of entries is fine, not going to be performance critical.
 * basically we just need to survive
 */
#define BIO_SPLIT_ENTRIES 2

#if defined(CONFIG_BLK_DEV_INTEGRITY)

#define bip_vec_idx(bip, idx)	(&(bip->bip_vec[(idx)]))
#define bip_vec(bip)		bip_vec_idx(bip, 0)

#define __bip_for_each_vec(bvl, bip, i, start_idx)			\
	for (bvl = bip_vec_idx((bip), (start_idx)), i = (start_idx);	\
	     i < (bip)->bip_vcnt;					\
	     bvl++, i++)

#define bip_for_each_vec(bvl, bip, i)					\
	__bip_for_each_vec(bvl, bip, i, (bip)->bip_idx)

#define bio_for_each_integrity_vec(_bvl, _bio, _iter)			\
	for_each_bio(_bio)						\
		bip_for_each_vec(_bvl, _bio->bi_integrity, _iter)

#define bio_integrity(bio) (bio->bi_integrity != NULL)

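/*
 * Illustrative sketch: walking the integrity bvecs attached to a bio,
 * e.g. to verify protection information. 'verify_one' is a hypothetical
 * helper.
 *
 *	struct bio_vec *iv;
 *	unsigned int i;
 *
 *	if (bio_integrity(bio))
 *		bip_for_each_vec(iv, bio->bi_integrity, i)
 *			verify_one(iv->bv_page, iv->bv_offset, iv->bv_len);
 */
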
extern struct bio_integrity_payload *bio_integrity_alloc(struct bio *, gfp_t, unsigned int);
extern void bio_integrity_free(struct bio *);
extern int bio_integrity_add_page(struct bio *, struct page *, unsigned int, unsigned int);
extern int bio_integrity_enabled(struct bio *bio);
extern int bio_integrity_set_tag(struct bio *, void *, unsigned int);
extern int bio_integrity_get_tag(struct bio *, void *, unsigned int);
extern int bio_integrity_prep(struct bio *);
extern void bio_integrity_endio(struct bio *, int);
extern void bio_integrity_advance(struct bio *, unsigned int);
extern void bio_integrity_trim(struct bio *, unsigned int, unsigned int);
extern void bio_integrity_split(struct bio *, struct bio_pair *, int);
extern int bio_integrity_clone(struct bio *, struct bio *, gfp_t);
extern int bioset_integrity_create(struct bio_set *, int);
extern void bioset_integrity_free(struct bio_set *);
extern void bio_integrity_init(void);

#else /* CONFIG_BLK_DEV_INTEGRITY */

static inline int bio_integrity(struct bio *bio)
{
	return 0;
}

static inline int bio_integrity_enabled(struct bio *bio)
{
	return 0;
}

static inline int bioset_integrity_create(struct bio_set *bs, int pool_size)
{
	return 0;
}

static inline void bioset_integrity_free(struct bio_set *bs)
{
	return;
}

static inline int bio_integrity_prep(struct bio *bio)
{
	return 0;
}

static inline void bio_integrity_free(struct bio *bio)
{
	return;
}

static inline int bio_integrity_clone(struct bio *bio, struct bio *bio_src,
				      gfp_t gfp_mask)
{
	return 0;
}

static inline void bio_integrity_split(struct bio *bio, struct bio_pair *bp,
				       int sectors)
{
	return;
}

static inline void bio_integrity_advance(struct bio *bio,
					 unsigned int bytes_done)
{
	return;
}

static inline void bio_integrity_trim(struct bio *bio, unsigned int offset,
				      unsigned int sectors)
{
	return;
}

static inline void bio_integrity_init(void)
{
	return;
}

#endif /* CONFIG_BLK_DEV_INTEGRITY */

#endif /* CONFIG_BLOCK */
#endif /* __LINUX_BIO_H */