/*
 * 2.5 block I/O model
 *
 * Copyright (C) 2001 Jens Axboe <axboe@suse.de>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-
 */
#ifndef __LINUX_BIO_H
#define __LINUX_BIO_H

#include <linux/highmem.h>
#include <linux/mempool.h>
#include <linux/ioprio.h>
#include <linux/bug.h>

#ifdef CONFIG_BLOCK

#include <asm/io.h>

/* struct bio, bio_vec and BIO_* flags are defined in blk_types.h */
#include <linux/blk_types.h>

#define BIO_DEBUG

#ifdef BIO_DEBUG
#define BIO_BUG_ON	BUG_ON
#else
#define BIO_BUG_ON
#endif

#define BIO_MAX_PAGES		256
#define BIO_MAX_SIZE		(BIO_MAX_PAGES << PAGE_CACHE_SHIFT)
#define BIO_MAX_SECTORS		(BIO_MAX_SIZE >> 9)

/*
 * upper 16 bits of bi_rw define the io priority of this bio
 */
#define BIO_PRIO_SHIFT	(8 * sizeof(unsigned long) - IOPRIO_BITS)
#define bio_prio(bio)	((bio)->bi_rw >> BIO_PRIO_SHIFT)
#define bio_prio_valid(bio)	ioprio_valid(bio_prio(bio))

#define bio_set_prio(bio, prio)	do {					\
	WARN_ON(prio >= (1 << IOPRIO_BITS));				\
	(bio)->bi_rw &= ((1UL << BIO_PRIO_SHIFT) - 1);			\
	(bio)->bi_rw |= ((unsigned long) (prio) << BIO_PRIO_SHIFT);	\
} while (0)

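/*
 * A minimal usage sketch for the priority helpers above (illustrative
 * only; it assumes the IOPRIO_PRIO_VALUE() helper from linux/ioprio.h
 * and a caller-supplied bio):
 *
 *	bio_set_prio(bio, IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, 0));
 *	if (bio_prio_valid(bio))
 *		pr_debug("bio prio %lu\n", bio_prio(bio));
 */
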
/*
 * various member access, note that bio_data should of course not be used
 * on highmem page vectors
 */
#define bio_iovec_idx(bio, idx)	(&((bio)->bi_io_vec[(idx)]))
#define bio_iovec(bio)		bio_iovec_idx((bio), (bio)->bi_idx)
#define bio_page(bio)		bio_iovec((bio))->bv_page
#define bio_offset(bio)		bio_iovec((bio))->bv_offset
#define bio_segments(bio)	((bio)->bi_vcnt - (bio)->bi_idx)
#define bio_sectors(bio)	((bio)->bi_size >> 9)
#define bio_end_sector(bio)	((bio)->bi_sector + bio_sectors((bio)))

static inline unsigned int bio_cur_bytes(struct bio *bio)
{
	if (bio->bi_vcnt)
		return bio_iovec(bio)->bv_len;
	else /* dataless requests such as discard */
		return bio->bi_size;
}

static inline void *bio_data(struct bio *bio)
{
	if (bio->bi_vcnt)
		return page_address(bio_page(bio)) + bio_offset(bio);

	return NULL;
}

static inline int bio_has_allocated_vec(struct bio *bio)
{
	return bio->bi_io_vec && bio->bi_io_vec != bio->bi_inline_vecs;
}

/*
 * will die
 */
#define bio_to_phys(bio)	(page_to_phys(bio_page((bio))) + (unsigned long) bio_offset((bio)))
#define bvec_to_phys(bv)	(page_to_phys((bv)->bv_page) + (unsigned long) (bv)->bv_offset)

/*
 * queues that have highmem support enabled may still need to revert to
 * PIO transfers occasionally and thus map high pages temporarily. For
 * permanent PIO fall back, user is probably better off disabling highmem
 * I/O completely on that queue (see ide-dma for example)
 */
#define __bio_kmap_atomic(bio, idx, kmtype)				\
	(kmap_atomic(bio_iovec_idx((bio), (idx))->bv_page) +		\
		bio_iovec_idx((bio), (idx))->bv_offset)

#define __bio_kunmap_atomic(addr, kmtype) kunmap_atomic(addr)

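/*
 * Minimal usage sketch for the atomic kmap helpers above (illustrative
 * only; the kmtype argument is ignored by the current implementation,
 * "idx" must be a valid segment index and "dst" is an assumed buffer):
 *
 *	char *buf = __bio_kmap_atomic(bio, idx, 0);
 *
 *	memcpy(dst, buf, bio_iovec_idx(bio, idx)->bv_len);
 *	__bio_kunmap_atomic(buf, 0);
 */
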
/*
 * merge helpers etc
 */

#define __BVEC_END(bio)		bio_iovec_idx((bio), (bio)->bi_vcnt - 1)
#define __BVEC_START(bio)	bio_iovec_idx((bio), (bio)->bi_idx)

/* Default implementation of BIOVEC_PHYS_MERGEABLE */
#define __BIOVEC_PHYS_MERGEABLE(vec1, vec2)	\
	((bvec_to_phys((vec1)) + (vec1)->bv_len) == bvec_to_phys((vec2)))

/*
 * allow arch override, e.g. for virtualized architectures (put in asm/io.h)
 */
#ifndef BIOVEC_PHYS_MERGEABLE
#define BIOVEC_PHYS_MERGEABLE(vec1, vec2)	\
	__BIOVEC_PHYS_MERGEABLE(vec1, vec2)
#endif

#define __BIO_SEG_BOUNDARY(addr1, addr2, mask) \
	(((addr1) | (mask)) == (((addr2) - 1) | (mask)))
#define BIOVEC_SEG_BOUNDARY(q, b1, b2) \
	__BIO_SEG_BOUNDARY(bvec_to_phys((b1)), bvec_to_phys((b2)) + (b2)->bv_len, queue_segment_boundary((q)))
#define BIO_SEG_BOUNDARY(q, b1, b2) \
	BIOVEC_SEG_BOUNDARY((q), __BVEC_END((b1)), __BVEC_START((b2)))

#define bio_io_error(bio) bio_endio((bio), -EIO)

/*
 * drivers should not use the __ version unless they _really_ know what
 * they're doing
 */
#define __bio_for_each_segment(bvl, bio, i, start_idx)			\
	for (bvl = bio_iovec_idx((bio), (start_idx)), i = (start_idx);	\
	     i < (bio)->bi_vcnt;					\
	     bvl++, i++)

/*
 * drivers should _never_ use the all version - the bio may have been split
 * before it got to the driver and the driver won't own all of it
 */
#define bio_for_each_segment_all(bvl, bio, i)				\
	for (i = 0;							\
	     bvl = bio_iovec_idx((bio), (i)), i < (bio)->bi_vcnt;	\
	     i++)

#define bio_for_each_segment(bvl, bio, i)				\
	for (i = (bio)->bi_idx;						\
	     bvl = bio_iovec_idx((bio), (i)), i < (bio)->bi_vcnt;	\
	     i++)

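/*
 * Illustrative sketch of iterating the remaining segments of a bio with
 * the helper above ("bvec", "i" and "total" are the hypothetical
 * caller's locals); per the comments above, only code that owns the
 * entire bio should use the _all variant instead:
 *
 *	struct bio_vec *bvec;
 *	unsigned int total = 0;
 *	int i;
 *
 *	bio_for_each_segment(bvec, bio, i)
 *		total += bvec->bv_len;
 */
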
/*
 * get a reference to a bio, so it won't disappear. the intended use is
 * something like:
 *
 * bio_get(bio);
 * submit_bio(rw, bio);
 * if (bio->bi_flags ...)
 *	do_something
 * bio_put(bio);
 *
 * without the bio_get(), the I/O could potentially complete before
 * submit_bio returns, and the bio would then already be freed memory
 * by the time the if (bio->bi_flags ...) check runs
 */
#define bio_get(bio)	atomic_inc(&(bio)->bi_cnt)

#if defined(CONFIG_BLK_DEV_INTEGRITY)
/*
 * bio integrity payload
 */
struct bio_integrity_payload {
	struct bio		*bip_bio;	/* parent bio */

	sector_t		bip_sector;	/* virtual start sector */

	void			*bip_buf;	/* generated integrity data */
	bio_end_io_t		*bip_end_io;	/* saved I/O completion fn */

	unsigned int		bip_size;

	unsigned short		bip_slab;	/* slab the bip came from */
	unsigned short		bip_vcnt;	/* # of integrity bio_vecs */
	unsigned short		bip_idx;	/* current bip_vec index */

	struct work_struct	bip_work;	/* I/O completion */

	struct bio_vec		*bip_vec;
	struct bio_vec		bip_inline_vecs[0];	/* embedded bvec array */
};
#endif /* CONFIG_BLK_DEV_INTEGRITY */

/*
 * A bio_pair is used when we need to split a bio.
 * This can only happen for a bio that refers to just one
 * page of data, and in the unusual situation when the
 * page crosses a chunk/device boundary
 *
 * The address of the master bio is stored in bio1.bi_private
 * The address of the pool the pair was allocated from is stored
 *   in bio2.bi_private
 */
struct bio_pair {
	struct bio			bio1, bio2;
	struct bio_vec			bv1, bv2;
#if defined(CONFIG_BLK_DEV_INTEGRITY)
	struct bio_integrity_payload	bip1, bip2;
	struct bio_vec			iv1, iv2;
#endif
	atomic_t			cnt;
	int				error;
};
extern struct bio_pair *bio_split(struct bio *bi, int first_sectors);
extern void bio_pair_release(struct bio_pair *dbio);

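/*
 * Illustrative sketch of the split-and-resubmit pattern used by
 * remapping drivers (error handling omitted; "first_sectors" is the
 * caller's split point in 512-byte sectors):
 *
 *	struct bio_pair *bp = bio_split(bio, first_sectors);
 *
 *	generic_make_request(&bp->bio1);
 *	generic_make_request(&bp->bio2);
 *	bio_pair_release(bp);
 */
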
extern struct bio_set *bioset_create(unsigned int, unsigned int);
extern void bioset_free(struct bio_set *);
extern mempool_t *biovec_create_pool(struct bio_set *bs, int pool_entries);

extern struct bio *bio_alloc_bioset(gfp_t, int, struct bio_set *);
extern void bio_put(struct bio *);

extern void __bio_clone(struct bio *, struct bio *);
extern struct bio *bio_clone_bioset(struct bio *, gfp_t, struct bio_set *bs);

extern struct bio_set *fs_bio_set;

static inline struct bio *bio_alloc(gfp_t gfp_mask, unsigned int nr_iovecs)
{
	return bio_alloc_bioset(gfp_mask, nr_iovecs, fs_bio_set);
}

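/*
 * Minimal allocation sketch (illustrative only; "bdev", "sector" and
 * "page" are assumed to be supplied by the caller, and the return value
 * of bio_add_page() is not checked here):
 *
 *	struct bio *bio = bio_alloc(GFP_NOIO, 1);
 *
 *	bio->bi_bdev	= bdev;
 *	bio->bi_sector	= sector;
 *	bio_add_page(bio, page, PAGE_SIZE, 0);
 *	submit_bio(WRITE, bio);
 */
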
static inline struct bio *bio_clone(struct bio *bio, gfp_t gfp_mask)
{
	return bio_clone_bioset(bio, gfp_mask, fs_bio_set);
}

static inline struct bio *bio_kmalloc(gfp_t gfp_mask, unsigned int nr_iovecs)
{
	return bio_alloc_bioset(gfp_mask, nr_iovecs, NULL);
}

static inline struct bio *bio_clone_kmalloc(struct bio *bio, gfp_t gfp_mask)
{
	return bio_clone_bioset(bio, gfp_mask, NULL);
}

extern void bio_endio(struct bio *, int);
struct request_queue;
extern int bio_phys_segments(struct request_queue *, struct bio *);

extern int submit_bio_wait(int rw, struct bio *bio);
extern void bio_advance(struct bio *, unsigned);

extern void bio_init(struct bio *);
extern void bio_reset(struct bio *);

extern int bio_add_page(struct bio *, struct page *, unsigned int, unsigned int);
extern int bio_add_pc_page(struct request_queue *, struct bio *, struct page *,
			   unsigned int, unsigned int);
extern int bio_get_nr_vecs(struct block_device *);
extern sector_t bio_sector_offset(struct bio *, unsigned short, unsigned int);
extern struct bio *bio_map_user(struct request_queue *, struct block_device *,
				unsigned long, unsigned int, int, gfp_t);
struct sg_iovec;
struct rq_map_data;
extern struct bio *bio_map_user_iov(struct request_queue *,
				    struct block_device *,
				    struct sg_iovec *, int, int, gfp_t);
extern void bio_unmap_user(struct bio *);
extern struct bio *bio_map_kern(struct request_queue *, void *, unsigned int,
				gfp_t);
extern struct bio *bio_copy_kern(struct request_queue *, void *, unsigned int,
				 gfp_t, int);
extern void bio_set_pages_dirty(struct bio *bio);
extern void bio_check_pages_dirty(struct bio *bio);

#ifndef ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
# error	"You should define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE for your platform"
#endif
#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
extern void bio_flush_dcache_pages(struct bio *bi);
#else
static inline void bio_flush_dcache_pages(struct bio *bi)
{
}
#endif

extern void bio_copy_data(struct bio *dst, struct bio *src);

extern struct bio *bio_copy_user(struct request_queue *, struct rq_map_data *,
				 unsigned long, unsigned int, int, gfp_t);
extern struct bio *bio_copy_user_iov(struct request_queue *,
				     struct rq_map_data *, struct sg_iovec *,
				     int, int, gfp_t);
extern int bio_uncopy_user(struct bio *);
void zero_fill_bio(struct bio *bio);
extern struct bio_vec *bvec_alloc(gfp_t, int, unsigned long *, mempool_t *);
extern void bvec_free(mempool_t *, struct bio_vec *, unsigned int);
extern unsigned int bvec_nr_vecs(unsigned short idx);

#ifdef CONFIG_BLK_CGROUP
int bio_associate_current(struct bio *bio);
void bio_disassociate_task(struct bio *bio);
#else	/* CONFIG_BLK_CGROUP */
static inline int bio_associate_current(struct bio *bio) { return -ENOENT; }
static inline void bio_disassociate_task(struct bio *bio) { }
#endif	/* CONFIG_BLK_CGROUP */

#ifdef CONFIG_HIGHMEM
/*
 * remember never ever reenable interrupts between a bvec_kmap_irq and
 * bvec_kunmap_irq!
 */
static inline char *bvec_kmap_irq(struct bio_vec *bvec, unsigned long *flags)
{
	unsigned long addr;

	/*
	 * might not be a highmem page, but the preempt/irq count
	 * balancing is a lot nicer this way
	 */
	local_irq_save(*flags);
	addr = (unsigned long) kmap_atomic(bvec->bv_page);

	BUG_ON(addr & ~PAGE_MASK);

	return (char *) addr + bvec->bv_offset;
}

static inline void bvec_kunmap_irq(char *buffer, unsigned long *flags)
{
	unsigned long ptr = (unsigned long) buffer & PAGE_MASK;

	kunmap_atomic((void *) ptr);
	local_irq_restore(*flags);
}

#else
static inline char *bvec_kmap_irq(struct bio_vec *bvec, unsigned long *flags)
{
	return page_address(bvec->bv_page) + bvec->bv_offset;
}

static inline void bvec_kunmap_irq(char *buffer, unsigned long *flags)
{
	*flags = 0;
}
#endif

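/*
 * Usage sketch for the irq-safe bvec mapping helpers above (illustrative
 * only; as the comment above warns, interrupts must stay disabled between
 * the two calls):
 *
 *	unsigned long flags;
 *	char *buf = bvec_kmap_irq(bvec, &flags);
 *
 *	memset(buf, 0, bvec->bv_len);
 *	bvec_kunmap_irq(buf, &flags);
 */
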
static inline char *__bio_kmap_irq(struct bio *bio, unsigned short idx,
				   unsigned long *flags)
{
	return bvec_kmap_irq(bio_iovec_idx(bio, idx), flags);
}
#define __bio_kunmap_irq(buf, flags)	bvec_kunmap_irq(buf, flags)

#define bio_kmap_irq(bio, flags) \
	__bio_kmap_irq((bio), (bio)->bi_idx, (flags))
#define bio_kunmap_irq(buf, flags)	__bio_kunmap_irq(buf, flags)

/*
 * Check whether this bio carries any data or not. A NULL bio is allowed.
 */
static inline bool bio_has_data(struct bio *bio)
{
	if (bio && bio->bi_vcnt)
		return true;

	return false;
}

static inline bool bio_is_rw(struct bio *bio)
{
	if (!bio_has_data(bio))
		return false;

	if (bio->bi_rw & REQ_WRITE_SAME)
		return false;

	return true;
}

static inline bool bio_mergeable(struct bio *bio)
{
	if (bio->bi_rw & REQ_NOMERGE_FLAGS)
		return false;

	return true;
}

/*
 * BIO list management for use by remapping drivers (e.g. DM or MD) and loop.
 *
 * A bio_list anchors a singly-linked list of bios chained through the bi_next
 * member of the bio. The bio_list also caches the last list member to allow
 * fast access to the tail.
 */
struct bio_list {
	struct bio *head;
	struct bio *tail;
};

static inline int bio_list_empty(const struct bio_list *bl)
{
	return bl->head == NULL;
}

static inline void bio_list_init(struct bio_list *bl)
{
	bl->head = bl->tail = NULL;
}

#define bio_list_for_each(bio, bl) \
	for (bio = (bl)->head; bio; bio = bio->bi_next)

static inline unsigned bio_list_size(const struct bio_list *bl)
{
	unsigned sz = 0;
	struct bio *bio;

	bio_list_for_each(bio, bl)
		sz++;

	return sz;
}

static inline void bio_list_add(struct bio_list *bl, struct bio *bio)
{
	bio->bi_next = NULL;

	if (bl->tail)
		bl->tail->bi_next = bio;
	else
		bl->head = bio;

	bl->tail = bio;
}

static inline void bio_list_add_head(struct bio_list *bl, struct bio *bio)
{
	bio->bi_next = bl->head;

	bl->head = bio;

	if (!bl->tail)
		bl->tail = bio;
}

static inline void bio_list_merge(struct bio_list *bl, struct bio_list *bl2)
{
	if (!bl2->head)
		return;

	if (bl->tail)
		bl->tail->bi_next = bl2->head;
	else
		bl->head = bl2->head;

	bl->tail = bl2->tail;
}

static inline void bio_list_merge_head(struct bio_list *bl,
				       struct bio_list *bl2)
{
	if (!bl2->head)
		return;

	if (bl->head)
		bl2->tail->bi_next = bl->head;
	else
		bl->tail = bl2->tail;

	bl->head = bl2->head;
}

static inline struct bio *bio_list_peek(struct bio_list *bl)
{
	return bl->head;
}

static inline struct bio *bio_list_pop(struct bio_list *bl)
{
	struct bio *bio = bl->head;

	if (bio) {
		bl->head = bl->head->bi_next;
		if (!bl->head)
			bl->tail = NULL;

		bio->bi_next = NULL;
	}

	return bio;
}

static inline struct bio *bio_list_get(struct bio_list *bl)
{
	struct bio *bio = bl->head;

	bl->head = bl->tail = NULL;

	return bio;
}

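/*
 * Illustrative sketch of how a remapping driver might defer bios on a
 * private list and drain it later with the helpers above ("deferred",
 * "bio_to_defer" and the resubmission step are hypothetical):
 *
 *	struct bio_list deferred;
 *	struct bio *bio;
 *
 *	bio_list_init(&deferred);
 *	bio_list_add(&deferred, bio_to_defer);
 *	...
 *	while ((bio = bio_list_pop(&deferred)))
 *		generic_make_request(bio);
 */
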
/*
 * bio_set is used to allow other portions of the IO system to
 * allocate their own private memory pools for bio and iovec structures.
 * These memory pools in turn all allocate from the bio_slab
 * and the bvec_slabs[].
 */
#define BIO_POOL_SIZE 2
#define BIOVEC_NR_POOLS 6
#define BIOVEC_MAX_IDX	(BIOVEC_NR_POOLS - 1)

struct bio_set {
	struct kmem_cache	*bio_slab;
	unsigned int		front_pad;

	mempool_t		*bio_pool;
	mempool_t		*bvec_pool;
#if defined(CONFIG_BLK_DEV_INTEGRITY)
	mempool_t		*bio_integrity_pool;
	mempool_t		*bvec_integrity_pool;
#endif

	/*
	 * Deadlock avoidance for stacking block drivers: see comments in
	 * bio_alloc_bioset() for details
	 */
	spinlock_t		rescue_lock;
	struct bio_list		rescue_list;
	struct work_struct	rescue_work;
	struct workqueue_struct	*rescue_workqueue;
};

struct biovec_slab {
	int nr_vecs;
	char *name;
	struct kmem_cache *slab;
};

/*
 * a small number of entries is fine, not going to be performance critical.
 * basically we just need to survive
 */
#define BIO_SPLIT_ENTRIES 2

#if defined(CONFIG_BLK_DEV_INTEGRITY)

#define bip_vec_idx(bip, idx)	(&(bip->bip_vec[(idx)]))
#define bip_vec(bip)		bip_vec_idx(bip, 0)

#define __bip_for_each_vec(bvl, bip, i, start_idx)			\
	for (bvl = bip_vec_idx((bip), (start_idx)), i = (start_idx);	\
	     i < (bip)->bip_vcnt;					\
	     bvl++, i++)

#define bip_for_each_vec(bvl, bip, i)					\
	__bip_for_each_vec(bvl, bip, i, (bip)->bip_idx)

#define bio_for_each_integrity_vec(_bvl, _bio, _iter)			\
	for_each_bio(_bio)						\
		bip_for_each_vec(_bvl, _bio->bi_integrity, _iter)

#define bio_integrity(bio) (bio->bi_integrity != NULL)

extern struct bio_integrity_payload *bio_integrity_alloc(struct bio *, gfp_t, unsigned int);
extern void bio_integrity_free(struct bio *);
extern int bio_integrity_add_page(struct bio *, struct page *, unsigned int, unsigned int);
extern int bio_integrity_enabled(struct bio *bio);
extern int bio_integrity_set_tag(struct bio *, void *, unsigned int);
extern int bio_integrity_get_tag(struct bio *, void *, unsigned int);
extern int bio_integrity_prep(struct bio *);
extern void bio_integrity_endio(struct bio *, int);
extern void bio_integrity_advance(struct bio *, unsigned int);
extern void bio_integrity_trim(struct bio *, unsigned int, unsigned int);
extern void bio_integrity_split(struct bio *, struct bio_pair *, int);
extern int bio_integrity_clone(struct bio *, struct bio *, gfp_t);
extern int bioset_integrity_create(struct bio_set *, int);
extern void bioset_integrity_free(struct bio_set *);
extern void bio_integrity_init(void);

#else /* CONFIG_BLK_DEV_INTEGRITY */

static inline int bio_integrity(struct bio *bio)
{
	return 0;
}

static inline int bio_integrity_enabled(struct bio *bio)
{
	return 0;
}

static inline int bioset_integrity_create(struct bio_set *bs, int pool_size)
{
	return 0;
}

static inline void bioset_integrity_free(struct bio_set *bs)
{
	return;
}

static inline int bio_integrity_prep(struct bio *bio)
{
	return 0;
}

static inline void bio_integrity_free(struct bio *bio)
{
	return;
}

static inline int bio_integrity_clone(struct bio *bio, struct bio *bio_src,
				      gfp_t gfp_mask)
{
	return 0;
}

static inline void bio_integrity_split(struct bio *bio, struct bio_pair *bp,
				       int sectors)
{
	return;
}

static inline void bio_integrity_advance(struct bio *bio,
					 unsigned int bytes_done)
{
	return;
}

static inline void bio_integrity_trim(struct bio *bio, unsigned int offset,
				      unsigned int sectors)
{
	return;
}

static inline void bio_integrity_init(void)
{
	return;
}

#endif /* CONFIG_BLK_DEV_INTEGRITY */

#endif /* CONFIG_BLOCK */
#endif /* __LINUX_BIO_H */