ext2: async get_block and support code
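This header-side change adds *_async variants of the buffer wait, lock and read helpers that take a caller-supplied struct wait_bit_queue, so a caller can be notified when a buffer becomes available instead of sleeping on it; passing NULL keeps the existing synchronous behaviour, which is how the old __bread() and sb_bread() are re-expressed as wrappers around __bread_async(..., NULL). As a rough illustration of the intended calling convention (a sketch, not part of the patch: the function name example_dirty_buffer is hypothetical, and it assumes that when @wait is non-NULL and the lock cannot be taken immediately, __lock_buffer_async() queues @wait and returns a negative errno so the caller can back out and retry once woken; the exact convention is set by the fs/buffer.c side of the series):

	/*
	 * Sketch only: dirty a buffer from a context that must not sleep.
	 * Assumes a non-NULL @wait turns lock_buffer_async() into a
	 * "queue the wait and return an error" operation; @wait == NULL
	 * keeps today's blocking behaviour, as the __bread()/sb_bread()
	 * wrappers below rely on.
	 */
	static int example_dirty_buffer(struct buffer_head *bh,
					struct wait_bit_queue *wait)
	{
		int ret;

		ret = lock_buffer_async(bh, wait);
		if (ret)
			return ret;	/* wait queued; caller retries later */

		set_buffer_uptodate(bh);
		mark_buffer_dirty(bh);
		unlock_buffer(bh);
		return 0;
	}
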
diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h
index 16ed0284d780c2177e0f5a4b617a9fcb4a316d41..fe62037d34f0f77767a29f3eee40894aae20ad4a 100644
--- a/include/linux/buffer_head.h
+++ b/include/linux/buffer_head.h
@@ -167,6 +167,7 @@ void unmap_underlying_metadata(struct block_device *bdev, sector_t block);
 
 void mark_buffer_async_write(struct buffer_head *bh);
 void __wait_on_buffer(struct buffer_head *);
+int __wait_on_buffer_async(struct buffer_head *, struct wait_bit_queue *);
 wait_queue_head_t *bh_waitq_head(struct buffer_head *bh);
 struct buffer_head *__find_get_block(struct block_device *bdev, sector_t block,
                        unsigned size);
@@ -175,12 +176,13 @@ struct buffer_head *__getblk(struct block_device *bdev, sector_t block,
 void __brelse(struct buffer_head *);
 void __bforget(struct buffer_head *);
 void __breadahead(struct block_device *, sector_t block, unsigned int size);
-struct buffer_head *__bread(struct block_device *, sector_t block, unsigned size);
+struct buffer_head *__bread_async(struct block_device *, sector_t block, unsigned size, struct wait_bit_queue *);
 void invalidate_bh_lrus(void);
 struct buffer_head *alloc_buffer_head(gfp_t gfp_flags);
 void free_buffer_head(struct buffer_head * bh);
 void unlock_buffer(struct buffer_head *bh);
 void __lock_buffer(struct buffer_head *bh);
+int __lock_buffer_async(struct buffer_head *bh, struct wait_bit_queue *wait);
 void ll_rw_block(int, int, struct buffer_head * bh[]);
 int sync_dirty_buffer(struct buffer_head *bh);
 int submit_bh(int, struct buffer_head *);
@@ -272,10 +274,22 @@ static inline void bforget(struct buffer_head *bh)
                __bforget(bh);
 }
 
+static inline struct buffer_head *__bread(struct block_device *bdev,
+                                         sector_t block, unsigned size)
+{
+       return __bread_async(bdev, block, size, NULL);
+}
+
 static inline struct buffer_head *
 sb_bread(struct super_block *sb, sector_t block)
 {
-       return __bread(sb->s_bdev, block, sb->s_blocksize);
+       return __bread_async(sb->s_bdev, block, sb->s_blocksize, NULL);
+}
+
+static inline struct buffer_head *
+sb_bread_async(struct super_block *sb, sector_t block, struct wait_bit_queue *w)
+{
+       return __bread_async(sb->s_bdev, block, sb->s_blocksize, w);
 }
 
 static inline void
@@ -317,6 +331,15 @@ static inline void wait_on_buffer(struct buffer_head *bh)
                __wait_on_buffer(bh);
 }
 
+static inline int wait_on_buffer_async(struct buffer_head *bh,
+                                       struct wait_bit_queue *wait)
+{
+       if (buffer_locked(bh) || atomic_read(&bh->b_count) == 0)
+               return __wait_on_buffer_async(bh, wait);
+
+       return 0;
+}
+
 static inline int trylock_buffer(struct buffer_head *bh)
 {
        return likely(!test_and_set_bit_lock(BH_Lock, &bh->b_state));
@@ -329,6 +352,25 @@ static inline void lock_buffer(struct buffer_head *bh)
                __lock_buffer(bh);
 }
 
+static inline int lock_buffer_async(struct buffer_head *bh,
+                                   struct wait_bit_queue *wait)
+{
+       if (!trylock_buffer(bh)) {
+               DEFINE_WAIT_BIT(wq_stack, &bh->b_state, BH_Lock);
+
+               if (!wait)
+                       wait = &wq_stack;
+               else {
+                       wait->key.flags = &bh->b_state;
+                       wait->key.bit_nr = BH_Lock;
+               }
+
+               return __lock_buffer_async(bh, wait);
+       }
+
+       return 0;
+}
+
 extern int __set_page_dirty_buffers(struct page *page);
 
 #else /* CONFIG_BLOCK */
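
For the read side, the same convention would let a filesystem kick off a metadata read and bail out instead of blocking. The sketch below is not part of the patch: example_read_metadata is a hypothetical helper, and it assumes wait_on_buffer_async() returns a negative errno (e.g. -EAGAIN) once @wait has been queued on the buffer; only sb_getblk(), ll_rw_block(), buffer_uptodate() and brelse() are existing APIs from this header.

	/*
	 * Sketch only: read one metadata block without sleeping in the
	 * submitting context.  On a "wait queued" return the caller is
	 * expected to unwind and retry after its wait_bit_queue fires.
	 */
	static int example_read_metadata(struct super_block *sb, sector_t blocknr,
					 struct wait_bit_queue *wait,
					 struct buffer_head **bhp)
	{
		struct buffer_head *bh;
		int ret;

		bh = sb_getblk(sb, blocknr);
		if (!bh)
			return -ENOMEM;

		if (!buffer_uptodate(bh)) {
			ll_rw_block(READ, 1, &bh);
			ret = wait_on_buffer_async(bh, wait);
			if (ret) {
				brelse(bh);
				return ret;	/* assumed: wait queued, retry on wakeup */
			}
			if (!buffer_uptodate(bh)) {
				brelse(bh);
				return -EIO;
			}
		}

		*bhp = bh;
		return 0;
	}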