Commit | Line | Data |
---|---|---|
3bd94003 | 1 | /* SPDX-License-Identifier: GPL-2.0-only */ |
95d402f0 MP |
2 | /* |
3 | * Copyright (C) 2009-2011 Red Hat, Inc. | |
4 | * | |
5 | * Author: Mikulas Patocka <mpatocka@redhat.com> | |
6 | * | |
7 | * This file is released under the GPL. | |
8 | */ | |
9 | ||
afa53df8 MP |
10 | #ifndef _LINUX_DM_BUFIO_H |
11 | #define _LINUX_DM_BUFIO_H | |
95d402f0 MP |
12 | |
13 | #include <linux/blkdev.h> | |
14 | #include <linux/types.h> | |
15 | ||
16 | /*----------------------------------------------------------------*/ | |
17 | ||
18 | struct dm_bufio_client; | |
19 | struct dm_buffer; | |
20 | ||
b32d4582 NH |
21 | /* |
22 | * Flags for dm_bufio_client_create | |
23 | */ | |
24 | #define DM_BUFIO_CLIENT_NO_SLEEP 0x1 | |
25 | ||
95d402f0 MP |
26 | /* |
27 | * Create a buffered IO cache on a given device | |
28 | */ | |
29 | struct dm_bufio_client * | |
86a3238c HM |
30 | dm_bufio_client_create(struct block_device *bdev, unsigned int block_size, |
31 | unsigned int reserved_buffers, unsigned int aux_size, | |
95d402f0 | 32 | void (*alloc_callback)(struct dm_buffer *), |
0fcb100d NH |
33 | void (*write_callback)(struct dm_buffer *), |
34 | unsigned int flags); | |
95d402f0 MP |
35 | |
36 | /* | |
37 | * Release a buffered IO cache. | |
38 | */ | |
39 | void dm_bufio_client_destroy(struct dm_bufio_client *c); | |
40 | ||
d4830012 LL |
41 | void dm_bufio_client_reset(struct dm_bufio_client *c); |
42 | ||
400a0bef MP |
43 | /* |
44 | * Set the sector range. | |
45 | * When this function is called, there must be no I/O in progress on the bufio | |
46 | * client. | |
47 | */ | |
48 | void dm_bufio_set_sector_offset(struct dm_bufio_client *c, sector_t start); | |
49 | ||
95d402f0 MP |
50 | /* |
51 | * WARNING: to avoid deadlocks, these conditions are observed: | |
52 | * | |
53 | * - At most one thread can hold at most "reserved_buffers" simultaneously. | |
54 | * - Each other thread can hold at most one buffer. | |
55 | * - Threads which call only dm_bufio_get can hold an unlimited number of | |
56 | * buffers. | |
57 | */ | |
58 | ||
59 | /* | |
60 | * Read a given block from disk. Returns pointer to data. Returns a | |
61 | * pointer to dm_buffer that can be used to release the buffer or to make | |
62 | * it dirty. | |
63 | */ | |
64 | void *dm_bufio_read(struct dm_bufio_client *c, sector_t block, | |
65 | struct dm_buffer **bp); | |
66 | ||
e9b2238e HJ |
67 | void *dm_bufio_read_with_ioprio(struct dm_bufio_client *c, sector_t block, |
68 | struct dm_buffer **bp, unsigned short ioprio); | |
69 | ||
95d402f0 MP |
70 | /* |
71 | * Like dm_bufio_read, but returns the buffer from the cache without | |
72 | * reading it from disk. If the buffer is not in the cache, returns NULL. | |
73 | */ | |
74 | void *dm_bufio_get(struct dm_bufio_client *c, sector_t block, | |
75 | struct dm_buffer **bp); | |
76 | ||
77 | /* | |
78 | * Like dm_bufio_read, but don't read anything from the disk. It is | |
79 | * expected that the caller initializes the buffer and marks it dirty. | |
80 | */ | |
81 | void *dm_bufio_new(struct dm_bufio_client *c, sector_t block, | |
82 | struct dm_buffer **bp); | |
83 | ||
a66cc28f MP |
84 | /* |
85 | * Prefetch the specified blocks to the cache. | |
86 | * The function starts to read the blocks and returns without waiting for | |
87 | * I/O to finish. | |
88 | */ | |
89 | void dm_bufio_prefetch(struct dm_bufio_client *c, | |
86a3238c | 90 | sector_t block, unsigned int n_blocks); |
a66cc28f | 91 | |
e9b2238e HJ |
92 | void dm_bufio_prefetch_with_ioprio(struct dm_bufio_client *c, |
93 | sector_t block, unsigned int n_blocks, | |
94 | unsigned short ioprio); | |
95 | ||
95d402f0 MP |
96 | /* |
97 | * Release a reference obtained with dm_bufio_{read,get,new}. The data | |
98 | * pointer and dm_buffer pointer are no longer valid after this call. | |
99 | */ | |
100 | void dm_bufio_release(struct dm_buffer *b); | |
101 | ||
102 | /* | |
103 | * Mark a buffer dirty. It should be called after the buffer is modified. | |
104 | * | |
105 | * In case of memory pressure, the buffer may be written after | |
106 | * dm_bufio_mark_buffer_dirty, but before dm_bufio_write_dirty_buffers. So | |
107 | * dm_bufio_write_dirty_buffers guarantees that the buffer is on-disk but | |
108 | * the actual writing may occur earlier. | |
109 | */ | |
110 | void dm_bufio_mark_buffer_dirty(struct dm_buffer *b); | |
111 | ||
1e3b21c6 MP |
112 | /* |
113 | * Mark a part of the buffer dirty. | |
114 | * | |
115 | * The specified part of the buffer is scheduled to be written. dm-bufio may | |
116 | * write the specified part of the buffer or it may write a larger superset. | |
117 | */ | |
118 | void dm_bufio_mark_partial_buffer_dirty(struct dm_buffer *b, | |
86a3238c | 119 | unsigned int start, unsigned int end); |
1e3b21c6 | 120 | |
95d402f0 MP |
121 | /* |
122 | * Initiate writing of dirty buffers, without waiting for completion. | |
123 | */ | |
124 | void dm_bufio_write_dirty_buffers_async(struct dm_bufio_client *c); | |
125 | ||
126 | /* | |
127 | * Write all dirty buffers. Guarantees that all dirty buffers created prior | |
128 | * to this call are on disk when this call exits. | |
129 | */ | |
130 | int dm_bufio_write_dirty_buffers(struct dm_bufio_client *c); | |
131 | ||
132 | /* | |
133 | * Send an empty write barrier to the device to flush hardware disk cache. | |
134 | */ | |
135 | int dm_bufio_issue_flush(struct dm_bufio_client *c); | |
136 | ||
6fbeb004 MP |
137 | /* |
138 | * Send a discard request to the underlying device. | |
139 | */ | |
140 | int dm_bufio_issue_discard(struct dm_bufio_client *c, sector_t block, sector_t count); | |
6fbeb004 | 141 | |
55494bf2 MP |
142 | /* |
143 | * Free the given buffer. | |
144 | * This is just a hint, if the buffer is in use or dirty, this function | |
145 | * does nothing. | |
146 | */ | |
147 | void dm_bufio_forget(struct dm_bufio_client *c, sector_t block); | |
148 | ||
33a18062 MP |
149 | /* |
150 | * Free the given range of buffers. | |
151 | * This is just a hint, if the buffer is in use or dirty, this function | |
152 | * does nothing. | |
153 | */ | |
154 | void dm_bufio_forget_buffers(struct dm_bufio_client *c, sector_t block, sector_t n_blocks); | |
155 | ||
55b082e6 MP |
156 | /* |
157 | * Set the minimum number of buffers before cleanup happens. | |
158 | */ | |
86a3238c | 159 | void dm_bufio_set_minimum_buffers(struct dm_bufio_client *c, unsigned int n); |
55b082e6 | 160 | |
86a3238c | 161 | unsigned int dm_bufio_get_block_size(struct dm_bufio_client *c); |
95d402f0 | 162 | sector_t dm_bufio_get_device_size(struct dm_bufio_client *c); |
9b594826 | 163 | struct dm_io_client *dm_bufio_get_dm_io_client(struct dm_bufio_client *c); |
95d402f0 MP |
164 | sector_t dm_bufio_get_block_number(struct dm_buffer *b); |
165 | void *dm_bufio_get_block_data(struct dm_buffer *b); | |
166 | void *dm_bufio_get_aux_data(struct dm_buffer *b); | |
167 | struct dm_bufio_client *dm_bufio_get_client(struct dm_buffer *b); | |
168 | ||
169 | /*----------------------------------------------------------------*/ | |
170 | ||
171 | #endif |