Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | /* |
2 | * linux/fs/ext3/dir.c | |
3 | * | |
4 | * Copyright (C) 1992, 1993, 1994, 1995 | |
5 | * Remy Card (card@masi.ibp.fr) | |
6 | * Laboratoire MASI - Institut Blaise Pascal | |
7 | * Universite Pierre et Marie Curie (Paris VI) | |
8 | * | |
9 | * from | |
10 | * | |
11 | * linux/fs/minix/dir.c | |
12 | * | |
13 | * Copyright (C) 1991, 1992 Linus Torvalds | |
14 | * | |
15 | * ext3 directory handling functions | |
16 | * | |
17 | * Big-endian to little-endian byte-swapping/bitmaps by | |
18 | * David S. Miller (davem@caip.rutgers.edu), 1995 | |
19 | * | |
20 | * Hash Tree Directory indexing (c) 2001 Daniel Phillips | |
21 | * | |
22 | */ | |
23 | ||
24 | #include <linux/fs.h> | |
25 | #include <linux/jbd.h> | |
26 | #include <linux/ext3_fs.h> | |
27 | #include <linux/buffer_head.h> | |
28 | #include <linux/smp_lock.h> | |
29 | #include <linux/slab.h> | |
30 | #include <linux/rbtree.h> | |
31 | ||
/*
 * Map on-disk ext3 directory-entry file types (EXT3_FT_*) to the DT_*
 * codes returned to userspace by readdir(); index 0 is the unknown type.
 */
static unsigned char ext3_filetype_table[] = {
	DT_UNKNOWN, DT_REG, DT_DIR, DT_CHR, DT_BLK, DT_FIFO, DT_SOCK, DT_LNK
};
35 | ||
36 | static int ext3_readdir(struct file *, void *, filldir_t); | |
37 | static int ext3_dx_readdir(struct file * filp, | |
38 | void * dirent, filldir_t filldir); | |
39 | static int ext3_release_dir (struct inode * inode, | |
40 | struct file * filp); | |
41 | ||
/*
 * File operations for ext3 directories.  read(2) on a directory is
 * rejected by generic_read_dir(); ->release is only wired up when
 * htree indexing is compiled in, so the per-file readdir state
 * allocated by ext3_dx_readdir() gets freed.
 */
struct file_operations ext3_dir_operations = {
	.llseek		= generic_file_llseek,
	.read		= generic_read_dir,
	.readdir	= ext3_readdir,		/* we take BKL. needed?*/
	.ioctl		= ext3_ioctl,		/* BKL held */
	.fsync		= ext3_sync_file,	/* BKL held */
#ifdef CONFIG_EXT3_INDEX
	.release	= ext3_release_dir,
#endif
};
52 | ||
53 | ||
54 | static unsigned char get_dtype(struct super_block *sb, int filetype) | |
55 | { | |
56 | if (!EXT3_HAS_INCOMPAT_FEATURE(sb, EXT3_FEATURE_INCOMPAT_FILETYPE) || | |
57 | (filetype >= EXT3_FT_MAX)) | |
58 | return DT_UNKNOWN; | |
59 | ||
60 | return (ext3_filetype_table[filetype]); | |
61 | } | |
62 | ||
63 | ||
64 | int ext3_check_dir_entry (const char * function, struct inode * dir, | |
65 | struct ext3_dir_entry_2 * de, | |
66 | struct buffer_head * bh, | |
67 | unsigned long offset) | |
68 | { | |
69 | const char * error_msg = NULL; | |
70 | const int rlen = le16_to_cpu(de->rec_len); | |
71 | ||
72 | if (rlen < EXT3_DIR_REC_LEN(1)) | |
73 | error_msg = "rec_len is smaller than minimal"; | |
74 | else if (rlen % 4 != 0) | |
75 | error_msg = "rec_len % 4 != 0"; | |
76 | else if (rlen < EXT3_DIR_REC_LEN(de->name_len)) | |
77 | error_msg = "rec_len is too small for name_len"; | |
78 | else if (((char *) de - bh->b_data) + rlen > dir->i_sb->s_blocksize) | |
79 | error_msg = "directory entry across blocks"; | |
80 | else if (le32_to_cpu(de->inode) > | |
81 | le32_to_cpu(EXT3_SB(dir->i_sb)->s_es->s_inodes_count)) | |
82 | error_msg = "inode out of bounds"; | |
83 | ||
84 | if (error_msg != NULL) | |
85 | ext3_error (dir->i_sb, function, | |
86 | "bad entry in directory #%lu: %s - " | |
87 | "offset=%lu, inode=%lu, rec_len=%d, name_len=%d", | |
88 | dir->i_ino, error_msg, offset, | |
89 | (unsigned long) le32_to_cpu(de->inode), | |
90 | rlen, de->name_len); | |
91 | return error_msg == NULL ? 1 : 0; | |
92 | } | |
93 | ||
/*
 * ext3_readdir() -- walk a directory block-by-block and hand every
 * live entry to @filldir.
 *
 * f_pos is a byte offset into the directory; its low blocksize bits
 * select the position inside the current block.  When htree indexing
 * is compiled in and the directory qualifies, the work is delegated
 * to ext3_dx_readdir() first; an ERR_BAD_DX_DIR result makes us clear
 * EXT3_INDEX_FL and fall back to the linear scan below.
 */
static int ext3_readdir(struct file * filp,
			 void * dirent, filldir_t filldir)
{
	int error = 0;
	unsigned long offset;
	int i, stored;
	struct ext3_dir_entry_2 *de;
	struct super_block *sb;
	int err;
	struct inode *inode = filp->f_dentry->d_inode;
	int ret = 0;

	sb = inode->i_sb;

#ifdef CONFIG_EXT3_INDEX
	/*
	 * Dispatch to the hash-tree reader when the DIR_INDEX feature
	 * is enabled and either the inode is flagged as indexed or the
	 * directory is exactly one block long (presumably so tiny
	 * directories are also served in hash order -- see
	 * ext3_dx_readdir; TODO confirm).
	 */
	if (EXT3_HAS_COMPAT_FEATURE(inode->i_sb,
				    EXT3_FEATURE_COMPAT_DIR_INDEX) &&
	    ((EXT3_I(inode)->i_flags & EXT3_INDEX_FL) ||
	     ((inode->i_size >> sb->s_blocksize_bits) == 1))) {
		err = ext3_dx_readdir(filp, dirent, filldir);
		if (err != ERR_BAD_DX_DIR) {
			ret = err;
			goto out;
		}
		/*
		 * The index was unusable: drop the flag and fall back
		 * to linear scanning.
		 * We don't set the inode dirty flag since it's not
		 * critical that it get flushed back to the disk.
		 */
		EXT3_I(filp->f_dentry->d_inode)->i_flags &= ~EXT3_INDEX_FL;
	}
#endif
	stored = 0;
	offset = filp->f_pos & (sb->s_blocksize - 1);

	while (!error && !stored && filp->f_pos < inode->i_size) {
		unsigned long blk = filp->f_pos >> EXT3_BLOCK_SIZE_BITS(sb);
		struct buffer_head map_bh;
		struct buffer_head *bh = NULL;

		map_bh.b_state = 0;
		err = ext3_get_block_handle(NULL, inode, blk, &map_bh, 0, 0);
		if (!err) {
			/*
			 * Start readahead on the block device mapping
			 * before reading the directory block itself.
			 */
			page_cache_readahead(sb->s_bdev->bd_inode->i_mapping,
				&filp->f_ra,
				filp,
				map_bh.b_blocknr >>
					(PAGE_CACHE_SHIFT - inode->i_blkbits),
				1);
			bh = ext3_bread(NULL, inode, blk, 0, &err);
		}

		/*
		 * We ignore I/O errors on directories so users have a chance
		 * of recovering data when there's a bad sector
		 */
		if (!bh) {
			ext3_error (sb, "ext3_readdir",
				"directory #%lu contains a hole at offset %lu",
				inode->i_ino, (unsigned long)filp->f_pos);
			/* Skip the remainder of the missing block. */
			filp->f_pos += sb->s_blocksize - offset;
			continue;
		}

revalidate:
		/* If the dir block has changed since the last call to
		 * readdir(2), then we might be pointing to an invalid
		 * dirent right now.  Scan from the start of the block
		 * to make sure. */
		if (filp->f_version != inode->i_version) {
			for (i = 0; i < sb->s_blocksize && i < offset; ) {
				de = (struct ext3_dir_entry_2 *)
					(bh->b_data + i);
				/* It's too expensive to do a full
				 * dirent test each time round this
				 * loop, but we do have to test at
				 * least that it is non-zero.  A
				 * failure will be detected in the
				 * dirent test below. */
				if (le16_to_cpu(de->rec_len) <
						EXT3_DIR_REC_LEN(1))
					break;
				i += le16_to_cpu(de->rec_len);
			}
			/* Re-anchor f_pos on the recomputed entry boundary. */
			offset = i;
			filp->f_pos = (filp->f_pos & ~(sb->s_blocksize - 1))
				| offset;
			filp->f_version = inode->i_version;
		}

		while (!error && filp->f_pos < inode->i_size
		       && offset < sb->s_blocksize) {
			de = (struct ext3_dir_entry_2 *) (bh->b_data + offset);
			if (!ext3_check_dir_entry ("ext3_readdir", inode, de,
						   bh, offset)) {
				/* On error, skip the f_pos to the
				   next block. */
				filp->f_pos = (filp->f_pos |
						(sb->s_blocksize - 1)) + 1;
				brelse (bh);
				ret = stored;
				goto out;
			}
			offset += le16_to_cpu(de->rec_len);
			/* inode == 0 marks a deleted/empty slot; skip it. */
			if (le32_to_cpu(de->inode)) {
				/* We might block in the next section
				 * if the data destination is
				 * currently swapped out.  So, use a
				 * version stamp to detect whether or
				 * not the directory has been modified
				 * during the copy operation.
				 */
				unsigned long version = filp->f_version;

				error = filldir(dirent, de->name,
						de->name_len,
						filp->f_pos,
						le32_to_cpu(de->inode),
						get_dtype(sb, de->file_type));
				if (error)
					break;
				if (version != filp->f_version)
					goto revalidate;
				stored ++;
			}
			filp->f_pos += le16_to_cpu(de->rec_len);
		}
		offset = 0;
		brelse (bh);
	}
out:
	return ret;
}
226 | ||
227 | #ifdef CONFIG_EXT3_INDEX | |
228 | /* | |
229 | * These functions convert from the major/minor hash to an f_pos | |
230 | * value. | |
231 | * | |
232 | * Currently we only use major hash number. This is unfortunate, but | |
233 | * on 32-bit machines, the same VFS interface is used for lseek and | |
234 | * llseek, so if we use the 64 bit offset, then the 32-bit versions of | |
235 | * lseek/telldir/seekdir will blow out spectacularly, and from within | |
236 | * the ext2 low-level routine, we don't know if we're being called by | |
237 | * a 64-bit version of the system call or the 32-bit version of the | |
238 | * system call. Worse yet, NFSv2 only allows for a 32-bit readdir | |
239 | * cookie. Sigh. | |
240 | */ | |
241 | #define hash2pos(major, minor) (major >> 1) | |
242 | #define pos2maj_hash(pos) ((pos << 1) & 0xffffffff) | |
243 | #define pos2min_hash(pos) (0) | |
244 | ||
/*
 * This structure holds the nodes of the red-black tree used to store
 * the directory entry in hash order.
 */
struct fname {
	__u32		hash;		/* major hash of the name */
	__u32		minor_hash;	/* minor hash, used as tie-breaker */
	struct rb_node	rb_hash;	/* linkage in the per-file rb tree */
	struct fname	*next;		/* chain of entries whose full hash collides */
	__u32		inode;		/* inode number, host byte order */
	__u8		name_len;	/* length of name[] excluding the NUL */
	__u8		file_type;	/* on-disk EXT3_FT_* type */
	char		name[0];	/* NUL-terminated name, allocated inline */
};
259 | ||
/*
 * This function implements a non-recursive way of freeing all of the
 * nodes in the red-black tree.
 *
 * The tree is torn down bottom-up: descend to a leaf, free it (along
 * with its entire hash-collision chain), null the parent's link to
 * it, then resume from the parent.  No rebalancing is needed since
 * the whole tree is being discarded.
 */
static void free_rb_tree_fname(struct rb_root *root)
{
	struct rb_node *n = root->rb_node;
	struct rb_node *parent;
	struct fname *fname;

	while (n) {
		/* Do the node's children first */
		if ((n)->rb_left) {
			n = n->rb_left;
			continue;
		}
		if (n->rb_right) {
			n = n->rb_right;
			continue;
		}
		/*
		 * The node has no children; free it, and then zero
		 * out parent's link to it.  Finally go to the
		 * beginning of the loop and try to free the parent
		 * node.
		 */
		parent = n->rb_parent;	/* old rb_node layout stores the parent directly */
		fname = rb_entry(n, struct fname, rb_hash);
		/* Free this name and everything chained on the same hash. */
		while (fname) {
			struct fname * old = fname;
			fname = fname->next;
			kfree (old);
		}
		if (!parent)
			root->rb_node = NULL;
		else if (parent->rb_left == n)
			parent->rb_left = NULL;
		else if (parent->rb_right == n)
			parent->rb_right = NULL;
		n = parent;
	}
	/* Redundant for non-empty trees, but harmless and keeps root clean. */
	root->rb_node = NULL;
}
303 | ||
304 | ||
305 | static struct dir_private_info *create_dir_info(loff_t pos) | |
306 | { | |
307 | struct dir_private_info *p; | |
308 | ||
309 | p = kmalloc(sizeof(struct dir_private_info), GFP_KERNEL); | |
310 | if (!p) | |
311 | return NULL; | |
312 | p->root.rb_node = NULL; | |
313 | p->curr_node = NULL; | |
314 | p->extra_fname = NULL; | |
315 | p->last_pos = 0; | |
316 | p->curr_hash = pos2maj_hash(pos); | |
317 | p->curr_minor_hash = pos2min_hash(pos); | |
318 | p->next_hash = 0; | |
319 | return p; | |
320 | } | |
321 | ||
/*
 * Free the per-open-file htree readdir state: tear down the rb tree
 * of cached names first, then the state structure itself.
 */
void ext3_htree_free_dir_info(struct dir_private_info *p)
{
	free_rb_tree_fname(&p->root);
	kfree(p);
}
327 | ||
328 | /* | |
329 | * Given a directory entry, enter it into the fname rb tree. | |
330 | */ | |
331 | int ext3_htree_store_dirent(struct file *dir_file, __u32 hash, | |
332 | __u32 minor_hash, | |
333 | struct ext3_dir_entry_2 *dirent) | |
334 | { | |
335 | struct rb_node **p, *parent = NULL; | |
336 | struct fname * fname, *new_fn; | |
337 | struct dir_private_info *info; | |
338 | int len; | |
339 | ||
340 | info = (struct dir_private_info *) dir_file->private_data; | |
341 | p = &info->root.rb_node; | |
342 | ||
343 | /* Create and allocate the fname structure */ | |
344 | len = sizeof(struct fname) + dirent->name_len + 1; | |
345 | new_fn = kmalloc(len, GFP_KERNEL); | |
346 | if (!new_fn) | |
347 | return -ENOMEM; | |
348 | memset(new_fn, 0, len); | |
349 | new_fn->hash = hash; | |
350 | new_fn->minor_hash = minor_hash; | |
351 | new_fn->inode = le32_to_cpu(dirent->inode); | |
352 | new_fn->name_len = dirent->name_len; | |
353 | new_fn->file_type = dirent->file_type; | |
354 | memcpy(new_fn->name, dirent->name, dirent->name_len); | |
355 | new_fn->name[dirent->name_len] = 0; | |
356 | ||
357 | while (*p) { | |
358 | parent = *p; | |
359 | fname = rb_entry(parent, struct fname, rb_hash); | |
360 | ||
361 | /* | |
362 | * If the hash and minor hash match up, then we put | |
363 | * them on a linked list. This rarely happens... | |
364 | */ | |
365 | if ((new_fn->hash == fname->hash) && | |
366 | (new_fn->minor_hash == fname->minor_hash)) { | |
367 | new_fn->next = fname->next; | |
368 | fname->next = new_fn; | |
369 | return 0; | |
370 | } | |
371 | ||
372 | if (new_fn->hash < fname->hash) | |
373 | p = &(*p)->rb_left; | |
374 | else if (new_fn->hash > fname->hash) | |
375 | p = &(*p)->rb_right; | |
376 | else if (new_fn->minor_hash < fname->minor_hash) | |
377 | p = &(*p)->rb_left; | |
378 | else /* if (new_fn->minor_hash > fname->minor_hash) */ | |
379 | p = &(*p)->rb_right; | |
380 | } | |
381 | ||
382 | rb_link_node(&new_fn->rb_hash, parent, p); | |
383 | rb_insert_color(&new_fn->rb_hash, &info->root); | |
384 | return 0; | |
385 | } | |
386 | ||
387 | ||
388 | ||
389 | /* | |
390 | * This is a helper function for ext3_dx_readdir. It calls filldir | |
391 | * for all entres on the fname linked list. (Normally there is only | |
392 | * one entry on the linked list, unless there are 62 bit hash collisions.) | |
393 | */ | |
394 | static int call_filldir(struct file * filp, void * dirent, | |
395 | filldir_t filldir, struct fname *fname) | |
396 | { | |
397 | struct dir_private_info *info = filp->private_data; | |
398 | loff_t curr_pos; | |
399 | struct inode *inode = filp->f_dentry->d_inode; | |
400 | struct super_block * sb; | |
401 | int error; | |
402 | ||
403 | sb = inode->i_sb; | |
404 | ||
405 | if (!fname) { | |
406 | printk("call_filldir: called with null fname?!?\n"); | |
407 | return 0; | |
408 | } | |
409 | curr_pos = hash2pos(fname->hash, fname->minor_hash); | |
410 | while (fname) { | |
411 | error = filldir(dirent, fname->name, | |
412 | fname->name_len, curr_pos, | |
413 | fname->inode, | |
414 | get_dtype(sb, fname->file_type)); | |
415 | if (error) { | |
416 | filp->f_pos = curr_pos; | |
417 | info->extra_fname = fname->next; | |
418 | return error; | |
419 | } | |
420 | fname = fname->next; | |
421 | } | |
422 | return 0; | |
423 | } | |
424 | ||
/*
 * readdir for htree-indexed directories.  Entries are returned in
 * hash order: the rb tree of names for the current hash range is
 * populated by ext3_htree_fill_tree() and drained via call_filldir(),
 * refilling whenever the cached batch is exhausted or the directory
 * changed under us (i_version mismatch).  f_pos encodes the major
 * hash (see hash2pos/pos2maj_hash above).
 */
static int ext3_dx_readdir(struct file * filp,
			 void * dirent, filldir_t filldir)
{
	struct dir_private_info *info = filp->private_data;
	struct inode *inode = filp->f_dentry->d_inode;
	struct fname *fname;
	int ret;

	/* First readdir on this struct file: allocate the cursor state. */
	if (!info) {
		info = create_dir_info(filp->f_pos);
		if (!info)
			return -ENOMEM;
		filp->private_data = info;
	}

	if (filp->f_pos == EXT3_HTREE_EOF)
		return 0;	/* EOF */

	/* Some one has messed with f_pos; reset the world */
	if (info->last_pos != filp->f_pos) {
		free_rb_tree_fname(&info->root);
		info->curr_node = NULL;
		info->extra_fname = NULL;
		info->curr_hash = pos2maj_hash(filp->f_pos);
		info->curr_minor_hash = pos2min_hash(filp->f_pos);
	}

	/*
	 * If there are any leftover names on the hash collision
	 * chain, return them first.
	 *
	 * NOTE(review): when the whole leftover chain is emitted
	 * without a filldir error we fall through to the main loop
	 * instead of jumping to finished -- presumably curr_node then
	 * resumes past that chain; verify against later upstream
	 * changes in this area.
	 */
	if (info->extra_fname &&
		call_filldir(filp, dirent, filldir, info->extra_fname))
		goto finished;

	if (!info->curr_node)
		info->curr_node = rb_first(&info->root);

	while (1) {
		/*
		 * Fill the rbtree if we have no more entries,
		 * or the inode has changed since we last read in the
		 * cached entries.
		 */
		if ((!info->curr_node) ||
		    (filp->f_version != inode->i_version)) {
			info->curr_node = NULL;
			free_rb_tree_fname(&info->root);
			filp->f_version = inode->i_version;
			ret = ext3_htree_fill_tree(filp, info->curr_hash,
						   info->curr_minor_hash,
						   &info->next_hash);
			if (ret < 0)
				return ret;
			if (ret == 0) {
				/* Nothing at or after curr_hash: EOF. */
				filp->f_pos = EXT3_HTREE_EOF;
				break;
			}
			info->curr_node = rb_first(&info->root);
		}

		/* Remember where we are so a refill can resume here. */
		fname = rb_entry(info->curr_node, struct fname, rb_hash);
		info->curr_hash = fname->hash;
		info->curr_minor_hash = fname->minor_hash;
		if (call_filldir(filp, dirent, filldir, fname))
			break;

		info->curr_node = rb_next(info->curr_node);
		if (!info->curr_node) {
			/* Batch exhausted; advance to the next hash range. */
			if (info->next_hash == ~0) {
				filp->f_pos = EXT3_HTREE_EOF;
				break;
			}
			info->curr_hash = info->next_hash;
			info->curr_minor_hash = 0;
		}
	}
finished:
	info->last_pos = filp->f_pos;
	return 0;
}
506 | ||
507 | static int ext3_release_dir (struct inode * inode, struct file * filp) | |
508 | { | |
509 | if (filp->private_data) | |
510 | ext3_htree_free_dir_info(filp->private_data); | |
511 | ||
512 | return 0; | |
513 | } | |
514 | ||
515 | #endif |