We could try to optimize the algorithm to first fill just a small table and
after that use the bigger table all the way up to ARRAY_SIZE(offs). This
way we can use a bigger search array without losing the benefits when the
entry count is smaller than ARRAY_SIZE(offs).
Signed-off-by: Kari Argillander <kari.argillander@gmail.com>
Signed-off-by: Konstantin Komarov <almaz.alexandrovich@paragon-software.com>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/fs.h>
+#include <linux/kernel.h>
#include "debug.h"
#include "ntfs.h"
#ifdef NTFS3_INDEX_BINARY_SEARCH
struct NTFS_DE *found = NULL;
int min_idx = 0, mid_idx, max_idx = 0;
+ int table_size = 8;
int diff2;
- u16 offs[64];
+ u16 offs[128];
if (end > 0x10000)
goto next;
off += e_size;
max_idx++;
- if (max_idx < ARRAY_SIZE(offs))
+ if (max_idx < table_size)
goto fill_table;
max_idx--;
return NULL;
max_idx = 0;
+ table_size = min(table_size * 2, 128);
goto fill_table;
}
} else if (diff2 < 0) {