static unsigned long mb_cache_shrink(struct mb_cache *cache,
unsigned int nr_to_scan);
-static inline bool mb_cache_entry_referenced(struct mb_cache_entry *entry)
+/*
+ * mb_cache_entry_head - return the hash chain head for a given key
+ * @cache - cache to look in
+ * @key - key of the entry
+ *
+ * Maps @key to its bucket in @cache->c_hash via hash_32().  Replaces the
+ * old helpers that packed the chain-head pointer and a referenced bit
+ * into entry->_e_hash_list_head; the referenced state now lives in the
+ * separate e_referenced field.
+ */
+static inline struct hlist_bl_head *mb_cache_entry_head(struct mb_cache *cache,
+							u32 key)
 {
-	return entry->_e_hash_list_head & 1;
-}
-
-static inline void mb_cache_entry_set_referenced(struct mb_cache_entry *entry)
-{
-	entry->_e_hash_list_head |= 1;
-}
-
-static inline void mb_cache_entry_clear_referenced(
-					struct mb_cache_entry *entry)
-{
-	entry->_e_hash_list_head &= ~1;
-}
-
-static inline struct hlist_bl_head *mb_cache_entry_head(
-					struct mb_cache_entry *entry)
-{
-	return (struct hlist_bl_head *)
-			(entry->_e_hash_list_head & ~1);
+	return &cache->c_hash[hash_32(key, cache->c_bucket_bits)];
 }
/*
* @mask - gfp mask with which the entry should be allocated
* @key - key of the entry
* @block - block that contains data
+ * @reusable - is the block reusable by other inodes?
*
* Creates entry in @cache with key @key and records that data is stored in
* block @block. The function returns -EBUSY if entry with the same key
* and for the same block already exists in cache. Otherwise 0 is returned.
*/
int mb_cache_entry_create(struct mb_cache *cache, gfp_t mask, u32 key,
- sector_t block)
+ sector_t block, bool reusable)
{
struct mb_cache_entry *entry, *dup;
struct hlist_bl_node *dup_node;
atomic_set(&entry->e_refcnt, 1);
entry->e_key = key;
entry->e_block = block;
- head = &cache->c_hash[hash_32(key, cache->c_bucket_bits)];
- entry->_e_hash_list_head = (unsigned long)head;
+ entry->e_reusable = reusable;
+ head = mb_cache_entry_head(cache, key);
hlist_bl_lock(head);
hlist_bl_for_each_entry(dup, dup_node, head, e_hash_list) {
if (dup->e_key == key && dup->e_block == block) {
struct hlist_bl_node *node;
struct hlist_bl_head *head;
- if (entry)
- head = mb_cache_entry_head(entry);
- else
- head = &cache->c_hash[hash_32(key, cache->c_bucket_bits)];
+ head = mb_cache_entry_head(cache, key);
hlist_bl_lock(head);
if (entry && !hlist_bl_unhashed(&entry->e_hash_list))
node = entry->e_hash_list.next;
while (node) {
entry = hlist_bl_entry(node, struct mb_cache_entry,
e_hash_list);
- if (entry->e_key == key) {
+ if (entry->e_key == key && entry->e_reusable) {
atomic_inc(&entry->e_refcnt);
goto out;
}
}
EXPORT_SYMBOL(mb_cache_entry_find_next);
+/*
+ * mb_cache_entry_get - get a cache entry by block number (and key)
+ * @cache - cache we work with
+ * @key - key of block number @block
+ * @block - block number
+ *
+ * Search the hash chain of @key for an entry cached for @block.  On
+ * success the entry is returned with its reference count elevated (the
+ * caller is responsible for dropping the reference); NULL is returned
+ * when no matching entry exists.
+ */
+struct mb_cache_entry *mb_cache_entry_get(struct mb_cache *cache, u32 key,
+					  sector_t block)
+{
+	struct hlist_bl_node *node;
+	struct hlist_bl_head *head;
+	struct mb_cache_entry *entry;
+
+	head = mb_cache_entry_head(cache, key);
+	hlist_bl_lock(head);
+	hlist_bl_for_each_entry(entry, node, head, e_hash_list) {
+		if (entry->e_key == key && entry->e_block == block) {
+			atomic_inc(&entry->e_refcnt);
+			goto out;
+		}
+	}
+	entry = NULL;
+out:
+	hlist_bl_unlock(head);
+	return entry;
+}
+EXPORT_SYMBOL(mb_cache_entry_get);
+
/* mb_cache_entry_delete_block - remove information about block from cache
* @cache - cache we work with
- * @key - key of the entry to remove
- * @block - block containing data for @key
+ * @key - key of block @block
+ * @block - block number
*
* Remove entry from cache @cache with key @key with data stored in @block.
*/
struct hlist_bl_head *head;
struct mb_cache_entry *entry;
- head = &cache->c_hash[hash_32(key, cache->c_bucket_bits)];
+ head = mb_cache_entry_head(cache, key);
hlist_bl_lock(head);
hlist_bl_for_each_entry(entry, node, head, e_hash_list) {
if (entry->e_key == key && entry->e_block == block) {
 void mb_cache_entry_touch(struct mb_cache *cache,
 			  struct mb_cache_entry *entry)
 {
-	mb_cache_entry_set_referenced(entry);
+	/* Mark entry as recently used: the shrinker clears this flag and
+	 * skips the entry once before actually reclaiming it. */
+	entry->e_referenced = 1;
 }
 EXPORT_SYMBOL(mb_cache_entry_touch);
 	while (nr_to_scan-- && !list_empty(&cache->c_list)) {
 		entry = list_first_entry(&cache->c_list,
 					 struct mb_cache_entry, e_list);
-		if (mb_cache_entry_referenced(entry)) {
-			mb_cache_entry_clear_referenced(entry);
+		if (entry->e_referenced) {
+			entry->e_referenced = 0;
+			/*
+			 * NOTE(review): list_move_tail(list, head) moves @list
+			 * to the tail of @head.  With the arguments in this
+			 * order the list head itself is deleted and re-inserted
+			 * before the first entry, leaving the list order
+			 * unchanged -- the referenced entry is NOT moved to the
+			 * LRU tail and will be reclaimed on the very next
+			 * iteration.  This likely should read
+			 * list_move_tail(&entry->e_list, &cache->c_list);
+			 * confirm against <linux/list.h>.
+			 */
 			list_move_tail(&cache->c_list, &entry->e_list);
 			continue;
 		}
 		 * from under us.
 		 */
 		spin_unlock(&cache->c_list_lock);
-		head = mb_cache_entry_head(entry);
+		head = mb_cache_entry_head(cache, entry->e_key);
 		hlist_bl_lock(head);
 		if (!hlist_bl_unhashed(&entry->e_hash_list)) {
 			hlist_bl_del_init(&entry->e_hash_list);