mm: multi-gen LRU: groundwork
[linux-2.6-block.git] / mm / memory.c
index 3a9b00c765c2bfafe2bef671fefbfe6fa52276a8..63832dab15d366a18fe96eac0aa467e3c82bd4ef 100644 (file)
@@ -5117,6 +5117,27 @@ static inline void mm_account_fault(struct pt_regs *regs,
                perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, regs, address);
 }
 
+#ifdef CONFIG_LRU_GEN
+/*
+ * Flag the current task as servicing a page fault so that pages accessed
+ * on this path can be treated accordingly by the multi-gen LRU (see the
+ * in_lru_fault consumers in the aging/eviction code — the groundwork this
+ * patch belongs to). Faults on VM_SEQ_READ/VM_RAND_READ mappings are
+ * excluded, since the LRU algorithm doesn't apply to them.
+ */
+static void lru_gen_enter_fault(struct vm_area_struct *vma)
+{
+       /* the LRU algorithm doesn't apply to sequential or random reads */
+       current->in_lru_fault = !(vma->vm_flags & (VM_SEQ_READ | VM_RAND_READ));
+}
+
+/* Clear the per-task flag set by lru_gen_enter_fault(). */
+static void lru_gen_exit_fault(void)
+{
+       current->in_lru_fault = false;
+}
+#else
+/* No-op stubs: without CONFIG_LRU_GEN there is no per-task fault state. */
+static void lru_gen_enter_fault(struct vm_area_struct *vma)
+{
+}
+
+static void lru_gen_exit_fault(void)
+{
+}
+#endif /* CONFIG_LRU_GEN */
+
 /*
  * By the time we get here, we already hold the mm semaphore
  *
@@ -5148,11 +5169,15 @@ vm_fault_t handle_mm_fault(struct vm_area_struct *vma, unsigned long address,
        if (flags & FAULT_FLAG_USER)
                mem_cgroup_enter_user_fault();
 
+       lru_gen_enter_fault(vma);
+
        if (unlikely(is_vm_hugetlb_page(vma)))
                ret = hugetlb_fault(vma->vm_mm, vma, address, flags);
        else
                ret = __handle_mm_fault(vma, address, flags);
 
+       lru_gen_exit_fault();
+
        if (flags & FAULT_FLAG_USER) {
                mem_cgroup_exit_user_fault();
                /*