perf_counter: allow for data addresses to be recorded
diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
index 91c7b8636b8a751ba160a7791df929be25d32543..ac0e112031b29f4a6b96d6b6a9b8b61cdfdc5d34 100644
--- a/arch/powerpc/mm/fault.c
+++ b/arch/powerpc/mm/fault.c
@@ -29,6 +29,7 @@
 #include <linux/module.h>
 #include <linux/kprobes.h>
 #include <linux/kdebug.h>
+#include <linux/perf_counter.h>
 
 #include <asm/firmware.h>
 #include <asm/page.h>
@@ -170,6 +171,8 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
                die("Weird page fault", regs, SIGSEGV);
        }
 
+       perf_swcounter_event(PERF_COUNT_PAGE_FAULTS, 1, 0, regs, address);
+
        /* When running in the kernel we expect faults to occur only to
         * addresses in user space.  All other faults represent errors in the
         * kernel and should generate an OOPS.  Unfortunately, in the case of an
@@ -253,45 +256,33 @@ good_area:
 #endif /* CONFIG_8xx */
 
        if (is_exec) {
-#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
-               /* protection fault */
+#ifdef CONFIG_PPC_STD_MMU
+               /* Protection faults on exec go straight to failure on
+                * hash-based MMUs, as they either don't support per-page
+                * execute permission, or if they do, it's handled already
+                * at the hash level. This test would probably have to
+                * be removed if we change the way this works to make hash
+                * processors use the same I/D cache coherency mechanism
+                * as embedded.
+                */
                if (error_code & DSISR_PROTFAULT)
                        goto bad_area;
+#endif /* CONFIG_PPC_STD_MMU */
+
                /*
                 * Allow execution from readable areas if the MMU does not
                 * provide separate controls over reading and executing.
+                *
+                * Note: This code was previously not enabled for 4xx/BookE.
+                * It is now, since I/D cache coherency for these is done at
+                * set_pte_at() time and there is no reason why the test
+                * below wouldn't be valid on those processors. This -may-
+                * break programs compiled with a really old ABI, though.
                 */
                if (!(vma->vm_flags & VM_EXEC) &&
                    (cpu_has_feature(CPU_FTR_NOEXECUTE) ||
                     !(vma->vm_flags & (VM_READ | VM_WRITE))))
                        goto bad_area;
-#else
-               pte_t *ptep;
-               pmd_t *pmdp;
-
-               /* Since 4xx/Book-E supports per-page execute permission,
-                * we lazily flush dcache to icache. */
-               ptep = NULL;
-               if (get_pteptr(mm, address, &ptep, &pmdp)) {
-                       spinlock_t *ptl = pte_lockptr(mm, pmdp);
-                       spin_lock(ptl);
-                       if (pte_present(*ptep)) {
-                               struct page *page = pte_page(*ptep);
-
-                               if (!test_bit(PG_arch_1, &page->flags)) {
-                                       flush_dcache_icache_page(page);
-                                       set_bit(PG_arch_1, &page->flags);
-                               }
-                               pte_update(ptep, 0, _PAGE_HWEXEC |
-                                          _PAGE_ACCESSED);
-                               local_flush_tlb_page(vma, address);
-                               pte_unmap_unlock(ptep, ptl);
-                               up_read(&mm->mmap_sem);
-                               return 0;
-                       }
-                       pte_unmap_unlock(ptep, ptl);
-               }
-#endif
        /* a write */
        } else if (is_write) {
                if (!(vma->vm_flags & VM_WRITE))
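The block removed in the hunk above is the lazy I$/D$ coherency fixup that 4xx/BookE used to perform in the fault path; per the new comment, this now happens at set_pte_at() time instead. A minimal sketch of what such a set_pte_at()-time filter could look like, reconstructed from the removed lines (the helper name set_pte_filter() and its exact placement are assumptions, not part of this diff):

    /* Hypothetical filter applied when a PTE is installed: flush the
     * dcache to the icache once per page, using PG_arch_1 as the
     * "already coherent" marker -- the same pattern as the removed
     * fault-path code. A real implementation would apply this only to
     * executable mappings.
     */
    static pte_t set_pte_filter(pte_t pte)
    {
            struct page *pg = pte_page(pte);

            if (!test_bit(PG_arch_1, &pg->flags)) {
                    flush_dcache_icache_page(pg);
                    set_bit(PG_arch_1, &pg->flags);
            }
            return pte;
    }

Moving the flush out of do_page_fault() lets hash and embedded MMUs share one coherency mechanism, which is exactly the unification the new hash-MMU comment anticipates.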
@@ -321,6 +312,8 @@ good_area:
        }
        if (ret & VM_FAULT_MAJOR) {
                current->maj_flt++;
+               perf_swcounter_event(PERF_COUNT_PAGE_FAULTS_MAJ, 1, 0,
+                                    regs, address);
 #ifdef CONFIG_PPC_SMLPAR
                if (firmware_has_feature(FW_FEATURE_CMO)) {
                        preempt_disable();
@@ -328,8 +321,11 @@ good_area:
                        preempt_enable();
                }
 #endif
-       } else
+       } else {
                current->min_flt++;
+               perf_swcounter_event(PERF_COUNT_PAGE_FAULTS_MIN, 1, 0,
+                                    regs, address);
+       }
        up_read(&mm->mmap_sem);
        return 0;
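
With these hooks in place, minor and major faults become observable from userspace as per-task software counters. A small self-contained sketch of reading one, written against the later perf_event ABI (the perf_counter interface this patch targets was subsequently renamed; PERF_COUNT_PAGE_FAULTS_MIN here corresponds to PERF_COUNT_SW_PAGE_FAULTS_MIN there):

    /* Count this process's minor page faults. perf_event_open() has no
     * glibc wrapper, hence the raw syscall. */
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>
    #include <unistd.h>
    #include <sys/syscall.h>
    #include <linux/perf_event.h>

    int main(void)
    {
            struct perf_event_attr attr;
            long long count = 0;
            size_t i, sz = 64 * 1024 * 1024;
            char *buf;
            int fd;

            memset(&attr, 0, sizeof(attr));
            attr.type = PERF_TYPE_SOFTWARE;
            attr.size = sizeof(attr);
            attr.config = PERF_COUNT_SW_PAGE_FAULTS_MIN;

            /* pid = 0 (this task), cpu = -1 (any CPU), no group, no
             * flags; the counter starts enabled. */
            fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
            if (fd < 0) {
                    perror("perf_event_open");
                    return 1;
            }

            /* Touch fresh anonymous memory page by page; each first
             * touch takes a minor fault, which the hook above counts. */
            buf = malloc(sz);
            if (!buf)
                    return 1;
            for (i = 0; i < sz; i += 4096)
                    buf[i] = 1;

            read(fd, &count, sizeof(count));
            printf("minor page faults: %lld\n", count);
            free(buf);
            return 0;
    }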