Merge branch 'x86/urgent' into x86/pat
author		Ingo Molnar <mingo@elte.hu>
		Fri, 22 Aug 2008 04:06:51 +0000 (06:06 +0200)
committer	Ingo Molnar <mingo@elte.hu>
		Fri, 22 Aug 2008 04:06:51 +0000 (06:06 +0200)
Conflicts:
	arch/x86/mm/pageattr.c

Signed-off-by: Ingo Molnar <mingo@elte.hu>
arch/x86/mm/pageattr-test.c
arch/x86/mm/pageattr.c
arch/x86/mm/pat.c
drivers/char/agp/agp.h
drivers/char/agp/generic.c
drivers/char/agp/intel-agp.c
include/asm-x86/cacheflush.h
mm/highmem.c

diff --git a/arch/x86/mm/pageattr-test.c b/arch/x86/mm/pageattr-test.c
index d4aa503caaa29696c055218bdfbd9cd7faf903fb..7c3017287119c2a5cd94469ad45d8794820aecdf 100644
--- a/arch/x86/mm/pageattr-test.c
+++ b/arch/x86/mm/pageattr-test.c
@@ -118,6 +118,7 @@ static int pageattr_test(void)
        unsigned int level;
        int i, k;
        int err;
+       unsigned long test_addr;
 
        if (print)
                printk(KERN_INFO "CPA self-test:\n");
@@ -172,7 +173,8 @@ static int pageattr_test(void)
                        continue;
                }
 
-               err = change_page_attr_set(addr[i], len[i], PAGE_TESTBIT);
+               test_addr = addr[i];
+               err = change_page_attr_set(&test_addr, len[i], PAGE_TESTBIT, 0);
                if (err < 0) {
                        printk(KERN_ERR "CPA %d failed %d\n", i, err);
                        failed++;
@@ -204,7 +206,8 @@ static int pageattr_test(void)
                        failed++;
                        continue;
                }
-               err = change_page_attr_clear(addr[i], len[i], PAGE_TESTBIT);
+               test_addr = addr[i];
+               err = change_page_attr_clear(&test_addr, len[i], PAGE_TESTBIT, 0);
                if (err < 0) {
                        printk(KERN_ERR "CPA reverting failed: %d\n", err);
                        failed++;
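
The test now passes each address by reference and copies addr[i] into test_addr first, because the reworked helpers may page-align and advance the pointed-to address in place. A minimal sketch of the two calling conventions added below in pageattr.c (both helpers are static there; PAGE_TESTBIT is the test's own pgprot bit, the other names are illustrative):

    unsigned long one = vaddr;                  /* copy: the helper may align/advance it */
    err = change_page_attr_set(&one, npages, PAGE_TESTBIT, 0);      /* single range */

    unsigned long pages[3] = { va0, va1, va2 }; /* one entry per 4K page */
    err = change_page_attr_set(pages, 3, PAGE_TESTBIT, 1);          /* array mode   */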
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index 43e2f8483e4f59c33559263c0011e1e5bf041f9d..1785591808bd590b446987b78518acfb91f087be 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
  * The current flushing context - we pass it instead of 5 arguments:
  */
 struct cpa_data {
-       unsigned long   vaddr;
+       unsigned long   *vaddr;
        pgprot_t        mask_set;
        pgprot_t        mask_clr;
        int             numpages;
-       int             flushtlb;
+       int             flags;
        unsigned long   pfn;
        unsigned        force_split : 1;
+       int             curpage;
 };
 
+#define CPA_FLUSHTLB 1
+#define CPA_ARRAY 2
+
 #ifdef CONFIG_PROC_FS
 static unsigned long direct_pages_count[PG_LEVEL_NUM];
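
The single flushtlb int becomes a flags bit mask so the new array mode can share the field: CPA_FLUSHTLB keeps the old meaning ("a PTE really changed, flush afterwards") and CPA_ARRAY marks vaddr as pointing to an array of page addresses indexed by curpage. A sketch of the intended use, mirroring the hunks below:

    cpa.flags = 0;
    if (array)
            cpa.flags |= CPA_ARRAY;         /* vaddr points to addr[numpages] */
    /* ... once a PTE is actually modified ... */
    cpa.flags |= CPA_FLUSHTLB;
    /* ... at the end, flush only if needed ... */
    if (cpa.flags & CPA_FLUSHTLB)
            cpa_flush_all(cache);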
 
@@ -190,6 +194,41 @@ static void cpa_flush_range(unsigned long start, int numpages, int cache)
        }
 }
 
+static void cpa_flush_array(unsigned long *start, int numpages, int cache)
+{
+       unsigned int i, level;
+       unsigned long *addr;
+
+       BUG_ON(irqs_disabled());
+
+       on_each_cpu(__cpa_flush_range, NULL, 1);
+
+       if (!cache)
+               return;
+
+       /* 4M threshold */
+       if (numpages >= 1024) {
+               if (boot_cpu_data.x86_model >= 4)
+                       wbinvd();
+               return;
+       }
+       /*
+        * We only need to flush on one CPU,
+        * clflush is a MESI-coherent instruction that
+        * will cause all other CPUs to flush the same
+        * cachelines:
+        */
+       for (i = 0, addr = start; i < numpages; i++, addr++) {
+               pte_t *pte = lookup_address(*addr, &level);
+
+               /*
+                * Only flush present addresses:
+                */
+               if (pte && (pte_val(*pte) & _PAGE_PRESENT))
+                       clflush_cache_range((void *) *addr, PAGE_SIZE);
+       }
+}
+
 /*
  * Certain areas of memory on x86 require very specific protection flags,
  * for example the BIOS area or kernel text. Callers don't always get this
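
The "4M threshold" is simply 1024 pages x 4 KiB per page = 4 MiB; beyond that, one wbinvd() is assumed to be cheaper than clflush-ing every remaining page line by line. Restated with a named constant (the constant name is illustrative, the code above keeps the literal 1024):

    #define CPA_WBINVD_THRESHOLD    1024    /* pages: 1024 * 4 KiB = 4 MiB */

    if (numpages >= CPA_WBINVD_THRESHOLD) {
            if (boot_cpu_data.x86_model >= 4)
                    wbinvd();               /* write back + invalidate all caches */
            return;
    }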
@@ -398,7 +437,7 @@ try_preserve_large_page(pte_t *kpte, unsigned long address,
                 */
                new_pte = pfn_pte(pte_pfn(old_pte), canon_pgprot(new_prot));
                __set_pmd_pte(kpte, address, new_pte);
-               cpa->flushtlb = 1;
+               cpa->flags |= CPA_FLUSHTLB;
                do_split = 0;
        }
 
@@ -584,11 +623,16 @@ out_unlock:
 
 static int __change_page_attr(struct cpa_data *cpa, int primary)
 {
-       unsigned long address = cpa->vaddr;
+       unsigned long address;
        int do_split, err;
        unsigned int level;
        pte_t *kpte, old_pte;
 
+       if (cpa->flags & CPA_ARRAY)
+               address = cpa->vaddr[cpa->curpage];
+       else
+               address = *cpa->vaddr;
+
 repeat:
        kpte = lookup_address(address, &level);
        if (!kpte)
@@ -600,7 +644,7 @@ repeat:
                        return 0;
                WARN(1, KERN_WARNING "CPA: called for zero pte. "
                       "vaddr = %lx cpa->vaddr = %lx\n", address,
-                      cpa->vaddr);
+                      *cpa->vaddr);
                return -EINVAL;
        }
 
@@ -626,7 +670,7 @@ repeat:
                 */
                if (pte_val(old_pte) != pte_val(new_pte)) {
                        set_pte_atomic(kpte, new_pte);
-                       cpa->flushtlb = 1;
+                       cpa->flags |= CPA_FLUSHTLB;
                }
                cpa->numpages = 1;
                return 0;
@@ -650,7 +694,7 @@ repeat:
         */
        err = split_large_page(kpte, address);
        if (!err) {
-               cpa->flushtlb = 1;
+               cpa->flags |= CPA_FLUSHTLB;
                goto repeat;
        }
 
@@ -663,6 +707,7 @@ static int cpa_process_alias(struct cpa_data *cpa)
 {
        struct cpa_data alias_cpa;
        int ret = 0;
+       unsigned long temp_cpa_vaddr, vaddr;
 
        if (cpa->pfn >= max_pfn_mapped)
                return 0;
@@ -675,16 +720,24 @@ static int cpa_process_alias(struct cpa_data *cpa)
         * No need to redo, when the primary call touched the direct
         * mapping already:
         */
-       if (!(within(cpa->vaddr, PAGE_OFFSET,
+       if (cpa->flags & CPA_ARRAY)
+               vaddr = cpa->vaddr[cpa->curpage];
+       else
+               vaddr = *cpa->vaddr;
+
+       if (!(within(vaddr, PAGE_OFFSET,
                    PAGE_OFFSET + (max_low_pfn_mapped << PAGE_SHIFT))
 #ifdef CONFIG_X86_64
-               || within(cpa->vaddr, PAGE_OFFSET + (1UL<<32),
+               || within(vaddr, PAGE_OFFSET + (1UL<<32),
                    PAGE_OFFSET + (max_pfn_mapped << PAGE_SHIFT))
 #endif
        )) {
 
                alias_cpa = *cpa;
-               alias_cpa.vaddr = (unsigned long) __va(cpa->pfn << PAGE_SHIFT);
+               temp_cpa_vaddr = (unsigned long) __va(cpa->pfn << PAGE_SHIFT);
+               alias_cpa.vaddr = &temp_cpa_vaddr;
+               alias_cpa.flags &= ~CPA_ARRAY;
+
 
                ret = __change_page_attr_set_clr(&alias_cpa, 0);
        }
@@ -696,7 +749,7 @@ static int cpa_process_alias(struct cpa_data *cpa)
         * No need to redo, when the primary call touched the high
         * mapping already:
         */
-       if (within(cpa->vaddr, (unsigned long) _text, (unsigned long) _end))
+       if (within(vaddr, (unsigned long) _text, (unsigned long) _end))
                return 0;
 
        /*
@@ -707,8 +760,9 @@ static int cpa_process_alias(struct cpa_data *cpa)
                return 0;
 
        alias_cpa = *cpa;
-       alias_cpa.vaddr =
-               (cpa->pfn << PAGE_SHIFT) + __START_KERNEL_map - phys_base;
+       temp_cpa_vaddr = (cpa->pfn << PAGE_SHIFT) + __START_KERNEL_map - phys_base;
+       alias_cpa.vaddr = &temp_cpa_vaddr;
+       alias_cpa.flags &= ~CPA_ARRAY;
 
        /*
         * The high mapping range is imprecise, so ignore the return value.
@@ -728,6 +782,9 @@ static int __change_page_attr_set_clr(struct cpa_data *cpa, int checkalias)
                 * preservation check.
                 */
                cpa->numpages = numpages;
+               /* for array changes, we can't use large page */
+               if (cpa->flags & CPA_ARRAY)
+                       cpa->numpages = 1;
 
                ret = __change_page_attr(cpa, checkalias);
                if (ret)
@@ -746,7 +803,11 @@ static int __change_page_attr_set_clr(struct cpa_data *cpa, int checkalias)
                 */
                BUG_ON(cpa->numpages > numpages);
                numpages -= cpa->numpages;
-               cpa->vaddr += cpa->numpages * PAGE_SIZE;
+               if (cpa->flags & CPA_ARRAY)
+                       cpa->curpage++;
+               else
+                       *cpa->vaddr += cpa->numpages * PAGE_SIZE;
+
        }
        return 0;
 }
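
The main loop now makes progress in two different ways. In range mode cpa->numpages may be reduced by a whole large page's worth of 4 KiB pages in one step and the virtual address itself is advanced; in array mode the cpa->numpages = 1 forced above rules out the large-page path, so exactly one page is handled per pass and only curpage moves. Restated as a sketch (2 MiB / 4 KiB = 512 pages for the large-page case):

    /* range mode: consume up to 512 pages for one 2 MiB mapping,
     * then move the address itself forward */
    numpages -= cpa->numpages;
    *cpa->vaddr += cpa->numpages * PAGE_SIZE;

    /* array mode: exactly one page per pass, step to the next entry */
    numpages -= cpa->numpages;              /* == 1 here */
    cpa->curpage++;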
@@ -757,9 +818,9 @@ static inline int cache_attr(pgprot_t attr)
                (_PAGE_PAT | _PAGE_PAT_LARGE | _PAGE_PWT | _PAGE_PCD);
 }
 
-static int change_page_attr_set_clr(unsigned long addr, int numpages,
+static int change_page_attr_set_clr(unsigned long *addr, int numpages,
                                    pgprot_t mask_set, pgprot_t mask_clr,
-                                   int force_split)
+                                   int force_split, int array)
 {
        struct cpa_data cpa;
        int ret, cache, checkalias;
@@ -774,21 +835,38 @@ static int change_page_attr_set_clr(unsigned long addr, int numpages,
                return 0;
 
        /* Ensure we are PAGE_SIZE aligned */
-       if (addr & ~PAGE_MASK) {
-               addr &= PAGE_MASK;
-               /*
-                * People should not be passing in unaligned addresses:
-                */
-               WARN_ON_ONCE(1);
+       if (!array) {
+               if (*addr & ~PAGE_MASK) {
+                       *addr &= PAGE_MASK;
+                       /*
+                        * People should not be passing in unaligned addresses:
+                        */
+                       WARN_ON_ONCE(1);
+               }
+       } else {
+               int i;
+               for (i = 0; i < numpages; i++) {
+                       if (addr[i] & ~PAGE_MASK) {
+                               addr[i] &= PAGE_MASK;
+                               WARN_ON_ONCE(1);
+                       }
+               }
        }
 
+       /* Must avoid aliasing mappings in the highmem code */
+       kmap_flush_unused();
+
        cpa.vaddr = addr;
        cpa.numpages = numpages;
        cpa.mask_set = mask_set;
        cpa.mask_clr = mask_clr;
-       cpa.flushtlb = 0;
+       cpa.flags = 0;
+       cpa.curpage = 0;
        cpa.force_split = force_split;
 
+       if (array)
+               cpa.flags |= CPA_ARRAY;
+
        /* No alias checking for _NX bit modifications */
        checkalias = (pgprot_val(mask_set) | pgprot_val(mask_clr)) != _PAGE_NX;
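
Alignment is enforced the same way in both modes, just applied per array entry: the low PAGE_SHIFT bits are masked off with PAGE_MASK (~(PAGE_SIZE - 1)) and a one-time warning is emitted for sloppy callers. The new kmap_flush_unused() call tears down stale highmem kmap aliases first, so no leftover mapping keeps the old cache attribute (presumably why mm/highmem.c is also touched in this merge). Masking example with an illustrative value:

    /* PAGE_MASK == ~(PAGE_SIZE - 1) == 0xfffff000 for 4 KiB pages on 32-bit */
    unsigned long addr = 0xc0012345UL;
    addr &= PAGE_MASK;                      /* -> 0xc0012000UL, page-aligned */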
 
@@ -797,7 +875,7 @@ static int change_page_attr_set_clr(unsigned long addr, int numpages,
        /*
         * Check whether we really changed something:
         */
-       if (!cpa.flushtlb)
+       if (!(cpa.flags & CPA_FLUSHTLB))
                goto out;
 
        /*
@@ -812,9 +890,12 @@ static int change_page_attr_set_clr(unsigned long addr, int numpages,
         * error case we fall back to cpa_flush_all (which uses
         * wbindv):
         */
-       if (!ret && cpu_has_clflush)
-               cpa_flush_range(addr, numpages, cache);
-       else
+       if (!ret && cpu_has_clflush) {
+               if (cpa.flags & CPA_ARRAY)
+                       cpa_flush_array(addr, numpages, cache);
+               else
+                       cpa_flush_range(*addr, numpages, cache);
+       } else
                cpa_flush_all(cache);
 
 out:
@@ -823,16 +904,18 @@ out:
        return ret;
 }
 
-static inline int change_page_attr_set(unsigned long addr, int numpages,
-                                      pgprot_t mask)
+static inline int change_page_attr_set(unsigned long *addr, int numpages,
+                                      pgprot_t mask, int array)
 {
-       return change_page_attr_set_clr(addr, numpages, mask, __pgprot(0), 0);
+       return change_page_attr_set_clr(addr, numpages, mask, __pgprot(0), 0,
+               array);
 }
 
-static inline int change_page_attr_clear(unsigned long addr, int numpages,
-                                        pgprot_t mask)
+static inline int change_page_attr_clear(unsigned long *addr, int numpages,
+                                        pgprot_t mask, int array)
 {
-       return change_page_attr_set_clr(addr, numpages, __pgprot(0), mask, 0);
+       return change_page_attr_set_clr(addr, numpages, __pgprot(0), mask, 0,
+               array);
 }
 
 int _set_memory_uc(unsigned long addr, int numpages)
@@ -840,8 +923,8 @@ int _set_memory_uc(unsigned long addr, int numpages)
        /*
         * for now UC MINUS. see comments in ioremap_nocache()
         */
-       return change_page_attr_set(addr, numpages,
-                                   __pgprot(_PAGE_CACHE_UC_MINUS));
+       return change_page_attr_set(&addr, numpages,
+                                   __pgprot(_PAGE_CACHE_UC_MINUS), 0);
 }
 
 int set_memory_uc(unsigned long addr, int numpages)
@@ -857,10 +940,31 @@ int set_memory_uc(unsigned long addr, int numpages)
 }
 EXPORT_SYMBOL(set_memory_uc);
 
+int set_memory_array_uc(unsigned long *addr, int addrinarray)
+{
+       int i;
+       /*
+        * for now UC MINUS. see comments in ioremap_nocache()
+        */
+       for (i = 0; i < addrinarray; i++) {
+               if (reserve_memtype(addr[i], addr[i] + PAGE_SIZE,
+                           _PAGE_CACHE_UC_MINUS, NULL))
+                       goto out;
+       }
+
+       return change_page_attr_set(addr, addrinarray,
+                                   __pgprot(_PAGE_CACHE_UC_MINUS), 1);
+out:
+       while (--i >= 0)
+               free_memtype(addr[i], addr[i] + PAGE_SIZE);
+       return -EINVAL;
+}
+EXPORT_SYMBOL(set_memory_array_uc);
+
 int _set_memory_wc(unsigned long addr, int numpages)
 {
-       return change_page_attr_set(addr, numpages,
-                                   __pgprot(_PAGE_CACHE_WC));
+       return change_page_attr_set(&addr, numpages,
+                                   __pgprot(_PAGE_CACHE_WC), 0);
 }
 
 int set_memory_wc(unsigned long addr, int numpages)
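
set_memory_array_uc() reserves a PAT memtype entry per page before the single batched attribute change and, on a reservation conflict, unwinds only what it already reserved (the while (--i >= 0) walk) and returns -EINVAL. A successful call is undone with set_memory_array_wb(), which frees every memtype entry and clears the cache bits in one pass. Sketch of the expected pairing (NPAGES and va[] are illustrative):

    unsigned long va[NPAGES];       /* one kernel virtual address per 4K page */
    int ret;

    /* ... fill va[] ... */
    ret = set_memory_array_uc(va, NPAGES);
    if (ret)
            return ret;             /* nothing is left reserved on failure */

    /* ... use the pages as uncached ... */

    set_memory_array_wb(va, NPAGES);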
@@ -878,8 +982,8 @@ EXPORT_SYMBOL(set_memory_wc);
 
 int _set_memory_wb(unsigned long addr, int numpages)
 {
-       return change_page_attr_clear(addr, numpages,
-                                     __pgprot(_PAGE_CACHE_MASK));
+       return change_page_attr_clear(&addr, numpages,
+                                     __pgprot(_PAGE_CACHE_MASK), 0);
 }
 
 int set_memory_wb(unsigned long addr, int numpages)
@@ -890,37 +994,48 @@ int set_memory_wb(unsigned long addr, int numpages)
 }
 EXPORT_SYMBOL(set_memory_wb);
 
+int set_memory_array_wb(unsigned long *addr, int addrinarray)
+{
+       int i;
+       for (i = 0; i < addrinarray; i++)
+               free_memtype(addr[i], addr[i] + PAGE_SIZE);
+
+       return change_page_attr_clear(addr, addrinarray,
+                                     __pgprot(_PAGE_CACHE_MASK), 1);
+}
+EXPORT_SYMBOL(set_memory_array_wb);
+
 int set_memory_x(unsigned long addr, int numpages)
 {
-       return change_page_attr_clear(addr, numpages, __pgprot(_PAGE_NX));
+       return change_page_attr_clear(&addr, numpages, __pgprot(_PAGE_NX), 0);
 }
 EXPORT_SYMBOL(set_memory_x);
 
 int set_memory_nx(unsigned long addr, int numpages)
 {
-       return change_page_attr_set(addr, numpages, __pgprot(_PAGE_NX));
+       return change_page_attr_set(&addr, numpages, __pgprot(_PAGE_NX), 0);
 }
 EXPORT_SYMBOL(set_memory_nx);
 
 int set_memory_ro(unsigned long addr, int numpages)
 {
-       return change_page_attr_clear(addr, numpages, __pgprot(_PAGE_RW));
+       return change_page_attr_clear(&addr, numpages, __pgprot(_PAGE_RW), 0);
 }
 
 int set_memory_rw(unsigned long addr, int numpages)
 {
-       return change_page_attr_set(addr, numpages, __pgprot(_PAGE_RW));
+       return change_page_attr_set(&addr, numpages, __pgprot(_PAGE_RW), 0);
 }
 
 int set_memory_np(unsigned long addr, int numpages)
 {
-       return change_page_attr_clear(addr, numpages, __pgprot(_PAGE_PRESENT));
+       return change_page_attr_clear(&addr, numpages, __pgprot(_PAGE_PRESENT), 0);
 }
 
 int set_memory_4k(unsigned long addr, int numpages)
 {
-       return change_page_attr_set_clr(addr, numpages, __pgprot(0),
-                                       __pgprot(0), 1);
+       return change_page_attr_set_clr(&addr, numpages, __pgprot(0),
+                                       __pgprot(0), 1, 0);
 }
 
 int set_pages_uc(struct page *page, int numpages)
@@ -973,20 +1088,24 @@ int set_pages_rw(struct page *page, int numpages)
 
 static int __set_pages_p(struct page *page, int numpages)
 {
-       struct cpa_data cpa = { .vaddr = (unsigned long) page_address(page),
+       unsigned long tempaddr = (unsigned long) page_address(page);
+       struct cpa_data cpa = { .vaddr = &tempaddr,
                                .numpages = numpages,
                                .mask_set = __pgprot(_PAGE_PRESENT | _PAGE_RW),
-                               .mask_clr = __pgprot(0)};
+                               .mask_clr = __pgprot(0),
+                               .flags = 0};
 
        return __change_page_attr_set_clr(&cpa, 1);
 }
 
 static int __set_pages_np(struct page *page, int numpages)
 {
-       struct cpa_data cpa = { .vaddr = (unsigned long) page_address(page),
+       unsigned long tempaddr = (unsigned long) page_address(page);
+       struct cpa_data cpa = { .vaddr = &tempaddr,
                                .numpages = numpages,
                                .mask_set = __pgprot(0),
-                               .mask_clr = __pgprot(_PAGE_PRESENT | _PAGE_RW)};
+                               .mask_clr = __pgprot(_PAGE_PRESENT | _PAGE_RW),
+                               .flags = 0};
 
        return __change_page_attr_set_clr(&cpa, 1);
 }
diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
index 2a50e0fa64a53290726e9eb1d1b4252afcce1fc8..f049b1d6ebdfaff92ae7f11d1d8123a2b82a3f86 100644
--- a/arch/x86/mm/pat.c
+++ b/arch/x86/mm/pat.c
@@ -514,7 +514,7 @@ void unmap_devmem(unsigned long pfn, unsigned long size, pgprot_t vma_prot)
        free_memtype(addr, addr + size);
 }
 
-#if defined(CONFIG_DEBUG_FS)
+#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_X86_PAT)
 
 /* get Nth element of the linked list */
 static struct memtype *memtype_get_idx(loff_t pos)
@@ -598,4 +598,4 @@ static int __init pat_memtype_list_init(void)
 
 late_initcall(pat_memtype_list_init);
 
-#endif /* CONFIG_DEBUG_FS */
+#endif /* CONFIG_DEBUG_FS && CONFIG_X86_PAT */
diff --git a/drivers/char/agp/agp.h b/drivers/char/agp/agp.h
index 4bada0e8b8128ad548771b170b71f91579fb2dca..46f507531177dc91f85af1398755d2ff9f9cd829 100644
--- a/drivers/char/agp/agp.h
+++ b/drivers/char/agp/agp.h
@@ -116,7 +116,9 @@ struct agp_bridge_driver {
        struct agp_memory *(*alloc_by_type) (size_t, int);
        void (*free_by_type)(struct agp_memory *);
        void *(*agp_alloc_page)(struct agp_bridge_data *);
+       int (*agp_alloc_pages)(struct agp_bridge_data *, struct agp_memory *, size_t);
        void (*agp_destroy_page)(void *, int flags);
+       void (*agp_destroy_pages)(struct agp_memory *);
        int (*agp_type_to_mask_type) (struct agp_bridge_data *, int);
        void (*chipset_flush)(struct agp_bridge_data *);
 };
@@ -277,7 +279,10 @@ int agp_generic_remove_memory(struct agp_memory *mem, off_t pg_start, int type);
 struct agp_memory *agp_generic_alloc_by_type(size_t page_count, int type);
 void agp_generic_free_by_type(struct agp_memory *curr);
 void *agp_generic_alloc_page(struct agp_bridge_data *bridge);
+int agp_generic_alloc_pages(struct agp_bridge_data *agp_bridge,
+                           struct agp_memory *memory, size_t page_count);
 void agp_generic_destroy_page(void *addr, int flags);
+void agp_generic_destroy_pages(struct agp_memory *memory);
 void agp_free_key(int key);
 int agp_num_entries(void);
 u32 agp_collect_device_status(struct agp_bridge_data *bridge, u32 mode, u32 command);
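
Both new hooks are optional: generic.c below falls back to the per-page agp_alloc_page/agp_destroy_page path whenever they are NULL, so unconverted bridge drivers keep working unchanged. A driver opts in by pointing them at the generic batched implementations, exactly as intel-agp.c does further down (driver name here illustrative only):

    static const struct agp_bridge_driver example_bridge_driver = {
            /* ... existing callbacks ... */
            .agp_alloc_page         = agp_generic_alloc_page,
            .agp_alloc_pages        = agp_generic_alloc_pages,      /* new, batched */
            .agp_destroy_page       = agp_generic_destroy_page,
            .agp_destroy_pages      = agp_generic_destroy_pages,    /* new, batched */
    };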
diff --git a/drivers/char/agp/generic.c b/drivers/char/agp/generic.c
index 118dbde25dc71ac53ca2afa24f784d2a0dfbce17..10d6cbd7c05e59126f175eb1d8217cc95113d826 100644
--- a/drivers/char/agp/generic.c
+++ b/drivers/char/agp/generic.c
@@ -201,14 +201,22 @@ void agp_free_memory(struct agp_memory *curr)
                return;
        }
        if (curr->page_count != 0) {
-               for (i = 0; i < curr->page_count; i++) {
-                       curr->memory[i] = (unsigned long)gart_to_virt(curr->memory[i]);
-                       curr->bridge->driver->agp_destroy_page((void *)curr->memory[i],
-                                                              AGP_PAGE_DESTROY_UNMAP);
-               }
-               for (i = 0; i < curr->page_count; i++) {
-                       curr->bridge->driver->agp_destroy_page((void *)curr->memory[i],
-                                                              AGP_PAGE_DESTROY_FREE);
+               if (curr->bridge->driver->agp_destroy_pages) {
+                       curr->bridge->driver->agp_destroy_pages(curr);
+               } else {
+
+                       for (i = 0; i < curr->page_count; i++) {
+                               curr->memory[i] = (unsigned long)gart_to_virt(
+                                       curr->memory[i]);
+                               curr->bridge->driver->agp_destroy_page(
+                                       (void *)curr->memory[i],
+                                       AGP_PAGE_DESTROY_UNMAP);
+                       }
+                       for (i = 0; i < curr->page_count; i++) {
+                               curr->bridge->driver->agp_destroy_page(
+                                       (void *)curr->memory[i],
+                                       AGP_PAGE_DESTROY_FREE);
+                       }
                }
        }
        agp_free_key(curr->key);
@@ -264,6 +272,15 @@ struct agp_memory *agp_allocate_memory(struct agp_bridge_data *bridge,
        if (new == NULL)
                return NULL;
 
+       if (bridge->driver->agp_alloc_pages) {
+               if (bridge->driver->agp_alloc_pages(bridge, new, page_count)) {
+                       agp_free_memory(new);
+                       return NULL;
+               }
+               new->bridge = bridge;
+               return new;
+       }
+
        for (i = 0; i < page_count; i++) {
                void *addr = bridge->driver->agp_alloc_page(bridge);
 
@@ -1203,6 +1220,39 @@ EXPORT_SYMBOL(agp_generic_alloc_user);
  * against a maximum value.
  */
 
+int agp_generic_alloc_pages(struct agp_bridge_data *bridge, struct agp_memory *mem, size_t num_pages)
+{
+       struct page * page;
+       int i, ret = -ENOMEM;
+
+       for (i = 0; i < num_pages; i++) {
+               page = alloc_page(GFP_KERNEL | GFP_DMA32);
+               /* agp_free_memory() needs gart address */
+               if (page == NULL)
+                       goto out;
+
+#ifndef CONFIG_X86
+               map_page_into_agp(page);
+#endif
+               get_page(page);
+               atomic_inc(&agp_bridge->current_memory_agp);
+
+               /* set_memory_array_uc() needs virtual address */
+               mem->memory[i] = (unsigned long)page_address(page);
+               mem->page_count++;
+       }
+
+#ifdef CONFIG_X86
+       set_memory_array_uc(mem->memory, num_pages);
+#endif
+       ret = 0;
+out:
+       for (i = 0; i < mem->page_count; i++)
+               mem->memory[i] = virt_to_gart((void *)mem->memory[i]);
+       return ret;
+}
+EXPORT_SYMBOL(agp_generic_alloc_pages);
+
 void *agp_generic_alloc_page(struct agp_bridge_data *bridge)
 {
        struct page * page;
@@ -1219,6 +1269,37 @@ void *agp_generic_alloc_page(struct agp_bridge_data *bridge)
 }
 EXPORT_SYMBOL(agp_generic_alloc_page);
 
+void agp_generic_destroy_pages(struct agp_memory *mem)
+{
+       int i;
+       void *addr;
+       struct page *page;
+
+       if (!mem)
+               return;
+
+       for (i = 0; i < mem->page_count; i++)
+               mem->memory[i] = (unsigned long)gart_to_virt(mem->memory[i]);
+
+#ifdef CONFIG_X86
+       set_memory_array_wb(mem->memory, mem->page_count);
+#endif
+
+       for (i = 0; i < mem->page_count; i++) {
+               addr = (void *)mem->memory[i];
+               page = virt_to_page(addr);
+
+#ifndef CONFIG_X86
+               unmap_page_from_agp(page);
+#endif
+
+               put_page(page);
+               free_page((unsigned long)addr);
+               atomic_dec(&agp_bridge->current_memory_agp);
+               mem->memory[i] = 0;
+       }
+}
+EXPORT_SYMBOL(agp_generic_destroy_pages);
 
 void agp_generic_destroy_page(void *addr, int flags)
 {
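
The payoff of the batched hooks is that the cache-attribute change, and the TLB/cache flushing it implies, happens once per allocation instead of once per page. Roughly (illustrative comparison only, not the literal old code, which went through map_page_into_agp()):

    /* per-page: n attribute changes, n flushes */
    for (i = 0; i < n; i++)
            set_memory_uc(va[i], 1);

    /* batched: one attribute change, one flush for all n pages */
    set_memory_array_uc(va, n);

Note the ordering contract in agp_generic_alloc_pages(): mem->memory[] holds kernel virtual addresses while set_memory_array_uc() runs, and only afterwards, on both the success and the partial-failure path, is each entry converted with virt_to_gart(), which is the form agp_free_memory() and agp_generic_destroy_pages() expect.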
diff --git a/drivers/char/agp/intel-agp.c b/drivers/char/agp/intel-agp.c
index 016fdf0623a4ce7b5f849efc8ab099a2c5026497..043e36628d6d7faf03c34e5f7a2f9956f5429694 100644
--- a/drivers/char/agp/intel-agp.c
+++ b/drivers/char/agp/intel-agp.c
@@ -1711,7 +1711,9 @@ static const struct agp_bridge_driver intel_generic_driver = {
        .alloc_by_type          = agp_generic_alloc_by_type,
        .free_by_type           = agp_generic_free_by_type,
        .agp_alloc_page         = agp_generic_alloc_page,
+       .agp_alloc_pages        = agp_generic_alloc_pages,
        .agp_destroy_page       = agp_generic_destroy_page,
+       .agp_destroy_pages      = agp_generic_destroy_pages,
        .agp_type_to_mask_type  = agp_generic_type_to_mask_type,
 };
 
@@ -1736,7 +1738,9 @@ static const struct agp_bridge_driver intel_810_driver = {
        .alloc_by_type          = intel_i810_alloc_by_type,
        .free_by_type           = intel_i810_free_by_type,
        .agp_alloc_page         = agp_generic_alloc_page,
+       .agp_alloc_pages        = agp_generic_alloc_pages,
        .agp_destroy_page       = agp_generic_destroy_page,
+       .agp_destroy_pages      = agp_generic_destroy_pages,
        .agp_type_to_mask_type  = agp_generic_type_to_mask_type,
 };
 
@@ -1760,7 +1764,9 @@ static const struct agp_bridge_driver intel_815_driver = {
        .alloc_by_type          = agp_generic_alloc_by_type,
        .free_by_type           = agp_generic_free_by_type,
        .agp_alloc_page         = agp_generic_alloc_page,
+       .agp_alloc_pages        = agp_generic_alloc_pages,
        .agp_destroy_page       = agp_generic_destroy_page,
+       .agp_destroy_pages      = agp_generic_destroy_pages,
        .agp_type_to_mask_type  = agp_generic_type_to_mask_type,
 };
 
@@ -1785,7 +1791,9 @@ static const struct agp_bridge_driver intel_830_driver = {
        .alloc_by_type          = intel_i830_alloc_by_type,
        .free_by_type           = intel_i810_free_by_type,
        .agp_alloc_page         = agp_generic_alloc_page,
+       .agp_alloc_pages        = agp_generic_alloc_pages,
        .agp_destroy_page       = agp_generic_destroy_page,
+       .agp_destroy_pages      = agp_generic_destroy_pages,
        .agp_type_to_mask_type  = intel_i830_type_to_mask_type,
        .chipset_flush          = intel_i830_chipset_flush,
 };
@@ -1810,7 +1818,9 @@ static const struct agp_bridge_driver intel_820_driver = {
        .alloc_by_type          = agp_generic_alloc_by_type,
        .free_by_type           = agp_generic_free_by_type,
        .agp_alloc_page         = agp_generic_alloc_page,
+       .agp_alloc_pages        = agp_generic_alloc_pages,
        .agp_destroy_page       = agp_generic_destroy_page,
+       .agp_destroy_pages      = agp_generic_destroy_pages,
        .agp_type_to_mask_type  = agp_generic_type_to_mask_type,
 };
 
@@ -1834,7 +1844,9 @@ static const struct agp_bridge_driver intel_830mp_driver = {
        .alloc_by_type          = agp_generic_alloc_by_type,
        .free_by_type           = agp_generic_free_by_type,
        .agp_alloc_page         = agp_generic_alloc_page,
+       .agp_alloc_pages        = agp_generic_alloc_pages,
        .agp_destroy_page       = agp_generic_destroy_page,
+       .agp_destroy_pages      = agp_generic_destroy_pages,
        .agp_type_to_mask_type  = agp_generic_type_to_mask_type,
 };
 
@@ -1858,7 +1870,9 @@ static const struct agp_bridge_driver intel_840_driver = {
        .alloc_by_type          = agp_generic_alloc_by_type,
        .free_by_type           = agp_generic_free_by_type,
        .agp_alloc_page         = agp_generic_alloc_page,
+       .agp_alloc_pages        = agp_generic_alloc_pages,
        .agp_destroy_page       = agp_generic_destroy_page,
+       .agp_destroy_pages      = agp_generic_destroy_pages,
        .agp_type_to_mask_type  = agp_generic_type_to_mask_type,
 };
 
@@ -1882,7 +1896,9 @@ static const struct agp_bridge_driver intel_845_driver = {
        .alloc_by_type          = agp_generic_alloc_by_type,
        .free_by_type           = agp_generic_free_by_type,
        .agp_alloc_page         = agp_generic_alloc_page,
+       .agp_alloc_pages        = agp_generic_alloc_pages,
        .agp_destroy_page       = agp_generic_destroy_page,
+       .agp_destroy_pages      = agp_generic_destroy_pages,
        .agp_type_to_mask_type  = agp_generic_type_to_mask_type,
        .chipset_flush          = intel_i830_chipset_flush,
 };
@@ -1907,7 +1923,9 @@ static const struct agp_bridge_driver intel_850_driver = {
        .alloc_by_type          = agp_generic_alloc_by_type,
        .free_by_type           = agp_generic_free_by_type,
        .agp_alloc_page         = agp_generic_alloc_page,
+       .agp_alloc_pages        = agp_generic_alloc_pages,
        .agp_destroy_page       = agp_generic_destroy_page,
+       .agp_destroy_pages      = agp_generic_destroy_pages,
        .agp_type_to_mask_type  = agp_generic_type_to_mask_type,
 };
 
@@ -1931,7 +1949,9 @@ static const struct agp_bridge_driver intel_860_driver = {
        .alloc_by_type          = agp_generic_alloc_by_type,
        .free_by_type           = agp_generic_free_by_type,
        .agp_alloc_page         = agp_generic_alloc_page,
+       .agp_alloc_pages        = agp_generic_alloc_pages,
        .agp_destroy_page       = agp_generic_destroy_page,
+       .agp_destroy_pages      = agp_generic_destroy_pages,
        .agp_type_to_mask_type  = agp_generic_type_to_mask_type,
 };
 
@@ -1956,7 +1976,9 @@ static const struct agp_bridge_driver intel_915_driver = {
        .alloc_by_type          = intel_i830_alloc_by_type,
        .free_by_type           = intel_i810_free_by_type,
        .agp_alloc_page         = agp_generic_alloc_page,
+       .agp_alloc_pages        = agp_generic_alloc_pages,
        .agp_destroy_page       = agp_generic_destroy_page,
+       .agp_destroy_pages      = agp_generic_destroy_pages,
        .agp_type_to_mask_type  = intel_i830_type_to_mask_type,
        .chipset_flush          = intel_i915_chipset_flush,
 };
@@ -1982,7 +2004,9 @@ static const struct agp_bridge_driver intel_i965_driver = {
        .alloc_by_type          = intel_i830_alloc_by_type,
        .free_by_type           = intel_i810_free_by_type,
        .agp_alloc_page         = agp_generic_alloc_page,
+       .agp_alloc_pages        = agp_generic_alloc_pages,
        .agp_destroy_page       = agp_generic_destroy_page,
+       .agp_destroy_pages      = agp_generic_destroy_pages,
        .agp_type_to_mask_type  = intel_i830_type_to_mask_type,
        .chipset_flush          = intel_i915_chipset_flush,
 };
@@ -2007,7 +2031,9 @@ static const struct agp_bridge_driver intel_7505_driver = {
        .alloc_by_type          = agp_generic_alloc_by_type,
        .free_by_type           = agp_generic_free_by_type,
        .agp_alloc_page         = agp_generic_alloc_page,
+       .agp_alloc_pages        = agp_generic_alloc_pages,
        .agp_destroy_page       = agp_generic_destroy_page,
+       .agp_destroy_pages      = agp_generic_destroy_pages,
        .agp_type_to_mask_type  = agp_generic_type_to_mask_type,
 };
 
@@ -2032,7 +2058,9 @@ static const struct agp_bridge_driver intel_g33_driver = {
        .alloc_by_type          = intel_i830_alloc_by_type,
        .free_by_type           = intel_i810_free_by_type,
        .agp_alloc_page         = agp_generic_alloc_page,
+       .agp_alloc_pages        = agp_generic_alloc_pages,
        .agp_destroy_page       = agp_generic_destroy_page,
+       .agp_destroy_pages      = agp_generic_destroy_pages,
        .agp_type_to_mask_type  = intel_i830_type_to_mask_type,
        .chipset_flush          = intel_i915_chipset_flush,
 };
diff --git a/include/asm-x86/cacheflush.h b/include/asm-x86/cacheflush.h
index f4c0ab50d2c2bc4d8e5d5beea1c84c26409225d7..0a5f71817b3eae4994355df2ec67a1f7101572aa 100644
--- a/include/asm-x86/cacheflush.h
+++ b/include/asm-x86/cacheflush.h
@@ -66,6 +66,9 @@ int set_memory_rw(unsigned long addr, int numpages);
 int set_memory_np(unsigned long addr, int numpages);
 int set_memory_4k(unsigned long addr, int numpages);
 
+int set_memory_array_uc(unsigned long *addr, int addrinarray);
+int set_memory_array_wb(unsigned long *addr, int addrinarray);
+
 /*
  * For legacy compatibility with the old APIs, a few functions
  * are provided that work on a "struct page".
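
The new prototypes take an array of addrinarray kernel virtual addresses, one per 4 KiB page; the pages need not be contiguous. Building the array from struct page pointers follows the same pattern generic.c uses (pages[] and N are illustrative):

    unsigned long va[N];
    int i, ret;

    for (i = 0; i < N; i++)
            va[i] = (unsigned long)page_address(pages[i]);

    ret = set_memory_array_uc(va, N);   /* later undone with set_memory_array_wb(va, N) */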
diff --git a/mm/highmem.c b/mm/highmem.c
index e16e1523b688680572513b16ef0e49d5ac0314f0..b36b83b920ffe862f0d7060e29687c9bb998a0d8 100644
--- a/mm/highmem.c
+++ b/mm/highmem.c
@@ -70,6 +70,7 @@ static DECLARE_WAIT_QUEUE_HEAD(pkmap_map_wait);
 static void flush_all_zero_pkmaps(void)
 {
        int i;
+       int need_flush = 0;
 
        flush_cache_kmaps();
 
@@ -101,8 +102,10 @@ static void flush_all_zero_pkmaps(void)
                          &pkmap_page_table[i]);
 
                set_page_address(page, NULL);
+               need_flush = 1;
        }
-       flush_tlb_kernel_range(PKMAP_ADDR(0), PKMAP_ADDR(LAST_PKMAP));
+       if (need_flush)
+               flush_tlb_kernel_range(PKMAP_ADDR(0), PKMAP_ADDR(LAST_PKMAP));
 }
 
 /**
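
flush_all_zero_pkmaps() used to issue flush_tlb_kernel_range() over the whole pkmap window unconditionally, even when it tore nothing down; that matters more now that kmap_flush_unused() runs on every change_page_attr_set_clr() call. The change records whether any pkmap PTE was actually cleared and flushes only then. The pattern in isolation (the predicate and teardown helpers are stand-ins for the real loop body above):

    int i, need_flush = 0;

    for (i = 0; i < LAST_PKMAP; i++) {
            if (!pkmap_entry_is_unused(i))          /* stand-in predicate     */
                    continue;
            teardown_pkmap_entry(i);                /* pte_clear + unmap page */
            need_flush = 1;
    }
    if (need_flush)
            flush_tlb_kernel_range(PKMAP_ADDR(0), PKMAP_ADDR(LAST_PKMAP));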