/*
 * Copyright © 2010 Daniel Vetter
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "drmP.h"
#include "i915_drm.h"
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"

/* PPGTT support for Sandybridge/Gen6 and later */
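/* Reset a range of PPGTT entries back to the scratch page, so that stale
 * GPU mappings never point at freed memory.
 */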
static void i915_ppgtt_clear_range(struct i915_hw_ppgtt *ppgtt,
				   unsigned first_entry,
				   unsigned num_entries)
{
	uint32_t *pt_vaddr;
	uint32_t scratch_pte;
	unsigned act_pd = first_entry / I915_PPGTT_PT_ENTRIES;
	unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES;
	unsigned last_pte, i;

	scratch_pte = GEN6_PTE_ADDR_ENCODE(ppgtt->scratch_page_dma_addr);
	scratch_pte |= GEN6_PTE_VALID | GEN6_PTE_CACHE_LLC;

	while (num_entries) {
		last_pte = first_pte + num_entries;
		if (last_pte > I915_PPGTT_PT_ENTRIES)
			last_pte = I915_PPGTT_PT_ENTRIES;

		pt_vaddr = kmap_atomic(ppgtt->pt_pages[act_pd]);

		for (i = first_pte; i < last_pte; i++)
			pt_vaddr[i] = scratch_pte;

		kunmap_atomic(pt_vaddr);

		num_entries -= last_pte - first_pte;
		first_pte = 0;
		act_pd++;
	}
}

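/*
 * Set up the aliasing PPGTT: allocate one page table page per page
 * directory entry, DMA-map them when an IOMMU is active, and point every
 * entry at the scratch page. The page directory itself lives in entries
 * stolen from the end of the global GTT.
 */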
int i915_gem_init_aliasing_ppgtt(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct i915_hw_ppgtt *ppgtt;
	unsigned first_pd_entry_in_global_pt;
	int i;
	int ret = -ENOMEM;

	/* ppgtt PDEs reside in the global gtt pagetable, which has 512*1024
	 * entries. For aliasing ppgtt support we just steal them at the end for
	 * now. */
	first_pd_entry_in_global_pt = 512*1024 - I915_PPGTT_PD_ENTRIES;

	ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
	if (!ppgtt)
		return ret;

	ppgtt->num_pd_entries = I915_PPGTT_PD_ENTRIES;
	ppgtt->pt_pages = kzalloc(sizeof(struct page *)*ppgtt->num_pd_entries,
				  GFP_KERNEL);
	if (!ppgtt->pt_pages)
		goto err_ppgtt;

	for (i = 0; i < ppgtt->num_pd_entries; i++) {
		ppgtt->pt_pages[i] = alloc_page(GFP_KERNEL);
		if (!ppgtt->pt_pages[i])
			goto err_pt_alloc;
	}

	if (dev_priv->mm.gtt->needs_dmar) {
		ppgtt->pt_dma_addr = kzalloc(sizeof(dma_addr_t)
						*ppgtt->num_pd_entries,
					     GFP_KERNEL);
		if (!ppgtt->pt_dma_addr)
			goto err_pt_alloc;
	}

	for (i = 0; i < ppgtt->num_pd_entries; i++) {
		dma_addr_t pt_addr;

		if (dev_priv->mm.gtt->needs_dmar) {
			pt_addr = pci_map_page(dev->pdev, ppgtt->pt_pages[i],
					       0, 4096,
					       PCI_DMA_BIDIRECTIONAL);

			if (pci_dma_mapping_error(dev->pdev,
						  pt_addr)) {
				ret = -EIO;
				goto err_pd_pin;
			}
			ppgtt->pt_dma_addr[i] = pt_addr;
		} else
			pt_addr = page_to_phys(ppgtt->pt_pages[i]);
	}

	ppgtt->scratch_page_dma_addr = dev_priv->mm.gtt->scratch_page_dma;

	i915_ppgtt_clear_range(ppgtt, 0,
			       ppgtt->num_pd_entries*I915_PPGTT_PT_ENTRIES);

	ppgtt->pd_offset = (first_pd_entry_in_global_pt)*sizeof(uint32_t);

	dev_priv->mm.aliasing_ppgtt = ppgtt;

	return 0;

err_pd_pin:
	if (ppgtt->pt_dma_addr) {
		for (i--; i >= 0; i--)
			pci_unmap_page(dev->pdev, ppgtt->pt_dma_addr[i],
				       4096, PCI_DMA_BIDIRECTIONAL);
	}
err_pt_alloc:
	kfree(ppgtt->pt_dma_addr);
	for (i = 0; i < ppgtt->num_pd_entries; i++) {
		if (ppgtt->pt_pages[i])
			__free_page(ppgtt->pt_pages[i]);
	}
	kfree(ppgtt->pt_pages);
err_ppgtt:
	kfree(ppgtt);

	return ret;
}

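/* Tear down everything allocated by i915_gem_init_aliasing_ppgtt(). */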
void i915_gem_cleanup_aliasing_ppgtt(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
	int i;

	if (!ppgtt)
		return;

	if (ppgtt->pt_dma_addr) {
		for (i = 0; i < ppgtt->num_pd_entries; i++)
			pci_unmap_page(dev->pdev, ppgtt->pt_dma_addr[i],
				       4096, PCI_DMA_BIDIRECTIONAL);
	}

	kfree(ppgtt->pt_dma_addr);
	for (i = 0; i < ppgtt->num_pd_entries; i++)
		__free_page(ppgtt->pt_pages[i]);
	kfree(ppgtt->pt_pages);
	kfree(ppgtt);
}

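/*
 * Write PPGTT entries for an object backed by a DMA scatterlist: walk the
 * sg entries page by page and fill consecutive PTEs starting at
 * first_entry.
 */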
static void i915_ppgtt_insert_sg_entries(struct i915_hw_ppgtt *ppgtt,
					 struct scatterlist *sg_list,
					 unsigned sg_len,
					 unsigned first_entry,
					 uint32_t pte_flags)
{
	uint32_t *pt_vaddr, pte;
	unsigned act_pd = first_entry / I915_PPGTT_PT_ENTRIES;
	unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES;
	unsigned i, j, m, segment_len;
	dma_addr_t page_addr;
	struct scatterlist *sg;

	/* init sg walking */
	sg = sg_list;
	i = 0;
	segment_len = sg_dma_len(sg) >> PAGE_SHIFT;
	m = 0;

	while (i < sg_len) {
		pt_vaddr = kmap_atomic(ppgtt->pt_pages[act_pd]);

		for (j = first_pte; j < I915_PPGTT_PT_ENTRIES; j++) {
			page_addr = sg_dma_address(sg) + (m << PAGE_SHIFT);
			pte = GEN6_PTE_ADDR_ENCODE(page_addr);
			pt_vaddr[j] = pte | pte_flags;

			/* grab the next page */
			m++;
			if (m == segment_len) {
				sg = sg_next(sg);
				i++;
				if (i == sg_len)
					break;

				segment_len = sg_dma_len(sg) >> PAGE_SHIFT;
				m = 0;
			}
		}

		kunmap_atomic(pt_vaddr);

		first_pte = 0;
		act_pd++;
	}
}

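/*
 * Same as i915_ppgtt_insert_sg_entries(), but for objects that are not
 * DMA-mapped: take the physical address of each backing page directly.
 */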
static void i915_ppgtt_insert_pages(struct i915_hw_ppgtt *ppgtt,
				    unsigned first_entry, unsigned num_entries,
				    struct page **pages, uint32_t pte_flags)
{
	uint32_t *pt_vaddr, pte;
	unsigned act_pd = first_entry / I915_PPGTT_PT_ENTRIES;
	unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES;
	unsigned last_pte, i;
	dma_addr_t page_addr;

	while (num_entries) {
		last_pte = first_pte + num_entries;
		last_pte = min_t(unsigned, last_pte, I915_PPGTT_PT_ENTRIES);

		pt_vaddr = kmap_atomic(ppgtt->pt_pages[act_pd]);

		for (i = first_pte; i < last_pte; i++) {
			page_addr = page_to_phys(*pages);
			pte = GEN6_PTE_ADDR_ENCODE(page_addr);
			pt_vaddr[i] = pte | pte_flags;

			pages++;
		}

		kunmap_atomic(pt_vaddr);

		num_entries -= last_pte - first_pte;
		first_pte = 0;
		act_pd++;
	}
}

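/*
 * Map an object into the aliasing PPGTT. The cache level selects the PTE
 * cacheability bits; the actual PTE writes go through the sg path or the
 * page-array path depending on whether DMA remapping is in use.
 */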
void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt,
			    struct drm_i915_gem_object *obj,
			    enum i915_cache_level cache_level)
{
	struct drm_device *dev = obj->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t pte_flags = GEN6_PTE_VALID;

	switch (cache_level) {
	case I915_CACHE_LLC_MLC:
		pte_flags |= GEN6_PTE_CACHE_LLC_MLC;
		break;
	case I915_CACHE_LLC:
		pte_flags |= GEN6_PTE_CACHE_LLC;
		break;
	case I915_CACHE_NONE:
		pte_flags |= GEN6_PTE_UNCACHED;
		break;
	default:
		BUG();
	}

	if (dev_priv->mm.gtt->needs_dmar) {
		BUG_ON(!obj->sg_list);

		i915_ppgtt_insert_sg_entries(ppgtt,
					     obj->sg_list,
					     obj->num_sg,
					     obj->gtt_space->start >> PAGE_SHIFT,
					     pte_flags);
	} else
		i915_ppgtt_insert_pages(ppgtt,
					obj->gtt_space->start >> PAGE_SHIFT,
					obj->base.size >> PAGE_SHIFT,
					obj->pages,
					pte_flags);
}

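/* Point the object's PPGTT range back at the scratch page. */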
void i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt,
			      struct drm_i915_gem_object *obj)
{
	i915_ppgtt_clear_range(ppgtt,
			       obj->gtt_space->start >> PAGE_SHIFT,
			       obj->base.size >> PAGE_SHIFT);
}

/* XXX kill agp_type! */
static unsigned int cache_level_to_agp_type(struct drm_device *dev,
					    enum i915_cache_level cache_level)
{
	switch (cache_level) {
	case I915_CACHE_LLC_MLC:
		if (INTEL_INFO(dev)->gen >= 6)
			return AGP_USER_CACHED_MEMORY_LLC_MLC;
		/* Older chipsets do not have this extra level of CPU
		 * caching, so fallthrough and request the PTE simply
		 * as cached.
		 */
	case I915_CACHE_LLC:
		return AGP_USER_CACHED_MEMORY;
	default:
	case I915_CACHE_NONE:
		return AGP_USER_MEMORY;
	}
}

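/*
 * Some chipsets (gtt->do_idle_maps) cannot safely have their DMA mappings
 * changed while the GPU is busy, so idle it first. Returns the previous
 * interruptible state so undo_idling() can restore it.
 */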
static bool do_idling(struct drm_i915_private *dev_priv)
{
	bool ret = dev_priv->mm.interruptible;

	if (unlikely(dev_priv->mm.gtt->do_idle_maps)) {
		dev_priv->mm.interruptible = false;
		if (i915_gpu_idle(dev_priv->dev, false)) {
			DRM_ERROR("Couldn't idle GPU\n");
			/* Wait a bit, in hopes it avoids the hang */
			udelay(10);
		}
	}

	return ret;
}

static void undo_idling(struct drm_i915_private *dev_priv, bool interruptible)
{
	if (unlikely(dev_priv->mm.gtt->do_idle_maps))
		dev_priv->mm.interruptible = interruptible;
}

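/*
 * Re-establish all global GTT mappings, e.g. on resume when the GTT
 * contents cannot be assumed to have survived.
 */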
void i915_gem_restore_gtt_mappings(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj;

	/* First fill our portion of the GTT with scratch pages */
	intel_gtt_clear_range(dev_priv->mm.gtt_start / PAGE_SIZE,
			      (dev_priv->mm.gtt_end - dev_priv->mm.gtt_start) / PAGE_SIZE);

	list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) {
		i915_gem_clflush_object(obj);
		i915_gem_gtt_bind_object(obj, obj->cache_level);
	}

	intel_gtt_chipset_flush();
}

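/*
 * Create the DMA scatterlist for an object's backing pages when an IOMMU
 * requires it; without DMA remapping there is nothing to do.
 */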
int i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj)
{
	struct drm_device *dev = obj->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (dev_priv->mm.gtt->needs_dmar)
		return intel_gtt_map_memory(obj->pages,
					    obj->base.size >> PAGE_SHIFT,
					    &obj->sg_list,
					    &obj->num_sg);
	else
		return 0;
}

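/*
 * Bind an object into the global GTT, using the sg path when DMA
 * remapping is active and the plain page array otherwise.
 */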
void i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj,
			      enum i915_cache_level cache_level)
{
	struct drm_device *dev = obj->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned int agp_type = cache_level_to_agp_type(dev, cache_level);

	if (dev_priv->mm.gtt->needs_dmar) {
		BUG_ON(!obj->sg_list);

		intel_gtt_insert_sg_entries(obj->sg_list,
					    obj->num_sg,
					    obj->gtt_space->start >> PAGE_SHIFT,
					    agp_type);
	} else
		intel_gtt_insert_pages(obj->gtt_space->start >> PAGE_SHIFT,
				       obj->base.size >> PAGE_SHIFT,
				       obj->pages,
				       agp_type);

	obj->has_global_gtt_mapping = 1;
}

void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj)
{
	intel_gtt_clear_range(obj->gtt_space->start >> PAGE_SHIFT,
			      obj->base.size >> PAGE_SHIFT);

	obj->has_global_gtt_mapping = 0;
}

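/*
 * Undo i915_gem_gtt_prepare_object(): release the DMA scatterlist,
 * idling the GPU first on chipsets that require it.
 */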
void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj)
{
	struct drm_device *dev = obj->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	bool interruptible;

	interruptible = do_idling(dev_priv);

	if (obj->sg_list) {
		intel_gtt_unmap_memory(obj->sg_list, obj->num_sg);
		obj->sg_list = NULL;
	}

	undo_idling(dev_priv, interruptible);
}

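/*
 * Set up the drm_mm range manager for the portion of the global GTT that
 * the driver manages, and record the overall GTT geometry.
 */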
void i915_gem_init_global_gtt(struct drm_device *dev,
			      unsigned long start,
			      unsigned long mappable_end,
			      unsigned long end)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	/* Subtract the guard page ... */
	drm_mm_init(&dev_priv->mm.gtt_space, start, end - start - PAGE_SIZE);

	dev_priv->mm.gtt_start = start;
	dev_priv->mm.gtt_mappable_end = mappable_end;
	dev_priv->mm.gtt_end = end;
	dev_priv->mm.gtt_total = end - start;
	dev_priv->mm.mappable_gtt_total = min(end, mappable_end) - start;

	/* ... but ensure that we clear the entire range. */
	intel_gtt_clear_range(start / PAGE_SIZE, (end-start) / PAGE_SIZE);
}