Merge branch 'drm-platform' into drm-testing
author     Dave Airlie <airlied@redhat.com>  Wed, 7 Jul 2010 08:37:35 +0000 (18:37 +1000)
committer  Dave Airlie <airlied@redhat.com>  Wed, 7 Jul 2010 08:37:35 +0000 (18:37 +1000)
* drm-platform:
  drm: Make sure the DRM offset matches the CPU
  drm: Add __arm defines to DRM
  drm: Add support for platform devices to register as DRM devices
  drm: Remove drm_resource wrappers
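
A condensed sketch of the recurring conversion from the "Remove drm_resource wrappers" patch, as it shows up in the i915, nouveau and radeon hunks below: callers stop going through the PCI-only DRM helpers and read the BAR directly from dev->pdev, so the DRM core no longer assumes every drm_device is backed by a PCI device (variable names taken from the i915 mmio_bar hunk below):

	/* before: the DRM core wrapped the PCI resource accessors */
	base = drm_get_resource_start(dev, mmio_bar);
	size = drm_get_resource_len(dev, mmio_bar);

	/* after: PCI-backed drivers query the BAR themselves via dev->pdev */
	base = pci_resource_start(dev->pdev, mmio_bar);
	size = pci_resource_len(dev->pdev, mmio_bar);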

16 files changed:
drivers/gpu/drm/drm_bufs.c
drivers/gpu/drm/drm_sysfs.c
drivers/gpu/drm/i915/i915_dma.c
drivers/gpu/drm/i915/i915_drv.c
drivers/gpu/drm/nouveau/nouveau_drv.c
drivers/gpu/drm/nouveau/nouveau_mem.c
drivers/gpu/drm/radeon/evergreen.c
drivers/gpu/drm/radeon/r100.c
drivers/gpu/drm/radeon/r600.c
drivers/gpu/drm/radeon/radeon_bios.c
drivers/gpu/drm/radeon/radeon_device.c
drivers/gpu/drm/radeon/radeon_drv.c
drivers/gpu/drm/radeon/rs600.c
drivers/gpu/drm/radeon/rs690.c
drivers/gpu/drm/radeon/rv770.c
drivers/gpu/drm/vmwgfx/vmwgfx_drv.c

index 2092e7bb788f90302ff226c69a383c1a6a5aea67,4fcbc445a8e5454cb8ea5f0cb0cbc5729645f7d6..a5c9ce93bbcba1cf4d7390cfa3f7fe1c1e504078
  #include <asm/shmparam.h>
  #include "drmP.h"
  
- resource_size_t drm_get_resource_start(struct drm_device *dev, unsigned int resource)
- {
-       return pci_resource_start(dev->pdev, resource);
- }
- EXPORT_SYMBOL(drm_get_resource_start);
- resource_size_t drm_get_resource_len(struct drm_device *dev, unsigned int resource)
- {
-       return pci_resource_len(dev->pdev, resource);
- }
- EXPORT_SYMBOL(drm_get_resource_len);
  static struct drm_map_list *drm_find_matching_map(struct drm_device *dev,
                                                  struct drm_local_map *map)
  {
@@@ -189,7 -176,7 +176,7 @@@ static int drm_addmap_core(struct drm_d
        switch (map->type) {
        case _DRM_REGISTERS:
        case _DRM_FRAME_BUFFER:
- #if !defined(__sparc__) && !defined(__alpha__) && !defined(__ia64__) && !defined(__powerpc64__) && !defined(__x86_64__)
+ #if !defined(__sparc__) && !defined(__alpha__) && !defined(__ia64__) && !defined(__powerpc64__) && !defined(__x86_64__) && !defined(__arm__)
                if (map->offset + (map->size-1) < map->offset ||
                    map->offset < virt_to_phys(high_memory)) {
                        kfree(map);
@@@ -961,7 -948,7 +948,7 @@@ int drm_addbufs_pci(struct drm_device 
                dma->buflist[i + dma->buf_count] = &entry->buflist[i];
        }
  
 -      /* No allocations failed, so now we can replace the orginal pagelist
 +      /* No allocations failed, so now we can replace the original pagelist
         * with the new one.
         */
        if (dma->page_count) {
index 101d381e9d86ced4967677d639753297df43ae77,14d9d829ef27736f652bc62e5e05ec85c9715136..86118a742231b42fd711f18b64deb182a8d60d3a
@@@ -193,9 -193,8 +193,9 @@@ static ssize_t enabled_show(struct devi
                        "disabled");
  }
  
 -static ssize_t edid_show(struct kobject *kobj, struct bin_attribute *attr,
 -                       char *buf, loff_t off, size_t count)
 +static ssize_t edid_show(struct file *filp, struct kobject *kobj,
 +                       struct bin_attribute *attr, char *buf, loff_t off,
 +                       size_t count)
  {
        struct device *connector_dev = container_of(kobj, struct device, kobj);
        struct drm_connector *connector = to_drm_connector(connector_dev);
@@@ -489,7 -488,8 +489,8 @@@ int drm_sysfs_device_add(struct drm_min
        int err;
        char *minor_str;
  
-       minor->kdev.parent = &minor->dev->pdev->dev;
+       minor->kdev.parent = minor->dev->dev;
        minor->kdev.class = drm_class;
        minor->kdev.release = drm_sysfs_device_release;
        minor->kdev.devt = minor->device;
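
The one-line parenting change above is the sysfs half of the platform-device support: the minor's kobject parent now comes from the generic struct device pointer in drm_device, filled in by whichever bus registered the device, rather than by dereferencing the PCI-only pdev field. A condensed view of just that change, with the reasoning as comments (that both registration paths populate dev->dev is inferred from the series description, not shown in this diff):

	/* before: hard dependency on a PCI parent device */
	minor->kdev.parent = &minor->dev->pdev->dev;

	/* after: any bus whose probe path filled in dev->dev works */
	minor->kdev.parent = minor->dev->dev;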
index f00c5ae9556ccb47358ce1f5267dd9cc5cb17a21,9bed5617e0ead2cca0339ab51ed3bf54442c0259..92898035845d606e134865b6d9348d187909b979
  #include "i915_drm.h"
  #include "i915_drv.h"
  #include "i915_trace.h"
+ #include <linux/pci.h>
  #include <linux/vgaarb.h>
  #include <linux/acpi.h>
  #include <linux/pnp.h>
  #include <linux/vga_switcheroo.h>
  #include <linux/slab.h>
  
 -/* Really want an OS-independent resettable timer.  Would like to have
 - * this loop run for (eg) 3 sec, but have the timer reset every time
 - * the head pointer changes, so that EBUSY only happens if the ring
 - * actually stalls for (eg) 3 seconds.
 - */
 -int i915_wait_ring(struct drm_device * dev, int n, const char *caller)
 -{
 -      drm_i915_private_t *dev_priv = dev->dev_private;
 -      drm_i915_ring_buffer_t *ring = &(dev_priv->ring);
 -      u32 acthd_reg = IS_I965G(dev) ? ACTHD_I965 : ACTHD;
 -      u32 last_acthd = I915_READ(acthd_reg);
 -      u32 acthd;
 -      u32 last_head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
 -      int i;
 -
 -      trace_i915_ring_wait_begin (dev);
 -
 -      for (i = 0; i < 100000; i++) {
 -              ring->head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
 -              acthd = I915_READ(acthd_reg);
 -              ring->space = ring->head - (ring->tail + 8);
 -              if (ring->space < 0)
 -                      ring->space += ring->Size;
 -              if (ring->space >= n) {
 -                      trace_i915_ring_wait_end (dev);
 -                      return 0;
 -              }
 -
 -              if (dev->primary->master) {
 -                      struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
 -                      if (master_priv->sarea_priv)
 -                              master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
 -              }
 -
 -
 -              if (ring->head != last_head)
 -                      i = 0;
 -              if (acthd != last_acthd)
 -                      i = 0;
 -
 -              last_head = ring->head;
 -              last_acthd = acthd;
 -              msleep_interruptible(10);
 -
 -      }
 -
 -      trace_i915_ring_wait_end (dev);
 -      return -EBUSY;
 -}
 -
 -/* As a ringbuffer is only allowed to wrap between instructions, fill
 - * the tail with NOOPs.
 - */
 -int i915_wrap_ring(struct drm_device *dev)
 -{
 -      drm_i915_private_t *dev_priv = dev->dev_private;
 -      volatile unsigned int *virt;
 -      int rem;
 -
 -      rem = dev_priv->ring.Size - dev_priv->ring.tail;
 -      if (dev_priv->ring.space < rem) {
 -              int ret = i915_wait_ring(dev, rem, __func__);
 -              if (ret)
 -                      return ret;
 -      }
 -      dev_priv->ring.space -= rem;
 -
 -      virt = (unsigned int *)
 -              (dev_priv->ring.virtual_start + dev_priv->ring.tail);
 -      rem /= 4;
 -      while (rem--)
 -              *virt++ = MI_NOOP;
 -
 -      dev_priv->ring.tail = 0;
 -
 -      return 0;
 -}
 -
  /**
   * Sets up the hardware status page for devices that need a physical address
   * in the register.
@@@ -55,11 -134,10 +56,11 @@@ static int i915_init_phys_hws(struct dr
                DRM_ERROR("Can not allocate hardware status page\n");
                return -ENOMEM;
        }
 -      dev_priv->hw_status_page = dev_priv->status_page_dmah->vaddr;
 +      dev_priv->render_ring.status_page.page_addr
 +              = dev_priv->status_page_dmah->vaddr;
        dev_priv->dma_status_page = dev_priv->status_page_dmah->busaddr;
  
 -      memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
 +      memset(dev_priv->render_ring.status_page.page_addr, 0, PAGE_SIZE);
  
        if (IS_I965G(dev))
                dev_priv->dma_status_page |= (dev_priv->dma_status_page >> 28) &
@@@ -82,8 -160,8 +83,8 @@@ static void i915_free_hws(struct drm_de
                dev_priv->status_page_dmah = NULL;
        }
  
 -      if (dev_priv->status_gfx_addr) {
 -              dev_priv->status_gfx_addr = 0;
 +      if (dev_priv->render_ring.status_page.gfx_addr) {
 +              dev_priv->render_ring.status_page.gfx_addr = 0;
                drm_core_ioremapfree(&dev_priv->hws_map, dev);
        }
  
@@@ -95,7 -173,7 +96,7 @@@ void i915_kernel_lost_context(struct dr
  {
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct drm_i915_master_private *master_priv;
 -      drm_i915_ring_buffer_t *ring = &(dev_priv->ring);
 +      struct intel_ring_buffer *ring = &dev_priv->render_ring;
  
        /*
         * We should never lose context on the ring with modesetting
        ring->tail = I915_READ(PRB0_TAIL) & TAIL_ADDR;
        ring->space = ring->head - (ring->tail + 8);
        if (ring->space < 0)
 -              ring->space += ring->Size;
 +              ring->space += ring->size;
  
        if (!dev->primary->master)
                return;
@@@ -128,11 -206,12 +129,11 @@@ static int i915_dma_cleanup(struct drm_
        if (dev->irq_enabled)
                drm_irq_uninstall(dev);
  
 -      if (dev_priv->ring.virtual_start) {
 -              drm_core_ioremapfree(&dev_priv->ring.map, dev);
 -              dev_priv->ring.virtual_start = NULL;
 -              dev_priv->ring.map.handle = NULL;
 -              dev_priv->ring.map.size = 0;
 -      }
 +      mutex_lock(&dev->struct_mutex);
 +      intel_cleanup_ring_buffer(dev, &dev_priv->render_ring);
 +      if (HAS_BSD(dev))
 +              intel_cleanup_ring_buffer(dev, &dev_priv->bsd_ring);
 +      mutex_unlock(&dev->struct_mutex);
  
        /* Clear the HWS virtual address at teardown */
        if (I915_NEED_GFX_HWS(dev))
@@@ -155,24 -234,24 +156,24 @@@ static int i915_initialize(struct drm_d
        }
  
        if (init->ring_size != 0) {
 -              if (dev_priv->ring.ring_obj != NULL) {
 +              if (dev_priv->render_ring.gem_object != NULL) {
                        i915_dma_cleanup(dev);
                        DRM_ERROR("Client tried to initialize ringbuffer in "
                                  "GEM mode\n");
                        return -EINVAL;
                }
  
 -              dev_priv->ring.Size = init->ring_size;
 +              dev_priv->render_ring.size = init->ring_size;
  
 -              dev_priv->ring.map.offset = init->ring_start;
 -              dev_priv->ring.map.size = init->ring_size;
 -              dev_priv->ring.map.type = 0;
 -              dev_priv->ring.map.flags = 0;
 -              dev_priv->ring.map.mtrr = 0;
 +              dev_priv->render_ring.map.offset = init->ring_start;
 +              dev_priv->render_ring.map.size = init->ring_size;
 +              dev_priv->render_ring.map.type = 0;
 +              dev_priv->render_ring.map.flags = 0;
 +              dev_priv->render_ring.map.mtrr = 0;
  
 -              drm_core_ioremap_wc(&dev_priv->ring.map, dev);
 +              drm_core_ioremap_wc(&dev_priv->render_ring.map, dev);
  
 -              if (dev_priv->ring.map.handle == NULL) {
 +              if (dev_priv->render_ring.map.handle == NULL) {
                        i915_dma_cleanup(dev);
                        DRM_ERROR("can not ioremap virtual address for"
                                  " ring buffer\n");
                }
        }
  
 -      dev_priv->ring.virtual_start = dev_priv->ring.map.handle;
 +      dev_priv->render_ring.virtual_start = dev_priv->render_ring.map.handle;
  
        dev_priv->cpp = init->cpp;
        dev_priv->back_offset = init->back_offset;
@@@ -200,29 -279,26 +201,29 @@@ static int i915_dma_resume(struct drm_d
  {
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
  
 +      struct intel_ring_buffer *ring;
        DRM_DEBUG_DRIVER("%s\n", __func__);
  
 -      if (dev_priv->ring.map.handle == NULL) {
 +      ring = &dev_priv->render_ring;
 +
 +      if (ring->map.handle == NULL) {
                DRM_ERROR("can not ioremap virtual address for"
                          " ring buffer\n");
                return -ENOMEM;
        }
  
        /* Program Hardware Status Page */
 -      if (!dev_priv->hw_status_page) {
 +      if (!ring->status_page.page_addr) {
                DRM_ERROR("Can not find hardware status page\n");
                return -EINVAL;
        }
        DRM_DEBUG_DRIVER("hw status page @ %p\n",
 -                              dev_priv->hw_status_page);
 -
 -      if (dev_priv->status_gfx_addr != 0)
 -              I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr);
 +                              ring->status_page.page_addr);
 +      if (ring->status_page.gfx_addr != 0)
 +              ring->setup_status_page(dev, ring);
        else
                I915_WRITE(HWS_PGA, dev_priv->dma_status_page);
 +
        DRM_DEBUG_DRIVER("Enabled hardware status page\n");
  
        return 0;
@@@ -332,8 -408,9 +333,8 @@@ static int i915_emit_cmds(struct drm_de
  {
        drm_i915_private_t *dev_priv = dev->dev_private;
        int i;
 -      RING_LOCALS;
  
 -      if ((dwords+1) * sizeof(int) >= dev_priv->ring.Size - 8)
 +      if ((dwords+1) * sizeof(int) >= dev_priv->render_ring.size - 8)
                return -EINVAL;
  
        BEGIN_LP_RING((dwords+1)&~1);
@@@ -366,7 -443,9 +367,7 @@@ i915_emit_box(struct drm_device *dev
              struct drm_clip_rect *boxes,
              int i, int DR1, int DR4)
  {
 -      drm_i915_private_t *dev_priv = dev->dev_private;
        struct drm_clip_rect box = boxes[i];
 -      RING_LOCALS;
  
        if (box.y2 <= box.y1 || box.x2 <= box.x1 || box.y2 <= 0 || box.x2 <= 0) {
                DRM_ERROR("Bad box %d,%d..%d,%d\n",
@@@ -403,6 -482,7 +404,6 @@@ static void i915_emit_breadcrumb(struc
  {
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
 -      RING_LOCALS;
  
        dev_priv->counter++;
        if (dev_priv->counter > 0x7FFFFFFFUL)
@@@ -456,8 -536,10 +457,8 @@@ static int i915_dispatch_batchbuffer(st
                                     drm_i915_batchbuffer_t * batch,
                                     struct drm_clip_rect *cliprects)
  {
 -      drm_i915_private_t *dev_priv = dev->dev_private;
        int nbox = batch->num_cliprects;
        int i = 0, count;
 -      RING_LOCALS;
  
        if ((batch->start | batch->used) & 0x7) {
                DRM_ERROR("alignment");
@@@ -506,6 -588,7 +507,6 @@@ static int i915_dispatch_flip(struct dr
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct drm_i915_master_private *master_priv =
                dev->primary->master->driver_priv;
 -      RING_LOCALS;
  
        if (!master_priv->sarea_priv)
                return -EINVAL;
@@@ -558,8 -641,7 +559,8 @@@ static int i915_quiescent(struct drm_de
        drm_i915_private_t *dev_priv = dev->dev_private;
  
        i915_kernel_lost_context(dev);
 -      return i915_wait_ring(dev, dev_priv->ring.Size - 8, __func__);
 +      return intel_wait_ring_buffer(dev, &dev_priv->render_ring,
 +                                    dev_priv->render_ring.size - 8);
  }
  
  static int i915_flush_ioctl(struct drm_device *dev, void *data,
@@@ -746,9 -828,6 +747,9 @@@ static int i915_getparam(struct drm_dev
                /* depends on GEM */
                value = dev_priv->has_gem;
                break;
 +      case I915_PARAM_HAS_BSD:
 +              value = HAS_BSD(dev);
 +              break;
        default:
                DRM_DEBUG_DRIVER("Unknown parameter %d\n",
                                 param->param);
@@@ -804,7 -883,6 +805,7 @@@ static int i915_set_status_page(struct 
  {
        drm_i915_private_t *dev_priv = dev->dev_private;
        drm_i915_hws_addr_t *hws = data;
 +      struct intel_ring_buffer *ring = &dev_priv->render_ring;
  
        if (!I915_NEED_GFX_HWS(dev))
                return -EINVAL;
  
        DRM_DEBUG_DRIVER("set status page addr 0x%08x\n", (u32)hws->addr);
  
 -      dev_priv->status_gfx_addr = hws->addr & (0x1ffff<<12);
 +      ring->status_page.gfx_addr = hws->addr & (0x1ffff<<12);
  
        dev_priv->hws_map.offset = dev->agp->base + hws->addr;
        dev_priv->hws_map.size = 4*1024;
        drm_core_ioremap_wc(&dev_priv->hws_map, dev);
        if (dev_priv->hws_map.handle == NULL) {
                i915_dma_cleanup(dev);
 -              dev_priv->status_gfx_addr = 0;
 +              ring->status_page.gfx_addr = 0;
                DRM_ERROR("can not ioremap virtual address for"
                                " G33 hw status page\n");
                return -ENOMEM;
        }
 -      dev_priv->hw_status_page = dev_priv->hws_map.handle;
 +      ring->status_page.page_addr = dev_priv->hws_map.handle;
 +      memset(ring->status_page.page_addr, 0, PAGE_SIZE);
 +      I915_WRITE(HWS_PGA, ring->status_page.gfx_addr);
  
 -      memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
 -      I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr);
        DRM_DEBUG_DRIVER("load hws HWS_PGA with gfx mem 0x%x\n",
 -                              dev_priv->status_gfx_addr);
 +                       ring->status_page.gfx_addr);
        DRM_DEBUG_DRIVER("load hws at %p\n",
 -                              dev_priv->hw_status_page);
 +                       ring->status_page.page_addr);
        return 0;
  }
  
@@@ -1231,7 -1309,7 +1232,7 @@@ static void i915_warn_stolen(struct drm
  static void i915_setup_compression(struct drm_device *dev, int size)
  {
        struct drm_i915_private *dev_priv = dev->dev_private;
 -      struct drm_mm_node *compressed_fb, *compressed_llb;
 +      struct drm_mm_node *compressed_fb, *uninitialized_var(compressed_llb);
        unsigned long cfb_base;
        unsigned long ll_base = 0;
  
@@@ -1322,14 -1400,12 +1323,14 @@@ static void i915_switcheroo_set_state(s
        struct drm_device *dev = pci_get_drvdata(pdev);
        pm_message_t pmm = { .event = PM_EVENT_SUSPEND };
        if (state == VGA_SWITCHEROO_ON) {
 -              printk(KERN_INFO "i915: switched off\n");
 +              printk(KERN_INFO "i915: switched on\n");
                /* i915 resume handler doesn't set to D0 */
                pci_set_power_state(dev->pdev, PCI_D0);
                i915_resume(dev);
 +              drm_kms_helper_poll_enable(dev);
        } else {
                printk(KERN_ERR "i915: switched off\n");
 +              drm_kms_helper_poll_disable(dev);
                i915_suspend(dev, pmm);
        }
  }
@@@ -1354,7 -1430,7 +1355,7 @@@ static int i915_load_modeset_init(struc
        int fb_bar = IS_I9XX(dev) ? 2 : 0;
        int ret = 0;
  
-       dev->mode_config.fb_base = drm_get_resource_start(dev, fb_bar) &
+       dev->mode_config.fb_base = pci_resource_start(dev->pdev, fb_bar) &
                0xff000000;
  
        /* Basic memrange allocator for stolen space (aka vram) */
        /* if we have > 1 VGA cards, then disable the radeon VGA resources */
        ret = vga_client_register(dev->pdev, dev, NULL, i915_vga_set_decode);
        if (ret)
 -              goto destroy_ringbuffer;
 +              goto cleanup_ringbuffer;
  
        ret = vga_switcheroo_register_client(dev->pdev,
                                             i915_switcheroo_set_state,
                                             i915_switcheroo_can_switch);
        if (ret)
 -              goto destroy_ringbuffer;
 +              goto cleanup_vga_client;
 +
 +      /* IIR "flip pending" bit means done if this bit is set */
 +      if (IS_GEN3(dev) && (I915_READ(ECOSKPD) & ECO_FLIP_DONE))
 +              dev_priv->flip_pending_is_done = true;
  
        intel_modeset_init(dev);
  
        ret = drm_irq_install(dev);
        if (ret)
 -              goto destroy_ringbuffer;
 +              goto cleanup_vga_switcheroo;
  
        /* Always safe in the mode setting case. */
        /* FIXME: do pre/post-mode set stuff in core KMS code */
  
        I915_WRITE(INSTPM, (1 << 5) | (1 << 21));
  
 -      intel_fbdev_init(dev);
 +      ret = intel_fbdev_init(dev);
 +      if (ret)
 +              goto cleanup_irq;
 +
        drm_kms_helper_poll_init(dev);
        return 0;
  
 -destroy_ringbuffer:
 +cleanup_irq:
 +      drm_irq_uninstall(dev);
 +cleanup_vga_switcheroo:
 +      vga_switcheroo_unregister_client(dev->pdev);
 +cleanup_vga_client:
 +      vga_client_register(dev->pdev, NULL, NULL, NULL);
 +cleanup_ringbuffer:
        mutex_lock(&dev->struct_mutex);
        i915_gem_cleanup_ringbuffer(dev);
        mutex_unlock(&dev->struct_mutex);
@@@ -1477,11 -1540,14 +1478,11 @@@ void i915_master_destroy(struct drm_dev
        master->driver_priv = NULL;
  }
  
 -static void i915_get_mem_freq(struct drm_device *dev)
 +static void i915_pineview_get_mem_freq(struct drm_device *dev)
  {
        drm_i915_private_t *dev_priv = dev->dev_private;
        u32 tmp;
  
 -      if (!IS_PINEVIEW(dev))
 -              return;
 -
        tmp = I915_READ(CLKCFG);
  
        switch (tmp & CLKCFG_FSB_MASK) {
                dev_priv->mem_freq = 800;
                break;
        }
 +
 +      /* detect pineview DDR3 setting */
 +      tmp = I915_READ(CSHRDDR3CTL);
 +      dev_priv->is_ddr3 = (tmp & CSHRDDR3CTL_DDR3) ? 1 : 0;
 +}
 +
 +static void i915_ironlake_get_mem_freq(struct drm_device *dev)
 +{
 +      drm_i915_private_t *dev_priv = dev->dev_private;
 +      u16 ddrpll, csipll;
 +
 +      ddrpll = I915_READ16(DDRMPLL1);
 +      csipll = I915_READ16(CSIPLL0);
 +
 +      switch (ddrpll & 0xff) {
 +      case 0xc:
 +              dev_priv->mem_freq = 800;
 +              break;
 +      case 0x10:
 +              dev_priv->mem_freq = 1066;
 +              break;
 +      case 0x14:
 +              dev_priv->mem_freq = 1333;
 +              break;
 +      case 0x18:
 +              dev_priv->mem_freq = 1600;
 +              break;
 +      default:
 +              DRM_DEBUG_DRIVER("unknown memory frequency 0x%02x\n",
 +                               ddrpll & 0xff);
 +              dev_priv->mem_freq = 0;
 +              break;
 +      }
 +
 +      dev_priv->r_t = dev_priv->mem_freq;
 +
 +      switch (csipll & 0x3ff) {
 +      case 0x00c:
 +              dev_priv->fsb_freq = 3200;
 +              break;
 +      case 0x00e:
 +              dev_priv->fsb_freq = 3733;
 +              break;
 +      case 0x010:
 +              dev_priv->fsb_freq = 4266;
 +              break;
 +      case 0x012:
 +              dev_priv->fsb_freq = 4800;
 +              break;
 +      case 0x014:
 +              dev_priv->fsb_freq = 5333;
 +              break;
 +      case 0x016:
 +              dev_priv->fsb_freq = 5866;
 +              break;
 +      case 0x018:
 +              dev_priv->fsb_freq = 6400;
 +              break;
 +      default:
 +              DRM_DEBUG_DRIVER("unknown fsb frequency 0x%04x\n",
 +                               csipll & 0x3ff);
 +              dev_priv->fsb_freq = 0;
 +              break;
 +      }
 +
 +      if (dev_priv->fsb_freq == 3200) {
 +              dev_priv->c_m = 0;
 +      } else if (dev_priv->fsb_freq > 3200 && dev_priv->fsb_freq <= 4800) {
 +              dev_priv->c_m = 1;
 +      } else {
 +              dev_priv->c_m = 2;
 +      }
  }
  
 +struct v_table {
 +      u8 vid;
 +      unsigned long vd; /* in .1 mil */
 +      unsigned long vm; /* in .1 mil */
 +      u8 pvid;
 +};
 +
 +static struct v_table v_table[] = {
 +      { 0, 16125, 15000, 0x7f, },
 +      { 1, 16000, 14875, 0x7e, },
 +      { 2, 15875, 14750, 0x7d, },
 +      { 3, 15750, 14625, 0x7c, },
 +      { 4, 15625, 14500, 0x7b, },
 +      { 5, 15500, 14375, 0x7a, },
 +      { 6, 15375, 14250, 0x79, },
 +      { 7, 15250, 14125, 0x78, },
 +      { 8, 15125, 14000, 0x77, },
 +      { 9, 15000, 13875, 0x76, },
 +      { 10, 14875, 13750, 0x75, },
 +      { 11, 14750, 13625, 0x74, },
 +      { 12, 14625, 13500, 0x73, },
 +      { 13, 14500, 13375, 0x72, },
 +      { 14, 14375, 13250, 0x71, },
 +      { 15, 14250, 13125, 0x70, },
 +      { 16, 14125, 13000, 0x6f, },
 +      { 17, 14000, 12875, 0x6e, },
 +      { 18, 13875, 12750, 0x6d, },
 +      { 19, 13750, 12625, 0x6c, },
 +      { 20, 13625, 12500, 0x6b, },
 +      { 21, 13500, 12375, 0x6a, },
 +      { 22, 13375, 12250, 0x69, },
 +      { 23, 13250, 12125, 0x68, },
 +      { 24, 13125, 12000, 0x67, },
 +      { 25, 13000, 11875, 0x66, },
 +      { 26, 12875, 11750, 0x65, },
 +      { 27, 12750, 11625, 0x64, },
 +      { 28, 12625, 11500, 0x63, },
 +      { 29, 12500, 11375, 0x62, },
 +      { 30, 12375, 11250, 0x61, },
 +      { 31, 12250, 11125, 0x60, },
 +      { 32, 12125, 11000, 0x5f, },
 +      { 33, 12000, 10875, 0x5e, },
 +      { 34, 11875, 10750, 0x5d, },
 +      { 35, 11750, 10625, 0x5c, },
 +      { 36, 11625, 10500, 0x5b, },
 +      { 37, 11500, 10375, 0x5a, },
 +      { 38, 11375, 10250, 0x59, },
 +      { 39, 11250, 10125, 0x58, },
 +      { 40, 11125, 10000, 0x57, },
 +      { 41, 11000, 9875, 0x56, },
 +      { 42, 10875, 9750, 0x55, },
 +      { 43, 10750, 9625, 0x54, },
 +      { 44, 10625, 9500, 0x53, },
 +      { 45, 10500, 9375, 0x52, },
 +      { 46, 10375, 9250, 0x51, },
 +      { 47, 10250, 9125, 0x50, },
 +      { 48, 10125, 9000, 0x4f, },
 +      { 49, 10000, 8875, 0x4e, },
 +      { 50, 9875, 8750, 0x4d, },
 +      { 51, 9750, 8625, 0x4c, },
 +      { 52, 9625, 8500, 0x4b, },
 +      { 53, 9500, 8375, 0x4a, },
 +      { 54, 9375, 8250, 0x49, },
 +      { 55, 9250, 8125, 0x48, },
 +      { 56, 9125, 8000, 0x47, },
 +      { 57, 9000, 7875, 0x46, },
 +      { 58, 8875, 7750, 0x45, },
 +      { 59, 8750, 7625, 0x44, },
 +      { 60, 8625, 7500, 0x43, },
 +      { 61, 8500, 7375, 0x42, },
 +      { 62, 8375, 7250, 0x41, },
 +      { 63, 8250, 7125, 0x40, },
 +      { 64, 8125, 7000, 0x3f, },
 +      { 65, 8000, 6875, 0x3e, },
 +      { 66, 7875, 6750, 0x3d, },
 +      { 67, 7750, 6625, 0x3c, },
 +      { 68, 7625, 6500, 0x3b, },
 +      { 69, 7500, 6375, 0x3a, },
 +      { 70, 7375, 6250, 0x39, },
 +      { 71, 7250, 6125, 0x38, },
 +      { 72, 7125, 6000, 0x37, },
 +      { 73, 7000, 5875, 0x36, },
 +      { 74, 6875, 5750, 0x35, },
 +      { 75, 6750, 5625, 0x34, },
 +      { 76, 6625, 5500, 0x33, },
 +      { 77, 6500, 5375, 0x32, },
 +      { 78, 6375, 5250, 0x31, },
 +      { 79, 6250, 5125, 0x30, },
 +      { 80, 6125, 5000, 0x2f, },
 +      { 81, 6000, 4875, 0x2e, },
 +      { 82, 5875, 4750, 0x2d, },
 +      { 83, 5750, 4625, 0x2c, },
 +      { 84, 5625, 4500, 0x2b, },
 +      { 85, 5500, 4375, 0x2a, },
 +      { 86, 5375, 4250, 0x29, },
 +      { 87, 5250, 4125, 0x28, },
 +      { 88, 5125, 4000, 0x27, },
 +      { 89, 5000, 3875, 0x26, },
 +      { 90, 4875, 3750, 0x25, },
 +      { 91, 4750, 3625, 0x24, },
 +      { 92, 4625, 3500, 0x23, },
 +      { 93, 4500, 3375, 0x22, },
 +      { 94, 4375, 3250, 0x21, },
 +      { 95, 4250, 3125, 0x20, },
 +      { 96, 4125, 3000, 0x1f, },
 +      { 97, 4125, 3000, 0x1e, },
 +      { 98, 4125, 3000, 0x1d, },
 +      { 99, 4125, 3000, 0x1c, },
 +      { 100, 4125, 3000, 0x1b, },
 +      { 101, 4125, 3000, 0x1a, },
 +      { 102, 4125, 3000, 0x19, },
 +      { 103, 4125, 3000, 0x18, },
 +      { 104, 4125, 3000, 0x17, },
 +      { 105, 4125, 3000, 0x16, },
 +      { 106, 4125, 3000, 0x15, },
 +      { 107, 4125, 3000, 0x14, },
 +      { 108, 4125, 3000, 0x13, },
 +      { 109, 4125, 3000, 0x12, },
 +      { 110, 4125, 3000, 0x11, },
 +      { 111, 4125, 3000, 0x10, },
 +      { 112, 4125, 3000, 0x0f, },
 +      { 113, 4125, 3000, 0x0e, },
 +      { 114, 4125, 3000, 0x0d, },
 +      { 115, 4125, 3000, 0x0c, },
 +      { 116, 4125, 3000, 0x0b, },
 +      { 117, 4125, 3000, 0x0a, },
 +      { 118, 4125, 3000, 0x09, },
 +      { 119, 4125, 3000, 0x08, },
 +      { 120, 1125, 0, 0x07, },
 +      { 121, 1000, 0, 0x06, },
 +      { 122, 875, 0, 0x05, },
 +      { 123, 750, 0, 0x04, },
 +      { 124, 625, 0, 0x03, },
 +      { 125, 500, 0, 0x02, },
 +      { 126, 375, 0, 0x01, },
 +      { 127, 0, 0, 0x00, },
 +};
 +
 +struct cparams {
 +      int i;
 +      int t;
 +      int m;
 +      int c;
 +};
 +
 +static struct cparams cparams[] = {
 +      { 1, 1333, 301, 28664 },
 +      { 1, 1066, 294, 24460 },
 +      { 1, 800, 294, 25192 },
 +      { 0, 1333, 276, 27605 },
 +      { 0, 1066, 276, 27605 },
 +      { 0, 800, 231, 23784 },
 +};
 +
 +unsigned long i915_chipset_val(struct drm_i915_private *dev_priv)
 +{
 +      u64 total_count, diff, ret;
 +      u32 count1, count2, count3, m = 0, c = 0;
 +      unsigned long now = jiffies_to_msecs(jiffies), diff1;
 +      int i;
 +
 +      diff1 = now - dev_priv->last_time1;
 +
 +      count1 = I915_READ(DMIEC);
 +      count2 = I915_READ(DDREC);
 +      count3 = I915_READ(CSIEC);
 +
 +      total_count = count1 + count2 + count3;
 +
 +      /* FIXME: handle per-counter overflow */
 +      if (total_count < dev_priv->last_count1) {
 +              diff = ~0UL - dev_priv->last_count1;
 +              diff += total_count;
 +      } else {
 +              diff = total_count - dev_priv->last_count1;
 +      }
 +
 +      for (i = 0; i < ARRAY_SIZE(cparams); i++) {
 +              if (cparams[i].i == dev_priv->c_m &&
 +                  cparams[i].t == dev_priv->r_t) {
 +                      m = cparams[i].m;
 +                      c = cparams[i].c;
 +                      break;
 +              }
 +      }
 +
 +      div_u64(diff, diff1);
 +      ret = ((m * diff) + c);
 +      div_u64(ret, 10);
 +
 +      dev_priv->last_count1 = total_count;
 +      dev_priv->last_time1 = now;
 +
 +      return ret;
 +}
 +
 +unsigned long i915_mch_val(struct drm_i915_private *dev_priv)
 +{
 +      unsigned long m, x, b;
 +      u32 tsfs;
 +
 +      tsfs = I915_READ(TSFS);
 +
 +      m = ((tsfs & TSFS_SLOPE_MASK) >> TSFS_SLOPE_SHIFT);
 +      x = I915_READ8(TR1);
 +
 +      b = tsfs & TSFS_INTR_MASK;
 +
 +      return ((m * x) / 127) - b;
 +}
 +
 +static unsigned long pvid_to_extvid(struct drm_i915_private *dev_priv, u8 pxvid)
 +{
 +      unsigned long val = 0;
 +      int i;
 +
 +      for (i = 0; i < ARRAY_SIZE(v_table); i++) {
 +              if (v_table[i].pvid == pxvid) {
 +                      if (IS_MOBILE(dev_priv->dev))
 +                              val = v_table[i].vm;
 +                      else
 +                              val = v_table[i].vd;
 +              }
 +      }
 +
 +      return val;
 +}
 +
 +void i915_update_gfx_val(struct drm_i915_private *dev_priv)
 +{
 +      struct timespec now, diff1;
 +      u64 diff;
 +      unsigned long diffms;
 +      u32 count;
 +
 +      getrawmonotonic(&now);
 +      diff1 = timespec_sub(now, dev_priv->last_time2);
 +
 +      /* Don't divide by 0 */
 +      diffms = diff1.tv_sec * 1000 + diff1.tv_nsec / 1000000;
 +      if (!diffms)
 +              return;
 +
 +      count = I915_READ(GFXEC);
 +
 +      if (count < dev_priv->last_count2) {
 +              diff = ~0UL - dev_priv->last_count2;
 +              diff += count;
 +      } else {
 +              diff = count - dev_priv->last_count2;
 +      }
 +
 +      dev_priv->last_count2 = count;
 +      dev_priv->last_time2 = now;
 +
 +      /* More magic constants... */
 +      diff = diff * 1181;
 +      div_u64(diff, diffms * 10);
 +      dev_priv->gfx_power = diff;
 +}
 +
 +unsigned long i915_gfx_val(struct drm_i915_private *dev_priv)
 +{
 +      unsigned long t, corr, state1, corr2, state2;
 +      u32 pxvid, ext_v;
 +
 +      pxvid = I915_READ(PXVFREQ_BASE + (dev_priv->cur_delay * 4));
 +      pxvid = (pxvid >> 24) & 0x7f;
 +      ext_v = pvid_to_extvid(dev_priv, pxvid);
 +
 +      state1 = ext_v;
 +
 +      t = i915_mch_val(dev_priv);
 +
 +      /* Revel in the empirically derived constants */
 +
 +      /* Correction factor in 1/100000 units */
 +      if (t > 80)
 +              corr = ((t * 2349) + 135940);
 +      else if (t >= 50)
 +              corr = ((t * 964) + 29317);
 +      else /* < 50 */
 +              corr = ((t * 301) + 1004);
 +
 +      corr = corr * ((150142 * state1) / 10000 - 78642);
 +      corr /= 100000;
 +      corr2 = (corr * dev_priv->corr);
 +
 +      state2 = (corr2 * state1) / 10000;
 +      state2 /= 100; /* convert to mW */
 +
 +      i915_update_gfx_val(dev_priv);
 +
 +      return dev_priv->gfx_power + state2;
 +}
 +
 +/* Global for IPS driver to get at the current i915 device */
 +static struct drm_i915_private *i915_mch_dev;
 +/*
 + * Lock protecting IPS related data structures
 + *   - i915_mch_dev
 + *   - dev_priv->max_delay
 + *   - dev_priv->min_delay
 + *   - dev_priv->fmax
 + *   - dev_priv->gpu_busy
 + */
 +DEFINE_SPINLOCK(mchdev_lock);
 +
 +/**
 + * i915_read_mch_val - return value for IPS use
 + *
 + * Calculate and return a value for the IPS driver to use when deciding whether
 + * we have thermal and power headroom to increase CPU or GPU power budget.
 + */
 +unsigned long i915_read_mch_val(void)
 +{
 +      struct drm_i915_private *dev_priv;
 +      unsigned long chipset_val, graphics_val, ret = 0;
 +
 +      spin_lock(&mchdev_lock);
 +      if (!i915_mch_dev)
 +              goto out_unlock;
 +      dev_priv = i915_mch_dev;
 +
 +      chipset_val = i915_chipset_val(dev_priv);
 +      graphics_val = i915_gfx_val(dev_priv);
 +
 +      ret = chipset_val + graphics_val;
 +
 +out_unlock:
 +      spin_unlock(&mchdev_lock);
 +
 +      return ret;
 +}
 +EXPORT_SYMBOL_GPL(i915_read_mch_val);
 +
 +/**
 + * i915_gpu_raise - raise GPU frequency limit
 + *
 + * Raise the limit; IPS indicates we have thermal headroom.
 + */
 +bool i915_gpu_raise(void)
 +{
 +      struct drm_i915_private *dev_priv;
 +      bool ret = true;
 +
 +      spin_lock(&mchdev_lock);
 +      if (!i915_mch_dev) {
 +              ret = false;
 +              goto out_unlock;
 +      }
 +      dev_priv = i915_mch_dev;
 +
 +      if (dev_priv->max_delay > dev_priv->fmax)
 +              dev_priv->max_delay--;
 +
 +out_unlock:
 +      spin_unlock(&mchdev_lock);
 +
 +      return ret;
 +}
 +EXPORT_SYMBOL_GPL(i915_gpu_raise);
 +
 +/**
 + * i915_gpu_lower - lower GPU frequency limit
 + *
 + * IPS indicates we're close to a thermal limit, so throttle back the GPU
 + * frequency maximum.
 + */
 +bool i915_gpu_lower(void)
 +{
 +      struct drm_i915_private *dev_priv;
 +      bool ret = true;
 +
 +      spin_lock(&mchdev_lock);
 +      if (!i915_mch_dev) {
 +              ret = false;
 +              goto out_unlock;
 +      }
 +      dev_priv = i915_mch_dev;
 +
 +      if (dev_priv->max_delay < dev_priv->min_delay)
 +              dev_priv->max_delay++;
 +
 +out_unlock:
 +      spin_unlock(&mchdev_lock);
 +
 +      return ret;
 +}
 +EXPORT_SYMBOL_GPL(i915_gpu_lower);
 +
 +/**
 + * i915_gpu_busy - indicate GPU business to IPS
 + *
 + * Tell the IPS driver whether or not the GPU is busy.
 + */
 +bool i915_gpu_busy(void)
 +{
 +      struct drm_i915_private *dev_priv;
 +      bool ret = false;
 +
 +      spin_lock(&mchdev_lock);
 +      if (!i915_mch_dev)
 +              goto out_unlock;
 +      dev_priv = i915_mch_dev;
 +
 +      ret = dev_priv->busy;
 +
 +out_unlock:
 +      spin_unlock(&mchdev_lock);
 +
 +      return ret;
 +}
 +EXPORT_SYMBOL_GPL(i915_gpu_busy);
 +
 +/**
 + * i915_gpu_turbo_disable - disable graphics turbo
 + *
 + * Disable graphics turbo by resetting the max frequency and setting the
 + * current frequency to the default.
 + */
 +bool i915_gpu_turbo_disable(void)
 +{
 +      struct drm_i915_private *dev_priv;
 +      bool ret = true;
 +
 +      spin_lock(&mchdev_lock);
 +      if (!i915_mch_dev) {
 +              ret = false;
 +              goto out_unlock;
 +      }
 +      dev_priv = i915_mch_dev;
 +
 +      dev_priv->max_delay = dev_priv->fstart;
 +
 +      if (!ironlake_set_drps(dev_priv->dev, dev_priv->fstart))
 +              ret = false;
 +
 +out_unlock:
 +      spin_unlock(&mchdev_lock);
 +
 +      return ret;
 +}
 +EXPORT_SYMBOL_GPL(i915_gpu_turbo_disable);
 +
  /**
   * i915_driver_load - setup chip and create an initial config
   * @dev: DRM device
@@@ -2046,6 -1595,7 +2047,6 @@@ int i915_driver_load(struct drm_device 
        resource_size_t base, size;
        int ret = 0, mmio_bar;
        uint32_t agp_size, prealloc_size, prealloc_start;
 -
        /* i915 has 4 more counters */
        dev->counters += 4;
        dev->types[6] = _DRM_STAT_IRQ;
  
        /* Add register map (needed for suspend/resume) */
        mmio_bar = IS_I9XX(dev) ? 0 : 1;
-       base = drm_get_resource_start(dev, mmio_bar);
-       size = drm_get_resource_len(dev, mmio_bar);
+       base = pci_resource_start(dev->pdev, mmio_bar);
+       size = pci_resource_len(dev->pdev, mmio_bar);
  
        if (i915_get_bridge_dev(dev)) {
                ret = -EIO;
                dev_priv->has_gem = 0;
        }
  
 +      if (dev_priv->has_gem == 0 &&
 +          drm_core_check_feature(dev, DRIVER_MODESET)) {
 +              DRM_ERROR("kernel modesetting requires GEM, disabling driver.\n");
 +              ret = -ENODEV;
 +              goto out_iomapfree;
 +      }
 +
        dev->driver->get_vblank_counter = i915_get_vblank_counter;
        dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
        if (IS_G4X(dev) || IS_IRONLAKE(dev) || IS_GEN6(dev)) {
                        goto out_workqueue_free;
        }
  
 -      i915_get_mem_freq(dev);
 +      if (IS_PINEVIEW(dev))
 +              i915_pineview_get_mem_freq(dev);
 +      else if (IS_IRONLAKE(dev))
 +              i915_ironlake_get_mem_freq(dev);
  
        /* On the 945G/GM, the chipset reports the MSI capability on the
         * integrated graphics even though the support isn't actually there
  
        spin_lock_init(&dev_priv->user_irq_lock);
        spin_lock_init(&dev_priv->error_lock);
 -      dev_priv->user_irq_refcount = 0;
        dev_priv->trace_irq_seqno = 0;
  
        ret = drm_vblank_init(dev, I915_NUM_PIPE);
  
        setup_timer(&dev_priv->hangcheck_timer, i915_hangcheck_elapsed,
                    (unsigned long) dev);
 +
 +      spin_lock(&mchdev_lock);
 +      i915_mch_dev = dev_priv;
 +      dev_priv->mchdev_lock = &mchdev_lock;
 +      spin_unlock(&mchdev_lock);
 +
        return 0;
  
  out_workqueue_free:
@@@ -2225,10 -1760,6 +2226,10 @@@ int i915_driver_unload(struct drm_devic
  
        i915_destroy_error_state(dev);
  
 +      spin_lock(&mchdev_lock);
 +      i915_mch_dev = NULL;
 +      spin_unlock(&mchdev_lock);
 +
        destroy_workqueue(dev_priv->wq);
        del_timer_sync(&dev_priv->hangcheck_timer);
  
index 423dc90c1e20589e7669f1ba41880308af9fb044,b7aecf5ea1fc65b7ffca28f457a4259184a73bc4..65d3f3e8475b2e5a81bc0f3f14e20ad28b86dc6b
@@@ -60,95 -60,95 +60,95 @@@ extern int intel_agp_enabled
        .subdevice = PCI_ANY_ID,                \
        .driver_data = (unsigned long) info }
  
 -const static struct intel_device_info intel_i830_info = {
 +static const struct intel_device_info intel_i830_info = {
        .is_i8xx = 1, .is_mobile = 1, .cursor_needs_physical = 1,
  };
  
 -const static struct intel_device_info intel_845g_info = {
 +static const struct intel_device_info intel_845g_info = {
        .is_i8xx = 1,
  };
  
 -const static struct intel_device_info intel_i85x_info = {
 +static const struct intel_device_info intel_i85x_info = {
        .is_i8xx = 1, .is_i85x = 1, .is_mobile = 1,
        .cursor_needs_physical = 1,
  };
  
 -const static struct intel_device_info intel_i865g_info = {
 +static const struct intel_device_info intel_i865g_info = {
        .is_i8xx = 1,
  };
  
 -const static struct intel_device_info intel_i915g_info = {
 +static const struct intel_device_info intel_i915g_info = {
        .is_i915g = 1, .is_i9xx = 1, .cursor_needs_physical = 1,
  };
 -const static struct intel_device_info intel_i915gm_info = {
 +static const struct intel_device_info intel_i915gm_info = {
        .is_i9xx = 1,  .is_mobile = 1,
        .cursor_needs_physical = 1,
  };
 -const static struct intel_device_info intel_i945g_info = {
 +static const struct intel_device_info intel_i945g_info = {
        .is_i9xx = 1, .has_hotplug = 1, .cursor_needs_physical = 1,
  };
 -const static struct intel_device_info intel_i945gm_info = {
 +static const struct intel_device_info intel_i945gm_info = {
        .is_i945gm = 1, .is_i9xx = 1, .is_mobile = 1,
        .has_hotplug = 1, .cursor_needs_physical = 1,
  };
  
 -const static struct intel_device_info intel_i965g_info = {
 +static const struct intel_device_info intel_i965g_info = {
        .is_i965g = 1, .is_i9xx = 1, .has_hotplug = 1,
  };
  
 -const static struct intel_device_info intel_i965gm_info = {
 +static const struct intel_device_info intel_i965gm_info = {
        .is_i965g = 1, .is_mobile = 1, .is_i965gm = 1, .is_i9xx = 1,
        .is_mobile = 1, .has_fbc = 1, .has_rc6 = 1,
        .has_hotplug = 1,
  };
  
 -const static struct intel_device_info intel_g33_info = {
 +static const struct intel_device_info intel_g33_info = {
        .is_g33 = 1, .is_i9xx = 1, .need_gfx_hws = 1,
        .has_hotplug = 1,
  };
  
 -const static struct intel_device_info intel_g45_info = {
 +static const struct intel_device_info intel_g45_info = {
        .is_i965g = 1, .is_g4x = 1, .is_i9xx = 1, .need_gfx_hws = 1,
        .has_pipe_cxsr = 1,
        .has_hotplug = 1,
  };
  
 -const static struct intel_device_info intel_gm45_info = {
 +static const struct intel_device_info intel_gm45_info = {
        .is_i965g = 1, .is_mobile = 1, .is_g4x = 1, .is_i9xx = 1,
        .is_mobile = 1, .need_gfx_hws = 1, .has_fbc = 1, .has_rc6 = 1,
        .has_pipe_cxsr = 1,
        .has_hotplug = 1,
  };
  
 -const static struct intel_device_info intel_pineview_info = {
 +static const struct intel_device_info intel_pineview_info = {
        .is_g33 = 1, .is_pineview = 1, .is_mobile = 1, .is_i9xx = 1,
        .need_gfx_hws = 1,
        .has_hotplug = 1,
  };
  
 -const static struct intel_device_info intel_ironlake_d_info = {
 +static const struct intel_device_info intel_ironlake_d_info = {
        .is_ironlake = 1, .is_i965g = 1, .is_i9xx = 1, .need_gfx_hws = 1,
        .has_pipe_cxsr = 1,
        .has_hotplug = 1,
  };
  
 -const static struct intel_device_info intel_ironlake_m_info = {
 +static const struct intel_device_info intel_ironlake_m_info = {
        .is_ironlake = 1, .is_mobile = 1, .is_i965g = 1, .is_i9xx = 1,
        .need_gfx_hws = 1, .has_rc6 = 1,
        .has_hotplug = 1,
  };
  
 -const static struct intel_device_info intel_sandybridge_d_info = {
 +static const struct intel_device_info intel_sandybridge_d_info = {
        .is_i965g = 1, .is_i9xx = 1, .need_gfx_hws = 1,
        .has_hotplug = 1, .is_gen6 = 1,
  };
  
 -const static struct intel_device_info intel_sandybridge_m_info = {
 +static const struct intel_device_info intel_sandybridge_m_info = {
        .is_i965g = 1, .is_mobile = 1, .is_i9xx = 1, .need_gfx_hws = 1,
        .has_hotplug = 1, .is_gen6 = 1,
  };
  
 -const static struct pci_device_id pciidlist[] = {
 +static const struct pci_device_id pciidlist[] = {
        INTEL_VGA_DEVICE(0x3577, &intel_i830_info),
        INTEL_VGA_DEVICE(0x2562, &intel_845g_info),
        INTEL_VGA_DEVICE(0x3582, &intel_i85x_info),
@@@ -340,7 -340,7 +340,7 @@@ int i965_reset(struct drm_device *dev, 
        /*
         * Clear request list
         */
 -      i915_gem_retire_requests(dev);
 +      i915_gem_retire_requests(dev, &dev_priv->render_ring);
  
        if (need_display)
                i915_save_display(dev);
                }
        } else {
                DRM_ERROR("Error occurred. Don't know how to reset this chip.\n");
 +              mutex_unlock(&dev->struct_mutex);
                return -ENODEV;
        }
  
         * switched away).
         */
        if (drm_core_check_feature(dev, DRIVER_MODESET) ||
 -          !dev_priv->mm.suspended) {
 -              drm_i915_ring_buffer_t *ring = &dev_priv->ring;
 -              struct drm_gem_object *obj = ring->ring_obj;
 -              struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
 +                      !dev_priv->mm.suspended) {
 +              struct intel_ring_buffer *ring = &dev_priv->render_ring;
                dev_priv->mm.suspended = 0;
 -
 -              /* Stop the ring if it's running. */
 -              I915_WRITE(PRB0_CTL, 0);
 -              I915_WRITE(PRB0_TAIL, 0);
 -              I915_WRITE(PRB0_HEAD, 0);
 -
 -              /* Initialize the ring. */
 -              I915_WRITE(PRB0_START, obj_priv->gtt_offset);
 -              I915_WRITE(PRB0_CTL,
 -                         ((obj->size - 4096) & RING_NR_PAGES) |
 -                         RING_NO_REPORT |
 -                         RING_VALID);
 -              if (!drm_core_check_feature(dev, DRIVER_MODESET))
 -                      i915_kernel_lost_context(dev);
 -              else {
 -                      ring->head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
 -                      ring->tail = I915_READ(PRB0_TAIL) & TAIL_ADDR;
 -                      ring->space = ring->head - (ring->tail + 8);
 -                      if (ring->space < 0)
 -                              ring->space += ring->Size;
 -              }
 -
 +              ring->init(dev, ring);
                mutex_unlock(&dev->struct_mutex);
                drm_irq_uninstall(dev);
                drm_irq_install(dev);
  static int __devinit
  i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
  {
-       return drm_get_dev(pdev, ent, &driver);
+       return drm_get_pci_dev(pdev, ent, &driver);
  }
  
  static void
index 273770432298b72ae5687505e156fe5b03e0e4e4,f60a2b2ae4453bbbe8cfd29e815baaa587dd065b..0fd8e10dbbdebe259539618562322f5eba48e02f
@@@ -132,7 -132,7 +132,7 @@@ static struct drm_driver driver
  static int __devinit
  nouveau_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
  {
-       return drm_get_dev(pdev, ent, &driver);
+       return drm_get_pci_dev(pdev, ent, &driver);
  }
  
  static void
@@@ -175,13 -175,6 +175,13 @@@ nouveau_pci_suspend(struct pci_dev *pde
                nouveau_bo_unpin(nouveau_fb->nvbo);
        }
  
 +      list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
 +              struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
 +
 +              nouveau_bo_unmap(nv_crtc->cursor.nvbo);
 +              nouveau_bo_unpin(nv_crtc->cursor.nvbo);
 +      }
 +
        NV_INFO(dev, "Evicting buffers...\n");
        ttm_bo_evict_mm(&dev_priv->ttm.bdev, TTM_PL_VRAM);
  
@@@ -321,34 -314,12 +321,34 @@@ nouveau_pci_resume(struct pci_dev *pdev
                nouveau_bo_pin(nouveau_fb->nvbo, TTM_PL_FLAG_VRAM);
        }
  
 +      list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
 +              struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
 +              int ret;
 +
 +              ret = nouveau_bo_pin(nv_crtc->cursor.nvbo, TTM_PL_FLAG_VRAM);
 +              if (!ret)
 +                      ret = nouveau_bo_map(nv_crtc->cursor.nvbo);
 +              if (ret)
 +                      NV_ERROR(dev, "Could not pin/map cursor.\n");
 +      }
 +
        if (dev_priv->card_type < NV_50) {
                nv04_display_restore(dev);
                NVLockVgaCrtcs(dev, false);
        } else
                nv50_display_init(dev);
  
 +      list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
 +              struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
 +
 +              nv_crtc->cursor.set_offset(nv_crtc,
 +                                      nv_crtc->cursor.nvbo->bo.offset -
 +                                      dev_priv->vm_vram_base);
 +
 +              nv_crtc->cursor.set_pos(nv_crtc, nv_crtc->cursor_saved_x,
 +                      nv_crtc->cursor_saved_y);
 +      }
 +
        /* Force CLUT to get re-loaded during modeset */
        list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
                struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
index c1fd42b0dad19cb18e34463548c8ff4b60093747,37c7bf8e82960443b6796a286304c745245f85c5..fb6b791506b2cf7ec0f243f48b3388dd2e728f48
@@@ -471,8 -471,9 +471,9 @@@ void nouveau_mem_close(struct drm_devic
        }
  
        if (dev_priv->fb_mtrr) {
-               drm_mtrr_del(dev_priv->fb_mtrr, drm_get_resource_start(dev, 1),
-                            drm_get_resource_len(dev, 1), DRM_MTRR_WC);
+               drm_mtrr_del(dev_priv->fb_mtrr,
+                            pci_resource_start(dev->pdev, 1),
+                            pci_resource_len(dev->pdev, 1), DRM_MTRR_WC);
                dev_priv->fb_mtrr = 0;
        }
  }
@@@ -540,8 -541,7 +541,8 @@@ nouveau_mem_detect(struct drm_device *d
                dev_priv->vram_size  = nv_rd32(dev, NV04_FIFO_DATA);
                dev_priv->vram_size &= NV10_FIFO_DATA_RAM_AMOUNT_MB_MASK;
                if (dev_priv->chipset == 0xaa || dev_priv->chipset == 0xac)
 -                      dev_priv->vram_sys_base = nv_rd32(dev, 0x100e10) << 12;
 +                      dev_priv->vram_sys_base = nv_rd32(dev, 0x100e10);
 +                      dev_priv->vram_sys_base <<= 12;
        }
  
        NV_INFO(dev, "Detected %dMiB VRAM\n", (int)(dev_priv->vram_size >> 20));
@@@ -633,7 -633,7 +634,7 @@@ nouveau_mem_init(struct drm_device *dev
        struct ttm_bo_device *bdev = &dev_priv->ttm.bdev;
        int ret, dma_bits = 32;
  
-       dev_priv->fb_phys = drm_get_resource_start(dev, 1);
+       dev_priv->fb_phys = pci_resource_start(dev->pdev, 1);
        dev_priv->gart_info.type = NOUVEAU_GART_NONE;
  
        if (dev_priv->card_type >= NV_50 &&
  
        dev_priv->fb_available_size = dev_priv->vram_size;
        dev_priv->fb_mappable_pages = dev_priv->fb_available_size;
-       if (dev_priv->fb_mappable_pages > drm_get_resource_len(dev, 1))
-               dev_priv->fb_mappable_pages = drm_get_resource_len(dev, 1);
+       if (dev_priv->fb_mappable_pages > pci_resource_len(dev->pdev, 1))
+               dev_priv->fb_mappable_pages =
+                       pci_resource_len(dev->pdev, 1);
        dev_priv->fb_mappable_pages >>= PAGE_SHIFT;
  
        /* remove reserved space at end of vram from available amount */
                return ret;
        }
  
-       dev_priv->fb_mtrr = drm_mtrr_add(drm_get_resource_start(dev, 1),
-                                        drm_get_resource_len(dev, 1),
+       dev_priv->fb_mtrr = drm_mtrr_add(pci_resource_start(dev->pdev, 1),
+                                        pci_resource_len(dev->pdev, 1),
                                         DRM_MTRR_WC);
  
        return 0;
index 1caf625e472b607b92b2841f0dcf7fae44785cde,a4745e49ecf13cef848faa080bbf64bd60d64c9b..057192acdd36600abc91dc73d0850a541b65fd1c
@@@ -41,18 -41,7 +41,18 @@@ void evergreen_fini(struct radeon_devic
  
  void evergreen_pm_misc(struct radeon_device *rdev)
  {
 -
 +      int req_ps_idx = rdev->pm.requested_power_state_index;
 +      int req_cm_idx = rdev->pm.requested_clock_mode_index;
 +      struct radeon_power_state *ps = &rdev->pm.power_state[req_ps_idx];
 +      struct radeon_voltage *voltage = &ps->clock_info[req_cm_idx].voltage;
 +
 +      if ((voltage->type == VOLTAGE_SW) && voltage->voltage) {
 +              if (voltage->voltage != rdev->pm.current_vddc) {
 +                      radeon_atom_set_voltage(rdev, voltage->voltage);
 +                      rdev->pm.current_vddc = voltage->voltage;
 +                      DRM_DEBUG("Setting: v: %d\n", voltage->voltage);
 +              }
 +      }
  }
  
  void evergreen_pm_prepare(struct radeon_device *rdev)
@@@ -607,7 -596,7 +607,7 @@@ static void evergreen_mc_program(struc
        WREG32(MC_VM_FB_LOCATION, tmp);
        WREG32(HDP_NONSURFACE_BASE, (rdev->mc.vram_start >> 8));
        WREG32(HDP_NONSURFACE_INFO, (2 << 7));
 -      WREG32(HDP_NONSURFACE_SIZE, (rdev->mc.mc_vram_size - 1) | 0x3FF);
 +      WREG32(HDP_NONSURFACE_SIZE, 0x3FFFFFFF);
        if (rdev->flags & RADEON_IS_AGP) {
                WREG32(MC_VM_AGP_TOP, rdev->mc.gtt_end >> 16);
                WREG32(MC_VM_AGP_BOT, rdev->mc.gtt_start >> 16);
@@@ -1222,11 -1211,11 +1222,11 @@@ static void evergreen_gpu_init(struct r
                ps_thread_count = 128;
  
        sq_thread_resource_mgmt = NUM_PS_THREADS(ps_thread_count);
 -      sq_thread_resource_mgmt |= NUM_VS_THREADS(((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8;
 -      sq_thread_resource_mgmt |= NUM_GS_THREADS(((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8;
 -      sq_thread_resource_mgmt |= NUM_ES_THREADS(((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8;
 -      sq_thread_resource_mgmt_2 = NUM_HS_THREADS(((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8;
 -      sq_thread_resource_mgmt_2 |= NUM_LS_THREADS(((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8;
 +      sq_thread_resource_mgmt |= NUM_VS_THREADS((((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8);
 +      sq_thread_resource_mgmt |= NUM_GS_THREADS((((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8);
 +      sq_thread_resource_mgmt |= NUM_ES_THREADS((((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8);
 +      sq_thread_resource_mgmt_2 = NUM_HS_THREADS((((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8);
 +      sq_thread_resource_mgmt_2 |= NUM_LS_THREADS((((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8);
  
        sq_stack_resource_mgmt_1 = NUM_PS_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6);
        sq_stack_resource_mgmt_1 |= NUM_VS_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6);
        WREG32(VGT_GS_VERTEX_REUSE, 16);
        WREG32(PA_SC_LINE_STIPPLE_STATE, 0);
  
 +      WREG32(VGT_VERTEX_REUSE_BLOCK_CNTL, 14);
 +      WREG32(VGT_OUT_DEALLOC_CNTL, 16);
 +
        WREG32(CB_PERF_CTR0_SEL_0, 0);
        WREG32(CB_PERF_CTR0_SEL_1, 0);
        WREG32(CB_PERF_CTR1_SEL_0, 0);
        WREG32(CB_PERF_CTR3_SEL_0, 0);
        WREG32(CB_PERF_CTR3_SEL_1, 0);
  
 +      /* clear render buffer base addresses */
 +      WREG32(CB_COLOR0_BASE, 0);
 +      WREG32(CB_COLOR1_BASE, 0);
 +      WREG32(CB_COLOR2_BASE, 0);
 +      WREG32(CB_COLOR3_BASE, 0);
 +      WREG32(CB_COLOR4_BASE, 0);
 +      WREG32(CB_COLOR5_BASE, 0);
 +      WREG32(CB_COLOR6_BASE, 0);
 +      WREG32(CB_COLOR7_BASE, 0);
 +      WREG32(CB_COLOR8_BASE, 0);
 +      WREG32(CB_COLOR9_BASE, 0);
 +      WREG32(CB_COLOR10_BASE, 0);
 +      WREG32(CB_COLOR11_BASE, 0);
 +
 +      /* set the shader const cache sizes to 0 */
 +      for (i = SQ_ALU_CONST_BUFFER_SIZE_PS_0; i < 0x28200; i += 4)
 +              WREG32(i, 0);
 +      for (i = SQ_ALU_CONST_BUFFER_SIZE_HS_0; i < 0x29000; i += 4)
 +              WREG32(i, 0);
 +
        hdp_host_path_cntl = RREG32(HDP_HOST_PATH_CNTL);
        WREG32(HDP_HOST_PATH_CNTL, hdp_host_path_cntl);
  
@@@ -1334,8 -1300,8 +1334,8 @@@ int evergreen_mc_init(struct radeon_dev
        }
        rdev->mc.vram_width = numchan * chansize;
        /* Could aper size report 0 ? */
-       rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0);
-       rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0);
+       rdev->mc.aper_base = pci_resource_start(rdev->pdev, 0);
+       rdev->mc.aper_size = pci_resource_len(rdev->pdev, 0);
        /* Setup GPU memory space */
        /* size in MB on evergreen */
        rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024;
@@@ -2182,7 -2148,7 +2182,7 @@@ int evergreen_init(struct radeon_devic
        if (r)
                return r;
  
 -      rdev->accel_working = false;
 +      rdev->accel_working = true;
        r = evergreen_startup(rdev);
        if (r) {
                dev_err(rdev->dev, "disabling GPU acceleration\n");
index 3970e62eaab8f75582e0838e5770a7346731bff6,c485c2cec4da50f292ce46027c9961c46503f4ff..ab37717a5d39739209af7e58dc3ad715b99ee9af
@@@ -162,11 -162,6 +162,11 @@@ void r100_pm_init_profile(struct radeon
        rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 0;
        rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
        rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
 +      /* mid sh */
 +      rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 0;
 +      rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 0;
 +      rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
 +      rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0;
        /* high sh */
        rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 0;
        rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
        rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
        rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
        rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
 +      /* mid mh */
 +      rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 0;
 +      rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
 +      rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
 +      rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0;
        /* high mh */
        rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 0;
        rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
@@@ -1628,7 -1618,6 +1628,7 @@@ static int r100_packet0_check(struct ra
                case RADEON_TXFORMAT_RGB332:
                case RADEON_TXFORMAT_Y8:
                        track->textures[i].cpp = 1;
 +                      track->textures[i].compress_format = R100_TRACK_COMP_NONE;
                        break;
                case RADEON_TXFORMAT_AI88:
                case RADEON_TXFORMAT_ARGB1555:
                case RADEON_TXFORMAT_LDUDV655:
                case RADEON_TXFORMAT_DUDV88:
                        track->textures[i].cpp = 2;
 +                      track->textures[i].compress_format = R100_TRACK_COMP_NONE;
                        break;
                case RADEON_TXFORMAT_ARGB8888:
                case RADEON_TXFORMAT_RGBA8888:
                case RADEON_TXFORMAT_SHADOW32:
                case RADEON_TXFORMAT_LDUDUV8888:
                        track->textures[i].cpp = 4;
 +                      track->textures[i].compress_format = R100_TRACK_COMP_NONE;
                        break;
                case RADEON_TXFORMAT_DXT1:
                        track->textures[i].cpp = 1;
@@@ -2297,8 -2284,8 +2297,8 @@@ void r100_vram_init_sizes(struct radeon
        u64 config_aper_size;
  
        /* work out accessible VRAM */
-       rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0);
-       rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0);
+       rdev->mc.aper_base = pci_resource_start(rdev->pdev, 0);
+       rdev->mc.aper_size = pci_resource_len(rdev->pdev, 0);
        rdev->mc.visible_vram_size = r100_get_accessible_vram(rdev);
        /* FIXME we don't use the second aperture yet when we could use it */
        if (rdev->mc.visible_vram_size > rdev->mc.aper_size)
@@@ -2607,6 -2594,12 +2607,6 @@@ int r100_set_surface_reg(struct radeon_
        int surf_index = reg * 16;
        int flags = 0;
  
 -      /* r100/r200 divide by 16 */
 -      if (rdev->family < CHIP_R300)
 -              flags = pitch / 16;
 -      else
 -              flags = pitch / 8;
 -
        if (rdev->family <= CHIP_RS200) {
                if ((tiling_flags & (RADEON_TILING_MACRO|RADEON_TILING_MICRO))
                                 == (RADEON_TILING_MACRO|RADEON_TILING_MICRO))
        if (tiling_flags & RADEON_TILING_SWAP_32BIT)
                flags |= RADEON_SURF_AP0_SWP_32BPP | RADEON_SURF_AP1_SWP_32BPP;
  
 +      /* when we aren't tiling the pitch seems to need to be further divided down. - tested on power5 + rn50 server */
 +      if (tiling_flags & (RADEON_TILING_SWAP_16BIT | RADEON_TILING_SWAP_32BIT)) {
 +              if (!(tiling_flags & (RADEON_TILING_MACRO | RADEON_TILING_MICRO)))
 +                      if (ASIC_IS_RN50(rdev))
 +                              pitch /= 16;
 +      }
 +
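 +      /* flags already carries the tiling and swap bits at this point, so the encoded pitch is OR'ed in rather than assigned */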
 +      /* r100/r200 divide by 16 */
 +      if (rdev->family < CHIP_R300)
 +              flags |= pitch / 16;
 +      else
 +              flags |= pitch / 8;
 +
        DRM_DEBUG("writing surface %d %d %x %x\n", reg, flags, offset, offset+obj_size-1);
        WREG32(RADEON_SURFACE0_INFO + surf_index, flags);
        WREG32(RADEON_SURFACE0_LOWER_BOUND + surf_index, offset);
@@@ -3158,6 -3137,33 +3158,6 @@@ static inline void r100_cs_track_textur
        DRM_ERROR("compress format            %d\n", t->compress_format);
  }
  
 -static int r100_cs_track_cube(struct radeon_device *rdev,
 -                            struct r100_cs_track *track, unsigned idx)
 -{
 -      unsigned face, w, h;
 -      struct radeon_bo *cube_robj;
 -      unsigned long size;
 -
 -      for (face = 0; face < 5; face++) {
 -              cube_robj = track->textures[idx].cube_info[face].robj;
 -              w = track->textures[idx].cube_info[face].width;
 -              h = track->textures[idx].cube_info[face].height;
 -
 -              size = w * h;
 -              size *= track->textures[idx].cpp;
 -
 -              size += track->textures[idx].cube_info[face].offset;
 -
 -              if (size > radeon_bo_size(cube_robj)) {
 -                      DRM_ERROR("Cube texture offset greater than object size %lu %lu\n",
 -                                size, radeon_bo_size(cube_robj));
 -                      r100_cs_track_texture_print(&track->textures[idx]);
 -                      return -1;
 -              }
 -      }
 -      return 0;
 -}
 -
  static int r100_track_compress_size(int compress_format, int w, int h)
  {
        int block_width, block_height, block_bytes;
        return sz;
  }
  
 +static int r100_cs_track_cube(struct radeon_device *rdev,
 +                            struct r100_cs_track *track, unsigned idx)
 +{
 +      unsigned face, w, h;
 +      struct radeon_bo *cube_robj;
 +      unsigned long size;
 +      unsigned compress_format = track->textures[idx].compress_format;
 +
 +      for (face = 0; face < 5; face++) {
 +              cube_robj = track->textures[idx].cube_info[face].robj;
 +              w = track->textures[idx].cube_info[face].width;
 +              h = track->textures[idx].cube_info[face].height;
 +
 +              if (compress_format) {
 +                      size = r100_track_compress_size(compress_format, w, h);
 +              } else
 +                      size = w * h;
 +              size *= track->textures[idx].cpp;
 +
 +              size += track->textures[idx].cube_info[face].offset;
 +
 +              if (size > radeon_bo_size(cube_robj)) {
 +                      DRM_ERROR("Cube texture offset greater than object size %lu %lu\n",
 +                                size, radeon_bo_size(cube_robj));
 +                      r100_cs_track_texture_print(&track->textures[idx]);
 +                      return -1;
 +              }
 +      }
 +      return 0;
 +}
 +
  static int r100_cs_track_texture_check(struct radeon_device *rdev,
                                       struct r100_cs_track *track)
  {
index 3d6645ce21518a640dd0981e38a67e223e86a49e,4959619f8851eb4fea1df29d98266aae662be641..a73a6e17588d06c613c78ef8631ad4b2fc4f2c07
@@@ -130,14 -130,9 +130,14 @@@ void r600_pm_get_dynpm_state(struct rad
                                                        break;
                                                }
                                        }
 -                              } else
 -                                      rdev->pm.requested_power_state_index =
 -                                              rdev->pm.current_power_state_index - 1;
 +                              } else {
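 +                                      /* step down one power state; when already at index 0,
 +                                       * fall back to the last entry rather than underflowing
 +                                       */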
 +                                      if (rdev->pm.current_power_state_index == 0)
 +                                              rdev->pm.requested_power_state_index =
 +                                                      rdev->pm.num_power_states - 1;
 +                                      else
 +                                              rdev->pm.requested_power_state_index =
 +                                                      rdev->pm.current_power_state_index - 1;
 +                              }
                        }
                        rdev->pm.requested_clock_mode_index = 0;
                        /* don't use the power state if crtcs are active and no display flag is set */
@@@ -296,11 -291,6 +296,11 @@@ void rs780_pm_init_profile(struct radeo
                rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 0;
                rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
                rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
 +              /* mid sh */
 +              rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 0;
 +              rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 0;
 +              rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
 +              rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0;
                /* high sh */
                rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 0;
                rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 1;
                rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 0;
                rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
                rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
 +              /* mid mh */
 +              rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 0;
 +              rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = 0;
 +              rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
 +              rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0;
                /* high mh */
                rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 0;
                rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 1;
                rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 1;
                rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
                rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
 +              /* mid sh */
 +              rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 1;
 +              rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 1;
 +              rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
 +              rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0;
                /* high sh */
                rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 1;
                rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 2;
                rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 1;
                rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
                rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
 +              /* mid mh */
 +              rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 1;
 +              rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = 1;
 +              rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
 +              rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0;
                /* high mh */
                rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 1;
                rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 2;
                rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 2;
                rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
                rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
 +              /* mid sh */
 +              rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 2;
 +              rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 2;
 +              rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
 +              rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0;
                /* high sh */
                rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 2;
                rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 3;
                rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 0;
                rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
                rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
 +              /* mid mh */
 +              rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 2;
 +              rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = 0;
 +              rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
 +              rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0;
                /* high mh */
                rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 2;
                rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 3;
@@@ -410,11 -375,6 +410,11 @@@ void r600_pm_init_profile(struct radeon
                rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
                rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
                rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
 +              /* mid sh */
 +              rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
 +              rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
 +              rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
 +              rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0;
                /* high sh */
                rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
                rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
                rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
                rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
                rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
 +              /* mid mh */
 +              rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
 +              rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
 +              rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
 +              rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0;
                /* high mh */
                rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
                rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
                        rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 1;
                        rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 1;
                        rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
 -                      rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 1;
 +                      rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
 +                      /* mid sh */
 +                      rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 1;
 +                      rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 1;
 +                      rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
 +                      rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 1;
                        /* high sh */
                        rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 1;
                        rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 1;
                        rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 2;
                        rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 2;
                        rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
 -                      rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 1;
 +                      rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
 +                      /* mid mh */
 +                      rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 2;
 +                      rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = 2;
 +                      rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
 +                      rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 1;
                        /* high mh */
                        rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 2;
                        rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 2;
                                rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx =
                                        r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 0);
                                rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
 -                              rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 1;
 +                              rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
                        } else {
                                rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx =
                                        r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
                                rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx =
                                        r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
                                rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
 -                              rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 1;
 +                              rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
 +                      }
 +                      /* mid sh */
 +                      if (rdev->flags & RADEON_IS_MOBILITY) {
 +                              rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx =
 +                                      r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 0);
 +                              rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx =
 +                                      r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 0);
 +                              rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
 +                              rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 1;
 +                      } else {
 +                              rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx =
 +                                      r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
 +                              rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx =
 +                                      r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
 +                              rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
 +                              rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 1;
                        }
                        /* high sh */
                        rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx =
                                rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx =
                                        r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 1);
                                rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
 -                              rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 2;
 +                              rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
                        } else {
                                rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx =
                                        r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1);
                                rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx =
                                        r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1);
                                rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
 -                              rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 1;
 +                              rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
 +                      }
 +                      /* mid mh */
 +                      if (rdev->flags & RADEON_IS_MOBILITY) {
 +                              rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx =
 +                                      r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 1);
 +                              rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx =
 +                                      r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 1);
 +                              rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
 +                              rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 1;
 +                      } else {
 +                              rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx =
 +                                      r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1);
 +                              rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx =
 +                                      r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1);
 +                              rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
 +                              rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 1;
                        }
                        /* high mh */
                        rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx =
  
  void r600_pm_misc(struct radeon_device *rdev)
  {
 +      int req_ps_idx = rdev->pm.requested_power_state_index;
 +      int req_cm_idx = rdev->pm.requested_clock_mode_index;
 +      struct radeon_power_state *ps = &rdev->pm.power_state[req_ps_idx];
 +      struct radeon_voltage *voltage = &ps->clock_info[req_cm_idx].voltage;
  
 +      if ((voltage->type == VOLTAGE_SW) && voltage->voltage) {
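 +              /* only reprogram the core voltage through ATOM when the software-controlled value actually changes */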
 +              if (voltage->voltage != rdev->pm.current_vddc) {
 +                      radeon_atom_set_voltage(rdev, voltage->voltage);
 +                      rdev->pm.current_vddc = voltage->voltage;
 +                      DRM_DEBUG("Setting: v: %d\n", voltage->voltage);
 +              }
 +      }
  }
  
  bool r600_gui_idle(struct radeon_device *rdev)
@@@ -1102,7 -1004,7 +1102,7 @@@ static void r600_mc_program(struct rade
        WREG32(MC_VM_FB_LOCATION, tmp);
        WREG32(HDP_NONSURFACE_BASE, (rdev->mc.vram_start >> 8));
        WREG32(HDP_NONSURFACE_INFO, (2 << 7));
 -      WREG32(HDP_NONSURFACE_SIZE, rdev->mc.mc_vram_size | 0x3FF);
 +      WREG32(HDP_NONSURFACE_SIZE, 0x3FFFFFFF);
        if (rdev->flags & RADEON_IS_AGP) {
                WREG32(MC_VM_AGP_TOP, rdev->mc.gtt_end >> 22);
                WREG32(MC_VM_AGP_BOT, rdev->mc.gtt_start >> 22);
@@@ -1216,18 -1118,16 +1216,18 @@@ int r600_mc_init(struct radeon_device *
        }
        rdev->mc.vram_width = numchan * chansize;
        /* Could aper size report 0 ? */
-       rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0);
-       rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0);
+       rdev->mc.aper_base = pci_resource_start(rdev->pdev, 0);
+       rdev->mc.aper_size = pci_resource_len(rdev->pdev, 0);
        /* Setup GPU memory space */
        rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE);
        rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE);
        rdev->mc.visible_vram_size = rdev->mc.aper_size;
        r600_vram_gtt_location(rdev, &rdev->mc);
  
 -      if (rdev->flags & RADEON_IS_IGP)
 +      if (rdev->flags & RADEON_IS_IGP) {
 +              rs690_pm_info(rdev);
                rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev);
 +      }
        radeon_update_bandwidth_info(rdev);
        return 0;
  }
index 2c92137399995f17d9df664c2ae28aa9ad8bbefa,91f5b5a29a9f25b071cd36715e319f38dd5ae089..654787ec43f4d33687f2c7dd0e0318c17be92dd6
@@@ -48,12 -48,8 +48,12 @@@ static bool igp_read_bios_from_vram(str
        resource_size_t vram_base;
        resource_size_t size = 256 * 1024; /* ??? */
  
 +      if (!(rdev->flags & RADEON_IS_IGP))
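 +              /* a discrete card that hasn't been posted is unlikely to have a usable BIOS copy in VRAM */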
 +              if (!radeon_card_posted(rdev))
 +                      return false;
 +
        rdev->bios = NULL;
-       vram_base = drm_get_resource_start(rdev->ddev, 0);
+       vram_base = pci_resource_start(rdev->pdev, 0);
        bios = ioremap(vram_base, size);
        if (!bios) {
                return false;
index 5f317317aba29dc972331f32994c0acae66e9840,2a897a7ca26f0a9b24f3c830d6ca374f8bb401ae..37533bec1f25fd92f3bb45b9cd3e39e6653e7b03
@@@ -546,10 -546,8 +546,10 @@@ static void radeon_switcheroo_set_state
                /* don't suspend or resume card normally */
                rdev->powered_down = false;
                radeon_resume_kms(dev);
 +              drm_kms_helper_poll_enable(dev);
        } else {
                printk(KERN_INFO "radeon: switched off\n");
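 +              /* stop connector polling before the card is powered down */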
 +              drm_kms_helper_poll_disable(dev);
                radeon_suspend_kms(dev, pmm);
                /* don't suspend or resume card normally */
                rdev->powered_down = true;
@@@ -650,8 -648,8 +650,8 @@@ int radeon_device_init(struct radeon_de
  
        /* Registers mapping */
        /* TODO: block userspace mapping of io register */
-       rdev->rmmio_base = drm_get_resource_start(rdev->ddev, 2);
-       rdev->rmmio_size = drm_get_resource_len(rdev->ddev, 2);
+       rdev->rmmio_base = pci_resource_start(rdev->pdev, 2);
+       rdev->rmmio_size = pci_resource_len(rdev->pdev, 2);
        rdev->rmmio = ioremap(rdev->rmmio_base, rdev->rmmio_size);
        if (rdev->rmmio == NULL) {
                return -ENOMEM;
@@@ -713,7 -711,6 +713,7 @@@ int radeon_suspend_kms(struct drm_devic
  {
        struct radeon_device *rdev;
        struct drm_crtc *crtc;
 +      struct drm_connector *connector;
        int r;
  
        if (dev == NULL || dev->dev_private == NULL) {
  
        if (rdev->powered_down)
                return 0;
 +
 +      /* turn off display hw */
 +      list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
 +              drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
 +      }
 +
        /* unpin the front buffers */
        list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
                struct radeon_framebuffer *rfb = to_radeon_framebuffer(crtc->fb);
  
  int radeon_resume_kms(struct drm_device *dev)
  {
 +      struct drm_connector *connector;
        struct radeon_device *rdev = dev->dev_private;
  
        if (rdev->powered_down)
        radeon_resume(rdev);
        radeon_pm_resume(rdev);
        radeon_restore_bios_scratch_regs(rdev);
 +
 +      /* turn on display hw */
 +      list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
 +              drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
 +      }
 +
        radeon_fbdev_set_suspend(rdev, 0);
        release_console_sem();
  
index e166fe4d7c308f55ab451a710687a679dcf8720f,683e281b409298a5950f6da362ac80cc709694c6..ed0ceb3fc40a22cbc6747cbcf1f1aa8c85cb44cb
   * - 2.2.0 - add r6xx/r7xx const buffer support
   * - 2.3.0 - add MSPOS + 3D texture + r500 VAP regs
   * - 2.4.0 - add crtc id query
 + * - 2.5.0 - add get accel 2 to work around ddx breakage for evergreen
   */
  #define KMS_DRIVER_MAJOR      2
 -#define KMS_DRIVER_MINOR      4
 +#define KMS_DRIVER_MINOR      5
  #define KMS_DRIVER_PATCHLEVEL 0
  int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags);
  int radeon_driver_unload_kms(struct drm_device *dev);
@@@ -214,7 -213,6 +214,7 @@@ static struct drm_driver driver_old = 
                 .mmap = drm_mmap,
                 .poll = drm_poll,
                 .fasync = drm_fasync,
 +               .read = drm_read,
  #ifdef CONFIG_COMPAT
                 .compat_ioctl = radeon_compat_ioctl,
  #endif
@@@ -238,7 -236,7 +238,7 @@@ static struct drm_driver kms_driver
  static int __devinit
  radeon_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
  {
-       return drm_get_dev(pdev, ent, &kms_driver);
+       return drm_get_pci_dev(pdev, ent, &kms_driver);
  }
  
  static void
@@@ -303,7 -301,6 +303,7 @@@ static struct drm_driver kms_driver = 
                 .mmap = radeon_mmap,
                 .poll = drm_poll,
                 .fasync = drm_fasync,
 +               .read = drm_read,
  #ifdef CONFIG_COMPAT
                 .compat_ioctl = radeon_kms_compat_ioctl,
  #endif
index 7bb4c3e52f3b055ceafc27471e516477e4a3fa5b,340c7611f2ac8c458dfdbef9800749eefaeec16b..5ce3ccc7a42378ccecb3601d6d6b5e337f850676
@@@ -74,8 -74,7 +74,8 @@@ void rs600_pm_misc(struct radeon_devic
                        if (voltage->delay)
                                udelay(voltage->delay);
                }
 -      }
 +      } else if (voltage->type == VOLTAGE_VDDC)
 +              radeon_atom_set_voltage(rdev, voltage->vddc_id);
  
        dyn_pwrmgt_sclk_length = RREG32_PLL(DYN_PWRMGT_SCLK_LENGTH);
        dyn_pwrmgt_sclk_length &= ~REDUCED_POWER_SCLK_HILEN(0xf);
@@@ -686,8 -685,8 +686,8 @@@ void rs600_mc_init(struct radeon_devic
  {
        u64 base;
  
-       rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0);
-       rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0);
+       rdev->mc.aper_base = pci_resource_start(rdev->pdev, 0);
+       rdev->mc.aper_size = pci_resource_len(rdev->pdev, 0);
        rdev->mc.vram_is_ddr = true;
        rdev->mc.vram_width = 128;
        rdev->mc.real_vram_size = RREG32(RADEON_CONFIG_MEMSIZE);
index f4f0a61bcdce3d188dd01a5737120c4e9a0d6432,a18ba98885f398ac6c35f1708504769948710a59..5fea094ed8cb08fd223793b77e5298b6c8b87d0f
@@@ -79,13 -79,7 +79,13 @@@ void rs690_pm_info(struct radeon_devic
                        tmp.full = dfixed_const(100);
                        rdev->pm.igp_sideport_mclk.full = dfixed_const(info->info.ulBootUpMemoryClock);
                        rdev->pm.igp_sideport_mclk.full = dfixed_div(rdev->pm.igp_sideport_mclk, tmp);
 -                      rdev->pm.igp_system_mclk.full = dfixed_const(le16_to_cpu(info->info.usK8MemoryClock));
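 +                      /* if the table reports a zero K8 memory clock, fall back to the
 +                       * default mclk from the clock table, then to a hard-coded safe value
 +                       */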
 +                      if (info->info.usK8MemoryClock)
 +                              rdev->pm.igp_system_mclk.full = dfixed_const(le16_to_cpu(info->info.usK8MemoryClock));
 +                      else if (rdev->clock.default_mclk) {
 +                              rdev->pm.igp_system_mclk.full = dfixed_const(rdev->clock.default_mclk);
 +                              rdev->pm.igp_system_mclk.full = dfixed_div(rdev->pm.igp_system_mclk, tmp);
 +                      } else
 +                              rdev->pm.igp_system_mclk.full = dfixed_const(400);
                        rdev->pm.igp_ht_link_clk.full = dfixed_const(le16_to_cpu(info->info.usFSBClock));
                        rdev->pm.igp_ht_link_width.full = dfixed_const(info->info.ucHTLinkWidth);
                        break;
                        tmp.full = dfixed_const(100);
                        rdev->pm.igp_sideport_mclk.full = dfixed_const(info->info_v2.ulBootUpSidePortClock);
                        rdev->pm.igp_sideport_mclk.full = dfixed_div(rdev->pm.igp_sideport_mclk, tmp);
 -                      rdev->pm.igp_system_mclk.full = dfixed_const(info->info_v2.ulBootUpUMAClock);
 +                      if (info->info_v2.ulBootUpUMAClock)
 +                              rdev->pm.igp_system_mclk.full = dfixed_const(info->info_v2.ulBootUpUMAClock);
 +                      else if (rdev->clock.default_mclk)
 +                              rdev->pm.igp_system_mclk.full = dfixed_const(rdev->clock.default_mclk);
 +                      else
 +                              rdev->pm.igp_system_mclk.full = dfixed_const(66700);
                        rdev->pm.igp_system_mclk.full = dfixed_div(rdev->pm.igp_system_mclk, tmp);
                        rdev->pm.igp_ht_link_clk.full = dfixed_const(info->info_v2.ulHTLinkFreq);
                        rdev->pm.igp_ht_link_clk.full = dfixed_div(rdev->pm.igp_ht_link_clk, tmp);
                        rdev->pm.igp_ht_link_width.full = dfixed_const(le16_to_cpu(info->info_v2.usMinHTLinkWidth));
                        break;
                default:
 -                      tmp.full = dfixed_const(100);
                        /* We assume the slowest possible clock, i.e. worst case */
 -                      /* DDR 333Mhz */
 -                      rdev->pm.igp_sideport_mclk.full = dfixed_const(333);
 -                      /* FIXME: system clock ? */
 -                      rdev->pm.igp_system_mclk.full = dfixed_const(100);
 -                      rdev->pm.igp_system_mclk.full = dfixed_div(rdev->pm.igp_system_mclk, tmp);
 -                      rdev->pm.igp_ht_link_clk.full = dfixed_const(200);
 +                      rdev->pm.igp_sideport_mclk.full = dfixed_const(200);
 +                      rdev->pm.igp_system_mclk.full = dfixed_const(200);
 +                      rdev->pm.igp_ht_link_clk.full = dfixed_const(1000);
                        rdev->pm.igp_ht_link_width.full = dfixed_const(8);
                        DRM_ERROR("No integrated system info for your GPU, using safe default\n");
                        break;
                }
        } else {
 -              tmp.full = dfixed_const(100);
                /* We assume the slowest possible clock, i.e. worst case */
 -              /* DDR 333Mhz */
 -              rdev->pm.igp_sideport_mclk.full = dfixed_const(333);
 -              /* FIXME: system clock ? */
 -              rdev->pm.igp_system_mclk.full = dfixed_const(100);
 -              rdev->pm.igp_system_mclk.full = dfixed_div(rdev->pm.igp_system_mclk, tmp);
 -              rdev->pm.igp_ht_link_clk.full = dfixed_const(200);
 +              rdev->pm.igp_sideport_mclk.full = dfixed_const(200);
 +              rdev->pm.igp_system_mclk.full = dfixed_const(200);
 +              rdev->pm.igp_ht_link_clk.full = dfixed_const(1000);
                rdev->pm.igp_ht_link_width.full = dfixed_const(8);
                DRM_ERROR("No integrated system info for your GPU, using safe default\n");
        }
@@@ -154,8 -151,8 +154,8 @@@ void rs690_mc_init(struct radeon_devic
        rdev->mc.vram_width = 128;
        rdev->mc.real_vram_size = RREG32(RADEON_CONFIG_MEMSIZE);
        rdev->mc.mc_vram_size = rdev->mc.real_vram_size;
-       rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0);
-       rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0);
+       rdev->mc.aper_base = pci_resource_start(rdev->pdev, 0);
+       rdev->mc.aper_size = pci_resource_len(rdev->pdev, 0);
        rdev->mc.visible_vram_size = rdev->mc.aper_size;
        base = RREG32_MC(R_000100_MCCFG_FB_LOCATION);
        base = G_000100_MC_FB_START(base) << 16;
@@@ -231,6 -228,10 +231,6 @@@ void rs690_crtc_bandwidth_compute(struc
        fixed20_12 a, b, c;
        fixed20_12 pclk, request_fifo_depth, tolerable_latency, estimated_width;
        fixed20_12 consumption_time, line_time, chunk_time, read_delay_latency;
 -      /* FIXME: detect IGP with sideport memory, i don't think there is any
 -       * such product available
 -       */
 -      bool sideport = false;
  
        if (!crtc->base.enabled) {
                /* FIXME: wouldn't it better to set priority mark to maximum */
  
        /* Maximum bandwidth is the minimum bandwidth of all components */
        rdev->pm.max_bandwidth = rdev->pm.core_bandwidth;
 -      if (sideport) {
 +      if (rdev->mc.igp_sideport_enabled) {
                if (rdev->pm.max_bandwidth.full > rdev->pm.sideport_bandwidth.full &&
                        rdev->pm.sideport_bandwidth.full)
                        rdev->pm.max_bandwidth = rdev->pm.sideport_bandwidth;
index b7fd82064922342d420cacb56a7ece8a00964393,5c7f0b97c6aaab53070735ef2dd05f48caf13071..6a7bf109197157c395ffe3053477a43505d00612
@@@ -44,18 -44,7 +44,18 @@@ void rv770_fini(struct radeon_device *r
  
  void rv770_pm_misc(struct radeon_device *rdev)
  {
 -
 +      int req_ps_idx = rdev->pm.requested_power_state_index;
 +      int req_cm_idx = rdev->pm.requested_clock_mode_index;
 +      struct radeon_power_state *ps = &rdev->pm.power_state[req_ps_idx];
 +      struct radeon_voltage *voltage = &ps->clock_info[req_cm_idx].voltage;
 +
 +      if ((voltage->type == VOLTAGE_SW) && voltage->voltage) {
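 +              /* as in r600_pm_misc(), only reprogram VDDC when the software-controlled value changes */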
 +              if (voltage->voltage != rdev->pm.current_vddc) {
 +                      radeon_atom_set_voltage(rdev, voltage->voltage);
 +                      rdev->pm.current_vddc = voltage->voltage;
 +                      DRM_DEBUG("Setting: v: %d\n", voltage->voltage);
 +              }
 +      }
  }
  
  /*
@@@ -224,7 -213,7 +224,7 @@@ static void rv770_mc_program(struct rad
        WREG32(MC_VM_FB_LOCATION, tmp);
        WREG32(HDP_NONSURFACE_BASE, (rdev->mc.vram_start >> 8));
        WREG32(HDP_NONSURFACE_INFO, (2 << 7));
 -      WREG32(HDP_NONSURFACE_SIZE, (rdev->mc.mc_vram_size - 1) | 0x3FF);
 +      WREG32(HDP_NONSURFACE_SIZE, 0x3FFFFFFF);
        if (rdev->flags & RADEON_IS_AGP) {
                WREG32(MC_VM_AGP_TOP, rdev->mc.gtt_end >> 16);
                WREG32(MC_VM_AGP_BOT, rdev->mc.gtt_start >> 16);
@@@ -919,8 -908,8 +919,8 @@@ int rv770_mc_init(struct radeon_device 
        }
        rdev->mc.vram_width = numchan * chansize;
        /* Could aper size report 0 ? */
-       rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0);
-       rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0);
+       rdev->mc.aper_base = pci_resource_start(rdev->pdev, 0);
+       rdev->mc.aper_size = pci_resource_len(rdev->pdev, 0);
        /* Setup GPU memory space */
        rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE);
        rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE);
index b793c8c9acb3162049fabe03b7cf43cfab1939fb,f7f248dbff5896c9b450f7413e514a5045b12980..9dd395b90216b17c54598c9309502785a2684220
@@@ -88,9 -88,6 +88,9 @@@
  #define DRM_IOCTL_VMW_FENCE_WAIT                              \
        DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_FENCE_WAIT,         \
                 struct drm_vmw_fence_wait_arg)
 +#define DRM_IOCTL_VMW_UPDATE_LAYOUT                           \
 +      DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_UPDATE_LAYOUT,      \
 +               struct drm_vmw_update_layout_arg)
  
  
  /**
@@@ -138,9 -135,7 +138,9 @@@ static struct drm_ioctl_desc vmw_ioctls
        VMW_IOCTL_DEF(DRM_IOCTL_VMW_FIFO_DEBUG, vmw_fifo_debug_ioctl,
                      DRM_AUTH | DRM_ROOT_ONLY | DRM_MASTER | DRM_UNLOCKED),
        VMW_IOCTL_DEF(DRM_IOCTL_VMW_FENCE_WAIT, vmw_fence_wait_ioctl,
 -                    DRM_AUTH | DRM_UNLOCKED)
 +                    DRM_AUTH | DRM_UNLOCKED),
 +      VMW_IOCTL_DEF(DRM_IOCTL_VMW_UPDATE_LAYOUT, vmw_kms_update_layout_ioctl,
 +                    DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED)
  };
  
  static struct pci_device_id vmw_pci_id_list[] = {
@@@ -323,15 -318,6 +323,15 @@@ static int vmw_driver_load(struct drm_d
                goto out_err3;
        }
  
 +      /* Need mmio memory to check for fifo pitchlock cap. */
 +      if (!(dev_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY) &&
 +          !(dev_priv->capabilities & SVGA_CAP_PITCHLOCK) &&
 +          !vmw_fifo_have_pitchlock(dev_priv)) {
 +              ret = -ENOSYS;
 +              DRM_ERROR("Hardware has no pitchlock\n");
 +              goto out_err4;
 +      }
 +
        dev_priv->tdev = ttm_object_device_init
            (dev_priv->mem_global_ref.object, 12);
  
@@@ -413,6 -399,8 +413,6 @@@ static int vmw_driver_unload(struct drm
  {
        struct vmw_private *dev_priv = vmw_priv(dev);
  
 -      DRM_INFO(VMWGFX_DRIVER_NAME " unload.\n");
 -
        unregister_pm_notifier(&dev_priv->pm_nb);
  
        vmw_fb_close(dev_priv);
@@@ -558,6 -546,7 +558,6 @@@ static int vmw_master_create(struct drm
  {
        struct vmw_master *vmaster;
  
 -      DRM_INFO("Master create.\n");
        vmaster = kzalloc(sizeof(*vmaster), GFP_KERNEL);
        if (unlikely(vmaster == NULL))
                return -ENOMEM;
@@@ -574,6 -563,7 +574,6 @@@ static void vmw_master_destroy(struct d
  {
        struct vmw_master *vmaster = vmw_master(master);
  
 -      DRM_INFO("Master destroy.\n");
        master->driver_priv = NULL;
        kfree(vmaster);
  }
@@@ -589,6 -579,8 +589,6 @@@ static int vmw_master_set(struct drm_de
        struct vmw_master *vmaster = vmw_master(file_priv->master);
        int ret = 0;
  
 -      DRM_INFO("Master set.\n");
 -
        if (active) {
                BUG_ON(active != &dev_priv->fbdev_master);
                ret = ttm_vt_lock(&active->lock, false, vmw_fp->tfile);
@@@ -630,6 -622,8 +630,6 @@@ static void vmw_master_drop(struct drm_
        struct vmw_master *vmaster = vmw_master(file_priv->master);
        int ret;
  
 -      DRM_INFO("Master drop.\n");
 -
        /**
         * Make sure the master doesn't disappear while we have
         * it locked.
@@@ -764,7 -758,7 +764,7 @@@ static struct drm_driver driver = 
  
  static int vmw_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
  {
-       return drm_get_dev(pdev, ent, &driver);
+       return drm_get_pci_dev(pdev, ent, &driver);
  }
  
  static int __init vmwgfx_init(void)