/* Call ACPI methods: require modeset init
* but failure is not fatal
*/
- if (!r) {
- acpi_status = amdgpu_acpi_init(adev);
- if (acpi_status)
- dev_dbg(&dev->pdev->dev,
- "Error during ACPI methods call\n");
- }
+
+ acpi_status = amdgpu_acpi_init(adev);
+ if (acpi_status)
+ dev_dbg(&dev->pdev->dev, "Error during ACPI methods call\n");
if (adev->runpm) {
- dev_pm_set_driver_flags(dev->dev, DPM_FLAG_NO_DIRECT_COMPLETE);
+ /* only need to skip on ATPX */
+ if (amdgpu_device_supports_boco(dev) &&
+     !amdgpu_is_atpx_hybrid())
+	dev_pm_set_driver_flags(dev->dev, DPM_FLAG_NO_DIRECT_COMPLETE);
pm_runtime_use_autosuspend(dev->dev);
pm_runtime_set_autosuspend_delay(dev->dev, 5000);
- pm_runtime_set_active(dev->dev);
pm_runtime_allow(dev->dev);
pm_runtime_mark_last_busy(dev->dev);
pm_runtime_put_autosuspend(dev->dev);
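For reference, a minimal sketch of the runtime-PM autosuspend idiom the hunk above sets up, written against a generic struct device (illustrative names, not amdgpu code):

#include <linux/pm_runtime.h>

/* Illustrative sketch, not amdgpu code: enable autosuspend on a device. */
static void example_enable_autosuspend(struct device *dev)
{
	pm_runtime_use_autosuspend(dev);		/* honor the autosuspend delay */
	pm_runtime_set_autosuspend_delay(dev, 5000);	/* stay idle 5s before suspending */
	pm_runtime_allow(dev);				/* permit runtime PM on this device */
	pm_runtime_mark_last_busy(dev);			/* restart the idle timer */
	pm_runtime_put_autosuspend(dev);		/* drop usage count; may schedule suspend */
}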
struct nouveau_dmem {
struct nouveau_drm *drm;
- struct dev_pagemap pagemap;
struct nouveau_dmem_migrate migrate;
- struct list_head chunk_free;
- struct list_head chunk_full;
- struct list_head chunk_empty;
+ struct list_head chunks;
struct mutex mutex;
+ struct page *free_pages;
+ spinlock_t lock;
};
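A hedged sketch of how the new free_pages list and lock pair up, assuming free device pages are chained through page->zone_device_data as nouveau does (my_dmem_page_alloc is a hypothetical name):

#include <linux/mm.h>
#include <linux/spinlock.h>

/* Hypothetical helper: pop one free device page off the dmem free list. */
static struct page *my_dmem_page_alloc(struct nouveau_dmem *dmem)
{
	struct page *page;

	spin_lock(&dmem->lock);
	page = dmem->free_pages;
	if (page)
		dmem->free_pages = page->zone_device_data;
	spin_unlock(&dmem->lock);

	return page;
}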
- static inline struct nouveau_dmem *page_to_dmem(struct page *page)
+ static struct nouveau_dmem_chunk *nouveau_page_to_chunk(struct page *page)
+ {
+ return container_of(page->pgmap, struct nouveau_dmem_chunk, pagemap);
+ }
+
+ static struct nouveau_drm *page_to_drm(struct page *page)
{
- return container_of(page->pgmap, struct nouveau_dmem, pagemap);
+ struct nouveau_dmem_chunk *chunk = nouveau_page_to_chunk(page);
+
+ return chunk->drm;
}
-static unsigned long nouveau_dmem_page_addr(struct page *page)
+unsigned long nouveau_dmem_page_addr(struct page *page)
{
- struct nouveau_dmem_chunk *chunk = page->zone_device_data;
- unsigned long idx = page_to_pfn(page) - chunk->pfn_first;
+ struct nouveau_dmem_chunk *chunk = nouveau_page_to_chunk(page);
+ unsigned long off = (page_to_pfn(page) << PAGE_SHIFT) -
+ chunk->pagemap.res.start;

- return (idx << PAGE_SHIFT) + chunk->bo->bo.offset;
+ return chunk->bo->bo.offset + off;
}
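The rewritten helpers depend on embedding the dev_pagemap in each chunk so that container_of() on page->pgmap recovers the owner; a generic sketch of that pattern (my_chunk and my_page_to_chunk are hypothetical names):

#include <linux/memremap.h>
#include <linux/mm.h>

struct my_chunk {
	struct dev_pagemap pagemap;	/* embedded by value, not a pointer */
	/* ... per-chunk state ... */
};

static struct my_chunk *my_page_to_chunk(struct page *page)
{
	/* page->pgmap points into the chunk, so container_of() walks back
	 * to the owning structure with no per-page lookaside data. */
	return container_of(page->pgmap, struct my_chunk, pagemap);
}

The byte offset within the chunk then falls out of the pfn alone, which is what the new nouveau_dmem_page_addr() computes via (page_to_pfn(page) << PAGE_SHIFT) - chunk->pagemap.res.start.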
static void nouveau_dmem_page_free(struct page *page)
struct mutex context_lock;
};
- /* virtio_ioctl.c */
+ /* virtgpu_ioctl.c */
#define DRM_VIRTIO_NUM_IOCTLS 10
extern struct drm_ioctl_desc virtio_gpu_ioctls[DRM_VIRTIO_NUM_IOCTLS];
+void virtio_gpu_create_context(struct drm_device *dev, struct drm_file *file);
- /* virtio_kms.c */
+ /* virtgpu_kms.c */
int virtio_gpu_init(struct drm_device *dev);
void virtio_gpu_deinit(struct drm_device *dev);
void virtio_gpu_release(struct drm_device *dev);
* Attachment operations implemented by the importer.
*/
struct dma_buf_attach_ops {
+ /**
+ * @allow_peer2peer:
+ *
+ * If this is set to true the importer must be able to handle peer
+ * resources without struct pages.
+ */
+ bool allow_peer2peer;
+
/**
- * @move_notify
+ * @move_notify: [optional] notification that the DMA-buf is moving
*
* If this callback is provided the framework can avoid pinning the
* backing store while mappings exist.