lib: test_hmm add module param for zone device type
author Alex Sierra <alex.sierra@amd.com>
Fri, 15 Jul 2022 15:05:16 +0000 (10:05 -0500)
committer akpm <akpm@linux-foundation.org>
Mon, 18 Jul 2022 00:14:28 +0000 (17:14 -0700)
To configure the device coherent type in test_hmm, two module parameters
must be passed, corresponding to the SPM (special purpose memory) start
address of each of the two devices: spm_addr_dev0 and spm_addr_dev1.  If
no parameters are passed, the device private type is configured.
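
As an illustration only (not one of the hunks below), the parameter-driven
type selection could look like the following sketch inside the driver.  The
helper name dmirror_select_device_type() is hypothetical; spm_addr_dev0,
spm_addr_dev1, zone_device_type and the HMM_DMIRROR_MEMORY_DEVICE_* values
come from the patch itself:

	/*
	 * Sketch only: both SPM start addresses must be given for the
	 * coherent type, otherwise fall back to the default private type.
	 */
	static void dmirror_select_device_type(struct dmirror_device *mdevice)
	{
		if (spm_addr_dev0 && spm_addr_dev1)
			mdevice->zone_device_type = HMM_DMIRROR_MEMORY_DEVICE_COHERENT;
		else
			mdevice->zone_device_type = HMM_DMIRROR_MEMORY_DEVICE_PRIVATE;
	}

With both addresses set at module load time, the coherent branch in
dmirror_allocate_chunk() below uses the preconfigured SPM range directly
instead of requesting a free region for device private memory.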

Link: https://lkml.kernel.org/r/20220715150521.18165-10-alex.sierra@amd.com
Signed-off-by: Alex Sierra <alex.sierra@amd.com>
Signed-off-by: Christoph Hellwig <hch@lst.de>
Acked-by: Felix Kuehling <Felix.Kuehling@amd.com>
Reviewed-by: Alistair Popple <apopple@nvidia.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: Jason Gunthorpe <jgg@nvidia.com>
Cc: Jerome Glisse <jglisse@redhat.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Ralph Campbell <rcampbell@nvidia.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
lib/test_hmm.c
lib/test_hmm_uapi.h

index ed737eae5959b4c65129fdef35107cb6c8a8b7f6..436124da00e665d3990d1291d5c596b764e15992 100644
 #define DEVMEM_CHUNK_SIZE              (256 * 1024 * 1024U)
 #define DEVMEM_CHUNKS_RESERVE          16
 
+static unsigned long spm_addr_dev0;
+module_param(spm_addr_dev0, long, 0644);
+MODULE_PARM_DESC(spm_addr_dev0,
+               "Specify start address for SPM (special purpose memory) used for device 0. By setting this Coherent device type will be used. Make sure spm_addr_dev1 is set too. Minimum SPM size should be DEVMEM_CHUNK_SIZE.");
+
+static unsigned long spm_addr_dev1;
+module_param(spm_addr_dev1, long, 0644);
+MODULE_PARM_DESC(spm_addr_dev1,
+               "Specify start address for SPM (special purpose memory) used for device 1. By setting this Coherent device type will be used. Make sure spm_addr_dev0 is set too. Minimum SPM size should be DEVMEM_CHUNK_SIZE.");
+
 static const struct dev_pagemap_ops dmirror_devmem_ops;
 static const struct mmu_interval_notifier_ops dmirror_min_ops;
 static dev_t dmirror_dev;
@@ -455,28 +465,44 @@ fini:
        return ret;
 }
 
-static bool dmirror_allocate_chunk(struct dmirror_device *mdevice,
+static int dmirror_allocate_chunk(struct dmirror_device *mdevice,
                                   struct page **ppage)
 {
        struct dmirror_chunk *devmem;
-       struct resource *res;
+       struct resource *res = NULL;
        unsigned long pfn;
        unsigned long pfn_first;
        unsigned long pfn_last;
        void *ptr;
+       int ret = -ENOMEM;
 
        devmem = kzalloc(sizeof(*devmem), GFP_KERNEL);
        if (!devmem)
-               return false;
+               return ret;
 
-       res = request_free_mem_region(&iomem_resource, DEVMEM_CHUNK_SIZE,
-                                     "hmm_dmirror");
-       if (IS_ERR(res))
+       switch (mdevice->zone_device_type) {
+       case HMM_DMIRROR_MEMORY_DEVICE_PRIVATE:
+               res = request_free_mem_region(&iomem_resource, DEVMEM_CHUNK_SIZE,
+                                             "hmm_dmirror");
+               if (IS_ERR_OR_NULL(res))
+                       goto err_devmem;
+               devmem->pagemap.range.start = res->start;
+               devmem->pagemap.range.end = res->end;
+               devmem->pagemap.type = MEMORY_DEVICE_PRIVATE;
+               break;
+       case HMM_DMIRROR_MEMORY_DEVICE_COHERENT:
+               devmem->pagemap.range.start = (MINOR(mdevice->cdevice.dev) - 2) ?
+                                                       spm_addr_dev0 :
+                                                       spm_addr_dev1;
+               devmem->pagemap.range.end = devmem->pagemap.range.start +
+                                           DEVMEM_CHUNK_SIZE - 1;
+               devmem->pagemap.type = MEMORY_DEVICE_COHERENT;
+               break;
+       default:
+               ret = -EINVAL;
                goto err_devmem;
+       }
 
-       devmem->pagemap.type = MEMORY_DEVICE_PRIVATE;
-       devmem->pagemap.range.start = res->start;
-       devmem->pagemap.range.end = res->end;
        devmem->pagemap.nr_range = 1;
        devmem->pagemap.ops = &dmirror_devmem_ops;
        devmem->pagemap.owner = mdevice;
@@ -497,10 +523,14 @@ static bool dmirror_allocate_chunk(struct dmirror_device *mdevice,
                mdevice->devmem_capacity = new_capacity;
                mdevice->devmem_chunks = new_chunks;
        }
-
        ptr = memremap_pages(&devmem->pagemap, numa_node_id());
-       if (IS_ERR(ptr))
+       if (IS_ERR_OR_NULL(ptr)) {
+               if (ptr)
+                       ret = PTR_ERR(ptr);
+               else
+                       ret = -EFAULT;
                goto err_release;
+       }
 
        devmem->mdevice = mdevice;
        pfn_first = devmem->pagemap.range.start >> PAGE_SHIFT;
@@ -529,15 +559,17 @@ static bool dmirror_allocate_chunk(struct dmirror_device *mdevice,
        }
        spin_unlock(&mdevice->lock);
 
-       return true;
+       return 0;
 
 err_release:
        mutex_unlock(&mdevice->devmem_lock);
-       release_mem_region(devmem->pagemap.range.start, range_len(&devmem->pagemap.range));
+       if (res && devmem->pagemap.type == MEMORY_DEVICE_PRIVATE)
+               release_mem_region(devmem->pagemap.range.start,
+                                  range_len(&devmem->pagemap.range));
 err_devmem:
        kfree(devmem);
 
-       return false;
+       return ret;
 }
 
 static struct page *dmirror_devmem_alloc_page(struct dmirror_device *mdevice)
@@ -562,7 +594,7 @@ static struct page *dmirror_devmem_alloc_page(struct dmirror_device *mdevice)
                spin_unlock(&mdevice->lock);
        } else {
                spin_unlock(&mdevice->lock);
-               if (!dmirror_allocate_chunk(mdevice, &dpage))
+               if (dmirror_allocate_chunk(mdevice, &dpage))
                        goto error;
        }
 
@@ -1238,10 +1270,8 @@ static int dmirror_device_init(struct dmirror_device *mdevice, int id)
        if (ret)
                return ret;
 
-       /* Build a list of free ZONE_DEVICE private struct pages */
-       dmirror_allocate_chunk(mdevice, NULL);
-
-       return 0;
+       /* Build a list of free ZONE_DEVICE struct pages */
+       return dmirror_allocate_chunk(mdevice, NULL);
 }
 
 static void dmirror_device_remove(struct dmirror_device *mdevice)
@@ -1254,8 +1284,9 @@ static void dmirror_device_remove(struct dmirror_device *mdevice)
                                mdevice->devmem_chunks[i];
 
                        memunmap_pages(&devmem->pagemap);
-                       release_mem_region(devmem->pagemap.range.start,
-                                          range_len(&devmem->pagemap.range));
+                       if (devmem->pagemap.type == MEMORY_DEVICE_PRIVATE)
+                               release_mem_region(devmem->pagemap.range.start,
+                                                  range_len(&devmem->pagemap.range));
                        kfree(devmem);
                }
                kfree(mdevice->devmem_chunks);
index 0511af7464eed243c9aac163d72f449ba20c2d0f..f700da7807c1c02b5a3476df060be58909d08d75 100644
@@ -66,6 +66,7 @@ enum {
 enum {
        /* 0 is reserved to catch uninitialized type fields */
        HMM_DMIRROR_MEMORY_DEVICE_PRIVATE = 1,
+       HMM_DMIRROR_MEMORY_DEVICE_COHERENT,
 };
 
 #endif /* _LIB_TEST_HMM_UAPI_H */