On Tuesday, 14 September 2021 2:16:01 AM AEST Alex Sierra wrote:
In order to configure device public memory in test_hmm, two module parameters must be passed, one for the SPM (special purpose memory) start address of each of the two devices: spm_addr_dev0 and spm_addr_dev1. If no parameters are passed, the private device type is configured.
It's a pity that testing this seems to require some amount of special setup. Is there a way this could be made to work on a more standard setup, similar to how DEVICE_PRIVATE is tested?
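For anyone trying this out, my understanding is that the intended invocation looks something like the following, with both parameters pointing at the start of special purpose memory reserved for each device (the addresses below are purely hypothetical placeholders):

  modprobe test_hmm spm_addr_dev0=0x100000000 spm_addr_dev1=0x110000000

Each device then uses a DEVMEM_CHUNK_SIZE range starting at the given address, whereas loading test_hmm with no parameters keeps the existing DEVICE_PRIVATE behaviour.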
Signed-off-by: Alex Sierra <alex.sierra@amd.com>
v5: Remove the devmem->pagemap.type = MEMORY_DEVICE_PRIVATE assignment in dmirror_allocate_chunk that was forcing pagemap.type to MEMORY_DEVICE_PRIVATE.
v6: Check for NULL pointers on the resource and memremap references in dmirror_allocate_chunk.
v7: Since the patch "kernel: resource: lookup_resource as exported symbol" was dropped from this patch series, lookup_resource is no longer a callable function. It was used in the public device configuration to get the start and end addresses for the pgmap->range struct. This information is now taken directly from the spm_addr_devX parameters and the fixed size DEVMEM_CHUNK_SIZE.
 lib/test_hmm.c      | 66 ++++++++++++++++++++++++++++++++--------------------
 lib/test_hmm_uapi.h |  1 +
 2 files changed, 47 insertions(+), 20 deletions(-)
diff --git a/lib/test_hmm.c b/lib/test_hmm.c
index 3cd91ca31dd7..ef27e355738a 100644
--- a/lib/test_hmm.c
+++ b/lib/test_hmm.c
@@ -33,6 +33,16 @@
 #define DEVMEM_CHUNK_SIZE		(256 * 1024 * 1024U)
 #define DEVMEM_CHUNKS_RESERVE		16
 
+static unsigned long spm_addr_dev0;
+module_param(spm_addr_dev0, long, 0644);
+MODULE_PARM_DESC(spm_addr_dev0,
+		"Specify start address for SPM (special purpose memory) used for device 0. By setting this Generic device type will be used. Make sure spm_addr_dev1 is set too");
+
+static unsigned long spm_addr_dev1;
+module_param(spm_addr_dev1, long, 0644);
+MODULE_PARM_DESC(spm_addr_dev1,
+		"Specify start address for SPM (special purpose memory) used for device 1. By setting this Generic device type will be used. Make sure spm_addr_dev0 is set too");
+
 static const struct dev_pagemap_ops dmirror_devmem_ops;
 static const struct mmu_interval_notifier_ops dmirror_min_ops;
 static dev_t dmirror_dev;
@@ -450,11 +460,11 @@ static int dmirror_write(struct dmirror *dmirror, struct hmm_dmirror_cmd *cmd)
 	return ret;
 }
 
-static bool dmirror_allocate_chunk(struct dmirror_device *mdevice,
+static int dmirror_allocate_chunk(struct dmirror_device *mdevice,
 			       struct page **ppage)
 {
 	struct dmirror_chunk *devmem;
-	struct resource *res;
+	struct resource *res = NULL;
 	unsigned long pfn;
 	unsigned long pfn_first;
 	unsigned long pfn_last;
@@ -462,17 +472,29 @@ static bool dmirror_allocate_chunk(struct dmirror_device *mdevice,
 	devmem = kzalloc(sizeof(*devmem), GFP_KERNEL);
 	if (!devmem)
-		return false;
+		return -ENOMEM;
 
-	res = request_free_mem_region(&iomem_resource, DEVMEM_CHUNK_SIZE,
-				      "hmm_dmirror");
-	if (IS_ERR(res))
+	if (!spm_addr_dev0 && !spm_addr_dev1) {
+		res = request_free_mem_region(&iomem_resource, DEVMEM_CHUNK_SIZE,
+					      "hmm_dmirror");
+		if (IS_ERR_OR_NULL(res))
+			goto err_devmem;
+		devmem->pagemap.range.start = res->start;
+		devmem->pagemap.range.end = res->end;
+		devmem->pagemap.type = MEMORY_DEVICE_PRIVATE;
+		mdevice->zone_device_type = HMM_DMIRROR_MEMORY_DEVICE_PRIVATE;
+	} else if (spm_addr_dev0 && spm_addr_dev1) {
+		devmem->pagemap.range.start = MINOR(mdevice->cdevice.dev) ?
+							spm_addr_dev0 :
+							spm_addr_dev1;
+		devmem->pagemap.range.end = devmem->pagemap.range.start +
+					    DEVMEM_CHUNK_SIZE - 1;
+		devmem->pagemap.type = MEMORY_DEVICE_PUBLIC;
+		mdevice->zone_device_type = HMM_DMIRROR_MEMORY_DEVICE_PUBLIC;
+	} else {
+		pr_err("Both spm_addr_dev parameters should be set\n");
 		goto err_devmem;
+	}
 
-	mdevice->zone_device_type = HMM_DMIRROR_MEMORY_DEVICE_PRIVATE;
-	devmem->pagemap.type = MEMORY_DEVICE_PRIVATE;
-	devmem->pagemap.range.start = res->start;
-	devmem->pagemap.range.end = res->end;
 	devmem->pagemap.nr_range = 1;
 	devmem->pagemap.ops = &dmirror_devmem_ops;
 	devmem->pagemap.owner = mdevice;
@@ -493,10 +515,14 @@ static bool dmirror_allocate_chunk(struct dmirror_device *mdevice,
 		mdevice->devmem_capacity = new_capacity;
 		mdevice->devmem_chunks = new_chunks;
 	}
-
 	ptr = memremap_pages(&devmem->pagemap, numa_node_id());
-	if (IS_ERR(ptr))
+	if (IS_ERR_OR_NULL(ptr)) {
+		if (ptr)
+			ret = PTR_ERR(ptr);
+		else
+			ret = -EFAULT;
 		goto err_release;
+	}
 
 	devmem->mdevice = mdevice;
 	pfn_first = devmem->pagemap.range.start >> PAGE_SHIFT;
@@ -529,7 +555,8 @@ static bool dmirror_allocate_chunk(struct dmirror_device *mdevice,
 
 err_release:
 	mutex_unlock(&mdevice->devmem_lock);
-	release_mem_region(devmem->pagemap.range.start, range_len(&devmem->pagemap.range));
+	if (res)
+		release_mem_region(devmem->pagemap.range.start, range_len(&devmem->pagemap.range));
 err_devmem:
 	kfree(devmem);
@@ -1097,10 +1124,8 @@ static int dmirror_device_init(struct dmirror_device *mdevice, int id)
 	if (ret)
 		return ret;
 
-	/* Build a list of free ZONE_DEVICE private struct pages */
-	dmirror_allocate_chunk(mdevice, NULL);
-
-	return 0;
+	/* Build a list of free ZONE_DEVICE struct pages */
+	return dmirror_allocate_chunk(mdevice, NULL);
 }
 
 static void dmirror_device_remove(struct dmirror_device *mdevice)
@@ -1113,8 +1138,9 @@ static void dmirror_device_remove(struct dmirror_device *mdevice)
 				mdevice->devmem_chunks[i];
 
 			memunmap_pages(&devmem->pagemap);
-			release_mem_region(devmem->pagemap.range.start,
-					   range_len(&devmem->pagemap.range));
+			if (devmem->pagemap.type == MEMORY_DEVICE_PRIVATE)
+				release_mem_region(devmem->pagemap.range.start,
+						   range_len(&devmem->pagemap.range));
 			kfree(devmem);
 		}
 		kfree(mdevice->devmem_chunks);
diff --git a/lib/test_hmm_uapi.h b/lib/test_hmm_uapi.h
index ee88701793d5..00259d994410 100644
--- a/lib/test_hmm_uapi.h
+++ b/lib/test_hmm_uapi.h
@@ -65,6 +65,7 @@ enum {
 enum {
 	/* 0 is reserved to catch uninitialized type fields */
 	HMM_DMIRROR_MEMORY_DEVICE_PRIVATE = 1,
+	HMM_DMIRROR_MEMORY_DEVICE_PUBLIC,
 };
 
 #endif /* _LIB_TEST_HMM_UAPI_H */