If the VMM's (Qemu) memory backend is backed by memfd + hugepages (hugetlbfs, not THP), we first have to find the hugepage(s) where the Guest allocations are located and then extract the regular 4K-sized subpages from them.
Cc: Gerd Hoffmann <kraxel@redhat.com>
Signed-off-by: Vivek Kasireddy <vivek.kasireddy@intel.com>
---
 drivers/dma-buf/udmabuf.c | 40 +++++++++++++++++++++++++++++++--------
 1 file changed, 32 insertions(+), 8 deletions(-)

diff --git a/drivers/dma-buf/udmabuf.c b/drivers/dma-buf/udmabuf.c
index db732f71e59a..4a976a56cbc3 100644
--- a/drivers/dma-buf/udmabuf.c
+++ b/drivers/dma-buf/udmabuf.c
@@ -11,6 +11,7 @@
 #include <linux/shmem_fs.h>
 #include <linux/slab.h>
 #include <linux/udmabuf.h>
+#include <linux/hugetlb.h>

 static const u32 list_limit = 1024;  /* udmabuf_create_list->count limit */
 static const size_t size_limit_mb = 64; /* total dmabuf size, in megabytes */
@@ -162,8 +163,9 @@ static long udmabuf_create(struct miscdevice *device,
 	struct file *memfd = NULL;
 	struct udmabuf *ubuf;
 	struct dma_buf *buf;
-	pgoff_t pgoff, pgcnt, pgidx, pgbuf = 0, pglimit;
-	struct page *page;
+	pgoff_t pgoff, pgcnt, pgidx, pgbuf = 0, pglimit, subpgoff;
+	struct page *page, *hpage = NULL;
+	struct hstate *hpstate;
 	int seals, ret = -EINVAL;
 	u32 i, flags;
@@ -194,7 +196,8 @@ static long udmabuf_create(struct miscdevice *device,
 		memfd = fget(list[i].memfd);
 		if (!memfd)
 			goto err;
-		if (!shmem_mapping(file_inode(memfd)->i_mapping))
+		if (!shmem_mapping(file_inode(memfd)->i_mapping) &&
+		    !is_file_hugepages(memfd))
 			goto err;
 		seals = memfd_fcntl(memfd, F_GET_SEALS, 0);
 		if (seals == -EINVAL)
@@ -205,17 +208,38 @@ static long udmabuf_create(struct miscdevice *device,
 			goto err;
 		pgoff = list[i].offset >> PAGE_SHIFT;
 		pgcnt = list[i].size >> PAGE_SHIFT;
-		for (pgidx = 0; pgidx < pgcnt; pgidx++) {
-			page = shmem_read_mapping_page(
-				file_inode(memfd)->i_mapping, pgoff + pgidx);
-			if (IS_ERR(page)) {
-				ret = PTR_ERR(page);
+		if (is_file_hugepages(memfd)) {
+			hpstate = hstate_file(memfd);
+			pgoff = list[i].offset >> huge_page_shift(hpstate);
+			subpgoff = (list[i].offset &
+				    ~huge_page_mask(hpstate)) >> PAGE_SHIFT;
+			hpage = find_get_page_flags(
+					file_inode(memfd)->i_mapping,
+					pgoff, FGP_ACCESSED);
+			if (IS_ERR(hpage)) {
+				ret = PTR_ERR(hpage);
 				goto err;
 			}
+		}
+		for (pgidx = 0; pgidx < pgcnt; pgidx++) {
+			if (is_file_hugepages(memfd)) {
+				page = hpage + subpgoff + pgidx;
+				get_page(page);
+			} else {
+				page = shmem_read_mapping_page(
+					file_inode(memfd)->i_mapping,
+					pgoff + pgidx);
+				if (IS_ERR(page)) {
+					ret = PTR_ERR(page);
+					goto err;
+				}
+			}
 			ubuf->pages[pgbuf++] = page;
 		}
 		fput(memfd);
 		memfd = NULL;
+		if (hpage)
+			put_page(hpage);
 	}
exp_info.ops = &udmabuf_ops;
If the VMM's (Qemu) memory backend is backed by memfd + hugepages (hugetlbfs, not THP), we first have to find the hugepage(s) where the Guest allocations are located and then extract the regular 4K-sized subpages from them.
v2: Ensure that the subpage offsets are calculated correctly when the range of subpage allocations cuts across multiple hugepages.
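As a concrete illustration of that offset math (a standalone sketch, not part of the patch; the 2MB hugepage size and the example offset are assumptions):

	/* Decompose a hypothetical memfd offset into a hugepage index and a
	 * 4K subpage index, assuming 2MB hugepages (huge_page_shift == 21)
	 * and PAGE_SHIFT == 12, mirroring the calculation in the patch. */
	#include <stdio.h>

	int main(void)
	{
		unsigned long long offset = 0x3ff000;	/* hypothetical list[i].offset */
		unsigned long long pgoff = offset >> 21;			/* hugepage index: 1 */
		unsigned long long subpgoff = (offset & ((1ULL << 21) - 1)) >> 12;	/* subpage index: 511 */

		/* Subpage 511 is the last 4K page of hugepage 1, so the very
		 * next subpage in the range falls into hugepage 2, which is
		 * the boundary-crossing case this revision addresses. */
		printf("pgoff=%llu subpgoff=%llu\n", pgoff, subpgoff);
		return 0;
	}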
Cc: Gerd Hoffmann <kraxel@redhat.com>
Signed-off-by: Vivek Kasireddy <vivek.kasireddy@intel.com>
---
 drivers/dma-buf/udmabuf.c | 46 ++++++++++++++++++++++++++++++++-------
 1 file changed, 38 insertions(+), 8 deletions(-)

diff --git a/drivers/dma-buf/udmabuf.c b/drivers/dma-buf/udmabuf.c
index db732f71e59a..f053d12a1eb3 100644
--- a/drivers/dma-buf/udmabuf.c
+++ b/drivers/dma-buf/udmabuf.c
@@ -11,6 +11,7 @@
 #include <linux/shmem_fs.h>
 #include <linux/slab.h>
 #include <linux/udmabuf.h>
+#include <linux/hugetlb.h>

 static const u32 list_limit = 1024;  /* udmabuf_create_list->count limit */
 static const size_t size_limit_mb = 64; /* total dmabuf size, in megabytes */
@@ -162,8 +163,10 @@ static long udmabuf_create(struct miscdevice *device,
 	struct file *memfd = NULL;
 	struct udmabuf *ubuf;
 	struct dma_buf *buf;
-	pgoff_t pgoff, pgcnt, pgidx, pgbuf = 0, pglimit;
-	struct page *page;
+	pgoff_t pgoff, pgcnt, pgidx, pgbuf = 0, pglimit, subpgoff;
+	uint32_t maxsubpgs;
+	struct page *page, *hpage = NULL;
+	struct hstate *hpstate;
 	int seals, ret = -EINVAL;
 	u32 i, flags;
@@ -194,7 +197,8 @@ static long udmabuf_create(struct miscdevice *device,
 		memfd = fget(list[i].memfd);
 		if (!memfd)
 			goto err;
-		if (!shmem_mapping(file_inode(memfd)->i_mapping))
+		if (!shmem_mapping(file_inode(memfd)->i_mapping) &&
+		    !is_file_hugepages(memfd))
 			goto err;
 		seals = memfd_fcntl(memfd, F_GET_SEALS, 0);
 		if (seals == -EINVAL)
@@ -205,12 +209,38 @@ static long udmabuf_create(struct miscdevice *device,
 			goto err;
 		pgoff = list[i].offset >> PAGE_SHIFT;
 		pgcnt = list[i].size >> PAGE_SHIFT;
+		if (is_file_hugepages(memfd)) {
+			hpstate = hstate_file(memfd);
+			pgoff = list[i].offset >> huge_page_shift(hpstate);
+			subpgoff = (list[i].offset &
+				    ~huge_page_mask(hpstate)) >> PAGE_SHIFT;
+			maxsubpgs = huge_page_size(hpstate) >> PAGE_SHIFT;
+		}
 		for (pgidx = 0; pgidx < pgcnt; pgidx++) {
-			page = shmem_read_mapping_page(
-				file_inode(memfd)->i_mapping, pgoff + pgidx);
-			if (IS_ERR(page)) {
-				ret = PTR_ERR(page);
-				goto err;
+			if (is_file_hugepages(memfd)) {
+				hpage = find_get_page_flags(
+						file_inode(memfd)->i_mapping,
+						pgoff, FGP_ACCESSED);
+				if (IS_ERR(hpage)) {
+					ret = PTR_ERR(hpage);
+					goto err;
+				}
+
+				page = hpage + (subpgoff % maxsubpgs);
+				get_page(page);
+				put_page(hpage);
+
+				subpgoff++;
+				if (subpgoff % maxsubpgs == 0)
+					pgoff++;
+			} else {
+				page = shmem_read_mapping_page(
+					file_inode(memfd)->i_mapping,
+					pgoff + pgidx);
+				if (IS_ERR(page)) {
+					ret = PTR_ERR(page);
+					goto err;
+				}
 			}
 			ubuf->pages[pgbuf++] = page;
 		}
Hi,
  if (hpage && subpgoff == maxsubpgs) {
          put_page(hpage);
          hpage = NULL;
  }
  if (!hpage) {
          hpage = find_get_page_flags(...)
          [ ... ]
  }
Only look up the huge page when you cross a hugepage border.
take care, Gerd
If the VMM's (Qemu) memory backend is backed by memfd + hugepages (hugetlbfs, not THP), we first have to find the hugepage(s) where the Guest allocations are located and then extract the regular 4K-sized subpages from them.
v2: Ensure that the subpage and hugepage offsets are calculated correctly when the range of subpage allocations cuts across multiple hugepages.
v3: Instead of repeatedly looking up the hugepage for each subpage, only do it when the subpage allocation crosses over into a different hugepage. (suggested by Gerd and DW)
Cc: Gerd Hoffmann <kraxel@redhat.com>
Signed-off-by: Vivek Kasireddy <vivek.kasireddy@intel.com>
Signed-off-by: Dongwon Kim <dongwon.kim@intel.com>
---
 drivers/dma-buf/udmabuf.c | 51 +++++++++++++++++++++++++++++++++------
 1 file changed, 44 insertions(+), 7 deletions(-)

diff --git a/drivers/dma-buf/udmabuf.c b/drivers/dma-buf/udmabuf.c
index db732f71e59a..2e02bbfe30fd 100644
--- a/drivers/dma-buf/udmabuf.c
+++ b/drivers/dma-buf/udmabuf.c
@@ -11,6 +11,7 @@
 #include <linux/shmem_fs.h>
 #include <linux/slab.h>
 #include <linux/udmabuf.h>
+#include <linux/hugetlb.h>

 static const u32 list_limit = 1024;  /* udmabuf_create_list->count limit */
 static const size_t size_limit_mb = 64; /* total dmabuf size, in megabytes */
@@ -163,7 +164,9 @@ static long udmabuf_create(struct miscdevice *device,
 	struct udmabuf *ubuf;
 	struct dma_buf *buf;
 	pgoff_t pgoff, pgcnt, pgidx, pgbuf = 0, pglimit;
-	struct page *page;
+	struct page *page, *hpage = NULL;
+	pgoff_t subpgoff, maxsubpgs;
+	struct hstate *hpstate;
 	int seals, ret = -EINVAL;
 	u32 i, flags;
@@ -194,7 +197,8 @@ static long udmabuf_create(struct miscdevice *device,
 		memfd = fget(list[i].memfd);
 		if (!memfd)
 			goto err;
-		if (!shmem_mapping(file_inode(memfd)->i_mapping))
+		if (!shmem_mapping(file_inode(memfd)->i_mapping) &&
+		    !is_file_hugepages(memfd))
 			goto err;
 		seals = memfd_fcntl(memfd, F_GET_SEALS, 0);
 		if (seals == -EINVAL)
@@ -205,17 +209,50 @@ static long udmabuf_create(struct miscdevice *device,
 			goto err;
 		pgoff = list[i].offset >> PAGE_SHIFT;
 		pgcnt = list[i].size >> PAGE_SHIFT;
+		if (is_file_hugepages(memfd)) {
+			hpstate = hstate_file(memfd);
+			pgoff = list[i].offset >> huge_page_shift(hpstate);
+			subpgoff = (list[i].offset &
+				    ~huge_page_mask(hpstate)) >> PAGE_SHIFT;
+			maxsubpgs = huge_page_size(hpstate) >> PAGE_SHIFT;
+		}
 		for (pgidx = 0; pgidx < pgcnt; pgidx++) {
-			page = shmem_read_mapping_page(
-				file_inode(memfd)->i_mapping, pgoff + pgidx);
-			if (IS_ERR(page)) {
-				ret = PTR_ERR(page);
-				goto err;
+			if (is_file_hugepages(memfd)) {
+				if (!hpage) {
+					hpage = find_get_page_flags(
+						file_inode(memfd)->i_mapping,
+						pgoff, FGP_ACCESSED);
+					if (IS_ERR(hpage)) {
+						ret = PTR_ERR(hpage);
+						goto err;
+					}
+				}
+				page = hpage + subpgoff;
+				get_page(page);
+				subpgoff++;
+				if (subpgoff == maxsubpgs) {
+					put_page(hpage);
+					hpage = NULL;
+					subpgoff = 0;
+					pgoff++;
+				}
+			} else {
+				page = shmem_read_mapping_page(
+					file_inode(memfd)->i_mapping,
+					pgoff + pgidx);
+				if (IS_ERR(page)) {
+					ret = PTR_ERR(page);
+					goto err;
+				}
 			}
 			ubuf->pages[pgbuf++] = page;
 		}
 		fput(memfd);
 		memfd = NULL;
+		if (hpage) {
+			put_page(hpage);
+			hpage = NULL;
+		}
 	}
exp_info.ops = &udmabuf_ops;
I see that the number of entries in the list often exceeds list_limit, currently hardcoded to 1024, for a full HD scanout resource (1920*1080*4 bytes). Can we include a change in this patch to increase it to something like 4096 or higher?
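For reference, the arithmetic behind that observation (assuming 4K pages): 1920 * 1080 * 4 bytes = 8,294,400 bytes, which is exactly 2025 pages of 4 KiB. If the guest framebuffer's backing pages are largely non-contiguous, each page can end up as its own udmabuf_create_item entry, so the entry count approaches 2025 and overflows the current list_limit of 1024.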
On Fri, Jun 04, 2021 at 01:59:39PM -0700, Vivek Kasireddy wrote:
If the VMM's (Qemu) memory backend is backed by memfd + hugepages (hugetlbfs, not THP), we first have to find the hugepage(s) where the Guest allocations are located and then extract the regular 4K-sized subpages from them.
v2: Ensure that the subpage and hugepage offsets are calculated correctly when the range of subpage allocations cuts across multiple hugepages.
v3: Instead of repeatedly looking up the hugepage for each subpage, only do it when the subpage allocation crosses over into a different hugepage. (suggested by Gerd and DW)
v4: Fix the following warning identified by checkpatch: CHECK:OPEN_ENDED_LINE: Lines should not end with a '('
Cc: Gerd Hoffmann <kraxel@redhat.com>
Signed-off-by: Vivek Kasireddy <vivek.kasireddy@intel.com>
Signed-off-by: Dongwon Kim <dongwon.kim@intel.com>
---
 drivers/dma-buf/udmabuf.c | 50 +++++++++++++++++++++++++++++++++------
 1 file changed, 43 insertions(+), 7 deletions(-)

diff --git a/drivers/dma-buf/udmabuf.c b/drivers/dma-buf/udmabuf.c
index db732f71e59a..d509f0d60794 100644
--- a/drivers/dma-buf/udmabuf.c
+++ b/drivers/dma-buf/udmabuf.c
@@ -11,6 +11,7 @@
 #include <linux/shmem_fs.h>
 #include <linux/slab.h>
 #include <linux/udmabuf.h>
+#include <linux/hugetlb.h>

 static const u32 list_limit = 1024;  /* udmabuf_create_list->count limit */
 static const size_t size_limit_mb = 64; /* total dmabuf size, in megabytes */
@@ -160,10 +161,13 @@ static long udmabuf_create(struct miscdevice *device,
 {
 	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
 	struct file *memfd = NULL;
+	struct address_space *mapping = NULL;
 	struct udmabuf *ubuf;
 	struct dma_buf *buf;
 	pgoff_t pgoff, pgcnt, pgidx, pgbuf = 0, pglimit;
-	struct page *page;
+	struct page *page, *hpage = NULL;
+	pgoff_t subpgoff, maxsubpgs;
+	struct hstate *hpstate;
 	int seals, ret = -EINVAL;
 	u32 i, flags;
@@ -194,7 +198,8 @@ static long udmabuf_create(struct miscdevice *device,
 		memfd = fget(list[i].memfd);
 		if (!memfd)
 			goto err;
-		if (!shmem_mapping(file_inode(memfd)->i_mapping))
+		mapping = file_inode(memfd)->i_mapping;
+		if (!shmem_mapping(mapping) && !is_file_hugepages(memfd))
 			goto err;
 		seals = memfd_fcntl(memfd, F_GET_SEALS, 0);
 		if (seals == -EINVAL)
@@ -205,17 +210,48 @@ static long udmabuf_create(struct miscdevice *device,
 			goto err;
 		pgoff = list[i].offset >> PAGE_SHIFT;
 		pgcnt = list[i].size >> PAGE_SHIFT;
+		if (is_file_hugepages(memfd)) {
+			hpstate = hstate_file(memfd);
+			pgoff = list[i].offset >> huge_page_shift(hpstate);
+			subpgoff = (list[i].offset &
+				    ~huge_page_mask(hpstate)) >> PAGE_SHIFT;
+			maxsubpgs = huge_page_size(hpstate) >> PAGE_SHIFT;
+		}
 		for (pgidx = 0; pgidx < pgcnt; pgidx++) {
-			page = shmem_read_mapping_page(
-				file_inode(memfd)->i_mapping, pgoff + pgidx);
-			if (IS_ERR(page)) {
-				ret = PTR_ERR(page);
-				goto err;
+			if (is_file_hugepages(memfd)) {
+				if (!hpage) {
+					hpage = find_get_page_flags(mapping,
+						pgoff, FGP_ACCESSED);
+					if (IS_ERR(hpage)) {
+						ret = PTR_ERR(hpage);
+						goto err;
+					}
+				}
+				page = hpage + subpgoff;
+				get_page(page);
+				subpgoff++;
+				if (subpgoff == maxsubpgs) {
+					put_page(hpage);
+					hpage = NULL;
+					subpgoff = 0;
+					pgoff++;
+				}
+			} else {
+				page = shmem_read_mapping_page(mapping,
+							       pgoff + pgidx);
+				if (IS_ERR(page)) {
+					ret = PTR_ERR(page);
+					goto err;
+				}
 			}
 			ubuf->pages[pgbuf++] = page;
 		}
 		fput(memfd);
 		memfd = NULL;
+		if (hpage) {
+			put_page(hpage);
+			hpage = NULL;
+		}
 	}
exp_info.ops = &udmabuf_ops;
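For completeness, a minimal userspace sketch of how a hugetlbfs-backed memfd could be exported through udmabuf once this is applied (illustrative only: error handling is trimmed, the 2MB default hugepage size is an assumption, and the pages are touched first because the driver only looks them up with find_get_page_flags()):

	#define _GNU_SOURCE
	#include <fcntl.h>
	#include <stdio.h>
	#include <string.h>
	#include <sys/ioctl.h>
	#include <sys/mman.h>
	#include <unistd.h>
	#include <linux/udmabuf.h>

	#define BUF_SIZE (2UL * 1024 * 1024)	/* one hugepage, assuming 2MB default size */

	int main(void)
	{
		struct udmabuf_create create = { 0 };
		int memfd, devfd, dmabuf_fd;
		void *addr;

		/* Hugetlb-backed, sealable memfd; udmabuf requires F_SEAL_SHRINK. */
		memfd = memfd_create("guest-fb", MFD_HUGETLB | MFD_ALLOW_SEALING);
		if (memfd < 0 || ftruncate(memfd, BUF_SIZE) < 0)
			return 1;
		if (fcntl(memfd, F_ADD_SEALS, F_SEAL_SHRINK) < 0)
			return 1;

		/* Fault the hugepage in so the driver can find it in the mapping. */
		addr = mmap(NULL, BUF_SIZE, PROT_READ | PROT_WRITE, MAP_SHARED, memfd, 0);
		if (addr == MAP_FAILED)
			return 1;
		memset(addr, 0, BUF_SIZE);

		devfd = open("/dev/udmabuf", O_RDWR);
		if (devfd < 0)
			return 1;

		create.memfd = memfd;
		create.flags = UDMABUF_FLAGS_CLOEXEC;
		create.offset = 0;
		create.size = BUF_SIZE;

		/* Returns a dma-buf fd backed by the 4K subpages of the hugepage. */
		dmabuf_fd = ioctl(devfd, UDMABUF_CREATE, &create);
		if (dmabuf_fd < 0)
			return 1;

		printf("dma-buf fd: %d\n", dmabuf_fd);
		return 0;
	}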