On Fri, Jun 18, 2021 at 7:30 AM Stefano Stabellini <sstabellini@kernel.org> wrote:
On Thu, 17 Jun 2021, Claire Chang wrote:
Add a new function, swiotlb_init_io_tlb_mem, to handle the io_tlb_mem struct initialization so that the code can be reused.
Signed-off-by: Claire Chang <tientzu@chromium.org>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Tested-by: Stefano Stabellini <sstabellini@kernel.org>
Tested-by: Will Deacon <will@kernel.org>
---
 kernel/dma/swiotlb.c | 50 ++++++++++++++++++++++----------------------
 1 file changed, 25 insertions(+), 25 deletions(-)
diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c
index 52e2ac526757..47bb2a766798 100644
--- a/kernel/dma/swiotlb.c
+++ b/kernel/dma/swiotlb.c
@@ -168,9 +168,28 @@ void __init swiotlb_update_mem_attributes(void)
 	memset(vaddr, 0, bytes);
 }
 
-int __init swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose)
+static void swiotlb_init_io_tlb_mem(struct io_tlb_mem *mem, phys_addr_t start,
+				    unsigned long nslabs, bool late_alloc)
 {
+	void *vaddr = phys_to_virt(start);
 	unsigned long bytes = nslabs << IO_TLB_SHIFT, i;
+
+	mem->nslabs = nslabs;
+	mem->start = start;
+	mem->end = mem->start + bytes;
+	mem->index = 0;
+	mem->late_alloc = late_alloc;
+	spin_lock_init(&mem->lock);
+	for (i = 0; i < mem->nslabs; i++) {
+		mem->slots[i].list = IO_TLB_SEGSIZE - io_tlb_offset(i);
+		mem->slots[i].orig_addr = INVALID_PHYS_ADDR;
+		mem->slots[i].alloc_size = 0;
+	}
+	memset(vaddr, 0, bytes);
+}
+
+int __init swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose)
+{
 	struct io_tlb_mem *mem;
 	size_t alloc_size;
 
@@ -186,16 +205,8 @@ int __init swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose)
 	if (!mem)
 		panic("%s: Failed to allocate %zu bytes align=0x%lx\n",
 		      __func__, alloc_size, PAGE_SIZE);
-	mem->nslabs = nslabs;
-	mem->start = __pa(tlb);
-	mem->end = mem->start + bytes;
-	mem->index = 0;
-	spin_lock_init(&mem->lock);
-	for (i = 0; i < mem->nslabs; i++) {
-		mem->slots[i].list = IO_TLB_SEGSIZE - io_tlb_offset(i);
-		mem->slots[i].orig_addr = INVALID_PHYS_ADDR;
-		mem->slots[i].alloc_size = 0;
-	}
+
+	swiotlb_init_io_tlb_mem(mem, __pa(tlb), nslabs, false);
 
 	io_tlb_default_mem = mem;
 	if (verbose)
@@ -282,8 +293,8 @@ swiotlb_late_init_with_default_size(size_t default_size)
 int
 swiotlb_late_init_with_tbl(char *tlb, unsigned long nslabs)
 {
-	unsigned long bytes = nslabs << IO_TLB_SHIFT, i;
 	struct io_tlb_mem *mem;
+	unsigned long bytes = nslabs << IO_TLB_SHIFT;
 
 	if (swiotlb_force == SWIOTLB_NO_FORCE)
 		return 0;
@@ -297,20 +308,9 @@ swiotlb_late_init_with_tbl(char *tlb, unsigned long nslabs)
 	if (!mem)
 		return -ENOMEM;
 
-	mem->nslabs = nslabs;
-	mem->start = virt_to_phys(tlb);
-	mem->end = mem->start + bytes;
-	mem->index = 0;
-	mem->late_alloc = 1;
-	spin_lock_init(&mem->lock);
-	for (i = 0; i < mem->nslabs; i++) {
-		mem->slots[i].list = IO_TLB_SEGSIZE - io_tlb_offset(i);
-		mem->slots[i].orig_addr = INVALID_PHYS_ADDR;
-		mem->slots[i].alloc_size = 0;
-	}
+	memset(mem, 0, sizeof(*mem));
+	swiotlb_init_io_tlb_mem(mem, virt_to_phys(tlb), nslabs, true);
 
 	set_memory_decrypted((unsigned long)tlb, bytes >> PAGE_SHIFT);
-	memset(tlb, 0, bytes);
This is good for swiotlb_late_init_with_tbl. However, I have just noticed that mem could also be allocated from swiotlb_init_with_tbl, in which case the zeroing is missing. I think we need another memset in swiotlb_init_with_tbl as well. Or maybe it would be better to have a single memset at the beginning of swiotlb_init_io_tlb_mem instead. Up to you.
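To illustrate the second option, the single-memset version could look something like this (just a sketch, untested; the zeroing would then cover both allocation paths, and the memset(mem, 0, sizeof(*mem)) above would be dropped from swiotlb_late_init_with_tbl):

	static void swiotlb_init_io_tlb_mem(struct io_tlb_mem *mem, phys_addr_t start,
					    unsigned long nslabs, bool late_alloc)
	{
		void *vaddr = phys_to_virt(start);
		unsigned long bytes = nslabs << IO_TLB_SHIFT, i;

		/*
		 * Zero the struct once here instead of in each caller.
		 * Note sizeof(*mem) only covers the fixed part of the
		 * struct; the slots[] flexible array is fully
		 * initialized by the loop below.
		 */
		memset(mem, 0, sizeof(*mem));

		mem->nslabs = nslabs;
		mem->start = start;
		mem->end = mem->start + bytes;
		mem->index = 0;
		mem->late_alloc = late_alloc;
		spin_lock_init(&mem->lock);
		for (i = 0; i < mem->nslabs; i++) {
			mem->slots[i].list = IO_TLB_SEGSIZE - io_tlb_offset(i);
			mem->slots[i].orig_addr = INVALID_PHYS_ADDR;
			mem->slots[i].alloc_size = 0;
		}
		memset(vaddr, 0, bytes);
	}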
swiotlb_init_with_tbl uses memblock_alloc to allocate the io_tlb_mem struct, and memblock_alloc [1] already zeroes the allocation via the memset in memblock_alloc_try_nid [2], so swiotlb_init_with_tbl is also fine. I'm happy to add the memset to swiotlb_init_io_tlb_mem if you think that is clearer and safer.
[1] https://elixir.bootlin.com/linux/v5.13-rc6/source/include/linux/memblock.h#L...
[2] https://elixir.bootlin.com/linux/v5.13-rc6/source/mm/memblock.c#L1555
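For reference, the zeroing path looks roughly like this in v5.13 (abridged from the files linked above; see [1] and [2] for the exact lines):

	/* include/linux/memblock.h */
	static inline void *memblock_alloc(phys_addr_t size, phys_addr_t align)
	{
		return memblock_alloc_try_nid(size, align, MEMBLOCK_LOW_LIMIT,
					      MEMBLOCK_ALLOC_ACCESSIBLE, NUMA_NO_NODE);
	}

	/* mm/memblock.c (abridged) */
	void * __init memblock_alloc_try_nid(phys_addr_t size, phys_addr_t align,
					     phys_addr_t min_addr, phys_addr_t max_addr,
					     int nid)
	{
		void *ptr;

		ptr = memblock_alloc_internal(size, align, min_addr, max_addr,
					      nid, false);
		if (ptr)
			memset(ptr, 0, size);	/* <- the zeroing mentioned above */

		return ptr;
	}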