On 5/17/2021 11:42 PM, Claire Chang wrote:
Add the initialization function to create restricted DMA pools from matching reserved-memory nodes.
Signed-off-by: Claire Chang <tientzu@chromium.org>
---
 include/linux/device.h  |  4 +++
 include/linux/swiotlb.h |  3 +-
 kernel/dma/swiotlb.c    | 76 +++++++++++++++++++++++++++++++++++++++++
 3 files changed, 82 insertions(+), 1 deletion(-)

diff --git a/include/linux/device.h b/include/linux/device.h
index 38a2071cf776..4987608ea4ff 100644
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -416,6 +416,7 @@ struct dev_links_info {
  * @dma_pools:	Dma pools (if dma'ble device).
  * @dma_mem:	Internal for coherent mem override.
  * @cma_area:	Contiguous memory area for dma allocations
+ * @dma_io_tlb_mem: Internal for swiotlb io_tlb_mem override.
  * @archdata:	For arch-specific additions.
  * @of_node:	Associated device tree node.
  * @fwnode:	Associated device node supplied by platform firmware.
@@ -521,6 +522,9 @@ struct device {
 #ifdef CONFIG_DMA_CMA
 	struct cma *cma_area;		/* contiguous memory area for dma
 					   allocations */
+#endif
+#ifdef CONFIG_DMA_RESTRICTED_POOL
+	struct io_tlb_mem *dma_io_tlb_mem;
 #endif
 	/* arch specific additions */
 	struct dev_archdata	archdata;
diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h
index 216854a5e513..03ad6e3b4056 100644
--- a/include/linux/swiotlb.h
+++ b/include/linux/swiotlb.h
@@ -72,7 +72,8 @@ extern enum swiotlb_force swiotlb_force;
  *		range check to see if the memory was in fact allocated by this
  *		API.
  * @nslabs:	The number of IO TLB blocks (in groups of 64) between @start and
- *		@end. This is command line adjustable via setup_io_tlb_npages.
+ *		@end. For default swiotlb, this is command line adjustable via
+ *		setup_io_tlb_npages.
  * @used:	The number of used IO TLB block.
  * @list:	The free list describing the number of free entries available
  *		from each index.
diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c
index b849b01a446f..1d8eb4de0d01 100644
--- a/kernel/dma/swiotlb.c
+++ b/kernel/dma/swiotlb.c
@@ -39,6 +39,13 @@
 #ifdef CONFIG_DEBUG_FS
 #include <linux/debugfs.h>
 #endif
+#ifdef CONFIG_DMA_RESTRICTED_POOL
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_fdt.h>
+#include <linux/of_reserved_mem.h>
+#include <linux/slab.h>
+#endif
 
 #include <asm/io.h>
 #include <asm/dma.h>
@@ -690,3 +697,72 @@ static int __init swiotlb_create_default_debugfs(void)
 late_initcall(swiotlb_create_default_debugfs);
 
 #endif
+
+#ifdef CONFIG_DMA_RESTRICTED_POOL
+static int rmem_swiotlb_device_init(struct reserved_mem *rmem,
+				    struct device *dev)
+{
+	struct io_tlb_mem *mem = rmem->priv;
+	unsigned long nslabs = rmem->size >> IO_TLB_SHIFT;
+
+	if (dev->dma_io_tlb_mem)
+		return 0;
+
+	/*
+	 * Since multiple devices can share the same pool, the private data,
+	 * io_tlb_mem struct, will be initialized by the first device attached
+	 * to it.
+	 */
+	if (!mem) {
+		mem = kzalloc(struct_size(mem, slots, nslabs), GFP_KERNEL);
+		if (!mem)
+			return -ENOMEM;
+
+		if (PageHighMem(pfn_to_page(PHYS_PFN(rmem->base)))) {
+			kfree(mem);
+			return -EINVAL;
This probably deserves a warning here to indicate that the reserved area must be accessible within the linear mapping, as I would expect a lot of people to trip over that.
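Something along these lines would do it (a minimal, untested sketch; the exact dev_err() message wording is only a suggestion):

		if (PageHighMem(pfn_to_page(PHYS_PFN(rmem->base)))) {
			/* The reserved area is not in the linear mapping, bail out loudly. */
			dev_err(dev, "Restricted DMA pool must be accessible within the linear mapping.");
			kfree(mem);
			return -EINVAL;
		}

With or without that: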
Reviewed-by: Florian Fainelli <f.fainelli@gmail.com>