On Tue, Jan 12, 2021 at 09:10:31AM +0100, Thomas Zimmermann wrote:
The function is declared in drm_cache.h. I also removed the curly braces from the for loop to adhere to kernel coding style.
Signed-off-by: Thomas Zimmermann <tzimmermann@suse.de>
s/implement in/move to/ in the subject. Also, it would be nice to add kerneldoc while moving (there's no kerneldoc for drm_memory) to avoid the new warning. With that fixed:
Reviewed-by: Daniel Vetter <daniel.vetter@ffwll.ch>
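For the kerneldoc, something along these lines would do as a starting point (just a rough sketch, wording up to you):

/**
 * drm_need_swiotlb - Check if swiotlb is needed
 * @dma_bits: Bit width of the DMA address space
 *
 * This function checks whether bounce buffers are needed to access
 * system memory: on Xen paravirtual hosts, when memory encryption is
 * active, or when I/O memory exists above the range addressable with
 * @dma_bits bits.
 *
 * Returns:
 * True if swiotlb is needed, false otherwise.
 */
bool drm_need_swiotlb(int dma_bits)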
It's mildly confusing, but in a way drm_cache.c is our "hack around dma-api layering issues" pile, so it fits :-) Maybe we should even make this the official DOC: kerneldoc intro section for this file ...
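i.e. something like this (rough sketch, feel free to reword):

/**
 * DOC: overview
 *
 * This file contains CPU cache-flushing helpers and, more generally,
 * the workarounds for dma-api layering issues that don't fit anywhere
 * else in DRM.
 */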
Cheers, Daniel
 drivers/gpu/drm/drm_cache.c  | 32 ++++++++++++++++++++++++++++++++
 drivers/gpu/drm/drm_memory.c | 33 ---------------------------------
 2 files changed, 32 insertions(+), 33 deletions(-)
diff --git a/drivers/gpu/drm/drm_cache.c b/drivers/gpu/drm/drm_cache.c
index 0fe3c496002a..49551a7fa22f 100644
--- a/drivers/gpu/drm/drm_cache.c
+++ b/drivers/gpu/drm/drm_cache.c
@@ -30,6 +30,7 @@
 
 #include <linux/export.h>
 #include <linux/highmem.h>
+#include <xen/xen.h>
 
 #include <drm/drm_cache.h>
 
@@ -176,3 +177,34 @@ drm_clflush_virt_range(void *addr, unsigned long length)
 #endif
 }
 EXPORT_SYMBOL(drm_clflush_virt_range);
+
+bool drm_need_swiotlb(int dma_bits)
+{
+	struct resource *tmp;
+	resource_size_t max_iomem = 0;
+
+	/*
+	 * Xen paravirtual hosts require swiotlb regardless of requested dma
+	 * transfer size.
+	 *
+	 * NOTE: Really, what it requires is use of the dma_alloc_coherent
+	 *       allocator used in ttm_dma_populate() instead of
+	 *       ttm_populate_and_map_pages(), which bounce buffers so much in
+	 *       Xen it leads to swiotlb buffer exhaustion.
+	 */
+	if (xen_pv_domain())
+		return true;
+
+	/*
+	 * Enforce dma_alloc_coherent when memory encryption is active as well
+	 * for the same reasons as for Xen paravirtual hosts.
+	 */
+	if (mem_encrypt_active())
+		return true;
+
+	for (tmp = iomem_resource.child; tmp; tmp = tmp->sibling)
+		max_iomem = max(max_iomem, tmp->end);
+
+	return max_iomem > ((u64)1 << dma_bits);
+}
+EXPORT_SYMBOL(drm_need_swiotlb);
diff --git a/drivers/gpu/drm/drm_memory.c b/drivers/gpu/drm/drm_memory.c
index f4f2bffdd5bd..e4f20a2eb6e7 100644
--- a/drivers/gpu/drm/drm_memory.c
+++ b/drivers/gpu/drm/drm_memory.c
@@ -37,7 +37,6 @@
 #include <linux/highmem.h>
 #include <linux/pci.h>
 #include <linux/vmalloc.h>
-#include <xen/xen.h>
 
 #include <drm/drm_agpsupport.h>
 #include <drm/drm_cache.h>
@@ -138,35 +137,3 @@ void drm_legacy_ioremapfree(struct drm_local_map *map, struct drm_device *dev)
 		iounmap(map->handle);
 }
 EXPORT_SYMBOL(drm_legacy_ioremapfree);
-
-bool drm_need_swiotlb(int dma_bits)
-{
-	struct resource *tmp;
-	resource_size_t max_iomem = 0;
-
-	/*
-	 * Xen paravirtual hosts require swiotlb regardless of requested dma
-	 * transfer size.
-	 *
-	 * NOTE: Really, what it requires is use of the dma_alloc_coherent
-	 *       allocator used in ttm_dma_populate() instead of
-	 *       ttm_populate_and_map_pages(), which bounce buffers so much in
-	 *       Xen it leads to swiotlb buffer exhaustion.
-	 */
-	if (xen_pv_domain())
-		return true;
-
-	/*
-	 * Enforce dma_alloc_coherent when memory encryption is active as well
-	 * for the same reasons as for Xen paravirtual hosts.
-	 */
-	if (mem_encrypt_active())
-		return true;
-
-	for (tmp = iomem_resource.child; tmp; tmp = tmp->sibling) {
-		max_iomem = max(max_iomem, tmp->end);
-	}
-
-	return max_iomem > ((u64)1 << dma_bits);
-}
-EXPORT_SYMBOL(drm_need_swiotlb);
2.29.2