From: Russell King <rmk+kernel@arm.linux.org.uk>
In order to avoid flushing the GPU MMU each time we unmap and remap a buffer, allocate MMU addresses in a round-robin fashion, starting each search from the end of the previous allocation. Only when the search has to wrap back to the beginning of the address space do we mark the MMU as needing a flush, since previously used addresses may then be re-used.
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
---
 drivers/staging/etnaviv/etnaviv_mmu.c | 23 +++++++++++++++++++++--
 drivers/staging/etnaviv/etnaviv_mmu.h |  1 +
 2 files changed, 22 insertions(+), 2 deletions(-)
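Note for reviewers (not part of the commit message): the allocation strategy in the patch below distills to the loop sketched here. This is a minimal self-contained userspace sketch, not kernel code; mmu_state, iova_node, iova_map(), next_free, IOVA_SPACE_END and the toy alloc_range() bump allocator are hypothetical stand-ins for struct etnaviv_iommu, struct drm_mm_node, etnaviv_iommu_map_gem() and drm_mm_insert_node_in_range(), chosen only to make the round-robin logic readable in isolation.

#include <errno.h>
#include <stdbool.h>
#include <stdint.h>

#define IOVA_SPACE_END 0x10000000u	/* assumed size of the GPU address space */

struct iova_node {
	uint32_t start;
	uint32_t size;
};

struct mmu_state {
	uint32_t next_free;	/* state of the toy bump allocator below */
	uint32_t last_iova;
	bool need_flush;
};

/*
 * Toy bump allocator standing in for drm_mm_insert_node_in_range():
 * succeeds only if [start, end) still has 'size' bytes above next_free.
 */
static int alloc_range(struct mmu_state *mmu, struct iova_node *node,
		       uint32_t size, uint32_t start, uint32_t end)
{
	uint32_t base = mmu->next_free > start ? mmu->next_free : start;

	if (base + size > end)
		return -ENOSPC;

	node->start = base;
	node->size = size;
	mmu->next_free = base + size;
	return 0;
}

static int iova_map(struct mmu_state *mmu, struct iova_node *node, uint32_t size)
{
	int ret;

	while (1) {
		/* First search upwards from the previous allocation. */
		ret = alloc_range(mmu, node, size, mmu->last_iova,
				  IOVA_SPACE_END);
		if (ret != -ENOSPC)
			break;

		/* Already searched from address zero: truly out of space. */
		if (!mmu->last_iova)
			break;

		/*
		 * Wrap to the start of the address space.  Addresses may
		 * now be re-used, so stale MMU entries must be flushed
		 * before the next GPU submission.
		 */
		mmu->last_iova = 0;
		mmu->need_flush = true;
	}

	/* Remember where the next search should start. */
	if (!ret)
		mmu->last_iova = node->start + size;

	return ret;
}

The point of the scheme is that the common forward-allocation case never sets need_flush; a flush is only forced on wrap-around, when address re-use makes stale translations a hazard.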
diff --git a/drivers/staging/etnaviv/etnaviv_mmu.c b/drivers/staging/etnaviv/etnaviv_mmu.c
index 4589995b83ff..4fbba26a3f37 100644
--- a/drivers/staging/etnaviv/etnaviv_mmu.c
+++ b/drivers/staging/etnaviv/etnaviv_mmu.c
@@ -117,10 +117,29 @@ int etnaviv_iommu_map_gem(struct etnaviv_iommu *mmu,
 	if (!node)
 		return -ENOMEM;
 
-	ret = drm_mm_insert_node(&mmu->mm, node, etnaviv_obj->base.size, 0,
-			DRM_MM_SEARCH_DEFAULT);
+	while (1) {
+		ret = drm_mm_insert_node_in_range(&mmu->mm, node,
+			etnaviv_obj->base.size, 0, mmu->last_iova, ~0UL,
+			DRM_MM_SEARCH_DEFAULT);
+
+		if (ret != -ENOSPC)
+			break;
+
+		/*
+		 * If we did not search from the start of the MMU region,
+		 * try again in case there are free slots.
+		 */
+		if (mmu->last_iova) {
+			mmu->last_iova = 0;
+			mmu->need_flush = true;
+			continue;
+		}
+
+		break;
+	}
 
 	if (!ret) {
+		mmu->last_iova = node->start + etnaviv_obj->base.size;
 		offset = node->start;
 		etnaviv_obj->iova = offset;
 		etnaviv_obj->gpu_vram_node = node;
diff --git a/drivers/staging/etnaviv/etnaviv_mmu.h b/drivers/staging/etnaviv/etnaviv_mmu.h
index 262c4e26e901..a37affda9590 100644
--- a/drivers/staging/etnaviv/etnaviv_mmu.h
+++ b/drivers/staging/etnaviv/etnaviv_mmu.h
@@ -26,6 +26,7 @@ struct etnaviv_iommu {
 
 	/* memory manager for GPU address area */
 	struct drm_mm mm;
+	uint32_t last_iova;
 	bool need_flush;
 };