On 03.03.21 14:20, Thomas Gleixner wrote:
From: Thomas Gleixner <tglx@linutronix.de>
There is no reason to disable pagefaults and preemption as a side effect of kmap_atomic_prot().
Use kmap_local_page_prot() instead and document the reasoning for the mapping usage with the given pgprot.
Remove the NULL pointer check for the map. These functions return a valid address for valid pages and the return was bogus anyway as it would have left preemption and pagefaults disabled.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: VMware Graphics <linux-graphics-maintainer@vmware.com>
Cc: Roland Scheidegger <sroland@vmware.com>
Cc: Zack Rusin <zackr@vmware.com>
Cc: David Airlie <airlied@linux.ie>
Cc: Daniel Vetter <daniel@ffwll.ch>
Cc: dri-devel@lists.freedesktop.org
 drivers/gpu/drm/vmwgfx/vmwgfx_blit.c | 30 ++++++++++++------------------
 1 file changed, 12 insertions(+), 18 deletions(-)
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c
@@ -375,12 +375,12 @@ static int vmw_bo_cpu_blit_line(struct v
 		copy_size = min_t(u32, copy_size, PAGE_SIZE - src_page_offset);
 
 		if (unmap_src) {
-			kunmap_atomic(d->src_addr);
+			kunmap_local(d->src_addr);
 			d->src_addr = NULL;
 		}
 
 		if (unmap_dst) {
-			kunmap_atomic(d->dst_addr);
+			kunmap_local(d->dst_addr);
 			d->dst_addr = NULL;
 		}
@@ -388,12 +388,8 @@ static int vmw_bo_cpu_blit_line(struct v
 			if (WARN_ON_ONCE(dst_page >= d->dst_num_pages))
 				return -EINVAL;
 
-			d->dst_addr =
-				kmap_atomic_prot(d->dst_pages[dst_page],
-						 d->dst_prot);
-			if (!d->dst_addr)
-				return -ENOMEM;
-
+			d->dst_addr = kmap_local_page_prot(d->dst_pages[dst_page],
+							   d->dst_prot);
 			d->mapped_dst = dst_page;
 		}
@@ -401,12 +397,8 @@ static int vmw_bo_cpu_blit_line(struct v
 			if (WARN_ON_ONCE(src_page >= d->src_num_pages))
 				return -EINVAL;
 
-			d->src_addr =
-				kmap_atomic_prot(d->src_pages[src_page],
-						 d->src_prot);
-			if (!d->src_addr)
-				return -ENOMEM;
-
+			d->src_addr = kmap_local_page_prot(d->src_pages[src_page],
+							   d->src_prot);
 			d->mapped_src = src_page;
 		}
 		diff->do_cpy(diff, d->dst_addr + dst_page_offset,
@@ -436,8 +428,10 @@ static int vmw_bo_cpu_blit_line(struct v
  *
  * Performs a CPU blit from one buffer object to another avoiding a full
  * bo vmap which may exhaust- or fragment vmalloc space.
- * On supported architectures (x86), we're using kmap_atomic which avoids
- * cross-processor TLB- and cache flushes and may, on non-HIGHMEM systems
- * reference already set-up mappings.
+ *
+ * On supported architectures (x86), we're using kmap_local_prot() which
+ * avoids cross-processor TLB- and cache flushes. kmap_local_prot() will
+ * either map a highmem page with the proper pgprot on HIGHMEM=y systems or
+ * reference already set-up mappings.
  *
  * Neither of the buffer objects may be placed in PCI memory
@@ -500,9 +494,9 @@ int vmw_bo_cpu_blit(struct ttm_buffer_ob
 	}
 out:
 	if (d.src_addr)
-		kunmap_atomic(d.src_addr);
+		kunmap_local(d.src_addr);
 	if (d.dst_addr)
-		kunmap_atomic(d.dst_addr);
+		kunmap_local(d.dst_addr);
 
 	return ret;
 }
Seems reasonable to me.

Reviewed-by: Roland Scheidegger <sroland@vmware.com>