On 08/05/2010 09:09 AM, Ben Skeggs wrote:
From: Ben Skeggs <bskeggs@redhat.com>
Nouveau will need this on GeForce 8 and up to account for the GPU reordering physical VRAM for some memory types.
Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
Reviewed-by: Jerome Glisse <jglisse@redhat.com>
 drivers/gpu/drm/nouveau/nouveau_bo.c       |   12 ++-
 drivers/gpu/drm/nouveau/nouveau_channel.c  |    6 +-
 drivers/gpu/drm/nouveau/nouveau_notifier.c |    2 +-
 drivers/gpu/drm/nouveau/nouveau_sgdma.c    |    4 +-
 drivers/gpu/drm/nouveau/nv50_crtc.c        |    3 +-
 drivers/gpu/drm/nouveau/nv50_display.c     |    2 +-
 drivers/gpu/drm/nouveau/nv50_instmem.c     |    2 +-
 drivers/gpu/drm/radeon/radeon_object.c     |    6 +-
 drivers/gpu/drm/radeon/radeon_ttm.c        |   16 ++--
 drivers/gpu/drm/ttm/Makefile               |    3 +-
 drivers/gpu/drm/ttm/ttm_agp_backend.c      |    3 +-
 drivers/gpu/drm/ttm/ttm_bo.c               |  100 ++++---------------
 drivers/gpu/drm/ttm/ttm_bo_manager.c       |  148 ++++++++++++++++++++++++++++
 drivers/gpu/drm/ttm/ttm_bo_util.c          |    3 +-
 drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c     |    3 +-
 include/drm/ttm/ttm_bo_api.h               |    3 +-
 include/drm/ttm/ttm_bo_driver.h            |   21 ++++-
 17 files changed, 225 insertions(+), 112 deletions(-)
 create mode 100644 drivers/gpu/drm/ttm/ttm_bo_manager.c
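
A note for anyone following along: the driver-side half of this conversion is
mechanical.  init_mem_type() now points man->func at the generic drm_mm-backed
manager that moves into ttm_bo_manager.c, and every place that used to read
mem->mm_node->start reads the new mem->start field instead.  A rough sketch of
that pattern, with a made-up "foo" driver and illustrative placement flags
(not lifted from any one driver in the patch):

static int foo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
                             struct ttm_mem_type_manager *man)
{
        switch (type) {
        case TTM_PL_VRAM:
                /* use TTM's stock drm_mm-backed range manager */
                man->func = &ttm_bo_manager_func;
                man->flags = TTM_MEMTYPE_FLAG_FIXED | TTM_MEMTYPE_FLAG_MAPPABLE;
                man->available_caching = TTM_PL_MASK_CACHING;
                man->default_caching = TTM_PL_FLAG_CACHED;
                break;
        default:
                return -EINVAL;
        }
        return 0;
}

/* ...and once a buffer is placed, the offset comes from the mem reg itself: */
        offset = bo->mem.start << PAGE_SHIFT;   /* was bo->mem.mm_node->start */
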
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 92290fa..f403737 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -390,6 +390,7 @@ nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
                 man->default_caching = TTM_PL_FLAG_CACHED;
                 break;
         case TTM_PL_VRAM:
+                man->func = &ttm_bo_manager_func;
                 man->flags = TTM_MEMTYPE_FLAG_FIXED |
                              TTM_MEMTYPE_FLAG_MAPPABLE;
                 man->available_caching = TTM_PL_FLAG_UNCACHED |
@@ -398,6 +399,7 @@ nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
                 man->gpu_offset = dev_priv->vm_vram_base;
                 break;
         case TTM_PL_TT:
+                man->func = &ttm_bo_manager_func;
                 switch (dev_priv->gart_info.type) {
                 case NOUVEAU_GART_AGP:
                         man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
@@ -500,8 +502,8 @@ nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
         if (!chan || nvbo->tile_flags || nvbo->no_vm)
                 chan = dev_priv->channel;
 
-        src_offset = old_mem->mm_node->start << PAGE_SHIFT;
-        dst_offset = new_mem->mm_node->start << PAGE_SHIFT;
+        src_offset = old_mem->start << PAGE_SHIFT;
+        dst_offset = new_mem->start << PAGE_SHIFT;
         if (chan != dev_priv->channel) {
                 if (old_mem->mem_type == TTM_PL_TT)
                         src_offset += dev_priv->vm_gart_base;
@@ -650,7 +652,7 @@ nouveau_bo_vm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem,
                 return 0;
         }
 
-        offset = new_mem->mm_node->start << PAGE_SHIFT;
+        offset = new_mem->start << PAGE_SHIFT;
 
         if (dev_priv->card_type == NV_50) {
                 ret = nv50_mem_vm_bind_linear(dev,
@@ -764,14 +766,14 @@ nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
         case TTM_PL_TT:
 #if __OS_HAS_AGP
                 if (dev_priv->gart_info.type == NOUVEAU_GART_AGP) {
-                        mem->bus.offset = mem->mm_node->start << PAGE_SHIFT;
+                        mem->bus.offset = mem->start << PAGE_SHIFT;
                         mem->bus.base = dev_priv->gart_info.aper_base;
                         mem->bus.is_iomem = true;
                 }
 #endif
                 break;
         case TTM_PL_VRAM:
-                mem->bus.offset = mem->mm_node->start << PAGE_SHIFT;
+                mem->bus.offset = mem->start << PAGE_SHIFT;
                 mem->bus.base = pci_resource_start(dev->pdev, 1);
                 mem->bus.is_iomem = true;
                 break;
diff --git a/drivers/gpu/drm/nouveau/nouveau_channel.c b/drivers/gpu/drm/nouveau/nouveau_channel.c
index 90fdcda..90d3450 100644
--- a/drivers/gpu/drm/nouveau/nouveau_channel.c
+++ b/drivers/gpu/drm/nouveau/nouveau_channel.c
@@ -48,14 +48,14 @@ nouveau_channel_pushbuf_ctxdma_init(struct nouveau_channel *chan)
                                              dev_priv->gart_info.aper_size,
                                              NV_DMA_ACCESS_RO, &pushbuf,
                                              NULL);
-                chan->pushbuf_base = pb->bo.mem.mm_node->start << PAGE_SHIFT;
+                chan->pushbuf_base = pb->bo.mem.start << PAGE_SHIFT;
         } else
         if (dev_priv->card_type != NV_04) {
                 ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, 0,
                                              dev_priv->fb_available_size,
                                              NV_DMA_ACCESS_RO,
                                              NV_DMA_TARGET_VIDMEM, &pushbuf);
-                chan->pushbuf_base = pb->bo.mem.mm_node->start << PAGE_SHIFT;
+                chan->pushbuf_base = pb->bo.mem.start << PAGE_SHIFT;
         } else {
                 /* NV04 cmdbuf hack, from original ddx.. not sure of it's
                  * exact reason for existing :)  PCI access to cmdbuf in
@@ -67,7 +67,7 @@ nouveau_channel_pushbuf_ctxdma_init(struct nouveau_channel *chan)
                                              dev_priv->fb_available_size,
                                              NV_DMA_ACCESS_RO,
                                              NV_DMA_TARGET_PCI, &pushbuf);
-                chan->pushbuf_base = pb->bo.mem.mm_node->start << PAGE_SHIFT;
+                chan->pushbuf_base = pb->bo.mem.start << PAGE_SHIFT;
         }
 
         ret = nouveau_gpuobj_ref_add(dev, chan, 0, pushbuf, &chan->pushbuf);
diff --git a/drivers/gpu/drm/nouveau/nouveau_notifier.c b/drivers/gpu/drm/nouveau/nouveau_notifier.c
index 3ec181f..d218427 100644
--- a/drivers/gpu/drm/nouveau/nouveau_notifier.c
+++ b/drivers/gpu/drm/nouveau/nouveau_notifier.c
@@ -112,7 +112,7 @@ nouveau_notifier_alloc(struct nouveau_channel *chan, uint32_t handle,
                 return -ENOMEM;
         }
 
-        offset = chan->notifier_bo->bo.mem.mm_node->start << PAGE_SHIFT;
+        offset = chan->notifier_bo->bo.mem.start << PAGE_SHIFT;
         if (chan->notifier_bo->bo.mem.mem_type == TTM_PL_VRAM) {
                 target = NV_DMA_TARGET_VIDMEM;
         } else
diff --git a/drivers/gpu/drm/nouveau/nouveau_sgdma.c b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
index 491767f..4374366 100644
--- a/drivers/gpu/drm/nouveau/nouveau_sgdma.c
+++ b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
@@ -95,9 +95,9 @@ nouveau_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
         struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma;
         unsigned i, j, pte;
 
-        NV_DEBUG(dev, "pg=0x%lx\n", mem->mm_node->start);
+        NV_DEBUG(dev, "pg=0x%lx\n", mem->start);
 
-        pte = nouveau_sgdma_pte(nvbe->dev, mem->mm_node->start << PAGE_SHIFT);
+        pte = nouveau_sgdma_pte(nvbe->dev, mem->start << PAGE_SHIFT);
         nvbe->pte_start = pte;
         for (i = 0; i < nvbe->nr_pages; i++) {
                 dma_addr_t dma_offset = nvbe->pages[i];
diff --git a/drivers/gpu/drm/nouveau/nv50_crtc.c b/drivers/gpu/drm/nouveau/nv50_crtc.c
index 5d11ea1..a6a0f95 100644
--- a/drivers/gpu/drm/nouveau/nv50_crtc.c
+++ b/drivers/gpu/drm/nouveau/nv50_crtc.c
@@ -104,8 +104,7 @@ nv50_crtc_blank(struct nouveau_crtc *nv_crtc, bool blanked)
                 OUT_RING(evo, nv_crtc->lut.depth == 8 ?
                                 NV50_EVO_CRTC_CLUT_MODE_OFF :
                                 NV50_EVO_CRTC_CLUT_MODE_ON);
-                OUT_RING(evo, (nv_crtc->lut.nvbo->bo.mem.mm_node->start <<
-                                 PAGE_SHIFT) >> 8);
+                OUT_RING(evo, (nv_crtc->lut.nvbo->bo.mem.start << PAGE_SHIFT) >> 8);
                 if (dev_priv->chipset != 0x50) {
                         BEGIN_RING(evo, 0, NV84_EVO_CRTC(index, CLUT_DMA), 1);
                         OUT_RING(evo, NvEvoVRAM);
diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c
index f13ad0d..5f88e93 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.c
+++ b/drivers/gpu/drm/nouveau/nv50_display.c
@@ -322,7 +322,7 @@ nv50_display_init(struct drm_device *dev)
 
         /* initialise fifo */
         nv_wr32(dev, NV50_PDISPLAY_CHANNEL_DMA_CB(0),
-                ((evo->pushbuf_bo->bo.mem.mm_node->start << PAGE_SHIFT) >> 8) |
+                ((evo->pushbuf_bo->bo.mem.start << PAGE_SHIFT) >> 8) |
                 NV50_PDISPLAY_CHANNEL_DMA_CB_LOCATION_VRAM |
                 NV50_PDISPLAY_CHANNEL_DMA_CB_VALID);
         nv_wr32(dev, NV50_PDISPLAY_CHANNEL_UNK2(0), 0x00010000);
diff --git a/drivers/gpu/drm/nouveau/nv50_instmem.c b/drivers/gpu/drm/nouveau/nv50_instmem.c
index 37c7b48..a35e04c 100644
--- a/drivers/gpu/drm/nouveau/nv50_instmem.c
+++ b/drivers/gpu/drm/nouveau/nv50_instmem.c
@@ -397,7 +397,7 @@ nv50_instmem_populate(struct drm_device *dev, struct nouveau_gpuobj *gpuobj,
                 return ret;
         }
 
-        gpuobj->im_backing_start = gpuobj->im_backing->bo.mem.mm_node->start;
+        gpuobj->im_backing_start = gpuobj->im_backing->bo.mem.start;
         gpuobj->im_backing_start <<= PAGE_SHIFT;
 
         return 0;
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index 0afd1e6..c261060 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -435,7 +435,7 @@ int radeon_bo_get_surface_reg(struct radeon_bo *bo)
 
 out:
         radeon_set_surface_reg(rdev, i, bo->tiling_flags, bo->pitch,
-                               bo->tbo.mem.mm_node->start << PAGE_SHIFT,
+                               bo->tbo.mem.start << PAGE_SHIFT,
                                bo->tbo.num_pages << PAGE_SHIFT);
         return 0;
 }
@@ -532,7 +532,7 @@ int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
         rdev = rbo->rdev;
         if (bo->mem.mem_type == TTM_PL_VRAM) {
                 size = bo->mem.num_pages << PAGE_SHIFT;
-                offset = bo->mem.mm_node->start << PAGE_SHIFT;
+                offset = bo->mem.start << PAGE_SHIFT;
                 if ((offset + size) > rdev->mc.visible_vram_size) {
                         /* hurrah the memory is not visible ! */
                         radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_VRAM);
@@ -540,7 +540,7 @@ int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
                 r = ttm_bo_validate(bo, &rbo->placement, false, true, false);
                 if (unlikely(r != 0))
                         return r;
-                offset = bo->mem.mm_node->start << PAGE_SHIFT;
+                offset = bo->mem.start << PAGE_SHIFT;
                 /* this should not happen */
                 if ((offset + size) > rdev->mc.visible_vram_size)
                         return -EINVAL;
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index cc19aba..0921910 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -152,6 +152,7 @@ static int radeon_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
                 man->default_caching = TTM_PL_FLAG_CACHED;
                 break;
         case TTM_PL_TT:
+                man->func = &ttm_bo_manager_func;
                 man->gpu_offset = rdev->mc.gtt_start;
                 man->available_caching = TTM_PL_MASK_CACHING;
                 man->default_caching = TTM_PL_FLAG_CACHED;
@@ -173,6 +174,7 @@ static int radeon_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
                 break;
         case TTM_PL_VRAM:
                 /* "On-card" video ram */
+                man->func = &ttm_bo_manager_func;
                 man->gpu_offset = rdev->mc.vram_start;
                 man->flags = TTM_MEMTYPE_FLAG_FIXED |
                              TTM_MEMTYPE_FLAG_MAPPABLE;
@@ -246,8 +248,8 @@ static int radeon_move_blit(struct ttm_buffer_object *bo,
         if (unlikely(r)) {
                 return r;
         }
-        old_start = old_mem->mm_node->start << PAGE_SHIFT;
-        new_start = new_mem->mm_node->start << PAGE_SHIFT;
+        old_start = old_mem->start << PAGE_SHIFT;
+        new_start = new_mem->start << PAGE_SHIFT;
 
         switch (old_mem->mem_type) {
         case TTM_PL_VRAM:
@@ -435,14 +437,14 @@ static int radeon_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_
 #if __OS_HAS_AGP
                 if (rdev->flags & RADEON_IS_AGP) {
                         /* RADEON_IS_AGP is set only if AGP is active */
-                        mem->bus.offset = mem->mm_node->start << PAGE_SHIFT;
+                        mem->bus.offset = mem->start << PAGE_SHIFT;
                         mem->bus.base = rdev->mc.agp_base;
                         mem->bus.is_iomem = !rdev->ddev->agp->cant_use_aperture;
                 }
 #endif
                 break;
         case TTM_PL_VRAM:
-                mem->bus.offset = mem->mm_node->start << PAGE_SHIFT;
+                mem->bus.offset = mem->start << PAGE_SHIFT;
                 /* check if it's visible */
                 if ((mem->bus.offset + mem->bus.size) > rdev->mc.visible_vram_size)
                         return -EINVAL;
@@ -685,7 +687,7 @@ static int radeon_ttm_backend_bind(struct ttm_backend *backend,
         int r;
 
         gtt = container_of(backend, struct radeon_ttm_backend, backend);
-        gtt->offset = bo_mem->mm_node->start << PAGE_SHIFT;
+        gtt->offset = bo_mem->start << PAGE_SHIFT;
         if (!gtt->num_pages) {
                 WARN(1, "nothing to bind %lu pages for mreg %p back %p!\n",
                      gtt->num_pages, bo_mem, backend);
         }
@@ -784,9 +786,9 @@ static int radeon_ttm_debugfs_init(struct radeon_device *rdev)
                 radeon_mem_types_list[i].show = &radeon_mm_dump_table;
                 radeon_mem_types_list[i].driver_features = 0;
                 if (i == 0)
-                        radeon_mem_types_list[i].data = &rdev->mman.bdev.man[TTM_PL_VRAM].manager;
+                        radeon_mem_types_list[i].data = &rdev->mman.bdev.man[TTM_PL_VRAM].priv;
                 else
-                        radeon_mem_types_list[i].data = &rdev->mman.bdev.man[TTM_PL_TT].manager;
+                        radeon_mem_types_list[i].data = &rdev->mman.bdev.man[TTM_PL_TT].priv;
 
         }
         /* Add ttm page pool to debugfs */
diff --git a/drivers/gpu/drm/ttm/Makefile b/drivers/gpu/drm/ttm/Makefile
index b256d4a..f3cf6f0 100644
--- a/drivers/gpu/drm/ttm/Makefile
+++ b/drivers/gpu/drm/ttm/Makefile
@@ -4,6 +4,7 @@ ccflags-y := -Iinclude/drm
 ttm-y := ttm_agp_backend.o ttm_memory.o ttm_tt.o ttm_bo.o \
         ttm_bo_util.o ttm_bo_vm.o ttm_module.o \
-        ttm_object.o ttm_lock.o ttm_execbuf_util.o ttm_page_alloc.o
+        ttm_object.o ttm_lock.o ttm_execbuf_util.o ttm_page_alloc.o \
+        ttm_bo_manager.o
 
 obj-$(CONFIG_DRM_TTM) += ttm.o
diff --git a/drivers/gpu/drm/ttm/ttm_agp_backend.c b/drivers/gpu/drm/ttm/ttm_agp_backend.c
index 4bf69c4..f999e36 100644
--- a/drivers/gpu/drm/ttm/ttm_agp_backend.c
+++ b/drivers/gpu/drm/ttm/ttm_agp_backend.c
@@ -74,6 +74,7 @@ static int ttm_agp_bind(struct ttm_backend *backend, struct ttm_mem_reg *bo_mem)
 {
         struct ttm_agp_backend *agp_be =
             container_of(backend, struct ttm_agp_backend, backend);
+        struct drm_mm_node *node = bo_mem->mm_node;
         struct agp_memory *mem = agp_be->mem;
         int cached = (bo_mem->placement & TTM_PL_FLAG_CACHED);
         int ret;
@@ -81,7 +82,7 @@ static int ttm_agp_bind(struct ttm_backend *backend, struct ttm_mem_reg *bo_mem)
         mem->is_flushed = 1;
         mem->type = (cached) ? AGP_USER_CACHED_MEMORY : AGP_USER_MEMORY;
 
-        ret = agp_bind_memory(mem, bo_mem->mm_node->start);
+        ret = agp_bind_memory(mem, node->start);
         if (ret)
                 printk(KERN_ERR TTM_PFX "AGP Bind memory failed.\n");
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 80d37b4..af7b57a 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -84,11 +84,8 @@ static void ttm_mem_type_debug(struct ttm_bo_device *bdev, int mem_type)
                 man->available_caching);
         printk(KERN_ERR TTM_PFX "    default_caching: 0x%08X\n",
                 man->default_caching);
-        if (mem_type != TTM_PL_SYSTEM) {
-                spin_lock(&bdev->glob->lru_lock);
-                drm_mm_debug_table(&man->manager, TTM_PFX);
-                spin_unlock(&bdev->glob->lru_lock);
-        }
+        if (mem_type != TTM_PL_SYSTEM)
+                (*man->func->debug)(man, TTM_PFX);
 }
 
 static void ttm_bo_mem_space_debug(struct ttm_buffer_object *bo,
@@ -421,7 +418,7 @@ moved:
 
         if (bo->mem.mm_node) {
                 spin_lock(&bo->lock);
-                bo->offset = (bo->mem.mm_node->start << PAGE_SHIFT) +
+                bo->offset = (bo->mem.start << PAGE_SHIFT) +
                             bdev->man[bo->mem.mem_type].gpu_offset;
                 bo->cur_placement = bo->mem.placement;
                 spin_unlock(&bo->lock);
@@ -724,52 +721,12 @@ retry:
         return ret;
 }
 
-static int ttm_bo_man_get_node(struct ttm_buffer_object *bo,
-                                struct ttm_mem_type_manager *man,
-                                struct ttm_placement *placement,
-                                struct ttm_mem_reg *mem,
-                                struct drm_mm_node **node)
-{
-        struct ttm_bo_global *glob = bo->glob;
-        unsigned long lpfn;
-        int ret;
-
-        lpfn = placement->lpfn;
-        if (!lpfn)
-                lpfn = man->size;
-        *node = NULL;
-        do {
-                ret = drm_mm_pre_get(&man->manager);
-                if (unlikely(ret))
-                        return ret;
-
-                spin_lock(&glob->lru_lock);
-                *node = drm_mm_search_free_in_range(&man->manager,
-                                        mem->num_pages, mem->page_alignment,
-                                        placement->fpfn, lpfn, 1);
-                if (unlikely(*node == NULL)) {
-                        spin_unlock(&glob->lru_lock);
-                        return 0;
-                }
-                *node = drm_mm_get_block_atomic_range(*node, mem->num_pages,
-                                                        mem->page_alignment,
-                                                        placement->fpfn,
-                                                        lpfn);
-                spin_unlock(&glob->lru_lock);
-        } while (*node == NULL);
-        return 0;
-}
-
 void ttm_bo_mem_put(struct ttm_buffer_object *bo, struct ttm_mem_reg *mem)
 {
-        struct ttm_bo_global *glob = bo->glob;
+        struct ttm_mem_type_manager *man = &bo->bdev->man[mem->mem_type];
 
-        if (mem->mm_node) {
-                spin_lock(&glob->lru_lock);
-                drm_mm_put_block(mem->mm_node);
-                spin_unlock(&glob->lru_lock);
-                mem->mm_node = NULL;
-        }
+        if (mem->mm_node)
+                (*man->func->put_node)(man, mem);
 }
 EXPORT_SYMBOL(ttm_bo_mem_put);
@@ -788,14 +745,13 @@ static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
         struct ttm_bo_device *bdev = bo->bdev;
         struct ttm_bo_global *glob = bdev->glob;
         struct ttm_mem_type_manager *man = &bdev->man[mem_type];
-        struct drm_mm_node *node;
         int ret;
 
         do {
-                ret = ttm_bo_man_get_node(bo, man, placement, mem, &node);
+                ret = (*man->func->get_node)(man, bo, placement, mem);
                 if (unlikely(ret != 0))
                         return ret;
-                if (node)
+                if (mem->mm_node)
                         break;
                 spin_lock(&glob->lru_lock);
                 if (list_empty(&man->lru)) {
@@ -808,9 +764,8 @@ static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
                 if (unlikely(ret != 0))
                         return ret;
         } while (1);
-        if (node == NULL)
+        if (mem->mm_node == NULL)
                 return -ENOMEM;
-        mem->mm_node = node;
         mem->mem_type = mem_type;
         return 0;
 }
@@ -884,7 +839,6 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
         bool type_found = false;
         bool type_ok = false;
         bool has_erestartsys = false;
-        struct drm_mm_node *node = NULL;
         int i, ret;
 
         mem->mm_node = NULL;
@@ -918,17 +872,15 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
 
                 if (man->has_type && man->use_type) {
                         type_found = true;
-                        ret = ttm_bo_man_get_node(bo, man, placement, mem,
-                                                        &node);
+                        ret = (*man->func->get_node)(man, bo, placement, mem);
                         if (unlikely(ret))
                                 return ret;
                 }
-                if (node)
+                if (mem->mm_node)
                         break;
         }
 
-        if ((type_ok && (mem_type == TTM_PL_SYSTEM)) || node) {
-                mem->mm_node = node;
+        if ((type_ok && (mem_type == TTM_PL_SYSTEM)) || mem->mm_node) {
                 mem->mem_type = mem_type;
                 mem->placement = cur_flags;
                 return 0;
@@ -998,7 +950,6 @@ int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
                         bool interruptible, bool no_wait_reserve,
                         bool no_wait_gpu)
 {
-        struct ttm_bo_global *glob = bo->glob;
         int ret = 0;
         struct ttm_mem_reg mem;
 
@@ -1026,11 +977,8 @@ int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
                 goto out_unlock;
         ret = ttm_bo_handle_move_mem(bo, &mem, false, interruptible,
                                      no_wait_reserve, no_wait_gpu);
 out_unlock:
-        if (ret && mem.mm_node) {
-                spin_lock(&glob->lru_lock);
-                drm_mm_put_block(mem.mm_node);
-                spin_unlock(&glob->lru_lock);
-        }
+        if (ret && mem.mm_node)
+                ttm_bo_mem_put(bo, &mem);
         return ret;
 }
@@ -1038,11 +986,10 @@ static int ttm_bo_mem_compat(struct ttm_placement *placement,
                               struct ttm_mem_reg *mem)
 {
         int i;
-        struct drm_mm_node *node = mem->mm_node;
 
-        if (node && placement->lpfn != 0 &&
-            (node->start < placement->fpfn ||
-             node->start + node->size > placement->lpfn))
+        if (mem->mm_node && placement->lpfn != 0 &&
+            (mem->start < placement->fpfn ||
+             mem->start + mem->num_pages > placement->lpfn))
                 return -1;
 
         for (i = 0; i < placement->num_placement; i++) {
@@ -1286,7 +1233,6 @@ static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev,
 
 int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type)
 {
-        struct ttm_bo_global *glob = bdev->glob;
         struct ttm_mem_type_manager *man;
         int ret = -EINVAL;
 
@@ -1309,13 +1255,7 @@ int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type)
         if (mem_type > 0) {
                 ttm_bo_force_list_clean(bdev, mem_type, false);
 
-                spin_lock(&glob->lru_lock);
-                if (drm_mm_clean(&man->manager))
-                        drm_mm_takedown(&man->manager);
-                else
-                        ret = -EBUSY;
-
-                spin_unlock(&glob->lru_lock);
+                ret = (*man->func->takedown)(man);
         }
 
         return ret;
@@ -1366,6 +1306,7 @@ int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
         ret = bdev->driver->init_mem_type(bdev, type, man);
         if (ret)
                 return ret;
+        man->bdev = bdev;
 
         ret = 0;
         if (type != TTM_PL_SYSTEM) {
@@ -1375,7 +1316,8 @@ int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
                         type);
                         return ret;
                 }
-                ret = drm_mm_init(&man->manager, 0, p_size);
+
+                ret = (*man->func->init)(man, p_size);
                 if (ret)
                         return ret;
         }
diff --git a/drivers/gpu/drm/ttm/ttm_bo_manager.c b/drivers/gpu/drm/ttm/ttm_bo_manager.c
new file mode 100644
index 0000000..7410c19
--- /dev/null
+++ b/drivers/gpu/drm/ttm/ttm_bo_manager.c
@@ -0,0 +1,148 @@
+/**************************************************************************
+ *
+ * Copyright (c) 2007-2009 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+/*
+ * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
+ */
+
+#include "ttm/ttm_module.h"
+#include "ttm/ttm_bo_driver.h"
+#include "ttm/ttm_placement.h"
+#include <linux/jiffies.h>
+#include <linux/slab.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/file.h>
+#include <linux/module.h>
+
+static int ttm_bo_man_get_node(struct ttm_mem_type_manager *man,
+                               struct ttm_buffer_object *bo,
+                               struct ttm_placement *placement,
+                               struct ttm_mem_reg *mem)
+{
+        struct ttm_bo_global *glob = man->bdev->glob;
+        struct drm_mm *mm = man->priv;
+        struct drm_mm_node *node = NULL;
+        unsigned long lpfn;
+        int ret;
+
+        lpfn = placement->lpfn;
+        if (!lpfn)
+                lpfn = man->size;
+        do {
+                ret = drm_mm_pre_get(mm);
+                if (unlikely(ret))
+                        return ret;
+
+                spin_lock(&glob->lru_lock);
+                node = drm_mm_search_free_in_range(mm,
+                                        mem->num_pages, mem->page_alignment,
+                                        placement->fpfn, lpfn, 1);
+                if (unlikely(node == NULL)) {
+                        spin_unlock(&glob->lru_lock);
+                        return 0;
+                }
+                node = drm_mm_get_block_atomic_range(node, mem->num_pages,
+                                                     mem->page_alignment,
+                                                     placement->fpfn,
+                                                     lpfn);
+                spin_unlock(&glob->lru_lock);
+        } while (node == NULL);
+
+        mem->mm_node = node;
+        mem->start = node->start;
+        return 0;
+}
+
+static void ttm_bo_man_put_node(struct ttm_mem_type_manager *man,
+                                struct ttm_mem_reg *mem)
+{
+        struct ttm_bo_global *glob = man->bdev->glob;
+
+        if (mem->mm_node) {
+                spin_lock(&glob->lru_lock);
+                drm_mm_put_block(mem->mm_node);
+                spin_unlock(&glob->lru_lock);
+                mem->mm_node = NULL;
+        }
+}
+
+static int ttm_bo_man_init(struct ttm_mem_type_manager *man,
+                           unsigned long p_size)
+{
+        struct drm_mm *mm;
+        int ret;
+
+        mm = kzalloc(sizeof(*mm), GFP_KERNEL);
+        if (!mm)
+                return -ENOMEM;
+
+        ret = drm_mm_init(mm, 0, p_size);
+        if (ret) {
+                kfree(mm);
+                return ret;
+        }
+
+        man->priv = mm;
+        return 0;
+}
+
+static int ttm_bo_man_takedown(struct ttm_mem_type_manager *man)
+{
+        struct ttm_bo_global *glob = man->bdev->glob;
+        struct drm_mm *mm = man->priv;
+        int ret = 0;
+
+        spin_lock(&glob->lru_lock);
+        if (drm_mm_clean(mm)) {
+                drm_mm_takedown(mm);
+                kfree(mm);
+                man->priv = NULL;
+        } else
+                ret = -EBUSY;
+        spin_unlock(&glob->lru_lock);
+        return ret;
+}
+
+static void ttm_bo_man_debug(struct ttm_mem_type_manager *man,
+                             const char *prefix)
+{
+        struct ttm_bo_global *glob = man->bdev->glob;
+        struct drm_mm *mm = man->priv;
+
+        spin_lock(&glob->lru_lock);
+        drm_mm_debug_table(mm, prefix);
+        spin_unlock(&glob->lru_lock);
+}
+
+const struct ttm_mem_type_manager_func ttm_bo_manager_func = {
+        ttm_bo_man_init,
+        ttm_bo_man_takedown,
+        ttm_bo_man_get_node,
+        ttm_bo_man_put_node,
+        ttm_bo_man_debug
+};
+EXPORT_SYMBOL(ttm_bo_manager_func);
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index 0ebfe0d..c9d2d4d 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -256,8 +256,7 @@ int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
         dir = 1;
 
         if ((old_mem->mem_type == new_mem->mem_type) &&
-            (new_mem->mm_node->start <
-             old_mem->mm_node->start + old_mem->mm_node->size)) {
+            (new_mem->start < old_mem->start + old_mem->size)) {
                 dir = -1;
                 add = new_mem->num_pages - 1;
         }
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
index c4f5114..1b3bd8c 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
@@ -147,6 +147,7 @@ int vmw_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
                 break;
         case TTM_PL_VRAM:
                 /* "On-card" video ram */
+                man->func = &ttm_bo_manager_func;
                 man->gpu_offset = 0;
                 man->flags = TTM_MEMTYPE_FLAG_FIXED | TTM_MEMTYPE_FLAG_MAPPABLE;
                 man->available_caching = TTM_PL_MASK_CACHING;
@@ -203,7 +204,7 @@ static int vmw_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg
                 /* System memory */
                 return 0;
         case TTM_PL_VRAM:
-                mem->bus.offset = mem->mm_node->start << PAGE_SHIFT;
+                mem->bus.offset = mem->start << PAGE_SHIFT;
                 mem->bus.base = dev_priv->vram_start;
                 mem->bus.is_iomem = true;
                 break;
diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h
index 267a86c..49b43c2 100644
--- a/include/drm/ttm/ttm_bo_api.h
+++ b/include/drm/ttm/ttm_bo_api.h
@@ -102,7 +102,8 @@ struct ttm_bus_placement {
  */
 
 struct ttm_mem_reg {
-        struct drm_mm_node *mm_node;
+        void *mm_node;
+        unsigned long start;
         unsigned long size;
         unsigned long num_pages;
         uint32_t page_alignment;
diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h
index 6c694d8..e3371db 100644
--- a/include/drm/ttm/ttm_bo_driver.h
+++ b/include/drm/ttm/ttm_bo_driver.h
@@ -203,7 +203,22 @@ struct ttm_tt {
  * It's set up by the ttm_bo_driver::init_mem_type method.
  */
 
+struct ttm_mem_type_manager;
+
+struct ttm_mem_type_manager_func {
+        int  (*init)(struct ttm_mem_type_manager *man, unsigned long p_size);
+        int  (*takedown)(struct ttm_mem_type_manager *man);
+        int  (*get_node)(struct ttm_mem_type_manager *man,
+                         struct ttm_buffer_object *bo,
+                         struct ttm_placement *placement,
+                         struct ttm_mem_reg *mem);
+        void (*put_node)(struct ttm_mem_type_manager *man,
+                         struct ttm_mem_reg *mem);
+        void (*debug)(struct ttm_mem_type_manager *man, const char *prefix);
+};
+
 struct ttm_mem_type_manager {
+        struct ttm_bo_device *bdev;
 
         /*
          * No protection. Constant from start.
@@ -222,8 +237,8 @@ struct ttm_mem_type_manager {
          * TODO: Consider one lru_lock per ttm_mem_type_manager.
          * Plays ill with list removal, though.
          */
-
-        struct drm_mm manager;
+        const struct ttm_mem_type_manager_func *func;
+        void *priv;
         struct list_head lru;
 };
 
@@ -895,6 +910,8 @@ extern int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
  */
 extern pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp);
 
+extern const struct ttm_mem_type_manager_func ttm_bo_manager_func;
+
 #if (defined(CONFIG_AGP) || (defined(CONFIG_AGP_MODULE) && defined(MODULE)))
 #define TTM_HAS_AGP
 #include <linux/agp_backend.h>
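
The part that matters for driver authors is the new ttm_mem_type_manager_func
table: anything that can't be described as a flat drm_mm range (the reordered
VRAM case nouveau needs on G80 and up) can now supply its own manager instead
of ttm_bo_manager_func.  Purely as an illustration of the contract above --
the foo_vram_* allocator and its node type below are invented stand-ins, not
anything in this patch -- a custom manager would look roughly like:

/* Hypothetical stand-ins for a driver's own VRAM allocator. */
struct foo_vram_node { unsigned long start; };
void *foo_vram_create(unsigned long p_size);
void foo_vram_destroy(void *heap);
struct foo_vram_node *foo_vram_alloc(void *heap, unsigned long npages,
                                     unsigned long align);
void foo_vram_free(struct foo_vram_node *node);

static int foo_man_init(struct ttm_mem_type_manager *man, unsigned long p_size)
{
        man->priv = foo_vram_create(p_size);
        return man->priv ? 0 : -ENOMEM;
}

static int foo_man_takedown(struct ttm_mem_type_manager *man)
{
        foo_vram_destroy(man->priv);
        man->priv = NULL;
        return 0;
}

static int foo_man_get_node(struct ttm_mem_type_manager *man,
                            struct ttm_buffer_object *bo,
                            struct ttm_placement *placement,
                            struct ttm_mem_reg *mem)
{
        struct foo_vram_node *node;

        /* placement->fpfn/lpfn range limits ignored here for brevity */
        node = foo_vram_alloc(man->priv, mem->num_pages, mem->page_alignment);
        if (!node)
                return 0;       /* no space: leave mm_node NULL, TTM evicts */

        mem->mm_node = node;            /* opaque now that mm_node is void * */
        mem->start = node->start;       /* what bo->offset is derived from */
        return 0;
}

static void foo_man_put_node(struct ttm_mem_type_manager *man,
                             struct ttm_mem_reg *mem)
{
        if (mem->mm_node) {
                foo_vram_free(mem->mm_node);
                mem->mm_node = NULL;
        }
}

static void foo_man_debug(struct ttm_mem_type_manager *man, const char *prefix)
{
        /* dump allocator state, prefixed like ttm_bo_man_debug() does */
}

static const struct ttm_mem_type_manager_func foo_manager_func = {
        foo_man_init,
        foo_man_takedown,
        foo_man_get_node,
        foo_man_put_node,
        foo_man_debug
};

and the driver's init_mem_type() would set man->func = &foo_manager_func for
the memory type in question.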