This is getting bigger than expected. Adding the locking that Chris suggested on IRC.
Thanks for taking time to review Chris.
- Lionel
Signed-off-by: Lionel Landwerlin lionel.g.landwerlin@intel.com --- xf86atomic.h | 9 +++++++++ 1 file changed, 9 insertions(+)
diff --git a/xf86atomic.h b/xf86atomic.h index db2f619..bc482c9 100644 --- a/xf86atomic.h +++ b/xf86atomic.h @@ -96,4 +96,13 @@ typedef struct { uint_t atomic; } atomic_t; #error libdrm requires atomic operations, please define them for your CPU/compiler. #endif
+static inline int atomic_add_unless(atomic_t *v, int add, int unless) +{ + int c, old; + c = atomic_read(v); + while (c != unless && (old = atomic_cmpxchg(v, c, c + add)) != c) + c = old; + return c == unless; +} + #endif
When using Mesa and LibVA in the same process, one would like to be able to bind buffers from the output of the decoder to a GL texture through an EGLImage.
LibVA can reuse buffers allocated by GBM through a file descriptor. It will then wrap the buffer into a drm_intel_bo with drm_intel_bo_gem_create_from_prime().
The problem at the moment is that both libraries get different drm_intel_bufmgr objects when they call drm_intel_bufmgr_gem_init(), even though they're using the same drm file descriptor. As a result, instead of manipulating the same buffer object for a given file descriptor, they get 2 different drm_intel_bo objects and 2 different refcounts, leading one of the libraries to get errors from the kernel about an invalid BO when one of the 2 libraries is done with a shared buffer.
This patch modifies drm_intel_bufmgr_gem_init() so, given a file descriptor, it will look for an already existing drm_intel_bufmgr using the same file descriptor and return that object.
Signed-off-by: Lionel Landwerlin lionel.g.landwerlin@intel.com --- intel/intel_bufmgr_gem.c | 63 ++++++++++++++++++++++++++++++++++++++++++++---- 1 file changed, 58 insertions(+), 5 deletions(-)
diff --git a/intel/intel_bufmgr_gem.c b/intel/intel_bufmgr_gem.c index 0e1cb0d..1e2dd77 100644 --- a/intel/intel_bufmgr_gem.c +++ b/intel/intel_bufmgr_gem.c @@ -94,6 +94,8 @@ struct drm_intel_gem_bo_bucket { typedef struct _drm_intel_bufmgr_gem { drm_intel_bufmgr bufmgr;
+ atomic_t refcount; + int fd;
int max_relocs; @@ -111,6 +113,8 @@ typedef struct _drm_intel_bufmgr_gem { int num_buckets; time_t time;
+ drmMMListHead managers; + drmMMListHead named; drmMMListHead vma_cache; int vma_count, vma_open, vma_max; @@ -3186,6 +3190,41 @@ drm_intel_bufmgr_gem_set_aub_annotations(drm_intel_bo *bo, bo_gem->aub_annotation_count = count; }
+static pthread_mutex_t bufmgr_list_mutex = PTHREAD_MUTEX_INITIALIZER; +static drmMMListHead bufmgr_list = { &bufmgr_list, &bufmgr_list }; + +static drm_intel_bufmgr_gem * +drm_intel_bufmgr_gem_find(int fd) +{ + drm_intel_bufmgr_gem *bufmgr_gem; + + DRMLISTFOREACHENTRY(bufmgr_gem, &bufmgr_list, managers) { + if (bufmgr_gem->fd == fd) { + atomic_inc(&bufmgr_gem->refcount); + return bufmgr_gem; + } + } + + return NULL; +} + +static void +drm_intel_bufmgr_gem_unref(drm_intel_bufmgr *bufmgr) +{ + drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr; + + if (atomic_add_unless(&bufmgr_gem->refcount, -1, 1)) { + pthread_mutex_lock(&bufmgr_list_mutex); + + if (atomic_dec_and_test(&bufmgr_gem->refcount)) { + DRMLISTDEL(&bufmgr_gem->managers); + drm_intel_bufmgr_gem_destroy(bufmgr); + } + + pthread_mutex_unlock(&bufmgr_list_mutex); + } +} + /** * Initializes the GEM buffer manager, which uses the kernel to allocate, map, * and manage map buffer objections. @@ -3201,15 +3240,23 @@ drm_intel_bufmgr_gem_init(int fd, int batch_size) int ret, tmp; bool exec2 = false;
+ pthread_mutex_lock(&bufmgr_list_mutex); + + bufmgr_gem = drm_intel_bufmgr_gem_find(fd); + if (bufmgr_gem) + goto exit; + bufmgr_gem = calloc(1, sizeof(*bufmgr_gem)); if (bufmgr_gem == NULL) - return NULL; + goto exit;
bufmgr_gem->fd = fd; + atomic_set(&bufmgr_gem->refcount, 1);
if (pthread_mutex_init(&bufmgr_gem->lock, NULL) != 0) { free(bufmgr_gem); - return NULL; + bufmgr_gem = NULL; + goto exit; }
ret = drmIoctl(bufmgr_gem->fd, @@ -3246,7 +3293,8 @@ drm_intel_bufmgr_gem_init(int fd, int batch_size) bufmgr_gem->gen = 8; else { free(bufmgr_gem); - return NULL; + bufmgr_gem = NULL; + goto exit; }
if (IS_GEN3(bufmgr_gem->pci_device) && @@ -3357,7 +3405,7 @@ drm_intel_bufmgr_gem_init(int fd, int batch_size) bufmgr_gem->bufmgr.bo_exec = drm_intel_gem_bo_exec; bufmgr_gem->bufmgr.bo_busy = drm_intel_gem_bo_busy; bufmgr_gem->bufmgr.bo_madvise = drm_intel_gem_bo_madvise; - bufmgr_gem->bufmgr.destroy = drm_intel_bufmgr_gem_destroy; + bufmgr_gem->bufmgr.destroy = drm_intel_bufmgr_gem_unref; bufmgr_gem->bufmgr.debug = 0; bufmgr_gem->bufmgr.check_aperture_space = drm_intel_gem_check_aperture_space; @@ -3373,5 +3421,10 @@ drm_intel_bufmgr_gem_init(int fd, int batch_size) DRMINITLISTHEAD(&bufmgr_gem->vma_cache); bufmgr_gem->vma_max = -1; /* unlimited by default */
- return &bufmgr_gem->bufmgr; + DRMLISTADD(&bufmgr_gem->managers, &bufmgr_list); + +exit: + pthread_mutex_unlock(&bufmgr_list_mutex); + + return bufmgr_gem != NULL ? &bufmgr_gem->bufmgr : NULL; }
Signed-off-by: Lionel Landwerlin lionel.g.landwerlin@intel.com --- intel/intel_bufmgr_gem.c | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-)
diff --git a/intel/intel_bufmgr_gem.c b/intel/intel_bufmgr_gem.c index 1e2dd77..bf6745d 100644 --- a/intel/intel_bufmgr_gem.c +++ b/intel/intel_bufmgr_gem.c @@ -1157,7 +1157,8 @@ static void drm_intel_gem_bo_unreference(drm_intel_bo *bo) drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
assert(atomic_read(&bo_gem->refcount) > 0); - if (atomic_dec_and_test(&bo_gem->refcount)) { + + if (atomic_add_unless(&bo_gem->refcount, -1, 1)) { drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr; struct timespec time; @@ -1165,8 +1166,12 @@ static void drm_intel_gem_bo_unreference(drm_intel_bo *bo) clock_gettime(CLOCK_MONOTONIC, &time);
pthread_mutex_lock(&bufmgr_gem->lock); - drm_intel_gem_bo_unreference_final(bo, time.tv_sec); - drm_intel_gem_cleanup_bo_cache(bufmgr_gem, time.tv_sec); + + if (atomic_dec_and_test(&bo_gem->refcount)) { + drm_intel_gem_bo_unreference_final(bo, time.tv_sec); + drm_intel_gem_cleanup_bo_cache(bufmgr_gem, time.tv_sec); + } + pthread_mutex_unlock(&bufmgr_gem->lock); } }
Signed-off-by: Lionel Landwerlin lionel.g.landwerlin@intel.com --- intel/intel_bufmgr_gem.c | 7 +++++++ 1 file changed, 7 insertions(+)
diff --git a/intel/intel_bufmgr_gem.c b/intel/intel_bufmgr_gem.c index bf6745d..4e34f75 100644 --- a/intel/intel_bufmgr_gem.c +++ b/intel/intel_bufmgr_gem.c @@ -1783,6 +1783,7 @@ drm_intel_gem_bo_get_reloc_count(drm_intel_bo *bo) drm_public void drm_intel_gem_bo_clear_relocs(drm_intel_bo *bo, int start) { + drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr; drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo; int i; struct timespec time; @@ -1790,7 +1791,10 @@ drm_intel_gem_bo_clear_relocs(drm_intel_bo *bo, int start) clock_gettime(CLOCK_MONOTONIC, &time);
assert(bo_gem->reloc_count >= start); + /* Unreference the cleared target buffers */ + pthread_mutex_lock(&bufmgr_gem->lock); + for (i = start; i < bo_gem->reloc_count; i++) { drm_intel_bo_gem *target_bo_gem = (drm_intel_bo_gem *) bo_gem->reloc_target_info[i].bo; if (&target_bo_gem->bo != bo) { @@ -1800,6 +1804,9 @@ drm_intel_gem_bo_clear_relocs(drm_intel_bo *bo, int start) } } bo_gem->reloc_count = start; + + pthread_mutex_unlock(&bufmgr_gem->lock); + }
/**
On Fri, Sep 12, 2014 at 01:48:34PM +0100, Lionel Landwerlin wrote:
This is getting bigger than expected. Adding the locking that Chris suggested on IRC.
Thanks for taking time to review Chris.
I'm happy with this series, Reviewed-by: Chris Wilson chris@chris-wilson.co.uk -Chris
On Mon, Sep 15, 2014 at 02:12:25PM +0100, Chris Wilson wrote:
On Fri, Sep 12, 2014 at 01:48:34PM +0100, Lionel Landwerlin wrote:
This is getting bigger than expected. Adding the locking that Chris suggested on IRC.
Thanks for taking time to review Chris.
I'm happy with this series, Reviewed-by: Chris Wilson chris@chris-wilson.co.uk
Just landed the series for Lionel (who doesn't have access to this git repo) then.
dri-devel@lists.freedesktop.org