Lines matching references to 'e' (a struct amdgpu_sync_entry pointer) in amdgpu_sync.c:
135 struct amdgpu_sync_entry *e; in amdgpu_sync_add_later() local
137 hash_for_each_possible(sync->fences, e, node, f->context) { in amdgpu_sync_add_later()
138 if (dma_fence_is_signaled(e->fence)) { in amdgpu_sync_add_later()
139 dma_fence_put(e->fence); in amdgpu_sync_add_later()
140 e->fence = dma_fence_get(f); in amdgpu_sync_add_later()
144 if (likely(e->fence->context == f->context)) { in amdgpu_sync_add_later()
145 amdgpu_sync_keep_later(&e->fence, f); in amdgpu_sync_add_later()
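
Read together, the references at lines 135-145 describe the reuse path of amdgpu_sync_add_later(): the hash bucket for f->context is walked, a signaled fence is replaced outright, and an unsignaled fence from the same context is merged through amdgpu_sync_keep_later(). A minimal sketch of that pattern follows; the entry layout is reconstructed from the e->node and e->fence references above, while the container definition, hash size, and bool return type are assumptions.

#include <linux/dma-fence.h>
#include <linux/hashtable.h>

/* Entry layout reconstructed from the e->node / e->fence references
 * above; the container and its hash size are assumptions. */
struct amdgpu_sync_entry {
	struct hlist_node	node;
	struct dma_fence	*fence;
};

struct amdgpu_sync {
	DECLARE_HASHTABLE(fences, 4);	/* keyed by dma_fence context */
};

/* Referenced at line 145 of the listing; keeps the later of two fences. */
void amdgpu_sync_keep_later(struct dma_fence **keep, struct dma_fence *fence);

/* Sketch: fold fence @f into an existing entry for the same context,
 * returning true when no new entry needs to be allocated. */
static bool amdgpu_sync_add_later(struct amdgpu_sync *sync,
				  struct dma_fence *f)
{
	struct amdgpu_sync_entry *e;

	hash_for_each_possible(sync->fences, e, node, f->context) {
		if (dma_fence_is_signaled(e->fence)) {
			/* Stale entry: drop the old fence, keep the new one. */
			dma_fence_put(e->fence);
			e->fence = dma_fence_get(f);
			return true;
		}

		if (likely(e->fence->context == f->context)) {
			/* Same context, both unsignaled: keep the later one. */
			amdgpu_sync_keep_later(&e->fence, f);
			return true;
		}
	}
	return false;
}
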
164 struct amdgpu_sync_entry *e; in amdgpu_sync_fence() local
172 e = kmem_cache_alloc(amdgpu_sync_slab, flags); in amdgpu_sync_fence()
173 if (!e) in amdgpu_sync_fence()
176 hash_add(sync->fences, &e->node, f->context); in amdgpu_sync_fence()
177 e->fence = dma_fence_get(f); in amdgpu_sync_fence()
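
Lines 164-177 are the fallback when no existing entry matches: amdgpu_sync_fence() allocates a fresh entry from the amdgpu_sync_slab cache, hashes it in under the fence context, and takes its own reference on the fence. A sketch under the same assumptions; the NULL check, the gfp_t parameter, and the return codes are inferred rather than taken from the listing.

#include <linux/slab.h>

/* The slab cache referenced at line 172; created elsewhere at init time. */
extern struct kmem_cache *amdgpu_sync_slab;

/* Sketch: collect fence @f into @sync, allocating with @flags. */
int amdgpu_sync_fence(struct amdgpu_sync *sync, struct dma_fence *f,
		      gfp_t flags)
{
	struct amdgpu_sync_entry *e;

	if (!f)
		return 0;

	/* Fast path: merge into an existing entry for this context. */
	if (amdgpu_sync_add_later(sync, f))
		return 0;

	e = kmem_cache_alloc(amdgpu_sync_slab, flags);
	if (!e)
		return -ENOMEM;

	/* Key on the fence context and pin the fence for later use. */
	hash_add(sync->fences, &e->node, f->context);
	e->fence = dma_fence_get(f);
	return 0;
}
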
301 static void amdgpu_sync_entry_free(struct amdgpu_sync_entry *e) in amdgpu_sync_entry_free() argument
303 hash_del(&e->node); in amdgpu_sync_entry_free()
304 dma_fence_put(e->fence); in amdgpu_sync_entry_free()
305 kmem_cache_free(amdgpu_sync_slab, e); in amdgpu_sync_entry_free()
320 struct amdgpu_sync_entry *e; in amdgpu_sync_peek_fence() local
324 hash_for_each_safe(sync->fences, i, tmp, e, node) { in amdgpu_sync_peek_fence()
325 struct dma_fence *f = e->fence; in amdgpu_sync_peek_fence()
329 amdgpu_sync_entry_free(e); in amdgpu_sync_peek_fence()
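
The remaining references share one consumer shape: hash_for_each_safe() is used so entries can be unhashed and freed mid-walk, with signaled fences reaped through the amdgpu_sync_entry_free() helper shown at lines 301-305. For amdgpu_sync_peek_fence() (lines 320-329) that loop might look roughly like the sketch below; the ring handling of the real function is omitted.

/* Sketch: return the first still-pending fence without removing it,
 * reaping signaled entries as a side effect. Ring handling omitted. */
struct dma_fence *amdgpu_sync_peek_fence(struct amdgpu_sync *sync)
{
	struct amdgpu_sync_entry *e;
	struct hlist_node *tmp;
	int i;

	hash_for_each_safe(sync->fences, i, tmp, e, node) {
		struct dma_fence *f = e->fence;

		if (dma_fence_is_signaled(f)) {
			/* Already done: unhash, drop the reference, free. */
			amdgpu_sync_entry_free(e);
			continue;
		}

		return f;
	}
	return NULL;
}
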
359 struct amdgpu_sync_entry *e; in amdgpu_sync_get_fence() local
364 hash_for_each_safe(sync->fences, i, tmp, e, node) { in amdgpu_sync_get_fence()
366 f = e->fence; in amdgpu_sync_get_fence()
368 hash_del(&e->node); in amdgpu_sync_get_fence()
369 kmem_cache_free(amdgpu_sync_slab, e); in amdgpu_sync_get_fence()
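
amdgpu_sync_get_fence() (lines 359-369) is the one walk that does not go through amdgpu_sync_entry_free(): the fence reference is handed to the caller, so hash_del() and kmem_cache_free() are open-coded and no dma_fence_put() is done on the returned fence. A sketch; the handling of fences that signal before they are returned is an assumption.

/* Sketch: remove and return one pending fence; the caller inherits
 * the reference, which is why the entry is freed without putting
 * the returned fence. */
struct dma_fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync)
{
	struct amdgpu_sync_entry *e;
	struct hlist_node *tmp;
	struct dma_fence *f;
	int i;

	hash_for_each_safe(sync->fences, i, tmp, e, node) {
		f = e->fence;

		hash_del(&e->node);
		kmem_cache_free(amdgpu_sync_slab, e);

		if (!dma_fence_is_signaled(f))
			return f;

		/* Signaled in the meantime: drop it and keep looking. */
		dma_fence_put(f);
	}
	return NULL;
}
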
390 struct amdgpu_sync_entry *e; in amdgpu_sync_clone() local
395 hash_for_each_safe(source->fences, i, tmp, e, node) { in amdgpu_sync_clone()
396 f = e->fence; in amdgpu_sync_clone()
402 amdgpu_sync_entry_free(e); in amdgpu_sync_clone()
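
amdgpu_sync_clone() (lines 390-402) reads as the same walk applied to a source container: still-pending fences are copied into a second sync object while signaled entries are reaped from the source. In the sketch below, the call that inserts each fence into the clone and the error handling are assumptions.

/* Sketch: copy pending fences from @source into @clone, freeing
 * source entries whose fences have already signaled. */
int amdgpu_sync_clone(struct amdgpu_sync *source, struct amdgpu_sync *clone)
{
	struct amdgpu_sync_entry *e;
	struct hlist_node *tmp;
	struct dma_fence *f;
	int i, r;

	hash_for_each_safe(source->fences, i, tmp, e, node) {
		f = e->fence;
		if (!dma_fence_is_signaled(f)) {
			/* Assumed: reuse the insertion path sketched above. */
			r = amdgpu_sync_fence(clone, f, GFP_KERNEL);
			if (r)
				return r;
		} else {
			amdgpu_sync_entry_free(e);
		}
	}
	return 0;
}
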
437 struct amdgpu_sync_entry *e; in amdgpu_sync_push_to_job() local
442 hash_for_each_safe(sync->fences, i, tmp, e, node) { in amdgpu_sync_push_to_job()
443 f = e->fence; in amdgpu_sync_push_to_job()
445 amdgpu_sync_entry_free(e); in amdgpu_sync_push_to_job()
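
amdgpu_sync_push_to_job() (lines 437-445) drains the container into a job, freeing signaled entries and turning the remaining fences into scheduler dependencies. In this sketch the drm_sched_job_add_dependency() call and the struct amdgpu_job layout are assumptions based on the DRM scheduler API, not on the listing.

#include <drm/gpu_scheduler.h>

/* Assumed layout: amdgpu_job embeds a drm_sched_job as "base". */
struct amdgpu_job {
	struct drm_sched_job base;
};

/* Sketch: move every still-pending fence into the job's dependency
 * list and free the bookkeeping entries along the way. */
int amdgpu_sync_push_to_job(struct amdgpu_sync *sync, struct amdgpu_job *job)
{
	struct amdgpu_sync_entry *e;
	struct hlist_node *tmp;
	struct dma_fence *f;
	int i, r;

	hash_for_each_safe(sync->fences, i, tmp, e, node) {
		f = e->fence;
		if (dma_fence_is_signaled(f)) {
			amdgpu_sync_entry_free(e);
			continue;
		}

		/* drm_sched_job_add_dependency() consumes the reference
		 * handed to it, so grab a dedicated one for the job. */
		r = drm_sched_job_add_dependency(&job->base, dma_fence_get(f));
		if (r)
			return r;

		/* Dependency recorded; drop our bookkeeping entry. */
		amdgpu_sync_entry_free(e);
	}
	return 0;
}
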
461 struct amdgpu_sync_entry *e; in amdgpu_sync_wait() local
465 hash_for_each_safe(sync->fences, i, tmp, e, node) { in amdgpu_sync_wait()
466 r = dma_fence_wait(e->fence, intr); in amdgpu_sync_wait()
470 amdgpu_sync_entry_free(e); in amdgpu_sync_wait()
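
amdgpu_sync_wait() (lines 461-470) is the blocking variant: each collected fence is waited on, optionally interruptibly, and its entry is freed once the wait completes. A sketch:

/* Sketch: wait for every collected fence; @intr selects interruptible
 * waits, and the first wait error aborts without freeing that entry. */
int amdgpu_sync_wait(struct amdgpu_sync *sync, bool intr)
{
	struct amdgpu_sync_entry *e;
	struct hlist_node *tmp;
	int i, r;

	hash_for_each_safe(sync->fences, i, tmp, e, node) {
		r = dma_fence_wait(e->fence, intr);
		if (r)
			return r;

		amdgpu_sync_entry_free(e);
	}
	return 0;
}
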
485 struct amdgpu_sync_entry *e; in amdgpu_sync_free() local
489 hash_for_each_safe(sync->fences, i, tmp, e, node) in amdgpu_sync_free()
490 amdgpu_sync_entry_free(e); in amdgpu_sync_free()