Lines matching refs: node

The matches below are the references to the local variable node in the vhost-user IOTLB functions, grouped by the function they occur in. The number at the start of each entry is the line number in the source file; (local) marks the line where node is declared.
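
For context, here is a minimal sketch of the data structures the matched lines appear to operate on. Only the names visible in the matches (the iova, uaddr, size, perm and next fields, the iotlb_list and iotlb_pending_list heads, and the iotlb_pool mempool) come from the listing; the exact layout, the read/write locks and the entry counter are assumptions added so the per-function sketches below are self-contained.

#include <stdint.h>
#include <stdbool.h>
#include <sys/queue.h>

#include <rte_branch_prediction.h>
#include <rte_mempool.h>
#include <rte_random.h>
#include <rte_rwlock.h>
#include <rte_tailq.h>

/* One cached or pending IOVA translation; field names match the listing. */
struct vhost_iotlb_entry {
        TAILQ_ENTRY(vhost_iotlb_entry) next;

        uint64_t iova;   /* guest I/O virtual address */
        uint64_t uaddr;  /* host user-space address it maps to */
        uint64_t size;   /* length of the mapping in bytes */
        uint8_t  perm;   /* read/write permission flags */
};

/* Only the IOTLB-related virtqueue fields are sketched here. */
struct vhost_virtqueue {
        /* Resolved translations, kept sorted by iova. */
        TAILQ_HEAD(, vhost_iotlb_entry) iotlb_list;
        /* Translation misses reported to the vhost-user front-end, awaiting a reply. */
        TAILQ_HEAD(, vhost_iotlb_entry) iotlb_pending_list;
        struct rte_mempool *iotlb_pool;   /* backing storage for entries */
        rte_rwlock_t iotlb_lock;          /* assumed: protects iotlb_list */
        rte_rwlock_t iotlb_pending_lock;  /* assumed: protects iotlb_pending_list */
        int iotlb_cache_nr;               /* assumed: number of entries in iotlb_list */
};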

vhost_user_iotlb_pending_remove_all():
   31  struct vhost_iotlb_entry *node, *temp_node;    (local)
   35  RTE_TAILQ_FOREACH_SAFE(node, &vq->iotlb_pending_list, next, temp_node) {
   36          TAILQ_REMOVE(&vq->iotlb_pending_list, node, next);
   37          rte_mempool_put(vq->iotlb_pool, node);
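
Put together, this is a safe traversal that drains the pending list and returns every entry to the mempool. A sketch, assuming the structures above and write-locking around the walk:

void
vhost_user_iotlb_pending_remove_all(struct vhost_virtqueue *vq)
{
        struct vhost_iotlb_entry *node, *temp_node;

        rte_rwlock_write_lock(&vq->iotlb_pending_lock); /* assumed locking */

        /* The _SAFE variant is needed because nodes are unlinked while iterating. */
        RTE_TAILQ_FOREACH_SAFE(node, &vq->iotlb_pending_list, next, temp_node) {
                TAILQ_REMOVE(&vq->iotlb_pending_list, node, next);
                rte_mempool_put(vq->iotlb_pool, node);
        }

        rte_rwlock_write_unlock(&vq->iotlb_pending_lock);
}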

vhost_user_iotlb_pending_miss():
   47  struct vhost_iotlb_entry *node;    (local)
   52  TAILQ_FOREACH(node, &vq->iotlb_pending_list, next) {
   53          if ((node->iova == iova) && (node->perm == perm)) {
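
This is a plain lookup: it checks whether a miss for this exact iova/perm pair has already been reported, so a read lock and an early break suffice. A sketch under the same assumptions:

bool
vhost_user_iotlb_pending_miss(struct vhost_virtqueue *vq, uint64_t iova,
                uint8_t perm)
{
        struct vhost_iotlb_entry *node;
        bool found = false;

        rte_rwlock_read_lock(&vq->iotlb_pending_lock); /* assumed locking */

        TAILQ_FOREACH(node, &vq->iotlb_pending_list, next) {
                if ((node->iova == iova) && (node->perm == perm)) {
                        found = true;
                        break;
                }
        }

        rte_rwlock_read_unlock(&vq->iotlb_pending_lock);

        return found;
}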

vhost_user_iotlb_pending_insert():
   68  struct vhost_iotlb_entry *node;    (local)
   71  ret = rte_mempool_get(vq->iotlb_pool, (void **)&node);
   80  ret = rte_mempool_get(vq->iotlb_pool, (void **)&node);
   89  node->iova = iova;
   90  node->perm = perm;
   94  TAILQ_INSERT_TAIL(&vq->iotlb_pending_list, node, next);
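
The two rte_mempool_get() calls at lines 71 and 80 suggest a retry path: if the pool is exhausted, some entries are reclaimed and the allocation is attempted once more before the entry is filled in and appended. What exactly is reclaimed between the two attempts is not visible in the matches; the sketch below simply drops all pending entries and retries once, which is an assumption.

void
vhost_user_iotlb_pending_insert(struct vhost_virtqueue *vq,
                uint64_t iova, uint8_t perm)
{
        struct vhost_iotlb_entry *node;
        int ret;

        ret = rte_mempool_get(vq->iotlb_pool, (void **)&node);
        if (ret) {
                /* Assumed recovery path: free up entries, then retry once. */
                vhost_user_iotlb_pending_remove_all(vq);
                ret = rte_mempool_get(vq->iotlb_pool, (void **)&node);
                if (ret)
                        return; /* still no room: drop the request */
        }

        node->iova = iova;
        node->perm = perm;

        rte_rwlock_write_lock(&vq->iotlb_pending_lock); /* assumed locking */
        TAILQ_INSERT_TAIL(&vq->iotlb_pending_list, node, next);
        rte_rwlock_write_unlock(&vq->iotlb_pending_lock);
}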

vhost_user_iotlb_pending_remove():
  103  struct vhost_iotlb_entry *node, *temp_node;    (local)
  107  RTE_TAILQ_FOREACH_SAFE(node, &vq->iotlb_pending_list, next,
  109          if (node->iova < iova)
  111          if (node->iova >= iova + size)
  113          if ((node->perm & perm) != node->perm)
  115          TAILQ_REMOVE(&vq->iotlb_pending_list, node, next);
  116          rte_mempool_put(vq->iotlb_pool, node);
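
The three checks filter for pending entries that fall inside [iova, iova + size) and whose permissions are covered by perm; everything that passes is unlinked and returned to the pool. In the sketch, the continue statements on the lines skipped by the matches, and the locking, are assumptions:

void
vhost_user_iotlb_pending_remove(struct vhost_virtqueue *vq,
                uint64_t iova, uint64_t size, uint8_t perm)
{
        struct vhost_iotlb_entry *node, *temp_node;

        rte_rwlock_write_lock(&vq->iotlb_pending_lock); /* assumed locking */

        RTE_TAILQ_FOREACH_SAFE(node, &vq->iotlb_pending_list, next, temp_node) {
                if (node->iova < iova)
                        continue; /* assumed: starts before the range */
                if (node->iova >= iova + size)
                        continue; /* assumed: starts past the range */
                if ((node->perm & perm) != node->perm)
                        continue; /* assumed: permissions not covered */

                TAILQ_REMOVE(&vq->iotlb_pending_list, node, next);
                rte_mempool_put(vq->iotlb_pool, node);
        }

        rte_rwlock_write_unlock(&vq->iotlb_pending_lock);
}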

vhost_user_iotlb_cache_remove_all():
  125  struct vhost_iotlb_entry *node, *temp_node;    (local)
  129  RTE_TAILQ_FOREACH_SAFE(node, &vq->iotlb_list, next, temp_node) {
  130          TAILQ_REMOVE(&vq->iotlb_list, node, next);
  131          rte_mempool_put(vq->iotlb_pool, node);
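
Same drain pattern as the pending list, applied to the translation cache itself. A sketch under the same assumptions; the counter reset is an assumption as well:

void
vhost_user_iotlb_cache_remove_all(struct vhost_virtqueue *vq)
{
        struct vhost_iotlb_entry *node, *temp_node;

        rte_rwlock_write_lock(&vq->iotlb_lock); /* assumed locking */

        RTE_TAILQ_FOREACH_SAFE(node, &vq->iotlb_list, next, temp_node) {
                TAILQ_REMOVE(&vq->iotlb_list, node, next);
                rte_mempool_put(vq->iotlb_pool, node);
        }

        vq->iotlb_cache_nr = 0; /* assumed counter reset */

        rte_rwlock_write_unlock(&vq->iotlb_lock);
}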

vhost_user_iotlb_cache_random_evict():
  142  struct vhost_iotlb_entry *node, *temp_node;    (local)
  149  RTE_TAILQ_FOREACH_SAFE(node, &vq->iotlb_list, next, temp_node) {
  151          TAILQ_REMOVE(&vq->iotlb_list, node, next);
  152          rte_mempool_put(vq->iotlb_pool, node);
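
The function name and the gap between lines 149 and 151 suggest the loop walks to a randomly chosen position before removing one entry. The victim selection, the break and the empty-cache guard are not in the matches and are assumptions here:

void
vhost_user_iotlb_cache_random_evict(struct vhost_virtqueue *vq)
{
        struct vhost_iotlb_entry *node, *temp_node;
        int entry_idx;

        rte_rwlock_write_lock(&vq->iotlb_lock); /* assumed locking */

        if (vq->iotlb_cache_nr <= 0) /* guard added for the sketch */
                goto out;

        /* Assumed: pick a random victim position in the cache. */
        entry_idx = rte_rand() % vq->iotlb_cache_nr;

        RTE_TAILQ_FOREACH_SAFE(node, &vq->iotlb_list, next, temp_node) {
                if (entry_idx == 0) {
                        TAILQ_REMOVE(&vq->iotlb_list, node, next);
                        rte_mempool_put(vq->iotlb_pool, node);
                        vq->iotlb_cache_nr--;
                        break;
                }
                entry_idx--;
        }

out:
        rte_rwlock_write_unlock(&vq->iotlb_lock);
}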

vhost_user_iotlb_cache_insert():
  167  struct vhost_iotlb_entry *node, *new_node;    (local)
  195  TAILQ_FOREACH(node, &vq->iotlb_list, next) {
  200          if (node->iova == new_node->iova) {
  203          } else if (node->iova > new_node->iova) {
  204                  TAILQ_INSERT_BEFORE(node, new_node, next);
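
The comparison at line 203 and the TAILQ_INSERT_BEFORE show that the cache is kept sorted by iova, and an entry with an equal iova is treated as a duplicate. The allocation prologue and the tail-insert fallback are assumptions; the sketch allocates once and gives up on failure rather than guessing at whatever happens between lines 167 and 195.

void
vhost_user_iotlb_cache_insert(struct vhost_virtqueue *vq, uint64_t iova,
                uint64_t uaddr, uint64_t size, uint8_t perm)
{
        struct vhost_iotlb_entry *node, *new_node;

        /* Assumed: allocate the new entry from the same mempool. */
        if (rte_mempool_get(vq->iotlb_pool, (void **)&new_node) != 0)
                return;

        new_node->iova = iova;
        new_node->uaddr = uaddr;
        new_node->size = size;
        new_node->perm = perm;

        rte_rwlock_write_lock(&vq->iotlb_lock); /* assumed locking */

        TAILQ_FOREACH(node, &vq->iotlb_list, next) {
                if (node->iova == new_node->iova) {
                        /* Duplicate translation: keep the existing entry. */
                        rte_mempool_put(vq->iotlb_pool, new_node);
                        goto unlock;
                } else if (node->iova > new_node->iova) {
                        /* Keep the list sorted by iova. */
                        TAILQ_INSERT_BEFORE(node, new_node, next);
                        vq->iotlb_cache_nr++;
                        goto unlock;
                }
        }

        /* Larger than everything cached so far: append at the tail. */
        TAILQ_INSERT_TAIL(&vq->iotlb_list, new_node, next);
        vq->iotlb_cache_nr++;

unlock:
        rte_rwlock_write_unlock(&vq->iotlb_lock);
}

Keeping the list sorted is what allows the remove and find paths below to stop scanning as soon as they pass the requested range.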

vhost_user_iotlb_cache_remove():
  224  struct vhost_iotlb_entry *node, *temp_node;    (local)
  231  RTE_TAILQ_FOREACH_SAFE(node, &vq->iotlb_list, next, temp_node) {
  233          if (unlikely(iova + size < node->iova))
  236          if (iova < node->iova + node->size) {
  237                  TAILQ_REMOVE(&vq->iotlb_list, node, next);
  238                  rte_mempool_put(vq->iotlb_pool, node);
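
Invalidation removes every cached entry that overlaps [iova, iova + size). Because the list is sorted by iova, the first entry that starts past the end of the range can terminate the walk; the break presumably following line 233, the size-zero guard and the counter update are assumptions in this sketch:

void
vhost_user_iotlb_cache_remove(struct vhost_virtqueue *vq,
                uint64_t iova, uint64_t size)
{
        struct vhost_iotlb_entry *node, *temp_node;

        if (unlikely(size == 0))
                return; /* assumed guard */

        rte_rwlock_write_lock(&vq->iotlb_lock); /* assumed locking */

        RTE_TAILQ_FOREACH_SAFE(node, &vq->iotlb_list, next, temp_node) {
                /* Sorted list: no later entry can overlap either. */
                if (unlikely(iova + size < node->iova))
                        break; /* assumed */

                if (iova < node->iova + node->size) {
                        TAILQ_REMOVE(&vq->iotlb_list, node, next);
                        rte_mempool_put(vq->iotlb_pool, node);
                        vq->iotlb_cache_nr--; /* assumed counter update */
                }
        }

        rte_rwlock_write_unlock(&vq->iotlb_lock);
}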

vhost_user_iotlb_cache_find():
  250  struct vhost_iotlb_entry *node;    (local)
  256  TAILQ_FOREACH(node, &vq->iotlb_list, next) {
  258          if (unlikely(iova < node->iova))
  261          if (iova >= node->iova + node->size)
  264          if (unlikely((perm & node->perm) != perm)) {
  269          offset = iova - node->iova;
  271          vva = node->uaddr + offset;
  273          mapped += node->size - offset;
  274          iova = node->iova + node->size;
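
The lookup translates iova to a host virtual address and, by advancing iova to the end of each matching entry (line 274), accumulates how many contiguous bytes of the requested range are covered. The control flow around the matched lines (early break, continue, permission-failure handling, the *size update) and the locking convention are assumptions in this sketch:

uint64_t
vhost_user_iotlb_cache_find(struct vhost_virtqueue *vq, uint64_t iova,
                uint64_t *size, uint8_t perm)
{
        struct vhost_iotlb_entry *node;
        uint64_t offset, vva = 0, mapped = 0;

        if (unlikely(*size == 0))
                goto out; /* assumed guard */

        /* Assumed: read-side protection of iotlb_list is held by the caller. */
        TAILQ_FOREACH(node, &vq->iotlb_list, next) {
                /* Sorted by iova: a gap before this entry ends the search. */
                if (unlikely(iova < node->iova))
                        break;

                /* Entry ends before iova: keep scanning. */
                if (iova >= node->iova + node->size)
                        continue;

                /* Requested access not allowed by this entry. */
                if (unlikely((perm & node->perm) != perm)) {
                        vva = 0;
                        break;
                }

                offset = iova - node->iova;
                if (vva == 0)
                        vva = node->uaddr + offset; /* start of the translation */

                mapped += node->size - offset;
                iova = node->iova + node->size; /* continue from the entry's end */

                if (mapped >= *size)
                        break;
        }

out:
        /* Report how much of the requested range was actually covered. */
        if (unlikely(mapped < *size))
                *size = mapped;

        return vva;
}

A caller would pass the length it wants translated in *size and, on return, check both the returned address and the possibly reduced *size to see whether the whole range was resolved or a miss has to be reported.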