// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Devmem TCP
 *
 * Authors:	Mina Almasry <[email protected]>
 *		Willem de Bruijn <[email protected]>
 *		Kaiyuan Zhang <[email protected]>
 */

#include <linux/dma-buf.h>
#include <linux/genalloc.h>
#include <linux/mm.h>
#include <linux/netdevice.h>
#include <linux/types.h>
#include <net/netdev_queues.h>
#include <net/netdev_rx_queue.h>
#include <net/page_pool/helpers.h>
#include <net/page_pool/memory_provider.h>
#include <trace/events/page_pool.h>

#include "devmem.h"
#include "mp_dmabuf_devmem.h"
#include "page_pool_priv.h"

/* Device memory support */

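/* Global table of all dmabuf bindings in the system, keyed by binding ID.
 * XA_FLAGS_ALLOC1 makes ID allocation start at 1, so 0 is never a valid
 * binding ID.
 */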
static DEFINE_XARRAY_FLAGS(net_devmem_dmabuf_bindings, XA_FLAGS_ALLOC1);

static const struct memory_provider_ops dmabuf_devmem_ops;

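/* A net_iov is devmem-backed iff the page_pool that owns it uses the
 * dmabuf_devmem memory provider ops.
 */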
bool net_is_devmem_iov(struct net_iov *niov)
{
	return niov->pp->mp_ops == &dmabuf_devmem_ops;
}

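/* gen_pool chunk destructor, invoked via gen_pool_for_each_chunk(): frees
 * the niov array and the owner struct backing one scatterlist entry.
 */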
static void net_devmem_dmabuf_free_chunk_owner(struct gen_pool *genpool,
					       struct gen_pool_chunk *chunk,
					       void *not_used)
{
	struct dmabuf_genpool_chunk_owner *owner = chunk->owner;

	kvfree(owner->area.niovs);
	kfree(owner);
}

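/* The DMA address of a niov is its chunk's base DMA address plus the niov's
 * page-sized index within that chunk.
 */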
static dma_addr_t net_devmem_get_dma_addr(const struct net_iov *niov)
{
	struct dmabuf_genpool_chunk_owner *owner;

	owner = net_devmem_iov_to_chunk_owner(niov);
	return owner->base_dma_addr +
	       ((dma_addr_t)net_iov_idx(niov) << PAGE_SHIFT);
}

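/* Final teardown of a binding, reached once its last reference is dropped.
 * All niovs must have been returned to the genpool by now; if size and
 * avail disagree, the pool is leaked (with a WARN) rather than destroyed.
 */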
void __net_devmem_dmabuf_binding_free(struct net_devmem_dmabuf_binding *binding)
{
	size_t size, avail;

	gen_pool_for_each_chunk(binding->chunk_pool,
				net_devmem_dmabuf_free_chunk_owner, NULL);

	size = gen_pool_size(binding->chunk_pool);
	avail = gen_pool_avail(binding->chunk_pool);

	if (!WARN(size != avail, "can't destroy genpool. size=%zu, avail=%zu",
		  size, avail))
		gen_pool_destroy(binding->chunk_pool);

	dma_buf_unmap_attachment_unlocked(binding->attachment, binding->sgt,
					  DMA_FROM_DEVICE);
	dma_buf_detach(binding->dmabuf, binding->attachment);
	dma_buf_put(binding->dmabuf);
	xa_destroy(&binding->bound_rxqs);
	kfree(binding);
}

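/* Allocate a single PAGE_SIZE chunk of device memory from the binding's
 * genpool and return the matching net_iov, with its page_pool state reset.
 * Returns NULL if the pool is exhausted.
 */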
struct net_iov *
net_devmem_alloc_dmabuf(struct net_devmem_dmabuf_binding *binding)
{
	struct dmabuf_genpool_chunk_owner *owner;
	unsigned long dma_addr;
	struct net_iov *niov;
	ssize_t offset;
	ssize_t index;

	dma_addr = gen_pool_alloc_owner(binding->chunk_pool, PAGE_SIZE,
					(void **)&owner);
	if (!dma_addr)
		return NULL;

	offset = dma_addr - owner->base_dma_addr;
	index = offset / PAGE_SIZE;
	niov = &owner->area.niovs[index];

	niov->pp_magic = 0;
	niov->pp = NULL;
	atomic_long_set(&niov->pp_ref_count, 0);

	return niov;
}

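/* Return a niov's PAGE_SIZE chunk to the binding's genpool. Freeing an
 * address the pool doesn't own would corrupt the genpool, so bail with a
 * WARN instead.
 */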
void net_devmem_free_dmabuf(struct net_iov *niov)
{
	struct net_devmem_dmabuf_binding *binding = net_devmem_iov_binding(niov);
	unsigned long dma_addr = net_devmem_get_dma_addr(niov);

	if (WARN_ON(!gen_pool_has_addr(binding->chunk_pool, dma_addr,
				       PAGE_SIZE)))
		return;

	gen_pool_free(binding->chunk_pool, dma_addr, PAGE_SIZE);
}

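/* Tear down a binding: close every RX queue it is installed on, remove it
 * from the global binding table, and drop the table's reference. The
 * binding itself is freed only when the last reference goes away.
 */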
void net_devmem_unbind_dmabuf(struct net_devmem_dmabuf_binding *binding)
{
	struct netdev_rx_queue *rxq;
	unsigned long xa_idx;
	unsigned int rxq_idx;

	if (binding->list.next)
		list_del(&binding->list);

	xa_for_each(&binding->bound_rxqs, xa_idx, rxq) {
		const struct pp_memory_provider_params mp_params = {
			.mp_priv = binding,
			.mp_ops = &dmabuf_devmem_ops,
		};

		rxq_idx = get_netdev_rx_queue_index(rxq);

		__net_mp_close_rxq(binding->dev, rxq_idx, &mp_params);
	}

	xa_erase(&net_devmem_dmabuf_bindings, binding->id);

	net_devmem_dmabuf_binding_put(binding);
}

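/* Install a binding on one RX queue: restart the queue with the dmabuf
 * memory provider, then record the queue in binding->bound_rxqs so it can
 * be found again at unbind time. On failure the queue is restored to its
 * previous state.
 */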
int net_devmem_bind_dmabuf_to_queue(struct net_device *dev, u32 rxq_idx,
				    struct net_devmem_dmabuf_binding *binding,
				    struct netlink_ext_ack *extack)
{
	struct pp_memory_provider_params mp_params = {
		.mp_priv = binding,
		.mp_ops = &dmabuf_devmem_ops,
	};
	struct netdev_rx_queue *rxq;
	u32 xa_idx;
	int err;

	err = __net_mp_open_rxq(dev, rxq_idx, &mp_params, extack);
	if (err)
		return err;

	rxq = __netif_get_rx_queue(dev, rxq_idx);
	err = xa_alloc(&binding->bound_rxqs, &xa_idx, rxq, xa_limit_32b,
		       GFP_KERNEL);
	if (err)
		goto err_close_rxq;

	return 0;

err_close_rxq:
	__net_mp_close_rxq(dev, rxq_idx, &mp_params);
	return err;
}

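/* Create a binding from a dmabuf fd: attach and DMA-map the dmabuf, then
 * carve the resulting scatterlist into a genpool of PAGE_SIZE chunks, one
 * chunk owner (and niov array) per scatterlist entry. Typically reached
 * from the netdev netlink bind-rx path.
 */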
struct net_devmem_dmabuf_binding *
net_devmem_bind_dmabuf(struct net_device *dev, unsigned int dmabuf_fd,
		       struct netlink_ext_ack *extack)
{
	struct net_devmem_dmabuf_binding *binding;
	static u32 id_alloc_next;
	struct scatterlist *sg;
	struct dma_buf *dmabuf;
	unsigned int sg_idx, i;
	unsigned long virtual;
	int err;

	dmabuf = dma_buf_get(dmabuf_fd);
	if (IS_ERR(dmabuf))
		return ERR_CAST(dmabuf);

	binding = kzalloc_node(sizeof(*binding), GFP_KERNEL,
			       dev_to_node(&dev->dev));
	if (!binding) {
		err = -ENOMEM;
		goto err_put_dmabuf;
	}

	binding->dev = dev;

	err = xa_alloc_cyclic(&net_devmem_dmabuf_bindings, &binding->id,
			      binding, xa_limit_32b, &id_alloc_next,
			      GFP_KERNEL);
	if (err < 0)
		goto err_free_binding;

	xa_init_flags(&binding->bound_rxqs, XA_FLAGS_ALLOC);

	refcount_set(&binding->ref, 1);

	mutex_init(&binding->lock);

	binding->dmabuf = dmabuf;

	binding->attachment = dma_buf_attach(binding->dmabuf, dev->dev.parent);
	if (IS_ERR(binding->attachment)) {
		err = PTR_ERR(binding->attachment);
		NL_SET_ERR_MSG(extack, "Failed to bind dmabuf to device");
		goto err_free_id;
	}

	binding->sgt = dma_buf_map_attachment_unlocked(binding->attachment,
						       DMA_FROM_DEVICE);
	if (IS_ERR(binding->sgt)) {
		err = PTR_ERR(binding->sgt);
		NL_SET_ERR_MSG(extack, "Failed to map dmabuf attachment");
		goto err_detach;
	}

	/* For simplicity we expect to make PAGE_SIZE allocations, but the
	 * binding can be much more flexible than that. We may be able to
	 * allocate MTU sized chunks here. Leave that for future work...
	 */
	binding->chunk_pool =
		gen_pool_create(PAGE_SHIFT, dev_to_node(&dev->dev));
	if (!binding->chunk_pool) {
		err = -ENOMEM;
		goto err_unmap;
	}

	virtual = 0;
	for_each_sgtable_dma_sg(binding->sgt, sg, sg_idx) {
		dma_addr_t dma_addr = sg_dma_address(sg);
		struct dmabuf_genpool_chunk_owner *owner;
		size_t len = sg_dma_len(sg);
		struct net_iov *niov;

		owner = kzalloc_node(sizeof(*owner), GFP_KERNEL,
				     dev_to_node(&dev->dev));
		if (!owner) {
			err = -ENOMEM;
			goto err_free_chunks;
		}

		owner->area.base_virtual = virtual;
		owner->base_dma_addr = dma_addr;
		owner->area.num_niovs = len / PAGE_SIZE;
		owner->binding = binding;

		err = gen_pool_add_owner(binding->chunk_pool, dma_addr,
					 dma_addr, len, dev_to_node(&dev->dev),
					 owner);
		if (err) {
			kfree(owner);
			err = -EINVAL;
			goto err_free_chunks;
		}

		owner->area.niovs = kvmalloc_array(owner->area.num_niovs,
						   sizeof(*owner->area.niovs),
						   GFP_KERNEL);
		if (!owner->area.niovs) {
			err = -ENOMEM;
			goto err_free_chunks;
		}

		for (i = 0; i < owner->area.num_niovs; i++) {
			niov = &owner->area.niovs[i];
			niov->owner = &owner->area;
			page_pool_set_dma_addr_netmem(net_iov_to_netmem(niov),
						      net_devmem_get_dma_addr(niov));
		}

		virtual += len;
	}

	return binding;

err_free_chunks:
	gen_pool_for_each_chunk(binding->chunk_pool,
				net_devmem_dmabuf_free_chunk_owner, NULL);
	gen_pool_destroy(binding->chunk_pool);
err_unmap:
	dma_buf_unmap_attachment_unlocked(binding->attachment, binding->sgt,
					  DMA_FROM_DEVICE);
err_detach:
	dma_buf_detach(dmabuf, binding->attachment);
err_free_id:
	xa_erase(&net_devmem_dmabuf_bindings, binding->id);
err_free_binding:
	kfree(binding);
err_put_dmabuf:
	dma_buf_put(dmabuf);
	return ERR_PTR(err);
}

/*** "Dmabuf devmem memory provider" ***/

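/* Memory provider init, called when a page_pool is created on a bound RX
 * queue. dma-buf addresses must not be dma_sync'd and only order-0 pools
 * are supported; the pool holds a binding reference until destroy.
 */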
int mp_dmabuf_devmem_init(struct page_pool *pool)
{
	struct net_devmem_dmabuf_binding *binding = pool->mp_priv;

	if (!binding)
		return -EINVAL;

	/* dma-buf dma addresses do not need and should not be used with
	 * dma_sync_for_cpu/device. Force disable dma_sync.
	 */
	pool->dma_sync = false;
	pool->dma_sync_for_cpu = false;

	if (pool->p.order != 0)
		return -E2BIG;

	net_devmem_dmabuf_binding_get(binding);
	return 0;
}

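/* Allocation hook: hand out one niov from the binding as a netmem_ref,
 * accounting for it exactly like an ordinary page_pool page.
 */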
netmem_ref mp_dmabuf_devmem_alloc_netmems(struct page_pool *pool, gfp_t gfp)
{
	struct net_devmem_dmabuf_binding *binding = pool->mp_priv;
	struct net_iov *niov;
	netmem_ref netmem;

	niov = net_devmem_alloc_dmabuf(binding);
	if (!niov)
		return 0;

	netmem = net_iov_to_netmem(niov);

	page_pool_set_pp_info(pool, netmem);

	pool->pages_state_hold_cnt++;
	trace_page_pool_state_hold(pool, netmem, pool->pages_state_hold_cnt);
	return netmem;
}

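/* Pool teardown: drop the binding reference taken in mp_dmabuf_devmem_init(). */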
void mp_dmabuf_devmem_destroy(struct page_pool *pool)
{
	struct net_devmem_dmabuf_binding *binding = pool->mp_priv;

	net_devmem_dmabuf_binding_put(binding);
}

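/* Release hook: take the niov back from the page_pool and return it to the
 * binding. Always returns false so the page_pool never tries to
 * put_page() a net_iov.
 */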
bool mp_dmabuf_devmem_release_page(struct page_pool *pool, netmem_ref netmem)
{
	long refcount = atomic_long_read(netmem_get_pp_ref_count_ref(netmem));

	if (WARN_ON_ONCE(!netmem_is_net_iov(netmem)))
		return false;

	if (WARN_ON_ONCE(refcount != 1))
		return false;

	page_pool_clear_pp_info(netmem);

	net_devmem_free_dmabuf(netmem_to_net_iov(netmem));

	/* We don't want the page pool put_page()ing our net_iovs. */
	return false;
}

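/* Report the binding ID over netlink, as a queue attribute when rxq is
 * given and as a page-pool attribute otherwise.
 */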
static int mp_dmabuf_devmem_nl_fill(void *mp_priv, struct sk_buff *rsp,
				    struct netdev_rx_queue *rxq)
{
	const struct net_devmem_dmabuf_binding *binding = mp_priv;
	int type = rxq ? NETDEV_A_QUEUE_DMABUF : NETDEV_A_PAGE_POOL_DMABUF;

	return nla_put_u32(rsp, type, binding->id);
}

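/* Queue teardown hook: forget the queue in binding->bound_rxqs. Once no
 * bound queues remain, clear binding->dev under binding->lock so that late
 * users of the binding can tell the device is gone.
 */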
static void mp_dmabuf_devmem_uninstall(void *mp_priv,
				       struct netdev_rx_queue *rxq)
{
	struct net_devmem_dmabuf_binding *binding = mp_priv;
	struct netdev_rx_queue *bound_rxq;
	unsigned long xa_idx;

	xa_for_each(&binding->bound_rxqs, xa_idx, bound_rxq) {
		if (bound_rxq == rxq) {
			xa_erase(&binding->bound_rxqs, xa_idx);
			if (xa_empty(&binding->bound_rxqs)) {
				mutex_lock(&binding->lock);
				binding->dev = NULL;
				mutex_unlock(&binding->lock);
			}
			break;
		}
	}
}

static const struct memory_provider_ops dmabuf_devmem_ops = {
	.init = mp_dmabuf_devmem_init,
	.destroy = mp_dmabuf_devmem_destroy,
	.alloc_netmems = mp_dmabuf_devmem_alloc_netmems,
	.release_netmem = mp_dmabuf_devmem_release_page,
	.nl_fill = mp_dmabuf_devmem_nl_fill,
	.uninstall = mp_dmabuf_devmem_uninstall,
};