// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *	Devmem TCP
 *
 *	Authors:	Mina Almasry <[email protected]>
 *			Willem de Bruijn <[email protected]>
 *			Kaiyuan Zhang <[email protected]>
 */

#include <linux/dma-buf.h>
#include <linux/genalloc.h>
#include <linux/mm.h>
#include <linux/netdevice.h>
#include <linux/types.h>
#include <net/netdev_queues.h>
#include <net/netdev_rx_queue.h>
#include <net/page_pool/helpers.h>
#include <net/page_pool/memory_provider.h>
#include <trace/events/page_pool.h>

#include "devmem.h"
#include "mp_dmabuf_devmem.h"
#include "page_pool_priv.h"

/* Device memory support */

static DEFINE_XARRAY_FLAGS(net_devmem_dmabuf_bindings, XA_FLAGS_ALLOC1);

static const struct memory_provider_ops dmabuf_devmem_ops;

bool net_is_devmem_iov(struct net_iov *niov)
{
	return niov->pp->mp_ops == &dmabuf_devmem_ops;
}

static void net_devmem_dmabuf_free_chunk_owner(struct gen_pool *genpool,
					       struct gen_pool_chunk *chunk,
					       void *not_used)
{
	struct dmabuf_genpool_chunk_owner *owner = chunk->owner;

	kvfree(owner->area.niovs);
	kfree(owner);
}

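/* Compute the device DMA address backing @niov: the owning chunk's base DMA
 * address plus the niov's index within the chunk, shifted by PAGE_SHIFT.
 */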
static dma_addr_t net_devmem_get_dma_addr(const struct net_iov *niov)
{
	struct dmabuf_genpool_chunk_owner *owner;

	owner = net_devmem_iov_to_chunk_owner(niov);
	return owner->base_dma_addr +
	       ((dma_addr_t)net_iov_idx(niov) << PAGE_SHIFT);
}

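/* Final teardown of a binding: free the per-chunk owners, destroy the genpool
 * (warning if any chunks are still outstanding), then unmap, detach and
 * release the underlying dma-buf.
 */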
void __net_devmem_dmabuf_binding_free(struct net_devmem_dmabuf_binding *binding)
{
	size_t size, avail;

	gen_pool_for_each_chunk(binding->chunk_pool,
				net_devmem_dmabuf_free_chunk_owner, NULL);

	size = gen_pool_size(binding->chunk_pool);
	avail = gen_pool_avail(binding->chunk_pool);

	if (!WARN(size != avail, "can't destroy genpool. size=%zu, avail=%zu",
		  size, avail))
		gen_pool_destroy(binding->chunk_pool);

	dma_buf_unmap_attachment_unlocked(binding->attachment, binding->sgt,
					  DMA_FROM_DEVICE);
	dma_buf_detach(binding->dmabuf, binding->attachment);
	dma_buf_put(binding->dmabuf);
	xa_destroy(&binding->bound_rxqs);
	kfree(binding);
}

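/* Carve one PAGE_SIZE chunk out of the binding's genpool and return the
 * corresponding net_iov with its page pool fields cleared.
 */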
struct net_iov *
net_devmem_alloc_dmabuf(struct net_devmem_dmabuf_binding *binding)
{
	struct dmabuf_genpool_chunk_owner *owner;
	unsigned long dma_addr;
	struct net_iov *niov;
	ssize_t offset;
	ssize_t index;

	dma_addr = gen_pool_alloc_owner(binding->chunk_pool, PAGE_SIZE,
					(void **)&owner);
	if (!dma_addr)
		return NULL;

	offset = dma_addr - owner->base_dma_addr;
	index = offset / PAGE_SIZE;
	niov = &owner->area.niovs[index];

	niov->pp_magic = 0;
	niov->pp = NULL;
	atomic_long_set(&niov->pp_ref_count, 0);

	return niov;
}

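/* Return a net_iov's PAGE_SIZE chunk to the binding's genpool, warning if the
 * address does not belong to the pool.
 */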
void net_devmem_free_dmabuf(struct net_iov *niov)
{
	struct net_devmem_dmabuf_binding *binding = net_devmem_iov_binding(niov);
	unsigned long dma_addr = net_devmem_get_dma_addr(niov);

	if (WARN_ON(!gen_pool_has_addr(binding->chunk_pool, dma_addr,
				       PAGE_SIZE)))
		return;

	gen_pool_free(binding->chunk_pool, dma_addr, PAGE_SIZE);
}

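/* Undo a binding: close every rx queue still using this memory provider,
 * remove the binding from the global xarray and drop the bind-time reference.
 */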
void net_devmem_unbind_dmabuf(struct net_devmem_dmabuf_binding *binding)
{
	struct netdev_rx_queue *rxq;
	unsigned long xa_idx;
	unsigned int rxq_idx;

	if (binding->list.next)
		list_del(&binding->list);

	xa_for_each(&binding->bound_rxqs, xa_idx, rxq) {
		const struct pp_memory_provider_params mp_params = {
			.mp_priv = binding,
			.mp_ops	= &dmabuf_devmem_ops,
		};

		rxq_idx = get_netdev_rx_queue_index(rxq);

		__net_mp_close_rxq(binding->dev, rxq_idx, &mp_params);
	}

	xa_erase(&net_devmem_dmabuf_bindings, binding->id);

	net_devmem_dmabuf_binding_put(binding);
}

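/* Install the dmabuf memory provider on one rx queue and record the queue in
 * the binding's bound_rxqs xarray so it can be closed again on unbind.
 */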
int net_devmem_bind_dmabuf_to_queue(struct net_device *dev, u32 rxq_idx,
				    struct net_devmem_dmabuf_binding *binding,
				    struct netlink_ext_ack *extack)
{
	struct pp_memory_provider_params mp_params = {
		.mp_priv = binding,
		.mp_ops	= &dmabuf_devmem_ops,
	};
	struct netdev_rx_queue *rxq;
	u32 xa_idx;
	int err;

	err = __net_mp_open_rxq(dev, rxq_idx, &mp_params, extack);
	if (err)
		return err;

	rxq = __netif_get_rx_queue(dev, rxq_idx);
	err = xa_alloc(&binding->bound_rxqs, &xa_idx, rxq, xa_limit_32b,
		       GFP_KERNEL);
	if (err)
		goto err_close_rxq;

	return 0;

err_close_rxq:
	__net_mp_close_rxq(dev, rxq_idx, &mp_params);
	return err;
}

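/* Create a binding for @dmabuf_fd on @dev: attach and DMA-map the dma-buf,
 * build a genpool over its scatterlist, and set up one chunk owner plus a
 * net_iov array per scatterlist entry.
 */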
struct net_devmem_dmabuf_binding *
net_devmem_bind_dmabuf(struct net_device *dev, unsigned int dmabuf_fd,
		       struct netlink_ext_ack *extack)
{
	struct net_devmem_dmabuf_binding *binding;
	static u32 id_alloc_next;
	struct scatterlist *sg;
	struct dma_buf *dmabuf;
	unsigned int sg_idx, i;
	unsigned long virtual;
	int err;

	dmabuf = dma_buf_get(dmabuf_fd);
	if (IS_ERR(dmabuf))
		return ERR_CAST(dmabuf);

	binding = kzalloc_node(sizeof(*binding), GFP_KERNEL,
			       dev_to_node(&dev->dev));
	if (!binding) {
		err = -ENOMEM;
		goto err_put_dmabuf;
	}

	binding->dev = dev;

	err = xa_alloc_cyclic(&net_devmem_dmabuf_bindings, &binding->id,
			      binding, xa_limit_32b, &id_alloc_next,
			      GFP_KERNEL);
	if (err < 0)
		goto err_free_binding;

	xa_init_flags(&binding->bound_rxqs, XA_FLAGS_ALLOC);

	refcount_set(&binding->ref, 1);

	mutex_init(&binding->lock);

	binding->dmabuf = dmabuf;

	binding->attachment = dma_buf_attach(binding->dmabuf, dev->dev.parent);
	if (IS_ERR(binding->attachment)) {
		err = PTR_ERR(binding->attachment);
		NL_SET_ERR_MSG(extack, "Failed to bind dmabuf to device");
		goto err_free_id;
	}

	binding->sgt = dma_buf_map_attachment_unlocked(binding->attachment,
						       DMA_FROM_DEVICE);
	if (IS_ERR(binding->sgt)) {
		err = PTR_ERR(binding->sgt);
		NL_SET_ERR_MSG(extack, "Failed to map dmabuf attachment");
		goto err_detach;
	}

	/* For simplicity we expect to make PAGE_SIZE allocations, but the
	 * binding can be much more flexible than that. We may be able to
	 * allocate MTU sized chunks here. Leave that for future work...
	 */
	binding->chunk_pool =
		gen_pool_create(PAGE_SHIFT, dev_to_node(&dev->dev));
	if (!binding->chunk_pool) {
		err = -ENOMEM;
		goto err_unmap;
	}

	virtual = 0;
	for_each_sgtable_dma_sg(binding->sgt, sg, sg_idx) {
		dma_addr_t dma_addr = sg_dma_address(sg);
		struct dmabuf_genpool_chunk_owner *owner;
		size_t len = sg_dma_len(sg);
		struct net_iov *niov;

		owner = kzalloc_node(sizeof(*owner), GFP_KERNEL,
				     dev_to_node(&dev->dev));
		if (!owner) {
			err = -ENOMEM;
			goto err_free_chunks;
		}

		owner->area.base_virtual = virtual;
		owner->base_dma_addr = dma_addr;
		owner->area.num_niovs = len / PAGE_SIZE;
		owner->binding = binding;

		err = gen_pool_add_owner(binding->chunk_pool, dma_addr,
					 dma_addr, len, dev_to_node(&dev->dev),
					 owner);
		if (err) {
			kfree(owner);
			err = -EINVAL;
			goto err_free_chunks;
		}

		owner->area.niovs = kvmalloc_array(owner->area.num_niovs,
						   sizeof(*owner->area.niovs),
						   GFP_KERNEL);
		if (!owner->area.niovs) {
			err = -ENOMEM;
			goto err_free_chunks;
		}

		for (i = 0; i < owner->area.num_niovs; i++) {
			niov = &owner->area.niovs[i];
			niov->owner = &owner->area;
			page_pool_set_dma_addr_netmem(net_iov_to_netmem(niov),
						      net_devmem_get_dma_addr(niov));
		}

		virtual += len;
	}

	return binding;

err_free_chunks:
	gen_pool_for_each_chunk(binding->chunk_pool,
				net_devmem_dmabuf_free_chunk_owner, NULL);
	gen_pool_destroy(binding->chunk_pool);
err_unmap:
	dma_buf_unmap_attachment_unlocked(binding->attachment, binding->sgt,
					  DMA_FROM_DEVICE);
err_detach:
	dma_buf_detach(dmabuf, binding->attachment);
err_free_id:
	xa_erase(&net_devmem_dmabuf_bindings, binding->id);
err_free_binding:
	kfree(binding);
err_put_dmabuf:
	dma_buf_put(dmabuf);
	return ERR_PTR(err);
}

/*** "Dmabuf devmem memory provider" ***/

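/* Page pool init hook: set up a pool backed by this dmabuf binding and take a
 * reference on the binding for the pool's lifetime. Only order-0 pools are
 * supported.
 */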
int mp_dmabuf_devmem_init(struct page_pool *pool)
{
	struct net_devmem_dmabuf_binding *binding = pool->mp_priv;

	if (!binding)
		return -EINVAL;

	/* dma-buf dma addresses do not need and should not be used with
	 * dma_sync_for_cpu/device. Force disable dma_sync.
	 */
	pool->dma_sync = false;
	pool->dma_sync_for_cpu = false;

	if (pool->p.order != 0)
		return -E2BIG;

	net_devmem_dmabuf_binding_get(binding);
	return 0;
}

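/* Page pool allocation hook: hand out one net_iov from the binding as a
 * netmem_ref and account for it in the pool's hold count.
 */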
netmem_ref mp_dmabuf_devmem_alloc_netmems(struct page_pool *pool, gfp_t gfp)
{
	struct net_devmem_dmabuf_binding *binding = pool->mp_priv;
	struct net_iov *niov;
	netmem_ref netmem;

	niov = net_devmem_alloc_dmabuf(binding);
	if (!niov)
		return 0;

	netmem = net_iov_to_netmem(niov);

	page_pool_set_pp_info(pool, netmem);

	pool->pages_state_hold_cnt++;
	trace_page_pool_state_hold(pool, netmem, pool->pages_state_hold_cnt);
	return netmem;
}

void mp_dmabuf_devmem_destroy(struct page_pool *pool)
{
	struct net_devmem_dmabuf_binding *binding = pool->mp_priv;

	net_devmem_dmabuf_binding_put(binding);
}

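/* Page pool release hook: give the chunk back to the binding. Returning false
 * tells the page pool not to attempt its own put_page() on the net_iov.
 */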
bool mp_dmabuf_devmem_release_page(struct page_pool *pool, netmem_ref netmem)
{
	long refcount = atomic_long_read(netmem_get_pp_ref_count_ref(netmem));

	if (WARN_ON_ONCE(!netmem_is_net_iov(netmem)))
		return false;

	if (WARN_ON_ONCE(refcount != 1))
		return false;

	page_pool_clear_pp_info(netmem);

	net_devmem_free_dmabuf(netmem_to_net_iov(netmem));

	/* We don't want the page pool put_page()ing our net_iovs. */
	return false;
}

static int mp_dmabuf_devmem_nl_fill(void *mp_priv, struct sk_buff *rsp,
				    struct netdev_rx_queue *rxq)
{
	const struct net_devmem_dmabuf_binding *binding = mp_priv;
	int type = rxq ? NETDEV_A_QUEUE_DMABUF : NETDEV_A_PAGE_POOL_DMABUF;

	return nla_put_u32(rsp, type, binding->id);
}

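/* Called when the memory provider is removed from a queue: drop the queue
 * from bound_rxqs and, once the last bound queue is gone, clear binding->dev
 * under the binding lock.
 */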
static void mp_dmabuf_devmem_uninstall(void *mp_priv,
				       struct netdev_rx_queue *rxq)
{
	struct net_devmem_dmabuf_binding *binding = mp_priv;
	struct netdev_rx_queue *bound_rxq;
	unsigned long xa_idx;

	xa_for_each(&binding->bound_rxqs, xa_idx, bound_rxq) {
		if (bound_rxq == rxq) {
			xa_erase(&binding->bound_rxqs, xa_idx);
			if (xa_empty(&binding->bound_rxqs)) {
				mutex_lock(&binding->lock);
				binding->dev = NULL;
				mutex_unlock(&binding->lock);
			}
			break;
		}
	}
}

static const struct memory_provider_ops dmabuf_devmem_ops = {
	.init = mp_dmabuf_devmem_init,
	.destroy = mp_dmabuf_devmem_destroy,
	.alloc_netmems = mp_dmabuf_devmem_alloc_netmems,
	.release_netmem = mp_dmabuf_devmem_release_page,
	.nl_fill = mp_dmabuf_devmem_nl_fill,
	.uninstall = mp_dmabuf_devmem_uninstall,
};