1 /*-
2 * Copyright (c) 2020-2022 The FreeBSD Foundation
3 * Copyright (c) 2021-2022 Bjoern A. Zeeb
4 *
5 * This software was developed by Björn Zeeb under sponsorship from
6 * the FreeBSD Foundation.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
21 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 * SUCH DAMAGE.
28 *
29 * $FreeBSD$
30 */
31
32 /*
33 * NOTE: this socket buffer compatibility code is highly EXPERIMENTAL.
34 * Do not rely on the internals of this implementation. They are highly
35 * likely to change as we will improve the integration to FreeBSD mbufs.
36 */
37
38 #ifndef _LINUXKPI_LINUX_SKBUFF_H
39 #define _LINUXKPI_LINUX_SKBUFF_H
40
41 #include <linux/page.h>
42 #include <linux/dma-mapping.h>
43 #include <linux/netdev_features.h>
44 #include <linux/list.h>
45 #include <linux/gfp.h>
46 #include <linux/compiler.h>
47 #include <linux/spinlock.h>
48
/* #define SKB_DEBUG */
#ifdef SKB_DEBUG
/* Debug class bits for the linuxkpi_debug_skb bitmask below. */
#define DSKB_TODO 0x01
#define DSKB_IMPROVE 0x02
#define DSKB_TRACE 0x10
#define DSKB_TRACEX 0x20
/* Runtime-selectable debug bitmask; defined out-of-line in the skbuff code. */
extern int linuxkpi_debug_skb;

/*
 * NB: these macros expand to unbraced if-statements; beware using one as
 * the sole body of an outer if/else (dangling-else hazard).
 */
#define SKB_TODO() \
	if (linuxkpi_debug_skb & DSKB_TODO) \
		printf("SKB_TODO %s:%d\n", __func__, __LINE__)
#define SKB_IMPROVE(...) \
	if (linuxkpi_debug_skb & DSKB_IMPROVE) \
		printf("SKB_IMPROVE %s:%d\n", __func__, __LINE__)
#define SKB_TRACE(_s) \
	if (linuxkpi_debug_skb & DSKB_TRACE) \
		printf("SKB_TRACE %s:%d %p\n", __func__, __LINE__, _s)
#define SKB_TRACE2(_s, _p) \
	if (linuxkpi_debug_skb & DSKB_TRACE) \
		printf("SKB_TRACE %s:%d %p, %p\n", __func__, __LINE__, _s, _p)
#define SKB_TRACE_FMT(_s, _fmt, ...) \
	if (linuxkpi_debug_skb & DSKB_TRACE) \
		printf("SKB_TRACE %s:%d %p " _fmt "\n", __func__, __LINE__, _s, \
		    __VA_ARGS__)
#else
/* Debugging disabled: all hooks compile away to empty statements. */
#define SKB_TODO() do { } while(0)
#define SKB_IMPROVE(...) do { } while(0)
#define SKB_TRACE(_s) do { } while(0)
#define SKB_TRACE2(_s, _p) do { } while(0)
#define SKB_TRACE_FMT(_s, ...) do { } while(0)
#endif

/* Linux packet classification values; only the ones needed so far. */
enum sk_buff_pkt_type {
	PACKET_BROADCAST,
	PACKET_MULTICAST,
	PACKET_OTHERHOST,
};

/* Default driver headroom; CACHE_LINE_SIZE is a guess (see "?"). */
#define NET_SKB_PAD CACHE_LINE_SIZE /* ? */
88
/*
 * Head of a doubly-linked circular skb queue.  The head itself is
 * aliased to a struct sk_buff via casts in the queue functions below,
 * so next/prev must stay the first two members (matching sk_buff).
 */
struct sk_buff_head {
	/* XXX TODO */
	struct sk_buff *next;
	struct sk_buff *prev;
	size_t qlen;		/* Number of skbs currently queued. */
	spinlock_t lock;
};

/* Checksum status values; presumably stored in skb->ip_summed — confirm. */
enum sk_checksum_flags {
	CHECKSUM_NONE = 0x00,
	CHECKSUM_UNNECESSARY = 0x01,
	CHECKSUM_PARTIAL = 0x02,
	CHECKSUM_COMPLETE = 0x04,
};

/* Descriptor for one page fragment of non-linear skb data. */
struct skb_frag {
	/* XXX TODO */
	struct page *page; /* XXX-BZ These three are a wild guess so far! */
	off_t offset;
	size_t size;
};
typedef struct skb_frag skb_frag_t;

/* GSO types; only TCP over IPv4/IPv6 are modelled so far. */
enum skb_shared_info_gso_type {
	SKB_GSO_TCPV4,
	SKB_GSO_TCPV6,
};

/* Fragment bookkeeping attached to an skb; see skb_shinfo(). */
struct skb_shared_info {
	enum skb_shared_info_gso_type gso_type;
	uint16_t gso_size;
	uint16_t nr_frags;		/* Valid entries in frags[]. */
	struct sk_buff *frag_list;
	skb_frag_t frags[64]; /* XXX TODO, 16xpage? */
};
124
/*
 * Compatibility representation of a Linux socket buffer.  Unlike Linux,
 * head/data/tail/end are all real pointers into one allocation (not
 * offsets), and the shared info area is reached via the shinfo pointer.
 */
struct sk_buff {
	/* XXX TODO */
	/* struct sk_buff_head */
	struct sk_buff *next;		/* Queue linkage; see sk_buff_head. */
	struct sk_buff *prev;
	int list; /* XXX TYPE */
	uint32_t _alloc_len; /* Length of alloc data-buf. XXX-BZ give up for truesize? */
	uint32_t len; /* Total data length, linear plus fragments. */
	uint32_t data_len; /* Bytes held in fragments (non-linear part). */
	uint32_t truesize; /* The total size of all buffers, incl. frags. */
	uint16_t mac_len; /* Link-layer header length. */
	__sum16 csum;
	uint16_t l3hdroff; /* network header offset from *head */
	uint16_t l4hdroff; /* transport header offset from *head */
	uint32_t priority;
	uint16_t qmap; /* queue mapping */
	uint16_t _spareu16_0;
	enum sk_buff_pkt_type pkt_type;

	/* "Scratch" area for layers to store metadata. */
	/* ??? I see sizeof() operations so probably an array. */
	uint8_t cb[64] __aligned(CACHE_LINE_SIZE);

	struct net_device *dev;
	void *sk; /* XXX net/sock.h? */

	int csum_offset, csum_start, ip_summed, protocol;

	uint8_t *head; /* Head of buffer. */
	uint8_t *data; /* Head of data. */
	uint8_t *tail; /* End of data. */
	uint8_t *end; /* End of buffer. */

	/* Fragment/GSO bookkeeping; placement within the alloc unconfirmed. */
	struct skb_shared_info *shinfo;

	/* FreeBSD specific bandaid (see linuxkpi_kfree_skb). */
	void *m;			/* Attached mbuf (or similar). */
	void(*m_free_func)(void *);	/* Called on m when skb is freed. */

	/* Force padding to CACHE_LINE_SIZE. */
	uint8_t __scratch[0] __aligned(CACHE_LINE_SIZE);
};
167
168 /* -------------------------------------------------------------------------- */
169
/* Allocator/free pair implemented out-of-line in the LinuxKPI support code. */
struct sk_buff *linuxkpi_alloc_skb(size_t, gfp_t);
void linuxkpi_kfree_skb(struct sk_buff *);
172
173 /* -------------------------------------------------------------------------- */
174
175 static inline struct sk_buff *
alloc_skb(size_t size,gfp_t gfp)176 alloc_skb(size_t size, gfp_t gfp)
177 {
178 struct sk_buff *skb;
179
180 skb = linuxkpi_alloc_skb(size, gfp);
181 SKB_TRACE(skb);
182 return (skb);
183 }
184
185 static inline struct sk_buff *
__dev_alloc_skb(size_t len,gfp_t gfp)186 __dev_alloc_skb(size_t len, gfp_t gfp)
187 {
188 struct sk_buff *skb;
189
190 skb = alloc_skb(len, gfp);
191 SKB_IMPROVE();
192 SKB_TRACE(skb);
193 return (skb);
194 }
195
196 static inline struct sk_buff *
dev_alloc_skb(size_t len)197 dev_alloc_skb(size_t len)
198 {
199 struct sk_buff *skb;
200
201 skb = alloc_skb(len, GFP_NOWAIT);
202 SKB_IMPROVE();
203 SKB_TRACE(skb);
204 return (skb);
205 }
206
/* Release an skb and everything attached to it (see linuxkpi_kfree_skb). */
static inline void
kfree_skb(struct sk_buff *skb)
{

	SKB_TRACE(skb);
	linuxkpi_kfree_skb(skb);
}
213
/* Driver-facing free; same semantics as kfree_skb() here. */
static inline void
dev_kfree_skb(struct sk_buff *skb)
{

	SKB_TRACE(skb);
	kfree_skb(skb);
}
220
/* "Any context" free; no IRQ-context distinction exists here yet. */
static inline void
dev_kfree_skb_any(struct sk_buff *skb)
{

	SKB_TRACE(skb);
	dev_kfree_skb(skb);
}
227
/*
 * NOTE(review): unimplemented stub — it traces and logs a TODO but never
 * frees the skb, so callers leak until this is filled in.
 */
static inline void
dev_kfree_skb_irq(struct sk_buff *skb)
{
	SKB_TRACE(skb);
	SKB_TODO();
}
234
235 /* -------------------------------------------------------------------------- */
236
/* XXX BZ review this one for terminal condition as Linux "queues" are special. */
/*
 * NOTE(review): the `(tmp) = (skb)->next` assignment doubles as the loop
 * condition, so the walk stops one element early when next is NULL —
 * matches the XXX above; confirm against Linux list semantics.
 */
#define skb_list_walk_safe(_q, skb, tmp) \
	for ((skb) = (_q)->next; (skb) != NULL && ((tmp) = (skb)->next); (skb) = (tmp))
240
/* Add headroom; cannot do once there is data in there. */
static inline void
skb_reserve(struct sk_buff *skb, size_t len)
{
	SKB_TRACE(skb);
#if 0
	/* Apparently it is allowed to call skb_reserve multiple times in a row. */
	KASSERT(skb->data == skb->head, ("%s: skb %p not empty head %p data %p "
	    "tail %p\n", __func__, skb, skb->head, skb->data, skb->tail));
#else
	/* Only legal while the skb holds no data yet. */
	KASSERT(skb->len == 0 && skb->data == skb->tail, ("%s: skb %p not "
	    "empty head %p data %p tail %p len %u\n", __func__, skb,
	    skb->head, skb->data, skb->tail, skb->len));
#endif
	/* Advance data and tail together, leaving len bytes of headroom. */
	skb->data += len;
	skb->tail += len;
}
258
259 /*
260 * Remove headroom; return new data pointer; basically make space at the
261 * front to copy data in (manually).
262 */
263 static inline void *
skb_push(struct sk_buff * skb,size_t len)264 skb_push(struct sk_buff *skb, size_t len)
265 {
266 SKB_TRACE(skb);
267 KASSERT(((skb->data - len) >= skb->head), ("%s: skb %p (data %p - "
268 "len %zu) < head %p\n", __func__, skb, skb->data, len, skb->data));
269 skb->len += len;
270 skb->data -= len;
271 return (skb->data);
272 }
273
274 /*
275 * Length of the data on the skb (without any frags)???
276 */
277 static inline size_t
skb_headlen(struct sk_buff * skb)278 skb_headlen(struct sk_buff *skb)
279 {
280
281 SKB_TRACE(skb);
282 return (skb->len - skb->data_len);
283 }
284
285
286 /* Return the end of data (tail pointer). */
287 static inline uint8_t *
skb_tail_pointer(struct sk_buff * skb)288 skb_tail_pointer(struct sk_buff *skb)
289 {
290
291 SKB_TRACE(skb);
292 return (skb->tail);
293 }
294
/* Return number of bytes available at end of buffer. */
static inline unsigned int
skb_tailroom(struct sk_buff *skb)
{

	SKB_TRACE(skb);
	/* end and tail are pointers; the difference must never go negative. */
	KASSERT((skb->end - skb->tail) >= 0, ("%s: skb %p tailroom < 0, "
	    "end %p tail %p\n", __func__, skb, skb->end, skb->tail));
	return (skb->end - skb->tail);
}
305
/* Return number of bytes available at the beginning of buffer. */
static inline unsigned int
skb_headroom(struct sk_buff *skb)
{
	SKB_TRACE(skb);
	/* data may only ever advance past head (see skb_reserve/skb_push). */
	KASSERT((skb->data - skb->head) >= 0, ("%s: skb %p headroom < 0, "
	    "data %p head %p\n", __func__, skb, skb->data, skb->head));
	return (skb->data - skb->head);
}
315
316
317 /*
318 * Remove tailroom; return the old tail pointer; basically make space at
319 * the end to copy data in (manually). See also skb_put_data() below.
320 */
321 static inline void *
skb_put(struct sk_buff * skb,size_t len)322 skb_put(struct sk_buff *skb, size_t len)
323 {
324 void *s;
325
326 SKB_TRACE(skb);
327 KASSERT(((skb->tail + len) <= skb->end), ("%s: skb %p (tail %p + "
328 "len %zu) > end %p, head %p data %p len %u\n", __func__,
329 skb, skb->tail, len, skb->end, skb->head, skb->data, skb->len));
330
331 s = skb_tail_pointer(skb);
332 skb->tail += len;
333 skb->len += len;
334 #ifdef SKB_DEBUG
335 if (linuxkpi_debug_skb & DSKB_TRACEX)
336 printf("%s: skb %p (%u) head %p data %p tail %p end %p, s %p len %zu\n",
337 __func__, skb, skb->len, skb->head, skb->data, skb->tail, skb->end,
338 s, len);
339 #endif
340 return (s);
341 }
342
/* Reserve tail space via skb_put() and copy the caller's bytes into it. */
static inline void *
skb_put_data(struct sk_buff *skb, const void *buf, size_t len)
{
	void *dst;

	SKB_TRACE2(skb, buf);
	dst = skb_put(skb, len);
	memcpy(dst, buf, len);
	return (dst);
}
354
/* Reserve tail space via skb_put() and zero-fill it. */
static inline void *
skb_put_zero(struct sk_buff *skb, size_t len)
{
	void *dst;

	SKB_TRACE(skb);
	dst = skb_put(skb, len);
	memset(dst, '\0', len);
	return (dst);
}
366
367 /*
368 * Remove len bytes from beginning of data.
369 *
370 * XXX-BZ ath10k checks for !NULL conditions so I assume this doesn't panic;
371 * we return the advanced data pointer so we don't have to keep a temp, correct?
372 */
373 static inline void *
skb_pull(struct sk_buff * skb,size_t len)374 skb_pull(struct sk_buff *skb, size_t len)
375 {
376
377 SKB_TRACE(skb);
378 #if 0 /* Apparently this doesn't barf... */
379 KASSERT(skb->len >= len, ("%s: skb %p skb->len %u < len %u, data %p\n",
380 __func__, skb, skb->len, len, skb->data));
381 #endif
382 if (skb->len < len)
383 return (NULL);
384 skb->len -= len;
385 skb->data += len;
386 return (skb->data);
387 }
388
389 /* Reduce skb data to given length or do nothing if smaller already. */
390 static inline void
__skb_trim(struct sk_buff * skb,unsigned int len)391 __skb_trim(struct sk_buff *skb, unsigned int len)
392 {
393
394 SKB_TRACE(skb);
395 if (skb->len < len)
396 return;
397
398 skb->len = len;
399 skb->tail = skb->data + skb->len;
400 }
401
/* Public wrapper around __skb_trim(). */
static inline void
skb_trim(struct sk_buff *skb, unsigned int len)
{

	__skb_trim(skb, len);
}
408
409 static inline struct skb_shared_info *
skb_shinfo(struct sk_buff * skb)410 skb_shinfo(struct sk_buff *skb)
411 {
412
413 SKB_TRACE(skb);
414 return (skb->shinfo);
415 }
416
417 static inline void
skb_add_rx_frag(struct sk_buff * skb,int fragno,struct page * page,off_t offset,size_t size,unsigned int truesize)418 skb_add_rx_frag(struct sk_buff *skb, int fragno, struct page *page,
419 off_t offset, size_t size, unsigned int truesize)
420 {
421 struct skb_shared_info *shinfo;
422
423 SKB_TRACE(skb);
424 #ifdef SKB_DEBUG
425 if (linuxkpi_debug_skb & DSKB_TRACEX)
426 printf("%s: skb %p head %p data %p tail %p end %p len %u fragno %d "
427 "page %#jx offset %ju size %zu truesize %u\n", __func__,
428 skb, skb->head, skb->data, skb->tail, skb->end, skb->len, fragno,
429 (uintmax_t)(uintptr_t)linux_page_address(page), (uintmax_t)offset,
430 size, truesize);
431 #endif
432
433 shinfo = skb_shinfo(skb);
434 KASSERT(fragno >= 0 && fragno < nitems(shinfo->frags), ("%s: skb %p "
435 "fragno %d too big\n", __func__, skb, fragno));
436 shinfo->frags[fragno].page = page;
437 shinfo->frags[fragno].offset = offset;
438 shinfo->frags[fragno].size = size;
439 shinfo->nr_frags = fragno + 1;
440 skb->len += size;
441 skb->truesize += truesize;
442
443 /* XXX TODO EXTEND truesize? */
444 }
445
446 /* -------------------------------------------------------------------------- */
447
/* XXX BZ review this one for terminal condition as Linux "queues" are special. */
/* Iterate the circular queue; the cast-back-to-head is the sentinel. */
#define skb_queue_walk(_q, skb) \
	for ((skb) = (_q)->next; (skb) != (struct sk_buff *)(_q); \
	    (skb) = (skb)->next)

/* Deletion-safe variant: tmp holds the successor before the body runs. */
#define skb_queue_walk_safe(_q, skb, tmp) \
	for ((skb) = (_q)->next, (tmp) = (skb)->next; \
	    (skb) != (struct sk_buff *)(_q); (skb) = (tmp), (tmp) = (skb)->next)
456
457 static inline bool
skb_queue_empty(struct sk_buff_head * q)458 skb_queue_empty(struct sk_buff_head *q)
459 {
460
461 SKB_TRACE(q);
462 return (q->qlen == 0);
463 }
464
465 static inline void
__skb_queue_head_init(struct sk_buff_head * q)466 __skb_queue_head_init(struct sk_buff_head *q)
467 {
468 SKB_TRACE(q);
469 q->prev = q->next = (struct sk_buff *)q;
470 q->qlen = 0;
471 }
472
/* Public initializer; no locking difference from the __ variant yet. */
static inline void
skb_queue_head_init(struct sk_buff_head *q)
{
	SKB_TRACE(q);
	__skb_queue_head_init(q);
}
479
/*
 * Link 'new' between prev and next (both assumed to already be on queue
 * q, or the head itself) and bump the queue length.
 */
static inline void
__skb_insert(struct sk_buff *new, struct sk_buff *prev, struct sk_buff *next,
    struct sk_buff_head *q)
{

	SKB_TRACE_FMT(new, "prev %p next %p q %p", prev, next, q);
	new->prev = prev;
	new->next = next;
	next->prev = new;
	prev->next = new;
	q->qlen++;
}
492
493 static inline void
__skb_queue_after(struct sk_buff_head * q,struct sk_buff * skb,struct sk_buff * new)494 __skb_queue_after(struct sk_buff_head *q, struct sk_buff *skb,
495 struct sk_buff *new)
496 {
497
498 SKB_TRACE_FMT(q, "skb %p new %p", skb, new);
499 __skb_insert(new, skb, skb->next, q);
500 }
501
502 static inline void
__skb_queue_before(struct sk_buff_head * q,struct sk_buff * skb,struct sk_buff * new)503 __skb_queue_before(struct sk_buff_head *q, struct sk_buff *skb,
504 struct sk_buff *new)
505 {
506
507 SKB_TRACE_FMT(q, "skb %p new %p", skb, new);
508 __skb_insert(new, skb->prev, skb, q);
509 }
510
511 static inline void
__skb_queue_tail(struct sk_buff_head * q,struct sk_buff * skb)512 __skb_queue_tail(struct sk_buff_head *q, struct sk_buff *skb)
513 {
514 struct sk_buff *s;
515
516 SKB_TRACE2(q, skb);
517 q->qlen++;
518 s = (struct sk_buff *)q;
519 s->prev->next = skb;
520 skb->prev = s->prev;
521 skb->next = s;
522 s->prev = skb;
523 }
524
/* Public tail-enqueue; no locking difference from the __ variant yet. */
static inline void
skb_queue_tail(struct sk_buff_head *q, struct sk_buff *skb)
{
	SKB_TRACE2(q, skb);
	__skb_queue_tail(q, skb);
}
531
532 static inline struct sk_buff *
skb_peek_tail(struct sk_buff_head * q)533 skb_peek_tail(struct sk_buff_head *q)
534 {
535 struct sk_buff *skb;
536
537 skb = q->prev;
538 SKB_TRACE2(q, skb);
539 if (skb == (struct sk_buff *)q)
540 return (NULL);
541 return (skb);
542 }
543
544 static inline void
__skb_unlink(struct sk_buff * skb,struct sk_buff_head * head)545 __skb_unlink(struct sk_buff *skb, struct sk_buff_head *head)
546 {
547 SKB_TRACE2(skb, head);
548 struct sk_buff *p, *n;;
549
550 head->qlen--;
551 p = skb->prev;
552 n = skb->next;
553 p->next = n;
554 n->prev = p;
555 skb->prev = skb->next = NULL;
556 }
557
/* Public unlink; no locking difference from the __ variant yet. */
static inline void
skb_unlink(struct sk_buff *skb, struct sk_buff_head *head)
{
	SKB_TRACE2(skb, head);
	__skb_unlink(skb, head);
}
564
565 static inline struct sk_buff *
__skb_dequeue(struct sk_buff_head * q)566 __skb_dequeue(struct sk_buff_head *q)
567 {
568 struct sk_buff *skb;
569
570 SKB_TRACE(q);
571 skb = q->next;
572 if (skb == (struct sk_buff *)q)
573 return (NULL);
574 if (skb != NULL)
575 __skb_unlink(skb, q);
576 SKB_TRACE(skb);
577 return (skb);
578 }
579
/* Public dequeue; no locking difference from the __ variant yet. */
static inline struct sk_buff *
skb_dequeue(struct sk_buff_head *q)
{
	struct sk_buff *skb;

	SKB_TRACE(q);
	skb = __skb_dequeue(q);
	return (skb);
}
586
/* Pop the last skb off the queue; NULL when empty. */
static inline struct sk_buff *
skb_dequeue_tail(struct sk_buff_head *q)
{
	struct sk_buff *last;

	last = skb_peek_tail(q);
	if (last != NULL)
		__skb_unlink(last, q);

	SKB_TRACE2(q, last);
	return (last);
}
599
/* Prepend skb at the front of the queue (right after the virtual head). */
static inline void
__skb_queue_head(struct sk_buff_head *q, struct sk_buff *skb)
{
	struct sk_buff *h;

	SKB_TRACE2(q, skb);
	h = (struct sk_buff *)q;
	__skb_queue_after(q, h, skb);
}
607
/*
 * Public head-enqueue.  Delegates to __skb_queue_head() for consistency
 * with skb_queue_tail()/skb_unlink() etc. instead of duplicating its body.
 */
static inline void
skb_queue_head(struct sk_buff_head *q, struct sk_buff *skb)
{

	SKB_TRACE2(q, skb);
	__skb_queue_head(q, skb);
}
615
616 static inline uint32_t
skb_queue_len(struct sk_buff_head * head)617 skb_queue_len(struct sk_buff_head *head)
618 {
619
620 SKB_TRACE(head);
621 return (head->qlen);
622 }
623
/*
 * Racy snapshot of the queue length: READ_ONCE stops the compiler from
 * tearing or refetching the load, but no lock is taken.
 */
static inline uint32_t
skb_queue_len_lockless(const struct sk_buff_head *head)
{

	SKB_TRACE(head);
	return (READ_ONCE(head->qlen));
}
631
/* Drain the queue, freeing every skb on it. */
static inline void
__skb_queue_purge(struct sk_buff_head *q)
{
	struct sk_buff *skb;

	SKB_TRACE(q);
	for (;;) {
		skb = __skb_dequeue(q);
		if (skb == NULL)
			break;
		kfree_skb(skb);
	}
}
641
/* Public purge; no locking difference from the __ variant yet. */
static inline void
skb_queue_purge(struct sk_buff_head *q)
{
	SKB_TRACE(q);
	__skb_queue_purge(q);
}
648
649 static inline struct sk_buff *
skb_queue_prev(struct sk_buff_head * q,struct sk_buff * skb)650 skb_queue_prev(struct sk_buff_head *q, struct sk_buff *skb)
651 {
652
653 SKB_TRACE2(q, skb);
654 /* XXX what is the q argument good for? */
655 return (skb->prev);
656 }
657
658 /* -------------------------------------------------------------------------- */
659
/* XXX TODO: unimplemented stub; always returns NULL (no copy is made). */
static inline struct sk_buff *
skb_copy(struct sk_buff *skb, gfp_t gfp)
{
	SKB_TRACE(skb);
	SKB_TODO();
	return (NULL);
}

/* XXX TODO: unimplemented stub; the skb is NOT freed or consumed. */
static inline void
consume_skb(struct sk_buff *skb)
{
	SKB_TRACE(skb);
	SKB_TODO();
}

/* XXX TODO: unimplemented stub; always returns 0xffff, not a checksum. */
static inline uint16_t
skb_checksum(struct sk_buff *skb, int offs, size_t len, int x)
{
	SKB_TRACE(skb);
	SKB_TODO();
	return (0xffff);
}

/* XXX TODO: unimplemented stub; always returns -1. */
static inline int
skb_checksum_start_offset(struct sk_buff *skb)
{
	SKB_TRACE(skb);
	SKB_TODO();
	return (-1);
}

/* XXX TODO: unimplemented stub; returns -1 cast to dma_addr_t. */
static inline dma_addr_t
skb_frag_dma_map(struct device *dev, const skb_frag_t *frag, int x,
    size_t fragsz, enum dma_data_direction dir)
{
	SKB_TRACE2(frag, dev);
	SKB_TODO();
	return (-1);
}

/* XXX TODO: unimplemented stub; returns (size_t)-1, not frag->size. */
static inline size_t
skb_frag_size(const skb_frag_t *frag)
{
	SKB_TRACE(frag);
	SKB_TODO();
	return (-1);
}
707
708 static inline bool
skb_is_nonlinear(struct sk_buff * skb)709 skb_is_nonlinear(struct sk_buff *skb)
710 {
711 SKB_TRACE(skb);
712 return ((skb->data_len > 0) ? true : false);
713 }
714
/* XXX TODO: stub macro — the `false` condition means the body never runs. */
#define skb_walk_frags(_skb, _frag) \
	for ((_frag) = (_skb); false; (_frag)++)

/* XXX TODO: unimplemented stub; no checksum is computed. */
static inline void
skb_checksum_help(struct sk_buff *skb)
{
	SKB_TRACE(skb);
	SKB_TODO();
}

/* XXX TODO: unimplemented stub; always reports "not writable" (false). */
static inline bool
skb_ensure_writable(struct sk_buff *skb, size_t off)
{
	SKB_TRACE(skb);
	SKB_TODO();
	return (false);
}
732
/* XXX TODO: unimplemented stub; always returns NULL. */
static inline void *
skb_frag_address(const skb_frag_t *frag)
{
	SKB_TRACE(frag);
	SKB_TODO();
	return (NULL);
}

/* XXX TODO: unimplemented stub; no segmentation, always NULL. */
static inline struct sk_buff *
skb_gso_segment(struct sk_buff *skb, netdev_features_t netdev_flags)
{
	SKB_TRACE(skb);
	SKB_TODO();
	return (NULL);
}

/* XXX TODO: unimplemented stub; always claims "not GSO". */
static inline bool
skb_is_gso(struct sk_buff *skb)
{
	SKB_TRACE(skb);
	SKB_TODO();
	return (false);
}

/* XXX TODO: unimplemented stub; list linkage is NOT cleared. */
static inline void
skb_mark_not_on_list(struct sk_buff *skb)
{
	SKB_TRACE(skb);
	SKB_TODO();
}
763
/*
 * Move all skbs from 'from' onto the front of 'to' and reinitialize
 * 'from' to an empty queue.
 */
static inline void
skb_queue_splice_init(struct sk_buff_head *from, struct sk_buff_head *to)
{
	struct sk_buff *b, *e, *n;

	SKB_TRACE2(from, to);

	if (skb_queue_empty(from))
		return;

	/* XXX do we need a barrier around this? */
	b = from->next;		/* First element of 'from'. */
	e = from->prev;		/* Last element of 'from'. */
	n = to->next;		/* Old first element of 'to'. */

	/* Splice [b..e] in between to's head and its old first element. */
	b->prev = (struct sk_buff *)to;
	to->next = b;
	e->next = n;
	n->prev = e;

	to->qlen += from->qlen;
	__skb_queue_head_init(from);
}
787
788 static inline void
skb_reset_transport_header(struct sk_buff * skb)789 skb_reset_transport_header(struct sk_buff *skb)
790 {
791
792 SKB_TRACE(skb);
793 skb->l4hdroff = skb->data - skb->head;
794 }
795
796 static inline uint8_t *
skb_transport_header(struct sk_buff * skb)797 skb_transport_header(struct sk_buff *skb)
798 {
799
800 SKB_TRACE(skb);
801 return (skb->head + skb->l4hdroff);
802 }
803
804 static inline uint8_t *
skb_network_header(struct sk_buff * skb)805 skb_network_header(struct sk_buff *skb)
806 {
807
808 SKB_TRACE(skb);
809 return (skb->head + skb->l3hdroff);
810 }
811
/* XXX TODO: unimplemented stub.
 * NOTE(review): returns positive ENXIO while pskb_expand_head() below
 * returns -ENXIO — inconsistent error-sign convention; confirm intent.
 */
static inline int
__skb_linearize(struct sk_buff *skb)
{
	SKB_TRACE(skb);
	SKB_TODO();
	return (ENXIO);
}

/* XXX TODO: unimplemented stub; always fails with -ENXIO. */
static inline int
pskb_expand_head(struct sk_buff *skb, int x, int len, gfp_t gfp)
{
	SKB_TRACE(skb);
	SKB_TODO();
	return (-ENXIO);
}
827
828 /* Not really seen this one but need it as symmetric accessor function. */
829 static inline void
skb_set_queue_mapping(struct sk_buff * skb,uint16_t qmap)830 skb_set_queue_mapping(struct sk_buff *skb, uint16_t qmap)
831 {
832
833 SKB_TRACE_FMT(skb, "qmap %u", qmap);
834 skb->qmap = qmap;
835 }
836
837 static inline uint16_t
skb_get_queue_mapping(struct sk_buff * skb)838 skb_get_queue_mapping(struct sk_buff *skb)
839 {
840
841 SKB_TRACE_FMT(skb, "qmap %u", skb->qmap);
842 return (skb->qmap);
843 }
844
/* XXX TODO: unimplemented stub; always claims "not cloned". */
static inline bool
skb_header_cloned(struct sk_buff *skb)
{
	SKB_TRACE(skb);
	SKB_TODO();
	return (false);
}

/* XXX TODO: unimplemented stub; always returns NULL. */
static inline uint8_t *
skb_mac_header(struct sk_buff *skb)
{
	SKB_TRACE(skb);
	SKB_TODO();
	return (NULL);
}

/* XXX TODO: unimplemented stub; no socket reference is dropped. */
static inline void
skb_orphan(struct sk_buff *skb)
{
	SKB_TRACE(skb);
	SKB_TODO();
}

/* XXX TODO: unimplemented stub; no MAC header offset is recorded. */
static inline void
skb_reset_mac_header(struct sk_buff *skb)
{
	SKB_TRACE(skb);
	SKB_TODO();
}

/* XXX TODO: unimplemented stub; always returns NULL, never peeks. */
static inline struct sk_buff *
skb_peek(struct sk_buff_head *q)
{
	SKB_TRACE(q);
	SKB_TODO();
	return (NULL);
}

/* XXX TODO: unimplemented stub; returns the input unchanged. */
static inline __sum16
csum_unfold(__sum16 sum)
{
	SKB_TODO();
	return (sum);
}

/* XXX TODO: unimplemented stub; the receive checksum is NOT updated. */
static __inline void
skb_postpush_rcsum(struct sk_buff *skb, const void *data, size_t len)
{
	SKB_TODO();
}
895
896 static inline void
skb_reset_tail_pointer(struct sk_buff * skb)897 skb_reset_tail_pointer(struct sk_buff *skb)
898 {
899
900 SKB_TRACE(skb);
901 skb->tail = (uint8_t *)(uintptr_t)(skb->data - skb->head);
902 SKB_TRACE(skb);
903 }
904
/* XXX TODO: no reference counting yet; simply hands back the same skb. */
static inline struct sk_buff *
skb_get(struct sk_buff *skb)
{

	SKB_TODO(); /* XXX refcnt? as in get/put_device? */
	return (skb);
}

/* XXX TODO: unimplemented stub; no reallocation, always returns NULL. */
static inline struct sk_buff *
skb_realloc_headroom(struct sk_buff *skb, unsigned int headroom)
{

	SKB_TODO();
	return (NULL);
}
920
921 static inline void
skb_copy_from_linear_data(const struct sk_buff * skb,void * dst,size_t len)922 skb_copy_from_linear_data(const struct sk_buff *skb, void *dst, size_t len)
923 {
924
925 SKB_TRACE(skb);
926 /* Let us just hope the destination has len space ... */
927 memcpy(dst, skb->data, len);
928 }
929
930 #endif /* _LINUXKPI_LINUX_SKBUFF_H */
931