/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2014 Intel Corporation
 */

#include <inttypes.h>
#include <string.h>

#include <rte_string_fns.h>
#include <rte_log.h>
#include <rte_mbuf.h>
#include <rte_mbuf_dyn.h>
#include <rte_eal_memconfig.h>
#include <rte_errno.h>
#include <rte_malloc.h>
#include <rte_tailq.h>

#include "rte_reorder.h"

TAILQ_HEAD(rte_reorder_list, rte_tailq_entry);

static struct rte_tailq_elem rte_reorder_tailq = {
	.name = "RTE_REORDER",
};
EAL_REGISTER_TAILQ(rte_reorder_tailq)

#define NO_FLAGS 0
#define RTE_REORDER_PREFIX "RO_"
#define RTE_REORDER_NAMESIZE 32

/* Macros for printing using RTE_LOG */
#define RTE_LOGTYPE_REORDER RTE_LOGTYPE_USER1

#define RTE_REORDER_SEQN_DYNFIELD_NAME "rte_reorder_seqn_dynfield"
int rte_reorder_seqn_dynfield_offset = -1;
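
/*
 * The per-mbuf sequence number lives in an mbuf dynamic field. The offset
 * above is filled in by rte_mbuf_dynfield_register() in rte_reorder_create()
 * and is used by the rte_reorder_seqn() accessor in the public header to
 * locate the field, along the lines of (a sketch):
 *
 *	RTE_MBUF_DYNFIELD(mbuf, rte_reorder_seqn_dynfield_offset,
 *			rte_reorder_seqn_t *)
 */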

/* A generic circular buffer */
struct cir_buffer {
	unsigned int size;   /**< Number of entries that can be stored */
	unsigned int mask;   /**< [size - 1]: used for wrap-around */
	unsigned int head;   /**< insertion point in buffer */
	unsigned int tail;   /**< extraction point in buffer */
	struct rte_mbuf **entries;
} __rte_cache_aligned;
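
/*
 * Because size is a power of two, "& mask" implements the index wrap-around:
 * e.g. size = 8 gives mask = 0x7, so (7 + 1) & 0x7 == 0 and the next slot
 * after the last entry is slot 0 again.
 */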

/* The reorder buffer data structure itself */
struct rte_reorder_buffer {
	char name[RTE_REORDER_NAMESIZE];
	uint32_t min_seqn;  /**< Lowest seq. number that can be in the buffer */
	unsigned int memsize; /**< memory area size of reorder buffer */
	struct cir_buffer ready_buf; /**< temp buffer for dequeued entries */
	struct cir_buffer order_buf; /**< buffer used to reorder entries */
	int is_initialized;
} __rte_cache_aligned;

static void
rte_reorder_free_mbufs(struct rte_reorder_buffer *b);
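
/*
 * A reorder buffer lives in one contiguous memory area, which
 * rte_reorder_init() carves up as follows (derived from the pointer
 * arithmetic below):
 *
 *   | struct rte_reorder_buffer | ready_buf entries | order_buf entries |
 *   |<------- sizeof(*b) ------>|<- size pointers ->|<- size pointers ->|
 *
 * where each entries array holds "size" struct rte_mbuf pointers.
 */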
struct rte_reorder_buffer *
rte_reorder_init(struct rte_reorder_buffer *b, unsigned int bufsize,
		const char *name, unsigned int size)
{
	const unsigned int min_bufsize = sizeof(*b) +
					(2 * size * sizeof(struct rte_mbuf *));

	if (b == NULL) {
		RTE_LOG(ERR, REORDER, "Invalid reorder buffer parameter:"
					" NULL\n");
		rte_errno = EINVAL;
		return NULL;
	}
	if (!rte_is_power_of_2(size)) {
		RTE_LOG(ERR, REORDER, "Invalid reorder buffer size"
				" - Not a power of 2\n");
		rte_errno = EINVAL;
		return NULL;
	}
	if (name == NULL) {
		RTE_LOG(ERR, REORDER, "Invalid reorder buffer name ptr:"
					" NULL\n");
		rte_errno = EINVAL;
		return NULL;
	}
	if (bufsize < min_bufsize) {
		RTE_LOG(ERR, REORDER, "Invalid reorder buffer memory size: %u, "
			"minimum required: %u\n", bufsize, min_bufsize);
		rte_errno = EINVAL;
		return NULL;
	}

	memset(b, 0, bufsize);
	strlcpy(b->name, name, sizeof(b->name));
	b->memsize = bufsize;
	b->order_buf.size = b->ready_buf.size = size;
	b->order_buf.mask = b->ready_buf.mask = size - 1;
	b->ready_buf.entries = (void *)&b[1];
	b->order_buf.entries = RTE_PTR_ADD(&b[1],
			size * sizeof(b->ready_buf.entries[0]));

	return b;
}
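
/*
 * A minimal usage sketch for rte_reorder_init() with caller-managed memory
 * (illustrative only: the size, the "RO_EXAMPLE" name and the bare-bones
 * error handling are not part of this library):
 *
 *	unsigned int size = 1024;	// must be a power of two
 *	unsigned int bufsize = sizeof(struct rte_reorder_buffer) +
 *			2 * size * sizeof(struct rte_mbuf *);
 *	struct rte_reorder_buffer *mem = rte_malloc(NULL, bufsize, 0);
 *	struct rte_reorder_buffer *b = NULL;
 *
 *	if (mem != NULL)
 *		b = rte_reorder_init(mem, bufsize, "RO_EXAMPLE", size);
 *	if (b == NULL)
 *		rte_free(mem);	// init failed (or the allocation did)
 */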

struct rte_reorder_buffer *
rte_reorder_create(const char *name, unsigned socket_id, unsigned int size)
{
	struct rte_reorder_buffer *b = NULL;
	struct rte_tailq_entry *te;
	struct rte_reorder_list *reorder_list;
	const unsigned int bufsize = sizeof(struct rte_reorder_buffer) +
				(2 * size * sizeof(struct rte_mbuf *));
	static const struct rte_mbuf_dynfield reorder_seqn_dynfield_desc = {
		.name = RTE_REORDER_SEQN_DYNFIELD_NAME,
		.size = sizeof(rte_reorder_seqn_t),
		.align = __alignof__(rte_reorder_seqn_t),
	};

	reorder_list = RTE_TAILQ_CAST(rte_reorder_tailq.head, rte_reorder_list);

	/* Check user arguments. */
	if (!rte_is_power_of_2(size)) {
		RTE_LOG(ERR, REORDER, "Invalid reorder buffer size"
				" - Not a power of 2\n");
		rte_errno = EINVAL;
		return NULL;
	}
	if (name == NULL) {
		RTE_LOG(ERR, REORDER, "Invalid reorder buffer name ptr:"
					" NULL\n");
		rte_errno = EINVAL;
		return NULL;
	}

	rte_reorder_seqn_dynfield_offset =
		rte_mbuf_dynfield_register(&reorder_seqn_dynfield_desc);
	if (rte_reorder_seqn_dynfield_offset < 0) {
		RTE_LOG(ERR, REORDER, "Failed to register mbuf field for reorder sequence number\n");
		rte_errno = ENOMEM;
		return NULL;
	}

	rte_mcfg_tailq_write_lock();

	/* guarantee there's no existing entry with the same name */
	TAILQ_FOREACH(te, reorder_list, next) {
		b = (struct rte_reorder_buffer *) te->data;
		if (strncmp(name, b->name, RTE_REORDER_NAMESIZE) == 0)
			break;
	}
	if (te != NULL)
		goto exit;

	/* allocate tailq entry */
	te = rte_zmalloc("REORDER_TAILQ_ENTRY", sizeof(*te), 0);
	if (te == NULL) {
		RTE_LOG(ERR, REORDER, "Failed to allocate tailq entry\n");
		rte_errno = ENOMEM;
		b = NULL;
		goto exit;
	}

	/* Allocate memory to store the reorder buffer structure. */
	b = rte_zmalloc_socket("REORDER_BUFFER", bufsize, 0, socket_id);
	if (b == NULL) {
		RTE_LOG(ERR, REORDER, "Failed to allocate reorder buffer memory\n");
		rte_errno = ENOMEM;
		rte_free(te);
	} else {
		rte_reorder_init(b, bufsize, name, size);
		te->data = (void *)b;
		TAILQ_INSERT_TAIL(reorder_list, te, next);
	}

exit:
	rte_mcfg_tailq_write_unlock();
	return b;
}
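
/*
 * Typical life cycle of a buffer obtained from rte_reorder_create()
 * (a sketch: the name, size, seqn counter and out[] array below are
 * illustrative, not part of this library):
 *
 *	struct rte_reorder_buffer *b =
 *		rte_reorder_create("RO_EXAMPLE", rte_socket_id(), 1024);
 *	...
 *	*rte_reorder_seqn(m) = seqn++;	// tag each mbuf before inserting
 *	rte_reorder_insert(b, m);	// may fail for out-of-window seqns
 *	...
 *	nb = rte_reorder_drain(b, out, RTE_DIM(out));	// in-order mbufs
 *	...
 *	rte_reorder_free(b);
 */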

void
rte_reorder_reset(struct rte_reorder_buffer *b)
{
	char name[RTE_REORDER_NAMESIZE];

	rte_reorder_free_mbufs(b);
	strlcpy(name, b->name, sizeof(name));
	/* No error checking as current values should be valid */
	rte_reorder_init(b, b->memsize, name, b->order_buf.size);
}

static void
rte_reorder_free_mbufs(struct rte_reorder_buffer *b)
{
	unsigned i;

	/* Free up the mbufs of order buffer & ready buffer */
	for (i = 0; i < b->order_buf.size; i++) {
		if (b->order_buf.entries[i])
			rte_pktmbuf_free(b->order_buf.entries[i]);
		if (b->ready_buf.entries[i])
			rte_pktmbuf_free(b->ready_buf.entries[i]);
	}
}

void
rte_reorder_free(struct rte_reorder_buffer *b)
{
	struct rte_reorder_list *reorder_list;
	struct rte_tailq_entry *te;

	/* Check user arguments. */
	if (b == NULL)
		return;

	reorder_list = RTE_TAILQ_CAST(rte_reorder_tailq.head, rte_reorder_list);

	rte_mcfg_tailq_write_lock();

	/* find our tailq entry */
	TAILQ_FOREACH(te, reorder_list, next) {
		if (te->data == (void *) b)
			break;
	}
	if (te == NULL) {
		rte_mcfg_tailq_write_unlock();
		return;
	}

	TAILQ_REMOVE(reorder_list, te, next);

	rte_mcfg_tailq_write_unlock();

	rte_reorder_free_mbufs(b);

	rte_free(b);
	rte_free(te);
}

struct rte_reorder_buffer *
rte_reorder_find_existing(const char *name)
{
	struct rte_reorder_buffer *b = NULL;
	struct rte_tailq_entry *te;
	struct rte_reorder_list *reorder_list;

	if (name == NULL) {
		rte_errno = EINVAL;
		return NULL;
	}

	reorder_list = RTE_TAILQ_CAST(rte_reorder_tailq.head, rte_reorder_list);

	rte_mcfg_tailq_read_lock();
	TAILQ_FOREACH(te, reorder_list, next) {
		b = (struct rte_reorder_buffer *) te->data;
		if (strncmp(name, b->name, RTE_REORDER_NAMESIZE) == 0)
			break;
	}
	rte_mcfg_tailq_read_unlock();

	if (te == NULL) {
		rte_errno = ENOENT;
		return NULL;
	}

	return b;
}

static unsigned
rte_reorder_fill_overflow(struct rte_reorder_buffer *b, unsigned n)
{
	/*
	 * 1. Move all ready entries that fit to the ready_buf
	 * 2. check if we meet the minimum needed (n).
	 * 3. If not, then skip any gaps and keep moving.
	 * 4. If at any point the ready buffer is full, stop
	 * 5. Return the number of positions the order_buf head has moved
	 */

	struct cir_buffer *order_buf = &b->order_buf,
			*ready_buf = &b->ready_buf;

	unsigned int order_head_adv = 0;

	/*
	 * move at least n packets to ready buffer, assuming ready buffer
	 * has room for those packets.
	 */
	while (order_head_adv < n &&
			((ready_buf->head + 1) & ready_buf->mask) != ready_buf->tail) {

		/* if we are blocked waiting on a packet, skip it */
		if (order_buf->entries[order_buf->head] == NULL) {
			order_buf->head = (order_buf->head + 1) & order_buf->mask;
			order_head_adv++;
		}

		/* Move all ready entries that fit to the ready_buf */
		while (order_buf->entries[order_buf->head] != NULL) {
			/*
			 * Stop before copying if no free ready slot is left;
			 * the entry stays in order_buf and is picked up by a
			 * later call, instead of being written to a slot that
			 * is never published and is silently overwritten.
			 */
			if (((ready_buf->head + 1) & ready_buf->mask) ==
					ready_buf->tail)
				break;

			ready_buf->entries[ready_buf->head] =
					order_buf->entries[order_buf->head];
			ready_buf->head = (ready_buf->head + 1) & ready_buf->mask;

			order_buf->entries[order_buf->head] = NULL;
			order_head_adv++;
			order_buf->head = (order_buf->head + 1) & order_buf->mask;
		}
	}

	b->min_seqn += order_head_adv;
	/* Return the number of positions the order_buf head has moved */
	return order_head_adv;
}
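
/*
 * Worked example for rte_reorder_fill_overflow() (illustrative): with
 * order_buf = [NULL, A, B, NULL, ...], head = 0, n = 1 and an empty
 * ready buffer, the NULL in slot 0 (a late or dropped packet) is skipped
 * (head -> 1), then A and B are moved to the ready buffer (head -> 3).
 * The function returns order_head_adv = 3 and min_seqn advances by 3.
 */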

int
rte_reorder_insert(struct rte_reorder_buffer *b, struct rte_mbuf *mbuf)
{
	uint32_t offset, position;
	struct cir_buffer *order_buf;

	if (b == NULL || mbuf == NULL) {
		rte_errno = EINVAL;
		return -1;
	}

	order_buf = &b->order_buf;
	if (!b->is_initialized) {
		b->min_seqn = *rte_reorder_seqn(mbuf);
		b->is_initialized = 1;
	}

	/*
	 * calculate the offset from the head pointer we need to go.
	 * The subtraction takes care of the sequence number wrapping.
	 * For example (using 16-bit for brevity):
	 * min_seqn  = 0xFFFD
	 * mbuf_seqn = 0x0010
	 * offset    = 0x0010 - 0xFFFD = 0x13
	 */
	offset = *rte_reorder_seqn(mbuf) - b->min_seqn;

	/*
	 * The action to take depends on the offset.
	 * offset < buffer->size: the mbuf fits within the current window of
	 *	sequence numbers we can reorder. EXPECTED CASE.
	 * offset >= buffer->size: the mbuf is outside the current window.
	 *	There are two cases to consider:
	 * 1. The packet sequence is just outside the window (less than twice
	 *	the buffer size away), so we try to shift the head pointer
	 *	forward and move any ready-to-return packets out into the ready
	 *	ring. If a delayed or dropped packet has been preventing drains
	 *	from shifting the window, this skips over it instead, and any
	 *	packets dequeued here will be returned on the next drain call.
	 * 2. The packet sequence number is vastly outside our window, taken
	 *	here as having an offset of at least twice the buffer size. The
	 *	packet is probably an old or late packet that was previously
	 *	skipped, so return an error; a caller could instead choose to
	 *	queue such packets for immediate return on the next drain call.
	 */
	if (offset < b->order_buf.size) {
		position = (order_buf->head + offset) & order_buf->mask;
		order_buf->entries[position] = mbuf;
	} else if (offset < 2 * b->order_buf.size) {
		if (rte_reorder_fill_overflow(b, offset + 1 - order_buf->size)
				< (offset + 1 - order_buf->size)) {
			/* Put in handling for enqueue straight to output */
			rte_errno = ENOSPC;
			return -1;
		}
		offset = *rte_reorder_seqn(mbuf) - b->min_seqn;
		position = (order_buf->head + offset) & order_buf->mask;
		order_buf->entries[position] = mbuf;
	} else {
		/* Put in handling for enqueue straight to output */
		rte_errno = ERANGE;
		return -1;
	}
	return 0;
}
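
/*
 * Worked example of the three insert cases (illustrative numbers): with
 * size = 1024 and min_seqn = 1000, a seqn of 1500 (offset 500) lands
 * directly in the window; a seqn of 2500 (offset 1500) first makes
 * rte_reorder_fill_overflow() advance the window by offset + 1 - size =
 * 477 slots (or fail with ENOSPC if the ready buffer cannot absorb
 * them); a seqn of 3500 (offset 2500, at least twice the size) is
 * rejected with rte_errno = ERANGE.
 */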

unsigned int
rte_reorder_drain(struct rte_reorder_buffer *b, struct rte_mbuf **mbufs,
		unsigned max_mbufs)
{
	unsigned int drain_cnt = 0;

	struct cir_buffer *order_buf = &b->order_buf,
			*ready_buf = &b->ready_buf;

	/* Try to fetch requested number of mbufs from ready buffer */
	while ((drain_cnt < max_mbufs) && (ready_buf->tail != ready_buf->head)) {
		mbufs[drain_cnt++] = ready_buf->entries[ready_buf->tail];
		/* clear the slot so rte_reorder_free_mbufs() cannot free an
		 * mbuf that has already been handed to the caller */
		ready_buf->entries[ready_buf->tail] = NULL;
		ready_buf->tail = (ready_buf->tail + 1) & ready_buf->mask;
	}

	/*
	 * If the requested number of buffers was not fetched from the ready
	 * buffer, fetch the remaining buffers from the order buffer.
	 */
	while ((drain_cnt < max_mbufs) &&
			(order_buf->entries[order_buf->head] != NULL)) {
		mbufs[drain_cnt++] = order_buf->entries[order_buf->head];
		order_buf->entries[order_buf->head] = NULL;
		b->min_seqn++;
		order_buf->head = (order_buf->head + 1) & order_buf->mask;
	}

	return drain_cnt;
}