1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2020 Intel Corporation
3 */
4 #include <stdlib.h>
5 #include <string.h>
6 #include <stdio.h>
7 #include <errno.h>
8 #include <inttypes.h>
9 #include <sys/queue.h>
10 #include <arpa/inet.h>
11
12 #include <rte_common.h>
13 #include <rte_prefetch.h>
14 #include <rte_byteorder.h>
15
16 #include "rte_swx_pipeline.h"
17 #include "rte_swx_ctl.h"
18
/* Argument-checking helper: return -err_code from the enclosing function
 * when condition does not hold.
 */
#define CHECK(condition, err_code) \
do { \
	if (!(condition)) \
		return -(err_code); \
} while (0)

/* A valid name is non-NULL, non-empty and NUL-terminated within
 * RTE_SWX_NAME_SIZE bytes.
 */
#define CHECK_NAME(name, err_code) \
	CHECK((name) && \
	      (name)[0] && \
	      (strnlen((name), RTE_SWX_NAME_SIZE) < RTE_SWX_NAME_SIZE), \
	      err_code)

/* Same rule as CHECK_NAME, but for instruction strings, which have a
 * larger size limit.
 */
#define CHECK_INSTRUCTION(instr, err_code) \
	CHECK((instr) && \
	      (instr)[0] && \
	      (strnlen((instr), RTE_SWX_INSTRUCTION_SIZE) < \
	       RTE_SWX_INSTRUCTION_SIZE), \
	      err_code)

#ifndef TRACE_LEVEL
#define TRACE_LEVEL 0
#endif

/* Compile-time tracing: expands to printf when TRACE_LEVEL is non-zero,
 * to nothing otherwise.
 */
#if TRACE_LEVEL
#define TRACE(...) printf(__VA_ARGS__)
#else
#define TRACE(...)
#endif

/* 64-bit network <-> host byte order conversion shorthands. */
#define ntoh64(x) rte_be_to_cpu_64(x)
#define hton64(x) rte_cpu_to_be_64(x)
50
51 /*
52 * Struct.
53 */
54 struct field {
55 char name[RTE_SWX_NAME_SIZE];
56 uint32_t n_bits;
57 uint32_t offset;
58 };
59
60 struct struct_type {
61 TAILQ_ENTRY(struct_type) node;
62 char name[RTE_SWX_NAME_SIZE];
63 struct field *fields;
64 uint32_t n_fields;
65 uint32_t n_bits;
66 };
67
68 TAILQ_HEAD(struct_type_tailq, struct_type);
69
70 /*
71 * Input port.
72 */
73 struct port_in_type {
74 TAILQ_ENTRY(port_in_type) node;
75 char name[RTE_SWX_NAME_SIZE];
76 struct rte_swx_port_in_ops ops;
77 };
78
79 TAILQ_HEAD(port_in_type_tailq, port_in_type);
80
81 struct port_in {
82 TAILQ_ENTRY(port_in) node;
83 struct port_in_type *type;
84 void *obj;
85 uint32_t id;
86 };
87
88 TAILQ_HEAD(port_in_tailq, port_in);
89
90 struct port_in_runtime {
91 rte_swx_port_in_pkt_rx_t pkt_rx;
92 void *obj;
93 };
94
95 /*
96 * Output port.
97 */
98 struct port_out_type {
99 TAILQ_ENTRY(port_out_type) node;
100 char name[RTE_SWX_NAME_SIZE];
101 struct rte_swx_port_out_ops ops;
102 };
103
104 TAILQ_HEAD(port_out_type_tailq, port_out_type);
105
106 struct port_out {
107 TAILQ_ENTRY(port_out) node;
108 struct port_out_type *type;
109 void *obj;
110 uint32_t id;
111 };
112
113 TAILQ_HEAD(port_out_tailq, port_out);
114
115 struct port_out_runtime {
116 rte_swx_port_out_pkt_tx_t pkt_tx;
117 rte_swx_port_out_flush_t flush;
118 void *obj;
119 };
120
121 /*
122 * Extern object.
123 */
124 struct extern_type_member_func {
125 TAILQ_ENTRY(extern_type_member_func) node;
126 char name[RTE_SWX_NAME_SIZE];
127 rte_swx_extern_type_member_func_t func;
128 uint32_t id;
129 };
130
131 TAILQ_HEAD(extern_type_member_func_tailq, extern_type_member_func);
132
133 struct extern_type {
134 TAILQ_ENTRY(extern_type) node;
135 char name[RTE_SWX_NAME_SIZE];
136 struct struct_type *mailbox_struct_type;
137 rte_swx_extern_type_constructor_t constructor;
138 rte_swx_extern_type_destructor_t destructor;
139 struct extern_type_member_func_tailq funcs;
140 uint32_t n_funcs;
141 };
142
143 TAILQ_HEAD(extern_type_tailq, extern_type);
144
145 struct extern_obj {
146 TAILQ_ENTRY(extern_obj) node;
147 char name[RTE_SWX_NAME_SIZE];
148 struct extern_type *type;
149 void *obj;
150 uint32_t struct_id;
151 uint32_t id;
152 };
153
154 TAILQ_HEAD(extern_obj_tailq, extern_obj);
155
156 #ifndef RTE_SWX_EXTERN_TYPE_MEMBER_FUNCS_MAX
157 #define RTE_SWX_EXTERN_TYPE_MEMBER_FUNCS_MAX 8
158 #endif
159
160 struct extern_obj_runtime {
161 void *obj;
162 uint8_t *mailbox;
163 rte_swx_extern_type_member_func_t funcs[RTE_SWX_EXTERN_TYPE_MEMBER_FUNCS_MAX];
164 };
165
166 /*
167 * Extern function.
168 */
169 struct extern_func {
170 TAILQ_ENTRY(extern_func) node;
171 char name[RTE_SWX_NAME_SIZE];
172 struct struct_type *mailbox_struct_type;
173 rte_swx_extern_func_t func;
174 uint32_t struct_id;
175 uint32_t id;
176 };
177
178 TAILQ_HEAD(extern_func_tailq, extern_func);
179
180 struct extern_func_runtime {
181 uint8_t *mailbox;
182 rte_swx_extern_func_t func;
183 };
184
185 /*
186 * Header.
187 */
188 struct header {
189 TAILQ_ENTRY(header) node;
190 char name[RTE_SWX_NAME_SIZE];
191 struct struct_type *st;
192 uint32_t struct_id;
193 uint32_t id;
194 };
195
196 TAILQ_HEAD(header_tailq, header);
197
198 struct header_runtime {
199 uint8_t *ptr0;
200 };
201
202 struct header_out_runtime {
203 uint8_t *ptr0;
204 uint8_t *ptr;
205 uint32_t n_bytes;
206 };
207
208 /*
209 * Instruction.
210 */
211
212 /* Packet headers are always in Network Byte Order (NBO), i.e. big endian.
213 * Packet meta-data fields are always assumed to be in Host Byte Order (HBO).
214 * Table entry fields can be in either NBO or HBO; they are assumed to be in HBO
215 * when transferred to packet meta-data and in NBO when transferred to packet
216 * headers.
217 */
218
219 /* Notation conventions:
220 * -Header field: H = h.header.field (dst/src)
221 * -Meta-data field: M = m.field (dst/src)
222 * -Extern object mailbox field: E = e.field (dst/src)
223 * -Extern function mailbox field: F = f.field (dst/src)
224 * -Table action data field: T = t.field (src only)
225 * -Immediate value: I = 32-bit unsigned value (src only)
226 */
227
/* Instruction opcode set. Specialized variants (suffixes such as _S, _I,
 * _MH, _HM, _HH, numeric counts) are selected at translation time based on
 * operand kinds and byte order, so the fast path never re-inspects them.
 */
enum instruction_type {
	/* rx m.port_in */
	INSTR_RX,

	/* tx m.port_out */
	INSTR_TX,

	/* extract h.header */
	INSTR_HDR_EXTRACT,
	INSTR_HDR_EXTRACT2,
	INSTR_HDR_EXTRACT3,
	INSTR_HDR_EXTRACT4,
	INSTR_HDR_EXTRACT5,
	INSTR_HDR_EXTRACT6,
	INSTR_HDR_EXTRACT7,
	INSTR_HDR_EXTRACT8,

	/* emit h.header */
	INSTR_HDR_EMIT,
	INSTR_HDR_EMIT_TX,
	INSTR_HDR_EMIT2_TX,
	INSTR_HDR_EMIT3_TX,
	INSTR_HDR_EMIT4_TX,
	INSTR_HDR_EMIT5_TX,
	INSTR_HDR_EMIT6_TX,
	INSTR_HDR_EMIT7_TX,
	INSTR_HDR_EMIT8_TX,

	/* validate h.header */
	INSTR_HDR_VALIDATE,

	/* invalidate h.header */
	INSTR_HDR_INVALIDATE,

	/* mov dst src
	 * dst = src
	 * dst = HMEF, src = HMEFTI
	 */
	INSTR_MOV,   /* dst = MEF, src = MEFT */
	INSTR_MOV_S, /* (dst, src) = (MEF, H) or (dst, src) = (H, MEFT) */
	INSTR_MOV_I, /* dst = HMEF, src = I */

	/* dma h.header t.field
	 * memcpy(h.header, t.field, sizeof(h.header))
	 */
	INSTR_DMA_HT,
	INSTR_DMA_HT2,
	INSTR_DMA_HT3,
	INSTR_DMA_HT4,
	INSTR_DMA_HT5,
	INSTR_DMA_HT6,
	INSTR_DMA_HT7,
	INSTR_DMA_HT8,

	/* add dst src
	 * dst += src
	 * dst = HMEF, src = HMEFTI
	 */
	INSTR_ALU_ADD,    /* dst = MEF, src = MEF */
	INSTR_ALU_ADD_MH, /* dst = MEF, src = H */
	INSTR_ALU_ADD_HM, /* dst = H, src = MEF */
	INSTR_ALU_ADD_HH, /* dst = H, src = H */
	INSTR_ALU_ADD_MI, /* dst = MEF, src = I */
	INSTR_ALU_ADD_HI, /* dst = H, src = I */

	/* sub dst src
	 * dst -= src
	 * dst = HMEF, src = HMEFTI
	 */
	INSTR_ALU_SUB,    /* dst = MEF, src = MEF */
	INSTR_ALU_SUB_MH, /* dst = MEF, src = H */
	INSTR_ALU_SUB_HM, /* dst = H, src = MEF */
	INSTR_ALU_SUB_HH, /* dst = H, src = H */
	INSTR_ALU_SUB_MI, /* dst = MEF, src = I */
	INSTR_ALU_SUB_HI, /* dst = H, src = I */

	/* ckadd dst src
	 * dst = dst '+ src[0:1] '+ src[2:3] + ...
	 * dst = H, src = {H, h.header}
	 */
	INSTR_ALU_CKADD_FIELD,    /* src = H */
	INSTR_ALU_CKADD_STRUCT20, /* src = h.header, with sizeof(header) = 20 */
	INSTR_ALU_CKADD_STRUCT,   /* src = h.header, with any sizeof(header) */

	/* cksub dst src
	 * dst = dst '- src
	 * dst = H, src = H
	 */
	INSTR_ALU_CKSUB_FIELD,

	/* and dst src
	 * dst &= src
	 * dst = HMEF, src = HMEFTI
	 */
	INSTR_ALU_AND,   /* dst = MEF, src = MEFT */
	INSTR_ALU_AND_S, /* (dst, src) = (MEF, H) or (dst, src) = (H, MEFT) */
	INSTR_ALU_AND_I, /* dst = HMEF, src = I */

	/* or dst src
	 * dst |= src
	 * dst = HMEF, src = HMEFTI
	 */
	INSTR_ALU_OR,   /* dst = MEF, src = MEFT */
	INSTR_ALU_OR_S, /* (dst, src) = (MEF, H) or (dst, src) = (H, MEFT) */
	INSTR_ALU_OR_I, /* dst = HMEF, src = I */

	/* xor dst src
	 * dst ^= src
	 * dst = HMEF, src = HMEFTI
	 */
	INSTR_ALU_XOR,   /* dst = MEF, src = MEFT */
	INSTR_ALU_XOR_S, /* (dst, src) = (MEF, H) or (dst, src) = (H, MEFT) */
	INSTR_ALU_XOR_I, /* dst = HMEF, src = I */

	/* shl dst src
	 * dst <<= src
	 * dst = HMEF, src = HMEFTI
	 */
	INSTR_ALU_SHL,    /* dst = MEF, src = MEF */
	INSTR_ALU_SHL_MH, /* dst = MEF, src = H */
	INSTR_ALU_SHL_HM, /* dst = H, src = MEF */
	INSTR_ALU_SHL_HH, /* dst = H, src = H */
	INSTR_ALU_SHL_MI, /* dst = MEF, src = I */
	INSTR_ALU_SHL_HI, /* dst = H, src = I */

	/* shr dst src
	 * dst >>= src
	 * dst = HMEF, src = HMEFTI
	 */
	INSTR_ALU_SHR,    /* dst = MEF, src = MEF */
	INSTR_ALU_SHR_MH, /* dst = MEF, src = H */
	INSTR_ALU_SHR_HM, /* dst = H, src = MEF */
	INSTR_ALU_SHR_HH, /* dst = H, src = H */
	INSTR_ALU_SHR_MI, /* dst = MEF, src = I */
	INSTR_ALU_SHR_HI, /* dst = H, src = I */

	/* table TABLE */
	INSTR_TABLE,

	/* extern e.obj.func */
	INSTR_EXTERN_OBJ,

	/* extern f.func */
	INSTR_EXTERN_FUNC,

	/* jmp LABEL
	 * Unconditional jump
	 */
	INSTR_JMP,

	/* jmpv LABEL h.header
	 * Jump if header is valid
	 */
	INSTR_JMP_VALID,

	/* jmpnv LABEL h.header
	 * Jump if header is invalid
	 */
	INSTR_JMP_INVALID,

	/* jmph LABEL
	 * Jump if table lookup hit
	 */
	INSTR_JMP_HIT,

	/* jmpnh LABEL
	 * Jump if table lookup miss
	 */
	INSTR_JMP_MISS,

	/* jmpa LABEL ACTION
	 * Jump if action run
	 */
	INSTR_JMP_ACTION_HIT,

	/* jmpna LABEL ACTION
	 * Jump if action not run
	 */
	INSTR_JMP_ACTION_MISS,

	/* jmpeq LABEL a b
	 * Jump if a is equal to b
	 * a = HMEFT, b = HMEFTI
	 */
	INSTR_JMP_EQ,   /* (a, b) = (MEFT, MEFT) or (a, b) = (H, H) */
	INSTR_JMP_EQ_S, /* (a, b) = (MEFT, H) or (a, b) = (H, MEFT) */
	INSTR_JMP_EQ_I, /* (a, b) = (MEFT, I) or (a, b) = (H, I) */

	/* jmpneq LABEL a b
	 * Jump if a is not equal to b
	 * a = HMEFT, b = HMEFTI
	 */
	INSTR_JMP_NEQ,   /* (a, b) = (MEFT, MEFT) or (a, b) = (H, H) */
	INSTR_JMP_NEQ_S, /* (a, b) = (MEFT, H) or (a, b) = (H, MEFT) */
	INSTR_JMP_NEQ_I, /* (a, b) = (MEFT, I) or (a, b) = (H, I) */

	/* jmplt LABEL a b
	 * Jump if a is less than b
	 * a = HMEFT, b = HMEFTI
	 */
	INSTR_JMP_LT,    /* a = MEF, b = MEF */
	INSTR_JMP_LT_MH, /* a = MEF, b = H */
	INSTR_JMP_LT_HM, /* a = H, b = MEF */
	INSTR_JMP_LT_HH, /* a = H, b = H */
	INSTR_JMP_LT_MI, /* a = MEF, b = I */
	INSTR_JMP_LT_HI, /* a = H, b = I */

	/* jmpgt LABEL a b
	 * Jump if a is greater than b
	 * a = HMEFT, b = HMEFTI
	 */
	INSTR_JMP_GT,    /* a = MEF, b = MEF */
	INSTR_JMP_GT_MH, /* a = MEF, b = H */
	INSTR_JMP_GT_HM, /* a = H, b = MEF */
	INSTR_JMP_GT_HH, /* a = H, b = H */
	INSTR_JMP_GT_MI, /* a = MEF, b = I */
	INSTR_JMP_GT_HI, /* a = H, b = I */

	/* return
	 * Return from action
	 */
	INSTR_RETURN,
};
451
/* Compact encoding of one field operand: which per-thread struct it lives
 * in, its bit width and its byte offset.
 */
struct instr_operand {
	uint8_t struct_id;
	uint8_t n_bits;
	uint8_t offset;
	uint8_t pad;
};

/* Encoding for rx/tx/extract/emit. The hdr arrays hold up to 8 headers so
 * one fused instruction can process several consecutive headers.
 */
struct instr_io {
	struct {
		uint8_t offset;
		uint8_t n_bits;
		uint8_t pad[2];
	} io;

	struct {
		uint8_t header_id[8];
		uint8_t struct_id[8];
		uint8_t n_bytes[8];
	} hdr;
};

/* Encoding for validate/invalidate. */
struct instr_hdr_validity {
	uint8_t header_id;
};

/* Encoding for the table instruction. */
struct instr_table {
	uint8_t table_id;
};

/* Encoding for extern object member function calls. */
struct instr_extern_obj {
	uint8_t ext_obj_id;
	uint8_t func_id;
};

/* Encoding for extern function calls. */
struct instr_extern_func {
	uint8_t ext_func_id;
};

/* Encoding for mov/alu: destination operand plus either a source operand
 * or an immediate value (the _I instruction variants use src_val).
 */
struct instr_dst_src {
	struct instr_operand dst;
	union {
		struct instr_operand src;
		uint64_t src_val;
	};
};

/* Encoding for dma: up to 8 (header, table-entry-offset, size) triplets. */
struct instr_dma {
	struct {
		uint8_t header_id[8];
		uint8_t struct_id[8];
	} dst;

	struct {
		uint8_t offset[8];
	} src;

	uint16_t n_bytes[8];
};

/* Encoding for all jump variants: resolved target instruction pointer plus
 * the operands relevant to the particular condition.
 */
struct instr_jmp {
	struct instruction *ip;

	union {
		struct instr_operand a;
		uint8_t header_id;
		uint8_t action_id;
	};

	union {
		struct instr_operand b;
		uint64_t b_val;
	};
};

/* One translated instruction: opcode plus per-opcode operand encoding. */
struct instruction {
	enum instruction_type type;
	union {
		struct instr_io io;
		struct instr_hdr_validity valid;
		struct instr_dst_src mov;
		struct instr_dma dma;
		struct instr_dst_src alu;
		struct instr_table table;
		struct instr_extern_obj ext_obj;
		struct instr_extern_func ext_func;
		struct instr_jmp jmp;
	};
};

/* Per-instruction translation-time metadata (not used on the fast path). */
struct instruction_data {
	char label[RTE_SWX_NAME_SIZE];
	char jmp_label[RTE_SWX_NAME_SIZE];
	uint32_t n_users; /* user = jmp instruction to this instruction. */
	int invalid;
};
547
548 /*
549 * Action.
550 */
551 struct action {
552 TAILQ_ENTRY(action) node;
553 char name[RTE_SWX_NAME_SIZE];
554 struct struct_type *st;
555 struct instruction *instructions;
556 uint32_t n_instructions;
557 uint32_t id;
558 };
559
560 TAILQ_HEAD(action_tailq, action);
561
562 /*
563 * Table.
564 */
565 struct table_type {
566 TAILQ_ENTRY(table_type) node;
567 char name[RTE_SWX_NAME_SIZE];
568 enum rte_swx_table_match_type match_type;
569 struct rte_swx_table_ops ops;
570 };
571
572 TAILQ_HEAD(table_type_tailq, table_type);
573
574 struct match_field {
575 enum rte_swx_table_match_type match_type;
576 struct field *field;
577 };
578
579 struct table {
580 TAILQ_ENTRY(table) node;
581 char name[RTE_SWX_NAME_SIZE];
582 char args[RTE_SWX_NAME_SIZE];
583 struct table_type *type; /* NULL when n_fields == 0. */
584
585 /* Match. */
586 struct match_field *fields;
587 uint32_t n_fields;
588 int is_header; /* Only valid when n_fields > 0. */
589 struct header *header; /* Only valid when n_fields > 0. */
590
591 /* Action. */
592 struct action **actions;
593 struct action *default_action;
594 uint8_t *default_action_data;
595 uint32_t n_actions;
596 int default_action_is_const;
597 uint32_t action_data_size_max;
598
599 uint32_t size;
600 uint32_t id;
601 };
602
603 TAILQ_HEAD(table_tailq, table);
604
605 struct table_runtime {
606 rte_swx_table_lookup_t func;
607 void *mailbox;
608 uint8_t **key;
609 };
610
611 /*
612 * Pipeline.
613 */
614 struct thread {
615 /* Packet. */
616 struct rte_swx_pkt pkt;
617 uint8_t *ptr;
618
619 /* Structures. */
620 uint8_t **structs;
621
622 /* Packet headers. */
623 struct header_runtime *headers; /* Extracted or generated headers. */
624 struct header_out_runtime *headers_out; /* Emitted headers. */
625 uint8_t *header_storage;
626 uint8_t *header_out_storage;
627 uint64_t valid_headers;
628 uint32_t n_headers_out;
629
630 /* Packet meta-data. */
631 uint8_t *metadata;
632
633 /* Tables. */
634 struct table_runtime *tables;
635 struct rte_swx_table_state *table_state;
636 uint64_t action_id;
637 int hit; /* 0 = Miss, 1 = Hit. */
638
639 /* Extern objects and functions. */
640 struct extern_obj_runtime *extern_objs;
641 struct extern_func_runtime *extern_funcs;
642
643 /* Instructions. */
644 struct instruction *ip;
645 struct instruction *ret;
646 };
647
648 #define MASK64_BIT_GET(mask, pos) ((mask) & (1LLU << (pos)))
649 #define MASK64_BIT_SET(mask, pos) ((mask) | (1LLU << (pos)))
650 #define MASK64_BIT_CLR(mask, pos) ((mask) & ~(1LLU << (pos)))
651
652 #define HEADER_VALID(thread, header_id) \
653 MASK64_BIT_GET((thread)->valid_headers, header_id)
654
/* Generic ALU: dst = dst <operator> src. Both operands are read with a
 * single 64-bit load, masked down to their declared bit width, and the
 * result is merged back into dst without disturbing neighboring bits.
 * Both operands are in host byte order (HBO).
 */
#define ALU(thread, ip, operator) \
{ \
	uint8_t *dst_struct = (thread)->structs[(ip)->alu.dst.struct_id]; \
	uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->alu.dst.offset]; \
	uint64_t dst64 = *dst64_ptr; \
	uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->alu.dst.n_bits); \
	uint64_t dst = dst64 & dst64_mask; \
\
	uint8_t *src_struct = (thread)->structs[(ip)->alu.src.struct_id]; \
	uint64_t *src64_ptr = (uint64_t *)&src_struct[(ip)->alu.src.offset]; \
	uint64_t src64 = *src64_ptr; \
	uint64_t src64_mask = UINT64_MAX >> (64 - (ip)->alu.src.n_bits); \
	uint64_t src = src64 & src64_mask; \
\
	uint64_t result = dst operator src; \
\
	*dst64_ptr = (dst64 & ~dst64_mask) | (result & dst64_mask); \
}

#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN

/* ALU with HBO dst and NBO (network byte order) src: the src load is
 * byte-swapped and right-aligned before use.
 */
#define ALU_S(thread, ip, operator) \
{ \
	uint8_t *dst_struct = (thread)->structs[(ip)->alu.dst.struct_id]; \
	uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->alu.dst.offset]; \
	uint64_t dst64 = *dst64_ptr; \
	uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->alu.dst.n_bits); \
	uint64_t dst = dst64 & dst64_mask; \
\
	uint8_t *src_struct = (thread)->structs[(ip)->alu.src.struct_id]; \
	uint64_t *src64_ptr = (uint64_t *)&src_struct[(ip)->alu.src.offset]; \
	uint64_t src64 = *src64_ptr; \
	uint64_t src = ntoh64(src64) >> (64 - (ip)->alu.src.n_bits); \
\
	uint64_t result = dst operator src; \
\
	*dst64_ptr = (dst64 & ~dst64_mask) | (result & dst64_mask); \
}

#define ALU_MH ALU_S

/* ALU with NBO dst and HBO src: the result is left-aligned and swapped
 * back to network byte order before the merge.
 */
#define ALU_HM(thread, ip, operator) \
{ \
	uint8_t *dst_struct = (thread)->structs[(ip)->alu.dst.struct_id]; \
	uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->alu.dst.offset]; \
	uint64_t dst64 = *dst64_ptr; \
	uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->alu.dst.n_bits); \
	uint64_t dst = ntoh64(dst64) >> (64 - (ip)->alu.dst.n_bits); \
\
	uint8_t *src_struct = (thread)->structs[(ip)->alu.src.struct_id]; \
	uint64_t *src64_ptr = (uint64_t *)&src_struct[(ip)->alu.src.offset]; \
	uint64_t src64 = *src64_ptr; \
	uint64_t src64_mask = UINT64_MAX >> (64 - (ip)->alu.src.n_bits); \
	uint64_t src = src64 & src64_mask; \
\
	uint64_t result = dst operator src; \
	result = hton64(result << (64 - (ip)->alu.dst.n_bits)); \
\
	*dst64_ptr = (dst64 & ~dst64_mask) | result; \
}

/* ALU with both operands in NBO. */
#define ALU_HH(thread, ip, operator) \
{ \
	uint8_t *dst_struct = (thread)->structs[(ip)->alu.dst.struct_id]; \
	uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->alu.dst.offset]; \
	uint64_t dst64 = *dst64_ptr; \
	uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->alu.dst.n_bits); \
	uint64_t dst = ntoh64(dst64) >> (64 - (ip)->alu.dst.n_bits); \
\
	uint8_t *src_struct = (thread)->structs[(ip)->alu.src.struct_id]; \
	uint64_t *src64_ptr = (uint64_t *)&src_struct[(ip)->alu.src.offset]; \
	uint64_t src64 = *src64_ptr; \
	uint64_t src = ntoh64(src64) >> (64 - (ip)->alu.src.n_bits); \
\
	uint64_t result = dst operator src; \
	result = hton64(result << (64 - (ip)->alu.dst.n_bits)); \
\
	*dst64_ptr = (dst64 & ~dst64_mask) | result; \
}

#else

/* On big endian hosts NBO == HBO, so all variants collapse to ALU. */
#define ALU_S ALU
#define ALU_MH ALU
#define ALU_HM ALU
#define ALU_HH ALU

#endif
743
/* ALU with an immediate src value (HBO dst). */
#define ALU_I(thread, ip, operator) \
{ \
	uint8_t *dst_struct = (thread)->structs[(ip)->alu.dst.struct_id]; \
	uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->alu.dst.offset]; \
	uint64_t dst64 = *dst64_ptr; \
	uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->alu.dst.n_bits); \
	uint64_t dst = dst64 & dst64_mask; \
\
	uint64_t src = (ip)->alu.src_val; \
\
	uint64_t result = dst operator src; \
\
	*dst64_ptr = (dst64 & ~dst64_mask) | (result & dst64_mask); \
}

#define ALU_MI ALU_I

#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN

/* ALU with an immediate src value and NBO dst. */
#define ALU_HI(thread, ip, operator) \
{ \
	uint8_t *dst_struct = (thread)->structs[(ip)->alu.dst.struct_id]; \
	uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->alu.dst.offset]; \
	uint64_t dst64 = *dst64_ptr; \
	uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->alu.dst.n_bits); \
	uint64_t dst = ntoh64(dst64) >> (64 - (ip)->alu.dst.n_bits); \
\
	uint64_t src = (ip)->alu.src_val; \
\
	uint64_t result = dst operator src; \
	result = hton64(result << (64 - (ip)->alu.dst.n_bits)); \
\
	*dst64_ptr = (dst64 & ~dst64_mask) | result; \
}

#else

#define ALU_HI ALU_I

#endif
784
/* MOV: dst = src, both operands in HBO; only dst's n_bits are written. */
#define MOV(thread, ip) \
{ \
	uint8_t *dst_struct = (thread)->structs[(ip)->mov.dst.struct_id]; \
	uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->mov.dst.offset]; \
	uint64_t dst64 = *dst64_ptr; \
	uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->mov.dst.n_bits); \
\
	uint8_t *src_struct = (thread)->structs[(ip)->mov.src.struct_id]; \
	uint64_t *src64_ptr = (uint64_t *)&src_struct[(ip)->mov.src.offset]; \
	uint64_t src64 = *src64_ptr; \
	uint64_t src64_mask = UINT64_MAX >> (64 - (ip)->mov.src.n_bits); \
	uint64_t src = src64 & src64_mask; \
\
	*dst64_ptr = (dst64 & ~dst64_mask) | (src & dst64_mask); \
}

#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN

/* MOV with byte-order swap on the src side (one NBO operand). */
#define MOV_S(thread, ip) \
{ \
	uint8_t *dst_struct = (thread)->structs[(ip)->mov.dst.struct_id]; \
	uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->mov.dst.offset]; \
	uint64_t dst64 = *dst64_ptr; \
	uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->mov.dst.n_bits); \
\
	uint8_t *src_struct = (thread)->structs[(ip)->mov.src.struct_id]; \
	uint64_t *src64_ptr = (uint64_t *)&src_struct[(ip)->mov.src.offset]; \
	uint64_t src64 = *src64_ptr; \
	uint64_t src = ntoh64(src64) >> (64 - (ip)->mov.src.n_bits); \
\
	*dst64_ptr = (dst64 & ~dst64_mask) | (src & dst64_mask); \
}

#else

#define MOV_S MOV

#endif

/* MOV with an immediate src value. */
#define MOV_I(thread, ip) \
{ \
	uint8_t *dst_struct = (thread)->structs[(ip)->mov.dst.struct_id]; \
	uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->mov.dst.offset]; \
	uint64_t dst64 = *dst64_ptr; \
	uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->mov.dst.n_bits); \
\
	uint64_t src = (ip)->mov.src_val; \
\
	*dst64_ptr = (dst64 & ~dst64_mask) | (src & dst64_mask); \
}
835
/* Conditional jump: thread->ip moves to the jump target when
 * (a <operator> b) holds, otherwise to the next instruction.
 * Both operands in HBO.
 */
#define JMP_CMP(thread, ip, operator) \
{ \
	uint8_t *a_struct = (thread)->structs[(ip)->jmp.a.struct_id]; \
	uint64_t *a64_ptr = (uint64_t *)&a_struct[(ip)->jmp.a.offset]; \
	uint64_t a64 = *a64_ptr; \
	uint64_t a64_mask = UINT64_MAX >> (64 - (ip)->jmp.a.n_bits); \
	uint64_t a = a64 & a64_mask; \
\
	uint8_t *b_struct = (thread)->structs[(ip)->jmp.b.struct_id]; \
	uint64_t *b64_ptr = (uint64_t *)&b_struct[(ip)->jmp.b.offset]; \
	uint64_t b64 = *b64_ptr; \
	uint64_t b64_mask = UINT64_MAX >> (64 - (ip)->jmp.b.n_bits); \
	uint64_t b = b64 & b64_mask; \
\
	(thread)->ip = (a operator b) ? (ip)->jmp.ip : ((thread)->ip + 1); \
}

#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN

/* Conditional jump with NBO b operand. */
#define JMP_CMP_S(thread, ip, operator) \
{ \
	uint8_t *a_struct = (thread)->structs[(ip)->jmp.a.struct_id]; \
	uint64_t *a64_ptr = (uint64_t *)&a_struct[(ip)->jmp.a.offset]; \
	uint64_t a64 = *a64_ptr; \
	uint64_t a64_mask = UINT64_MAX >> (64 - (ip)->jmp.a.n_bits); \
	uint64_t a = a64 & a64_mask; \
\
	uint8_t *b_struct = (thread)->structs[(ip)->jmp.b.struct_id]; \
	uint64_t *b64_ptr = (uint64_t *)&b_struct[(ip)->jmp.b.offset]; \
	uint64_t b64 = *b64_ptr; \
	uint64_t b = ntoh64(b64) >> (64 - (ip)->jmp.b.n_bits); \
\
	(thread)->ip = (a operator b) ? (ip)->jmp.ip : ((thread)->ip + 1); \
}

#define JMP_CMP_MH JMP_CMP_S

/* Conditional jump with NBO a operand. */
#define JMP_CMP_HM(thread, ip, operator) \
{ \
	uint8_t *a_struct = (thread)->structs[(ip)->jmp.a.struct_id]; \
	uint64_t *a64_ptr = (uint64_t *)&a_struct[(ip)->jmp.a.offset]; \
	uint64_t a64 = *a64_ptr; \
	uint64_t a = ntoh64(a64) >> (64 - (ip)->jmp.a.n_bits); \
\
	uint8_t *b_struct = (thread)->structs[(ip)->jmp.b.struct_id]; \
	uint64_t *b64_ptr = (uint64_t *)&b_struct[(ip)->jmp.b.offset]; \
	uint64_t b64 = *b64_ptr; \
	uint64_t b64_mask = UINT64_MAX >> (64 - (ip)->jmp.b.n_bits); \
	uint64_t b = b64 & b64_mask; \
\
	(thread)->ip = (a operator b) ? (ip)->jmp.ip : ((thread)->ip + 1); \
}

/* Conditional jump with both operands in NBO. */
#define JMP_CMP_HH(thread, ip, operator) \
{ \
	uint8_t *a_struct = (thread)->structs[(ip)->jmp.a.struct_id]; \
	uint64_t *a64_ptr = (uint64_t *)&a_struct[(ip)->jmp.a.offset]; \
	uint64_t a64 = *a64_ptr; \
	uint64_t a = ntoh64(a64) >> (64 - (ip)->jmp.a.n_bits); \
\
	uint8_t *b_struct = (thread)->structs[(ip)->jmp.b.struct_id]; \
	uint64_t *b64_ptr = (uint64_t *)&b_struct[(ip)->jmp.b.offset]; \
	uint64_t b64 = *b64_ptr; \
	uint64_t b = ntoh64(b64) >> (64 - (ip)->jmp.b.n_bits); \
\
	(thread)->ip = (a operator b) ? (ip)->jmp.ip : ((thread)->ip + 1); \
}

#else

#define JMP_CMP_S JMP_CMP
#define JMP_CMP_MH JMP_CMP
#define JMP_CMP_HM JMP_CMP
#define JMP_CMP_HH JMP_CMP

#endif

/* Conditional jump against an immediate b value (HBO a). */
#define JMP_CMP_I(thread, ip, operator) \
{ \
	uint8_t *a_struct = (thread)->structs[(ip)->jmp.a.struct_id]; \
	uint64_t *a64_ptr = (uint64_t *)&a_struct[(ip)->jmp.a.offset]; \
	uint64_t a64 = *a64_ptr; \
	uint64_t a64_mask = UINT64_MAX >> (64 - (ip)->jmp.a.n_bits); \
	uint64_t a = a64 & a64_mask; \
\
	uint64_t b = (ip)->jmp.b_val; \
\
	(thread)->ip = (a operator b) ? (ip)->jmp.ip : ((thread)->ip + 1); \
}

#define JMP_CMP_MI JMP_CMP_I

#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN

/* Conditional jump against an immediate b value with NBO a. */
#define JMP_CMP_HI(thread, ip, operator) \
{ \
	uint8_t *a_struct = (thread)->structs[(ip)->jmp.a.struct_id]; \
	uint64_t *a64_ptr = (uint64_t *)&a_struct[(ip)->jmp.a.offset]; \
	uint64_t a64 = *a64_ptr; \
	uint64_t a = ntoh64(a64) >> (64 - (ip)->jmp.a.n_bits); \
\
	uint64_t b = (ip)->jmp.b_val; \
\
	(thread)->ip = (a operator b) ? (ip)->jmp.ip : ((thread)->ip + 1); \
}

#else

#define JMP_CMP_HI JMP_CMP_I

#endif
947
/* Read an n_bits-wide metadata field at the given byte offset (HBO). */
#define METADATA_READ(thread, offset, n_bits) \
({ \
	uint64_t *m64_ptr = (uint64_t *)&(thread)->metadata[offset]; \
	uint64_t m64 = *m64_ptr; \
	uint64_t m64_mask = UINT64_MAX >> (64 - (n_bits)); \
	(m64 & m64_mask); \
})

/* Write an n_bits-wide metadata field, preserving neighboring bits. */
#define METADATA_WRITE(thread, offset, n_bits, value) \
{ \
	uint64_t *m64_ptr = (uint64_t *)&(thread)->metadata[offset]; \
	uint64_t m64 = *m64_ptr; \
	uint64_t m64_mask = UINT64_MAX >> (64 - (n_bits)); \
\
	uint64_t m_new = value; \
\
	*m64_ptr = (m64 & ~m64_mask) | (m_new & m64_mask); \
}
966
#ifndef RTE_SWX_PIPELINE_THREADS_MAX
#define RTE_SWX_PIPELINE_THREADS_MAX 16
#endif

/* Top-level pipeline object. The tailqs hold the configuration-time
 * registrations; the pointers below them hold the flattened run-time
 * tables produced by the build step.
 */
struct rte_swx_pipeline {
	struct struct_type_tailq struct_types;
	struct port_in_type_tailq port_in_types;
	struct port_in_tailq ports_in;
	struct port_out_type_tailq port_out_types;
	struct port_out_tailq ports_out;
	struct extern_type_tailq extern_types;
	struct extern_obj_tailq extern_objs;
	struct extern_func_tailq extern_funcs;
	struct header_tailq headers;
	struct struct_type *metadata_st;
	uint32_t metadata_struct_id;
	struct action_tailq actions;
	struct table_type_tailq table_types;
	struct table_tailq tables;

	/* Run-time state (valid after build). */
	struct port_in_runtime *in;
	struct port_out_runtime *out;
	struct instruction **action_instructions;
	struct rte_swx_table_state *table_state;
	struct instruction *instructions;
	struct thread threads[RTE_SWX_PIPELINE_THREADS_MAX];

	uint32_t n_structs;
	uint32_t n_ports_in;
	uint32_t n_ports_out;
	uint32_t n_extern_objs;
	uint32_t n_extern_funcs;
	uint32_t n_actions;
	uint32_t n_tables;
	uint32_t n_headers;
	uint32_t thread_id;
	uint32_t port_id;
	uint32_t n_instructions;
	int build_done;
	int numa_node;
};
1008
1009 /*
1010 * Struct.
1011 */
1012 static struct struct_type *
struct_type_find(struct rte_swx_pipeline * p,const char * name)1013 struct_type_find(struct rte_swx_pipeline *p, const char *name)
1014 {
1015 struct struct_type *elem;
1016
1017 TAILQ_FOREACH(elem, &p->struct_types, node)
1018 if (strcmp(elem->name, name) == 0)
1019 return elem;
1020
1021 return NULL;
1022 }
1023
1024 static struct field *
struct_type_field_find(struct struct_type * st,const char * name)1025 struct_type_field_find(struct struct_type *st, const char *name)
1026 {
1027 uint32_t i;
1028
1029 for (i = 0; i < st->n_fields; i++) {
1030 struct field *f = &st->fields[i];
1031
1032 if (strcmp(f->name, name) == 0)
1033 return f;
1034 }
1035
1036 return NULL;
1037 }
1038
1039 int
rte_swx_pipeline_struct_type_register(struct rte_swx_pipeline * p,const char * name,struct rte_swx_field_params * fields,uint32_t n_fields)1040 rte_swx_pipeline_struct_type_register(struct rte_swx_pipeline *p,
1041 const char *name,
1042 struct rte_swx_field_params *fields,
1043 uint32_t n_fields)
1044 {
1045 struct struct_type *st;
1046 uint32_t i;
1047
1048 CHECK(p, EINVAL);
1049 CHECK_NAME(name, EINVAL);
1050 CHECK(fields, EINVAL);
1051 CHECK(n_fields, EINVAL);
1052
1053 for (i = 0; i < n_fields; i++) {
1054 struct rte_swx_field_params *f = &fields[i];
1055 uint32_t j;
1056
1057 CHECK_NAME(f->name, EINVAL);
1058 CHECK(f->n_bits, EINVAL);
1059 CHECK(f->n_bits <= 64, EINVAL);
1060 CHECK((f->n_bits & 7) == 0, EINVAL);
1061
1062 for (j = 0; j < i; j++) {
1063 struct rte_swx_field_params *f_prev = &fields[j];
1064
1065 CHECK(strcmp(f->name, f_prev->name), EINVAL);
1066 }
1067 }
1068
1069 CHECK(!struct_type_find(p, name), EEXIST);
1070
1071 /* Node allocation. */
1072 st = calloc(1, sizeof(struct struct_type));
1073 CHECK(st, ENOMEM);
1074
1075 st->fields = calloc(n_fields, sizeof(struct field));
1076 if (!st->fields) {
1077 free(st);
1078 CHECK(0, ENOMEM);
1079 }
1080
1081 /* Node initialization. */
1082 strcpy(st->name, name);
1083 for (i = 0; i < n_fields; i++) {
1084 struct field *dst = &st->fields[i];
1085 struct rte_swx_field_params *src = &fields[i];
1086
1087 strcpy(dst->name, src->name);
1088 dst->n_bits = src->n_bits;
1089 dst->offset = st->n_bits;
1090
1091 st->n_bits += src->n_bits;
1092 }
1093 st->n_fields = n_fields;
1094
1095 /* Node add to tailq. */
1096 TAILQ_INSERT_TAIL(&p->struct_types, st, node);
1097
1098 return 0;
1099 }
1100
1101 static int
struct_build(struct rte_swx_pipeline * p)1102 struct_build(struct rte_swx_pipeline *p)
1103 {
1104 uint32_t i;
1105
1106 for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
1107 struct thread *t = &p->threads[i];
1108
1109 t->structs = calloc(p->n_structs, sizeof(uint8_t *));
1110 CHECK(t->structs, ENOMEM);
1111 }
1112
1113 return 0;
1114 }
1115
1116 static void
struct_build_free(struct rte_swx_pipeline * p)1117 struct_build_free(struct rte_swx_pipeline *p)
1118 {
1119 uint32_t i;
1120
1121 for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
1122 struct thread *t = &p->threads[i];
1123
1124 free(t->structs);
1125 t->structs = NULL;
1126 }
1127 }
1128
1129 static void
struct_free(struct rte_swx_pipeline * p)1130 struct_free(struct rte_swx_pipeline *p)
1131 {
1132 struct_build_free(p);
1133
1134 /* Struct types. */
1135 for ( ; ; ) {
1136 struct struct_type *elem;
1137
1138 elem = TAILQ_FIRST(&p->struct_types);
1139 if (!elem)
1140 break;
1141
1142 TAILQ_REMOVE(&p->struct_types, elem, node);
1143 free(elem->fields);
1144 free(elem);
1145 }
1146 }
1147
1148 /*
1149 * Input port.
1150 */
1151 static struct port_in_type *
port_in_type_find(struct rte_swx_pipeline * p,const char * name)1152 port_in_type_find(struct rte_swx_pipeline *p, const char *name)
1153 {
1154 struct port_in_type *elem;
1155
1156 if (!name)
1157 return NULL;
1158
1159 TAILQ_FOREACH(elem, &p->port_in_types, node)
1160 if (strcmp(elem->name, name) == 0)
1161 return elem;
1162
1163 return NULL;
1164 }
1165
/* Register a new input port type with the pipeline.
 *
 * All mandatory callbacks (create, free, pkt_rx, stats_read) must be
 * provided. Returns 0 on success, -EINVAL on bad arguments, -EEXIST if the
 * type name is already registered, -ENOMEM on allocation failure.
 */
int
rte_swx_pipeline_port_in_type_register(struct rte_swx_pipeline *p,
				       const char *name,
				       struct rte_swx_port_in_ops *ops)
{
	struct port_in_type *elem;

	CHECK(p, EINVAL);
	CHECK_NAME(name, EINVAL);
	CHECK(ops, EINVAL);
	CHECK(ops->create, EINVAL);
	CHECK(ops->free, EINVAL);
	CHECK(ops->pkt_rx, EINVAL);
	CHECK(ops->stats_read, EINVAL);

	CHECK(!port_in_type_find(p, name), EEXIST);

	/* Node allocation. */
	elem = calloc(1, sizeof(struct port_in_type));
	CHECK(elem, ENOMEM);

	/* Node initialization. strcpy() is safe here: CHECK_NAME() above
	 * guarantees the name length is below RTE_SWX_NAME_SIZE.
	 */
	strcpy(elem->name, name);
	memcpy(&elem->ops, ops, sizeof(*ops));

	/* Node add to tailq. */
	TAILQ_INSERT_TAIL(&p->port_in_types, elem, node);

	return 0;
}
1196
1197 static struct port_in *
port_in_find(struct rte_swx_pipeline * p,uint32_t port_id)1198 port_in_find(struct rte_swx_pipeline *p, uint32_t port_id)
1199 {
1200 struct port_in *port;
1201
1202 TAILQ_FOREACH(port, &p->ports_in, node)
1203 if (port->id == port_id)
1204 return port;
1205
1206 return NULL;
1207 }
1208
1209 int
rte_swx_pipeline_port_in_config(struct rte_swx_pipeline * p,uint32_t port_id,const char * port_type_name,void * args)1210 rte_swx_pipeline_port_in_config(struct rte_swx_pipeline *p,
1211 uint32_t port_id,
1212 const char *port_type_name,
1213 void *args)
1214 {
1215 struct port_in_type *type = NULL;
1216 struct port_in *port = NULL;
1217 void *obj = NULL;
1218
1219 CHECK(p, EINVAL);
1220
1221 CHECK(!port_in_find(p, port_id), EINVAL);
1222
1223 CHECK_NAME(port_type_name, EINVAL);
1224 type = port_in_type_find(p, port_type_name);
1225 CHECK(type, EINVAL);
1226
1227 obj = type->ops.create(args);
1228 CHECK(obj, ENODEV);
1229
1230 /* Node allocation. */
1231 port = calloc(1, sizeof(struct port_in));
1232 CHECK(port, ENOMEM);
1233
1234 /* Node initialization. */
1235 port->type = type;
1236 port->obj = obj;
1237 port->id = port_id;
1238
1239 /* Node add to tailq. */
1240 TAILQ_INSERT_TAIL(&p->ports_in, port, node);
1241 if (p->n_ports_in < port_id + 1)
1242 p->n_ports_in = port_id + 1;
1243
1244 return 0;
1245 }
1246
/* Build-time step: flatten the input port list into the run-time array
 * p->in[], indexed by port ID for O(1) access on the datapath.
 */
static int
port_in_build(struct rte_swx_pipeline *p)
{
	struct port_in *port;
	uint32_t i;

	CHECK(p->n_ports_in, EINVAL);
	/* Power of 2 required: pipeline_port_inc() wraps the RX port ID with
	 * the bit mask (n_ports_in - 1).
	 */
	CHECK(rte_is_power_of_2(p->n_ports_in), EINVAL);

	/* Port IDs must be contiguous: every ID in [0, n_ports_in) present. */
	for (i = 0; i < p->n_ports_in; i++)
		CHECK(port_in_find(p, i), EINVAL);

	p->in = calloc(p->n_ports_in, sizeof(struct port_in_runtime));
	CHECK(p->in, ENOMEM);

	TAILQ_FOREACH(port, &p->ports_in, node) {
		struct port_in_runtime *in = &p->in[port->id];

		in->pkt_rx = port->type->ops.pkt_rx;
		in->obj = port->obj;
	}

	return 0;
}
1271
/* Undo port_in_build(): release the flat run-time input port array. */
static void
port_in_build_free(struct rte_swx_pipeline *p)
{
	free(p->in);
	p->in = NULL;
}
1278
1279 static void
port_in_free(struct rte_swx_pipeline * p)1280 port_in_free(struct rte_swx_pipeline *p)
1281 {
1282 port_in_build_free(p);
1283
1284 /* Input ports. */
1285 for ( ; ; ) {
1286 struct port_in *port;
1287
1288 port = TAILQ_FIRST(&p->ports_in);
1289 if (!port)
1290 break;
1291
1292 TAILQ_REMOVE(&p->ports_in, port, node);
1293 port->type->ops.free(port->obj);
1294 free(port);
1295 }
1296
1297 /* Input port types. */
1298 for ( ; ; ) {
1299 struct port_in_type *elem;
1300
1301 elem = TAILQ_FIRST(&p->port_in_types);
1302 if (!elem)
1303 break;
1304
1305 TAILQ_REMOVE(&p->port_in_types, elem, node);
1306 free(elem);
1307 }
1308 }
1309
1310 /*
1311 * Output port.
1312 */
1313 static struct port_out_type *
port_out_type_find(struct rte_swx_pipeline * p,const char * name)1314 port_out_type_find(struct rte_swx_pipeline *p, const char *name)
1315 {
1316 struct port_out_type *elem;
1317
1318 if (!name)
1319 return NULL;
1320
1321 TAILQ_FOREACH(elem, &p->port_out_types, node)
1322 if (!strcmp(elem->name, name))
1323 return elem;
1324
1325 return NULL;
1326 }
1327
/* Register a new output port type with the pipeline.
 *
 * All mandatory callbacks (create, free, pkt_tx, stats_read) must be
 * provided; flush is not validated here. Returns 0 on success, -EINVAL on
 * bad arguments, -EEXIST on duplicate name, -ENOMEM on allocation failure.
 */
int
rte_swx_pipeline_port_out_type_register(struct rte_swx_pipeline *p,
					const char *name,
					struct rte_swx_port_out_ops *ops)
{
	struct port_out_type *elem;

	CHECK(p, EINVAL);
	CHECK_NAME(name, EINVAL);
	CHECK(ops, EINVAL);
	CHECK(ops->create, EINVAL);
	CHECK(ops->free, EINVAL);
	CHECK(ops->pkt_tx, EINVAL);
	CHECK(ops->stats_read, EINVAL);

	CHECK(!port_out_type_find(p, name), EEXIST);

	/* Node allocation. */
	elem = calloc(1, sizeof(struct port_out_type));
	CHECK(elem, ENOMEM);

	/* Node initialization. strcpy() is safe: CHECK_NAME() bounds the
	 * name length below RTE_SWX_NAME_SIZE.
	 */
	strcpy(elem->name, name);
	memcpy(&elem->ops, ops, sizeof(*ops));

	/* Node add to tailq. */
	TAILQ_INSERT_TAIL(&p->port_out_types, elem, node);

	return 0;
}
1358
1359 static struct port_out *
port_out_find(struct rte_swx_pipeline * p,uint32_t port_id)1360 port_out_find(struct rte_swx_pipeline *p, uint32_t port_id)
1361 {
1362 struct port_out *port;
1363
1364 TAILQ_FOREACH(port, &p->ports_out, node)
1365 if (port->id == port_id)
1366 return port;
1367
1368 return NULL;
1369 }
1370
1371 int
rte_swx_pipeline_port_out_config(struct rte_swx_pipeline * p,uint32_t port_id,const char * port_type_name,void * args)1372 rte_swx_pipeline_port_out_config(struct rte_swx_pipeline *p,
1373 uint32_t port_id,
1374 const char *port_type_name,
1375 void *args)
1376 {
1377 struct port_out_type *type = NULL;
1378 struct port_out *port = NULL;
1379 void *obj = NULL;
1380
1381 CHECK(p, EINVAL);
1382
1383 CHECK(!port_out_find(p, port_id), EINVAL);
1384
1385 CHECK_NAME(port_type_name, EINVAL);
1386 type = port_out_type_find(p, port_type_name);
1387 CHECK(type, EINVAL);
1388
1389 obj = type->ops.create(args);
1390 CHECK(obj, ENODEV);
1391
1392 /* Node allocation. */
1393 port = calloc(1, sizeof(struct port_out));
1394 CHECK(port, ENOMEM);
1395
1396 /* Node initialization. */
1397 port->type = type;
1398 port->obj = obj;
1399 port->id = port_id;
1400
1401 /* Node add to tailq. */
1402 TAILQ_INSERT_TAIL(&p->ports_out, port, node);
1403 if (p->n_ports_out < port_id + 1)
1404 p->n_ports_out = port_id + 1;
1405
1406 return 0;
1407 }
1408
/* Build-time step: flatten the output port list into the run-time array
 * p->out[], indexed by port ID. Unlike input ports, the output port count
 * is not required to be a power of 2.
 */
static int
port_out_build(struct rte_swx_pipeline *p)
{
	struct port_out *port;
	uint32_t i;

	CHECK(p->n_ports_out, EINVAL);

	/* Port IDs must be contiguous: every ID in [0, n_ports_out) present. */
	for (i = 0; i < p->n_ports_out; i++)
		CHECK(port_out_find(p, i), EINVAL);

	p->out = calloc(p->n_ports_out, sizeof(struct port_out_runtime));
	CHECK(p->out, ENOMEM);

	TAILQ_FOREACH(port, &p->ports_out, node) {
		struct port_out_runtime *out = &p->out[port->id];

		out->pkt_tx = port->type->ops.pkt_tx;
		/* flush is not validated at type registration time, so it may
		 * be NULL here; presumably checked at the point of use — TODO
		 * confirm.
		 */
		out->flush = port->type->ops.flush;
		out->obj = port->obj;
	}

	return 0;
}
1433
/* Undo port_out_build(): release the flat run-time output port array. */
static void
port_out_build_free(struct rte_swx_pipeline *p)
{
	free(p->out);
	p->out = NULL;
}
1440
1441 static void
port_out_free(struct rte_swx_pipeline * p)1442 port_out_free(struct rte_swx_pipeline *p)
1443 {
1444 port_out_build_free(p);
1445
1446 /* Output ports. */
1447 for ( ; ; ) {
1448 struct port_out *port;
1449
1450 port = TAILQ_FIRST(&p->ports_out);
1451 if (!port)
1452 break;
1453
1454 TAILQ_REMOVE(&p->ports_out, port, node);
1455 port->type->ops.free(port->obj);
1456 free(port);
1457 }
1458
1459 /* Output port types. */
1460 for ( ; ; ) {
1461 struct port_out_type *elem;
1462
1463 elem = TAILQ_FIRST(&p->port_out_types);
1464 if (!elem)
1465 break;
1466
1467 TAILQ_REMOVE(&p->port_out_types, elem, node);
1468 free(elem);
1469 }
1470 }
1471
1472 /*
1473 * Extern object.
1474 */
1475 static struct extern_type *
extern_type_find(struct rte_swx_pipeline * p,const char * name)1476 extern_type_find(struct rte_swx_pipeline *p, const char *name)
1477 {
1478 struct extern_type *elem;
1479
1480 TAILQ_FOREACH(elem, &p->extern_types, node)
1481 if (strcmp(elem->name, name) == 0)
1482 return elem;
1483
1484 return NULL;
1485 }
1486
1487 static struct extern_type_member_func *
extern_type_member_func_find(struct extern_type * type,const char * name)1488 extern_type_member_func_find(struct extern_type *type, const char *name)
1489 {
1490 struct extern_type_member_func *elem;
1491
1492 TAILQ_FOREACH(elem, &type->funcs, node)
1493 if (strcmp(elem->name, name) == 0)
1494 return elem;
1495
1496 return NULL;
1497 }
1498
1499 static struct extern_obj *
extern_obj_find(struct rte_swx_pipeline * p,const char * name)1500 extern_obj_find(struct rte_swx_pipeline *p, const char *name)
1501 {
1502 struct extern_obj *elem;
1503
1504 TAILQ_FOREACH(elem, &p->extern_objs, node)
1505 if (strcmp(elem->name, name) == 0)
1506 return elem;
1507
1508 return NULL;
1509 }
1510
1511 static struct extern_type_member_func *
extern_obj_member_func_parse(struct rte_swx_pipeline * p,const char * name,struct extern_obj ** obj)1512 extern_obj_member_func_parse(struct rte_swx_pipeline *p,
1513 const char *name,
1514 struct extern_obj **obj)
1515 {
1516 struct extern_obj *object;
1517 struct extern_type_member_func *func;
1518 char *object_name, *func_name;
1519
1520 if (name[0] != 'e' || name[1] != '.')
1521 return NULL;
1522
1523 object_name = strdup(&name[2]);
1524 if (!object_name)
1525 return NULL;
1526
1527 func_name = strchr(object_name, '.');
1528 if (!func_name) {
1529 free(object_name);
1530 return NULL;
1531 }
1532
1533 *func_name = 0;
1534 func_name++;
1535
1536 object = extern_obj_find(p, object_name);
1537 if (!object) {
1538 free(object_name);
1539 return NULL;
1540 }
1541
1542 func = extern_type_member_func_find(object->type, func_name);
1543 if (!func) {
1544 free(object_name);
1545 return NULL;
1546 }
1547
1548 if (obj)
1549 *obj = object;
1550
1551 free(object_name);
1552 return func;
1553 }
1554
1555 static struct field *
extern_obj_mailbox_field_parse(struct rte_swx_pipeline * p,const char * name,struct extern_obj ** object)1556 extern_obj_mailbox_field_parse(struct rte_swx_pipeline *p,
1557 const char *name,
1558 struct extern_obj **object)
1559 {
1560 struct extern_obj *obj;
1561 struct field *f;
1562 char *obj_name, *field_name;
1563
1564 if ((name[0] != 'e') || (name[1] != '.'))
1565 return NULL;
1566
1567 obj_name = strdup(&name[2]);
1568 if (!obj_name)
1569 return NULL;
1570
1571 field_name = strchr(obj_name, '.');
1572 if (!field_name) {
1573 free(obj_name);
1574 return NULL;
1575 }
1576
1577 *field_name = 0;
1578 field_name++;
1579
1580 obj = extern_obj_find(p, obj_name);
1581 if (!obj) {
1582 free(obj_name);
1583 return NULL;
1584 }
1585
1586 f = struct_type_field_find(obj->type->mailbox_struct_type, field_name);
1587 if (!f) {
1588 free(obj_name);
1589 return NULL;
1590 }
1591
1592 if (object)
1593 *object = obj;
1594
1595 free(obj_name);
1596 return f;
1597 }
1598
/* Register a new extern object type.
 *
 * The mailbox struct type must already be registered; constructor and
 * destructor are mandatory. Returns 0 on success, -EINVAL on bad
 * arguments, -EEXIST on duplicate type name, -ENOMEM on allocation
 * failure.
 */
int
rte_swx_pipeline_extern_type_register(struct rte_swx_pipeline *p,
	const char *name,
	const char *mailbox_struct_type_name,
	rte_swx_extern_type_constructor_t constructor,
	rte_swx_extern_type_destructor_t destructor)
{
	struct extern_type *elem;
	struct struct_type *mailbox_struct_type;

	CHECK(p, EINVAL);

	CHECK_NAME(name, EINVAL);
	CHECK(!extern_type_find(p, name), EEXIST);

	CHECK_NAME(mailbox_struct_type_name, EINVAL);
	mailbox_struct_type = struct_type_find(p, mailbox_struct_type_name);
	CHECK(mailbox_struct_type, EINVAL);

	CHECK(constructor, EINVAL);
	CHECK(destructor, EINVAL);

	/* Node allocation. */
	elem = calloc(1, sizeof(struct extern_type));
	CHECK(elem, ENOMEM);

	/* Node initialization. strcpy() is safe: CHECK_NAME() bounds the
	 * name length below RTE_SWX_NAME_SIZE.
	 */
	strcpy(elem->name, name);
	elem->mailbox_struct_type = mailbox_struct_type;
	elem->constructor = constructor;
	elem->destructor = destructor;
	TAILQ_INIT(&elem->funcs);

	/* Node add to tailq. */
	TAILQ_INSERT_TAIL(&p->extern_types, elem, node);

	return 0;
}
1637
/* Register a member function for an existing extern object type.
 *
 * Function IDs are assigned sequentially (type->n_funcs) and bounded by
 * RTE_SWX_EXTERN_TYPE_MEMBER_FUNCS_MAX, which keeps the fixed-size
 * r->funcs[] indexing in extern_obj_build() in range. Returns 0 on
 * success, -EINVAL on bad arguments, -ENOSPC when the type is full,
 * -EEXIST on duplicate function name, -ENOMEM on allocation failure.
 */
int
rte_swx_pipeline_extern_type_member_func_register(struct rte_swx_pipeline *p,
	const char *extern_type_name,
	const char *name,
	rte_swx_extern_type_member_func_t member_func)
{
	struct extern_type *type;
	struct extern_type_member_func *type_member;

	CHECK(p, EINVAL);

	CHECK_NAME(extern_type_name, EINVAL);
	type = extern_type_find(p, extern_type_name);
	CHECK(type, EINVAL);
	CHECK(type->n_funcs < RTE_SWX_EXTERN_TYPE_MEMBER_FUNCS_MAX, ENOSPC);

	CHECK_NAME(name, EINVAL);
	CHECK(!extern_type_member_func_find(type, name), EEXIST);

	CHECK(member_func, EINVAL);

	/* Node allocation. */
	type_member = calloc(1, sizeof(struct extern_type_member_func));
	CHECK(type_member, ENOMEM);

	/* Node initialization. */
	strcpy(type_member->name, name);
	type_member->func = member_func;
	type_member->id = type->n_funcs;

	/* Node add to tailq. */
	TAILQ_INSERT_TAIL(&type->funcs, type_member, node);
	type->n_funcs++;

	return 0;
}
1674
/* Instantiate an extern object of a registered type.
 *
 * Each instance reserves one slot in the per-thread structs[] array
 * (struct_id) for its mailbox. Returns 0 on success, -EINVAL on bad
 * arguments, -EEXIST on duplicate object name, -ENOMEM on allocation
 * failure, -ENODEV when the type constructor fails.
 */
int
rte_swx_pipeline_extern_object_config(struct rte_swx_pipeline *p,
				      const char *extern_type_name,
				      const char *name,
				      const char *args)
{
	struct extern_type *type;
	struct extern_obj *obj;
	void *obj_handle;

	CHECK(p, EINVAL);

	CHECK_NAME(extern_type_name, EINVAL);
	type = extern_type_find(p, extern_type_name);
	CHECK(type, EINVAL);

	CHECK_NAME(name, EINVAL);
	CHECK(!extern_obj_find(p, name), EEXIST);

	/* Node allocation. */
	obj = calloc(1, sizeof(struct extern_obj));
	CHECK(obj, ENOMEM);

	/* Object construction. On failure the node is released before
	 * returning -ENODEV.
	 */
	obj_handle = type->constructor(args);
	if (!obj_handle) {
		free(obj);
		CHECK(0, ENODEV);
	}

	/* Node initialization. */
	strcpy(obj->name, name);
	obj->type = type;
	obj->obj = obj_handle;
	obj->struct_id = p->n_structs;
	obj->id = p->n_extern_objs;

	/* Node add to tailq. */
	TAILQ_INSERT_TAIL(&p->extern_objs, obj, node);
	p->n_extern_objs++;
	p->n_structs++;

	return 0;
}
1719
/* Build-time step: for every thread, allocate the extern object run-time
 * array and a mailbox per object, and wire each mailbox into the thread's
 * structs[] slot so instructions can address it by struct_id.
 *
 * NOTE(review): on mid-loop allocation failure this returns -ENOMEM with
 * earlier threads' allocations still live; presumably the caller unwinds
 * via extern_obj_build_free() — TODO confirm.
 */
static int
extern_obj_build(struct rte_swx_pipeline *p)
{
	uint32_t i;

	for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
		struct thread *t = &p->threads[i];
		struct extern_obj *obj;

		t->extern_objs = calloc(p->n_extern_objs,
					sizeof(struct extern_obj_runtime));
		CHECK(t->extern_objs, ENOMEM);

		TAILQ_FOREACH(obj, &p->extern_objs, node) {
			struct extern_obj_runtime *r =
				&t->extern_objs[obj->id];
			struct extern_type_member_func *func;
			/* Mailbox size in bytes, derived from the mailbox
			 * struct type bit width.
			 */
			uint32_t mailbox_size =
				obj->type->mailbox_struct_type->n_bits / 8;

			r->obj = obj->obj;

			r->mailbox = calloc(1, mailbox_size);
			CHECK(r->mailbox, ENOMEM);

			/* Flatten the member function list into the fixed
			 * per-object function table, indexed by func ID.
			 */
			TAILQ_FOREACH(func, &obj->type->funcs, node)
				r->funcs[func->id] = func->func;

			t->structs[obj->struct_id] = r->mailbox;
		}
	}

	return 0;
}
1754
1755 static void
extern_obj_build_free(struct rte_swx_pipeline * p)1756 extern_obj_build_free(struct rte_swx_pipeline *p)
1757 {
1758 uint32_t i;
1759
1760 for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
1761 struct thread *t = &p->threads[i];
1762 uint32_t j;
1763
1764 if (!t->extern_objs)
1765 continue;
1766
1767 for (j = 0; j < p->n_extern_objs; j++) {
1768 struct extern_obj_runtime *r = &t->extern_objs[j];
1769
1770 free(r->mailbox);
1771 }
1772
1773 free(t->extern_objs);
1774 t->extern_objs = NULL;
1775 }
1776 }
1777
1778 static void
extern_obj_free(struct rte_swx_pipeline * p)1779 extern_obj_free(struct rte_swx_pipeline *p)
1780 {
1781 extern_obj_build_free(p);
1782
1783 /* Extern objects. */
1784 for ( ; ; ) {
1785 struct extern_obj *elem;
1786
1787 elem = TAILQ_FIRST(&p->extern_objs);
1788 if (!elem)
1789 break;
1790
1791 TAILQ_REMOVE(&p->extern_objs, elem, node);
1792 if (elem->obj)
1793 elem->type->destructor(elem->obj);
1794 free(elem);
1795 }
1796
1797 /* Extern types. */
1798 for ( ; ; ) {
1799 struct extern_type *elem;
1800
1801 elem = TAILQ_FIRST(&p->extern_types);
1802 if (!elem)
1803 break;
1804
1805 TAILQ_REMOVE(&p->extern_types, elem, node);
1806
1807 for ( ; ; ) {
1808 struct extern_type_member_func *func;
1809
1810 func = TAILQ_FIRST(&elem->funcs);
1811 if (!func)
1812 break;
1813
1814 TAILQ_REMOVE(&elem->funcs, func, node);
1815 free(func);
1816 }
1817
1818 free(elem);
1819 }
1820 }
1821
1822 /*
1823 * Extern function.
1824 */
1825 static struct extern_func *
extern_func_find(struct rte_swx_pipeline * p,const char * name)1826 extern_func_find(struct rte_swx_pipeline *p, const char *name)
1827 {
1828 struct extern_func *elem;
1829
1830 TAILQ_FOREACH(elem, &p->extern_funcs, node)
1831 if (strcmp(elem->name, name) == 0)
1832 return elem;
1833
1834 return NULL;
1835 }
1836
1837 static struct extern_func *
extern_func_parse(struct rte_swx_pipeline * p,const char * name)1838 extern_func_parse(struct rte_swx_pipeline *p,
1839 const char *name)
1840 {
1841 if (name[0] != 'f' || name[1] != '.')
1842 return NULL;
1843
1844 return extern_func_find(p, &name[2]);
1845 }
1846
1847 static struct field *
extern_func_mailbox_field_parse(struct rte_swx_pipeline * p,const char * name,struct extern_func ** function)1848 extern_func_mailbox_field_parse(struct rte_swx_pipeline *p,
1849 const char *name,
1850 struct extern_func **function)
1851 {
1852 struct extern_func *func;
1853 struct field *f;
1854 char *func_name, *field_name;
1855
1856 if ((name[0] != 'f') || (name[1] != '.'))
1857 return NULL;
1858
1859 func_name = strdup(&name[2]);
1860 if (!func_name)
1861 return NULL;
1862
1863 field_name = strchr(func_name, '.');
1864 if (!field_name) {
1865 free(func_name);
1866 return NULL;
1867 }
1868
1869 *field_name = 0;
1870 field_name++;
1871
1872 func = extern_func_find(p, func_name);
1873 if (!func) {
1874 free(func_name);
1875 return NULL;
1876 }
1877
1878 f = struct_type_field_find(func->mailbox_struct_type, field_name);
1879 if (!f) {
1880 free(func_name);
1881 return NULL;
1882 }
1883
1884 if (function)
1885 *function = func;
1886
1887 free(func_name);
1888 return f;
1889 }
1890
1891 int
rte_swx_pipeline_extern_func_register(struct rte_swx_pipeline * p,const char * name,const char * mailbox_struct_type_name,rte_swx_extern_func_t func)1892 rte_swx_pipeline_extern_func_register(struct rte_swx_pipeline *p,
1893 const char *name,
1894 const char *mailbox_struct_type_name,
1895 rte_swx_extern_func_t func)
1896 {
1897 struct extern_func *f;
1898 struct struct_type *mailbox_struct_type;
1899
1900 CHECK(p, EINVAL);
1901
1902 CHECK_NAME(name, EINVAL);
1903 CHECK(!extern_func_find(p, name), EEXIST);
1904
1905 CHECK_NAME(mailbox_struct_type_name, EINVAL);
1906 mailbox_struct_type = struct_type_find(p, mailbox_struct_type_name);
1907 CHECK(mailbox_struct_type, EINVAL);
1908
1909 CHECK(func, EINVAL);
1910
1911 /* Node allocation. */
1912 f = calloc(1, sizeof(struct extern_func));
1913 CHECK(func, ENOMEM);
1914
1915 /* Node initialization. */
1916 strcpy(f->name, name);
1917 f->mailbox_struct_type = mailbox_struct_type;
1918 f->func = func;
1919 f->struct_id = p->n_structs;
1920 f->id = p->n_extern_funcs;
1921
1922 /* Node add to tailq. */
1923 TAILQ_INSERT_TAIL(&p->extern_funcs, f, node);
1924 p->n_extern_funcs++;
1925 p->n_structs++;
1926
1927 return 0;
1928 }
1929
/* Build-time step: for every thread, allocate the extern function run-time
 * array and a mailbox per function, and wire each mailbox into the
 * thread's structs[] slot so instructions can address it by struct_id.
 */
static int
extern_func_build(struct rte_swx_pipeline *p)
{
	uint32_t i;

	for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
		struct thread *t = &p->threads[i];
		struct extern_func *func;

		/* Memory allocation. */
		t->extern_funcs = calloc(p->n_extern_funcs,
					 sizeof(struct extern_func_runtime));
		CHECK(t->extern_funcs, ENOMEM);

		/* Extern function. */
		TAILQ_FOREACH(func, &p->extern_funcs, node) {
			struct extern_func_runtime *r =
				&t->extern_funcs[func->id];
			/* Mailbox size in bytes, derived from the mailbox
			 * struct type bit width.
			 */
			uint32_t mailbox_size =
				func->mailbox_struct_type->n_bits / 8;

			r->func = func->func;

			r->mailbox = calloc(1, mailbox_size);
			CHECK(r->mailbox, ENOMEM);

			t->structs[func->struct_id] = r->mailbox;
		}
	}

	return 0;
}
1962
1963 static void
extern_func_build_free(struct rte_swx_pipeline * p)1964 extern_func_build_free(struct rte_swx_pipeline *p)
1965 {
1966 uint32_t i;
1967
1968 for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
1969 struct thread *t = &p->threads[i];
1970 uint32_t j;
1971
1972 if (!t->extern_funcs)
1973 continue;
1974
1975 for (j = 0; j < p->n_extern_funcs; j++) {
1976 struct extern_func_runtime *r = &t->extern_funcs[j];
1977
1978 free(r->mailbox);
1979 }
1980
1981 free(t->extern_funcs);
1982 t->extern_funcs = NULL;
1983 }
1984 }
1985
1986 static void
extern_func_free(struct rte_swx_pipeline * p)1987 extern_func_free(struct rte_swx_pipeline *p)
1988 {
1989 extern_func_build_free(p);
1990
1991 for ( ; ; ) {
1992 struct extern_func *elem;
1993
1994 elem = TAILQ_FIRST(&p->extern_funcs);
1995 if (!elem)
1996 break;
1997
1998 TAILQ_REMOVE(&p->extern_funcs, elem, node);
1999 free(elem);
2000 }
2001 }
2002
2003 /*
2004 * Header.
2005 */
2006 static struct header *
header_find(struct rte_swx_pipeline * p,const char * name)2007 header_find(struct rte_swx_pipeline *p, const char *name)
2008 {
2009 struct header *elem;
2010
2011 TAILQ_FOREACH(elem, &p->headers, node)
2012 if (strcmp(elem->name, name) == 0)
2013 return elem;
2014
2015 return NULL;
2016 }
2017
2018 static struct header *
header_parse(struct rte_swx_pipeline * p,const char * name)2019 header_parse(struct rte_swx_pipeline *p,
2020 const char *name)
2021 {
2022 if (name[0] != 'h' || name[1] != '.')
2023 return NULL;
2024
2025 return header_find(p, &name[2]);
2026 }
2027
2028 static struct field *
header_field_parse(struct rte_swx_pipeline * p,const char * name,struct header ** header)2029 header_field_parse(struct rte_swx_pipeline *p,
2030 const char *name,
2031 struct header **header)
2032 {
2033 struct header *h;
2034 struct field *f;
2035 char *header_name, *field_name;
2036
2037 if ((name[0] != 'h') || (name[1] != '.'))
2038 return NULL;
2039
2040 header_name = strdup(&name[2]);
2041 if (!header_name)
2042 return NULL;
2043
2044 field_name = strchr(header_name, '.');
2045 if (!field_name) {
2046 free(header_name);
2047 return NULL;
2048 }
2049
2050 *field_name = 0;
2051 field_name++;
2052
2053 h = header_find(p, header_name);
2054 if (!h) {
2055 free(header_name);
2056 return NULL;
2057 }
2058
2059 f = struct_type_field_find(h->st, field_name);
2060 if (!f) {
2061 free(header_name);
2062 return NULL;
2063 }
2064
2065 if (header)
2066 *header = h;
2067
2068 free(header_name);
2069 return f;
2070 }
2071
/* Register a packet header of a previously registered struct type.
 *
 * The header count is capped by the bit width of the per-thread
 * valid_headers mask (one bit per header). Each header reserves one slot
 * in the per-thread structs[] array (struct_id). Returns 0 on success,
 * -EINVAL on bad arguments, -EEXIST on duplicate name, -ENOSPC when the
 * valid_headers mask is full, -ENOMEM on allocation failure.
 */
int
rte_swx_pipeline_packet_header_register(struct rte_swx_pipeline *p,
					const char *name,
					const char *struct_type_name)
{
	struct struct_type *st;
	struct header *h;
	size_t n_headers_max;

	CHECK(p, EINVAL);
	CHECK_NAME(name, EINVAL);
	CHECK_NAME(struct_type_name, EINVAL);

	CHECK(!header_find(p, name), EEXIST);

	st = struct_type_find(p, struct_type_name);
	CHECK(st, EINVAL);

	/* One valid_headers bit per header. */
	n_headers_max = RTE_SIZEOF_FIELD(struct thread, valid_headers) * 8;
	CHECK(p->n_headers < n_headers_max, ENOSPC);

	/* Node allocation. */
	h = calloc(1, sizeof(struct header));
	CHECK(h, ENOMEM);

	/* Node initialization. */
	strcpy(h->name, name);
	h->st = st;
	h->struct_id = p->n_structs;
	h->id = p->n_headers;

	/* Node add to tailq. */
	TAILQ_INSERT_TAIL(&p->headers, h, node);
	p->n_headers++;
	p->n_structs++;

	return 0;
}
2110
/* Build-time step: for every thread, allocate the header run-time arrays
 * plus two contiguous storage areas (regular and out/emit), then carve the
 * regular storage into per-header slices wired into both the header
 * run-time entry and the thread's structs[] slot.
 */
static int
header_build(struct rte_swx_pipeline *p)
{
	struct header *h;
	uint32_t n_bytes = 0, i;

	/* Total storage: sum of all header sizes in bytes. */
	TAILQ_FOREACH(h, &p->headers, node) {
		n_bytes += h->st->n_bits / 8;
	}

	for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
		struct thread *t = &p->threads[i];
		uint32_t offset = 0;

		t->headers = calloc(p->n_headers,
				    sizeof(struct header_runtime));
		CHECK(t->headers, ENOMEM);

		t->headers_out = calloc(p->n_headers,
					sizeof(struct header_out_runtime));
		CHECK(t->headers_out, ENOMEM);

		t->header_storage = calloc(1, n_bytes);
		CHECK(t->header_storage, ENOMEM);

		t->header_out_storage = calloc(1, n_bytes);
		CHECK(t->header_out_storage, ENOMEM);

		/* Slice the storage area per header, in list order. */
		TAILQ_FOREACH(h, &p->headers, node) {
			uint8_t *header_storage;

			header_storage = &t->header_storage[offset];
			offset += h->st->n_bits / 8;

			t->headers[h->id].ptr0 = header_storage;
			t->structs[h->struct_id] = header_storage;
		}
	}

	return 0;
}
2152
2153 static void
header_build_free(struct rte_swx_pipeline * p)2154 header_build_free(struct rte_swx_pipeline *p)
2155 {
2156 uint32_t i;
2157
2158 for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
2159 struct thread *t = &p->threads[i];
2160
2161 free(t->headers_out);
2162 t->headers_out = NULL;
2163
2164 free(t->headers);
2165 t->headers = NULL;
2166
2167 free(t->header_out_storage);
2168 t->header_out_storage = NULL;
2169
2170 free(t->header_storage);
2171 t->header_storage = NULL;
2172 }
2173 }
2174
2175 static void
header_free(struct rte_swx_pipeline * p)2176 header_free(struct rte_swx_pipeline *p)
2177 {
2178 header_build_free(p);
2179
2180 for ( ; ; ) {
2181 struct header *elem;
2182
2183 elem = TAILQ_FIRST(&p->headers);
2184 if (!elem)
2185 break;
2186
2187 TAILQ_REMOVE(&p->headers, elem, node);
2188 free(elem);
2189 }
2190 }
2191
2192 /*
2193 * Meta-data.
2194 */
2195 static struct field *
metadata_field_parse(struct rte_swx_pipeline * p,const char * name)2196 metadata_field_parse(struct rte_swx_pipeline *p, const char *name)
2197 {
2198 if (!p->metadata_st)
2199 return NULL;
2200
2201 if (name[0] != 'm' || name[1] != '.')
2202 return NULL;
2203
2204 return struct_type_field_find(p->metadata_st, &name[2]);
2205 }
2206
/* Register the packet meta-data struct type for the pipeline.
 *
 * Only one meta-data struct can be registered per pipeline (enforced by
 * the !p->metadata_st check). The meta-data reserves one slot in the
 * per-thread structs[] array. Returns 0 on success, -EINVAL on bad
 * arguments, unknown struct type, or repeated registration.
 */
int
rte_swx_pipeline_packet_metadata_register(struct rte_swx_pipeline *p,
					  const char *struct_type_name)
{
	struct struct_type *st = NULL;

	CHECK(p, EINVAL);

	CHECK_NAME(struct_type_name, EINVAL);
	st = struct_type_find(p, struct_type_name);
	CHECK(st, EINVAL);
	CHECK(!p->metadata_st, EINVAL);

	p->metadata_st = st;
	p->metadata_struct_id = p->n_structs;

	p->n_structs++;

	return 0;
}
2227
2228 static int
metadata_build(struct rte_swx_pipeline * p)2229 metadata_build(struct rte_swx_pipeline *p)
2230 {
2231 uint32_t n_bytes = p->metadata_st->n_bits / 8;
2232 uint32_t i;
2233
2234 /* Thread-level initialization. */
2235 for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
2236 struct thread *t = &p->threads[i];
2237 uint8_t *metadata;
2238
2239 metadata = calloc(1, n_bytes);
2240 CHECK(metadata, ENOMEM);
2241
2242 t->metadata = metadata;
2243 t->structs[p->metadata_struct_id] = metadata;
2244 }
2245
2246 return 0;
2247 }
2248
2249 static void
metadata_build_free(struct rte_swx_pipeline * p)2250 metadata_build_free(struct rte_swx_pipeline *p)
2251 {
2252 uint32_t i;
2253
2254 for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
2255 struct thread *t = &p->threads[i];
2256
2257 free(t->metadata);
2258 t->metadata = NULL;
2259 }
2260 }
2261
/* Meta-data teardown: there is no registration state to undo beyond the
 * build-time buffers (p->metadata_st is just a reference to a registered
 * struct type owned by struct_free()).
 */
static void
metadata_free(struct rte_swx_pipeline *p)
{
	metadata_build_free(p);
}
2267
2268 /*
2269 * Instruction.
2270 */
/* Return 1 when the instruction is any of the jump variants (used by the
 * translator to resolve branch targets), 0 otherwise.
 */
static int
instruction_is_jmp(struct instruction *instr)
{
	switch (instr->type) {
	case INSTR_JMP:
	case INSTR_JMP_VALID:
	case INSTR_JMP_INVALID:
	case INSTR_JMP_HIT:
	case INSTR_JMP_MISS:
	case INSTR_JMP_ACTION_HIT:
	case INSTR_JMP_ACTION_MISS:
	case INSTR_JMP_EQ:
	case INSTR_JMP_EQ_S:
	case INSTR_JMP_EQ_I:
	case INSTR_JMP_NEQ:
	case INSTR_JMP_NEQ_S:
	case INSTR_JMP_NEQ_I:
	case INSTR_JMP_LT:
	case INSTR_JMP_LT_MH:
	case INSTR_JMP_LT_HM:
	case INSTR_JMP_LT_HH:
	case INSTR_JMP_LT_MI:
	case INSTR_JMP_LT_HI:
	case INSTR_JMP_GT:
	case INSTR_JMP_GT_MH:
	case INSTR_JMP_GT_HM:
	case INSTR_JMP_GT_HH:
	case INSTR_JMP_GT_MI:
	case INSTR_JMP_GT_HI:
		return 1;

	default:
		return 0;
	}
}
2306
2307 static struct field *
2308 action_field_parse(struct action *action, const char *name);
2309
/* Resolve a struct field operand by its prefix character:
 *   "h." header field, "m." meta-data field, "t." action (table) argument,
 *   "e." extern object mailbox field, "f." extern function mailbox field.
 * On success, *struct_id receives the index of the owning struct in the
 * per-thread structs[] array. Returns NULL on any parse/lookup failure.
 */
static struct field *
struct_field_parse(struct rte_swx_pipeline *p,
		   struct action *action,
		   const char *name,
		   uint32_t *struct_id)
{
	struct field *f;

	switch (name[0]) {
	case 'h':
	{
		struct header *header;

		f = header_field_parse(p, name, &header);
		if (!f)
			return NULL;

		*struct_id = header->struct_id;
		return f;
	}

	case 'm':
	{
		f = metadata_field_parse(p, name);
		if (!f)
			return NULL;

		*struct_id = p->metadata_struct_id;
		return f;
	}

	case 't':
	{
		/* Action arguments are only valid inside an action context. */
		if (!action)
			return NULL;

		f = action_field_parse(action, name);
		if (!f)
			return NULL;

		/* Action data lives in structs[0]. */
		*struct_id = 0;
		return f;
	}

	case 'e':
	{
		struct extern_obj *obj;

		f = extern_obj_mailbox_field_parse(p, name, &obj);
		if (!f)
			return NULL;

		*struct_id = obj->struct_id;
		return f;
	}

	case 'f':
	{
		struct extern_func *func;

		f = extern_func_mailbox_field_parse(p, name, &func);
		if (!f)
			return NULL;

		*struct_id = func->struct_id;
		return f;
	}

	default:
		return NULL;
	}
}
2382
/* Advance the RX port ID round-robin. The mask only works because
 * n_ports_in is enforced to be a power of 2 in port_in_build().
 */
static inline void
pipeline_port_inc(struct rte_swx_pipeline *p)
{
	p->port_id = (p->port_id + 1) & (p->n_ports_in - 1);
}
2388
/* Reset the thread's instruction pointer to the start of the pipeline
 * program.
 */
static inline void
thread_ip_reset(struct rte_swx_pipeline *p, struct thread *t)
{
	t->ip = p->instructions;
}
2394
/* Set the thread's instruction pointer to an absolute instruction. */
static inline void
thread_ip_set(struct thread *t, struct instruction *ip)
{
	t->ip = ip;
}
2400
/* Call an action: save the return address (next instruction) and jump to
 * the action's first instruction.
 */
static inline void
thread_ip_action_call(struct rte_swx_pipeline *p,
		      struct thread *t,
		      uint32_t action_id)
{
	t->ret = t->ip + 1;
	t->ip = p->action_instructions[action_id];
}
2409
2410 static inline void
2411 thread_ip_inc(struct rte_swx_pipeline *p);
2412
/* Advance the current thread's instruction pointer by one instruction. */
static inline void
thread_ip_inc(struct rte_swx_pipeline *p)
{
	struct thread *t = &p->threads[p->thread_id];

	t->ip++;
}
2420
/* Branch-free conditional advance: cond is 0 or 1, so the IP moves only
 * when the condition holds.
 */
static inline void
thread_ip_inc_cond(struct thread *t, int cond)
{
	t->ip += cond;
}
2426
/* Switch to the next thread, wrapping around; relies on
 * RTE_SWX_PIPELINE_THREADS_MAX being a power of 2 for the mask to work.
 */
static inline void
thread_yield(struct rte_swx_pipeline *p)
{
	p->thread_id = (p->thread_id + 1) & (RTE_SWX_PIPELINE_THREADS_MAX - 1);
}
2432
static inline void
thread_yield_cond(struct rte_swx_pipeline *p, int cond)
{
	/* Branch-free conditional yield: cond must be 0 or 1; with cond == 0
	 * the current thread keeps running.
	 */
	p->thread_id = (p->thread_id + cond) & (RTE_SWX_PIPELINE_THREADS_MAX - 1);
}
2438
2439 /*
2440 * rx.
2441 */
2442 static int
instr_rx_translate(struct rte_swx_pipeline * p,struct action * action,char ** tokens,int n_tokens,struct instruction * instr,struct instruction_data * data __rte_unused)2443 instr_rx_translate(struct rte_swx_pipeline *p,
2444 struct action *action,
2445 char **tokens,
2446 int n_tokens,
2447 struct instruction *instr,
2448 struct instruction_data *data __rte_unused)
2449 {
2450 struct field *f;
2451
2452 CHECK(!action, EINVAL);
2453 CHECK(n_tokens == 2, EINVAL);
2454
2455 f = metadata_field_parse(p, tokens[1]);
2456 CHECK(f, EINVAL);
2457
2458 instr->type = INSTR_RX;
2459 instr->io.io.offset = f->offset / 8;
2460 instr->io.io.n_bits = f->n_bits;
2461 return 0;
2462 }
2463
static inline void
instr_rx_exec(struct rte_swx_pipeline *p);

static inline void
instr_rx_exec(struct rte_swx_pipeline *p)
{
	/* Receive the next packet from the current input port, reset the
	 * per-packet thread state, then yield. If no packet was received,
	 * the instruction pointer is not advanced, so this same rx
	 * instruction is retried when the thread is scheduled again.
	 */
	struct thread *t = &p->threads[p->thread_id];
	struct instruction *ip = t->ip;
	struct port_in_runtime *port = &p->in[p->port_id];
	struct rte_swx_pkt *pkt = &t->pkt;
	int pkt_received;

	/* Packet. */
	pkt_received = port->pkt_rx(port->obj, pkt);
	t->ptr = &pkt->pkt[pkt->offset];
	rte_prefetch0(t->ptr);

	TRACE("[Thread %2u] rx %s from port %u\n",
	      p->thread_id,
	      pkt_received ? "1 pkt" : "0 pkts",
	      p->port_id);

	/* Headers: nothing extracted or emitted yet for this packet. */
	t->valid_headers = 0;
	t->n_headers_out = 0;

	/* Meta-data: record the input port ID into the field selected by
	 * the rx instruction operand (written even when no packet arrived;
	 * harmless since the thread does not advance in that case).
	 */
	METADATA_WRITE(t, ip->io.io.offset, ip->io.io.n_bits, p->port_id);

	/* Tables: snapshot the current table state. */
	t->table_state = p->table_state;

	/* Thread. */
	pipeline_port_inc(p);
	thread_ip_inc_cond(t, pkt_received);
	thread_yield(p);
}
2501
2502 /*
2503 * tx.
2504 */
2505 static int
instr_tx_translate(struct rte_swx_pipeline * p,struct action * action __rte_unused,char ** tokens,int n_tokens,struct instruction * instr,struct instruction_data * data __rte_unused)2506 instr_tx_translate(struct rte_swx_pipeline *p,
2507 struct action *action __rte_unused,
2508 char **tokens,
2509 int n_tokens,
2510 struct instruction *instr,
2511 struct instruction_data *data __rte_unused)
2512 {
2513 struct field *f;
2514
2515 CHECK(n_tokens == 2, EINVAL);
2516
2517 f = metadata_field_parse(p, tokens[1]);
2518 CHECK(f, EINVAL);
2519
2520 instr->type = INSTR_TX;
2521 instr->io.io.offset = f->offset / 8;
2522 instr->io.io.n_bits = f->n_bits;
2523 return 0;
2524 }
2525
2526 static inline void
emit_handler(struct thread * t)2527 emit_handler(struct thread *t)
2528 {
2529 struct header_out_runtime *h0 = &t->headers_out[0];
2530 struct header_out_runtime *h1 = &t->headers_out[1];
2531 uint32_t offset = 0, i;
2532
2533 /* No header change or header decapsulation. */
2534 if ((t->n_headers_out == 1) &&
2535 (h0->ptr + h0->n_bytes == t->ptr)) {
2536 TRACE("Emit handler: no header change or header decap.\n");
2537
2538 t->pkt.offset -= h0->n_bytes;
2539 t->pkt.length += h0->n_bytes;
2540
2541 return;
2542 }
2543
2544 /* Header encapsulation (optionally, with prior header decasulation). */
2545 if ((t->n_headers_out == 2) &&
2546 (h1->ptr + h1->n_bytes == t->ptr) &&
2547 (h0->ptr == h0->ptr0)) {
2548 uint32_t offset;
2549
2550 TRACE("Emit handler: header encapsulation.\n");
2551
2552 offset = h0->n_bytes + h1->n_bytes;
2553 memcpy(t->ptr - offset, h0->ptr, h0->n_bytes);
2554 t->pkt.offset -= offset;
2555 t->pkt.length += offset;
2556
2557 return;
2558 }
2559
2560 /* Header insertion. */
2561 /* TBD */
2562
2563 /* Header extraction. */
2564 /* TBD */
2565
2566 /* For any other case. */
2567 TRACE("Emit handler: complex case.\n");
2568
2569 for (i = 0; i < t->n_headers_out; i++) {
2570 struct header_out_runtime *h = &t->headers_out[i];
2571
2572 memcpy(&t->header_out_storage[offset], h->ptr, h->n_bytes);
2573 offset += h->n_bytes;
2574 }
2575
2576 if (offset) {
2577 memcpy(t->ptr - offset, t->header_out_storage, offset);
2578 t->pkt.offset -= offset;
2579 t->pkt.length += offset;
2580 }
2581 }
2582
static inline void
instr_tx_exec(struct rte_swx_pipeline *p);

static inline void
instr_tx_exec(struct rte_swx_pipeline *p)
{
	/* Transmit the current packet to the output port selected by the
	 * meta-data field named in the tx instruction, then restart the
	 * program and immediately receive the next packet (rx is inlined
	 * here rather than yielding first).
	 */
	struct thread *t = &p->threads[p->thread_id];
	struct instruction *ip = t->ip;
	uint64_t port_id = METADATA_READ(t, ip->io.io.offset, ip->io.io.n_bits);
	struct port_out_runtime *port = &p->out[port_id];
	struct rte_swx_pkt *pkt = &t->pkt;

	TRACE("[Thread %2u]: tx 1 pkt to port %u\n",
	      p->thread_id,
	      (uint32_t)port_id);

	/* Headers: flush the emitted headers in front of the payload. */
	emit_handler(t);

	/* Packet. */
	port->pkt_tx(port->obj, pkt);

	/* Thread. */
	thread_ip_reset(p, t);
	instr_rx_exec(p);
}
2609
2610 /*
2611 * extract.
2612 */
2613 static int
instr_hdr_extract_translate(struct rte_swx_pipeline * p,struct action * action,char ** tokens,int n_tokens,struct instruction * instr,struct instruction_data * data __rte_unused)2614 instr_hdr_extract_translate(struct rte_swx_pipeline *p,
2615 struct action *action,
2616 char **tokens,
2617 int n_tokens,
2618 struct instruction *instr,
2619 struct instruction_data *data __rte_unused)
2620 {
2621 struct header *h;
2622
2623 CHECK(!action, EINVAL);
2624 CHECK(n_tokens == 2, EINVAL);
2625
2626 h = header_parse(p, tokens[1]);
2627 CHECK(h, EINVAL);
2628
2629 instr->type = INSTR_HDR_EXTRACT;
2630 instr->io.hdr.header_id[0] = h->id;
2631 instr->io.hdr.struct_id[0] = h->struct_id;
2632 instr->io.hdr.n_bytes[0] = h->st->n_bits / 8;
2633 return 0;
2634 }
2635
static inline void
__instr_hdr_extract_exec(struct rte_swx_pipeline *p, uint32_t n_extract);

static inline void
__instr_hdr_extract_exec(struct rte_swx_pipeline *p, uint32_t n_extract)
{
	/* Common handler for one or several fused extract instructions:
	 * maps n_extract consecutive headers onto the packet buffer in
	 * place (no copy), marks them valid, and advances the packet
	 * offset/length/pointer past them.
	 */
	struct thread *t = &p->threads[p->thread_id];
	struct instruction *ip = t->ip;
	uint64_t valid_headers = t->valid_headers;
	uint8_t *ptr = t->ptr;
	uint32_t offset = t->pkt.offset;
	uint32_t length = t->pkt.length;
	uint32_t i;

	for (i = 0; i < n_extract; i++) {
		uint32_t header_id = ip->io.hdr.header_id[i];
		uint32_t struct_id = ip->io.hdr.struct_id[i];
		uint32_t n_bytes = ip->io.hdr.n_bytes[i];

		TRACE("[Thread %2u]: extract header %u (%u bytes)\n",
		      p->thread_id,
		      header_id,
		      n_bytes);

		/* Headers: the header struct points directly into the
		 * packet buffer (zero-copy extract).
		 */
		t->structs[struct_id] = ptr;
		valid_headers = MASK64_BIT_SET(valid_headers, header_id);

		/* Packet. */
		offset += n_bytes;
		length -= n_bytes;
		ptr += n_bytes;
	}

	/* Headers. */
	t->valid_headers = valid_headers;

	/* Packet. */
	t->pkt.offset = offset;
	t->pkt.length = length;
	t->ptr = ptr;
}
2678
static inline void
instr_hdr_extract_exec(struct rte_swx_pipeline *p)
{
	/* Extract a single header, then advance to the next instruction. */
	__instr_hdr_extract_exec(p, 1);

	/* Thread. */
	thread_ip_inc(p);
}
2687
static inline void
instr_hdr_extract2_exec(struct rte_swx_pipeline *p)
{
	TRACE("[Thread %2u] *** The next 2 instructions are fused. ***\n",
	      p->thread_id);

	/* Fused handler: 2 consecutive extracts in a single pass. */
	__instr_hdr_extract_exec(p, 2);

	/* Thread. */
	thread_ip_inc(p);
}
2699
static inline void
instr_hdr_extract3_exec(struct rte_swx_pipeline *p)
{
	TRACE("[Thread %2u] *** The next 3 instructions are fused. ***\n",
	      p->thread_id);

	/* Fused handler: 3 consecutive extracts in a single pass. */
	__instr_hdr_extract_exec(p, 3);

	/* Thread. */
	thread_ip_inc(p);
}
2711
static inline void
instr_hdr_extract4_exec(struct rte_swx_pipeline *p)
{
	TRACE("[Thread %2u] *** The next 4 instructions are fused. ***\n",
	      p->thread_id);

	/* Fused handler: 4 consecutive extracts in a single pass. */
	__instr_hdr_extract_exec(p, 4);

	/* Thread. */
	thread_ip_inc(p);
}
2723
static inline void
instr_hdr_extract5_exec(struct rte_swx_pipeline *p)
{
	TRACE("[Thread %2u] *** The next 5 instructions are fused. ***\n",
	      p->thread_id);

	/* Fused handler: 5 consecutive extracts in a single pass. */
	__instr_hdr_extract_exec(p, 5);

	/* Thread. */
	thread_ip_inc(p);
}
2735
static inline void
instr_hdr_extract6_exec(struct rte_swx_pipeline *p)
{
	TRACE("[Thread %2u] *** The next 6 instructions are fused. ***\n",
	      p->thread_id);

	/* Fused handler: 6 consecutive extracts in a single pass. */
	__instr_hdr_extract_exec(p, 6);

	/* Thread. */
	thread_ip_inc(p);
}
2747
static inline void
instr_hdr_extract7_exec(struct rte_swx_pipeline *p)
{
	TRACE("[Thread %2u] *** The next 7 instructions are fused. ***\n",
	      p->thread_id);

	/* Fused handler: 7 consecutive extracts in a single pass. */
	__instr_hdr_extract_exec(p, 7);

	/* Thread. */
	thread_ip_inc(p);
}
2759
static inline void
instr_hdr_extract8_exec(struct rte_swx_pipeline *p)
{
	TRACE("[Thread %2u] *** The next 8 instructions are fused. ***\n",
	      p->thread_id);

	/* Fused handler: 8 consecutive extracts in a single pass. */
	__instr_hdr_extract_exec(p, 8);

	/* Thread. */
	thread_ip_inc(p);
}
2771
2772 /*
2773 * emit.
2774 */
2775 static int
instr_hdr_emit_translate(struct rte_swx_pipeline * p,struct action * action __rte_unused,char ** tokens,int n_tokens,struct instruction * instr,struct instruction_data * data __rte_unused)2776 instr_hdr_emit_translate(struct rte_swx_pipeline *p,
2777 struct action *action __rte_unused,
2778 char **tokens,
2779 int n_tokens,
2780 struct instruction *instr,
2781 struct instruction_data *data __rte_unused)
2782 {
2783 struct header *h;
2784
2785 CHECK(n_tokens == 2, EINVAL);
2786
2787 h = header_parse(p, tokens[1]);
2788 CHECK(h, EINVAL);
2789
2790 instr->type = INSTR_HDR_EMIT;
2791 instr->io.hdr.header_id[0] = h->id;
2792 instr->io.hdr.struct_id[0] = h->struct_id;
2793 instr->io.hdr.n_bytes[0] = h->st->n_bits / 8;
2794 return 0;
2795 }
2796
2797 static inline void
2798 __instr_hdr_emit_exec(struct rte_swx_pipeline *p, uint32_t n_emit);
2799
2800 static inline void
__instr_hdr_emit_exec(struct rte_swx_pipeline * p,uint32_t n_emit)2801 __instr_hdr_emit_exec(struct rte_swx_pipeline *p, uint32_t n_emit)
2802 {
2803 struct thread *t = &p->threads[p->thread_id];
2804 struct instruction *ip = t->ip;
2805 uint32_t n_headers_out = t->n_headers_out;
2806 struct header_out_runtime *ho = &t->headers_out[n_headers_out - 1];
2807 uint8_t *ho_ptr = NULL;
2808 uint32_t ho_nbytes = 0, i;
2809
2810 for (i = 0; i < n_emit; i++) {
2811 uint32_t header_id = ip->io.hdr.header_id[i];
2812 uint32_t struct_id = ip->io.hdr.struct_id[i];
2813 uint32_t n_bytes = ip->io.hdr.n_bytes[i];
2814
2815 struct header_runtime *hi = &t->headers[header_id];
2816 uint8_t *hi_ptr = t->structs[struct_id];
2817
2818 TRACE("[Thread %2u]: emit header %u\n",
2819 p->thread_id,
2820 header_id);
2821
2822 /* Headers. */
2823 if (!i) {
2824 if (!t->n_headers_out) {
2825 ho = &t->headers_out[0];
2826
2827 ho->ptr0 = hi->ptr0;
2828 ho->ptr = hi_ptr;
2829
2830 ho_ptr = hi_ptr;
2831 ho_nbytes = n_bytes;
2832
2833 n_headers_out = 1;
2834
2835 continue;
2836 } else {
2837 ho_ptr = ho->ptr;
2838 ho_nbytes = ho->n_bytes;
2839 }
2840 }
2841
2842 if (ho_ptr + ho_nbytes == hi_ptr) {
2843 ho_nbytes += n_bytes;
2844 } else {
2845 ho->n_bytes = ho_nbytes;
2846
2847 ho++;
2848 ho->ptr0 = hi->ptr0;
2849 ho->ptr = hi_ptr;
2850
2851 ho_ptr = hi_ptr;
2852 ho_nbytes = n_bytes;
2853
2854 n_headers_out++;
2855 }
2856 }
2857
2858 ho->n_bytes = ho_nbytes;
2859 t->n_headers_out = n_headers_out;
2860 }
2861
static inline void
instr_hdr_emit_exec(struct rte_swx_pipeline *p)
{
	/* Emit a single header, then advance to the next instruction. */
	__instr_hdr_emit_exec(p, 1);

	/* Thread. */
	thread_ip_inc(p);
}
2870
static inline void
instr_hdr_emit_tx_exec(struct rte_swx_pipeline *p)
{
	TRACE("[Thread %2u] *** The next 2 instructions are fused. ***\n",
	      p->thread_id);

	/* Fused emit + tx: no thread_ip_inc() here, as instr_tx_exec()
	 * resets the instruction pointer and starts the next packet.
	 */
	__instr_hdr_emit_exec(p, 1);
	instr_tx_exec(p);
}
2880
static inline void
instr_hdr_emit2_tx_exec(struct rte_swx_pipeline *p)
{
	TRACE("[Thread %2u] *** The next 3 instructions are fused. ***\n",
	      p->thread_id);

	/* Fused 2 x emit + tx; instr_tx_exec() handles the thread state. */
	__instr_hdr_emit_exec(p, 2);
	instr_tx_exec(p);
}
2890
static inline void
instr_hdr_emit3_tx_exec(struct rte_swx_pipeline *p)
{
	TRACE("[Thread %2u] *** The next 4 instructions are fused. ***\n",
	      p->thread_id);

	/* Fused 3 x emit + tx; instr_tx_exec() handles the thread state. */
	__instr_hdr_emit_exec(p, 3);
	instr_tx_exec(p);
}
2900
static inline void
instr_hdr_emit4_tx_exec(struct rte_swx_pipeline *p)
{
	TRACE("[Thread %2u] *** The next 5 instructions are fused. ***\n",
	      p->thread_id);

	/* Fused 4 x emit + tx; instr_tx_exec() handles the thread state. */
	__instr_hdr_emit_exec(p, 4);
	instr_tx_exec(p);
}
2910
static inline void
instr_hdr_emit5_tx_exec(struct rte_swx_pipeline *p)
{
	TRACE("[Thread %2u] *** The next 6 instructions are fused. ***\n",
	      p->thread_id);

	/* Fused 5 x emit + tx; instr_tx_exec() handles the thread state. */
	__instr_hdr_emit_exec(p, 5);
	instr_tx_exec(p);
}
2920
static inline void
instr_hdr_emit6_tx_exec(struct rte_swx_pipeline *p)
{
	TRACE("[Thread %2u] *** The next 7 instructions are fused. ***\n",
	      p->thread_id);

	/* Fused 6 x emit + tx; instr_tx_exec() handles the thread state. */
	__instr_hdr_emit_exec(p, 6);
	instr_tx_exec(p);
}
2930
static inline void
instr_hdr_emit7_tx_exec(struct rte_swx_pipeline *p)
{
	TRACE("[Thread %2u] *** The next 8 instructions are fused. ***\n",
	      p->thread_id);

	/* Fused 7 x emit + tx; instr_tx_exec() handles the thread state. */
	__instr_hdr_emit_exec(p, 7);
	instr_tx_exec(p);
}
2940
static inline void
instr_hdr_emit8_tx_exec(struct rte_swx_pipeline *p)
{
	TRACE("[Thread %2u] *** The next 9 instructions are fused. ***\n",
	      p->thread_id);

	/* Fused 8 x emit + tx; instr_tx_exec() handles the thread state. */
	__instr_hdr_emit_exec(p, 8);
	instr_tx_exec(p);
}
2950
2951 /*
2952 * validate.
2953 */
2954 static int
instr_hdr_validate_translate(struct rte_swx_pipeline * p,struct action * action __rte_unused,char ** tokens,int n_tokens,struct instruction * instr,struct instruction_data * data __rte_unused)2955 instr_hdr_validate_translate(struct rte_swx_pipeline *p,
2956 struct action *action __rte_unused,
2957 char **tokens,
2958 int n_tokens,
2959 struct instruction *instr,
2960 struct instruction_data *data __rte_unused)
2961 {
2962 struct header *h;
2963
2964 CHECK(n_tokens == 2, EINVAL);
2965
2966 h = header_parse(p, tokens[1]);
2967 CHECK(h, EINVAL);
2968
2969 instr->type = INSTR_HDR_VALIDATE;
2970 instr->valid.header_id = h->id;
2971 return 0;
2972 }
2973
static inline void
instr_hdr_validate_exec(struct rte_swx_pipeline *p)
{
	/* Set the valid bit for the instruction's header. */
	struct thread *t = &p->threads[p->thread_id];
	struct instruction *ip = t->ip;
	uint32_t header_id = ip->valid.header_id;

	TRACE("[Thread %2u] validate header %u\n", p->thread_id, header_id);

	/* Headers. */
	t->valid_headers = MASK64_BIT_SET(t->valid_headers, header_id);

	/* Thread. */
	thread_ip_inc(p);
}
2989
2990 /*
2991 * invalidate.
2992 */
2993 static int
instr_hdr_invalidate_translate(struct rte_swx_pipeline * p,struct action * action __rte_unused,char ** tokens,int n_tokens,struct instruction * instr,struct instruction_data * data __rte_unused)2994 instr_hdr_invalidate_translate(struct rte_swx_pipeline *p,
2995 struct action *action __rte_unused,
2996 char **tokens,
2997 int n_tokens,
2998 struct instruction *instr,
2999 struct instruction_data *data __rte_unused)
3000 {
3001 struct header *h;
3002
3003 CHECK(n_tokens == 2, EINVAL);
3004
3005 h = header_parse(p, tokens[1]);
3006 CHECK(h, EINVAL);
3007
3008 instr->type = INSTR_HDR_INVALIDATE;
3009 instr->valid.header_id = h->id;
3010 return 0;
3011 }
3012
static inline void
instr_hdr_invalidate_exec(struct rte_swx_pipeline *p)
{
	/* Clear the valid bit for the instruction's header. */
	struct thread *t = &p->threads[p->thread_id];
	struct instruction *ip = t->ip;
	uint32_t header_id = ip->valid.header_id;

	TRACE("[Thread %2u] invalidate header %u\n", p->thread_id, header_id);

	/* Headers. */
	t->valid_headers = MASK64_BIT_CLR(t->valid_headers, header_id);

	/* Thread. */
	thread_ip_inc(p);
}
3028
3029 /*
3030 * table.
3031 */
static struct table *
table_find(struct rte_swx_pipeline *p, const char *name);

static int
instr_table_translate(struct rte_swx_pipeline *p,
		      struct action *action,
		      char **tokens,
		      int n_tokens,
		      struct instruction *instr,
		      struct instruction_data *data __rte_unused)
{
	struct table *t;

	/* "table NAME": look up table NAME and invoke the resulting action.
	 * Not allowed inside an action. Returns 0 on success or -EINVAL.
	 */
	CHECK(!action, EINVAL);
	CHECK(n_tokens == 2, EINVAL);

	t = table_find(p, tokens[1]);
	CHECK(t, EINVAL);

	instr->type = INSTR_TABLE;
	instr->table.table_id = t->id;
	return 0;
}
3055
static inline void
instr_table_exec(struct rte_swx_pipeline *p)
{
	/* Perform a table lookup and call the selected action. The lookup
	 * may take multiple invocations (e.g. memory latency hiding): while
	 * not finalized, the thread yields without advancing its instruction
	 * pointer, so this same instruction is retried later.
	 */
	struct thread *t = &p->threads[p->thread_id];
	struct instruction *ip = t->ip;
	uint32_t table_id = ip->table.table_id;
	struct rte_swx_table_state *ts = &t->table_state[table_id];
	struct table_runtime *table = &t->tables[table_id];
	uint64_t action_id;
	uint8_t *action_data;
	int done, hit;

	/* Table. */
	done = table->func(ts->obj,
			   table->mailbox,
			   table->key,
			   &action_id,
			   &action_data,
			   &hit);
	if (!done) {
		/* Thread: lookup still in flight, retry on next schedule. */
		TRACE("[Thread %2u] table %u (not finalized)\n",
		      p->thread_id,
		      table_id);

		thread_yield(p);
		return;
	}

	/* On lookup miss, fall back to the table's default action. */
	action_id = hit ? action_id : ts->default_action_id;
	action_data = hit ? action_data : ts->default_action_data;

	TRACE("[Thread %2u] table %u (%s, action %u)\n",
	      p->thread_id,
	      table_id,
	      hit ? "hit" : "miss",
	      (uint32_t)action_id);

	/* Action data is exposed to the action as struct 0. */
	t->action_id = action_id;
	t->structs[0] = action_data;
	t->hit = hit;

	/* Thread. */
	thread_ip_action_call(p, t, action_id);
}
3101
3102 /*
3103 * extern.
3104 */
3105 static int
instr_extern_translate(struct rte_swx_pipeline * p,struct action * action __rte_unused,char ** tokens,int n_tokens,struct instruction * instr,struct instruction_data * data __rte_unused)3106 instr_extern_translate(struct rte_swx_pipeline *p,
3107 struct action *action __rte_unused,
3108 char **tokens,
3109 int n_tokens,
3110 struct instruction *instr,
3111 struct instruction_data *data __rte_unused)
3112 {
3113 char *token = tokens[1];
3114
3115 CHECK(n_tokens == 2, EINVAL);
3116
3117 if (token[0] == 'e') {
3118 struct extern_obj *obj;
3119 struct extern_type_member_func *func;
3120
3121 func = extern_obj_member_func_parse(p, token, &obj);
3122 CHECK(func, EINVAL);
3123
3124 instr->type = INSTR_EXTERN_OBJ;
3125 instr->ext_obj.ext_obj_id = obj->id;
3126 instr->ext_obj.func_id = func->id;
3127
3128 return 0;
3129 }
3130
3131 if (token[0] == 'f') {
3132 struct extern_func *func;
3133
3134 func = extern_func_parse(p, token);
3135 CHECK(func, EINVAL);
3136
3137 instr->type = INSTR_EXTERN_FUNC;
3138 instr->ext_func.ext_func_id = func->id;
3139
3140 return 0;
3141 }
3142
3143 CHECK(0, EINVAL);
3144 }
3145
static inline void
instr_extern_obj_exec(struct rte_swx_pipeline *p)
{
	/* Invoke an extern object member function. The function returns
	 * non-zero when complete; while it returns 0, the thread yields
	 * without advancing, so the call is resumed on the next schedule.
	 */
	struct thread *t = &p->threads[p->thread_id];
	struct instruction *ip = t->ip;
	uint32_t obj_id = ip->ext_obj.ext_obj_id;
	uint32_t func_id = ip->ext_obj.func_id;
	struct extern_obj_runtime *obj = &t->extern_objs[obj_id];
	rte_swx_extern_type_member_func_t func = obj->funcs[func_id];

	TRACE("[Thread %2u] extern obj %u member func %u\n",
	      p->thread_id,
	      obj_id,
	      func_id);

	/* Extern object member function execute. */
	uint32_t done = func(obj->obj, obj->mailbox);

	/* Thread: advance iff done; yield iff not done (done is 0/1). */
	thread_ip_inc_cond(t, done);
	thread_yield_cond(p, done ^ 1);
}
3168
static inline void
instr_extern_func_exec(struct rte_swx_pipeline *p)
{
	/* Invoke a plain extern function. Same completion protocol as
	 * extern object member functions: 0 means "not done, retry later".
	 */
	struct thread *t = &p->threads[p->thread_id];
	struct instruction *ip = t->ip;
	uint32_t ext_func_id = ip->ext_func.ext_func_id;
	struct extern_func_runtime *ext_func = &t->extern_funcs[ext_func_id];
	rte_swx_extern_func_t func = ext_func->func;

	TRACE("[Thread %2u] extern func %u\n",
	      p->thread_id,
	      ext_func_id);

	/* Extern function execute. */
	uint32_t done = func(ext_func->mailbox);

	/* Thread: advance iff done; yield iff not done (done is 0/1). */
	thread_ip_inc_cond(t, done);
	thread_yield_cond(p, done ^ 1);
}
3189
3190 /*
3191 * mov.
3192 */
static int
instr_mov_translate(struct rte_swx_pipeline *p,
		    struct action *action,
		    char **tokens,
		    int n_tokens,
		    struct instruction *instr,
		    struct instruction_data *data __rte_unused)
{
	char *dst = tokens[1], *src = tokens[2];
	struct field *fdst, *fsrc;
	uint64_t src_val;
	uint32_t dst_struct_id, src_struct_id;

	/* "mov dst src": copy a field or an immediate into dst field.
	 * Returns 0 on success or -EINVAL on malformed operands.
	 */
	CHECK(n_tokens == 3, EINVAL);

	fdst = struct_field_parse(p, NULL, dst, &dst_struct_id);
	CHECK(fdst, EINVAL);

	/* MOV or MOV_S. */
	fsrc = struct_field_parse(p, action, src, &src_struct_id);
	if (fsrc) {
		instr->type = INSTR_MOV;
		/* MOV_S: exactly one side is a header field ('h' prefix),
		 * so a network/host byte order swap is needed at run-time.
		 */
		if ((dst[0] == 'h' && src[0] != 'h') ||
		    (dst[0] != 'h' && src[0] == 'h'))
			instr->type = INSTR_MOV_S;

		instr->mov.dst.struct_id = (uint8_t)dst_struct_id;
		instr->mov.dst.n_bits = fdst->n_bits;
		instr->mov.dst.offset = fdst->offset / 8;
		instr->mov.src.struct_id = (uint8_t)src_struct_id;
		instr->mov.src.n_bits = fsrc->n_bits;
		instr->mov.src.offset = fsrc->offset / 8;
		return 0;
	}

	/* MOV_I: src is an immediate integer (any strtoull base). */
	src_val = strtoull(src, &src, 0);
	CHECK(!src[0], EINVAL);

	/* Header destination: pre-swap the immediate to big endian at
	 * translation time so the run-time handler stores it directly.
	 */
	if (dst[0] == 'h')
		src_val = hton64(src_val) >> (64 - fdst->n_bits);

	instr->type = INSTR_MOV_I;
	instr->mov.dst.struct_id = (uint8_t)dst_struct_id;
	instr->mov.dst.n_bits = fdst->n_bits;
	instr->mov.dst.offset = fdst->offset / 8;
	instr->mov.src_val = src_val;
	return 0;
}
3242
static inline void
instr_mov_exec(struct rte_swx_pipeline *p)
{
	/* Field-to-field copy, both sides in the same byte order. */
	struct thread *t = &p->threads[p->thread_id];
	struct instruction *ip = t->ip;

	TRACE("[Thread %2u] mov\n",
	      p->thread_id);

	MOV(t, ip);

	/* Thread. */
	thread_ip_inc(p);
}
3257
static inline void
instr_mov_s_exec(struct rte_swx_pipeline *p)
{
	/* Field-to-field copy with a byte order swap (header vs meta-data). */
	struct thread *t = &p->threads[p->thread_id];
	struct instruction *ip = t->ip;

	TRACE("[Thread %2u] mov (s)\n",
	      p->thread_id);

	MOV_S(t, ip);

	/* Thread. */
	thread_ip_inc(p);
}
3272
static inline void
instr_mov_i_exec(struct rte_swx_pipeline *p)
{
	/* Immediate-to-field copy; any byte order swap for header
	 * destinations was already applied at translation time.
	 */
	struct thread *t = &p->threads[p->thread_id];
	struct instruction *ip = t->ip;

	TRACE("[Thread %2u] mov m.f %" PRIx64 "\n",
	      p->thread_id,
	      ip->mov.src_val);

	MOV_I(t, ip);

	/* Thread. */
	thread_ip_inc(p);
}
3288
3289 /*
3290 * dma.
3291 */
3292 static int
instr_dma_translate(struct rte_swx_pipeline * p,struct action * action,char ** tokens,int n_tokens,struct instruction * instr,struct instruction_data * data __rte_unused)3293 instr_dma_translate(struct rte_swx_pipeline *p,
3294 struct action *action,
3295 char **tokens,
3296 int n_tokens,
3297 struct instruction *instr,
3298 struct instruction_data *data __rte_unused)
3299 {
3300 char *dst = tokens[1];
3301 char *src = tokens[2];
3302 struct header *h;
3303 struct field *tf;
3304
3305 CHECK(action, EINVAL);
3306 CHECK(n_tokens == 3, EINVAL);
3307
3308 h = header_parse(p, dst);
3309 CHECK(h, EINVAL);
3310
3311 tf = action_field_parse(action, src);
3312 CHECK(tf, EINVAL);
3313
3314 instr->type = INSTR_DMA_HT;
3315 instr->dma.dst.header_id[0] = h->id;
3316 instr->dma.dst.struct_id[0] = h->struct_id;
3317 instr->dma.n_bytes[0] = h->st->n_bits / 8;
3318 instr->dma.src.offset[0] = tf->offset / 8;
3319
3320 return 0;
3321 }
3322
static inline void
__instr_dma_ht_exec(struct rte_swx_pipeline *p, uint32_t n_dma);

static inline void
__instr_dma_ht_exec(struct rte_swx_pipeline *p, uint32_t n_dma)
{
	/* Common handler for one or several fused DMA instructions: copy
	 * slices of the action data (struct 0) into n_dma headers and mark
	 * each destination header valid.
	 */
	struct thread *t = &p->threads[p->thread_id];
	struct instruction *ip = t->ip;
	uint8_t *action_data = t->structs[0];
	uint64_t valid_headers = t->valid_headers;
	uint32_t i;

	for (i = 0; i < n_dma; i++) {
		uint32_t header_id = ip->dma.dst.header_id[i];
		uint32_t struct_id = ip->dma.dst.struct_id[i];
		uint32_t offset = ip->dma.src.offset[i];
		uint32_t n_bytes = ip->dma.n_bytes[i];

		struct header_runtime *h = &t->headers[header_id];
		uint8_t *h_ptr0 = h->ptr0;
		uint8_t *h_ptr = t->structs[struct_id];

		/* Write into the live header when it is valid; otherwise
		 * into its default storage (ptr0), which then becomes the
		 * live header below.
		 */
		void *dst = MASK64_BIT_GET(valid_headers, header_id) ?
			h_ptr : h_ptr0;
		void *src = &action_data[offset];

		TRACE("[Thread %2u] dma h.s t.f\n", p->thread_id);

		/* Headers. */
		memcpy(dst, src, n_bytes);
		t->structs[struct_id] = dst;
		valid_headers = MASK64_BIT_SET(valid_headers, header_id);
	}

	t->valid_headers = valid_headers;
}
3359
static inline void
instr_dma_ht_exec(struct rte_swx_pipeline *p)
{
	/* Single DMA, then advance to the next instruction. */
	__instr_dma_ht_exec(p, 1);

	/* Thread. */
	thread_ip_inc(p);
}
3368
static inline void
instr_dma_ht2_exec(struct rte_swx_pipeline *p)
{
	TRACE("[Thread %2u] *** The next 2 instructions are fused. ***\n",
	      p->thread_id);

	/* Fused handler: 2 consecutive DMAs in a single pass. */
	__instr_dma_ht_exec(p, 2);

	/* Thread. */
	thread_ip_inc(p);
}
3380
static inline void
instr_dma_ht3_exec(struct rte_swx_pipeline *p)
{
	TRACE("[Thread %2u] *** The next 3 instructions are fused. ***\n",
	      p->thread_id);

	/* Fused handler: 3 consecutive DMAs in a single pass. */
	__instr_dma_ht_exec(p, 3);

	/* Thread. */
	thread_ip_inc(p);
}
3392
static inline void
instr_dma_ht4_exec(struct rte_swx_pipeline *p)
{
	TRACE("[Thread %2u] *** The next 4 instructions are fused. ***\n",
	      p->thread_id);

	/* Fused handler: 4 consecutive DMAs in a single pass. */
	__instr_dma_ht_exec(p, 4);

	/* Thread. */
	thread_ip_inc(p);
}
3404
static inline void
instr_dma_ht5_exec(struct rte_swx_pipeline *p)
{
	TRACE("[Thread %2u] *** The next 5 instructions are fused. ***\n",
	      p->thread_id);

	/* Fused handler: 5 consecutive DMAs in a single pass. */
	__instr_dma_ht_exec(p, 5);

	/* Thread. */
	thread_ip_inc(p);
}
3416
static inline void
instr_dma_ht6_exec(struct rte_swx_pipeline *p)
{
	TRACE("[Thread %2u] *** The next 6 instructions are fused. ***\n",
	      p->thread_id);

	/* Fused handler: 6 consecutive DMAs in a single pass. */
	__instr_dma_ht_exec(p, 6);

	/* Thread. */
	thread_ip_inc(p);
}
3428
static inline void
instr_dma_ht7_exec(struct rte_swx_pipeline *p)
{
	TRACE("[Thread %2u] *** The next 7 instructions are fused. ***\n",
	      p->thread_id);

	/* Fused handler: 7 consecutive DMAs in a single pass. */
	__instr_dma_ht_exec(p, 7);

	/* Thread. */
	thread_ip_inc(p);
}
3440
static inline void
instr_dma_ht8_exec(struct rte_swx_pipeline *p)
{
	TRACE("[Thread %2u] *** The next 8 instructions are fused. ***\n",
	      p->thread_id);

	/* Fused handler: 8 consecutive DMAs in a single pass. */
	__instr_dma_ht_exec(p, 8);

	/* Thread. */
	thread_ip_inc(p);
}
3452
3453 /*
3454 * alu.
3455 */
static int
instr_alu_add_translate(struct rte_swx_pipeline *p,
			struct action *action,
			char **tokens,
			int n_tokens,
			struct instruction *instr,
			struct instruction_data *data __rte_unused)
{
	char *dst = tokens[1], *src = tokens[2];
	struct field *fdst, *fsrc;
	uint64_t src_val;
	uint32_t dst_struct_id, src_struct_id;

	/* "add dst src": dst += src, where src is a field or an immediate.
	 * The opcode variant encodes which operands are header fields
	 * (big endian) vs meta-data/action fields (host order), so the
	 * run-time handler knows which byte order conversions to apply.
	 * Returns 0 on success or -EINVAL on malformed operands.
	 */
	CHECK(n_tokens == 3, EINVAL);

	fdst = struct_field_parse(p, NULL, dst, &dst_struct_id);
	CHECK(fdst, EINVAL);

	/* ADD, ADD_HM, ADD_MH, ADD_HH. */
	fsrc = struct_field_parse(p, action, src, &src_struct_id);
	if (fsrc) {
		instr->type = INSTR_ALU_ADD;
		if (dst[0] == 'h' && src[0] == 'm')
			instr->type = INSTR_ALU_ADD_HM;
		if (dst[0] == 'm' && src[0] == 'h')
			instr->type = INSTR_ALU_ADD_MH;
		if (dst[0] == 'h' && src[0] == 'h')
			instr->type = INSTR_ALU_ADD_HH;

		instr->alu.dst.struct_id = (uint8_t)dst_struct_id;
		instr->alu.dst.n_bits = fdst->n_bits;
		instr->alu.dst.offset = fdst->offset / 8;
		instr->alu.src.struct_id = (uint8_t)src_struct_id;
		instr->alu.src.n_bits = fsrc->n_bits;
		instr->alu.src.offset = fsrc->offset / 8;
		return 0;
	}

	/* ADD_MI, ADD_HI: src is an immediate integer. */
	src_val = strtoull(src, &src, 0);
	CHECK(!src[0], EINVAL);

	instr->type = INSTR_ALU_ADD_MI;
	if (dst[0] == 'h')
		instr->type = INSTR_ALU_ADD_HI;

	instr->alu.dst.struct_id = (uint8_t)dst_struct_id;
	instr->alu.dst.n_bits = fdst->n_bits;
	instr->alu.dst.offset = fdst->offset / 8;
	instr->alu.src_val = src_val;
	return 0;
}
3508
static int
instr_alu_sub_translate(struct rte_swx_pipeline *p,
			struct action *action,
			char **tokens,
			int n_tokens,
			struct instruction *instr,
			struct instruction_data *data __rte_unused)
{
	char *dst = tokens[1], *src = tokens[2];
	struct field *fdst, *fsrc;
	uint64_t src_val;
	uint32_t dst_struct_id, src_struct_id;

	/* "sub dst src": dst -= src. Mirrors instr_alu_add_translate();
	 * the opcode variant encodes the header vs meta-data byte order of
	 * the operands. Returns 0 on success or -EINVAL.
	 */
	CHECK(n_tokens == 3, EINVAL);

	fdst = struct_field_parse(p, NULL, dst, &dst_struct_id);
	CHECK(fdst, EINVAL);

	/* SUB, SUB_HM, SUB_MH, SUB_HH. */
	fsrc = struct_field_parse(p, action, src, &src_struct_id);
	if (fsrc) {
		instr->type = INSTR_ALU_SUB;
		if (dst[0] == 'h' && src[0] == 'm')
			instr->type = INSTR_ALU_SUB_HM;
		if (dst[0] == 'm' && src[0] == 'h')
			instr->type = INSTR_ALU_SUB_MH;
		if (dst[0] == 'h' && src[0] == 'h')
			instr->type = INSTR_ALU_SUB_HH;

		instr->alu.dst.struct_id = (uint8_t)dst_struct_id;
		instr->alu.dst.n_bits = fdst->n_bits;
		instr->alu.dst.offset = fdst->offset / 8;
		instr->alu.src.struct_id = (uint8_t)src_struct_id;
		instr->alu.src.n_bits = fsrc->n_bits;
		instr->alu.src.offset = fsrc->offset / 8;
		return 0;
	}

	/* SUB_MI, SUB_HI: src is an immediate integer. */
	src_val = strtoull(src, &src, 0);
	CHECK(!src[0], EINVAL);

	instr->type = INSTR_ALU_SUB_MI;
	if (dst[0] == 'h')
		instr->type = INSTR_ALU_SUB_HI;

	instr->alu.dst.struct_id = (uint8_t)dst_struct_id;
	instr->alu.dst.n_bits = fdst->n_bits;
	instr->alu.dst.offset = fdst->offset / 8;
	instr->alu.src_val = src_val;
	return 0;
}
3561
3562 static int
instr_alu_ckadd_translate(struct rte_swx_pipeline * p,struct action * action __rte_unused,char ** tokens,int n_tokens,struct instruction * instr,struct instruction_data * data __rte_unused)3563 instr_alu_ckadd_translate(struct rte_swx_pipeline *p,
3564 struct action *action __rte_unused,
3565 char **tokens,
3566 int n_tokens,
3567 struct instruction *instr,
3568 struct instruction_data *data __rte_unused)
3569 {
3570 char *dst = tokens[1], *src = tokens[2];
3571 struct header *hdst, *hsrc;
3572 struct field *fdst, *fsrc;
3573
3574 CHECK(n_tokens == 3, EINVAL);
3575
3576 fdst = header_field_parse(p, dst, &hdst);
3577 CHECK(fdst && (fdst->n_bits == 16), EINVAL);
3578
3579 /* CKADD_FIELD. */
3580 fsrc = header_field_parse(p, src, &hsrc);
3581 if (fsrc) {
3582 instr->type = INSTR_ALU_CKADD_FIELD;
3583 instr->alu.dst.struct_id = (uint8_t)hdst->struct_id;
3584 instr->alu.dst.n_bits = fdst->n_bits;
3585 instr->alu.dst.offset = fdst->offset / 8;
3586 instr->alu.src.struct_id = (uint8_t)hsrc->struct_id;
3587 instr->alu.src.n_bits = fsrc->n_bits;
3588 instr->alu.src.offset = fsrc->offset / 8;
3589 return 0;
3590 }
3591
3592 /* CKADD_STRUCT, CKADD_STRUCT20. */
3593 hsrc = header_parse(p, src);
3594 CHECK(hsrc, EINVAL);
3595
3596 instr->type = INSTR_ALU_CKADD_STRUCT;
3597 if ((hsrc->st->n_bits / 8) == 20)
3598 instr->type = INSTR_ALU_CKADD_STRUCT20;
3599
3600 instr->alu.dst.struct_id = (uint8_t)hdst->struct_id;
3601 instr->alu.dst.n_bits = fdst->n_bits;
3602 instr->alu.dst.offset = fdst->offset / 8;
3603 instr->alu.src.struct_id = (uint8_t)hsrc->struct_id;
3604 instr->alu.src.n_bits = hsrc->st->n_bits;
3605 instr->alu.src.offset = 0; /* Unused. */
3606 return 0;
3607 }
3608
3609 static int
instr_alu_cksub_translate(struct rte_swx_pipeline * p,struct action * action __rte_unused,char ** tokens,int n_tokens,struct instruction * instr,struct instruction_data * data __rte_unused)3610 instr_alu_cksub_translate(struct rte_swx_pipeline *p,
3611 struct action *action __rte_unused,
3612 char **tokens,
3613 int n_tokens,
3614 struct instruction *instr,
3615 struct instruction_data *data __rte_unused)
3616 {
3617 char *dst = tokens[1], *src = tokens[2];
3618 struct header *hdst, *hsrc;
3619 struct field *fdst, *fsrc;
3620
3621 CHECK(n_tokens == 3, EINVAL);
3622
3623 fdst = header_field_parse(p, dst, &hdst);
3624 CHECK(fdst && (fdst->n_bits == 16), EINVAL);
3625
3626 fsrc = header_field_parse(p, src, &hsrc);
3627 CHECK(fsrc, EINVAL);
3628
3629 instr->type = INSTR_ALU_CKSUB_FIELD;
3630 instr->alu.dst.struct_id = (uint8_t)hdst->struct_id;
3631 instr->alu.dst.n_bits = fdst->n_bits;
3632 instr->alu.dst.offset = fdst->offset / 8;
3633 instr->alu.src.struct_id = (uint8_t)hsrc->struct_id;
3634 instr->alu.src.n_bits = fsrc->n_bits;
3635 instr->alu.src.offset = fsrc->offset / 8;
3636 return 0;
3637 }
3638
3639 static int
instr_alu_shl_translate(struct rte_swx_pipeline * p,struct action * action,char ** tokens,int n_tokens,struct instruction * instr,struct instruction_data * data __rte_unused)3640 instr_alu_shl_translate(struct rte_swx_pipeline *p,
3641 struct action *action,
3642 char **tokens,
3643 int n_tokens,
3644 struct instruction *instr,
3645 struct instruction_data *data __rte_unused)
3646 {
3647 char *dst = tokens[1], *src = tokens[2];
3648 struct field *fdst, *fsrc;
3649 uint64_t src_val;
3650 uint32_t dst_struct_id, src_struct_id;
3651
3652 CHECK(n_tokens == 3, EINVAL);
3653
3654 fdst = struct_field_parse(p, NULL, dst, &dst_struct_id);
3655 CHECK(fdst, EINVAL);
3656
3657 /* SHL, SHL_HM, SHL_MH, SHL_HH. */
3658 fsrc = struct_field_parse(p, action, src, &src_struct_id);
3659 if (fsrc) {
3660 instr->type = INSTR_ALU_SHL;
3661 if (dst[0] == 'h' && src[0] == 'm')
3662 instr->type = INSTR_ALU_SHL_HM;
3663 if (dst[0] == 'm' && src[0] == 'h')
3664 instr->type = INSTR_ALU_SHL_MH;
3665 if (dst[0] == 'h' && src[0] == 'h')
3666 instr->type = INSTR_ALU_SHL_HH;
3667
3668 instr->alu.dst.struct_id = (uint8_t)dst_struct_id;
3669 instr->alu.dst.n_bits = fdst->n_bits;
3670 instr->alu.dst.offset = fdst->offset / 8;
3671 instr->alu.src.struct_id = (uint8_t)src_struct_id;
3672 instr->alu.src.n_bits = fsrc->n_bits;
3673 instr->alu.src.offset = fsrc->offset / 8;
3674 return 0;
3675 }
3676
3677 /* SHL_MI, SHL_HI. */
3678 src_val = strtoull(src, &src, 0);
3679 CHECK(!src[0], EINVAL);
3680
3681 instr->type = INSTR_ALU_SHL_MI;
3682 if (dst[0] == 'h')
3683 instr->type = INSTR_ALU_SHL_HI;
3684
3685 instr->alu.dst.struct_id = (uint8_t)dst_struct_id;
3686 instr->alu.dst.n_bits = fdst->n_bits;
3687 instr->alu.dst.offset = fdst->offset / 8;
3688 instr->alu.src_val = src_val;
3689 return 0;
3690 }
3691
3692 static int
instr_alu_shr_translate(struct rte_swx_pipeline * p,struct action * action,char ** tokens,int n_tokens,struct instruction * instr,struct instruction_data * data __rte_unused)3693 instr_alu_shr_translate(struct rte_swx_pipeline *p,
3694 struct action *action,
3695 char **tokens,
3696 int n_tokens,
3697 struct instruction *instr,
3698 struct instruction_data *data __rte_unused)
3699 {
3700 char *dst = tokens[1], *src = tokens[2];
3701 struct field *fdst, *fsrc;
3702 uint64_t src_val;
3703 uint32_t dst_struct_id, src_struct_id;
3704
3705 CHECK(n_tokens == 3, EINVAL);
3706
3707 fdst = struct_field_parse(p, NULL, dst, &dst_struct_id);
3708 CHECK(fdst, EINVAL);
3709
3710 /* SHR, SHR_HM, SHR_MH, SHR_HH. */
3711 fsrc = struct_field_parse(p, action, src, &src_struct_id);
3712 if (fsrc) {
3713 instr->type = INSTR_ALU_SHR;
3714 if (dst[0] == 'h' && src[0] == 'm')
3715 instr->type = INSTR_ALU_SHR_HM;
3716 if (dst[0] == 'm' && src[0] == 'h')
3717 instr->type = INSTR_ALU_SHR_MH;
3718 if (dst[0] == 'h' && src[0] == 'h')
3719 instr->type = INSTR_ALU_SHR_HH;
3720
3721 instr->alu.dst.struct_id = (uint8_t)dst_struct_id;
3722 instr->alu.dst.n_bits = fdst->n_bits;
3723 instr->alu.dst.offset = fdst->offset / 8;
3724 instr->alu.src.struct_id = (uint8_t)src_struct_id;
3725 instr->alu.src.n_bits = fsrc->n_bits;
3726 instr->alu.src.offset = fsrc->offset / 8;
3727 return 0;
3728 }
3729
3730 /* SHR_MI, SHR_HI. */
3731 src_val = strtoull(src, &src, 0);
3732 CHECK(!src[0], EINVAL);
3733
3734 instr->type = INSTR_ALU_SHR_MI;
3735 if (dst[0] == 'h')
3736 instr->type = INSTR_ALU_SHR_HI;
3737
3738 instr->alu.dst.struct_id = (uint8_t)dst_struct_id;
3739 instr->alu.dst.n_bits = fdst->n_bits;
3740 instr->alu.dst.offset = fdst->offset / 8;
3741 instr->alu.src_val = src_val;
3742 return 0;
3743 }
3744
3745 static int
instr_alu_and_translate(struct rte_swx_pipeline * p,struct action * action,char ** tokens,int n_tokens,struct instruction * instr,struct instruction_data * data __rte_unused)3746 instr_alu_and_translate(struct rte_swx_pipeline *p,
3747 struct action *action,
3748 char **tokens,
3749 int n_tokens,
3750 struct instruction *instr,
3751 struct instruction_data *data __rte_unused)
3752 {
3753 char *dst = tokens[1], *src = tokens[2];
3754 struct field *fdst, *fsrc;
3755 uint64_t src_val;
3756 uint32_t dst_struct_id, src_struct_id;
3757
3758 CHECK(n_tokens == 3, EINVAL);
3759
3760 fdst = struct_field_parse(p, NULL, dst, &dst_struct_id);
3761 CHECK(fdst, EINVAL);
3762
3763 /* AND or AND_S. */
3764 fsrc = struct_field_parse(p, action, src, &src_struct_id);
3765 if (fsrc) {
3766 instr->type = INSTR_ALU_AND;
3767 if ((dst[0] == 'h' && src[0] != 'h') ||
3768 (dst[0] != 'h' && src[0] == 'h'))
3769 instr->type = INSTR_ALU_AND_S;
3770
3771 instr->alu.dst.struct_id = (uint8_t)dst_struct_id;
3772 instr->alu.dst.n_bits = fdst->n_bits;
3773 instr->alu.dst.offset = fdst->offset / 8;
3774 instr->alu.src.struct_id = (uint8_t)src_struct_id;
3775 instr->alu.src.n_bits = fsrc->n_bits;
3776 instr->alu.src.offset = fsrc->offset / 8;
3777 return 0;
3778 }
3779
3780 /* AND_I. */
3781 src_val = strtoull(src, &src, 0);
3782 CHECK(!src[0], EINVAL);
3783
3784 if (dst[0] == 'h')
3785 src_val = hton64(src_val) >> (64 - fdst->n_bits);
3786
3787 instr->type = INSTR_ALU_AND_I;
3788 instr->alu.dst.struct_id = (uint8_t)dst_struct_id;
3789 instr->alu.dst.n_bits = fdst->n_bits;
3790 instr->alu.dst.offset = fdst->offset / 8;
3791 instr->alu.src_val = src_val;
3792 return 0;
3793 }
3794
3795 static int
instr_alu_or_translate(struct rte_swx_pipeline * p,struct action * action,char ** tokens,int n_tokens,struct instruction * instr,struct instruction_data * data __rte_unused)3796 instr_alu_or_translate(struct rte_swx_pipeline *p,
3797 struct action *action,
3798 char **tokens,
3799 int n_tokens,
3800 struct instruction *instr,
3801 struct instruction_data *data __rte_unused)
3802 {
3803 char *dst = tokens[1], *src = tokens[2];
3804 struct field *fdst, *fsrc;
3805 uint64_t src_val;
3806 uint32_t dst_struct_id, src_struct_id;
3807
3808 CHECK(n_tokens == 3, EINVAL);
3809
3810 fdst = struct_field_parse(p, NULL, dst, &dst_struct_id);
3811 CHECK(fdst, EINVAL);
3812
3813 /* OR or OR_S. */
3814 fsrc = struct_field_parse(p, action, src, &src_struct_id);
3815 if (fsrc) {
3816 instr->type = INSTR_ALU_OR;
3817 if ((dst[0] == 'h' && src[0] != 'h') ||
3818 (dst[0] != 'h' && src[0] == 'h'))
3819 instr->type = INSTR_ALU_OR_S;
3820
3821 instr->alu.dst.struct_id = (uint8_t)dst_struct_id;
3822 instr->alu.dst.n_bits = fdst->n_bits;
3823 instr->alu.dst.offset = fdst->offset / 8;
3824 instr->alu.src.struct_id = (uint8_t)src_struct_id;
3825 instr->alu.src.n_bits = fsrc->n_bits;
3826 instr->alu.src.offset = fsrc->offset / 8;
3827 return 0;
3828 }
3829
3830 /* OR_I. */
3831 src_val = strtoull(src, &src, 0);
3832 CHECK(!src[0], EINVAL);
3833
3834 if (dst[0] == 'h')
3835 src_val = hton64(src_val) >> (64 - fdst->n_bits);
3836
3837 instr->type = INSTR_ALU_OR_I;
3838 instr->alu.dst.struct_id = (uint8_t)dst_struct_id;
3839 instr->alu.dst.n_bits = fdst->n_bits;
3840 instr->alu.dst.offset = fdst->offset / 8;
3841 instr->alu.src_val = src_val;
3842 return 0;
3843 }
3844
3845 static int
instr_alu_xor_translate(struct rte_swx_pipeline * p,struct action * action,char ** tokens,int n_tokens,struct instruction * instr,struct instruction_data * data __rte_unused)3846 instr_alu_xor_translate(struct rte_swx_pipeline *p,
3847 struct action *action,
3848 char **tokens,
3849 int n_tokens,
3850 struct instruction *instr,
3851 struct instruction_data *data __rte_unused)
3852 {
3853 char *dst = tokens[1], *src = tokens[2];
3854 struct field *fdst, *fsrc;
3855 uint64_t src_val;
3856 uint32_t dst_struct_id, src_struct_id;
3857
3858 CHECK(n_tokens == 3, EINVAL);
3859
3860 fdst = struct_field_parse(p, NULL, dst, &dst_struct_id);
3861 CHECK(fdst, EINVAL);
3862
3863 /* XOR or XOR_S. */
3864 fsrc = struct_field_parse(p, action, src, &src_struct_id);
3865 if (fsrc) {
3866 instr->type = INSTR_ALU_XOR;
3867 if ((dst[0] == 'h' && src[0] != 'h') ||
3868 (dst[0] != 'h' && src[0] == 'h'))
3869 instr->type = INSTR_ALU_XOR_S;
3870
3871 instr->alu.dst.struct_id = (uint8_t)dst_struct_id;
3872 instr->alu.dst.n_bits = fdst->n_bits;
3873 instr->alu.dst.offset = fdst->offset / 8;
3874 instr->alu.src.struct_id = (uint8_t)src_struct_id;
3875 instr->alu.src.n_bits = fsrc->n_bits;
3876 instr->alu.src.offset = fsrc->offset / 8;
3877 return 0;
3878 }
3879
3880 /* XOR_I. */
3881 src_val = strtoull(src, &src, 0);
3882 CHECK(!src[0], EINVAL);
3883
3884 if (dst[0] == 'h')
3885 src_val = hton64(src_val) >> (64 - fdst->n_bits);
3886
3887 instr->type = INSTR_ALU_XOR_I;
3888 instr->alu.dst.struct_id = (uint8_t)dst_struct_id;
3889 instr->alu.dst.n_bits = fdst->n_bits;
3890 instr->alu.dst.offset = fdst->offset / 8;
3891 instr->alu.src_val = src_val;
3892 return 0;
3893 }
3894
/* Execute "add" for the current thread: applies '+' via the generic ALU
 * macro (no byte-order conversion — operand mix per the translate function),
 * then advances the instruction pointer.
 */
static inline void
instr_alu_add_exec(struct rte_swx_pipeline *p)
{
	struct thread *t = &p->threads[p->thread_id];
	struct instruction *ip = t->ip;

	TRACE("[Thread %2u] add\n", p->thread_id);

	/* Structs. */
	ALU(t, ip, +);

	/* Thread. */
	thread_ip_inc(p);
}
3909
/* Execute "add" with non-header destination and header (network byte order)
 * source; ALU_MH handles the conversion.
 */
static inline void
instr_alu_add_mh_exec(struct rte_swx_pipeline *p)
{
	struct thread *t = &p->threads[p->thread_id];
	struct instruction *ip = t->ip;

	TRACE("[Thread %2u] add (mh)\n", p->thread_id);

	/* Structs. */
	ALU_MH(t, ip, +);

	/* Thread. */
	thread_ip_inc(p);
}
3924
/* Execute "add" with header (network byte order) destination and non-header
 * source; ALU_HM handles the conversion.
 */
static inline void
instr_alu_add_hm_exec(struct rte_swx_pipeline *p)
{
	struct thread *t = &p->threads[p->thread_id];
	struct instruction *ip = t->ip;

	TRACE("[Thread %2u] add (hm)\n", p->thread_id);

	/* Structs. */
	ALU_HM(t, ip, +);

	/* Thread. */
	thread_ip_inc(p);
}
3939
/* Execute "add" with both operands in header fields (network byte order);
 * ALU_HH handles the conversions.
 */
static inline void
instr_alu_add_hh_exec(struct rte_swx_pipeline *p)
{
	struct thread *t = &p->threads[p->thread_id];
	struct instruction *ip = t->ip;

	TRACE("[Thread %2u] add (hh)\n", p->thread_id);

	/* Structs. */
	ALU_HH(t, ip, +);

	/* Thread. */
	thread_ip_inc(p);
}
3954
/* Execute "add" with non-header destination and immediate source
 * (ip->alu.src_val, set at translate time); ALU_MI variant.
 */
static inline void
instr_alu_add_mi_exec(struct rte_swx_pipeline *p)
{
	struct thread *t = &p->threads[p->thread_id];
	struct instruction *ip = t->ip;

	TRACE("[Thread %2u] add (mi)\n", p->thread_id);

	/* Structs. */
	ALU_MI(t, ip, +);

	/* Thread. */
	thread_ip_inc(p);
}
3969
/* Execute "add" with header destination and immediate source; ALU_HI
 * variant.
 */
static inline void
instr_alu_add_hi_exec(struct rte_swx_pipeline *p)
{
	struct thread *t = &p->threads[p->thread_id];
	struct instruction *ip = t->ip;

	TRACE("[Thread %2u] add (hi)\n", p->thread_id);

	/* Structs. */
	ALU_HI(t, ip, +);

	/* Thread. */
	thread_ip_inc(p);
}
3984
/* Execute "sub" for the current thread: applies '-' via the generic ALU
 * macro (no byte-order conversion), then advances the instruction pointer.
 */
static inline void
instr_alu_sub_exec(struct rte_swx_pipeline *p)
{
	struct thread *t = &p->threads[p->thread_id];
	struct instruction *ip = t->ip;

	TRACE("[Thread %2u] sub\n", p->thread_id);

	/* Structs. */
	ALU(t, ip, -);

	/* Thread. */
	thread_ip_inc(p);
}
3999
/* Execute "sub" with non-header destination and header source; ALU_MH
 * handles the byte-order conversion.
 */
static inline void
instr_alu_sub_mh_exec(struct rte_swx_pipeline *p)
{
	struct thread *t = &p->threads[p->thread_id];
	struct instruction *ip = t->ip;

	TRACE("[Thread %2u] sub (mh)\n", p->thread_id);

	/* Structs. */
	ALU_MH(t, ip, -);

	/* Thread. */
	thread_ip_inc(p);
}
4014
/* Execute "sub" with header destination and non-header source; ALU_HM
 * handles the byte-order conversion.
 */
static inline void
instr_alu_sub_hm_exec(struct rte_swx_pipeline *p)
{
	struct thread *t = &p->threads[p->thread_id];
	struct instruction *ip = t->ip;

	TRACE("[Thread %2u] sub (hm)\n", p->thread_id);

	/* Structs. */
	ALU_HM(t, ip, -);

	/* Thread. */
	thread_ip_inc(p);
}
4029
/* Execute "sub" with both operands in header fields; ALU_HH handles the
 * byte-order conversions.
 */
static inline void
instr_alu_sub_hh_exec(struct rte_swx_pipeline *p)
{
	struct thread *t = &p->threads[p->thread_id];
	struct instruction *ip = t->ip;

	TRACE("[Thread %2u] sub (hh)\n", p->thread_id);

	/* Structs. */
	ALU_HH(t, ip, -);

	/* Thread. */
	thread_ip_inc(p);
}
4044
/* Execute "sub" with non-header destination and immediate source
 * (ip->alu.src_val); ALU_MI variant.
 */
static inline void
instr_alu_sub_mi_exec(struct rte_swx_pipeline *p)
{
	struct thread *t = &p->threads[p->thread_id];
	struct instruction *ip = t->ip;

	TRACE("[Thread %2u] sub (mi)\n", p->thread_id);

	/* Structs. */
	ALU_MI(t, ip, -);

	/* Thread. */
	thread_ip_inc(p);
}
4059
/* Execute "sub" with header destination and immediate source; ALU_HI
 * variant.
 */
static inline void
instr_alu_sub_hi_exec(struct rte_swx_pipeline *p)
{
	struct thread *t = &p->threads[p->thread_id];
	struct instruction *ip = t->ip;

	TRACE("[Thread %2u] sub (hi)\n", p->thread_id);

	/* Structs. */
	ALU_HI(t, ip, -);

	/* Thread. */
	thread_ip_inc(p);
}
4074
/* Execute "shl" for the current thread: applies '<<' via the generic ALU
 * macro, then advances the instruction pointer.
 */
static inline void
instr_alu_shl_exec(struct rte_swx_pipeline *p)
{
	struct thread *t = &p->threads[p->thread_id];
	struct instruction *ip = t->ip;

	TRACE("[Thread %2u] shl\n", p->thread_id);

	/* Structs. */
	ALU(t, ip, <<);

	/* Thread. */
	thread_ip_inc(p);
}
4089
/* Execute "shl" with non-header destination and header source; ALU_MH
 * handles the byte-order conversion.
 */
static inline void
instr_alu_shl_mh_exec(struct rte_swx_pipeline *p)
{
	struct thread *t = &p->threads[p->thread_id];
	struct instruction *ip = t->ip;

	TRACE("[Thread %2u] shl (mh)\n", p->thread_id);

	/* Structs. */
	ALU_MH(t, ip, <<);

	/* Thread. */
	thread_ip_inc(p);
}
4104
/* Execute "shl" with header destination and non-header source; ALU_HM
 * handles the byte-order conversion.
 */
static inline void
instr_alu_shl_hm_exec(struct rte_swx_pipeline *p)
{
	struct thread *t = &p->threads[p->thread_id];
	struct instruction *ip = t->ip;

	TRACE("[Thread %2u] shl (hm)\n", p->thread_id);

	/* Structs. */
	ALU_HM(t, ip, <<);

	/* Thread. */
	thread_ip_inc(p);
}
4119
/* Execute "shl" with both operands in header fields; ALU_HH handles the
 * byte-order conversions.
 */
static inline void
instr_alu_shl_hh_exec(struct rte_swx_pipeline *p)
{
	struct thread *t = &p->threads[p->thread_id];
	struct instruction *ip = t->ip;

	TRACE("[Thread %2u] shl (hh)\n", p->thread_id);

	/* Structs. */
	ALU_HH(t, ip, <<);

	/* Thread. */
	thread_ip_inc(p);
}
4134
/* Execute "shl" with non-header destination and immediate shift amount
 * (ip->alu.src_val); ALU_MI variant.
 */
static inline void
instr_alu_shl_mi_exec(struct rte_swx_pipeline *p)
{
	struct thread *t = &p->threads[p->thread_id];
	struct instruction *ip = t->ip;

	TRACE("[Thread %2u] shl (mi)\n", p->thread_id);

	/* Structs. */
	ALU_MI(t, ip, <<);

	/* Thread. */
	thread_ip_inc(p);
}
4149
/* Execute "shl" with header destination and immediate shift amount; ALU_HI
 * variant.
 */
static inline void
instr_alu_shl_hi_exec(struct rte_swx_pipeline *p)
{
	struct thread *t = &p->threads[p->thread_id];
	struct instruction *ip = t->ip;

	TRACE("[Thread %2u] shl (hi)\n", p->thread_id);

	/* Structs. */
	ALU_HI(t, ip, <<);

	/* Thread. */
	thread_ip_inc(p);
}
4164
/* Execute "shr" for the current thread: applies '>>' via the generic ALU
 * macro, then advances the instruction pointer.
 */
static inline void
instr_alu_shr_exec(struct rte_swx_pipeline *p)
{
	struct thread *t = &p->threads[p->thread_id];
	struct instruction *ip = t->ip;

	TRACE("[Thread %2u] shr\n", p->thread_id);

	/* Structs. */
	ALU(t, ip, >>);

	/* Thread. */
	thread_ip_inc(p);
}
4179
/* Execute "shr" with non-header destination and header source; ALU_MH
 * handles the byte-order conversion.
 */
static inline void
instr_alu_shr_mh_exec(struct rte_swx_pipeline *p)
{
	struct thread *t = &p->threads[p->thread_id];
	struct instruction *ip = t->ip;

	TRACE("[Thread %2u] shr (mh)\n", p->thread_id);

	/* Structs. */
	ALU_MH(t, ip, >>);

	/* Thread. */
	thread_ip_inc(p);
}
4194
/* Execute "shr" with header destination and non-header source; ALU_HM
 * handles the byte-order conversion.
 */
static inline void
instr_alu_shr_hm_exec(struct rte_swx_pipeline *p)
{
	struct thread *t = &p->threads[p->thread_id];
	struct instruction *ip = t->ip;

	TRACE("[Thread %2u] shr (hm)\n", p->thread_id);

	/* Structs. */
	ALU_HM(t, ip, >>);

	/* Thread. */
	thread_ip_inc(p);
}
4209
/* Execute "shr" with both operands in header fields; ALU_HH handles the
 * byte-order conversions.
 */
static inline void
instr_alu_shr_hh_exec(struct rte_swx_pipeline *p)
{
	struct thread *t = &p->threads[p->thread_id];
	struct instruction *ip = t->ip;

	TRACE("[Thread %2u] shr (hh)\n", p->thread_id);

	/* Structs. */
	ALU_HH(t, ip, >>);

	/* Thread. */
	thread_ip_inc(p);
}
4224
/* Execute "shr" with non-header destination and immediate shift amount
 * (ip->alu.src_val); ALU_MI variant.
 */
static inline void
instr_alu_shr_mi_exec(struct rte_swx_pipeline *p)
{
	struct thread *t = &p->threads[p->thread_id];
	struct instruction *ip = t->ip;

	TRACE("[Thread %2u] shr (mi)\n", p->thread_id);

	/* Structs. */
	ALU_MI(t, ip, >>);

	/* Thread. */
	thread_ip_inc(p);
}
4239
/* Execute "shr" with header destination and immediate shift amount; ALU_HI
 * variant.
 */
static inline void
instr_alu_shr_hi_exec(struct rte_swx_pipeline *p)
{
	struct thread *t = &p->threads[p->thread_id];
	struct instruction *ip = t->ip;

	TRACE("[Thread %2u] shr (hi)\n", p->thread_id);

	/* Structs. */
	ALU_HI(t, ip, >>);

	/* Thread. */
	thread_ip_inc(p);
}
4254
/* Execute "and" for the current thread: applies '&' via the generic ALU
 * macro, then advances the instruction pointer.
 */
static inline void
instr_alu_and_exec(struct rte_swx_pipeline *p)
{
	struct thread *t = &p->threads[p->thread_id];
	struct instruction *ip = t->ip;

	TRACE("[Thread %2u] and\n", p->thread_id);

	/* Structs. */
	ALU(t, ip, &);

	/* Thread. */
	thread_ip_inc(p);
}
4269
/* Execute "and" when exactly one operand is a header field (the translate
 * function selects this byte-swapping ALU_S variant for mixed operands).
 */
static inline void
instr_alu_and_s_exec(struct rte_swx_pipeline *p)
{
	struct thread *t = &p->threads[p->thread_id];
	struct instruction *ip = t->ip;

	TRACE("[Thread %2u] and (s)\n", p->thread_id);

	/* Structs. */
	ALU_S(t, ip, &);

	/* Thread. */
	thread_ip_inc(p);
}
4284
/* Execute "and" with an immediate source (ip->alu.src_val, pre-converted at
 * translate time for header destinations); ALU_I variant.
 */
static inline void
instr_alu_and_i_exec(struct rte_swx_pipeline *p)
{
	struct thread *t = &p->threads[p->thread_id];
	struct instruction *ip = t->ip;

	TRACE("[Thread %2u] and (i)\n", p->thread_id);

	/* Structs. */
	ALU_I(t, ip, &);

	/* Thread. */
	thread_ip_inc(p);
}
4299
/* Execute "or" for the current thread: applies '|' via the generic ALU
 * macro, then advances the instruction pointer.
 */
static inline void
instr_alu_or_exec(struct rte_swx_pipeline *p)
{
	struct thread *t = &p->threads[p->thread_id];
	struct instruction *ip = t->ip;

	TRACE("[Thread %2u] or\n", p->thread_id);

	/* Structs. */
	ALU(t, ip, |);

	/* Thread. */
	thread_ip_inc(p);
}
4314
/* Execute "or" when exactly one operand is a header field (byte-swapping
 * ALU_S variant, selected at translate time for mixed operands).
 */
static inline void
instr_alu_or_s_exec(struct rte_swx_pipeline *p)
{
	struct thread *t = &p->threads[p->thread_id];
	struct instruction *ip = t->ip;

	TRACE("[Thread %2u] or (s)\n", p->thread_id);

	/* Structs. */
	ALU_S(t, ip, |);

	/* Thread. */
	thread_ip_inc(p);
}
4329
/* Execute "or" with an immediate source (ip->alu.src_val, pre-converted at
 * translate time for header destinations); ALU_I variant.
 */
static inline void
instr_alu_or_i_exec(struct rte_swx_pipeline *p)
{
	struct thread *t = &p->threads[p->thread_id];
	struct instruction *ip = t->ip;

	TRACE("[Thread %2u] or (i)\n", p->thread_id);

	/* Structs. */
	ALU_I(t, ip, |);

	/* Thread. */
	thread_ip_inc(p);
}
4344
/* Execute "xor" for the current thread: applies '^' via the generic ALU
 * macro, then advances the instruction pointer.
 */
static inline void
instr_alu_xor_exec(struct rte_swx_pipeline *p)
{
	struct thread *t = &p->threads[p->thread_id];
	struct instruction *ip = t->ip;

	TRACE("[Thread %2u] xor\n", p->thread_id);

	/* Structs. */
	ALU(t, ip, ^);

	/* Thread. */
	thread_ip_inc(p);
}
4359
/* Execute "xor" when exactly one operand is a header field (byte-swapping
 * ALU_S variant, selected at translate time for mixed operands).
 */
static inline void
instr_alu_xor_s_exec(struct rte_swx_pipeline *p)
{
	struct thread *t = &p->threads[p->thread_id];
	struct instruction *ip = t->ip;

	TRACE("[Thread %2u] xor (s)\n", p->thread_id);

	/* Structs. */
	ALU_S(t, ip, ^);

	/* Thread. */
	thread_ip_inc(p);
}
4374
/* Execute "xor" with an immediate source (ip->alu.src_val, pre-converted at
 * translate time for header destinations); ALU_I variant.
 */
static inline void
instr_alu_xor_i_exec(struct rte_swx_pipeline *p)
{
	struct thread *t = &p->threads[p->thread_id];
	struct instruction *ip = t->ip;

	TRACE("[Thread %2u] xor (i)\n", p->thread_id);

	/* Structs. */
	ALU_I(t, ip, ^);

	/* Thread. */
	thread_ip_inc(p);
}
4389
/*
 * Execute "ckadd" with a single-field source: fold the (up to 64-bit) src
 * header field into the 16-bit one's complement checksum stored at dst.
 * The current checksum is complemented, the new contribution is added with
 * end-around-carry folding, and the result is re-complemented; a zero result
 * is mapped to 0xFFFF (both encode zero in one's complement arithmetic).
 *
 * NOTE(review): dst16_ptr/src64_ptr come from casting byte pointers; this
 * assumes suitable field alignment and a build that tolerates this type
 * punning (e.g. -fno-strict-aliasing) — confirm against the build flags.
 */
static inline void
instr_alu_ckadd_field_exec(struct rte_swx_pipeline *p)
{
	struct thread *t = &p->threads[p->thread_id];
	struct instruction *ip = t->ip;
	uint8_t *dst_struct, *src_struct;
	uint16_t *dst16_ptr, dst;
	uint64_t *src64_ptr, src64, src64_mask, src;
	uint64_t r;

	TRACE("[Thread %2u] ckadd (field)\n", p->thread_id);

	/* Structs. */
	dst_struct = t->structs[ip->alu.dst.struct_id];
	dst16_ptr = (uint16_t *)&dst_struct[ip->alu.dst.offset];
	dst = *dst16_ptr;

	src_struct = t->structs[ip->alu.src.struct_id];
	src64_ptr = (uint64_t *)&src_struct[ip->alu.src.offset];
	src64 = *src64_ptr;
	/* Keep only the field's n_bits (assumes 1 <= n_bits <= 64). */
	src64_mask = UINT64_MAX >> (64 - ip->alu.src.n_bits);
	src = src64 & src64_mask;

	r = dst;
	r = ~r & 0xFFFF;

	/* The first input (r) is a 16-bit number. The second and the third
	 * inputs are 32-bit numbers. In the worst case scenario, the sum of the
	 * three numbers (output r) is a 34-bit number.
	 */
	r += (src >> 32) + (src & 0xFFFFFFFF);

	/* The first input is a 16-bit number. The second input is an 18-bit
	 * number. In the worst case scenario, the sum of the two numbers is a
	 * 19-bit number.
	 */
	r = (r & 0xFFFF) + (r >> 16);

	/* The first input is a 16-bit number (0 .. 0xFFFF). The second input is
	 * a 3-bit number (0 .. 7). Their sum is a 17-bit number (0 .. 0x10006).
	 */
	r = (r & 0xFFFF) + (r >> 16);

	/* When the input r is (0 .. 0xFFFF), the output r is equal to the input
	 * r, so the output is (0 .. 0xFFFF). When the input r is (0x10000 ..
	 * 0x10006), the output r is (0 .. 7). So no carry bit can be generated,
	 * therefore the output r is always a 16-bit number.
	 */
	r = (r & 0xFFFF) + (r >> 16);

	r = ~r & 0xFFFF;
	r = r ? r : 0xFFFF;

	*dst16_ptr = (uint16_t)r;

	/* Thread. */
	thread_ip_inc(p);
}
4448
/*
 * Execute "cksub": remove the (up to 64-bit) src header field's contribution
 * from the 16-bit one's complement checksum at dst. A large multiple of the
 * 0xFFFF modulus is added first so the subtraction cannot underflow; the
 * result is then folded back to 16 bits, re-complemented, and a zero result
 * is mapped to 0xFFFF.
 *
 * NOTE(review): same type-punning caveat as ckadd (byte pointers cast to
 * uint16_t*/uint64_t*) — assumes alignment and -fno-strict-aliasing-style
 * build settings; confirm.
 */
static inline void
instr_alu_cksub_field_exec(struct rte_swx_pipeline *p)
{
	struct thread *t = &p->threads[p->thread_id];
	struct instruction *ip = t->ip;
	uint8_t *dst_struct, *src_struct;
	uint16_t *dst16_ptr, dst;
	uint64_t *src64_ptr, src64, src64_mask, src;
	uint64_t r;

	TRACE("[Thread %2u] cksub (field)\n", p->thread_id);

	/* Structs. */
	dst_struct = t->structs[ip->alu.dst.struct_id];
	dst16_ptr = (uint16_t *)&dst_struct[ip->alu.dst.offset];
	dst = *dst16_ptr;

	src_struct = t->structs[ip->alu.src.struct_id];
	src64_ptr = (uint64_t *)&src_struct[ip->alu.src.offset];
	src64 = *src64_ptr;
	src64_mask = UINT64_MAX >> (64 - ip->alu.src.n_bits);
	src = src64 & src64_mask;

	r = dst;
	r = ~r & 0xFFFF;

	/* Subtraction in 1's complement arithmetic (i.e. a '- b) is the same as
	 * the following sequence of operations in 2's complement arithmetic:
	 * a '- b = (a - b) % 0xFFFF.
	 *
	 * In order to prevent an underflow for the below subtraction, in which
	 * a 33-bit number (the subtrahend) is taken out of a 16-bit number (the
	 * minuend), we first add a multiple of the 0xFFFF modulus to the
	 * minuend. The number we add to the minuend needs to be a 34-bit number
	 * or higher, so for readability reasons we picked the 36-bit multiple.
	 * We are effectively turning the 16-bit minuend into a 36-bit number:
	 * (a - b) % 0xFFFF = (a + 0xFFFF00000 - b) % 0xFFFF.
	 */
	r += 0xFFFF00000ULL; /* The output r is a 36-bit number. */

	/* A 33-bit number is subtracted from a 36-bit number (the input r). The
	 * result (the output r) is a 36-bit number.
	 */
	r -= (src >> 32) + (src & 0xFFFFFFFF);

	/* The first input is a 16-bit number. The second input is a 20-bit
	 * number. Their sum is a 21-bit number.
	 */
	r = (r & 0xFFFF) + (r >> 16);

	/* The first input is a 16-bit number (0 .. 0xFFFF). The second input is
	 * a 5-bit number (0 .. 31). The sum is a 17-bit number (0 .. 0x1001E).
	 */
	r = (r & 0xFFFF) + (r >> 16);

	/* When the input r is (0 .. 0xFFFF), the output r is equal to the input
	 * r, so the output is (0 .. 0xFFFF). When the input r is (0x10000 ..
	 * 0x1001E), the output r is (0 .. 31). So no carry bit can be
	 * generated, therefore the output r is always a 16-bit number.
	 */
	r = (r & 0xFFFF) + (r >> 16);

	r = ~r & 0xFFFF;
	r = r ? r : 0xFFFF;

	*dst16_ptr = (uint16_t)r;

	/* Thread. */
	thread_ip_inc(p);
}
4519
/*
 * Execute "ckadd" specialized for a 20-byte source header (selected at
 * translate time when hsrc->st->n_bits / 8 == 20): sum the five 32-bit words
 * with two parallel accumulators, fold with end-around carry to 16 bits,
 * complement, and map a zero result to 0xFFFF. The dst field's previous
 * value is ignored — the checksum is computed from scratch.
 *
 * NOTE(review): byte pointers cast to uint16_t*/uint32_t* — assumes
 * alignment and -fno-strict-aliasing-style build settings; confirm.
 */
static inline void
instr_alu_ckadd_struct20_exec(struct rte_swx_pipeline *p)
{
	struct thread *t = &p->threads[p->thread_id];
	struct instruction *ip = t->ip;
	uint8_t *dst_struct, *src_struct;
	uint16_t *dst16_ptr;
	uint32_t *src32_ptr;
	uint64_t r0, r1;

	TRACE("[Thread %2u] ckadd (struct of 20 bytes)\n", p->thread_id);

	/* Structs. */
	dst_struct = t->structs[ip->alu.dst.struct_id];
	dst16_ptr = (uint16_t *)&dst_struct[ip->alu.dst.offset];

	src_struct = t->structs[ip->alu.src.struct_id];
	src32_ptr = (uint32_t *)&src_struct[0];

	r0 = src32_ptr[0]; /* r0 is a 32-bit number. */
	r1 = src32_ptr[1]; /* r1 is a 32-bit number. */
	r0 += src32_ptr[2]; /* The output r0 is a 33-bit number. */
	r1 += src32_ptr[3]; /* The output r1 is a 33-bit number. */
	r0 += r1 + src32_ptr[4]; /* The output r0 is a 35-bit number. */

	/* The first input is a 16-bit number. The second input is a 19-bit
	 * number. Their sum is a 20-bit number.
	 */
	r0 = (r0 & 0xFFFF) + (r0 >> 16);

	/* The first input is a 16-bit number (0 .. 0xFFFF). The second input is
	 * a 4-bit number (0 .. 15). The sum is a 17-bit number (0 .. 0x1000E).
	 */
	r0 = (r0 & 0xFFFF) + (r0 >> 16);

	/* When the input r is (0 .. 0xFFFF), the output r is equal to the input
	 * r, so the output is (0 .. 0xFFFF). When the input r is (0x10000 ..
	 * 0x1000E), the output r is (0 .. 15). So no carry bit can be
	 * generated, therefore the output r is always a 16-bit number.
	 */
	r0 = (r0 & 0xFFFF) + (r0 >> 16);

	r0 = ~r0 & 0xFFFF;
	r0 = r0 ? r0 : 0xFFFF;

	*dst16_ptr = (uint16_t)r0;

	/* Thread. */
	thread_ip_inc(p);
}
4570
/*
 * Execute "ckadd" for an arbitrary-size source header: sum all its 32-bit
 * words (src.n_bits / 32 of them), fold with end-around carry to 16 bits,
 * complement, and map a zero result to 0xFFFF. The dst field's previous
 * value is ignored — the checksum is computed from scratch.
 *
 * NOTE(review): byte pointers cast to uint16_t*/uint32_t* — assumes
 * alignment and -fno-strict-aliasing-style build settings; confirm.
 */
static inline void
instr_alu_ckadd_struct_exec(struct rte_swx_pipeline *p)
{
	struct thread *t = &p->threads[p->thread_id];
	struct instruction *ip = t->ip;
	uint8_t *dst_struct, *src_struct;
	uint16_t *dst16_ptr;
	uint32_t *src32_ptr;
	uint64_t r = 0;
	uint32_t i;

	TRACE("[Thread %2u] ckadd (struct)\n", p->thread_id);

	/* Structs. */
	dst_struct = t->structs[ip->alu.dst.struct_id];
	dst16_ptr = (uint16_t *)&dst_struct[ip->alu.dst.offset];

	src_struct = t->structs[ip->alu.src.struct_id];
	src32_ptr = (uint32_t *)&src_struct[0];

	/* The max number of 32-bit words in a 256-byte header is 8 = 2^3.
	 * Therefore, in the worst case scenario, a 35-bit number is added to a
	 * 16-bit number (the input r), so the output r is 36-bit number.
	 *
	 * NOTE(review): 256 bytes actually hold 64 32-bit words; 8 words
	 * correspond to a 32-byte header, so either the size or the word count
	 * above is off. The repeated folds below converge for either bound.
	 */
	for (i = 0; i < ip->alu.src.n_bits / 32; i++, src32_ptr++)
		r += *src32_ptr;

	/* The first input is a 16-bit number. The second input is a 20-bit
	 * number. Their sum is a 21-bit number.
	 */
	r = (r & 0xFFFF) + (r >> 16);

	/* The first input is a 16-bit number (0 .. 0xFFFF). The second input is
	 * a 5-bit number (0 .. 31). The sum is a 17-bit number (0 .. 0x1001E).
	 */
	r = (r & 0xFFFF) + (r >> 16);

	/* When the input r is (0 .. 0xFFFF), the output r is equal to the input
	 * r, so the output is (0 .. 0xFFFF). When the input r is (0x10000 ..
	 * 0x1001E), the output r is (0 .. 31). So no carry bit can be
	 * generated, therefore the output r is always a 16-bit number.
	 */
	r = (r & 0xFFFF) + (r >> 16);

	r = ~r & 0xFFFF;
	r = r ? r : 0xFFFF;

	*dst16_ptr = (uint16_t)r;

	/* Thread. */
	thread_ip_inc(p);
}
4623
4624 /*
4625 * jmp.
4626 */
4627 static struct action *
4628 action_find(struct rte_swx_pipeline *p, const char *name);
4629
/* Translate the unconditional "jmp LABEL" instruction.
 * tokens[1] is the target label; the target instruction pointer is filled in
 * later by instr_jmp_resolve(). Returns 0 on success, -EINVAL on bad syntax.
 */
static int
instr_jmp_translate(struct rte_swx_pipeline *p __rte_unused,
		    struct action *action __rte_unused,
		    char **tokens,
		    int n_tokens,
		    struct instruction *instr,
		    struct instruction_data *data)
{
	CHECK(n_tokens == 2, EINVAL);

	strcpy(data->jmp_label, tokens[1]);

	instr->type = INSTR_JMP;
	instr->jmp.ip = NULL; /* Resolved later. */
	return 0;
}
4646
/* Translate "jmpv LABEL HEADER": jump when HEADER is valid.
 * tokens[1] is the target label, tokens[2] the header name.
 * Returns 0 on success, -EINVAL on bad syntax or unknown header.
 */
static int
instr_jmp_valid_translate(struct rte_swx_pipeline *p,
			  struct action *action __rte_unused,
			  char **tokens,
			  int n_tokens,
			  struct instruction *instr,
			  struct instruction_data *data)
{
	struct header *h;

	CHECK(n_tokens == 3, EINVAL);

	strcpy(data->jmp_label, tokens[1]);

	h = header_parse(p, tokens[2]);
	CHECK(h, EINVAL);

	instr->type = INSTR_JMP_VALID;
	instr->jmp.ip = NULL; /* Resolved later. */
	instr->jmp.header_id = h->id;
	return 0;
}
4669
/* Translate "jmpnv LABEL HEADER": jump when HEADER is NOT valid.
 * Mirror image of instr_jmp_valid_translate().
 */
static int
instr_jmp_invalid_translate(struct rte_swx_pipeline *p,
			    struct action *action __rte_unused,
			    char **tokens,
			    int n_tokens,
			    struct instruction *instr,
			    struct instruction_data *data)
{
	struct header *h;

	CHECK(n_tokens == 3, EINVAL);

	strcpy(data->jmp_label, tokens[1]);

	h = header_parse(p, tokens[2]);
	CHECK(h, EINVAL);

	instr->type = INSTR_JMP_INVALID;
	instr->jmp.ip = NULL; /* Resolved later. */
	instr->jmp.header_id = h->id;
	return 0;
}
4692
/* Translate "jmph LABEL": jump on table lookup hit.
 * Only valid outside actions (CHECK(!action)), i.e. in the pipeline program.
 */
static int
instr_jmp_hit_translate(struct rte_swx_pipeline *p __rte_unused,
			struct action *action,
			char **tokens,
			int n_tokens,
			struct instruction *instr,
			struct instruction_data *data)
{
	CHECK(!action, EINVAL);
	CHECK(n_tokens == 2, EINVAL);

	strcpy(data->jmp_label, tokens[1]);

	instr->type = INSTR_JMP_HIT;
	instr->jmp.ip = NULL; /* Resolved later. */
	return 0;
}
4710
/* Translate "jmpnh LABEL": jump on table lookup miss.
 * Only valid outside actions, i.e. in the pipeline program.
 */
static int
instr_jmp_miss_translate(struct rte_swx_pipeline *p __rte_unused,
			 struct action *action,
			 char **tokens,
			 int n_tokens,
			 struct instruction *instr,
			 struct instruction_data *data)
{
	CHECK(!action, EINVAL);
	CHECK(n_tokens == 2, EINVAL);

	strcpy(data->jmp_label, tokens[1]);

	instr->type = INSTR_JMP_MISS;
	instr->jmp.ip = NULL; /* Resolved later. */
	return 0;
}
4728
/* Translate "jmpa LABEL ACTION": jump when the last table lookup selected
 * ACTION. Only valid outside actions; tokens[2] must name a known action.
 */
static int
instr_jmp_action_hit_translate(struct rte_swx_pipeline *p,
			       struct action *action,
			       char **tokens,
			       int n_tokens,
			       struct instruction *instr,
			       struct instruction_data *data)
{
	struct action *a;

	CHECK(!action, EINVAL);
	CHECK(n_tokens == 3, EINVAL);

	strcpy(data->jmp_label, tokens[1]);

	a = action_find(p, tokens[2]);
	CHECK(a, EINVAL);

	instr->type = INSTR_JMP_ACTION_HIT;
	instr->jmp.ip = NULL; /* Resolved later. */
	instr->jmp.action_id = a->id;
	return 0;
}
4752
/* Translate "jmpna LABEL ACTION": jump when the last table lookup did NOT
 * select ACTION. Mirror image of instr_jmp_action_hit_translate().
 */
static int
instr_jmp_action_miss_translate(struct rte_swx_pipeline *p,
				struct action *action,
				char **tokens,
				int n_tokens,
				struct instruction *instr,
				struct instruction_data *data)
{
	struct action *a;

	CHECK(!action, EINVAL);
	CHECK(n_tokens == 3, EINVAL);

	strcpy(data->jmp_label, tokens[1]);

	a = action_find(p, tokens[2]);
	CHECK(a, EINVAL);

	instr->type = INSTR_JMP_ACTION_MISS;
	instr->jmp.ip = NULL; /* Resolved later. */
	instr->jmp.action_id = a->id;
	return 0;
}
4776
/* Translate "jmpeq LABEL A B": jump when A == B.
 * A is always a struct field; B is either a struct field (JMP_EQ, or JMP_EQ_S
 * when exactly one of A/B is a header field, i.e. token starts with 'h') or
 * an immediate (JMP_EQ_I).
 */
static int
instr_jmp_eq_translate(struct rte_swx_pipeline *p,
		       struct action *action,
		       char **tokens,
		       int n_tokens,
		       struct instruction *instr,
		       struct instruction_data *data)
{
	char *a = tokens[2], *b = tokens[3];
	struct field *fa, *fb;
	uint64_t b_val;
	uint32_t a_struct_id, b_struct_id;

	CHECK(n_tokens == 4, EINVAL);

	strcpy(data->jmp_label, tokens[1]);

	fa = struct_field_parse(p, action, a, &a_struct_id);
	CHECK(fa, EINVAL);

	/* JMP_EQ or JMP_EQ_S. */
	fb = struct_field_parse(p, action, b, &b_struct_id);
	if (fb) {
		instr->type = INSTR_JMP_EQ;
		if ((a[0] == 'h' && b[0] != 'h') ||
		    (a[0] != 'h' && b[0] == 'h'))
			instr->type = INSTR_JMP_EQ_S;
		instr->jmp.ip = NULL; /* Resolved later. */

		instr->jmp.a.struct_id = (uint8_t)a_struct_id;
		instr->jmp.a.n_bits = fa->n_bits;
		instr->jmp.a.offset = fa->offset / 8;
		instr->jmp.b.struct_id = (uint8_t)b_struct_id;
		instr->jmp.b.n_bits = fb->n_bits;
		instr->jmp.b.offset = fb->offset / 8;
		return 0;
	}

	/* JMP_EQ_I. */
	b_val = strtoull(b, &b, 0);
	CHECK(!b[0], EINVAL);

	/* When A is a header field ('h' prefix), pre-convert the immediate to
	 * network byte order, right-aligned to the field width, so the exec
	 * path can compare raw bytes directly.
	 */
	if (a[0] == 'h')
		b_val = hton64(b_val) >> (64 - fa->n_bits);

	instr->type = INSTR_JMP_EQ_I;
	instr->jmp.ip = NULL; /* Resolved later. */
	instr->jmp.a.struct_id = (uint8_t)a_struct_id;
	instr->jmp.a.n_bits = fa->n_bits;
	instr->jmp.a.offset = fa->offset / 8;
	instr->jmp.b_val = b_val;
	return 0;
}
4830
/* Translate "jmpneq LABEL A B": jump when A != B.
 * Same operand rules as instr_jmp_eq_translate(): field vs field (JMP_NEQ /
 * JMP_NEQ_S for mixed header+non-header operands) or field vs immediate
 * (JMP_NEQ_I).
 */
static int
instr_jmp_neq_translate(struct rte_swx_pipeline *p,
			struct action *action,
			char **tokens,
			int n_tokens,
			struct instruction *instr,
			struct instruction_data *data)
{
	char *a = tokens[2], *b = tokens[3];
	struct field *fa, *fb;
	uint64_t b_val;
	uint32_t a_struct_id, b_struct_id;

	CHECK(n_tokens == 4, EINVAL);

	strcpy(data->jmp_label, tokens[1]);

	fa = struct_field_parse(p, action, a, &a_struct_id);
	CHECK(fa, EINVAL);

	/* JMP_NEQ or JMP_NEQ_S. */
	fb = struct_field_parse(p, action, b, &b_struct_id);
	if (fb) {
		instr->type = INSTR_JMP_NEQ;
		if ((a[0] == 'h' && b[0] != 'h') ||
		    (a[0] != 'h' && b[0] == 'h'))
			instr->type = INSTR_JMP_NEQ_S;
		instr->jmp.ip = NULL; /* Resolved later. */

		instr->jmp.a.struct_id = (uint8_t)a_struct_id;
		instr->jmp.a.n_bits = fa->n_bits;
		instr->jmp.a.offset = fa->offset / 8;
		instr->jmp.b.struct_id = (uint8_t)b_struct_id;
		instr->jmp.b.n_bits = fb->n_bits;
		instr->jmp.b.offset = fb->offset / 8;
		return 0;
	}

	/* JMP_NEQ_I. */
	b_val = strtoull(b, &b, 0);
	CHECK(!b[0], EINVAL);

	/* Header field operand: store the immediate in network byte order,
	 * right-aligned to the field width.
	 */
	if (a[0] == 'h')
		b_val = hton64(b_val) >> (64 - fa->n_bits);

	instr->type = INSTR_JMP_NEQ_I;
	instr->jmp.ip = NULL; /* Resolved later. */
	instr->jmp.a.struct_id = (uint8_t)a_struct_id;
	instr->jmp.a.n_bits = fa->n_bits;
	instr->jmp.a.offset = fa->offset / 8;
	instr->jmp.b_val = b_val;
	return 0;
}
4884
/* Translate "jmplt LABEL A B": jump when A < B.
 * The instruction variant encodes the operand kinds: H = header field,
 * M = metadata/other field, I = immediate (e.g. JMP_LT_HM means A is a
 * header field and B is metadata). Unlike eq/neq, the immediate is kept in
 * host byte order; the exec variants handle operand endianness.
 */
static int
instr_jmp_lt_translate(struct rte_swx_pipeline *p,
		       struct action *action,
		       char **tokens,
		       int n_tokens,
		       struct instruction *instr,
		       struct instruction_data *data)
{
	char *a = tokens[2], *b = tokens[3];
	struct field *fa, *fb;
	uint64_t b_val;
	uint32_t a_struct_id, b_struct_id;

	CHECK(n_tokens == 4, EINVAL);

	strcpy(data->jmp_label, tokens[1]);

	fa = struct_field_parse(p, action, a, &a_struct_id);
	CHECK(fa, EINVAL);

	/* JMP_LT, JMP_LT_MH, JMP_LT_HM, JMP_LT_HH. */
	fb = struct_field_parse(p, action, b, &b_struct_id);
	if (fb) {
		instr->type = INSTR_JMP_LT;
		if (a[0] == 'h' && b[0] == 'm')
			instr->type = INSTR_JMP_LT_HM;
		if (a[0] == 'm' && b[0] == 'h')
			instr->type = INSTR_JMP_LT_MH;
		if (a[0] == 'h' && b[0] == 'h')
			instr->type = INSTR_JMP_LT_HH;
		instr->jmp.ip = NULL; /* Resolved later. */

		instr->jmp.a.struct_id = (uint8_t)a_struct_id;
		instr->jmp.a.n_bits = fa->n_bits;
		instr->jmp.a.offset = fa->offset / 8;
		instr->jmp.b.struct_id = (uint8_t)b_struct_id;
		instr->jmp.b.n_bits = fb->n_bits;
		instr->jmp.b.offset = fb->offset / 8;
		return 0;
	}

	/* JMP_LT_MI, JMP_LT_HI. */
	b_val = strtoull(b, &b, 0);
	CHECK(!b[0], EINVAL);

	instr->type = INSTR_JMP_LT_MI;
	if (a[0] == 'h')
		instr->type = INSTR_JMP_LT_HI;
	instr->jmp.ip = NULL; /* Resolved later. */

	instr->jmp.a.struct_id = (uint8_t)a_struct_id;
	instr->jmp.a.n_bits = fa->n_bits;
	instr->jmp.a.offset = fa->offset / 8;
	instr->jmp.b_val = b_val;
	return 0;
}
4941
/* Translate "jmpgt LABEL A B": jump when A > B.
 * Operand-kind encoding (H/M/I suffixes) is identical to
 * instr_jmp_lt_translate().
 */
static int
instr_jmp_gt_translate(struct rte_swx_pipeline *p,
		       struct action *action,
		       char **tokens,
		       int n_tokens,
		       struct instruction *instr,
		       struct instruction_data *data)
{
	char *a = tokens[2], *b = tokens[3];
	struct field *fa, *fb;
	uint64_t b_val;
	uint32_t a_struct_id, b_struct_id;

	CHECK(n_tokens == 4, EINVAL);

	strcpy(data->jmp_label, tokens[1]);

	fa = struct_field_parse(p, action, a, &a_struct_id);
	CHECK(fa, EINVAL);

	/* JMP_GT, JMP_GT_MH, JMP_GT_HM, JMP_GT_HH. */
	fb = struct_field_parse(p, action, b, &b_struct_id);
	if (fb) {
		instr->type = INSTR_JMP_GT;
		if (a[0] == 'h' && b[0] == 'm')
			instr->type = INSTR_JMP_GT_HM;
		if (a[0] == 'm' && b[0] == 'h')
			instr->type = INSTR_JMP_GT_MH;
		if (a[0] == 'h' && b[0] == 'h')
			instr->type = INSTR_JMP_GT_HH;
		instr->jmp.ip = NULL; /* Resolved later. */

		instr->jmp.a.struct_id = (uint8_t)a_struct_id;
		instr->jmp.a.n_bits = fa->n_bits;
		instr->jmp.a.offset = fa->offset / 8;
		instr->jmp.b.struct_id = (uint8_t)b_struct_id;
		instr->jmp.b.n_bits = fb->n_bits;
		instr->jmp.b.offset = fb->offset / 8;
		return 0;
	}

	/* JMP_GT_MI, JMP_GT_HI. */
	b_val = strtoull(b, &b, 0);
	CHECK(!b[0], EINVAL);

	instr->type = INSTR_JMP_GT_MI;
	if (a[0] == 'h')
		instr->type = INSTR_JMP_GT_HI;
	instr->jmp.ip = NULL; /* Resolved later. */

	instr->jmp.a.struct_id = (uint8_t)a_struct_id;
	instr->jmp.a.n_bits = fa->n_bits;
	instr->jmp.a.offset = fa->offset / 8;
	instr->jmp.b_val = b_val;
	return 0;
}
4998
/* jmp: unconditional branch to the pre-resolved target instruction. */
static inline void
instr_jmp_exec(struct rte_swx_pipeline *p)
{
	struct thread *t = &p->threads[p->thread_id];
	struct instruction *ip = t->ip;

	TRACE("[Thread %2u] jmp\n", p->thread_id);

	thread_ip_set(t, ip->jmp.ip);
}
5009
5010 static inline void
instr_jmp_valid_exec(struct rte_swx_pipeline * p)5011 instr_jmp_valid_exec(struct rte_swx_pipeline *p)
5012 {
5013 struct thread *t = &p->threads[p->thread_id];
5014 struct instruction *ip = t->ip;
5015 uint32_t header_id = ip->jmp.header_id;
5016
5017 TRACE("[Thread %2u] jmpv\n", p->thread_id);
5018
5019 t->ip = HEADER_VALID(t, header_id) ? ip->jmp.ip : (t->ip + 1);
5020 }
5021
5022 static inline void
instr_jmp_invalid_exec(struct rte_swx_pipeline * p)5023 instr_jmp_invalid_exec(struct rte_swx_pipeline *p)
5024 {
5025 struct thread *t = &p->threads[p->thread_id];
5026 struct instruction *ip = t->ip;
5027 uint32_t header_id = ip->jmp.header_id;
5028
5029 TRACE("[Thread %2u] jmpnv\n", p->thread_id);
5030
5031 t->ip = HEADER_VALID(t, header_id) ? (t->ip + 1) : ip->jmp.ip;
5032 }
5033
5034 static inline void
instr_jmp_hit_exec(struct rte_swx_pipeline * p)5035 instr_jmp_hit_exec(struct rte_swx_pipeline *p)
5036 {
5037 struct thread *t = &p->threads[p->thread_id];
5038 struct instruction *ip = t->ip;
5039 struct instruction *ip_next[] = {t->ip + 1, ip->jmp.ip};
5040
5041 TRACE("[Thread %2u] jmph\n", p->thread_id);
5042
5043 t->ip = ip_next[t->hit];
5044 }
5045
5046 static inline void
instr_jmp_miss_exec(struct rte_swx_pipeline * p)5047 instr_jmp_miss_exec(struct rte_swx_pipeline *p)
5048 {
5049 struct thread *t = &p->threads[p->thread_id];
5050 struct instruction *ip = t->ip;
5051 struct instruction *ip_next[] = {ip->jmp.ip, t->ip + 1};
5052
5053 TRACE("[Thread %2u] jmpnh\n", p->thread_id);
5054
5055 t->ip = ip_next[t->hit];
5056 }
5057
5058 static inline void
instr_jmp_action_hit_exec(struct rte_swx_pipeline * p)5059 instr_jmp_action_hit_exec(struct rte_swx_pipeline *p)
5060 {
5061 struct thread *t = &p->threads[p->thread_id];
5062 struct instruction *ip = t->ip;
5063
5064 TRACE("[Thread %2u] jmpa\n", p->thread_id);
5065
5066 t->ip = (ip->jmp.action_id == t->action_id) ? ip->jmp.ip : (t->ip + 1);
5067 }
5068
5069 static inline void
instr_jmp_action_miss_exec(struct rte_swx_pipeline * p)5070 instr_jmp_action_miss_exec(struct rte_swx_pipeline *p)
5071 {
5072 struct thread *t = &p->threads[p->thread_id];
5073 struct instruction *ip = t->ip;
5074
5075 TRACE("[Thread %2u] jmpna\n", p->thread_id);
5076
5077 t->ip = (ip->jmp.action_id == t->action_id) ? (t->ip + 1) : ip->jmp.ip;
5078 }
5079
/* jmpeq: conditional branch, taken when operands a and b compare equal.
 * Operand load, comparison and ip update are delegated to the JMP_CMP macro.
 */
static inline void
instr_jmp_eq_exec(struct rte_swx_pipeline *p)
{
	struct thread *t = &p->threads[p->thread_id];
	struct instruction *ip = t->ip;

	TRACE("[Thread %2u] jmpeq\n", p->thread_id);

	JMP_CMP(t, ip, ==);
}
5090
/* jmpeq (s): equality branch for mixed operand kinds (exactly one operand is
 * a header field — see instr_jmp_eq_translate); JMP_CMP_S handles the
 * byte-order difference.
 */
static inline void
instr_jmp_eq_s_exec(struct rte_swx_pipeline *p)
{
	struct thread *t = &p->threads[p->thread_id];
	struct instruction *ip = t->ip;

	TRACE("[Thread %2u] jmpeq (s)\n", p->thread_id);

	JMP_CMP_S(t, ip, ==);
}
5101
/* jmpeq (i): equality branch against the immediate stored in ip->jmp.b_val
 * (already byte-order adjusted at translation time).
 */
static inline void
instr_jmp_eq_i_exec(struct rte_swx_pipeline *p)
{
	struct thread *t = &p->threads[p->thread_id];
	struct instruction *ip = t->ip;

	TRACE("[Thread %2u] jmpeq (i)\n", p->thread_id);

	JMP_CMP_I(t, ip, ==);
}
5112
/* jmpneq: conditional branch, taken when operands a and b differ. */
static inline void
instr_jmp_neq_exec(struct rte_swx_pipeline *p)
{
	struct thread *t = &p->threads[p->thread_id];
	struct instruction *ip = t->ip;

	TRACE("[Thread %2u] jmpneq\n", p->thread_id);

	JMP_CMP(t, ip, !=);
}
5123
/* jmpneq (s): inequality branch for mixed operand kinds (exactly one operand
 * is a header field).
 */
static inline void
instr_jmp_neq_s_exec(struct rte_swx_pipeline *p)
{
	struct thread *t = &p->threads[p->thread_id];
	struct instruction *ip = t->ip;

	TRACE("[Thread %2u] jmpneq (s)\n", p->thread_id);

	JMP_CMP_S(t, ip, !=);
}
5134
/* jmpneq (i): inequality branch against the pre-converted immediate. */
static inline void
instr_jmp_neq_i_exec(struct rte_swx_pipeline *p)
{
	struct thread *t = &p->threads[p->thread_id];
	struct instruction *ip = t->ip;

	TRACE("[Thread %2u] jmpneq (i)\n", p->thread_id);

	JMP_CMP_I(t, ip, !=);
}
5145
/* jmplt: branch when a < b, both operands of the same (non-header) kind. */
static inline void
instr_jmp_lt_exec(struct rte_swx_pipeline *p)
{
	struct thread *t = &p->threads[p->thread_id];
	struct instruction *ip = t->ip;

	TRACE("[Thread %2u] jmplt\n", p->thread_id);

	JMP_CMP(t, ip, <);
}
5156
/* jmplt (mh): branch when a < b, with a = metadata field, b = header field
 * (operand kinds set by instr_jmp_lt_translate).
 */
static inline void
instr_jmp_lt_mh_exec(struct rte_swx_pipeline *p)
{
	struct thread *t = &p->threads[p->thread_id];
	struct instruction *ip = t->ip;

	TRACE("[Thread %2u] jmplt (mh)\n", p->thread_id);

	JMP_CMP_MH(t, ip, <);
}
5167
/* jmplt (hm): branch when a < b, with a = header field, b = metadata field. */
static inline void
instr_jmp_lt_hm_exec(struct rte_swx_pipeline *p)
{
	struct thread *t = &p->threads[p->thread_id];
	struct instruction *ip = t->ip;

	TRACE("[Thread %2u] jmplt (hm)\n", p->thread_id);

	JMP_CMP_HM(t, ip, <);
}
5178
/* jmplt (hh): branch when a < b, both operands header fields. */
static inline void
instr_jmp_lt_hh_exec(struct rte_swx_pipeline *p)
{
	struct thread *t = &p->threads[p->thread_id];
	struct instruction *ip = t->ip;

	TRACE("[Thread %2u] jmplt (hh)\n", p->thread_id);

	JMP_CMP_HH(t, ip, <);
}
5189
/* jmplt (mi): branch when a < b, with a = metadata field, b = immediate. */
static inline void
instr_jmp_lt_mi_exec(struct rte_swx_pipeline *p)
{
	struct thread *t = &p->threads[p->thread_id];
	struct instruction *ip = t->ip;

	TRACE("[Thread %2u] jmplt (mi)\n", p->thread_id);

	JMP_CMP_MI(t, ip, <);
}
5200
/* jmplt (hi): branch when a < b, with a = header field, b = immediate. */
static inline void
instr_jmp_lt_hi_exec(struct rte_swx_pipeline *p)
{
	struct thread *t = &p->threads[p->thread_id];
	struct instruction *ip = t->ip;

	TRACE("[Thread %2u] jmplt (hi)\n", p->thread_id);

	JMP_CMP_HI(t, ip, <);
}
5211
/* jmpgt: branch when a > b, both operands of the same (non-header) kind. */
static inline void
instr_jmp_gt_exec(struct rte_swx_pipeline *p)
{
	struct thread *t = &p->threads[p->thread_id];
	struct instruction *ip = t->ip;

	TRACE("[Thread %2u] jmpgt\n", p->thread_id);

	JMP_CMP(t, ip, >);
}
5222
/* jmpgt (mh): branch when a > b, with a = metadata field, b = header field. */
static inline void
instr_jmp_gt_mh_exec(struct rte_swx_pipeline *p)
{
	struct thread *t = &p->threads[p->thread_id];
	struct instruction *ip = t->ip;

	TRACE("[Thread %2u] jmpgt (mh)\n", p->thread_id);

	JMP_CMP_MH(t, ip, >);
}
5233
/* jmpgt (hm): branch when a > b, with a = header field, b = metadata field. */
static inline void
instr_jmp_gt_hm_exec(struct rte_swx_pipeline *p)
{
	struct thread *t = &p->threads[p->thread_id];
	struct instruction *ip = t->ip;

	TRACE("[Thread %2u] jmpgt (hm)\n", p->thread_id);

	JMP_CMP_HM(t, ip, >);
}
5244
/* jmpgt (hh): branch when a > b, both operands header fields. */
static inline void
instr_jmp_gt_hh_exec(struct rte_swx_pipeline *p)
{
	struct thread *t = &p->threads[p->thread_id];
	struct instruction *ip = t->ip;

	TRACE("[Thread %2u] jmpgt (hh)\n", p->thread_id);

	JMP_CMP_HH(t, ip, >);
}
5255
/* jmpgt (mi): branch when a > b, with a = metadata field, b = immediate. */
static inline void
instr_jmp_gt_mi_exec(struct rte_swx_pipeline *p)
{
	struct thread *t = &p->threads[p->thread_id];
	struct instruction *ip = t->ip;

	TRACE("[Thread %2u] jmpgt (mi)\n", p->thread_id);

	JMP_CMP_MI(t, ip, >);
}
5266
/* jmpgt (hi): branch when a > b, with a = header field, b = immediate. */
static inline void
instr_jmp_gt_hi_exec(struct rte_swx_pipeline *p)
{
	struct thread *t = &p->threads[p->thread_id];
	struct instruction *ip = t->ip;

	TRACE("[Thread %2u] jmpgt (hi)\n", p->thread_id);

	JMP_CMP_HI(t, ip, >);
}
5277
5278 /*
5279 * return.
5280 */
/* Translate the "return" instruction. Only valid inside an action
 * (CHECK(action)); the pipeline program terminates via tx instead.
 */
static int
instr_return_translate(struct rte_swx_pipeline *p __rte_unused,
		       struct action *action,
		       char **tokens __rte_unused,
		       int n_tokens,
		       struct instruction *instr,
		       struct instruction_data *data __rte_unused)
{
	CHECK(action, EINVAL);
	CHECK(n_tokens == 1, EINVAL);

	instr->type = INSTR_RETURN;
	return 0;
}
5295
/* return: resume execution at the saved return address (t->ret), i.e. the
 * instruction following the action invocation.
 */
static inline void
instr_return_exec(struct rte_swx_pipeline *p)
{
	struct thread *t = &p->threads[p->thread_id];

	TRACE("[Thread %2u] return\n", p->thread_id);

	t->ip = t->ret;
}
5305
5306 static int
instr_translate(struct rte_swx_pipeline * p,struct action * action,char * string,struct instruction * instr,struct instruction_data * data)5307 instr_translate(struct rte_swx_pipeline *p,
5308 struct action *action,
5309 char *string,
5310 struct instruction *instr,
5311 struct instruction_data *data)
5312 {
5313 char *tokens[RTE_SWX_INSTRUCTION_TOKENS_MAX];
5314 int n_tokens = 0, tpos = 0;
5315
5316 /* Parse the instruction string into tokens. */
5317 for ( ; ; ) {
5318 char *token;
5319
5320 token = strtok_r(string, " \t\v", &string);
5321 if (!token)
5322 break;
5323
5324 CHECK(n_tokens < RTE_SWX_INSTRUCTION_TOKENS_MAX, EINVAL);
5325 CHECK_NAME(token, EINVAL);
5326
5327 tokens[n_tokens] = token;
5328 n_tokens++;
5329 }
5330
5331 CHECK(n_tokens, EINVAL);
5332
5333 /* Handle the optional instruction label. */
5334 if ((n_tokens >= 2) && !strcmp(tokens[1], ":")) {
5335 strcpy(data->label, tokens[0]);
5336
5337 tpos += 2;
5338 CHECK(n_tokens - tpos, EINVAL);
5339 }
5340
5341 /* Identify the instruction type. */
5342 if (!strcmp(tokens[tpos], "rx"))
5343 return instr_rx_translate(p,
5344 action,
5345 &tokens[tpos],
5346 n_tokens - tpos,
5347 instr,
5348 data);
5349
5350 if (!strcmp(tokens[tpos], "tx"))
5351 return instr_tx_translate(p,
5352 action,
5353 &tokens[tpos],
5354 n_tokens - tpos,
5355 instr,
5356 data);
5357
5358 if (!strcmp(tokens[tpos], "extract"))
5359 return instr_hdr_extract_translate(p,
5360 action,
5361 &tokens[tpos],
5362 n_tokens - tpos,
5363 instr,
5364 data);
5365
5366 if (!strcmp(tokens[tpos], "emit"))
5367 return instr_hdr_emit_translate(p,
5368 action,
5369 &tokens[tpos],
5370 n_tokens - tpos,
5371 instr,
5372 data);
5373
5374 if (!strcmp(tokens[tpos], "validate"))
5375 return instr_hdr_validate_translate(p,
5376 action,
5377 &tokens[tpos],
5378 n_tokens - tpos,
5379 instr,
5380 data);
5381
5382 if (!strcmp(tokens[tpos], "invalidate"))
5383 return instr_hdr_invalidate_translate(p,
5384 action,
5385 &tokens[tpos],
5386 n_tokens - tpos,
5387 instr,
5388 data);
5389
5390 if (!strcmp(tokens[tpos], "mov"))
5391 return instr_mov_translate(p,
5392 action,
5393 &tokens[tpos],
5394 n_tokens - tpos,
5395 instr,
5396 data);
5397
5398 if (!strcmp(tokens[tpos], "dma"))
5399 return instr_dma_translate(p,
5400 action,
5401 &tokens[tpos],
5402 n_tokens - tpos,
5403 instr,
5404 data);
5405
5406 if (!strcmp(tokens[tpos], "add"))
5407 return instr_alu_add_translate(p,
5408 action,
5409 &tokens[tpos],
5410 n_tokens - tpos,
5411 instr,
5412 data);
5413
5414 if (!strcmp(tokens[tpos], "sub"))
5415 return instr_alu_sub_translate(p,
5416 action,
5417 &tokens[tpos],
5418 n_tokens - tpos,
5419 instr,
5420 data);
5421
5422 if (!strcmp(tokens[tpos], "ckadd"))
5423 return instr_alu_ckadd_translate(p,
5424 action,
5425 &tokens[tpos],
5426 n_tokens - tpos,
5427 instr,
5428 data);
5429
5430 if (!strcmp(tokens[tpos], "cksub"))
5431 return instr_alu_cksub_translate(p,
5432 action,
5433 &tokens[tpos],
5434 n_tokens - tpos,
5435 instr,
5436 data);
5437
5438 if (!strcmp(tokens[tpos], "and"))
5439 return instr_alu_and_translate(p,
5440 action,
5441 &tokens[tpos],
5442 n_tokens - tpos,
5443 instr,
5444 data);
5445
5446 if (!strcmp(tokens[tpos], "or"))
5447 return instr_alu_or_translate(p,
5448 action,
5449 &tokens[tpos],
5450 n_tokens - tpos,
5451 instr,
5452 data);
5453
5454 if (!strcmp(tokens[tpos], "xor"))
5455 return instr_alu_xor_translate(p,
5456 action,
5457 &tokens[tpos],
5458 n_tokens - tpos,
5459 instr,
5460 data);
5461
5462 if (!strcmp(tokens[tpos], "shl"))
5463 return instr_alu_shl_translate(p,
5464 action,
5465 &tokens[tpos],
5466 n_tokens - tpos,
5467 instr,
5468 data);
5469
5470 if (!strcmp(tokens[tpos], "shr"))
5471 return instr_alu_shr_translate(p,
5472 action,
5473 &tokens[tpos],
5474 n_tokens - tpos,
5475 instr,
5476 data);
5477
5478 if (!strcmp(tokens[tpos], "table"))
5479 return instr_table_translate(p,
5480 action,
5481 &tokens[tpos],
5482 n_tokens - tpos,
5483 instr,
5484 data);
5485
5486 if (!strcmp(tokens[tpos], "extern"))
5487 return instr_extern_translate(p,
5488 action,
5489 &tokens[tpos],
5490 n_tokens - tpos,
5491 instr,
5492 data);
5493
5494 if (!strcmp(tokens[tpos], "jmp"))
5495 return instr_jmp_translate(p,
5496 action,
5497 &tokens[tpos],
5498 n_tokens - tpos,
5499 instr,
5500 data);
5501
5502 if (!strcmp(tokens[tpos], "jmpv"))
5503 return instr_jmp_valid_translate(p,
5504 action,
5505 &tokens[tpos],
5506 n_tokens - tpos,
5507 instr,
5508 data);
5509
5510 if (!strcmp(tokens[tpos], "jmpnv"))
5511 return instr_jmp_invalid_translate(p,
5512 action,
5513 &tokens[tpos],
5514 n_tokens - tpos,
5515 instr,
5516 data);
5517
5518 if (!strcmp(tokens[tpos], "jmph"))
5519 return instr_jmp_hit_translate(p,
5520 action,
5521 &tokens[tpos],
5522 n_tokens - tpos,
5523 instr,
5524 data);
5525
5526 if (!strcmp(tokens[tpos], "jmpnh"))
5527 return instr_jmp_miss_translate(p,
5528 action,
5529 &tokens[tpos],
5530 n_tokens - tpos,
5531 instr,
5532 data);
5533
5534 if (!strcmp(tokens[tpos], "jmpa"))
5535 return instr_jmp_action_hit_translate(p,
5536 action,
5537 &tokens[tpos],
5538 n_tokens - tpos,
5539 instr,
5540 data);
5541
5542 if (!strcmp(tokens[tpos], "jmpna"))
5543 return instr_jmp_action_miss_translate(p,
5544 action,
5545 &tokens[tpos],
5546 n_tokens - tpos,
5547 instr,
5548 data);
5549
5550 if (!strcmp(tokens[tpos], "jmpeq"))
5551 return instr_jmp_eq_translate(p,
5552 action,
5553 &tokens[tpos],
5554 n_tokens - tpos,
5555 instr,
5556 data);
5557
5558 if (!strcmp(tokens[tpos], "jmpneq"))
5559 return instr_jmp_neq_translate(p,
5560 action,
5561 &tokens[tpos],
5562 n_tokens - tpos,
5563 instr,
5564 data);
5565
5566 if (!strcmp(tokens[tpos], "jmplt"))
5567 return instr_jmp_lt_translate(p,
5568 action,
5569 &tokens[tpos],
5570 n_tokens - tpos,
5571 instr,
5572 data);
5573
5574 if (!strcmp(tokens[tpos], "jmpgt"))
5575 return instr_jmp_gt_translate(p,
5576 action,
5577 &tokens[tpos],
5578 n_tokens - tpos,
5579 instr,
5580 data);
5581
5582 if (!strcmp(tokens[tpos], "return"))
5583 return instr_return_translate(p,
5584 action,
5585 &tokens[tpos],
5586 n_tokens - tpos,
5587 instr,
5588 data);
5589
5590 CHECK(0, EINVAL);
5591 }
5592
5593 static struct instruction_data *
label_find(struct instruction_data * data,uint32_t n,const char * label)5594 label_find(struct instruction_data *data, uint32_t n, const char *label)
5595 {
5596 uint32_t i;
5597
5598 for (i = 0; i < n; i++)
5599 if (!strcmp(label, data[i].label))
5600 return &data[i];
5601
5602 return NULL;
5603 }
5604
5605 static uint32_t
label_is_used(struct instruction_data * data,uint32_t n,const char * label)5606 label_is_used(struct instruction_data *data, uint32_t n, const char *label)
5607 {
5608 uint32_t count = 0, i;
5609
5610 if (!label[0])
5611 return 0;
5612
5613 for (i = 0; i < n; i++)
5614 if (!strcmp(label, data[i].jmp_label))
5615 count++;
5616
5617 return count;
5618 }
5619
5620 static int
instr_label_check(struct instruction_data * instruction_data,uint32_t n_instructions)5621 instr_label_check(struct instruction_data *instruction_data,
5622 uint32_t n_instructions)
5623 {
5624 uint32_t i;
5625
5626 /* Check that all instruction labels are unique. */
5627 for (i = 0; i < n_instructions; i++) {
5628 struct instruction_data *data = &instruction_data[i];
5629 char *label = data->label;
5630 uint32_t j;
5631
5632 if (!label[0])
5633 continue;
5634
5635 for (j = i + 1; j < n_instructions; j++)
5636 CHECK(strcmp(label, data[j].label), EINVAL);
5637 }
5638
5639 /* Get users for each instruction label. */
5640 for (i = 0; i < n_instructions; i++) {
5641 struct instruction_data *data = &instruction_data[i];
5642 char *label = data->label;
5643
5644 data->n_users = label_is_used(instruction_data,
5645 n_instructions,
5646 label);
5647 }
5648
5649 return 0;
5650 }
5651
/* Resolve the target instruction pointer of every jump instruction.
 * For each jump, look up its label (recorded at translation time) and point
 * instr->jmp.ip at the labeled instruction. Returns 0 on success, -EINVAL
 * when a jump references an undefined label.
 */
static int
instr_jmp_resolve(struct instruction *instructions,
		  struct instruction_data *instruction_data,
		  uint32_t n_instructions)
{
	uint32_t i;

	for (i = 0; i < n_instructions; i++) {
		struct instruction *instr = &instructions[i];
		struct instruction_data *data = &instruction_data[i];
		struct instruction_data *found;

		if (!instruction_is_jmp(instr))
			continue;

		found = label_find(instruction_data,
				   n_instructions,
				   data->jmp_label);
		CHECK(found, EINVAL);

		/* found - instruction_data is the index of the labeled
		 * instruction; both arrays are parallel.
		 */
		instr->jmp.ip = &instructions[found - instruction_data];
	}

	return 0;
}
5677
5678 static int
instr_verify(struct rte_swx_pipeline * p __rte_unused,struct action * a,struct instruction * instr,struct instruction_data * data __rte_unused,uint32_t n_instructions)5679 instr_verify(struct rte_swx_pipeline *p __rte_unused,
5680 struct action *a,
5681 struct instruction *instr,
5682 struct instruction_data *data __rte_unused,
5683 uint32_t n_instructions)
5684 {
5685 if (!a) {
5686 enum instruction_type type;
5687 uint32_t i;
5688
5689 /* Check that the first instruction is rx. */
5690 CHECK(instr[0].type == INSTR_RX, EINVAL);
5691
5692 /* Check that there is at least one tx instruction. */
5693 for (i = 0; i < n_instructions; i++) {
5694 type = instr[i].type;
5695
5696 if (type == INSTR_TX)
5697 break;
5698 }
5699 CHECK(i < n_instructions, EINVAL);
5700
5701 /* Check that the last instruction is either tx or unconditional
5702 * jump.
5703 */
5704 type = instr[n_instructions - 1].type;
5705 CHECK((type == INSTR_TX) || (type == INSTR_JMP), EINVAL);
5706 }
5707
5708 if (a) {
5709 enum instruction_type type;
5710 uint32_t i;
5711
5712 /* Check that there is at least one return or tx instruction. */
5713 for (i = 0; i < n_instructions; i++) {
5714 type = instr[i].type;
5715
5716 if ((type == INSTR_RETURN) || (type == INSTR_TX))
5717 break;
5718 }
5719 CHECK(i < n_instructions, EINVAL);
5720 }
5721
5722 return 0;
5723 }
5724
5725 static int
instr_pattern_extract_many_detect(struct instruction * instr,struct instruction_data * data,uint32_t n_instr,uint32_t * n_pattern_instr)5726 instr_pattern_extract_many_detect(struct instruction *instr,
5727 struct instruction_data *data,
5728 uint32_t n_instr,
5729 uint32_t *n_pattern_instr)
5730 {
5731 uint32_t i;
5732
5733 for (i = 0; i < n_instr; i++) {
5734 if (data[i].invalid)
5735 break;
5736
5737 if (instr[i].type != INSTR_HDR_EXTRACT)
5738 break;
5739
5740 if (i == RTE_DIM(instr->io.hdr.header_id))
5741 break;
5742
5743 if (i && data[i].n_users)
5744 break;
5745 }
5746
5747 if (i < 2)
5748 return 0;
5749
5750 *n_pattern_instr = i;
5751 return 1;
5752 }
5753
/* Fuse a detected run of n_instr extract instructions into the first slot and
 * mark the rest invalid. NOTE: instr[0].type++ per extra header relies on the
 * INSTR_HDR_EXTRACT .. INSTR_HDR_EXTRACT8 enum values being consecutive.
 */
static void
instr_pattern_extract_many_optimize(struct instruction *instr,
				    struct instruction_data *data,
				    uint32_t n_instr)
{
	uint32_t i;

	for (i = 1; i < n_instr; i++) {
		/* Promote the fused opcode by one step per absorbed extract. */
		instr[0].type++;
		instr[0].io.hdr.header_id[i] = instr[i].io.hdr.header_id[0];
		instr[0].io.hdr.struct_id[i] = instr[i].io.hdr.struct_id[0];
		instr[0].io.hdr.n_bytes[i] = instr[i].io.hdr.n_bytes[0];

		/* Absorbed instruction is dropped by the compaction pass. */
		data[i].invalid = 1;
	}
}
5770
5771 static int
instr_pattern_emit_many_tx_detect(struct instruction * instr,struct instruction_data * data,uint32_t n_instr,uint32_t * n_pattern_instr)5772 instr_pattern_emit_many_tx_detect(struct instruction *instr,
5773 struct instruction_data *data,
5774 uint32_t n_instr,
5775 uint32_t *n_pattern_instr)
5776 {
5777 uint32_t i;
5778
5779 for (i = 0; i < n_instr; i++) {
5780 if (data[i].invalid)
5781 break;
5782
5783 if (instr[i].type != INSTR_HDR_EMIT)
5784 break;
5785
5786 if (i == RTE_DIM(instr->io.hdr.header_id))
5787 break;
5788
5789 if (i && data[i].n_users)
5790 break;
5791 }
5792
5793 if (!i)
5794 return 0;
5795
5796 if (instr[i].type != INSTR_TX)
5797 return 0;
5798
5799 if (data[i].n_users)
5800 return 0;
5801
5802 i++;
5803
5804 *n_pattern_instr = i;
5805 return 1;
5806 }
5807
/* Fuse a detected emit-many-plus-tx pattern (n_instr instructions, the last
 * one being tx) into the first slot and mark the rest invalid. NOTE: each
 * instr[0].type++ relies on the INSTR_HDR_EMIT .. INSTR_HDR_EMIT8_TX enum
 * values being consecutive: n_instr-2 bumps for the extra emits plus one
 * final bump to move from the emit-many opcode to its emit-many-tx variant.
 */
static void
instr_pattern_emit_many_tx_optimize(struct instruction *instr,
				    struct instruction_data *data,
				    uint32_t n_instr)
{
	uint32_t i;

	/* Any emit instruction in addition to the first one. */
	for (i = 1; i < n_instr - 1; i++) {
		instr[0].type++;
		instr[0].io.hdr.header_id[i] = instr[i].io.hdr.header_id[0];
		instr[0].io.hdr.struct_id[i] = instr[i].io.hdr.struct_id[0];
		instr[0].io.hdr.n_bytes[i] = instr[i].io.hdr.n_bytes[0];

		data[i].invalid = 1;
	}

	/* The TX instruction is the last one in the pattern. */
	instr[0].type++;
	instr[0].io.io.offset = instr[i].io.io.offset;
	instr[0].io.io.n_bits = instr[i].io.io.n_bits;
	data[i].invalid = 1;
}
5831
5832 static int
instr_pattern_dma_many_detect(struct instruction * instr,struct instruction_data * data,uint32_t n_instr,uint32_t * n_pattern_instr)5833 instr_pattern_dma_many_detect(struct instruction *instr,
5834 struct instruction_data *data,
5835 uint32_t n_instr,
5836 uint32_t *n_pattern_instr)
5837 {
5838 uint32_t i;
5839
5840 for (i = 0; i < n_instr; i++) {
5841 if (data[i].invalid)
5842 break;
5843
5844 if (instr[i].type != INSTR_DMA_HT)
5845 break;
5846
5847 if (i == RTE_DIM(instr->dma.dst.header_id))
5848 break;
5849
5850 if (i && data[i].n_users)
5851 break;
5852 }
5853
5854 if (i < 2)
5855 return 0;
5856
5857 *n_pattern_instr = i;
5858 return 1;
5859 }
5860
/* Fuse a detected run of n_instr DMA instructions into the first slot and
 * mark the rest invalid. NOTE: instr[0].type++ per absorbed instruction
 * relies on the INSTR_DMA_HT .. INSTR_DMA_HT8 enum values being consecutive.
 */
static void
instr_pattern_dma_many_optimize(struct instruction *instr,
				struct instruction_data *data,
				uint32_t n_instr)
{
	uint32_t i;

	for (i = 1; i < n_instr; i++) {
		/* Promote the fused opcode by one step per absorbed DMA. */
		instr[0].type++;
		instr[0].dma.dst.header_id[i] = instr[i].dma.dst.header_id[0];
		instr[0].dma.dst.struct_id[i] = instr[i].dma.dst.struct_id[0];
		instr[0].dma.src.offset[i] = instr[i].dma.src.offset[0];
		instr[0].dma.n_bytes[i] = instr[i].dma.n_bytes[0];

		/* Absorbed instruction is dropped by the compaction pass. */
		data[i].invalid = 1;
	}
}
5878
/* Peephole optimizer: scan the instruction stream for known multi-instruction
 * patterns, fuse each one into a single instruction (marking the absorbed
 * slots invalid), then compact the stream in place. Returns the new
 * instruction count (<= n_instructions).
 */
static uint32_t
instr_optimize(struct instruction *instructions,
	       struct instruction_data *instruction_data,
	       uint32_t n_instructions)
{
	uint32_t i, pos = 0;

	for (i = 0; i < n_instructions; ) {
		struct instruction *instr = &instructions[i];
		struct instruction_data *data = &instruction_data[i];
		uint32_t n_instr = 0;
		int detected;

		/* Extract many. */
		detected = instr_pattern_extract_many_detect(instr,
							     data,
							     n_instructions - i,
							     &n_instr);
		if (detected) {
			instr_pattern_extract_many_optimize(instr,
							    data,
							    n_instr);
			/* Skip over the entire fused pattern. */
			i += n_instr;
			continue;
		}

		/* Emit many + TX. */
		detected = instr_pattern_emit_many_tx_detect(instr,
							     data,
							     n_instructions - i,
							     &n_instr);
		if (detected) {
			instr_pattern_emit_many_tx_optimize(instr,
							    data,
							    n_instr);
			i += n_instr;
			continue;
		}

		/* DMA many. */
		detected = instr_pattern_dma_many_detect(instr,
							 data,
							 n_instructions - i,
							 &n_instr);
		if (detected) {
			instr_pattern_dma_many_optimize(instr, data, n_instr);
			i += n_instr;
			continue;
		}

		/* No pattern starting at the current instruction. */
		i++;
	}

	/* Eliminate the invalid instructions that have been optimized out.
	 * Valid entries are shifted down; pos tracks the write position.
	 */
	for (i = 0; i < n_instructions; i++) {
		struct instruction *instr = &instructions[i];
		struct instruction_data *data = &instruction_data[i];

		if (data->invalid)
			continue;

		if (i != pos) {
			memcpy(&instructions[pos], instr, sizeof(*instr));
			memcpy(&instruction_data[pos], data, sizeof(*data));
		}

		pos++;
	}

	return pos;
}
5951
5952 static int
instruction_config(struct rte_swx_pipeline * p,struct action * a,const char ** instructions,uint32_t n_instructions)5953 instruction_config(struct rte_swx_pipeline *p,
5954 struct action *a,
5955 const char **instructions,
5956 uint32_t n_instructions)
5957 {
5958 struct instruction *instr = NULL;
5959 struct instruction_data *data = NULL;
5960 int err = 0;
5961 uint32_t i;
5962
5963 CHECK(n_instructions, EINVAL);
5964 CHECK(instructions, EINVAL);
5965 for (i = 0; i < n_instructions; i++)
5966 CHECK_INSTRUCTION(instructions[i], EINVAL);
5967
5968 /* Memory allocation. */
5969 instr = calloc(n_instructions, sizeof(struct instruction));
5970 if (!instr) {
5971 err = ENOMEM;
5972 goto error;
5973 }
5974
5975 data = calloc(n_instructions, sizeof(struct instruction_data));
5976 if (!data) {
5977 err = ENOMEM;
5978 goto error;
5979 }
5980
5981 for (i = 0; i < n_instructions; i++) {
5982 char *string = strdup(instructions[i]);
5983 if (!string) {
5984 err = ENOMEM;
5985 goto error;
5986 }
5987
5988 err = instr_translate(p, a, string, &instr[i], &data[i]);
5989 if (err) {
5990 free(string);
5991 goto error;
5992 }
5993
5994 free(string);
5995 }
5996
5997 err = instr_label_check(data, n_instructions);
5998 if (err)
5999 goto error;
6000
6001 err = instr_verify(p, a, instr, data, n_instructions);
6002 if (err)
6003 goto error;
6004
6005 n_instructions = instr_optimize(instr, data, n_instructions);
6006
6007 err = instr_jmp_resolve(instr, data, n_instructions);
6008 if (err)
6009 goto error;
6010
6011 if (a) {
6012 a->instructions = instr;
6013 a->n_instructions = n_instructions;
6014 } else {
6015 p->instructions = instr;
6016 p->n_instructions = n_instructions;
6017 }
6018
6019 free(data);
6020 return 0;
6021
6022 error:
6023 free(data);
6024 free(instr);
6025 return err;
6026 }
6027
/* Executor function for one instruction: operates on the current thread of
 * the given pipeline.
 */
typedef void (*instr_exec_t)(struct rte_swx_pipeline *);

/* Dispatch table indexed by enum instruction_type; instr_exec() uses it to
 * jump straight to the executor of the current instruction.
 */
static instr_exec_t instruction_table[] = {
	/* Packet rx/tx. */
	[INSTR_RX] = instr_rx_exec,
	[INSTR_TX] = instr_tx_exec,

	/* Header extract, including the fused multi-header variants. */
	[INSTR_HDR_EXTRACT] = instr_hdr_extract_exec,
	[INSTR_HDR_EXTRACT2] = instr_hdr_extract2_exec,
	[INSTR_HDR_EXTRACT3] = instr_hdr_extract3_exec,
	[INSTR_HDR_EXTRACT4] = instr_hdr_extract4_exec,
	[INSTR_HDR_EXTRACT5] = instr_hdr_extract5_exec,
	[INSTR_HDR_EXTRACT6] = instr_hdr_extract6_exec,
	[INSTR_HDR_EXTRACT7] = instr_hdr_extract7_exec,
	[INSTR_HDR_EXTRACT8] = instr_hdr_extract8_exec,

	/* Header emit, including the fused emit-many-plus-tx variants. */
	[INSTR_HDR_EMIT] = instr_hdr_emit_exec,
	[INSTR_HDR_EMIT_TX] = instr_hdr_emit_tx_exec,
	[INSTR_HDR_EMIT2_TX] = instr_hdr_emit2_tx_exec,
	[INSTR_HDR_EMIT3_TX] = instr_hdr_emit3_tx_exec,
	[INSTR_HDR_EMIT4_TX] = instr_hdr_emit4_tx_exec,
	[INSTR_HDR_EMIT5_TX] = instr_hdr_emit5_tx_exec,
	[INSTR_HDR_EMIT6_TX] = instr_hdr_emit6_tx_exec,
	[INSTR_HDR_EMIT7_TX] = instr_hdr_emit7_tx_exec,
	[INSTR_HDR_EMIT8_TX] = instr_hdr_emit8_tx_exec,

	[INSTR_HDR_VALIDATE] = instr_hdr_validate_exec,
	[INSTR_HDR_INVALIDATE] = instr_hdr_invalidate_exec,

	[INSTR_MOV] = instr_mov_exec,
	[INSTR_MOV_S] = instr_mov_s_exec,
	[INSTR_MOV_I] = instr_mov_i_exec,

	/* Header-to-table DMA, including the fused multi-slot variants. */
	[INSTR_DMA_HT] = instr_dma_ht_exec,
	[INSTR_DMA_HT2] = instr_dma_ht2_exec,
	[INSTR_DMA_HT3] = instr_dma_ht3_exec,
	[INSTR_DMA_HT4] = instr_dma_ht4_exec,
	[INSTR_DMA_HT5] = instr_dma_ht5_exec,
	[INSTR_DMA_HT6] = instr_dma_ht6_exec,
	[INSTR_DMA_HT7] = instr_dma_ht7_exec,
	[INSTR_DMA_HT8] = instr_dma_ht8_exec,

	/* ALU operations; the suffix encodes operand location/kind. */
	[INSTR_ALU_ADD] = instr_alu_add_exec,
	[INSTR_ALU_ADD_MH] = instr_alu_add_mh_exec,
	[INSTR_ALU_ADD_HM] = instr_alu_add_hm_exec,
	[INSTR_ALU_ADD_HH] = instr_alu_add_hh_exec,
	[INSTR_ALU_ADD_MI] = instr_alu_add_mi_exec,
	[INSTR_ALU_ADD_HI] = instr_alu_add_hi_exec,

	[INSTR_ALU_SUB] = instr_alu_sub_exec,
	[INSTR_ALU_SUB_MH] = instr_alu_sub_mh_exec,
	[INSTR_ALU_SUB_HM] = instr_alu_sub_hm_exec,
	[INSTR_ALU_SUB_HH] = instr_alu_sub_hh_exec,
	[INSTR_ALU_SUB_MI] = instr_alu_sub_mi_exec,
	[INSTR_ALU_SUB_HI] = instr_alu_sub_hi_exec,

	[INSTR_ALU_CKADD_FIELD] = instr_alu_ckadd_field_exec,
	[INSTR_ALU_CKADD_STRUCT] = instr_alu_ckadd_struct_exec,
	[INSTR_ALU_CKADD_STRUCT20] = instr_alu_ckadd_struct20_exec,
	[INSTR_ALU_CKSUB_FIELD] = instr_alu_cksub_field_exec,

	[INSTR_ALU_AND] = instr_alu_and_exec,
	[INSTR_ALU_AND_S] = instr_alu_and_s_exec,
	[INSTR_ALU_AND_I] = instr_alu_and_i_exec,

	[INSTR_ALU_OR] = instr_alu_or_exec,
	[INSTR_ALU_OR_S] = instr_alu_or_s_exec,
	[INSTR_ALU_OR_I] = instr_alu_or_i_exec,

	[INSTR_ALU_XOR] = instr_alu_xor_exec,
	[INSTR_ALU_XOR_S] = instr_alu_xor_s_exec,
	[INSTR_ALU_XOR_I] = instr_alu_xor_i_exec,

	[INSTR_ALU_SHL] = instr_alu_shl_exec,
	[INSTR_ALU_SHL_MH] = instr_alu_shl_mh_exec,
	[INSTR_ALU_SHL_HM] = instr_alu_shl_hm_exec,
	[INSTR_ALU_SHL_HH] = instr_alu_shl_hh_exec,
	[INSTR_ALU_SHL_MI] = instr_alu_shl_mi_exec,
	[INSTR_ALU_SHL_HI] = instr_alu_shl_hi_exec,

	[INSTR_ALU_SHR] = instr_alu_shr_exec,
	[INSTR_ALU_SHR_MH] = instr_alu_shr_mh_exec,
	[INSTR_ALU_SHR_HM] = instr_alu_shr_hm_exec,
	[INSTR_ALU_SHR_HH] = instr_alu_shr_hh_exec,
	[INSTR_ALU_SHR_MI] = instr_alu_shr_mi_exec,
	[INSTR_ALU_SHR_HI] = instr_alu_shr_hi_exec,

	/* Table lookup and extern invocation. */
	[INSTR_TABLE] = instr_table_exec,
	[INSTR_EXTERN_OBJ] = instr_extern_obj_exec,
	[INSTR_EXTERN_FUNC] = instr_extern_func_exec,

	/* Jumps: unconditional, header/table status, and comparisons. */
	[INSTR_JMP] = instr_jmp_exec,
	[INSTR_JMP_VALID] = instr_jmp_valid_exec,
	[INSTR_JMP_INVALID] = instr_jmp_invalid_exec,
	[INSTR_JMP_HIT] = instr_jmp_hit_exec,
	[INSTR_JMP_MISS] = instr_jmp_miss_exec,
	[INSTR_JMP_ACTION_HIT] = instr_jmp_action_hit_exec,
	[INSTR_JMP_ACTION_MISS] = instr_jmp_action_miss_exec,

	[INSTR_JMP_EQ] = instr_jmp_eq_exec,
	[INSTR_JMP_EQ_S] = instr_jmp_eq_s_exec,
	[INSTR_JMP_EQ_I] = instr_jmp_eq_i_exec,

	[INSTR_JMP_NEQ] = instr_jmp_neq_exec,
	[INSTR_JMP_NEQ_S] = instr_jmp_neq_s_exec,
	[INSTR_JMP_NEQ_I] = instr_jmp_neq_i_exec,

	[INSTR_JMP_LT] = instr_jmp_lt_exec,
	[INSTR_JMP_LT_MH] = instr_jmp_lt_mh_exec,
	[INSTR_JMP_LT_HM] = instr_jmp_lt_hm_exec,
	[INSTR_JMP_LT_HH] = instr_jmp_lt_hh_exec,
	[INSTR_JMP_LT_MI] = instr_jmp_lt_mi_exec,
	[INSTR_JMP_LT_HI] = instr_jmp_lt_hi_exec,

	[INSTR_JMP_GT] = instr_jmp_gt_exec,
	[INSTR_JMP_GT_MH] = instr_jmp_gt_mh_exec,
	[INSTR_JMP_GT_HM] = instr_jmp_gt_hm_exec,
	[INSTR_JMP_GT_HH] = instr_jmp_gt_hh_exec,
	[INSTR_JMP_GT_MI] = instr_jmp_gt_mi_exec,
	[INSTR_JMP_GT_HI] = instr_jmp_gt_hi_exec,

	[INSTR_RETURN] = instr_return_exec,
};
6150
6151 static inline void
instr_exec(struct rte_swx_pipeline * p)6152 instr_exec(struct rte_swx_pipeline *p)
6153 {
6154 struct thread *t = &p->threads[p->thread_id];
6155 struct instruction *ip = t->ip;
6156 instr_exec_t instr = instruction_table[ip->type];
6157
6158 instr(p);
6159 }
6160
6161 /*
6162 * Action.
6163 */
6164 static struct action *
action_find(struct rte_swx_pipeline * p,const char * name)6165 action_find(struct rte_swx_pipeline *p, const char *name)
6166 {
6167 struct action *elem;
6168
6169 if (!name)
6170 return NULL;
6171
6172 TAILQ_FOREACH(elem, &p->actions, node)
6173 if (strcmp(elem->name, name) == 0)
6174 return elem;
6175
6176 return NULL;
6177 }
6178
6179 static struct action *
action_find_by_id(struct rte_swx_pipeline * p,uint32_t id)6180 action_find_by_id(struct rte_swx_pipeline *p, uint32_t id)
6181 {
6182 struct action *action = NULL;
6183
6184 TAILQ_FOREACH(action, &p->actions, node)
6185 if (action->id == id)
6186 return action;
6187
6188 return NULL;
6189 }
6190
6191 static struct field *
action_field_find(struct action * a,const char * name)6192 action_field_find(struct action *a, const char *name)
6193 {
6194 return a->st ? struct_type_field_find(a->st, name) : NULL;
6195 }
6196
/* Parse a "t.<field>" reference into the named action argument field;
 * returns NULL when the prefix is missing or the field is unknown.
 */
static struct field *
action_field_parse(struct action *action, const char *name)
{
	if (strncmp(name, "t.", 2))
		return NULL;

	return action_field_find(action, name + 2);
}
6205
6206 int
rte_swx_pipeline_action_config(struct rte_swx_pipeline * p,const char * name,const char * args_struct_type_name,const char ** instructions,uint32_t n_instructions)6207 rte_swx_pipeline_action_config(struct rte_swx_pipeline *p,
6208 const char *name,
6209 const char *args_struct_type_name,
6210 const char **instructions,
6211 uint32_t n_instructions)
6212 {
6213 struct struct_type *args_struct_type;
6214 struct action *a;
6215 int err;
6216
6217 CHECK(p, EINVAL);
6218
6219 CHECK_NAME(name, EINVAL);
6220 CHECK(!action_find(p, name), EEXIST);
6221
6222 if (args_struct_type_name) {
6223 CHECK_NAME(args_struct_type_name, EINVAL);
6224 args_struct_type = struct_type_find(p, args_struct_type_name);
6225 CHECK(args_struct_type, EINVAL);
6226 } else {
6227 args_struct_type = NULL;
6228 }
6229
6230 /* Node allocation. */
6231 a = calloc(1, sizeof(struct action));
6232 CHECK(a, ENOMEM);
6233
6234 /* Node initialization. */
6235 strcpy(a->name, name);
6236 a->st = args_struct_type;
6237 a->id = p->n_actions;
6238
6239 /* Instruction translation. */
6240 err = instruction_config(p, a, instructions, n_instructions);
6241 if (err) {
6242 free(a);
6243 return err;
6244 }
6245
6246 /* Node add to tailq. */
6247 TAILQ_INSERT_TAIL(&p->actions, a, node);
6248 p->n_actions++;
6249
6250 return 0;
6251 }
6252
6253 static int
action_build(struct rte_swx_pipeline * p)6254 action_build(struct rte_swx_pipeline *p)
6255 {
6256 struct action *action;
6257
6258 p->action_instructions = calloc(p->n_actions,
6259 sizeof(struct instruction *));
6260 CHECK(p->action_instructions, ENOMEM);
6261
6262 TAILQ_FOREACH(action, &p->actions, node)
6263 p->action_instructions[action->id] = action->instructions;
6264
6265 return 0;
6266 }
6267
/* Release the per-action instruction pointer array built by action_build().
 * The instruction arrays themselves are owned by the action nodes.
 */
static void
action_build_free(struct rte_swx_pipeline *p)
{
	free(p->action_instructions);
	p->action_instructions = NULL;
}
6274
6275 static void
action_free(struct rte_swx_pipeline * p)6276 action_free(struct rte_swx_pipeline *p)
6277 {
6278 action_build_free(p);
6279
6280 for ( ; ; ) {
6281 struct action *action;
6282
6283 action = TAILQ_FIRST(&p->actions);
6284 if (!action)
6285 break;
6286
6287 TAILQ_REMOVE(&p->actions, action, node);
6288 free(action->instructions);
6289 free(action);
6290 }
6291 }
6292
6293 /*
6294 * Table.
6295 */
6296 static struct table_type *
table_type_find(struct rte_swx_pipeline * p,const char * name)6297 table_type_find(struct rte_swx_pipeline *p, const char *name)
6298 {
6299 struct table_type *elem;
6300
6301 TAILQ_FOREACH(elem, &p->table_types, node)
6302 if (strcmp(elem->name, name) == 0)
6303 return elem;
6304
6305 return NULL;
6306 }
6307
6308 static struct table_type *
table_type_resolve(struct rte_swx_pipeline * p,const char * recommended_type_name,enum rte_swx_table_match_type match_type)6309 table_type_resolve(struct rte_swx_pipeline *p,
6310 const char *recommended_type_name,
6311 enum rte_swx_table_match_type match_type)
6312 {
6313 struct table_type *elem;
6314
6315 /* Only consider the recommended type if the match type is correct. */
6316 if (recommended_type_name)
6317 TAILQ_FOREACH(elem, &p->table_types, node)
6318 if (!strcmp(elem->name, recommended_type_name) &&
6319 (elem->match_type == match_type))
6320 return elem;
6321
6322 /* Ignore the recommended type and get the first element with this match
6323 * type.
6324 */
6325 TAILQ_FOREACH(elem, &p->table_types, node)
6326 if (elem->match_type == match_type)
6327 return elem;
6328
6329 return NULL;
6330 }
6331
6332 static struct table *
table_find(struct rte_swx_pipeline * p,const char * name)6333 table_find(struct rte_swx_pipeline *p, const char *name)
6334 {
6335 struct table *elem;
6336
6337 TAILQ_FOREACH(elem, &p->tables, node)
6338 if (strcmp(elem->name, name) == 0)
6339 return elem;
6340
6341 return NULL;
6342 }
6343
6344 static struct table *
table_find_by_id(struct rte_swx_pipeline * p,uint32_t id)6345 table_find_by_id(struct rte_swx_pipeline *p, uint32_t id)
6346 {
6347 struct table *table = NULL;
6348
6349 TAILQ_FOREACH(table, &p->tables, node)
6350 if (table->id == id)
6351 return table;
6352
6353 return NULL;
6354 }
6355
6356 int
rte_swx_pipeline_table_type_register(struct rte_swx_pipeline * p,const char * name,enum rte_swx_table_match_type match_type,struct rte_swx_table_ops * ops)6357 rte_swx_pipeline_table_type_register(struct rte_swx_pipeline *p,
6358 const char *name,
6359 enum rte_swx_table_match_type match_type,
6360 struct rte_swx_table_ops *ops)
6361 {
6362 struct table_type *elem;
6363
6364 CHECK(p, EINVAL);
6365
6366 CHECK_NAME(name, EINVAL);
6367 CHECK(!table_type_find(p, name), EEXIST);
6368
6369 CHECK(ops, EINVAL);
6370 CHECK(ops->create, EINVAL);
6371 CHECK(ops->lkp, EINVAL);
6372 CHECK(ops->free, EINVAL);
6373
6374 /* Node allocation. */
6375 elem = calloc(1, sizeof(struct table_type));
6376 CHECK(elem, ENOMEM);
6377
6378 /* Node initialization. */
6379 strcpy(elem->name, name);
6380 elem->match_type = match_type;
6381 memcpy(&elem->ops, ops, sizeof(*ops));
6382
6383 /* Node add to tailq. */
6384 TAILQ_INSERT_TAIL(&p->table_types, elem, node);
6385
6386 return 0;
6387 }
6388
6389 static enum rte_swx_table_match_type
table_match_type_resolve(struct rte_swx_match_field_params * fields,uint32_t n_fields)6390 table_match_type_resolve(struct rte_swx_match_field_params *fields,
6391 uint32_t n_fields)
6392 {
6393 uint32_t i;
6394
6395 for (i = 0; i < n_fields; i++)
6396 if (fields[i].match_type != RTE_SWX_TABLE_MATCH_EXACT)
6397 break;
6398
6399 if (i == n_fields)
6400 return RTE_SWX_TABLE_MATCH_EXACT;
6401
6402 if ((i == n_fields - 1) &&
6403 (fields[i].match_type == RTE_SWX_TABLE_MATCH_LPM))
6404 return RTE_SWX_TABLE_MATCH_LPM;
6405
6406 return RTE_SWX_TABLE_MATCH_WILDCARD;
6407 }
6408
6409 int
rte_swx_pipeline_table_config(struct rte_swx_pipeline * p,const char * name,struct rte_swx_pipeline_table_params * params,const char * recommended_table_type_name,const char * args,uint32_t size)6410 rte_swx_pipeline_table_config(struct rte_swx_pipeline *p,
6411 const char *name,
6412 struct rte_swx_pipeline_table_params *params,
6413 const char *recommended_table_type_name,
6414 const char *args,
6415 uint32_t size)
6416 {
6417 struct table_type *type;
6418 struct table *t;
6419 struct action *default_action;
6420 struct header *header = NULL;
6421 int is_header = 0;
6422 uint32_t offset_prev = 0, action_data_size_max = 0, i;
6423
6424 CHECK(p, EINVAL);
6425
6426 CHECK_NAME(name, EINVAL);
6427 CHECK(!table_find(p, name), EEXIST);
6428
6429 CHECK(params, EINVAL);
6430
6431 /* Match checks. */
6432 CHECK(!params->n_fields || params->fields, EINVAL);
6433 for (i = 0; i < params->n_fields; i++) {
6434 struct rte_swx_match_field_params *field = ¶ms->fields[i];
6435 struct header *h;
6436 struct field *hf, *mf;
6437 uint32_t offset;
6438
6439 CHECK_NAME(field->name, EINVAL);
6440
6441 hf = header_field_parse(p, field->name, &h);
6442 mf = metadata_field_parse(p, field->name);
6443 CHECK(hf || mf, EINVAL);
6444
6445 offset = hf ? hf->offset : mf->offset;
6446
6447 if (i == 0) {
6448 is_header = hf ? 1 : 0;
6449 header = hf ? h : NULL;
6450 offset_prev = offset;
6451
6452 continue;
6453 }
6454
6455 CHECK((is_header && hf && (h->id == header->id)) ||
6456 (!is_header && mf), EINVAL);
6457
6458 CHECK(offset > offset_prev, EINVAL);
6459 offset_prev = offset;
6460 }
6461
6462 /* Action checks. */
6463 CHECK(params->n_actions, EINVAL);
6464 CHECK(params->action_names, EINVAL);
6465 for (i = 0; i < params->n_actions; i++) {
6466 const char *action_name = params->action_names[i];
6467 struct action *a;
6468 uint32_t action_data_size;
6469
6470 CHECK_NAME(action_name, EINVAL);
6471
6472 a = action_find(p, action_name);
6473 CHECK(a, EINVAL);
6474
6475 action_data_size = a->st ? a->st->n_bits / 8 : 0;
6476 if (action_data_size > action_data_size_max)
6477 action_data_size_max = action_data_size;
6478 }
6479
6480 CHECK_NAME(params->default_action_name, EINVAL);
6481 for (i = 0; i < p->n_actions; i++)
6482 if (!strcmp(params->action_names[i],
6483 params->default_action_name))
6484 break;
6485 CHECK(i < params->n_actions, EINVAL);
6486 default_action = action_find(p, params->default_action_name);
6487 CHECK((default_action->st && params->default_action_data) ||
6488 !params->default_action_data, EINVAL);
6489
6490 /* Table type checks. */
6491 if (recommended_table_type_name)
6492 CHECK_NAME(recommended_table_type_name, EINVAL);
6493
6494 if (params->n_fields) {
6495 enum rte_swx_table_match_type match_type;
6496
6497 match_type = table_match_type_resolve(params->fields,
6498 params->n_fields);
6499 type = table_type_resolve(p,
6500 recommended_table_type_name,
6501 match_type);
6502 CHECK(type, EINVAL);
6503 } else {
6504 type = NULL;
6505 }
6506
6507 /* Memory allocation. */
6508 t = calloc(1, sizeof(struct table));
6509 CHECK(t, ENOMEM);
6510
6511 t->fields = calloc(params->n_fields, sizeof(struct match_field));
6512 if (!t->fields) {
6513 free(t);
6514 CHECK(0, ENOMEM);
6515 }
6516
6517 t->actions = calloc(params->n_actions, sizeof(struct action *));
6518 if (!t->actions) {
6519 free(t->fields);
6520 free(t);
6521 CHECK(0, ENOMEM);
6522 }
6523
6524 if (action_data_size_max) {
6525 t->default_action_data = calloc(1, action_data_size_max);
6526 if (!t->default_action_data) {
6527 free(t->actions);
6528 free(t->fields);
6529 free(t);
6530 CHECK(0, ENOMEM);
6531 }
6532 }
6533
6534 /* Node initialization. */
6535 strcpy(t->name, name);
6536 if (args && args[0])
6537 strcpy(t->args, args);
6538 t->type = type;
6539
6540 for (i = 0; i < params->n_fields; i++) {
6541 struct rte_swx_match_field_params *field = ¶ms->fields[i];
6542 struct match_field *f = &t->fields[i];
6543
6544 f->match_type = field->match_type;
6545 f->field = is_header ?
6546 header_field_parse(p, field->name, NULL) :
6547 metadata_field_parse(p, field->name);
6548 }
6549 t->n_fields = params->n_fields;
6550 t->is_header = is_header;
6551 t->header = header;
6552
6553 for (i = 0; i < params->n_actions; i++)
6554 t->actions[i] = action_find(p, params->action_names[i]);
6555 t->default_action = default_action;
6556 if (default_action->st)
6557 memcpy(t->default_action_data,
6558 params->default_action_data,
6559 default_action->st->n_bits / 8);
6560 t->n_actions = params->n_actions;
6561 t->default_action_is_const = params->default_action_is_const;
6562 t->action_data_size_max = action_data_size_max;
6563
6564 t->size = size;
6565 t->id = p->n_tables;
6566
6567 /* Node add to tailq. */
6568 TAILQ_INSERT_TAIL(&p->tables, t, node);
6569 p->n_tables++;
6570
6571 return 0;
6572 }
6573
/* Build the rte_swx_table_params handed to the table implementation: key
 * window (offset + size spanning first to last match field), key mask with
 * 0xFF over each field's bytes, and the largest action data size. Returns a
 * heap-allocated params struct (caller frees via table_params_free()) or
 * NULL on allocation failure. NOTE: the byte arithmetic assumes field
 * offsets and n_bits are byte-aligned (multiples of 8) — presumably
 * guaranteed by struct registration; confirm against struct_type checks.
 */
static struct rte_swx_table_params *
table_params_get(struct table *table)
{
	struct rte_swx_table_params *params;
	struct field *first, *last;
	uint8_t *key_mask;
	uint32_t key_size, key_offset, action_data_size, i;

	/* Memory allocation. */
	params = calloc(1, sizeof(struct rte_swx_table_params));
	if (!params)
		return NULL;

	/* Key offset and size. */
	first = table->fields[0].field;
	last = table->fields[table->n_fields - 1].field;
	key_offset = first->offset / 8;
	key_size = (last->offset + last->n_bits - first->offset) / 8;

	/* Memory allocation. */
	key_mask = calloc(1, key_size);
	if (!key_mask) {
		free(params);
		return NULL;
	}

	/* Key mask: set all bytes of every match field; gaps between fields
	 * stay zero (don't-care).
	 */
	for (i = 0; i < table->n_fields; i++) {
		struct field *f = table->fields[i].field;
		uint32_t start = (f->offset - first->offset) / 8;
		size_t size = f->n_bits / 8;

		memset(&key_mask[start], 0xFF, size);
	}

	/* Action data size: largest argument struct over all actions. */
	action_data_size = 0;
	for (i = 0; i < table->n_actions; i++) {
		struct action *action = table->actions[i];
		uint32_t ads = action->st ? action->st->n_bits / 8 : 0;

		if (ads > action_data_size)
			action_data_size = ads;
	}

	/* Fill in. */
	params->match_type = table->type->match_type;
	params->key_size = key_size;
	params->key_offset = key_offset;
	params->key_mask0 = key_mask;
	params->action_data_size = action_data_size;
	params->n_keys_max = table->size;

	return params;
}
6629
6630 static void
table_params_free(struct rte_swx_table_params * params)6631 table_params_free(struct rte_swx_table_params *params)
6632 {
6633 if (!params)
6634 return;
6635
6636 free(params->key_mask0);
6637 free(params);
6638 }
6639
/* Build the per-table runtime state array: for each table, instantiate the
 * underlying table object (if the table has a type) and copy the default
 * action data. Returns 0 on success or a negative errno; on failure,
 * already-built entries are expected to be torn down by
 * table_state_build_free() via the caller's cleanup path.
 */
static int
table_state_build(struct rte_swx_pipeline *p)
{
	struct table *table;

	p->table_state = calloc(p->n_tables,
				sizeof(struct rte_swx_table_state));
	CHECK(p->table_state, ENOMEM);

	TAILQ_FOREACH(table, &p->tables, node) {
		struct rte_swx_table_state *ts = &p->table_state[table->id];

		if (table->type) {
			struct rte_swx_table_params *params;

			/* ts->obj. */
			params = table_params_get(table);
			CHECK(params, ENOMEM);

			ts->obj = table->type->ops.create(params,
							  NULL,
							  table->args,
							  p->numa_node);

			/* Params are only needed for creation. */
			table_params_free(params);
			CHECK(ts->obj, ENODEV);
		}

		/* ts->default_action_data. */
		if (table->action_data_size_max) {
			ts->default_action_data =
				malloc(table->action_data_size_max);
			CHECK(ts->default_action_data, ENOMEM);

			memcpy(ts->default_action_data,
			       table->default_action_data,
			       table->action_data_size_max);
		}

		/* ts->default_action_id. */
		ts->default_action_id = table->default_action->id;
	}

	return 0;
}
6685
6686 static void
table_state_build_free(struct rte_swx_pipeline * p)6687 table_state_build_free(struct rte_swx_pipeline *p)
6688 {
6689 uint32_t i;
6690
6691 if (!p->table_state)
6692 return;
6693
6694 for (i = 0; i < p->n_tables; i++) {
6695 struct rte_swx_table_state *ts = &p->table_state[i];
6696 struct table *table = table_find_by_id(p, i);
6697
6698 /* ts->obj. */
6699 if (table->type && ts->obj)
6700 table->type->ops.free(ts->obj);
6701
6702 /* ts->default_action_data. */
6703 free(ts->default_action_data);
6704 }
6705
6706 free(p->table_state);
6707 p->table_state = NULL;
6708 }
6709
/* Release all table runtime state; currently identical to the build-time
 * teardown.
 */
static void
table_state_free(struct rte_swx_pipeline *p)
{
	table_state_build_free(p);
}
6715
/* Lookup function for typeless (keyless) tables: always a miss, completing
 * in a single call (return 1 = DONE per the rte_swx_table lookup contract).
 */
static int
table_stub_lkp(void *table __rte_unused,
	       void *mailbox __rte_unused,
	       uint8_t **key __rte_unused,
	       uint64_t *action_id __rte_unused,
	       uint8_t **action_data __rte_unused,
	       int *hit)
{
	*hit = 0;
	return 1; /* DONE. */
}
6727
/* Build the per-thread table runtime arrays: for every thread and every
 * table, wire up the lookup function, allocate the implementation's mailbox
 * (if any) and point the key at the table's header or metadata struct.
 * Typeless tables get the always-miss stub. Returns 0 or a negative errno;
 * on failure, partial allocations are torn down by table_build_free() via
 * the caller's cleanup path.
 */
static int
table_build(struct rte_swx_pipeline *p)
{
	uint32_t i;

	for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
		struct thread *t = &p->threads[i];
		struct table *table;

		t->tables = calloc(p->n_tables, sizeof(struct table_runtime));
		CHECK(t->tables, ENOMEM);

		TAILQ_FOREACH(table, &p->tables, node) {
			struct table_runtime *r = &t->tables[table->id];

			if (table->type) {
				uint64_t size;

				size = table->type->ops.mailbox_size_get();

				/* r->func. */
				r->func = table->type->ops.lkp;

				/* r->mailbox: per-thread scratch space for
				 * the table implementation.
				 */
				if (size) {
					r->mailbox = calloc(1, size);
					CHECK(r->mailbox, ENOMEM);
				}

				/* r->key: header struct for header-keyed
				 * tables, metadata struct otherwise.
				 */
				r->key = table->is_header ?
					&t->structs[table->header->struct_id] :
					&t->structs[p->metadata_struct_id];
			} else {
				r->func = table_stub_lkp;
			}
		}
	}

	return 0;
}
6769
6770 static void
table_build_free(struct rte_swx_pipeline * p)6771 table_build_free(struct rte_swx_pipeline *p)
6772 {
6773 uint32_t i;
6774
6775 for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
6776 struct thread *t = &p->threads[i];
6777 uint32_t j;
6778
6779 if (!t->tables)
6780 continue;
6781
6782 for (j = 0; j < p->n_tables; j++) {
6783 struct table_runtime *r = &t->tables[j];
6784
6785 free(r->mailbox);
6786 }
6787
6788 free(t->tables);
6789 t->tables = NULL;
6790 }
6791 }
6792
6793 static void
table_free(struct rte_swx_pipeline * p)6794 table_free(struct rte_swx_pipeline *p)
6795 {
6796 table_build_free(p);
6797
6798 /* Tables. */
6799 for ( ; ; ) {
6800 struct table *elem;
6801
6802 elem = TAILQ_FIRST(&p->tables);
6803 if (!elem)
6804 break;
6805
6806 TAILQ_REMOVE(&p->tables, elem, node);
6807 free(elem->fields);
6808 free(elem->actions);
6809 free(elem->default_action_data);
6810 free(elem);
6811 }
6812
6813 /* Table types. */
6814 for ( ; ; ) {
6815 struct table_type *elem;
6816
6817 elem = TAILQ_FIRST(&p->table_types);
6818 if (!elem)
6819 break;
6820
6821 TAILQ_REMOVE(&p->table_types, elem, node);
6822 free(elem);
6823 }
6824 }
6825
6826 /*
6827 * Pipeline.
6828 */
6829 int
rte_swx_pipeline_config(struct rte_swx_pipeline ** p,int numa_node)6830 rte_swx_pipeline_config(struct rte_swx_pipeline **p, int numa_node)
6831 {
6832 struct rte_swx_pipeline *pipeline;
6833
6834 /* Check input parameters. */
6835 CHECK(p, EINVAL);
6836
6837 /* Memory allocation. */
6838 pipeline = calloc(1, sizeof(struct rte_swx_pipeline));
6839 CHECK(pipeline, ENOMEM);
6840
6841 /* Initialization. */
6842 TAILQ_INIT(&pipeline->struct_types);
6843 TAILQ_INIT(&pipeline->port_in_types);
6844 TAILQ_INIT(&pipeline->ports_in);
6845 TAILQ_INIT(&pipeline->port_out_types);
6846 TAILQ_INIT(&pipeline->ports_out);
6847 TAILQ_INIT(&pipeline->extern_types);
6848 TAILQ_INIT(&pipeline->extern_objs);
6849 TAILQ_INIT(&pipeline->extern_funcs);
6850 TAILQ_INIT(&pipeline->headers);
6851 TAILQ_INIT(&pipeline->actions);
6852 TAILQ_INIT(&pipeline->table_types);
6853 TAILQ_INIT(&pipeline->tables);
6854
6855 pipeline->n_structs = 1; /* Struct 0 is reserved for action_data. */
6856 pipeline->numa_node = numa_node;
6857
6858 *p = pipeline;
6859 return 0;
6860 }
6861
void
rte_swx_pipeline_free(struct rte_swx_pipeline *p)
{
	/* Free all pipeline resources. NULL input is a no-op. */
	if (!p)
		return;

	free(p->instructions);

	/* Per-module teardown, in the reverse of the build order used by
	 * rte_swx_pipeline_build(); keep this ordering intact.
	 */
	table_state_free(p);
	table_free(p);
	action_free(p);
	metadata_free(p);
	header_free(p);
	extern_func_free(p);
	extern_obj_free(p);
	port_out_free(p);
	port_in_free(p);
	struct_free(p);

	free(p);
}
6883
6884 int
rte_swx_pipeline_instructions_config(struct rte_swx_pipeline * p,const char ** instructions,uint32_t n_instructions)6885 rte_swx_pipeline_instructions_config(struct rte_swx_pipeline *p,
6886 const char **instructions,
6887 uint32_t n_instructions)
6888 {
6889 int err;
6890 uint32_t i;
6891
6892 err = instruction_config(p, NULL, instructions, n_instructions);
6893 if (err)
6894 return err;
6895
6896 /* Thread instruction pointer reset. */
6897 for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
6898 struct thread *t = &p->threads[i];
6899
6900 thread_ip_reset(p, t);
6901 }
6902
6903 return 0;
6904 }
6905
6906 int
rte_swx_pipeline_build(struct rte_swx_pipeline * p)6907 rte_swx_pipeline_build(struct rte_swx_pipeline *p)
6908 {
6909 int status;
6910
6911 CHECK(p, EINVAL);
6912 CHECK(p->build_done == 0, EEXIST);
6913
6914 status = port_in_build(p);
6915 if (status)
6916 goto error;
6917
6918 status = port_out_build(p);
6919 if (status)
6920 goto error;
6921
6922 status = struct_build(p);
6923 if (status)
6924 goto error;
6925
6926 status = extern_obj_build(p);
6927 if (status)
6928 goto error;
6929
6930 status = extern_func_build(p);
6931 if (status)
6932 goto error;
6933
6934 status = header_build(p);
6935 if (status)
6936 goto error;
6937
6938 status = metadata_build(p);
6939 if (status)
6940 goto error;
6941
6942 status = action_build(p);
6943 if (status)
6944 goto error;
6945
6946 status = table_build(p);
6947 if (status)
6948 goto error;
6949
6950 status = table_state_build(p);
6951 if (status)
6952 goto error;
6953
6954 p->build_done = 1;
6955 return 0;
6956
6957 error:
6958 table_state_build_free(p);
6959 table_build_free(p);
6960 action_build_free(p);
6961 metadata_build_free(p);
6962 header_build_free(p);
6963 extern_func_build_free(p);
6964 extern_obj_build_free(p);
6965 port_out_build_free(p);
6966 port_in_build_free(p);
6967 struct_build_free(p);
6968
6969 return status;
6970 }
6971
void
rte_swx_pipeline_run(struct rte_swx_pipeline *p, uint32_t n_instructions)
{
	uint32_t n;

	/* Execute exactly n_instructions pipeline instructions. */
	for (n = n_instructions; n; n--)
		instr_exec(p);
}
6980
6981 void
rte_swx_pipeline_flush(struct rte_swx_pipeline * p)6982 rte_swx_pipeline_flush(struct rte_swx_pipeline *p)
6983 {
6984 uint32_t i;
6985
6986 for (i = 0; i < p->n_ports_out; i++) {
6987 struct port_out_runtime *port = &p->out[i];
6988
6989 if (port->flush)
6990 port->flush(port->obj);
6991 }
6992 }
6993
6994 /*
6995 * Control.
6996 */
6997 int
rte_swx_ctl_pipeline_info_get(struct rte_swx_pipeline * p,struct rte_swx_ctl_pipeline_info * pipeline)6998 rte_swx_ctl_pipeline_info_get(struct rte_swx_pipeline *p,
6999 struct rte_swx_ctl_pipeline_info *pipeline)
7000 {
7001 struct action *action;
7002 struct table *table;
7003 uint32_t n_actions = 0, n_tables = 0;
7004
7005 if (!p || !pipeline)
7006 return -EINVAL;
7007
7008 TAILQ_FOREACH(action, &p->actions, node)
7009 n_actions++;
7010
7011 TAILQ_FOREACH(table, &p->tables, node)
7012 n_tables++;
7013
7014 pipeline->n_ports_in = p->n_ports_in;
7015 pipeline->n_ports_out = p->n_ports_out;
7016 pipeline->n_actions = n_actions;
7017 pipeline->n_tables = n_tables;
7018
7019 return 0;
7020 }
7021
7022 int
rte_swx_ctl_pipeline_numa_node_get(struct rte_swx_pipeline * p,int * numa_node)7023 rte_swx_ctl_pipeline_numa_node_get(struct rte_swx_pipeline *p, int *numa_node)
7024 {
7025 if (!p || !numa_node)
7026 return -EINVAL;
7027
7028 *numa_node = p->numa_node;
7029 return 0;
7030 }
7031
7032 int
rte_swx_ctl_action_info_get(struct rte_swx_pipeline * p,uint32_t action_id,struct rte_swx_ctl_action_info * action)7033 rte_swx_ctl_action_info_get(struct rte_swx_pipeline *p,
7034 uint32_t action_id,
7035 struct rte_swx_ctl_action_info *action)
7036 {
7037 struct action *a = NULL;
7038
7039 if (!p || (action_id >= p->n_actions) || !action)
7040 return -EINVAL;
7041
7042 a = action_find_by_id(p, action_id);
7043 if (!a)
7044 return -EINVAL;
7045
7046 strcpy(action->name, a->name);
7047 action->n_args = a->st ? a->st->n_fields : 0;
7048 return 0;
7049 }
7050
7051 int
rte_swx_ctl_action_arg_info_get(struct rte_swx_pipeline * p,uint32_t action_id,uint32_t action_arg_id,struct rte_swx_ctl_action_arg_info * action_arg)7052 rte_swx_ctl_action_arg_info_get(struct rte_swx_pipeline *p,
7053 uint32_t action_id,
7054 uint32_t action_arg_id,
7055 struct rte_swx_ctl_action_arg_info *action_arg)
7056 {
7057 struct action *a = NULL;
7058 struct field *arg = NULL;
7059
7060 if (!p || (action_id >= p->n_actions) || !action_arg)
7061 return -EINVAL;
7062
7063 a = action_find_by_id(p, action_id);
7064 if (!a || !a->st || (action_arg_id >= a->st->n_fields))
7065 return -EINVAL;
7066
7067 arg = &a->st->fields[action_arg_id];
7068 strcpy(action_arg->name, arg->name);
7069 action_arg->n_bits = arg->n_bits;
7070
7071 return 0;
7072 }
7073
7074 int
rte_swx_ctl_table_info_get(struct rte_swx_pipeline * p,uint32_t table_id,struct rte_swx_ctl_table_info * table)7075 rte_swx_ctl_table_info_get(struct rte_swx_pipeline *p,
7076 uint32_t table_id,
7077 struct rte_swx_ctl_table_info *table)
7078 {
7079 struct table *t = NULL;
7080
7081 if (!p || !table)
7082 return -EINVAL;
7083
7084 t = table_find_by_id(p, table_id);
7085 if (!t)
7086 return -EINVAL;
7087
7088 strcpy(table->name, t->name);
7089 strcpy(table->args, t->args);
7090 table->n_match_fields = t->n_fields;
7091 table->n_actions = t->n_actions;
7092 table->default_action_is_const = t->default_action_is_const;
7093 table->size = t->size;
7094 return 0;
7095 }
7096
7097 int
rte_swx_ctl_table_match_field_info_get(struct rte_swx_pipeline * p,uint32_t table_id,uint32_t match_field_id,struct rte_swx_ctl_table_match_field_info * match_field)7098 rte_swx_ctl_table_match_field_info_get(struct rte_swx_pipeline *p,
7099 uint32_t table_id,
7100 uint32_t match_field_id,
7101 struct rte_swx_ctl_table_match_field_info *match_field)
7102 {
7103 struct table *t;
7104 struct match_field *f;
7105
7106 if (!p || (table_id >= p->n_tables) || !match_field)
7107 return -EINVAL;
7108
7109 t = table_find_by_id(p, table_id);
7110 if (!t || (match_field_id >= t->n_fields))
7111 return -EINVAL;
7112
7113 f = &t->fields[match_field_id];
7114 match_field->match_type = f->match_type;
7115 match_field->is_header = t->is_header;
7116 match_field->n_bits = f->field->n_bits;
7117 match_field->offset = f->field->offset;
7118
7119 return 0;
7120 }
7121
7122 int
rte_swx_ctl_table_action_info_get(struct rte_swx_pipeline * p,uint32_t table_id,uint32_t table_action_id,struct rte_swx_ctl_table_action_info * table_action)7123 rte_swx_ctl_table_action_info_get(struct rte_swx_pipeline *p,
7124 uint32_t table_id,
7125 uint32_t table_action_id,
7126 struct rte_swx_ctl_table_action_info *table_action)
7127 {
7128 struct table *t;
7129
7130 if (!p || (table_id >= p->n_tables) || !table_action)
7131 return -EINVAL;
7132
7133 t = table_find_by_id(p, table_id);
7134 if (!t || (table_action_id >= t->n_actions))
7135 return -EINVAL;
7136
7137 table_action->action_id = t->actions[table_action_id]->id;
7138
7139 return 0;
7140 }
7141
7142 int
rte_swx_ctl_table_ops_get(struct rte_swx_pipeline * p,uint32_t table_id,struct rte_swx_table_ops * table_ops,int * is_stub)7143 rte_swx_ctl_table_ops_get(struct rte_swx_pipeline *p,
7144 uint32_t table_id,
7145 struct rte_swx_table_ops *table_ops,
7146 int *is_stub)
7147 {
7148 struct table *t;
7149
7150 if (!p || (table_id >= p->n_tables))
7151 return -EINVAL;
7152
7153 t = table_find_by_id(p, table_id);
7154 if (!t)
7155 return -EINVAL;
7156
7157 if (t->type) {
7158 if (table_ops)
7159 memcpy(table_ops, &t->type->ops, sizeof(*table_ops));
7160 *is_stub = 0;
7161 } else {
7162 *is_stub = 1;
7163 }
7164
7165 return 0;
7166 }
7167
7168 int
rte_swx_pipeline_table_state_get(struct rte_swx_pipeline * p,struct rte_swx_table_state ** table_state)7169 rte_swx_pipeline_table_state_get(struct rte_swx_pipeline *p,
7170 struct rte_swx_table_state **table_state)
7171 {
7172 if (!p || !table_state || !p->build_done)
7173 return -EINVAL;
7174
7175 *table_state = p->table_state;
7176 return 0;
7177 }
7178
7179 int
rte_swx_pipeline_table_state_set(struct rte_swx_pipeline * p,struct rte_swx_table_state * table_state)7180 rte_swx_pipeline_table_state_set(struct rte_swx_pipeline *p,
7181 struct rte_swx_table_state *table_state)
7182 {
7183 if (!p || !table_state || !p->build_done)
7184 return -EINVAL;
7185
7186 p->table_state = table_state;
7187 return 0;
7188 }
7189
7190 int
rte_swx_ctl_pipeline_port_in_stats_read(struct rte_swx_pipeline * p,uint32_t port_id,struct rte_swx_port_in_stats * stats)7191 rte_swx_ctl_pipeline_port_in_stats_read(struct rte_swx_pipeline *p,
7192 uint32_t port_id,
7193 struct rte_swx_port_in_stats *stats)
7194 {
7195 struct port_in *port;
7196
7197 if (!p || !stats)
7198 return -EINVAL;
7199
7200 port = port_in_find(p, port_id);
7201 if (!port)
7202 return -EINVAL;
7203
7204 port->type->ops.stats_read(port->obj, stats);
7205 return 0;
7206 }
7207
7208 int
rte_swx_ctl_pipeline_port_out_stats_read(struct rte_swx_pipeline * p,uint32_t port_id,struct rte_swx_port_out_stats * stats)7209 rte_swx_ctl_pipeline_port_out_stats_read(struct rte_swx_pipeline *p,
7210 uint32_t port_id,
7211 struct rte_swx_port_out_stats *stats)
7212 {
7213 struct port_out *port;
7214
7215 if (!p || !stats)
7216 return -EINVAL;
7217
7218 port = port_out_find(p, port_id);
7219 if (!port)
7220 return -EINVAL;
7221
7222 port->type->ops.stats_read(port->obj, stats);
7223 return 0;
7224 }
7225