1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright (C) 2020 Marvell International Ltd.
3 */
4
5 #include <stdio.h>
6
7 #include <rte_common.h>
8 #include <rte_flow.h>
9 #include <rte_ip.h>
10
11 #include "flow.h"
12 #include "ipsec-secgw.h"
13 #include "parser.h"
14
/* Maximum number of flow rules the static table below can hold. */
#define FLOW_RULES_MAX 128

/*
 * One user-configured flow rule: an IPv4 or IPv6 match pattern plus the
 * ethdev port it applies to and the Rx queue matching traffic is steered to.
 * The rte_flow handle is filled in by flow_init_single() at init time.
 */
struct flow_rule_entry {
	uint8_t is_ipv4;	/* non-zero: use .ipv4 below, otherwise .ipv6 */
	RTE_STD_C11
	union {
		struct {
			struct rte_flow_item_ipv4 spec;
			struct rte_flow_item_ipv4 mask;
		} ipv4;
		struct {
			struct rte_flow_item_ipv6 spec;
			struct rte_flow_item_ipv6 mask;
		} ipv6;
	};
	uint16_t port;		/* ethdev port id the rule is installed on */
	uint16_t queue;		/* Rx queue index for matching packets */
	struct rte_flow *flow;	/* rte_flow_create() handle; NULL if not installed */
} flow_rule_tbl[FLOW_RULES_MAX];

/* Number of valid entries in flow_rule_tbl. */
int nb_flow_rule;
36
37 static void
ipv4_hdr_print(struct rte_ipv4_hdr * hdr)38 ipv4_hdr_print(struct rte_ipv4_hdr *hdr)
39 {
40 char a, b, c, d;
41
42 uint32_t_to_char(rte_bswap32(hdr->src_addr), &a, &b, &c, &d);
43 printf("src: %3hhu.%3hhu.%3hhu.%3hhu \t", a, b, c, d);
44
45 uint32_t_to_char(rte_bswap32(hdr->dst_addr), &a, &b, &c, &d);
46 printf("dst: %3hhu.%3hhu.%3hhu.%3hhu", a, b, c, d);
47 }
48
49 static int
ipv4_addr_cpy(rte_be32_t * spec,rte_be32_t * mask,char * token,struct parse_status * status)50 ipv4_addr_cpy(rte_be32_t *spec, rte_be32_t *mask, char *token,
51 struct parse_status *status)
52 {
53 struct in_addr ip;
54 uint32_t depth;
55
56 APP_CHECK(parse_ipv4_addr(token, &ip, &depth) == 0, status,
57 "unrecognized input \"%s\", expect valid ipv4 addr", token);
58 if (status->status < 0)
59 return -1;
60
61 if (depth > 32)
62 return -1;
63
64 memcpy(mask, &rte_flow_item_ipv4_mask.hdr.src_addr, sizeof(ip));
65
66 *spec = ip.s_addr;
67 if (depth < 32)
68 *mask = *mask << (32-depth);
69
70 return 0;
71 }
72
73 static void
ipv6_hdr_print(struct rte_ipv6_hdr * hdr)74 ipv6_hdr_print(struct rte_ipv6_hdr *hdr)
75 {
76 uint8_t *addr;
77
78 addr = hdr->src_addr;
79 printf("src: %4hx:%4hx:%4hx:%4hx:%4hx:%4hx:%4hx:%4hx \t",
80 (uint16_t)((addr[0] << 8) | addr[1]),
81 (uint16_t)((addr[2] << 8) | addr[3]),
82 (uint16_t)((addr[4] << 8) | addr[5]),
83 (uint16_t)((addr[6] << 8) | addr[7]),
84 (uint16_t)((addr[8] << 8) | addr[9]),
85 (uint16_t)((addr[10] << 8) | addr[11]),
86 (uint16_t)((addr[12] << 8) | addr[13]),
87 (uint16_t)((addr[14] << 8) | addr[15]));
88
89 addr = hdr->dst_addr;
90 printf("dst: %4hx:%4hx:%4hx:%4hx:%4hx:%4hx:%4hx:%4hx",
91 (uint16_t)((addr[0] << 8) | addr[1]),
92 (uint16_t)((addr[2] << 8) | addr[3]),
93 (uint16_t)((addr[4] << 8) | addr[5]),
94 (uint16_t)((addr[6] << 8) | addr[7]),
95 (uint16_t)((addr[8] << 8) | addr[9]),
96 (uint16_t)((addr[10] << 8) | addr[11]),
97 (uint16_t)((addr[12] << 8) | addr[13]),
98 (uint16_t)((addr[14] << 8) | addr[15]));
99 }
100
101 static int
ipv6_addr_cpy(uint8_t * spec,uint8_t * mask,char * token,struct parse_status * status)102 ipv6_addr_cpy(uint8_t *spec, uint8_t *mask, char *token,
103 struct parse_status *status)
104 {
105 struct in6_addr ip;
106 uint32_t depth, i;
107
108 APP_CHECK(parse_ipv6_addr(token, &ip, &depth) == 0, status,
109 "unrecognized input \"%s\", expect valid ipv6 address", token);
110 if (status->status < 0)
111 return -1;
112
113 memcpy(mask, &rte_flow_item_ipv6_mask.hdr.src_addr, sizeof(ip));
114 memcpy(spec, ip.s6_addr, sizeof(struct in6_addr));
115
116 for (i = 0; i < depth && (i%8 <= sizeof(struct in6_addr)); i++)
117 mask[i/8] &= ~(1 << (7-i%8));
118
119 return 0;
120 }
121
122 void
parse_flow_tokens(char ** tokens,uint32_t n_tokens,struct parse_status * status)123 parse_flow_tokens(char **tokens, uint32_t n_tokens,
124 struct parse_status *status)
125 {
126 struct flow_rule_entry *rule;
127 uint32_t ti;
128
129 if (nb_flow_rule >= FLOW_RULES_MAX) {
130 printf("Too many flow rules\n");
131 return;
132 }
133
134 rule = &flow_rule_tbl[nb_flow_rule];
135 memset(rule, 0, sizeof(*rule));
136
137 if (strcmp(tokens[0], "ipv4") == 0) {
138 rule->is_ipv4 = 1;
139 } else if (strcmp(tokens[0], "ipv6") == 0) {
140 rule->is_ipv4 = 0;
141 } else {
142 APP_CHECK(0, status, "unrecognized input \"%s\"", tokens[0]);
143 return;
144 }
145
146 for (ti = 1; ti < n_tokens; ti++) {
147 if (strcmp(tokens[ti], "src") == 0) {
148 INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
149 if (status->status < 0)
150 return;
151
152 if (rule->is_ipv4) {
153 if (ipv4_addr_cpy(&rule->ipv4.spec.hdr.src_addr,
154 &rule->ipv4.mask.hdr.src_addr,
155 tokens[ti], status))
156 return;
157 } else {
158 if (ipv6_addr_cpy(rule->ipv6.spec.hdr.src_addr,
159 rule->ipv6.mask.hdr.src_addr,
160 tokens[ti], status))
161 return;
162 }
163 }
164 if (strcmp(tokens[ti], "dst") == 0) {
165 INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
166 if (status->status < 0)
167 return;
168
169 if (rule->is_ipv4) {
170 if (ipv4_addr_cpy(&rule->ipv4.spec.hdr.dst_addr,
171 &rule->ipv4.mask.hdr.dst_addr,
172 tokens[ti], status))
173 return;
174 } else {
175 if (ipv6_addr_cpy(rule->ipv6.spec.hdr.dst_addr,
176 rule->ipv6.mask.hdr.dst_addr,
177 tokens[ti], status))
178 return;
179 }
180 }
181
182 if (strcmp(tokens[ti], "port") == 0) {
183 INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
184 if (status->status < 0)
185 return;
186 APP_CHECK_TOKEN_IS_NUM(tokens, ti, status);
187 if (status->status < 0)
188 return;
189
190 rule->port = atoi(tokens[ti]);
191
192 INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
193 if (status->status < 0)
194 return;
195 APP_CHECK_TOKEN_IS_NUM(tokens, ti, status);
196 if (status->status < 0)
197 return;
198
199 rule->queue = atoi(tokens[ti]);
200 }
201 }
202
203 nb_flow_rule++;
204 }
205
206 #define MAX_RTE_FLOW_PATTERN (3)
207 #define MAX_RTE_FLOW_ACTIONS (2)
208
209 static void
flow_init_single(struct flow_rule_entry * rule)210 flow_init_single(struct flow_rule_entry *rule)
211 {
212 struct rte_flow_item pattern[MAX_RTE_FLOW_PATTERN] = {};
213 struct rte_flow_action action[MAX_RTE_FLOW_ACTIONS] = {};
214 struct rte_flow_attr attr = {};
215 struct rte_flow_error err;
216 int ret;
217
218 attr.egress = 0;
219 attr.ingress = 1;
220
221 action[0].type = RTE_FLOW_ACTION_TYPE_QUEUE;
222 action[0].conf = &(struct rte_flow_action_queue) {
223 .index = rule->queue,
224 };
225 action[1].type = RTE_FLOW_ACTION_TYPE_END;
226
227 pattern[0].type = RTE_FLOW_ITEM_TYPE_ETH;
228
229 if (rule->is_ipv4) {
230 pattern[1].type = RTE_FLOW_ITEM_TYPE_IPV4;
231 pattern[1].spec = &rule->ipv4.spec;
232 pattern[1].mask = &rule->ipv4.mask;
233 } else {
234 pattern[1].type = RTE_FLOW_ITEM_TYPE_IPV6;
235 pattern[1].spec = &rule->ipv6.spec;
236 pattern[1].mask = &rule->ipv6.mask;
237 }
238
239 pattern[2].type = RTE_FLOW_ITEM_TYPE_END;
240
241 ret = rte_flow_validate(rule->port, &attr, pattern, action, &err);
242 if (ret < 0) {
243 RTE_LOG(ERR, IPSEC, "Flow validation failed %s\n", err.message);
244 return;
245 }
246
247 rule->flow = rte_flow_create(rule->port, &attr, pattern, action, &err);
248 if (rule->flow == NULL)
249 RTE_LOG(ERR, IPSEC, "Flow creation return %s\n", err.message);
250 }
251
252 void
flow_init(void)253 flow_init(void)
254 {
255 struct flow_rule_entry *rule;
256 int i;
257
258 for (i = 0; i < nb_flow_rule; i++) {
259 rule = &flow_rule_tbl[i];
260 flow_init_single(rule);
261 }
262
263 for (i = 0; i < nb_flow_rule; i++) {
264 rule = &flow_rule_tbl[i];
265 if (rule->is_ipv4) {
266 printf("Flow #%3d: spec ipv4 ", i);
267 ipv4_hdr_print(&rule->ipv4.spec.hdr);
268 printf("\n");
269 printf(" mask ipv4 ");
270 ipv4_hdr_print(&rule->ipv4.mask.hdr);
271 } else {
272 printf("Flow #%3d: spec ipv6 ", i);
273 ipv6_hdr_print(&rule->ipv6.spec.hdr);
274 printf("\n");
275 printf(" mask ipv6 ");
276 ipv6_hdr_print(&rule->ipv6.mask.hdr);
277 }
278
279 printf("\tPort: %d, Queue: %d", rule->port, rule->queue);
280
281 if (rule->flow == NULL)
282 printf(" [UNSUPPORTED]");
283 printf("\n");
284 }
285 }
286