1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2018 Chelsio Communications.
3 * All rights reserved.
4 */
5 #include "base/common.h"
6 #include "cxgbe_flow.h"
7
8 #define __CXGBE_FILL_FS(__v, __m, fs, elem, e) \
9 do { \
10 if ((fs)->mask.elem && ((fs)->val.elem != (__v))) \
11 return rte_flow_error_set(e, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, \
12 NULL, "Redefined match item with" \
13 " different values found"); \
14 (fs)->val.elem = (__v); \
15 (fs)->mask.elem = (__m); \
16 } while (0)
17
18 #define __CXGBE_FILL_FS_MEMCPY(__v, __m, fs, elem) \
19 do { \
20 memcpy(&(fs)->val.elem, &(__v), sizeof(__v)); \
21 memcpy(&(fs)->mask.elem, &(__m), sizeof(__m)); \
22 } while (0)
23
24 #define CXGBE_FILL_FS(v, m, elem) \
25 __CXGBE_FILL_FS(v, m, fs, elem, e)
26
27 #define CXGBE_FILL_FS_MEMCPY(v, m, elem) \
28 __CXGBE_FILL_FS_MEMCPY(v, m, fs, elem)
29
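/* Sanity-check a single pattern item: a mask or last given without a spec is
 * invalid per the rte_flow API, and ranges (last) are not supported by this
 * PMD at all.
 */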
30 static int
31 cxgbe_validate_item(const struct rte_flow_item *i, struct rte_flow_error *e)
32 {
33 /* rte_flow specification does not allow it. */
34 if (!i->spec && (i->mask || i->last))
35 return rte_flow_error_set(e, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
36 i, "last or mask given without spec");
37 /*
38 * We don't support it.
39 * Although we could accept 'last' values that are all zeros, or last == spec,
40 * that would give the user no additional functionality and would only
41 * increase the complexity on our side.
42 */
43 if (i->last)
44 return rte_flow_error_set(e, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
45 i, "last is not supported by chelsio pmd");
46 return 0;
47 }
48
49 /**
50 * Apart from the 4-tuple IPv4/IPv6 - TCP/UDP information,
51 * there are only 40 bits available to store match fields.
52 * So, to save space, optimize filter spec for some common
53 * known fields that hardware can parse against incoming
54 * packets automatically.
55 */
56 static void
57 cxgbe_tweak_filter_spec(struct adapter *adap,
58 struct ch_filter_specification *fs)
59 {
60 /* Save the 16-bit ethertype field space by setting corresponding
61 * 1-bit flags in the filter spec for common known ethertypes.
62 * When hardware sees these flags, it automatically infers and
63 * matches incoming packets against the corresponding ethertype.
64 */
65 if (fs->mask.ethtype == 0xffff) {
66 switch (fs->val.ethtype) {
67 case RTE_ETHER_TYPE_IPV4:
68 if (adap->params.tp.ethertype_shift < 0) {
69 fs->type = FILTER_TYPE_IPV4;
70 fs->val.ethtype = 0;
71 fs->mask.ethtype = 0;
72 }
73 break;
74 case RTE_ETHER_TYPE_IPV6:
75 if (adap->params.tp.ethertype_shift < 0) {
76 fs->type = FILTER_TYPE_IPV6;
77 fs->val.ethtype = 0;
78 fs->mask.ethtype = 0;
79 }
80 break;
81 case RTE_ETHER_TYPE_VLAN:
82 if (adap->params.tp.ethertype_shift < 0 &&
83 adap->params.tp.vlan_shift >= 0) {
84 fs->val.ivlan_vld = 1;
85 fs->mask.ivlan_vld = 1;
86 fs->val.ethtype = 0;
87 fs->mask.ethtype = 0;
88 }
89 break;
90 case RTE_ETHER_TYPE_QINQ:
91 if (adap->params.tp.ethertype_shift < 0 &&
92 adap->params.tp.vnic_shift >= 0) {
93 fs->val.ovlan_vld = 1;
94 fs->mask.ovlan_vld = 1;
95 fs->val.ethtype = 0;
96 fs->mask.ethtype = 0;
97 }
98 break;
99 default:
100 break;
101 }
102 }
103 }
104
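/* Decide whether this rule can live in the hash (exact-match) filter region.
 * The rule must match the full 4-tuple exactly, and the remaining compressed
 * tuple fields must line up with the configured hash_filter_mask; if so,
 * set fs->cap so the hash region is used instead of the TCAM.
 */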
105 static void
106 cxgbe_fill_filter_region(struct adapter *adap,
107 struct ch_filter_specification *fs)
108 {
109 struct tp_params *tp = &adap->params.tp;
110 u64 hash_filter_mask = tp->hash_filter_mask;
111 u64 ntuple_mask = 0;
112
113 fs->cap = 0;
114
115 if (!is_hashfilter(adap))
116 return;
117
118 if (fs->type) {
119 uint8_t biton[16] = {0xff, 0xff, 0xff, 0xff,
120 0xff, 0xff, 0xff, 0xff,
121 0xff, 0xff, 0xff, 0xff,
122 0xff, 0xff, 0xff, 0xff};
123 uint8_t bitoff[16] = {0};
124
125 if (!memcmp(fs->val.lip, bitoff, sizeof(bitoff)) ||
126 !memcmp(fs->val.fip, bitoff, sizeof(bitoff)) ||
127 memcmp(fs->mask.lip, biton, sizeof(biton)) ||
128 memcmp(fs->mask.fip, biton, sizeof(biton)))
129 return;
130 } else {
131 uint32_t biton = 0xffffffff;
132 uint32_t bitoff = 0x0U;
133
134 if (!memcmp(fs->val.lip, &bitoff, sizeof(bitoff)) ||
135 !memcmp(fs->val.fip, &bitoff, sizeof(bitoff)) ||
136 memcmp(fs->mask.lip, &biton, sizeof(biton)) ||
137 memcmp(fs->mask.fip, &biton, sizeof(biton)))
138 return;
139 }
140
141 if (!fs->val.lport || fs->mask.lport != 0xffff)
142 return;
143 if (!fs->val.fport || fs->mask.fport != 0xffff)
144 return;
145
146 if (tp->protocol_shift >= 0)
147 ntuple_mask |= (u64)fs->mask.proto << tp->protocol_shift;
148 if (tp->ethertype_shift >= 0)
149 ntuple_mask |= (u64)fs->mask.ethtype << tp->ethertype_shift;
150 if (tp->port_shift >= 0)
151 ntuple_mask |= (u64)fs->mask.iport << tp->port_shift;
152 if (tp->macmatch_shift >= 0)
153 ntuple_mask |= (u64)fs->mask.macidx << tp->macmatch_shift;
154 if (tp->vlan_shift >= 0 && fs->mask.ivlan_vld)
155 ntuple_mask |= (u64)(F_FT_VLAN_VLD | fs->mask.ivlan) <<
156 tp->vlan_shift;
157 if (tp->vnic_shift >= 0) {
158 if (fs->mask.ovlan_vld)
159 ntuple_mask |= (u64)(fs->val.ovlan_vld << 16 |
160 fs->mask.ovlan) << tp->vnic_shift;
161 else if (fs->mask.pfvf_vld)
162 ntuple_mask |= (u64)(fs->mask.pfvf_vld << 16 |
163 fs->mask.pf << 13 |
164 fs->mask.vf) << tp->vnic_shift;
165 }
166 if (tp->tos_shift >= 0)
167 ntuple_mask |= (u64)fs->mask.tos << tp->tos_shift;
168
169 if (ntuple_mask != hash_filter_mask)
170 return;
171
172 fs->cap = 1; /* use hash region */
173 }
174
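/* Parse an ETH item: source MAC matching is not supported; the destination
 * MAC and ethertype are copied into the filter spec.
 */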
175 static int
176 ch_rte_parsetype_eth(const void *dmask, const struct rte_flow_item *item,
177 struct ch_filter_specification *fs,
178 struct rte_flow_error *e)
179 {
180 const struct rte_flow_item_eth *spec = item->spec;
181 const struct rte_flow_item_eth *umask = item->mask;
182 const struct rte_flow_item_eth *mask;
183
184 /* If the user has not given a mask, use the Chelsio-supported default mask. */
185 mask = umask ? umask : (const struct rte_flow_item_eth *)dmask;
186
187 if (!spec)
188 return 0;
189
190 /* We don't support SRC_MAC filtering */
191 if (!rte_is_zero_ether_addr(&spec->src) ||
192 (umask && !rte_is_zero_ether_addr(&umask->src)))
193 return rte_flow_error_set(e, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
194 item,
195 "src mac filtering not supported");
196
197 if (!rte_is_zero_ether_addr(&spec->dst) ||
198 (umask && !rte_is_zero_ether_addr(&umask->dst))) {
199 CXGBE_FILL_FS(0, 0x1ff, macidx);
200 CXGBE_FILL_FS_MEMCPY(spec->dst.addr_bytes, mask->dst.addr_bytes,
201 dmac);
202 }
203
204 if (spec->type || (umask && umask->type))
205 CXGBE_FILL_FS(be16_to_cpu(spec->type),
206 be16_to_cpu(mask->type), ethtype);
207
208 return 0;
209 }
210
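/* Parse a PHY_PORT item: match on the ingress physical port index. */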
211 static int
212 ch_rte_parsetype_port(const void *dmask, const struct rte_flow_item *item,
213 struct ch_filter_specification *fs,
214 struct rte_flow_error *e)
215 {
216 const struct rte_flow_item_phy_port *val = item->spec;
217 const struct rte_flow_item_phy_port *umask = item->mask;
218 const struct rte_flow_item_phy_port *mask;
219
220 mask = umask ? umask : (const struct rte_flow_item_phy_port *)dmask;
221
222 if (!val)
223 return 0; /* Wildcard, match all physical ports */
224
225 if (val->index > 0x7)
226 return rte_flow_error_set(e, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
227 item,
228 "port index up to 0x7 is supported");
229
230 if (val->index || (umask && umask->index))
231 CXGBE_FILL_FS(val->index, mask->index, iport);
232
233 return 0;
234 }
235
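/* Parse a VLAN item: set inner/outer VLAN matching depending on whether the
 * outer ethertype indicates single VLAN or QinQ, and let the inner ethertype
 * replace the outer one in the filter spec.
 */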
236 static int
237 ch_rte_parsetype_vlan(const void *dmask, const struct rte_flow_item *item,
238 struct ch_filter_specification *fs,
239 struct rte_flow_error *e)
240 {
241 const struct rte_flow_item_vlan *spec = item->spec;
242 const struct rte_flow_item_vlan *umask = item->mask;
243 const struct rte_flow_item_vlan *mask;
244
245 /* If the user has not given a mask, use the Chelsio-supported default mask. */
246 mask = umask ? umask : (const struct rte_flow_item_vlan *)dmask;
247
248 /* If ethertype is already set and is not VLAN (0x8100) or
249 * QINQ(0x88A8), then don't proceed further. Otherwise,
250 * reset the outer ethertype, so that it can be replaced by
251 * innermost ethertype. Note that hardware will automatically
252 * match against VLAN or QINQ packets, based on 'ivlan_vld' or
253 * 'ovlan_vld' bit set in Chelsio filter spec, respectively.
254 */
255 if (fs->mask.ethtype) {
256 if (fs->val.ethtype != RTE_ETHER_TYPE_VLAN &&
257 fs->val.ethtype != RTE_ETHER_TYPE_QINQ)
258 return rte_flow_error_set(e, EINVAL,
259 RTE_FLOW_ERROR_TYPE_ITEM,
260 item,
261 "Ethertype must be 0x8100 or 0x88a8");
262 }
263
264 if (fs->val.ethtype == RTE_ETHER_TYPE_QINQ) {
265 CXGBE_FILL_FS(1, 1, ovlan_vld);
266 if (spec) {
267 if (spec->tci || (umask && umask->tci))
268 CXGBE_FILL_FS(be16_to_cpu(spec->tci),
269 be16_to_cpu(mask->tci), ovlan);
270 fs->mask.ethtype = 0;
271 fs->val.ethtype = 0;
272 }
273 } else {
274 CXGBE_FILL_FS(1, 1, ivlan_vld);
275 if (spec) {
276 if (spec->tci || (umask && umask->tci))
277 CXGBE_FILL_FS(be16_to_cpu(spec->tci),
278 be16_to_cpu(mask->tci), ivlan);
279 fs->mask.ethtype = 0;
280 fs->val.ethtype = 0;
281 }
282 }
283
284 if (spec && (spec->inner_type || (umask && umask->inner_type)))
285 CXGBE_FILL_FS(be16_to_cpu(spec->inner_type),
286 be16_to_cpu(mask->inner_type), ethtype);
287
288 return 0;
289 }
290
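/* Parse a PF item: match traffic destined to this physical function. */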
291 static int
292 ch_rte_parsetype_pf(const void *dmask __rte_unused,
293 const struct rte_flow_item *item __rte_unused,
294 struct ch_filter_specification *fs,
295 struct rte_flow_error *e __rte_unused)
296 {
297 struct rte_flow *flow = (struct rte_flow *)fs->private;
298 struct rte_eth_dev *dev = flow->dev;
299 struct adapter *adap = ethdev2adap(dev);
300
301 CXGBE_FILL_FS(1, 1, pfvf_vld);
302
303 CXGBE_FILL_FS(adap->pf, 0x7, pf);
304 return 0;
305 }
306
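/* Parse a VF item: match traffic destined to the given virtual function. */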
307 static int
308 ch_rte_parsetype_vf(const void *dmask, const struct rte_flow_item *item,
309 struct ch_filter_specification *fs,
310 struct rte_flow_error *e)
311 {
312 const struct rte_flow_item_vf *umask = item->mask;
313 const struct rte_flow_item_vf *val = item->spec;
314 const struct rte_flow_item_vf *mask;
315
316 /* If the user has not given a mask, use the Chelsio-supported default mask. */
317 mask = umask ? umask : (const struct rte_flow_item_vf *)dmask;
318
319 CXGBE_FILL_FS(1, 1, pfvf_vld);
320
321 if (!val)
322 return 0; /* Wildcard, match all VFs */
323
324 if (val->id > UCHAR_MAX)
325 return rte_flow_error_set(e, EINVAL,
326 RTE_FLOW_ERROR_TYPE_ITEM,
327 item,
328 "VF ID > MAX(255)");
329
330 if (val->id || (umask && umask->id))
331 CXGBE_FILL_FS(val->id, mask->id, vf);
332
333 return 0;
334 }
335
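/* Parse a UDP item: only source/destination port matching is supported. */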
336 static int
337 ch_rte_parsetype_udp(const void *dmask, const struct rte_flow_item *item,
338 struct ch_filter_specification *fs,
339 struct rte_flow_error *e)
340 {
341 const struct rte_flow_item_udp *val = item->spec;
342 const struct rte_flow_item_udp *umask = item->mask;
343 const struct rte_flow_item_udp *mask;
344
345 mask = umask ? umask : (const struct rte_flow_item_udp *)dmask;
346
347 if (mask->hdr.dgram_len || mask->hdr.dgram_cksum)
348 return rte_flow_error_set(e, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
349 item,
350 "udp: only src/dst port supported");
351
352 CXGBE_FILL_FS(IPPROTO_UDP, 0xff, proto);
353 if (!val)
354 return 0;
355
356 if (val->hdr.src_port || (umask && umask->hdr.src_port))
357 CXGBE_FILL_FS(be16_to_cpu(val->hdr.src_port),
358 be16_to_cpu(mask->hdr.src_port), fport);
359
360 if (val->hdr.dst_port || (umask && umask->hdr.dst_port))
361 CXGBE_FILL_FS(be16_to_cpu(val->hdr.dst_port),
362 be16_to_cpu(mask->hdr.dst_port), lport);
363
364 return 0;
365 }
366
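/* Parse a TCP item: only source/destination port matching is supported. */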
367 static int
368 ch_rte_parsetype_tcp(const void *dmask, const struct rte_flow_item *item,
369 struct ch_filter_specification *fs,
370 struct rte_flow_error *e)
371 {
372 const struct rte_flow_item_tcp *val = item->spec;
373 const struct rte_flow_item_tcp *umask = item->mask;
374 const struct rte_flow_item_tcp *mask;
375
376 mask = umask ? umask : (const struct rte_flow_item_tcp *)dmask;
377
378 if (mask->hdr.sent_seq || mask->hdr.recv_ack || mask->hdr.data_off ||
379 mask->hdr.tcp_flags || mask->hdr.rx_win || mask->hdr.cksum ||
380 mask->hdr.tcp_urp)
381 return rte_flow_error_set(e, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
382 item,
383 "tcp: only src/dst port supported");
384
385 CXGBE_FILL_FS(IPPROTO_TCP, 0xff, proto);
386 if (!val)
387 return 0;
388
389 if (val->hdr.src_port || (umask && umask->hdr.src_port))
390 CXGBE_FILL_FS(be16_to_cpu(val->hdr.src_port),
391 be16_to_cpu(mask->hdr.src_port), fport);
392
393 if (val->hdr.dst_port || (umask && umask->hdr.dst_port))
394 CXGBE_FILL_FS(be16_to_cpu(val->hdr.dst_port),
395 be16_to_cpu(mask->hdr.dst_port), lport);
396
397 return 0;
398 }
399
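/* Parse an IPv4 item: protocol, addresses, and TOS can be matched;
 * TTL matching is not supported.
 */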
400 static int
401 ch_rte_parsetype_ipv4(const void *dmask, const struct rte_flow_item *item,
402 struct ch_filter_specification *fs,
403 struct rte_flow_error *e)
404 {
405 const struct rte_flow_item_ipv4 *val = item->spec;
406 const struct rte_flow_item_ipv4 *umask = item->mask;
407 const struct rte_flow_item_ipv4 *mask;
408
409 mask = umask ? umask : (const struct rte_flow_item_ipv4 *)dmask;
410
411 if (mask->hdr.time_to_live)
412 return rte_flow_error_set(e, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
413 item, "ttl is not supported");
414
415 if (fs->mask.ethtype &&
416 (fs->val.ethtype != RTE_ETHER_TYPE_IPV4))
417 return rte_flow_error_set(e, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
418 item,
419 "Couldn't find IPv4 ethertype");
420 fs->type = FILTER_TYPE_IPV4;
421 if (!val)
422 return 0; /* ipv4 wild card */
423
424 if (val->hdr.next_proto_id || (umask && umask->hdr.next_proto_id))
425 CXGBE_FILL_FS(val->hdr.next_proto_id, mask->hdr.next_proto_id,
426 proto);
427
428 if (val->hdr.dst_addr || (umask && umask->hdr.dst_addr))
429 CXGBE_FILL_FS_MEMCPY(val->hdr.dst_addr, mask->hdr.dst_addr,
430 lip);
431
432 if (val->hdr.src_addr || (umask && umask->hdr.src_addr))
433 CXGBE_FILL_FS_MEMCPY(val->hdr.src_addr, mask->hdr.src_addr,
434 fip);
435
436 if (val->hdr.type_of_service || (umask && umask->hdr.type_of_service))
437 CXGBE_FILL_FS(val->hdr.type_of_service,
438 mask->hdr.type_of_service, tos);
439
440 return 0;
441 }
442
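/* Parse an IPv6 item: next-header, addresses, and traffic class can be
 * matched; flow label and hop limit matching are not supported.
 */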
443 static int
444 ch_rte_parsetype_ipv6(const void *dmask, const struct rte_flow_item *item,
445 struct ch_filter_specification *fs,
446 struct rte_flow_error *e)
447 {
448 const struct rte_flow_item_ipv6 *val = item->spec;
449 const struct rte_flow_item_ipv6 *umask = item->mask;
450 const struct rte_flow_item_ipv6 *mask;
451 u32 vtc_flow, vtc_flow_mask;
452 u8 z[16] = { 0 };
453
454 mask = umask ? umask : (const struct rte_flow_item_ipv6 *)dmask;
455
456 vtc_flow_mask = be32_to_cpu(mask->hdr.vtc_flow);
457
458 if (vtc_flow_mask & RTE_IPV6_HDR_FL_MASK ||
459 mask->hdr.payload_len || mask->hdr.hop_limits)
460 return rte_flow_error_set(e, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
461 item,
462 "flow/hop are not supported");
463
464 if (fs->mask.ethtype &&
465 (fs->val.ethtype != RTE_ETHER_TYPE_IPV6))
466 return rte_flow_error_set(e, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
467 item,
468 "Couldn't find IPv6 ethertype");
469 fs->type = FILTER_TYPE_IPV6;
470 if (!val)
471 return 0; /* ipv6 wild card */
472
473 if (val->hdr.proto || (umask && umask->hdr.proto))
474 CXGBE_FILL_FS(val->hdr.proto, mask->hdr.proto, proto);
475
476 vtc_flow = be32_to_cpu(val->hdr.vtc_flow);
477 if (val->hdr.vtc_flow || (umask && umask->hdr.vtc_flow))
478 CXGBE_FILL_FS((vtc_flow & RTE_IPV6_HDR_TC_MASK) >>
479 RTE_IPV6_HDR_TC_SHIFT,
480 (vtc_flow_mask & RTE_IPV6_HDR_TC_MASK) >>
481 RTE_IPV6_HDR_TC_SHIFT,
482 tos);
483
484 if (memcmp(val->hdr.dst_addr, z, sizeof(val->hdr.dst_addr)) ||
485 (umask &&
486 memcmp(umask->hdr.dst_addr, z, sizeof(umask->hdr.dst_addr))))
487 CXGBE_FILL_FS_MEMCPY(val->hdr.dst_addr, mask->hdr.dst_addr,
488 lip);
489
490 if (memcmp(val->hdr.src_addr, z, sizeof(val->hdr.src_addr)) ||
491 (umask &&
492 memcmp(umask->hdr.src_addr, z, sizeof(umask->hdr.src_addr))))
493 CXGBE_FILL_FS_MEMCPY(val->hdr.src_addr, mask->hdr.src_addr,
494 fip);
495
496 return 0;
497 }
498
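/* Parse flow attributes: only ingress rules and group 0 are supported;
 * the priority, if given, selects the filter index (priority - 1).
 */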
499 static int
500 cxgbe_rtef_parse_attr(struct rte_flow *flow, const struct rte_flow_attr *attr,
501 struct rte_flow_error *e)
502 {
503 if (attr->egress)
504 return rte_flow_error_set(e, ENOTSUP, RTE_FLOW_ERROR_TYPE_ATTR,
505 attr, "attribute:<egress> is"
506 " not supported !");
507 if (attr->group > 0)
508 return rte_flow_error_set(e, ENOTSUP, RTE_FLOW_ERROR_TYPE_ATTR,
509 attr, "group parameter is"
510 " not supported.");
511
512 flow->fidx = attr->priority ? attr->priority - 1 : FILTER_ID_MAX;
513
514 return 0;
515 }
516
517 static inline int check_rxq(struct rte_eth_dev *dev, uint16_t rxq)
518 {
519 struct port_info *pi = ethdev2pinfo(dev);
520
521 if (rxq > pi->n_rx_qsets)
522 return -EINVAL;
523 return 0;
524 }
525
526 static int cxgbe_validate_fidxondel(struct filter_entry *f, unsigned int fidx)
527 {
528 struct adapter *adap = ethdev2adap(f->dev);
529 struct ch_filter_specification fs = f->fs;
530 u8 nentries;
531
532 if (fidx >= adap->tids.nftids) {
533 dev_err(adap, "invalid flow index %d.\n", fidx);
534 return -EINVAL;
535 }
536
537 nentries = cxgbe_filter_slots(adap, fs.type);
538 if (!cxgbe_is_filter_set(&adap->tids, fidx, nentries)) {
539 dev_err(adap, "Already free fidx:%d f:%p\n", fidx, f);
540 return -EINVAL;
541 }
542
543 return 0;
544 }
545
546 static int
547 cxgbe_validate_fidxonadd(struct ch_filter_specification *fs,
548 struct adapter *adap, unsigned int fidx)
549 {
550 u8 nentries;
551
552 nentries = cxgbe_filter_slots(adap, fs->type);
553 if (cxgbe_is_filter_set(&adap->tids, fidx, nentries)) {
554 dev_err(adap, "filter index: %d is busy.\n", fidx);
555 return -EBUSY;
556 }
557
558 if (fidx >= adap->tids.nftids) {
559 dev_err(adap, "filter index (%u) >= max(%u)\n",
560 fidx, adap->tids.nftids);
561 return -ERANGE;
562 }
563
564 return 0;
565 }
566
567 static int
568 cxgbe_verify_fidx(struct rte_flow *flow, unsigned int fidx, uint8_t del)
569 {
570 if (flow->fs.cap)
571 return 0; /* Hash filters */
572 return del ? cxgbe_validate_fidxondel(flow->f, fidx) :
573 cxgbe_validate_fidxonadd(&flow->fs,
574 ethdev2adap(flow->dev), fidx);
575 }
576
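/* Pick a filter index: for TCAM filters with no explicit priority, allocate
 * the next free slot(s); otherwise use the requested index.
 */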
577 static int cxgbe_get_fidx(struct rte_flow *flow, unsigned int *fidx)
578 {
579 struct ch_filter_specification *fs = &flow->fs;
580 struct adapter *adap = ethdev2adap(flow->dev);
581
582 /* For TCAM filters, get the next available slot if the default index was specified */
583 if (flow->fidx == FILTER_ID_MAX) {
584 u8 nentries;
585 int idx;
586
587 nentries = cxgbe_filter_slots(adap, fs->type);
588 idx = cxgbe_alloc_ftid(adap, nentries);
589 if (idx < 0) {
590 dev_err(adap, "unable to get a filter index in tcam\n");
591 return -ENOMEM;
592 }
593 *fidx = (unsigned int)idx;
594 } else {
595 *fidx = flow->fidx;
596 }
597
598 return 0;
599 }
600
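/* Return the position of the first item of the given type in the pattern,
 * or -ENOENT if it is not present.
 */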
601 static int
602 cxgbe_get_flow_item_index(const struct rte_flow_item items[], u32 type)
603 {
604 const struct rte_flow_item *i;
605 int j, index = -ENOENT;
606
607 for (i = items, j = 0; i->type != RTE_FLOW_ITEM_TYPE_END; i++, j++) {
608 if (i->type == type) {
609 index = j;
610 break;
611 }
612 }
613
614 return index;
615 }
616
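/* Translate the NAT rewrite bitmap collected from the actions into one of
 * the hardware NAT modes; unsupported combinations are rejected.
 */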
617 static int
618 ch_rte_parse_nat(uint8_t nmode, struct ch_filter_specification *fs)
619 {
620 /* nmode:
621 * BIT_0 = [src_ip], BIT_1 = [dst_ip]
622 * BIT_2 = [src_port], BIT_3 = [dst_port]
623 *
624 * Only the cases below are supported, as per our spec.
625 */
626 switch (nmode) {
627 case 0: /* 0000b */
628 fs->nat_mode = NAT_MODE_NONE;
629 break;
630 case 2: /* 0010b */
631 fs->nat_mode = NAT_MODE_DIP;
632 break;
633 case 5: /* 0101b */
634 fs->nat_mode = NAT_MODE_SIP_SP;
635 break;
636 case 7: /* 0111b */
637 fs->nat_mode = NAT_MODE_DIP_SIP_SP;
638 break;
639 case 10: /* 1010b */
640 fs->nat_mode = NAT_MODE_DIP_DP;
641 break;
642 case 11: /* 1011b */
643 fs->nat_mode = NAT_MODE_DIP_DP_SIP;
644 break;
645 case 14: /* 1110b */
646 fs->nat_mode = NAT_MODE_DIP_DP_SP;
647 break;
648 case 15: /* 1111b */
649 fs->nat_mode = NAT_MODE_ALL;
650 break;
651 default:
652 return -EINVAL;
653 }
654
655 return 0;
656 }
657
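/* Parse one switch-type action (VLAN push/pop/rewrite, port redirect,
 * L2/L3/L4 header rewrite) into the filter spec. Header rewrites also
 * require the corresponding item to be present in the pattern.
 */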
658 static int
659 ch_rte_parse_atype_switch(const struct rte_flow_action *a,
660 const struct rte_flow_item items[],
661 uint8_t *nmode,
662 struct ch_filter_specification *fs,
663 struct rte_flow_error *e)
664 {
665 const struct rte_flow_action_of_set_vlan_vid *vlanid;
666 const struct rte_flow_action_of_set_vlan_pcp *vlanpcp;
667 const struct rte_flow_action_of_push_vlan *pushvlan;
668 const struct rte_flow_action_set_ipv4 *ipv4;
669 const struct rte_flow_action_set_ipv6 *ipv6;
670 const struct rte_flow_action_set_tp *tp_port;
671 const struct rte_flow_action_phy_port *port;
672 const struct rte_flow_action_set_mac *mac;
673 int item_index;
674 u16 tmp_vlan;
675
676 switch (a->type) {
677 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
678 vlanid = (const struct rte_flow_action_of_set_vlan_vid *)
679 a->conf;
680 /* If explicitly asked to push a new VLAN header,
681 * then don't set rewrite mode. Otherwise, the
682 * incoming VLAN packets will get their VLAN fields
683 * rewritten, instead of adding an additional outer
684 * VLAN header.
685 */
686 if (fs->newvlan != VLAN_INSERT)
687 fs->newvlan = VLAN_REWRITE;
688 tmp_vlan = fs->vlan & 0xe000;
689 fs->vlan = (be16_to_cpu(vlanid->vlan_vid) & 0xfff) | tmp_vlan;
690 break;
691 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
692 vlanpcp = (const struct rte_flow_action_of_set_vlan_pcp *)
693 a->conf;
694 /* If explicitly asked to push a new VLAN header,
695 * then don't set rewrite mode. Otherwise, the
696 * incoming VLAN packets will get their VLAN fields
697 * rewritten, instead of adding an additional outer
698 * VLAN header.
699 */
700 if (fs->newvlan != VLAN_INSERT)
701 fs->newvlan = VLAN_REWRITE;
702 tmp_vlan = fs->vlan & 0xfff;
703 fs->vlan = (vlanpcp->vlan_pcp << 13) | tmp_vlan;
704 break;
705 case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
706 pushvlan = (const struct rte_flow_action_of_push_vlan *)
707 a->conf;
708 if (be16_to_cpu(pushvlan->ethertype) != RTE_ETHER_TYPE_VLAN)
709 return rte_flow_error_set(e, EINVAL,
710 RTE_FLOW_ERROR_TYPE_ACTION, a,
711 "only ethertype 0x8100 "
712 "supported for push vlan.");
713 fs->newvlan = VLAN_INSERT;
714 break;
715 case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN:
716 fs->newvlan = VLAN_REMOVE;
717 break;
718 case RTE_FLOW_ACTION_TYPE_PHY_PORT:
719 port = (const struct rte_flow_action_phy_port *)a->conf;
720 fs->eport = port->index;
721 break;
722 case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC:
723 item_index = cxgbe_get_flow_item_index(items,
724 RTE_FLOW_ITEM_TYPE_IPV4);
725 if (item_index < 0)
726 return rte_flow_error_set(e, EINVAL,
727 RTE_FLOW_ERROR_TYPE_ACTION, a,
728 "No RTE_FLOW_ITEM_TYPE_IPV4 "
729 "found.");
730
731 ipv4 = (const struct rte_flow_action_set_ipv4 *)a->conf;
732 memcpy(fs->nat_fip, &ipv4->ipv4_addr, sizeof(ipv4->ipv4_addr));
733 *nmode |= 1 << 0;
734 break;
735 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST:
736 item_index = cxgbe_get_flow_item_index(items,
737 RTE_FLOW_ITEM_TYPE_IPV4);
738 if (item_index < 0)
739 return rte_flow_error_set(e, EINVAL,
740 RTE_FLOW_ERROR_TYPE_ACTION, a,
741 "No RTE_FLOW_ITEM_TYPE_IPV4 "
742 "found.");
743
744 ipv4 = (const struct rte_flow_action_set_ipv4 *)a->conf;
745 memcpy(fs->nat_lip, &ipv4->ipv4_addr, sizeof(ipv4->ipv4_addr));
746 *nmode |= 1 << 1;
747 break;
748 case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC:
749 item_index = cxgbe_get_flow_item_index(items,
750 RTE_FLOW_ITEM_TYPE_IPV6);
751 if (item_index < 0)
752 return rte_flow_error_set(e, EINVAL,
753 RTE_FLOW_ERROR_TYPE_ACTION, a,
754 "No RTE_FLOW_ITEM_TYPE_IPV6 "
755 "found.");
756
757 ipv6 = (const struct rte_flow_action_set_ipv6 *)a->conf;
758 memcpy(fs->nat_fip, ipv6->ipv6_addr, sizeof(ipv6->ipv6_addr));
759 *nmode |= 1 << 0;
760 break;
761 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST:
762 item_index = cxgbe_get_flow_item_index(items,
763 RTE_FLOW_ITEM_TYPE_IPV6);
764 if (item_index < 0)
765 return rte_flow_error_set(e, EINVAL,
766 RTE_FLOW_ERROR_TYPE_ACTION, a,
767 "No RTE_FLOW_ITEM_TYPE_IPV6 "
768 "found.");
769
770 ipv6 = (const struct rte_flow_action_set_ipv6 *)a->conf;
771 memcpy(fs->nat_lip, ipv6->ipv6_addr, sizeof(ipv6->ipv6_addr));
772 *nmode |= 1 << 1;
773 break;
774 case RTE_FLOW_ACTION_TYPE_SET_TP_SRC:
775 item_index = cxgbe_get_flow_item_index(items,
776 RTE_FLOW_ITEM_TYPE_TCP);
777 if (item_index < 0) {
778 item_index =
779 cxgbe_get_flow_item_index(items,
780 RTE_FLOW_ITEM_TYPE_UDP);
781 if (item_index < 0)
782 return rte_flow_error_set(e, EINVAL,
783 RTE_FLOW_ERROR_TYPE_ACTION, a,
784 "No RTE_FLOW_ITEM_TYPE_TCP or "
785 "RTE_FLOW_ITEM_TYPE_UDP found");
786 }
787
788 tp_port = (const struct rte_flow_action_set_tp *)a->conf;
789 fs->nat_fport = be16_to_cpu(tp_port->port);
790 *nmode |= 1 << 2;
791 break;
792 case RTE_FLOW_ACTION_TYPE_SET_TP_DST:
793 item_index = cxgbe_get_flow_item_index(items,
794 RTE_FLOW_ITEM_TYPE_TCP);
795 if (item_index < 0) {
796 item_index =
797 cxgbe_get_flow_item_index(items,
798 RTE_FLOW_ITEM_TYPE_UDP);
799 if (item_index < 0)
800 return rte_flow_error_set(e, EINVAL,
801 RTE_FLOW_ERROR_TYPE_ACTION, a,
802 "No RTE_FLOW_ITEM_TYPE_TCP or "
803 "RTE_FLOW_ITEM_TYPE_UDP found");
804 }
805
806 tp_port = (const struct rte_flow_action_set_tp *)a->conf;
807 fs->nat_lport = be16_to_cpu(tp_port->port);
808 *nmode |= 1 << 3;
809 break;
810 case RTE_FLOW_ACTION_TYPE_MAC_SWAP:
811 item_index = cxgbe_get_flow_item_index(items,
812 RTE_FLOW_ITEM_TYPE_ETH);
813 if (item_index < 0)
814 return rte_flow_error_set(e, EINVAL,
815 RTE_FLOW_ERROR_TYPE_ACTION, a,
816 "No RTE_FLOW_ITEM_TYPE_ETH "
817 "found");
818 fs->swapmac = 1;
819 break;
820 case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC:
821 item_index = cxgbe_get_flow_item_index(items,
822 RTE_FLOW_ITEM_TYPE_ETH);
823 if (item_index < 0)
824 return rte_flow_error_set(e, EINVAL,
825 RTE_FLOW_ERROR_TYPE_ACTION, a,
826 "No RTE_FLOW_ITEM_TYPE_ETH "
827 "found");
828 mac = (const struct rte_flow_action_set_mac *)a->conf;
829
830 fs->newsmac = 1;
831 memcpy(fs->smac, mac->mac_addr, sizeof(fs->smac));
832 break;
833 case RTE_FLOW_ACTION_TYPE_SET_MAC_DST:
834 item_index = cxgbe_get_flow_item_index(items,
835 RTE_FLOW_ITEM_TYPE_ETH);
836 if (item_index < 0)
837 return rte_flow_error_set(e, EINVAL,
838 RTE_FLOW_ERROR_TYPE_ACTION, a,
839 "No RTE_FLOW_ITEM_TYPE_ETH found");
840 mac = (const struct rte_flow_action_set_mac *)a->conf;
841
842 fs->newdmac = 1;
843 memcpy(fs->dmac, mac->mac_addr, sizeof(fs->dmac));
844 break;
845 default:
846 /* We are not supposed to come here */
847 return rte_flow_error_set(e, EINVAL,
848 RTE_FLOW_ERROR_TYPE_ACTION, a,
849 "Action not supported");
850 }
851
852 return 0;
853 }
854
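/* Walk the action list and fill in the filter action: exactly one of
 * pass-to-queue or drop is allowed, while switch (rewrite/redirect)
 * actions may be combined.
 */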
855 static int
856 cxgbe_rtef_parse_actions(struct rte_flow *flow,
857 const struct rte_flow_item items[],
858 const struct rte_flow_action action[],
859 struct rte_flow_error *e)
860 {
861 struct ch_filter_specification *fs = &flow->fs;
862 uint8_t nmode = 0, nat_ipv4 = 0, nat_ipv6 = 0;
863 uint8_t vlan_set_vid = 0, vlan_set_pcp = 0;
864 const struct rte_flow_action_queue *q;
865 const struct rte_flow_action *a;
866 char abit = 0;
867 int ret;
868
869 for (a = action; a->type != RTE_FLOW_ACTION_TYPE_END; a++) {
870 switch (a->type) {
871 case RTE_FLOW_ACTION_TYPE_VOID:
872 continue;
873 case RTE_FLOW_ACTION_TYPE_DROP:
874 if (abit++)
875 return rte_flow_error_set(e, EINVAL,
876 RTE_FLOW_ERROR_TYPE_ACTION, a,
877 "specify only 1 pass/drop");
878 fs->action = FILTER_DROP;
879 break;
880 case RTE_FLOW_ACTION_TYPE_QUEUE:
881 q = (const struct rte_flow_action_queue *)a->conf;
882 if (!q)
883 return rte_flow_error_set(e, EINVAL,
884 RTE_FLOW_ERROR_TYPE_ACTION, q,
885 "specify rx queue index");
886 if (check_rxq(flow->dev, q->index))
887 return rte_flow_error_set(e, EINVAL,
888 RTE_FLOW_ERROR_TYPE_ACTION, q,
889 "Invalid rx queue");
890 if (abit++)
891 return rte_flow_error_set(e, EINVAL,
892 RTE_FLOW_ERROR_TYPE_ACTION, a,
893 "specify only 1 pass/drop");
894 fs->action = FILTER_PASS;
895 fs->dirsteer = 1;
896 fs->iq = q->index;
897 break;
898 case RTE_FLOW_ACTION_TYPE_COUNT:
899 fs->hitcnts = 1;
900 break;
901 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
902 vlan_set_vid++;
903 goto action_switch;
904 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
905 vlan_set_pcp++;
906 goto action_switch;
907 case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
908 case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN:
909 case RTE_FLOW_ACTION_TYPE_PHY_PORT:
910 case RTE_FLOW_ACTION_TYPE_MAC_SWAP:
911 case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC:
912 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST:
913 nat_ipv4++;
914 goto action_switch;
915 case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC:
916 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST:
917 nat_ipv6++;
918 goto action_switch;
919 case RTE_FLOW_ACTION_TYPE_SET_TP_SRC:
920 case RTE_FLOW_ACTION_TYPE_SET_TP_DST:
921 case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC:
922 case RTE_FLOW_ACTION_TYPE_SET_MAC_DST:
923 action_switch:
924 /* We allow multiple switch actions, but switch is
925 * not compatible with either queue or drop
926 */
927 if (abit++ && fs->action != FILTER_SWITCH)
928 return rte_flow_error_set(e, EINVAL,
929 RTE_FLOW_ERROR_TYPE_ACTION, a,
930 "overlapping action specified");
931 if (nat_ipv4 && nat_ipv6)
932 return rte_flow_error_set(e, EINVAL,
933 RTE_FLOW_ERROR_TYPE_ACTION, a,
934 "Can't have one address ipv4 and the"
935 " other ipv6");
936
937 ret = ch_rte_parse_atype_switch(a, items, &nmode, fs,
938 e);
939 if (ret)
940 return ret;
941 fs->action = FILTER_SWITCH;
942 break;
943 default:
944 /* Not supported action : return error */
945 return rte_flow_error_set(e, ENOTSUP,
946 RTE_FLOW_ERROR_TYPE_ACTION,
947 a, "Action not supported");
948 }
949 }
950
951 if (fs->newvlan == VLAN_REWRITE && (!vlan_set_vid || !vlan_set_pcp))
952 return rte_flow_error_set(e, EINVAL,
953 RTE_FLOW_ERROR_TYPE_ACTION, a,
954 "Both OF_SET_VLAN_VID and "
955 "OF_SET_VLAN_PCP must be specified");
956
957 if (ch_rte_parse_nat(nmode, fs))
958 return rte_flow_error_set(e, EINVAL,
959 RTE_FLOW_ERROR_TYPE_ACTION, a,
960 "invalid settings for swich action");
961 return 0;
962 }
963
964 static struct chrte_fparse parseitem[] = {
965 [RTE_FLOW_ITEM_TYPE_ETH] = {
966 .fptr = ch_rte_parsetype_eth,
967 .dmask = &(const struct rte_flow_item_eth){
968 .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
969 .src.addr_bytes = "\x00\x00\x00\x00\x00\x00",
970 .type = 0xffff,
971 }
972 },
973
974 [RTE_FLOW_ITEM_TYPE_PHY_PORT] = {
975 .fptr = ch_rte_parsetype_port,
976 .dmask = &(const struct rte_flow_item_phy_port){
977 .index = 0x7,
978 }
979 },
980
981 [RTE_FLOW_ITEM_TYPE_VLAN] = {
982 .fptr = ch_rte_parsetype_vlan,
983 .dmask = &(const struct rte_flow_item_vlan){
984 .tci = 0xffff,
985 .inner_type = 0xffff,
986 }
987 },
988
989 [RTE_FLOW_ITEM_TYPE_IPV4] = {
990 .fptr = ch_rte_parsetype_ipv4,
991 .dmask = &(const struct rte_flow_item_ipv4) {
992 .hdr = {
993 .src_addr = RTE_BE32(0xffffffff),
994 .dst_addr = RTE_BE32(0xffffffff),
995 .type_of_service = 0xff,
996 },
997 },
998 },
999
1000 [RTE_FLOW_ITEM_TYPE_IPV6] = {
1001 .fptr = ch_rte_parsetype_ipv6,
1002 .dmask = &(const struct rte_flow_item_ipv6) {
1003 .hdr = {
1004 .src_addr =
1005 "\xff\xff\xff\xff\xff\xff\xff\xff"
1006 "\xff\xff\xff\xff\xff\xff\xff\xff",
1007 .dst_addr =
1008 "\xff\xff\xff\xff\xff\xff\xff\xff"
1009 "\xff\xff\xff\xff\xff\xff\xff\xff",
1010 .vtc_flow = RTE_BE32(0xff000000),
1011 },
1012 },
1013 },
1014
1015 [RTE_FLOW_ITEM_TYPE_UDP] = {
1016 .fptr = ch_rte_parsetype_udp,
1017 .dmask = &rte_flow_item_udp_mask,
1018 },
1019
1020 [RTE_FLOW_ITEM_TYPE_TCP] = {
1021 .fptr = ch_rte_parsetype_tcp,
1022 .dmask = &rte_flow_item_tcp_mask,
1023 },
1024
1025 [RTE_FLOW_ITEM_TYPE_PF] = {
1026 .fptr = ch_rte_parsetype_pf,
1027 .dmask = NULL,
1028 },
1029
1030 [RTE_FLOW_ITEM_TYPE_VF] = {
1031 .fptr = ch_rte_parsetype_vf,
1032 .dmask = &(const struct rte_flow_item_vf){
1033 .id = 0xffffffff,
1034 }
1035 },
1036 };
1037
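/* Walk the pattern and dispatch each item to its parser; repeated items
 * (other than VOID and VLAN) are rejected. Afterwards, optimize the spec
 * and decide between the TCAM and hash filter regions.
 */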
1038 static int
1039 cxgbe_rtef_parse_items(struct rte_flow *flow,
1040 const struct rte_flow_item items[],
1041 struct rte_flow_error *e)
1042 {
1043 struct adapter *adap = ethdev2adap(flow->dev);
1044 const struct rte_flow_item *i;
1045 char repeat[ARRAY_SIZE(parseitem)] = {0};
1046
1047 for (i = items; i->type != RTE_FLOW_ITEM_TYPE_END; i++) {
1048 struct chrte_fparse *idx;
1049 int ret;
1050
1051 if (i->type >= ARRAY_SIZE(parseitem))
1052 return rte_flow_error_set(e, ENOTSUP,
1053 RTE_FLOW_ERROR_TYPE_ITEM,
1054 i, "Item not supported");
1055
1056 switch (i->type) {
1057 case RTE_FLOW_ITEM_TYPE_VOID:
1058 continue;
1059 default:
1060 /* check if item is repeated */
1061 if (repeat[i->type] &&
1062 i->type != RTE_FLOW_ITEM_TYPE_VLAN)
1063 return rte_flow_error_set(e, ENOTSUP,
1064 RTE_FLOW_ERROR_TYPE_ITEM, i,
1065 "parse items cannot be repeated(except void/vlan)");
1066
1067 repeat[i->type] = 1;
1068
1069 /* validate the item */
1070 ret = cxgbe_validate_item(i, e);
1071 if (ret)
1072 return ret;
1073
1074 idx = &flow->item_parser[i->type];
1075 if (!idx || !idx->fptr) {
1076 return rte_flow_error_set(e, ENOTSUP,
1077 RTE_FLOW_ERROR_TYPE_ITEM, i,
1078 "Item not supported");
1079 } else {
1080 ret = idx->fptr(idx->dmask, i, &flow->fs, e);
1081 if (ret)
1082 return ret;
1083 }
1084 }
1085 }
1086
1087 cxgbe_tweak_filter_spec(adap, &flow->fs);
1088 cxgbe_fill_filter_region(adap, &flow->fs);
1089
1090 return 0;
1091 }
1092
1093 static int
1094 cxgbe_flow_parse(struct rte_flow *flow,
1095 const struct rte_flow_attr *attr,
1096 const struct rte_flow_item item[],
1097 const struct rte_flow_action action[],
1098 struct rte_flow_error *e)
1099 {
1100 int ret;
1101 /* parse user request into ch_filter_specification */
1102 ret = cxgbe_rtef_parse_attr(flow, attr, e);
1103 if (ret)
1104 return ret;
1105 ret = cxgbe_rtef_parse_items(flow, item, e);
1106 if (ret)
1107 return ret;
1108 return cxgbe_rtef_parse_actions(flow, item, action, e);
1109 }
1110
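/* Program the parsed filter into hardware and wait for the firmware reply;
 * on success remember the filter index and entry for later destroy/query.
 */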
1111 static int __cxgbe_flow_create(struct rte_eth_dev *dev, struct rte_flow *flow)
1112 {
1113 struct ch_filter_specification *fs = &flow->fs;
1114 struct adapter *adap = ethdev2adap(dev);
1115 struct tid_info *t = &adap->tids;
1116 struct filter_ctx ctx;
1117 unsigned int fidx;
1118 int err;
1119
1120 if (cxgbe_get_fidx(flow, &fidx))
1121 return -ENOMEM;
1122 if (cxgbe_verify_fidx(flow, fidx, 0))
1123 return -1;
1124
1125 t4_init_completion(&ctx.completion);
1126 /* go create the filter */
1127 err = cxgbe_set_filter(dev, fidx, fs, &ctx);
1128 if (err) {
1129 dev_err(adap, "Error %d while creating filter.\n", err);
1130 return err;
1131 }
1132
1133 /* Poll the FW for reply */
1134 err = cxgbe_poll_for_completion(&adap->sge.fw_evtq,
1135 CXGBE_FLOW_POLL_MS,
1136 CXGBE_FLOW_POLL_CNT,
1137 &ctx.completion);
1138 if (err) {
1139 dev_err(adap, "Filter set operation timed out (%d)\n", err);
1140 return err;
1141 }
1142 if (ctx.result) {
1143 dev_err(adap, "Hardware error %d while creating the filter.\n",
1144 ctx.result);
1145 return ctx.result;
1146 }
1147
1148 if (fs->cap) { /* to destroy the filter */
1149 flow->fidx = ctx.tid;
1150 flow->f = lookup_tid(t, ctx.tid);
1151 } else {
1152 flow->fidx = fidx;
1153 flow->f = &adap->tids.ftid_tab[fidx];
1154 }
1155
1156 return 0;
1157 }
1158
1159 static struct rte_flow *
1160 cxgbe_flow_create(struct rte_eth_dev *dev,
1161 const struct rte_flow_attr *attr,
1162 const struct rte_flow_item item[],
1163 const struct rte_flow_action action[],
1164 struct rte_flow_error *e)
1165 {
1166 struct adapter *adap = ethdev2adap(dev);
1167 struct rte_flow *flow;
1168 int ret;
1169
1170 flow = t4_os_alloc(sizeof(struct rte_flow));
1171 if (!flow) {
1172 rte_flow_error_set(e, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
1173 NULL, "Unable to allocate memory for"
1174 " filter_entry");
1175 return NULL;
1176 }
1177
1178 flow->item_parser = parseitem;
1179 flow->dev = dev;
1180 flow->fs.private = (void *)flow;
1181
1182 if (cxgbe_flow_parse(flow, attr, item, action, e)) {
1183 t4_os_free(flow);
1184 return NULL;
1185 }
1186
1187 t4_os_lock(&adap->flow_lock);
1188 /* go, interact with cxgbe_filter */
1189 ret = __cxgbe_flow_create(dev, flow);
1190 t4_os_unlock(&adap->flow_lock);
1191 if (ret) {
1192 rte_flow_error_set(e, ret, RTE_FLOW_ERROR_TYPE_HANDLE,
1193 NULL, "Unable to create flow rule");
1194 t4_os_free(flow);
1195 return NULL;
1196 }
1197
1198 flow->f->private = flow; /* Will be used during flush */
1199
1200 return flow;
1201 }
1202
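/* Remove the hardware filter backing this flow and wait for the firmware
 * to acknowledge the deletion.
 */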
1203 static int __cxgbe_flow_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
1204 {
1205 struct adapter *adap = ethdev2adap(dev);
1206 struct filter_entry *f = flow->f;
1207 struct ch_filter_specification *fs;
1208 struct filter_ctx ctx;
1209 int err;
1210
1211 fs = &f->fs;
1212 if (cxgbe_verify_fidx(flow, flow->fidx, 1))
1213 return -1;
1214
1215 t4_init_completion(&ctx.completion);
1216 err = cxgbe_del_filter(dev, flow->fidx, fs, &ctx);
1217 if (err) {
1218 dev_err(adap, "Error %d while deleting filter.\n", err);
1219 return err;
1220 }
1221
1222 /* Poll the FW for reply */
1223 err = cxgbe_poll_for_completion(&adap->sge.fw_evtq,
1224 CXGBE_FLOW_POLL_MS,
1225 CXGBE_FLOW_POLL_CNT,
1226 &ctx.completion);
1227 if (err) {
1228 dev_err(adap, "Filter delete operation timed out (%d)\n", err);
1229 return err;
1230 }
1231 if (ctx.result) {
1232 dev_err(adap, "Hardware error %d while deleting the filter.\n",
1233 ctx.result);
1234 return ctx.result;
1235 }
1236
1237 return 0;
1238 }
1239
1240 static int
1241 cxgbe_flow_destroy(struct rte_eth_dev *dev, struct rte_flow *flow,
1242 struct rte_flow_error *e)
1243 {
1244 struct adapter *adap = ethdev2adap(dev);
1245 int ret;
1246
1247 t4_os_lock(&adap->flow_lock);
1248 ret = __cxgbe_flow_destroy(dev, flow);
1249 t4_os_unlock(&adap->flow_lock);
1250 if (ret)
1251 return rte_flow_error_set(e, ret, RTE_FLOW_ERROR_TYPE_HANDLE,
1252 flow, "error destroying filter.");
1253 t4_os_free(flow);
1254 return 0;
1255 }
1256
1257 static int __cxgbe_flow_query(struct rte_flow *flow, u64 *count,
1258 u64 *byte_count)
1259 {
1260 struct adapter *adap = ethdev2adap(flow->dev);
1261 struct ch_filter_specification fs = flow->f->fs;
1262 unsigned int fidx = flow->fidx;
1263 int ret = 0;
1264
1265 ret = cxgbe_get_filter_count(adap, fidx, count, fs.cap, 0);
1266 if (ret)
1267 return ret;
1268 return cxgbe_get_filter_count(adap, fidx, byte_count, fs.cap, 1);
1269 }
1270
1271 static int
1272 cxgbe_flow_query(struct rte_eth_dev *dev, struct rte_flow *flow,
1273 const struct rte_flow_action *action, void *data,
1274 struct rte_flow_error *e)
1275 {
1276 struct adapter *adap = ethdev2adap(flow->dev);
1277 struct ch_filter_specification fs;
1278 struct rte_flow_query_count *c;
1279 struct filter_entry *f;
1280 int ret;
1281
1282 RTE_SET_USED(dev);
1283
1284 f = flow->f;
1285 fs = f->fs;
1286
1287 if (action->type != RTE_FLOW_ACTION_TYPE_COUNT)
1288 return rte_flow_error_set(e, ENOTSUP,
1289 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
1290 "only count supported for query");
1291
1292 /*
1293 * This is a valid operation, since we are allowed to perform Chelsio
1294 * specific operations on the rte side of our code, but not vice versa.
1295 *
1296 * So, fs can be queried/modified here, BUT rte_flow_query_count
1297 * cannot be touched by the lower layer, since we want to keep it
1298 * rte_flow agnostic.
1299 */
1300 if (!fs.hitcnts)
1301 return rte_flow_error_set(e, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
1302 &fs, "filter hit counters were not"
1303 " enabled during filter creation");
1304
1305 c = (struct rte_flow_query_count *)data;
1306
1307 t4_os_lock(&adap->flow_lock);
1308 ret = __cxgbe_flow_query(flow, &c->hits, &c->bytes);
1309 if (ret) {
1310 rte_flow_error_set(e, -ret, RTE_FLOW_ERROR_TYPE_ACTION,
1311 f, "cxgbe pmd failed to perform query");
1312 goto out;
1313 }
1314
1315 /* Query was successful */
1316 c->bytes_set = 1;
1317 c->hits_set = 1;
1318 if (c->reset)
1319 cxgbe_clear_filter_count(adap, flow->fidx, f->fs.cap, true);
1320
1321 out:
1322 t4_os_unlock(&adap->flow_lock);
1323 return ret;
1324 }
1325
1326 static int
1327 cxgbe_flow_validate(struct rte_eth_dev *dev,
1328 const struct rte_flow_attr *attr,
1329 const struct rte_flow_item item[],
1330 const struct rte_flow_action action[],
1331 struct rte_flow_error *e)
1332 {
1333 struct adapter *adap = ethdev2adap(dev);
1334 struct rte_flow *flow;
1335 unsigned int fidx;
1336 int ret = 0;
1337
1338 flow = t4_os_alloc(sizeof(struct rte_flow));
1339 if (!flow)
1340 return rte_flow_error_set(e, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
1341 NULL,
1342 "Unable to allocate memory for filter_entry");
1343
1344 flow->item_parser = parseitem;
1345 flow->dev = dev;
1346 flow->fs.private = (void *)flow;
1347
1348 ret = cxgbe_flow_parse(flow, attr, item, action, e);
1349 if (ret) {
1350 t4_os_free(flow);
1351 return ret;
1352 }
1353
1354 if (cxgbe_validate_filter(adap, &flow->fs)) {
1355 t4_os_free(flow);
1356 return rte_flow_error_set(e, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE,
1357 NULL,
1358 "validation failed. Check f/w config file.");
1359 }
1360
1361 t4_os_lock(&adap->flow_lock);
1362 if (cxgbe_get_fidx(flow, &fidx)) {
1363 ret = rte_flow_error_set(e, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
1364 NULL, "no memory in tcam.");
1365 goto out;
1366 }
1367
1368 if (cxgbe_verify_fidx(flow, fidx, 0)) {
1369 ret = rte_flow_error_set(e, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE,
1370 NULL, "validation failed");
1371 goto out;
1372 }
1373
1374 out:
1375 t4_os_unlock(&adap->flow_lock);
1376 t4_os_free(flow);
1377 return ret;
1378 }
1379
1380 /*
1381 * @ret : == 0 filter destroyed successfully
1382 * < 0 error destroying filter
1383 * == 1 filter not active / not found
1384 */
1385 static int
1386 cxgbe_check_n_destroy(struct filter_entry *f, struct rte_eth_dev *dev)
1387 {
1388 if (f && (f->valid || f->pending) &&
1389 f->dev == dev && /* Only if user has asked for this port */
1390 f->private) /* We (rte_flow) created this filter */
1391 return __cxgbe_flow_destroy(dev, (struct rte_flow *)f->private);
1392 return 1;
1393 }
1394
1395 static int cxgbe_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *e)
1396 {
1397 struct adapter *adap = ethdev2adap(dev);
1398 unsigned int i;
1399 int ret = 0;
1400
1401 t4_os_lock(&adap->flow_lock);
1402 if (adap->tids.ftid_tab) {
1403 struct filter_entry *f = &adap->tids.ftid_tab[0];
1404
1405 for (i = 0; i < adap->tids.nftids; i++, f++) {
1406 ret = cxgbe_check_n_destroy(f, dev);
1407 if (ret < 0) {
1408 rte_flow_error_set(e, ret,
1409 RTE_FLOW_ERROR_TYPE_HANDLE,
1410 f->private,
1411 "error destroying TCAM "
1412 "filter.");
1413 goto out;
1414 }
1415 }
1416 }
1417
1418 if (is_hashfilter(adap) && adap->tids.tid_tab) {
1419 struct filter_entry *f;
1420
1421 for (i = adap->tids.hash_base; i <= adap->tids.ntids; i++) {
1422 f = (struct filter_entry *)adap->tids.tid_tab[i];
1423
1424 ret = cxgbe_check_n_destroy(f, dev);
1425 if (ret < 0) {
1426 rte_flow_error_set(e, ret,
1427 RTE_FLOW_ERROR_TYPE_HANDLE,
1428 f->private,
1429 "error destroying HASH "
1430 "filter.");
1431 goto out;
1432 }
1433 }
1434 }
1435
1436 out:
1437 t4_os_unlock(&adap->flow_lock);
1438 return ret >= 0 ? 0 : ret;
1439 }
1440
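/* These callbacks are reached through the generic rte_flow API. As an
 * illustrative sketch only (not part of the driver), an application would
 * typically build a rule along these lines; port_id and the queue index are
 * application-chosen values assumed here for the example:
 *
 *	struct rte_flow_attr attr = { .ingress = 1 };
 *	struct rte_flow_item_ipv4 ip_spec = {
 *		.hdr.dst_addr = RTE_BE32(RTE_IPV4(192, 168, 0, 1)),
 *	};
 *	struct rte_flow_item_ipv4 ip_mask = {
 *		.hdr.dst_addr = RTE_BE32(0xffffffff),
 *	};
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
 *		{ .type = RTE_FLOW_ITEM_TYPE_IPV4,
 *		  .spec = &ip_spec, .mask = &ip_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *	struct rte_flow_action_queue queue = { .index = 0 };
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *		{ .type = RTE_FLOW_ACTION_TYPE_COUNT },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 *	struct rte_flow_error err;
 *	struct rte_flow *f = NULL;
 *
 *	if (rte_flow_validate(port_id, &attr, pattern, actions, &err) == 0)
 *		f = rte_flow_create(port_id, &attr, pattern, actions, &err);
 */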
1441 static const struct rte_flow_ops cxgbe_flow_ops = {
1442 .validate = cxgbe_flow_validate,
1443 .create = cxgbe_flow_create,
1444 .destroy = cxgbe_flow_destroy,
1445 .flush = cxgbe_flow_flush,
1446 .query = cxgbe_flow_query,
1447 .isolate = NULL,
1448 };
1449
1450 int
1451 cxgbe_dev_flow_ops_get(struct rte_eth_dev *dev __rte_unused,
1452 const struct rte_flow_ops **ops)
1453 {
1454 *ops = &cxgbe_flow_ops;
1455 return 0;
1456 }
1457