/* /f-stack/dpdk/drivers/net/enic/enic_clsf.c (revision 2d9fd380) */
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2008-2017 Cisco Systems, Inc.  All rights reserved.
 * Copyright 2007 Nuova Systems, Inc.  All rights reserved.
 */

#include <rte_ethdev_driver.h>
#include <rte_malloc.h>
#include <rte_hash.h>
#include <rte_byteorder.h>
#include <rte_ip.h>
#include <rte_tcp.h>
#include <rte_udp.h>
#include <rte_sctp.h>

#include "enic_compat.h"
#include "enic.h"
#include "wq_enet_desc.h"
#include "rq_enet_desc.h"
#include "cq_enet_desc.h"
#include "vnic_enet.h"
#include "vnic_dev.h"
#include "vnic_wq.h"
#include "vnic_rq.h"
#include "vnic_cq.h"
#include "vnic_intr.h"
#include "vnic_nic.h"

#ifdef RTE_ARCH_X86
#include <rte_hash_crc.h>
#define DEFAULT_HASH_FUNC       rte_hash_crc
#else
#include <rte_jhash.h>
#define DEFAULT_HASH_FUNC       rte_jhash
#endif
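
/*
 * Note: DEFAULT_HASH_FUNC feeds rte_hash_parameters.hash_func in
 * enic_clsf_init() below. On x86 the CRC32-based rte_hash_crc (which can
 * use the SSE4.2 CRC instruction) is preferred; rte_jhash is the portable
 * fallback on other architectures.
 */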

#define ENICPMD_CLSF_HASH_ENTRIES       ENICPMD_FDIR_MAX

static void copy_fltr_v1(struct filter_v2 *fltr,
		const struct rte_eth_fdir_input *input,
		const struct rte_eth_fdir_masks *masks);
static void copy_fltr_v2(struct filter_v2 *fltr,
		const struct rte_eth_fdir_input *input,
		const struct rte_eth_fdir_masks *masks);

void enic_fdir_info(struct enic *enic)
{
	enic->fdir.modes = (uint32_t)RTE_FDIR_MODE_PERFECT;
	enic->fdir.types_mask  = 1 << RTE_ETH_FLOW_NONFRAG_IPV4_UDP |
				 1 << RTE_ETH_FLOW_NONFRAG_IPV4_TCP;
	if (enic->adv_filters) {
		enic->fdir.types_mask |= 1 << RTE_ETH_FLOW_NONFRAG_IPV4_OTHER |
					 1 << RTE_ETH_FLOW_NONFRAG_IPV4_SCTP |
					 1 << RTE_ETH_FLOW_NONFRAG_IPV6_UDP |
					 1 << RTE_ETH_FLOW_NONFRAG_IPV6_TCP |
					 1 << RTE_ETH_FLOW_NONFRAG_IPV6_SCTP |
					 1 << RTE_ETH_FLOW_NONFRAG_IPV6_OTHER;
		enic->fdir.copy_fltr_fn = copy_fltr_v2;
	} else {
		enic->fdir.copy_fltr_fn = copy_fltr_v1;
	}
}
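
/*
 * Usage sketch (illustrative, not part of the driver): types_mask keeps
 * one capability bit per RTE_ETH_FLOW_* value, so a caller could test
 * whether a given flow type is supported with:
 *
 *	bool sctp_ok = enic->fdir.types_mask &
 *		       (1 << RTE_ETH_FLOW_NONFRAG_IPV4_SCTP);
 */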

static void
enic_set_layer(struct filter_generic_1 *gp, unsigned int flag,
	       enum filter_generic_1_layer layer, void *mask, void *val,
	       unsigned int len)
{
	gp->mask_flags |= flag;
	gp->val_flags |= gp->mask_flags;
	memcpy(gp->layer[layer].mask, mask, len);
	memcpy(gp->layer[layer].val, val, len);
}
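
/*
 * Usage sketch (illustrative): enic_set_layer() ORs 'flag' into the
 * filter's mask flags, mirrors the accumulated mask flags into the value
 * flags, and copies a wire-format <mask, val> header pair into the given
 * layer slot. A caller matching only the UDP destination port would do:
 *
 *	struct rte_udp_hdr m = { .dst_port = 0xffff };
 *	struct rte_udp_hdr v = { .dst_port = want_be_port };
 *	enic_set_layer(gp, FILTER_GENERIC_1_UDP, FILTER_GENERIC_1_L4,
 *		       &m, &v, sizeof(m));
 *
 * where 'want_be_port' is a hypothetical big-endian port value.
 */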

/* Copy Flow Director filter to a VIC ipv4 filter (for Cisco VICs
 * without advanced filter support).
 */
static void
copy_fltr_v1(struct filter_v2 *fltr, const struct rte_eth_fdir_input *input,
	     __rte_unused const struct rte_eth_fdir_masks *masks)
{
	fltr->type = FILTER_IPV4_5TUPLE;
	fltr->u.ipv4.src_addr = rte_be_to_cpu_32(
		input->flow.ip4_flow.src_ip);
	fltr->u.ipv4.dst_addr = rte_be_to_cpu_32(
		input->flow.ip4_flow.dst_ip);
	fltr->u.ipv4.src_port = rte_be_to_cpu_16(
		input->flow.udp4_flow.src_port);
	fltr->u.ipv4.dst_port = rte_be_to_cpu_16(
		input->flow.udp4_flow.dst_port);

	if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV4_TCP)
		fltr->u.ipv4.protocol = PROTO_TCP;
	else
		fltr->u.ipv4.protocol = PROTO_UDP;

	fltr->u.ipv4.flags = FILTER_FIELDS_IPV4_5TUPLE;
}
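
/*
 * Worked example (illustrative): the fdir input carries addresses and
 * ports in network byte order, while the v1 5-tuple filter wants host
 * order, hence the rte_be_to_cpu_*() calls above. A rule for UDP traffic
 * to 10.4.5.6:53 would yield:
 *
 *	fltr->type            = FILTER_IPV4_5TUPLE
 *	fltr->u.ipv4.dst_addr = 0x0a040506   (10.4.5.6)
 *	fltr->u.ipv4.dst_port = 53
 *	fltr->u.ipv4.protocol = PROTO_UDP
 */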

/* Copy Flow Director filter to a VIC generic filter (requires advanced
 * filter support).
 */
static void
copy_fltr_v2(struct filter_v2 *fltr, const struct rte_eth_fdir_input *input,
	     const struct rte_eth_fdir_masks *masks)
{
	struct filter_generic_1 *gp = &fltr->u.generic_1;

	fltr->type = FILTER_DPDK_1;
	memset(gp, 0, sizeof(*gp));

	if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV4_UDP) {
		struct rte_udp_hdr udp_mask, udp_val;
		memset(&udp_mask, 0, sizeof(udp_mask));
		memset(&udp_val, 0, sizeof(udp_val));

		if (input->flow.udp4_flow.src_port) {
			udp_mask.src_port = masks->src_port_mask;
			udp_val.src_port = input->flow.udp4_flow.src_port;
		}
		if (input->flow.udp4_flow.dst_port) {
			udp_mask.dst_port = masks->dst_port_mask;
			udp_val.dst_port = input->flow.udp4_flow.dst_port;
		}

		enic_set_layer(gp, FILTER_GENERIC_1_UDP, FILTER_GENERIC_1_L4,
			       &udp_mask, &udp_val, sizeof(struct rte_udp_hdr));
	} else if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV4_TCP) {
		struct rte_tcp_hdr tcp_mask, tcp_val;
		memset(&tcp_mask, 0, sizeof(tcp_mask));
		memset(&tcp_val, 0, sizeof(tcp_val));

		if (input->flow.tcp4_flow.src_port) {
			tcp_mask.src_port = masks->src_port_mask;
			tcp_val.src_port = input->flow.tcp4_flow.src_port;
		}
		if (input->flow.tcp4_flow.dst_port) {
			tcp_mask.dst_port = masks->dst_port_mask;
			tcp_val.dst_port = input->flow.tcp4_flow.dst_port;
		}

		enic_set_layer(gp, FILTER_GENERIC_1_TCP, FILTER_GENERIC_1_L4,
			       &tcp_mask, &tcp_val, sizeof(struct rte_tcp_hdr));
	} else if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV4_SCTP) {
		struct rte_sctp_hdr sctp_mask, sctp_val;
		memset(&sctp_mask, 0, sizeof(sctp_mask));
		memset(&sctp_val, 0, sizeof(sctp_val));

		if (input->flow.sctp4_flow.src_port) {
			sctp_mask.src_port = masks->src_port_mask;
			sctp_val.src_port = input->flow.sctp4_flow.src_port;
		}
		if (input->flow.sctp4_flow.dst_port) {
			sctp_mask.dst_port = masks->dst_port_mask;
			sctp_val.dst_port = input->flow.sctp4_flow.dst_port;
		}
		if (input->flow.sctp4_flow.verify_tag) {
			sctp_mask.tag = 0xffffffff;
			sctp_val.tag = input->flow.sctp4_flow.verify_tag;
		}

		/*
		 * Unlike UDP/TCP (FILTER_GENERIC_1_{UDP,TCP}), the firmware
		 * has no "packet is SCTP" flag. Use flag=0 (generic L4) and
		 * manually set proto_id=sctp below.
		 */
		enic_set_layer(gp, 0, FILTER_GENERIC_1_L4, &sctp_mask,
			       &sctp_val, sizeof(struct rte_sctp_hdr));
	}

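	/* With the L4 layer set, fill in the L3 (IPv4) layer shared by all
	 * IPv4 flow types.
	 */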
	if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV4_UDP ||
	    input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV4_TCP ||
	    input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV4_SCTP ||
	    input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV4_OTHER) {
		struct rte_ipv4_hdr ip4_mask, ip4_val;
		memset(&ip4_mask, 0, sizeof(struct rte_ipv4_hdr));
		memset(&ip4_val, 0, sizeof(struct rte_ipv4_hdr));

		if (input->flow.ip4_flow.tos) {
			ip4_mask.type_of_service = masks->ipv4_mask.tos;
			ip4_val.type_of_service = input->flow.ip4_flow.tos;
		}
		if (input->flow.ip4_flow.ttl) {
			ip4_mask.time_to_live = masks->ipv4_mask.ttl;
			ip4_val.time_to_live = input->flow.ip4_flow.ttl;
		}
		if (input->flow.ip4_flow.proto) {
			ip4_mask.next_proto_id = masks->ipv4_mask.proto;
			ip4_val.next_proto_id = input->flow.ip4_flow.proto;
		} else if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV4_SCTP) {
			/* Explicitly match the SCTP protocol number */
			ip4_mask.next_proto_id = 0xff;
			ip4_val.next_proto_id = IPPROTO_SCTP;
		}
		if (input->flow.ip4_flow.src_ip) {
			ip4_mask.src_addr = masks->ipv4_mask.src_ip;
			ip4_val.src_addr = input->flow.ip4_flow.src_ip;
		}
		if (input->flow.ip4_flow.dst_ip) {
			ip4_mask.dst_addr = masks->ipv4_mask.dst_ip;
			ip4_val.dst_addr = input->flow.ip4_flow.dst_ip;
		}

		enic_set_layer(gp, FILTER_GENERIC_1_IPV4, FILTER_GENERIC_1_L3,
			&ip4_mask, &ip4_val, sizeof(struct rte_ipv4_hdr));
	}

	if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV6_UDP) {
		struct rte_udp_hdr udp_mask, udp_val;
		memset(&udp_mask, 0, sizeof(udp_mask));
		memset(&udp_val, 0, sizeof(udp_val));

		if (input->flow.udp6_flow.src_port) {
			udp_mask.src_port = masks->src_port_mask;
			udp_val.src_port = input->flow.udp6_flow.src_port;
		}
		if (input->flow.udp6_flow.dst_port) {
			udp_mask.dst_port = masks->dst_port_mask;
			udp_val.dst_port = input->flow.udp6_flow.dst_port;
		}
		enic_set_layer(gp, FILTER_GENERIC_1_UDP, FILTER_GENERIC_1_L4,
			       &udp_mask, &udp_val, sizeof(struct rte_udp_hdr));
	} else if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV6_TCP) {
		struct rte_tcp_hdr tcp_mask, tcp_val;
		memset(&tcp_mask, 0, sizeof(tcp_mask));
		memset(&tcp_val, 0, sizeof(tcp_val));

		if (input->flow.tcp6_flow.src_port) {
			tcp_mask.src_port = masks->src_port_mask;
			tcp_val.src_port = input->flow.tcp6_flow.src_port;
		}
		if (input->flow.tcp6_flow.dst_port) {
			tcp_mask.dst_port = masks->dst_port_mask;
			tcp_val.dst_port = input->flow.tcp6_flow.dst_port;
		}
		enic_set_layer(gp, FILTER_GENERIC_1_TCP, FILTER_GENERIC_1_L4,
			       &tcp_mask, &tcp_val, sizeof(struct rte_tcp_hdr));
	} else if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV6_SCTP) {
		struct rte_sctp_hdr sctp_mask, sctp_val;
		memset(&sctp_mask, 0, sizeof(sctp_mask));
		memset(&sctp_val, 0, sizeof(sctp_val));

		if (input->flow.sctp6_flow.src_port) {
			sctp_mask.src_port = masks->src_port_mask;
			sctp_val.src_port = input->flow.sctp6_flow.src_port;
		}
		if (input->flow.sctp6_flow.dst_port) {
			sctp_mask.dst_port = masks->dst_port_mask;
			sctp_val.dst_port = input->flow.sctp6_flow.dst_port;
		}
		if (input->flow.sctp6_flow.verify_tag) {
			sctp_mask.tag = 0xffffffff;
			sctp_val.tag = input->flow.sctp6_flow.verify_tag;
		}

		enic_set_layer(gp, 0, FILTER_GENERIC_1_L4, &sctp_mask,
			       &sctp_val, sizeof(struct rte_sctp_hdr));
	}

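	/* Likewise fill in the L3 (IPv6) layer shared by all IPv6 flow
	 * types.
	 */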
	if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV6_UDP ||
	    input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV6_TCP ||
	    input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV6_SCTP ||
	    input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV6_OTHER) {
		struct rte_ipv6_hdr ipv6_mask, ipv6_val;
		memset(&ipv6_mask, 0, sizeof(struct rte_ipv6_hdr));
		memset(&ipv6_val, 0, sizeof(struct rte_ipv6_hdr));

		if (input->flow.ipv6_flow.proto) {
			ipv6_mask.proto = masks->ipv6_mask.proto;
			ipv6_val.proto = input->flow.ipv6_flow.proto;
		} else if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV6_SCTP) {
			/* See comments for IPv4 SCTP above. */
			ipv6_mask.proto = 0xff;
			ipv6_val.proto = IPPROTO_SCTP;
		}
		memcpy(ipv6_mask.src_addr, masks->ipv6_mask.src_ip,
		       sizeof(ipv6_mask.src_addr));
		memcpy(ipv6_val.src_addr, input->flow.ipv6_flow.src_ip,
		       sizeof(ipv6_val.src_addr));
		memcpy(ipv6_mask.dst_addr, masks->ipv6_mask.dst_ip,
		       sizeof(ipv6_mask.dst_addr));
		memcpy(ipv6_val.dst_addr, input->flow.ipv6_flow.dst_ip,
		       sizeof(ipv6_val.dst_addr));
		if (input->flow.ipv6_flow.tc) {
			ipv6_mask.vtc_flow = masks->ipv6_mask.tc << 12;
			ipv6_val.vtc_flow = input->flow.ipv6_flow.tc << 12;
		}
		if (input->flow.ipv6_flow.hop_limits) {
			ipv6_mask.hop_limits = masks->ipv6_mask.hop_limits;
			ipv6_val.hop_limits = input->flow.ipv6_flow.hop_limits;
		}

		enic_set_layer(gp, FILTER_GENERIC_1_IPV6, FILTER_GENERIC_1_L3,
			&ipv6_mask, &ipv6_val, sizeof(struct rte_ipv6_hdr));
	}
}
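
/*
 * Dispatch sketch (illustrative): enic_fdir_info() selects one of the two
 * copy routines above via enic->fdir.copy_fltr_fn, so the filter-add path
 * can stay agnostic of the VIC's filter version:
 *
 *	struct filter_v2 fltr;
 *	enic->fdir.copy_fltr_fn(&fltr, &input, &masks);
 *
 * where 'input' and 'masks' stand for the application-supplied
 * rte_eth_fdir_input and the configured rte_eth_fdir_masks.
 */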

void enic_clsf_destroy(struct enic *enic)
{
	uint32_t index;
	struct enic_fdir_node *key;
	/* delete classifier entries */
	for (index = 0; index < ENICPMD_FDIR_MAX; index++) {
		key = enic->fdir.nodes[index];
		if (key) {
			vnic_dev_classifier(enic->vdev, CLSF_DEL,
				&key->fltr_id, NULL, NULL);
			rte_free(key);
			enic->fdir.nodes[index] = NULL;
		}
	}

	if (enic->fdir.hash) {
		rte_hash_free(enic->fdir.hash);
		enic->fdir.hash = NULL;
	}
}

int enic_clsf_init(struct enic *enic)
{
	char clsf_name[RTE_HASH_NAMESIZE];
	struct rte_hash_parameters hash_params = {
		.name = clsf_name,
		.entries = ENICPMD_CLSF_HASH_ENTRIES,
		.key_len = sizeof(struct rte_eth_fdir_filter),
		.hash_func = DEFAULT_HASH_FUNC,
		.hash_func_init_val = 0,
		.socket_id = SOCKET_ID_ANY,
	};
	snprintf(clsf_name, RTE_HASH_NAMESIZE, "enic_clsf_%s", enic->bdf_name);
	enic->fdir.hash = rte_hash_create(&hash_params);
	memset(&enic->fdir.stats, 0, sizeof(enic->fdir.stats));
	enic->fdir.stats.free = ENICPMD_FDIR_MAX;
	return NULL == enic->fdir.hash;
}
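
/*
 * Lifecycle sketch (illustrative): enic_clsf_init() returns nonzero if
 * the lookup hash cannot be created; enic_clsf_destroy() is its
 * counterpart, removing installed filters and freeing the hash:
 *
 *	if (enic_clsf_init(enic))
 *		return -ENOMEM;
 *	...
 *	enic_clsf_destroy(enic);
 */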