/* SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright(c) 2019-2021 Xilinx, Inc.
 * Copyright(c) 2019 Solarflare Communications Inc.
 *
 * This software was jointly developed between OKTET Labs (under contract
 * for Solarflare) and Solarflare Communications, Inc.
 */

#ifndef _SFC_MAE_H
#define _SFC_MAE_H

#include <stdbool.h>

#include <rte_spinlock.h>

#include "efx.h"

#include "sfc_stats.h"

#ifdef __cplusplus
extern "C" {
#endif

/** FW-allocatable resource context */
struct sfc_mae_fw_rsrc {
	unsigned int			refcnt;
	RTE_STD_C11
	union {
		efx_mae_aset_id_t	aset_id;
		efx_mae_rule_id_t	rule_id;
		efx_mae_mac_id_t	mac_id;
		efx_mae_eh_id_t		eh_id;
	};
};

/** Outer rule registry entry */
struct sfc_mae_outer_rule {
	TAILQ_ENTRY(sfc_mae_outer_rule)	entries;
	unsigned int			refcnt;
	efx_mae_match_spec_t		*match_spec;
	efx_tunnel_protocol_t		encap_type;
	struct sfc_mae_fw_rsrc		fw_rsrc;
};

TAILQ_HEAD(sfc_mae_outer_rules, sfc_mae_outer_rule);

/** MAC address registry entry */
struct sfc_mae_mac_addr {
	TAILQ_ENTRY(sfc_mae_mac_addr)	entries;
	unsigned int			refcnt;
	uint8_t				addr_bytes[EFX_MAC_ADDR_LEN];
	struct sfc_mae_fw_rsrc		fw_rsrc;
};

TAILQ_HEAD(sfc_mae_mac_addrs, sfc_mae_mac_addr);

/** Encap. header registry entry */
struct sfc_mae_encap_header {
	TAILQ_ENTRY(sfc_mae_encap_header)	entries;
	unsigned int				refcnt;
	uint8_t					*buf;
	size_t					size;
	efx_tunnel_protocol_t			type;
	struct sfc_mae_fw_rsrc			fw_rsrc;
};

TAILQ_HEAD(sfc_mae_encap_headers, sfc_mae_encap_header);

/* Counter ID */
struct sfc_mae_counter_id {
	/* ID of a counter in MAE */
	efx_counter_t			mae_id;
	/* ID of a counter in RTE */
	uint32_t			rte_id;
	/* RTE counter ID validity status */
	bool				rte_id_valid;

	/* Flow Tunnel (FT) GROUP hit counter (or NULL) */
	uint64_t			*ft_group_hit_counter;
	/* Flow Tunnel (FT) context (for JUMP rules; otherwise, NULL) */
	struct sfc_flow_tunnel		*ft;
};

/** Action set registry entry */
struct sfc_mae_action_set {
	TAILQ_ENTRY(sfc_mae_action_set)	entries;
	unsigned int			refcnt;
	struct sfc_mae_counter_id	*counters;
	uint32_t			n_counters;
	efx_mae_actions_t		*spec;
	struct sfc_mae_encap_header	*encap_header;
	struct sfc_mae_mac_addr		*dst_mac_addr;
	struct sfc_mae_mac_addr		*src_mac_addr;
	struct sfc_mae_fw_rsrc		fw_rsrc;
};

TAILQ_HEAD(sfc_mae_action_sets, sfc_mae_action_set);
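
/*
 * Note on the registries above: each entry combines a driver-level
 * reference count (refcnt, the number of flows that use the entry) with a
 * lazily allocated FW object handle (fw_rsrc). A minimal lifecycle sketch,
 * with hypothetical helper names modelled on the attach/add/enable pattern
 * used by sfc_mae.c:
 *
 *	struct sfc_mae_action_set *as;
 *
 *	as = action_set_attach(mae, spec);	// reuse an equal entry, or
 *	if (as == NULL)
 *		as = action_set_add(mae, spec);	// register a new one
 *
 *	action_set_enable(sa, as);	// allocate fw_rsrc.aset_id on the
 *					// first enable only
 *	action_set_disable(sa, as);	// release it on the last disable
 *	action_set_del(sa, as);		// drop the registry entry when
 *					// its refcnt reaches zero
 */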

/** Options for MAE support status */
enum sfc_mae_status {
	SFC_MAE_STATUS_UNKNOWN = 0,
	SFC_MAE_STATUS_UNSUPPORTED,
	SFC_MAE_STATUS_SUPPORTED,
	SFC_MAE_STATUS_ADMIN,
};

/*
 * Encap. header bounce buffer. It is used to store header data
 * when parsing the header definition in the action VXLAN_ENCAP.
 */
struct sfc_mae_bounce_eh {
	uint8_t				*buf;
	size_t				buf_size;
	size_t				size;
	efx_tunnel_protocol_t		type;
};
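
/*
 * A sketch of how the bounce buffer is meant to be used (the parse helper
 * name below is illustrative, not a declaration from this header): buf is
 * allocated once, buf_size bytes long, and each VXLAN_ENCAP parse resets
 * size before filling buf and type with the parsed header.
 *
 *	struct sfc_mae_bounce_eh *beh = &mae->bounce_eh;
 *
 *	beh->size = 0;
 *	parse_vxlan_encap_header(items, beh);
 *	// On success, beh->buf[0 .. beh->size - 1] is copied into a
 *	// struct sfc_mae_encap_header registry entry of beh->type.
 */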

/** Counter collection entry */
struct sfc_mae_counter {
	bool				inuse;
	uint32_t			generation_count;
	union sfc_pkts_bytes		value;
	union sfc_pkts_bytes		reset;

	uint64_t			*ft_group_hit_counter;
};

struct sfc_mae_counters_xstats {
	uint64_t			not_inuse_update;
	uint64_t			realloc_update;
};

struct sfc_mae_counters {
	/** An array of all MAE counters */
	struct sfc_mae_counter		*mae_counters;
	/** Extra statistics for counters */
	struct sfc_mae_counters_xstats	xstats;
	/** Count of all MAE counters */
	unsigned int			n_mae_counters;
};

/** Options for MAE counter polling mode */
enum sfc_mae_counter_polling_mode {
	SFC_MAE_COUNTER_POLLING_OFF = 0,
	SFC_MAE_COUNTER_POLLING_SERVICE,
	SFC_MAE_COUNTER_POLLING_THREAD,
};

struct sfc_mae_counter_registry {
	/* Common counter information */
	/** Counters collection */
	struct sfc_mae_counters		counters;

	/* Information used by counter update service */
	/** Callback to get packets from RxQ */
	eth_rx_burst_t			rx_pkt_burst;
	/** Data for the callback to get packets */
	struct sfc_dp_rxq		*rx_dp;
	/** Number of buffers pushed to the RxQ */
	unsigned int			pushed_n_buffers;
	/** Whether credits are used by the counter stream */
	bool				use_credits;

	/* Information used by configuration routines */
	enum sfc_mae_counter_polling_mode polling_mode;
	union {
		struct {
			/** Counter service core ID */
			uint32_t			core_id;
			/** Counter service ID */
			uint32_t			id;
		} service;
		struct {
			/** Counter thread ID */
			pthread_t			id;
			/** The thread should keep running */
			bool				run;
		} thread;
	} polling;
};
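
/*
 * Only the "polling" member selected by polling_mode is valid: "service"
 * when the mode is SFC_MAE_COUNTER_POLLING_SERVICE, "thread" when it is
 * SFC_MAE_COUNTER_POLLING_THREAD, and neither when polling is off.
 * A dispatch sketch (the consume_*() callees are placeholders):
 *
 *	switch (registry->polling_mode) {
 *	case SFC_MAE_COUNTER_POLLING_SERVICE:
 *		consume_service(registry->polling.service.core_id,
 *				registry->polling.service.id);
 *		break;
 *	case SFC_MAE_COUNTER_POLLING_THREAD:
 *		consume_thread(registry->polling.thread.id,
 *			       registry->polling.thread.run);
 *		break;
 *	default:
 *		break;	// SFC_MAE_COUNTER_POLLING_OFF
 *	}
 */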

/**
 * MAE rules used to capture traffic generated by VFs and direct it to
 * representors (one for each VF).
 */
#define SFC_MAE_NB_REPR_RULES_MAX	(64)

/** Rules to forward traffic from PHY port to PF and from PF to PHY port */
#define SFC_MAE_NB_SWITCHDEV_RULES	(2)
/** Maximum number of internal MAE rules required */
#define SFC_MAE_NB_RULES_MAX		(SFC_MAE_NB_SWITCHDEV_RULES + \
					 SFC_MAE_NB_REPR_RULES_MAX)

struct sfc_mae_rule {
	efx_mae_match_spec_t		*spec;
	efx_mae_actions_t		*actions;
	efx_mae_aset_id_t		action_set;
	efx_mae_rule_id_t		rule_id;
};

struct sfc_mae_internal_rules {
	/*
	 * Rules required to sustain switchdev mode or to provide
	 * port representor functionality.
	 */
	struct sfc_mae_rule		rules[SFC_MAE_NB_RULES_MAX];
};

struct sfc_mae {
	/** Assigned switch domain identifier */
	uint16_t			switch_domain_id;
	/** Assigned switch port identifier */
	uint16_t			switch_port_id;
	/** MAE support status of the NIC */
	enum sfc_mae_status		status;
	/** Priority level limit for MAE outer rules */
	unsigned int			nb_outer_rule_prios_max;
	/** Priority level limit for MAE action rules */
	unsigned int			nb_action_rule_prios_max;
	/** Encapsulation support status */
	uint32_t			encap_types_supported;
	/** Outer rule registry */
	struct sfc_mae_outer_rules	outer_rules;
	/** Encap. header registry */
	struct sfc_mae_encap_headers	encap_headers;
	/** MAC address registry */
	struct sfc_mae_mac_addrs	mac_addrs;
	/** Action set registry */
	struct sfc_mae_action_sets	action_sets;
	/** Encap. header bounce buffer */
	struct sfc_mae_bounce_eh	bounce_eh;
	/** Flag indicating whether counter-only RxQ is running */
	bool				counter_rxq_running;
	/** Counter registry */
	struct sfc_mae_counter_registry	counter_registry;
	/** Driver-internal flow rules */
	struct sfc_mae_internal_rules	internal_rules;
	/**
	 * Switchdev default rules. They forward traffic from PHY port
	 * to PF and vice versa.
	 */
	struct sfc_mae_rule		*switchdev_rule_pf_to_ext;
	struct sfc_mae_rule		*switchdev_rule_ext_to_pf;
};

struct sfc_adapter;
struct sfc_flow_spec;

/** This implementation supports double-tagging */
#define SFC_MAE_MATCH_VLAN_MAX_NTAGS	(2)

/** It is possible to keep track of one item ETH and two items VLAN */
#define SFC_MAE_L2_MAX_NITEMS		(SFC_MAE_MATCH_VLAN_MAX_NTAGS + 1)

/** Auxiliary entry format to keep track of L2 "type" ("inner_type") */
struct sfc_mae_ethertype {
	rte_be16_t	value;
	rte_be16_t	mask;
};

struct sfc_mae_pattern_data {
	/**
	 * Keeps track of the "type" ("inner_type") mask and value for each
	 * parsed L2 item in a pattern. These values/masks get filled
	 * in the MAE match specification at the end of parsing. Also, this
	 * information is used to conduct consistency checks:
	 *
	 * - If an item ETH is followed by a single item VLAN,
	 *   the former must have "type" set to one of the supported
	 *   TPID values (0x8100, 0x88a8, 0x9100, 0x9200, 0x9300),
	 *   or 0x0000/0x0000.
	 *
	 * - If an item ETH is followed by two items VLAN, the
	 *   item ETH must have "type" set to one of the supported TPID
	 *   values (0x88a8, 0x9100, 0x9200, 0x9300), or 0x0000/0x0000,
	 *   and the outermost VLAN item must have "inner_type" set
	 *   to TPID value 0x8100, or 0x0000/0x0000.
	 *
	 * - If an L2 item is followed by an L3 one, the former must
	 *   indicate a "type" ("inner_type") which corresponds to
	 *   the protocol used in the L3 item, or 0x0000/0x0000.
	 *
	 * In turn, the mapping between the RTE convention (the above
	 * requirements) and the MAE fields is non-trivial. The following
	 * scheme indicates
	 * which item EtherTypes go to which MAE fields in the case
	 * of single tag:
	 *
	 * ETH	(0x8100)	--> VLAN0_PROTO_BE
	 * VLAN	(L3 EtherType)	--> ETHER_TYPE_BE
	 *
	 * Similarly, in the case of double tagging:
	 *
	 * ETH	(0x88a8)	--> VLAN0_PROTO_BE
	 * VLAN	(0x8100)	--> VLAN1_PROTO_BE
	 * VLAN	(L3 EtherType)	--> ETHER_TYPE_BE
	 */
	struct sfc_mae_ethertype	ethertypes[SFC_MAE_L2_MAX_NITEMS];

	rte_be16_t			tci_masks[SFC_MAE_MATCH_VLAN_MAX_NTAGS];

	unsigned int			nb_vlan_tags;

	/**
	 * L3 requirement for the innermost L2 item's "type" ("inner_type").
	 * This contains one of:
	 * - 0x0800/0xffff: IPV4
	 * - 0x86dd/0xffff: IPV6
	 * - 0x0000/0x0000: no L3 item
	 */
	struct sfc_mae_ethertype	innermost_ethertype_restriction;

	/**
	 * The following two fields keep track of the L3 "proto" mask and
	 * value. The corresponding fields get filled in the MAE match
	 * specification at the end of parsing. Also, the information is
	 * used by a post-check to enforce consistency requirements:
	 *
	 * - If an L3 item is followed by an item TCP, the former has
	 *   its "proto" set to either 0x06/0xff or 0x00/0x00.
	 *
	 * - If an L3 item is followed by an item UDP, the former has
	 *   its "proto" set to either 0x11/0xff or 0x00/0x00.
	 */
	uint8_t				l3_next_proto_value;
	uint8_t				l3_next_proto_mask;

	/*
	 * L4 requirement for L3 item's "proto".
	 * This contains one of:
	 * - 0x06/0xff: TCP
	 * - 0x11/0xff: UDP
	 * - 0x00/0x00: no L4 item
	 */
	uint8_t				l3_next_proto_restriction_value;
	uint8_t				l3_next_proto_restriction_mask;

	/* Projected state of EFX_MAE_FIELD_HAS_OVLAN match bit */
	bool				has_ovlan_value;
	bool				has_ovlan_mask;

	/* Projected state of EFX_MAE_FIELD_HAS_IVLAN match bit */
	bool				has_ivlan_value;
	bool				has_ivlan_mask;
};
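
/*
 * A worked example of the EtherType bookkeeping described above (not a new
 * requirement, just the documented mapping applied to a concrete pattern).
 * For the double-tagged pattern
 *
 *	ETH (type 0x88a8) / VLAN (inner_type 0x8100) /
 *	VLAN (inner_type 0x0800) / IPV4
 *
 * parsing records three entries in "ethertypes" (0x88a8, 0x8100, 0x0800)
 * and sets nb_vlan_tags to 2. The IPV4 item sets
 * innermost_ethertype_restriction to 0x0800/0xffff, which must agree with
 * the innermost L2 "inner_type" (0x0800 here). At the end of parsing the
 * values go to the MAE fields as VLAN0_PROTO_BE = 0x88a8,
 * VLAN1_PROTO_BE = 0x8100 and ETHER_TYPE_BE = 0x0800.
 */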

struct sfc_mae_parse_ctx {
	struct sfc_adapter		*sa;
	efx_mae_match_spec_t		*match_spec_action;
	efx_mae_match_spec_t		*match_spec_outer;
	/*
	 * This points to either of the above two specifications depending
	 * on which part of the pattern is being parsed (outer / inner).
	 */
	efx_mae_match_spec_t		*match_spec;
	/*
	 * This points to either "field_ids_remap_to_encap"
	 * or "field_ids_no_remap" (see sfc_mae.c) depending on
	 * which part of the pattern is being parsed.
	 */
	const efx_mae_field_id_t	*field_ids_remap;
	/* These two fields correspond to the tunnel-specific default mask. */
	size_t				tunnel_def_mask_size;
	const void			*tunnel_def_mask;
	bool				match_mport_set;
	enum sfc_flow_tunnel_rule_type	ft_rule_type;
	struct sfc_mae_pattern_data	pattern_data;
	efx_tunnel_protocol_t		encap_type;
	const struct rte_flow_item	*pattern;
	unsigned int			priority;
	struct sfc_flow_tunnel		*ft;
};

int sfc_mae_attach(struct sfc_adapter *sa);
void sfc_mae_detach(struct sfc_adapter *sa);
sfc_flow_cleanup_cb_t sfc_mae_flow_cleanup;
int sfc_mae_rule_parse_pattern(struct sfc_adapter *sa,
			       const struct rte_flow_item pattern[],
			       struct sfc_flow_spec_mae *spec,
			       struct rte_flow_error *error);
int sfc_mae_rule_parse_actions(struct sfc_adapter *sa,
			       const struct rte_flow_action actions[],
			       struct sfc_flow_spec_mae *spec_mae,
			       struct rte_flow_error *error);
sfc_flow_verify_cb_t sfc_mae_flow_verify;
sfc_flow_insert_cb_t sfc_mae_flow_insert;
sfc_flow_remove_cb_t sfc_mae_flow_remove;
sfc_flow_query_cb_t sfc_mae_flow_query;

/**
 * The value used to represent the lowest priority.
 * Used in MAE rule API.
 */
#define SFC_MAE_RULE_PRIO_LOWEST	(-1)

/**
 * Insert a driver-internal flow rule that matches traffic originating from
 * some m-port selector and redirects it to another one
 * (e.g. PF --> PHY, PHY --> PF).
 *
 * If the requested priority is negative, the lowest priority is used.
 */
int sfc_mae_rule_add_mport_match_deliver(struct sfc_adapter *sa,
					 const efx_mport_sel_t *mport_match,
					 const efx_mport_sel_t *mport_deliver,
					 int prio, struct sfc_mae_rule **rulep);
void sfc_mae_rule_del(struct sfc_adapter *sa, struct sfc_mae_rule *rule);
int sfc_mae_switchdev_init(struct sfc_adapter *sa);
void sfc_mae_switchdev_fini(struct sfc_adapter *sa);
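
/*
 * Usage sketch for the helpers above (illustrative only): switchdev
 * initialisation could create the two default rules roughly like this,
 * assuming "phy" and "pf" efx_mport_sel_t selectors have already been
 * built:
 *
 *	rc = sfc_mae_rule_add_mport_match_deliver(sa, &phy, &pf,
 *			SFC_MAE_RULE_PRIO_LOWEST,
 *			&mae->switchdev_rule_ext_to_pf);
 *	if (rc == 0)
 *		rc = sfc_mae_rule_add_mport_match_deliver(sa, &pf, &phy,
 *				SFC_MAE_RULE_PRIO_LOWEST,
 *				&mae->switchdev_rule_pf_to_ext);
 *
 * and sfc_mae_switchdev_fini() would tear them down with
 * sfc_mae_rule_del().
 */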

#ifdef __cplusplus
}
#endif
#endif /* _SFC_MAE_H */