/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2018-2021 NXP
 */

#include <sys/queue.h>
#include <stdio.h>
#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <stdarg.h>

#include <rte_ethdev.h>
#include <rte_log.h>
#include <rte_malloc.h>
#include <rte_flow_driver.h>
#include <rte_tailq.h>

#include <rte_fslmc.h>
#include <fsl_dpdmux.h>
#include <fsl_dpkg.h>

#include <dpaa2_ethdev.h>
#include <dpaa2_pmd_logs.h>

struct dpaa2_dpdmux_dev {
	TAILQ_ENTRY(dpaa2_dpdmux_dev) next;
		/**< Pointer to next device instance */
	struct fsl_mc_io dpdmux; /**< Handle to DPDMUX portal object */
	uint16_t token;
	uint32_t dpdmux_id; /* HW ID for DPDMUX object */
	uint8_t num_ifs; /* Number of interfaces in DPDMUX */
};

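/* The PMD's opaque rte_flow handle wraps a single DPDMUX rule config. */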
struct rte_flow {
	struct dpdmux_rule_cfg rule;
};

TAILQ_HEAD(dpdmux_dev_list, dpaa2_dpdmux_dev);
static struct dpdmux_dev_list dpdmux_dev_list =
	TAILQ_HEAD_INITIALIZER(dpdmux_dev_list); /*!< DPDMUX device list */

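/* Look up an already-created DPDMUX device by its hardware object ID;
 * returns NULL when no matching device is on the list.
 */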
static struct dpaa2_dpdmux_dev *get_dpdmux_from_id(uint32_t dpdmux_id)
{
	struct dpaa2_dpdmux_dev *dpdmux_dev = NULL;

	/* Get DPDMUX dev handle from list using index */
	TAILQ_FOREACH(dpdmux_dev, &dpdmux_dev_list, next) {
		if (dpdmux_dev->dpdmux_id == dpdmux_id)
			break;
	}

	return dpdmux_dev;
}

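/**
 * Create a classification rule on a DPDMUX object: traffic matching
 * pattern[0] is steered to the DPDMUX interface selected by actions[0]
 * (an rte_flow_action_vf whose "id" picks the destination interface).
 * Returns an opaque flow handle on success (the rule's key and mask
 * buffers live inside it), or NULL on failure.
 *
 * A minimal usage sketch (hypothetical object ID and values, error
 * handling omitted) steering UDP destination port 0x1234 to interface 1:
 *
 *	struct rte_flow_item_udp spec = {
 *		.hdr.dst_port = RTE_BE16(0x1234),
 *	};
 *	rte_be16_t mask = RTE_BE16(0xffff); // first 2 bytes copied verbatim
 *	struct rte_flow_item item = {
 *		.type = RTE_FLOW_ITEM_TYPE_UDP,
 *		.spec = &spec,
 *		.mask = &mask,
 *	};
 *	struct rte_flow_action_vf vf = { .id = 1 };
 *	struct rte_flow_action action = {
 *		.type = RTE_FLOW_ACTION_TYPE_VF,
 *		.conf = &vf,
 *	};
 *	struct rte_flow_item *pattern[] = { &item };
 *	struct rte_flow_action *actions[] = { &action };
 *	struct rte_flow *flow;
 *
 *	flow = rte_pmd_dpaa2_mux_flow_create(0, pattern, actions);
 */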
struct rte_flow *
rte_pmd_dpaa2_mux_flow_create(uint32_t dpdmux_id,
			      struct rte_flow_item *pattern[],
			      struct rte_flow_action *actions[])
{
	struct dpaa2_dpdmux_dev *dpdmux_dev;
	struct dpkg_profile_cfg kg_cfg;
	const struct rte_flow_action_vf *vf_conf;
	struct dpdmux_cls_action dpdmux_action;
	struct rte_flow *flow = NULL;
	void *key_iova, *mask_iova, *key_cfg_iova = NULL;
	uint8_t key_size = 0;
	int ret;
	/* Running rule index, kept across calls; also gates the one-time
	 * key-extract configuration below.
	 */
	static int i;

	if (!pattern || !actions || !pattern[0] || !actions[0])
		return NULL;

	/* Find the DPDMUX from dpdmux_id in our list */
	dpdmux_dev = get_dpdmux_from_id(dpdmux_id);
	if (!dpdmux_dev) {
		DPAA2_PMD_ERR("Invalid dpdmux_id: %d", dpdmux_id);
		return NULL;
	}

	key_cfg_iova = rte_zmalloc(NULL, DIST_PARAM_IOVA_SIZE,
				   RTE_CACHE_LINE_SIZE);
	if (!key_cfg_iova) {
		DPAA2_PMD_ERR("Unable to allocate flow-dist parameters");
		return NULL;
	}
	/* The key and mask buffers live inline, right after the rte_flow
	 * structure in the same allocation.
	 */
	flow = rte_zmalloc(NULL, sizeof(struct rte_flow) +
			   (2 * DIST_PARAM_IOVA_SIZE), RTE_CACHE_LINE_SIZE);
	if (!flow) {
		DPAA2_PMD_ERR("Memory allocation failure for rule configuration");
		goto creation_error;
	}
	key_iova = (void *)((size_t)flow + sizeof(struct rte_flow));
	mask_iova = (void *)((size_t)key_iova + DIST_PARAM_IOVA_SIZE);

	/* Currently taking only IP protocol as an extract type.
	 * This can be extended to other fields using pattern->type.
	 */
	memset(&kg_cfg, 0, sizeof(struct dpkg_profile_cfg));

	switch (pattern[0]->type) {
	case RTE_FLOW_ITEM_TYPE_IPV4:
	{
		const struct rte_flow_item_ipv4 *spec;

		kg_cfg.extracts[0].extract.from_hdr.prot = NET_PROT_IP;
		kg_cfg.extracts[0].extract.from_hdr.field = NH_FLD_IP_PROTO;
		kg_cfg.extracts[0].type = DPKG_EXTRACT_FROM_HDR;
		kg_cfg.extracts[0].extract.from_hdr.type = DPKG_FULL_FIELD;
		kg_cfg.num_extracts = 1;

		spec = (const struct rte_flow_item_ipv4 *)pattern[0]->spec;
		memcpy(key_iova, (const void *)&spec->hdr.next_proto_id,
		       sizeof(uint8_t));
		memcpy(mask_iova, pattern[0]->mask, sizeof(uint8_t));
		key_size = sizeof(uint8_t);
	}
	break;

	case RTE_FLOW_ITEM_TYPE_UDP:
	{
		const struct rte_flow_item_udp *spec;
		uint16_t udp_dst_port;

		kg_cfg.extracts[0].extract.from_hdr.prot = NET_PROT_UDP;
		kg_cfg.extracts[0].extract.from_hdr.field = NH_FLD_UDP_PORT_DST;
		kg_cfg.extracts[0].type = DPKG_EXTRACT_FROM_HDR;
		kg_cfg.extracts[0].extract.from_hdr.type = DPKG_FULL_FIELD;
		kg_cfg.num_extracts = 1;

		spec = (const struct rte_flow_item_udp *)pattern[0]->spec;
		udp_dst_port = rte_constant_bswap16(spec->hdr.dst_port);
		memcpy((void *)key_iova, (const void *)&udp_dst_port,
		       sizeof(rte_be16_t));
		memcpy(mask_iova, pattern[0]->mask, sizeof(uint16_t));
		key_size = sizeof(uint16_t);
	}
	break;

	case RTE_FLOW_ITEM_TYPE_ETH:
	{
		const struct rte_flow_item_eth *spec;
		uint16_t eth_type;

		kg_cfg.extracts[0].extract.from_hdr.prot = NET_PROT_ETH;
		kg_cfg.extracts[0].extract.from_hdr.field = NH_FLD_ETH_TYPE;
		kg_cfg.extracts[0].type = DPKG_EXTRACT_FROM_HDR;
		kg_cfg.extracts[0].extract.from_hdr.type = DPKG_FULL_FIELD;
		kg_cfg.num_extracts = 1;

		spec = (const struct rte_flow_item_eth *)pattern[0]->spec;
		eth_type = rte_constant_bswap16(spec->type);
		memcpy((void *)key_iova, (const void *)&eth_type,
		       sizeof(rte_be16_t));
		memcpy(mask_iova, pattern[0]->mask, sizeof(uint16_t));
		key_size = sizeof(uint16_t);
	}
	break;

	case RTE_FLOW_ITEM_TYPE_RAW:
	{
		const struct rte_flow_item_raw *spec;

		spec = (const struct rte_flow_item_raw *)pattern[0]->spec;
		kg_cfg.extracts[0].extract.from_data.offset = spec->offset;
		kg_cfg.extracts[0].extract.from_data.size = spec->length;
		kg_cfg.extracts[0].type = DPKG_EXTRACT_FROM_DATA;
		kg_cfg.num_extracts = 1;
		memcpy((void *)key_iova, (const void *)spec->pattern,
		       spec->length);
		memcpy(mask_iova, pattern[0]->mask, spec->length);

		key_size = spec->length;
	}
	break;

	default:
		DPAA2_PMD_ERR("Unsupported pattern type: %d",
			      pattern[0]->type);
		goto creation_error;
	}

	ret = dpkg_prepare_key_cfg(&kg_cfg, key_cfg_iova);
	if (ret) {
		DPAA2_PMD_ERR("dpkg_prepare_key_cfg failed: err(%d)", ret);
		goto creation_error;
	}

	/* Only rules sharing the same DPKG extract configuration (kg_cfg),
	 * e.g. identical offset and length values for RAW patterns, are
	 * supported right now. Rules with differing kg_cfg values may not
	 * work, since the key profile is programmed only once, on the first
	 * call.
	 */
	if (i == 0) {
		ret = dpdmux_set_custom_key(&dpdmux_dev->dpdmux, CMD_PRI_LOW,
					    dpdmux_dev->token,
			(uint64_t)(DPAA2_VADDR_TO_IOVA(key_cfg_iova)));
		if (ret) {
			DPAA2_PMD_ERR("dpdmux_set_custom_key failed: err(%d)",
				      ret);
			goto creation_error;
		}
	}
	/* Now that the key extract parameters are set, configure the rule. */
	flow->rule.key_iova = (uint64_t)(DPAA2_VADDR_TO_IOVA(key_iova));
	flow->rule.mask_iova = (uint64_t)(DPAA2_VADDR_TO_IOVA(mask_iova));
	flow->rule.key_size = key_size;
	flow->rule.entry_index = i++;

	vf_conf = (const struct rte_flow_action_vf *)(actions[0]->conf);
	if (vf_conf->id == 0 || vf_conf->id > dpdmux_dev->num_ifs) {
		DPAA2_PMD_ERR("Invalid destination id");
		goto creation_error;
	}
	dpdmux_action.dest_if = vf_conf->id;

	ret = dpdmux_add_custom_cls_entry(&dpdmux_dev->dpdmux, CMD_PRI_LOW,
					  dpdmux_dev->token, &flow->rule,
					  &dpdmux_action);
	if (ret) {
		DPAA2_PMD_ERR("dpdmux_add_custom_cls_entry failed: err(%d)",
			      ret);
		goto creation_error;
	}

	/* dpdmux_set_custom_key() is synchronous, so the key-config scratch
	 * buffer is no longer referenced once the rule is in place; free it
	 * here to avoid leaking one allocation per created flow.
	 */
	rte_free(key_cfg_iova);

	return flow;

creation_error:
	rte_free(key_cfg_iova);
	rte_free(flow);
	return NULL;
}

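/**
 * Program the maximum receive frame length on the DPDMUX object identified
 * by dpdmux_id. Returns 0 on success, -1 when no such object is known, or
 * the MC error code when the command itself fails.
 */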
int
rte_pmd_dpaa2_mux_rx_frame_len(uint32_t dpdmux_id, uint16_t max_rx_frame_len)
{
	struct dpaa2_dpdmux_dev *dpdmux_dev;
	int ret;

	/* Find the DPDMUX from dpdmux_id in our list */
	dpdmux_dev = get_dpdmux_from_id(dpdmux_id);
	if (!dpdmux_dev) {
		DPAA2_PMD_ERR("Invalid dpdmux_id: %d", dpdmux_id);
		return -1;
	}

	ret = dpdmux_set_max_frame_length(&dpdmux_dev->dpdmux,
			CMD_PRI_LOW, dpdmux_dev->token, max_rx_frame_len);
	if (ret) {
		DPAA2_PMD_ERR("DPDMUX: Unable to set max frame length, check config: err(%d)",
			      ret);
		return ret;
	}

	DPAA2_PMD_INFO("dpdmux max rx frame length set to %u",
		       max_rx_frame_len);

	return ret;
}

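/* Bus-scan callback: invoked by the fslmc bus for each DPDMUX object found
 * during VFIO probing. It opens the object, applies the default interface
 * and reset/error configuration, and adds the device to the list used by
 * the rte_pmd_dpaa2_mux_* API above.
 */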
static int
dpaa2_create_dpdmux_device(int vdev_fd __rte_unused,
			   struct vfio_device_info *obj_info __rte_unused,
			   int dpdmux_id)
{
	struct dpaa2_dpdmux_dev *dpdmux_dev;
	struct dpdmux_attr attr;
	int ret;
	uint16_t maj_ver;
	uint16_t min_ver;

	PMD_INIT_FUNC_TRACE();

	/* Allocate DPAA2 dpdmux handle */
	dpdmux_dev = rte_malloc(NULL, sizeof(struct dpaa2_dpdmux_dev), 0);
	if (!dpdmux_dev) {
		DPAA2_PMD_ERR("Memory allocation failed for DPDMUX Device");
		return -1;
	}

	/* Open the dpdmux object */
	dpdmux_dev->dpdmux.regs = dpaa2_get_mcp_ptr(MC_PORTAL_INDEX);
	ret = dpdmux_open(&dpdmux_dev->dpdmux, CMD_PRI_LOW, dpdmux_id,
			  &dpdmux_dev->token);
	if (ret) {
		DPAA2_PMD_ERR("Unable to open dpdmux object: err(%d)", ret);
		goto init_err;
	}

	ret = dpdmux_get_attributes(&dpdmux_dev->dpdmux, CMD_PRI_LOW,
				    dpdmux_dev->token, &attr);
	if (ret) {
		DPAA2_PMD_ERR("Unable to get dpdmux attr: err(%d)", ret);
		goto init_err;
	}

	ret = dpdmux_if_set_default(&dpdmux_dev->dpdmux, CMD_PRI_LOW,
				    dpdmux_dev->token, attr.default_if);
	if (ret) {
		DPAA2_PMD_ERR("setting default interface failed in %s",
			      __func__);
		goto init_err;
	}

	ret = dpdmux_get_api_version(&dpdmux_dev->dpdmux, CMD_PRI_LOW,
				     &maj_ver, &min_ver);
	if (ret) {
		DPAA2_PMD_ERR("getting API version failed in %s",
			      __func__);
		goto init_err;
	}

	/* The new dpdmux_set/get_resetable() API is available starting with
	 * DPDMUX API version 6.6. Compare the major version first so that,
	 * e.g., 7.0 also qualifies.
	 */
	if (maj_ver > 6 || (maj_ver == 6 && min_ver >= 6)) {
		ret = dpdmux_set_resetable(&dpdmux_dev->dpdmux, CMD_PRI_LOW,
				dpdmux_dev->token,
				DPDMUX_SKIP_DEFAULT_INTERFACE |
				DPDMUX_SKIP_UNICAST_RULES |
				DPDMUX_SKIP_MULTICAST_RULES);
		if (ret) {
			DPAA2_PMD_ERR("setting resetable failed in %s",
				      __func__);
			goto init_err;
		}
	}

	/* dpdmux_if_set_errors_behavior() is available starting with
	 * DPDMUX API version 6.9.
	 */
	if (maj_ver > 6 || (maj_ver == 6 && min_ver >= 9)) {
		struct dpdmux_error_cfg mux_err_cfg;

		memset(&mux_err_cfg, 0, sizeof(mux_err_cfg));
		mux_err_cfg.error_action = DPDMUX_ERROR_ACTION_CONTINUE;
		mux_err_cfg.errors = DPDMUX_ERROR_DISC;

		ret = dpdmux_if_set_errors_behavior(&dpdmux_dev->dpdmux,
				CMD_PRI_LOW,
				dpdmux_dev->token, dpdmux_id,
				&mux_err_cfg);
		if (ret) {
			DPAA2_PMD_ERR("dpdmux_if_set_errors_behavior %s err %d",
				      __func__, ret);
			goto init_err;
		}
	}

	dpdmux_dev->dpdmux_id = dpdmux_id;
	dpdmux_dev->num_ifs = attr.num_ifs;

	TAILQ_INSERT_TAIL(&dpdmux_dev_list, dpdmux_dev, next);

	return 0;

init_err:
	rte_free(dpdmux_dev);

	return -1;
}

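/* Register the DPDMUX object type with the fslmc bus so that the .create
 * callback above runs for every DPDMUX object discovered during scan.
 */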
static struct rte_dpaa2_object rte_dpaa2_dpdmux_obj = {
	.dev_type = DPAA2_MUX,
	.create = dpaa2_create_dpdmux_device,
};

RTE_PMD_REGISTER_DPAA2_OBJECT(dpdmux, rte_dpaa2_dpdmux_obj);