/* SPDX-License-Identifier: BSD-3-Clause
 *
 *   Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
 *   Copyright 2016-2019 NXP
 *
 */

#include <time.h>
#include <net/if.h>

#include <rte_mbuf.h>
#include <rte_ethdev_driver.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_string_fns.h>
#include <rte_cycles.h>
#include <rte_kvargs.h>
#include <rte_dev.h>

#include <dpaa2_pmd_logs.h>
#include <dpaa2_hw_pvt.h>
#include <dpaa2_hw_mempool.h>

#include "../dpaa2_ethdev.h"

int
dpaa2_distset_to_dpkg_profile_cfg(
		uint64_t req_dist_set,
		struct dpkg_profile_cfg *kg_cfg);

int
rte_pmd_dpaa2_set_custom_hash(uint16_t port_id,
			      uint16_t offset,
			      uint8_t size)
{
	struct rte_eth_dev *eth_dev = &rte_eth_devices[port_id];
	struct dpaa2_dev_priv *priv = eth_dev->data->dev_private;
	struct fsl_mc_io *dpni = priv->hw;
	struct dpni_rx_tc_dist_cfg tc_cfg;
	struct dpkg_profile_cfg kg_cfg;
	void *p_params;
	int ret, tc_index = 0;

	p_params = rte_zmalloc(
		NULL, DIST_PARAM_IOVA_SIZE, RTE_CACHE_LINE_SIZE);
	if (!p_params) {
		DPAA2_PMD_ERR("Unable to allocate flow-dist parameters");
		return -ENOMEM;
	}

	/* Zero the profile first so unused extract entries and byte masks
	 * are in a known state before dpkg_prepare_key_cfg() reads it.
	 */
	memset(&kg_cfg, 0, sizeof(struct dpkg_profile_cfg));
	kg_cfg.extracts[0].type = DPKG_EXTRACT_FROM_DATA;
	kg_cfg.extracts[0].extract.from_data.offset = offset;
	kg_cfg.extracts[0].extract.from_data.size = size;
	kg_cfg.extracts[0].num_of_byte_masks = 0;
	kg_cfg.num_extracts = 1;

	ret = dpkg_prepare_key_cfg(&kg_cfg, p_params);
	if (ret) {
		DPAA2_PMD_ERR("Unable to prepare extract parameters");
		rte_free(p_params);
		return ret;
	}

	memset(&tc_cfg, 0, sizeof(struct dpni_rx_tc_dist_cfg));
	tc_cfg.key_cfg_iova = (size_t)(DPAA2_VADDR_TO_IOVA(p_params));
	tc_cfg.dist_size = eth_dev->data->nb_rx_queues;
	tc_cfg.dist_mode = DPNI_DIST_MODE_HASH;

	ret = dpni_set_rx_tc_dist(dpni, CMD_PRI_LOW, priv->token, tc_index,
				  &tc_cfg);
	rte_free(p_params);
	if (ret) {
		DPAA2_PMD_ERR(
			"Setting distribution for Rx failed with err: %d",
			ret);
		return ret;
	}

	return 0;
}
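
/*
 * A minimal usage sketch for rte_pmd_dpaa2_set_custom_hash() above; the
 * port id, offset and size values are illustrative assumptions, not driver
 * defaults. It spreads Rx traffic on port 0 by hashing 4 bytes located 16
 * bytes into the frame data:
 *
 *	int ret = rte_pmd_dpaa2_set_custom_hash(0, 16, 4);
 *
 *	if (ret)
 *		DPAA2_PMD_ERR("custom hash setup failed: %d", ret);
 *
 * Note the extract is of type DPKG_EXTRACT_FROM_DATA, so the offset counts
 * from the start of the frame rather than from a parsed header field.
 */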

int
dpaa2_setup_flow_dist(struct rte_eth_dev *eth_dev,
		      uint64_t req_dist_set, int tc_index)
{
	struct dpaa2_dev_priv *priv = eth_dev->data->dev_private;
	struct fsl_mc_io *dpni = priv->hw;
	struct dpni_rx_dist_cfg tc_cfg;
	struct dpkg_profile_cfg kg_cfg;
	void *p_params;
	int ret, tc_dist_queues;

	/* Each TC, taken in priority order, is given up to dist_queues Rx
	 * queues; the last populated TC receives whatever remains of
	 * nb_rx_queues. Compute the distribution size for this tc_index.
	 */
	tc_dist_queues = eth_dev->data->nb_rx_queues -
		tc_index * priv->dist_queues;
	if (tc_dist_queues <= 0) {
		DPAA2_PMD_INFO("No distribution on TC%d", tc_index);
		return 0;
	}

	if (tc_dist_queues > priv->dist_queues)
		tc_dist_queues = priv->dist_queues;

	p_params = rte_malloc(
		NULL, DIST_PARAM_IOVA_SIZE, RTE_CACHE_LINE_SIZE);
	if (!p_params) {
		DPAA2_PMD_ERR("Unable to allocate flow-dist parameters");
		return -ENOMEM;
	}

	memset(p_params, 0, DIST_PARAM_IOVA_SIZE);
	memset(&tc_cfg, 0, sizeof(struct dpni_rx_dist_cfg));

	ret = dpaa2_distset_to_dpkg_profile_cfg(req_dist_set, &kg_cfg);
	if (ret) {
		DPAA2_PMD_ERR("Given RSS Hash (%" PRIx64 ") not supported",
			      req_dist_set);
		rte_free(p_params);
		return ret;
	}

	tc_cfg.key_cfg_iova = (uint64_t)(DPAA2_VADDR_TO_IOVA(p_params));
	tc_cfg.dist_size = tc_dist_queues;
	tc_cfg.enable = true;
	tc_cfg.tc = tc_index;

	ret = dpkg_prepare_key_cfg(&kg_cfg, p_params);
	if (ret) {
		DPAA2_PMD_ERR("Unable to prepare extract parameters");
		rte_free(p_params);
		return ret;
	}

	ret = dpni_set_rx_hash_dist(dpni, CMD_PRI_LOW, priv->token, &tc_cfg);
	rte_free(p_params);
	if (ret) {
		DPAA2_PMD_ERR(
			"Setting distribution for Rx failed with err: %d",
			ret);
		return ret;
	}

	return 0;
}
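
/*
 * A worked example of the per-TC sizing performed above, under assumed
 * values (illustrative, not driver defaults): with nb_rx_queues = 10 and
 * priv->dist_queues = 4, successive calls give
 *
 *	tc_index 0: 10 - 0 * 4 = 10 -> capped to 4 queues
 *	tc_index 1: 10 - 1 * 4 =  6 -> capped to 4 queues
 *	tc_index 2: 10 - 2 * 4 =  2 -> 2 queues (the remainder)
 *	tc_index 3: 10 - 3 * 4 < 0  -> no distribution on TC3
 */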

int dpaa2_remove_flow_dist(
	struct rte_eth_dev *eth_dev,
	uint8_t tc_index)
{
	struct dpaa2_dev_priv *priv = eth_dev->data->dev_private;
	struct fsl_mc_io *dpni = priv->hw;
	struct dpni_rx_dist_cfg tc_cfg;
	struct dpkg_profile_cfg kg_cfg;
	void *p_params;
	int ret;

	p_params = rte_malloc(
		NULL, DIST_PARAM_IOVA_SIZE, RTE_CACHE_LINE_SIZE);
	if (!p_params) {
		DPAA2_PMD_ERR("Unable to allocate flow-dist parameters");
		return -ENOMEM;
	}

	memset(&tc_cfg, 0, sizeof(struct dpni_rx_dist_cfg));
	tc_cfg.dist_size = 0;
	tc_cfg.key_cfg_iova = (uint64_t)(DPAA2_VADDR_TO_IOVA(p_params));
	tc_cfg.enable = true;
	tc_cfg.tc = tc_index;

	memset(p_params, 0, DIST_PARAM_IOVA_SIZE);
	kg_cfg.num_extracts = 0;
	ret = dpkg_prepare_key_cfg(&kg_cfg, p_params);
	if (ret) {
		DPAA2_PMD_ERR("Unable to prepare extract parameters");
		rte_free(p_params);
		return ret;
	}

	ret = dpni_set_rx_hash_dist(dpni, CMD_PRI_LOW, priv->token,
				    &tc_cfg);
	rte_free(p_params);
	if (ret)
		DPAA2_PMD_ERR(
			"Setting distribution for Rx failed with err: %d",
			ret);
	return ret;
}

int
dpaa2_distset_to_dpkg_profile_cfg(
		uint64_t req_dist_set,
		struct dpkg_profile_cfg *kg_cfg)
{
	uint32_t loop = 0, i = 0;
	uint64_t dist_field = 0;
	int l2_configured = 0, l3_configured = 0;
	int l4_configured = 0, sctp_configured = 0;

	memset(kg_cfg, 0, sizeof(struct dpkg_profile_cfg));
	while (req_dist_set) {
		if (req_dist_set % 2 != 0) {
			dist_field = 1ULL << loop;
			switch (dist_field) {
			case ETH_RSS_L2_PAYLOAD:

				if (l2_configured)
					break;
				l2_configured = 1;

				kg_cfg->extracts[i].extract.from_hdr.prot =
					NET_PROT_ETH;
				kg_cfg->extracts[i].extract.from_hdr.field =
					NH_FLD_ETH_TYPE;
				kg_cfg->extracts[i].type =
					DPKG_EXTRACT_FROM_HDR;
				kg_cfg->extracts[i].extract.from_hdr.type =
					DPKG_FULL_FIELD;
				i++;
				break;

			case ETH_RSS_IPV4:
			case ETH_RSS_FRAG_IPV4:
			case ETH_RSS_NONFRAG_IPV4_OTHER:
			case ETH_RSS_IPV6:
			case ETH_RSS_FRAG_IPV6:
			case ETH_RSS_NONFRAG_IPV6_OTHER:
			case ETH_RSS_IPV6_EX:

				if (l3_configured)
					break;
				l3_configured = 1;

				kg_cfg->extracts[i].extract.from_hdr.prot =
					NET_PROT_IP;
				kg_cfg->extracts[i].extract.from_hdr.field =
					NH_FLD_IP_SRC;
				kg_cfg->extracts[i].type =
					DPKG_EXTRACT_FROM_HDR;
				kg_cfg->extracts[i].extract.from_hdr.type =
					DPKG_FULL_FIELD;
				i++;

				kg_cfg->extracts[i].extract.from_hdr.prot =
					NET_PROT_IP;
				kg_cfg->extracts[i].extract.from_hdr.field =
					NH_FLD_IP_DST;
				kg_cfg->extracts[i].type =
					DPKG_EXTRACT_FROM_HDR;
				kg_cfg->extracts[i].extract.from_hdr.type =
					DPKG_FULL_FIELD;
				i++;

				kg_cfg->extracts[i].extract.from_hdr.prot =
					NET_PROT_IP;
				kg_cfg->extracts[i].extract.from_hdr.field =
					NH_FLD_IP_PROTO;
				kg_cfg->extracts[i].type =
					DPKG_EXTRACT_FROM_HDR;
				kg_cfg->extracts[i].extract.from_hdr.type =
					DPKG_FULL_FIELD;
				i++;
				break;

			case ETH_RSS_NONFRAG_IPV4_TCP:
			case ETH_RSS_NONFRAG_IPV6_TCP:
			case ETH_RSS_NONFRAG_IPV4_UDP:
			case ETH_RSS_NONFRAG_IPV6_UDP:
			case ETH_RSS_IPV6_TCP_EX:
			case ETH_RSS_IPV6_UDP_EX:

				if (l4_configured)
					break;
				l4_configured = 1;

				kg_cfg->extracts[i].extract.from_hdr.prot =
					NET_PROT_TCP;
				kg_cfg->extracts[i].extract.from_hdr.field =
					NH_FLD_TCP_PORT_SRC;
				kg_cfg->extracts[i].type =
					DPKG_EXTRACT_FROM_HDR;
				kg_cfg->extracts[i].extract.from_hdr.type =
					DPKG_FULL_FIELD;
				i++;

				kg_cfg->extracts[i].extract.from_hdr.prot =
					NET_PROT_TCP;
				kg_cfg->extracts[i].extract.from_hdr.field =
					NH_FLD_TCP_PORT_DST;
				kg_cfg->extracts[i].type =
					DPKG_EXTRACT_FROM_HDR;
				kg_cfg->extracts[i].extract.from_hdr.type =
					DPKG_FULL_FIELD;
				i++;
				break;

			case ETH_RSS_NONFRAG_IPV4_SCTP:
			case ETH_RSS_NONFRAG_IPV6_SCTP:

				if (sctp_configured)
					break;
				sctp_configured = 1;

				kg_cfg->extracts[i].extract.from_hdr.prot =
					NET_PROT_SCTP;
				kg_cfg->extracts[i].extract.from_hdr.field =
					NH_FLD_SCTP_PORT_SRC;
				kg_cfg->extracts[i].type =
					DPKG_EXTRACT_FROM_HDR;
				kg_cfg->extracts[i].extract.from_hdr.type =
					DPKG_FULL_FIELD;
				i++;

				kg_cfg->extracts[i].extract.from_hdr.prot =
					NET_PROT_SCTP;
				kg_cfg->extracts[i].extract.from_hdr.field =
					NH_FLD_SCTP_PORT_DST;
				kg_cfg->extracts[i].type =
					DPKG_EXTRACT_FROM_HDR;
				kg_cfg->extracts[i].extract.from_hdr.type =
					DPKG_FULL_FIELD;
				i++;
				break;

			default:
				DPAA2_PMD_WARN(
					"Unsupported flow dist option %" PRIx64,
					dist_field);
				return -EINVAL;
			}
		}
		req_dist_set = req_dist_set >> 1;
		loop++;
	}
	kg_cfg->num_extracts = i;
	return 0;
}
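
/*
 * A sketch of what the translation above produces for a typical 5-tuple
 * request (the flag combination is an illustrative assumption): passing
 * ETH_RSS_IPV4 | ETH_RSS_NONFRAG_IPV4_UDP yields five extracts, in order:
 * IP source, IP destination, IP protocol, L4 source port and L4 destination
 * port. Note the L4 cases program NET_PROT_TCP even for the UDP flags, as
 * the switch above does for every L4 case.
 */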

int
dpaa2_attach_bp_list(struct dpaa2_dev_priv *priv,
		     void *blist)
{
	/* Attach a DPNI to a buffer pool list. The buffer pool list handle
	 * is passed in blist.
	 */
	int32_t retcode;
	struct fsl_mc_io *dpni = priv->hw;
	struct dpni_pools_cfg bpool_cfg;
	struct dpaa2_bp_list *bp_list = (struct dpaa2_bp_list *)blist;
	struct dpni_buffer_layout layout;
	int tot_size;

	/* Rx buffer layout: check alignment of the headroom first */
	tot_size = RTE_PKTMBUF_HEADROOM;
	tot_size = RTE_ALIGN_CEIL(tot_size, DPAA2_PACKET_LAYOUT_ALIGN);

	memset(&layout, 0, sizeof(struct dpni_buffer_layout));
	layout.options = DPNI_BUF_LAYOUT_OPT_DATA_HEAD_ROOM |
			 DPNI_BUF_LAYOUT_OPT_FRAME_STATUS |
			 DPNI_BUF_LAYOUT_OPT_PARSER_RESULT |
			 DPNI_BUF_LAYOUT_OPT_DATA_ALIGN |
			 DPNI_BUF_LAYOUT_OPT_TIMESTAMP |
			 DPNI_BUF_LAYOUT_OPT_PRIVATE_DATA_SIZE;

	layout.pass_timestamp = true;
	layout.pass_frame_status = 1;
	layout.private_data_size = DPAA2_FD_PTA_SIZE;
	layout.pass_parser_result = 1;
	layout.data_align = DPAA2_PACKET_LAYOUT_ALIGN;
	layout.data_head_room = tot_size - DPAA2_FD_PTA_SIZE -
				DPAA2_MBUF_HW_ANNOTATION;
	retcode = dpni_set_buffer_layout(dpni, CMD_PRI_LOW, priv->token,
					 DPNI_QUEUE_RX, &layout);
	if (retcode) {
		DPAA2_PMD_ERR("Error configuring buffer pool Rx layout (%d)",
			      retcode);
		return retcode;
	}

	/* Attach the buffer pool to the network interface as described by
	 * the user.
	 */
	memset(&bpool_cfg, 0, sizeof(struct dpni_pools_cfg));
	bpool_cfg.num_dpbp = 1;
	bpool_cfg.pools[0].dpbp_id = bp_list->buf_pool.dpbp_node->dpbp_id;
	bpool_cfg.pools[0].backup_pool = 0;
	bpool_cfg.pools[0].buffer_size = RTE_ALIGN_CEIL(bp_list->buf_pool.size,
						DPAA2_PACKET_LAYOUT_ALIGN);
	bpool_cfg.pools[0].priority_mask = 0;

	retcode = dpni_set_pools(dpni, CMD_PRI_LOW, priv->token, &bpool_cfg);
	if (retcode != 0) {
		DPAA2_PMD_ERR("Error configuring buffer pool on interface."
			      " bpid = %d error code = %d",
			      bpool_cfg.pools[0].dpbp_id, retcode);
		return retcode;
	}

	priv->bp_list = bp_list;
	return 0;
}
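
/*
 * A worked example of the headroom arithmetic above, under assumed values
 * (illustrative; the real constants come from the platform headers): with
 * RTE_PKTMBUF_HEADROOM = 128, DPAA2_PACKET_LAYOUT_ALIGN = 64,
 * DPAA2_FD_PTA_SIZE = 64 and DPAA2_MBUF_HW_ANNOTATION = 64:
 *
 *	tot_size              = RTE_ALIGN_CEIL(128, 64) = 128
 *	layout.data_head_room = 128 - 64 - 64           = 0
 *
 * i.e. the private area and the hardware annotation are carved out of the
 * mbuf headroom, and only the remainder is requested as DPNI data headroom.
 */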