1 /* SPDX-License-Identifier: BSD-3-Clause
2 *
3 * Copyright(c) 2022 Xilinx, Inc.
4 */
5
6 #include <stdbool.h>
7 #include <stdint.h>
8
9 #include <rte_common.h>
10 #include <rte_flow.h>
11 #include <rte_tailq.h>
12
13 #include "efx.h"
14
15 #include "sfc.h"
16 #include "sfc_debug.h"
17 #include "sfc_flow_rss.h"
18 #include "sfc_log.h"
19 #include "sfc_rx.h"
20
21 int
sfc_flow_rss_attach(struct sfc_adapter * sa)22 sfc_flow_rss_attach(struct sfc_adapter *sa)
23 {
24 const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
25 struct sfc_flow_rss *flow_rss = &sa->flow_rss;
26 int rc;
27
28 sfc_log_init(sa, "entry");
29
30 flow_rss->qid_span_max = encp->enc_rx_scale_indirection_max_nqueues;
31 flow_rss->nb_tbl_entries_min = encp->enc_rx_scale_tbl_min_nentries;
32 flow_rss->nb_tbl_entries_max = encp->enc_rx_scale_tbl_max_nentries;
33
34 sfc_log_init(sa, "allocate the bounce buffer for indirection entries");
35 flow_rss->bounce_tbl = rte_calloc("sfc_flow_rss_bounce_tbl",
36 flow_rss->nb_tbl_entries_max,
37 sizeof(*flow_rss->bounce_tbl), 0);
38 if (flow_rss->bounce_tbl == NULL) {
39 rc = ENOMEM;
40 goto fail;
41 }
42
43 TAILQ_INIT(&flow_rss->ctx_list);
44
45 sfc_log_init(sa, "done");
46
47 return 0;
48
49 fail:
50 sfc_log_init(sa, "failed %d", rc);
51
52 return rc;
53 }
54
55 void
sfc_flow_rss_detach(struct sfc_adapter * sa)56 sfc_flow_rss_detach(struct sfc_adapter *sa)
57 {
58 struct sfc_flow_rss *flow_rss = &sa->flow_rss;
59
60 sfc_log_init(sa, "entry");
61
62 sfc_log_init(sa, "free the bounce buffer for indirection entries");
63 rte_free(flow_rss->bounce_tbl);
64
65 sfc_log_init(sa, "done");
66 }
67
/*
 * Validate the flow action RSS specification 'in' and convert it to the
 * driver representation 'out'. On success, the smallest software queue ID
 * referenced by the action is optionally reported via 'sw_qid_minp'.
 *
 * Returns 0 on success or a positive errno value on failure.
 */
int
sfc_flow_rss_parse_conf(struct sfc_adapter *sa,
			const struct rte_flow_action_rss *in,
			struct sfc_flow_rss_conf *out, uint16_t *sw_qid_minp)
{
	struct sfc_adapter_shared * const sas = sfc_sa2shared(sa);
	const struct sfc_flow_rss *flow_rss = &sa->flow_rss;
	const struct sfc_rss *ethdev_rss = &sas->rss;
	uint16_t sw_qid_min;
	uint16_t sw_qid_max;
	const uint8_t *key;
	unsigned int i;
	int rc;

	if (in->level) {
		/*
		 * The caller demands that RSS hash be computed
		 * within the given encapsulation frame / level.
		 * Per flow control for that is not implemented.
		 */
		sfc_err(sa, "flow-rss: parse: 'level' must be 0");
		return EINVAL;
	}

	/* Zero 'types' means: inherit the hash types configured on the port. */
	if (in->types != 0) {
		rc = sfc_rx_hf_rte_to_efx(sa, in->types,
					  &out->efx_hash_types);
		if (rc != 0) {
			sfc_err(sa, "flow-rss: parse: failed to process 'types'");
			return rc;
		}
	} else {
		sfc_dbg(sa, "flow-rss: parse: 'types' is 0; proceeding with ethdev setting");
		out->efx_hash_types = ethdev_rss->hash_types;
	}

	/*
	 * The key is either caller-provided (and then must be exactly the
	 * supported length) or inherited from the ethdev configuration.
	 */
	if (in->key_len != 0) {
		if (in->key_len != sizeof(out->key)) {
			sfc_err(sa, "flow-rss: parse: 'key_len' must be either %zu or 0",
				sizeof(out->key));
			return EINVAL;
		}

		if (in->key == NULL) {
			sfc_err(sa, "flow-rss: parse: 'key' is NULL");
			return EINVAL;
		}

		key = in->key;
	} else {
		sfc_dbg(sa, "flow-rss: parse: 'key_len' is 0; proceeding with ethdev key");
		key = ethdev_rss->key;
	}

	rte_memcpy(out->key, key, sizeof(out->key));

	switch (in->func) {
	case RTE_ETH_HASH_FUNCTION_DEFAULT:
		/*
		 * DEFAULT means that conformance to a specific
		 * hash algorithm is a don't care to the caller.
		 * The driver can pick the one it deems optimal.
		 */
		break;
	case RTE_ETH_HASH_FUNCTION_TOEPLITZ:
		/* Toeplitz is honoured only if the port already uses it. */
		if (ethdev_rss->hash_alg != EFX_RX_HASHALG_TOEPLITZ) {
			sfc_err(sa, "flow-rss: parse: 'func' TOEPLITZ is unavailable; use DEFAULT");
			return EINVAL;
		}
		break;
	default:
		sfc_err(sa, "flow-rss: parse: 'func' #%d is unsupported", in->func);
		return EINVAL;
	}

	out->rte_hash_function = in->func;

	if (in->queue_num == 0) {
		sfc_err(sa, "flow-rss: parse: 'queue_num' is 0; MIN=1");
		return EINVAL;
	}

	/* Each queue needs at least one indirection table entry. */
	if (in->queue_num > flow_rss->nb_tbl_entries_max) {
		sfc_err(sa, "flow-rss: parse: 'queue_num' is too large; MAX=%u",
			flow_rss->nb_tbl_entries_max);
		return EINVAL;
	}

	if (in->queue == NULL) {
		sfc_err(sa, "flow-rss: parse: 'queue' is NULL");
		return EINVAL;
	}

	/* Seed min/max with the extremes so the first queue updates both. */
	sw_qid_min = sas->ethdev_rxq_count - 1;
	sw_qid_max = 0;

	out->nb_qid_offsets = 0;

	for (i = 0; i < in->queue_num; ++i) {
		uint16_t sw_qid = in->queue[i];

		if (sw_qid >= sas->ethdev_rxq_count) {
			sfc_err(sa, "flow-rss: parse: queue=%u does not exist",
				sw_qid);
			return EINVAL;
		}

		if (sw_qid < sw_qid_min)
			sw_qid_min = sw_qid;

		if (sw_qid > sw_qid_max)
			sw_qid_max = sw_qid;

		/*
		 * If the queue list is not a contiguous ascending run
		 * starting at queue[0], explicit per-entry offsets must
		 * be kept; flag that by recording the full entry count.
		 */
		if (sw_qid != in->queue[0] + i)
			out->nb_qid_offsets = in->queue_num;
	}

	out->qid_span = sw_qid_max - sw_qid_min + 1;

	/* The hardware limits how many queues one context can spread over. */
	if (out->qid_span > flow_rss->qid_span_max) {
		sfc_err(sa, "flow-rss: parse: queue ID span %u is too large; MAX=%u",
			out->qid_span, flow_rss->qid_span_max);
		return EINVAL;
	}

	if (sw_qid_minp != NULL)
		*sw_qid_minp = sw_qid_min;

	return 0;
}
198
199 struct sfc_flow_rss_ctx *
sfc_flow_rss_ctx_reuse(struct sfc_adapter * sa,const struct sfc_flow_rss_conf * conf,uint16_t sw_qid_min,const uint16_t * sw_qids)200 sfc_flow_rss_ctx_reuse(struct sfc_adapter *sa,
201 const struct sfc_flow_rss_conf *conf,
202 uint16_t sw_qid_min, const uint16_t *sw_qids)
203 {
204 struct sfc_flow_rss *flow_rss = &sa->flow_rss;
205 static struct sfc_flow_rss_ctx *ctx;
206
207 SFC_ASSERT(sfc_adapter_is_locked(sa));
208
209 TAILQ_FOREACH(ctx, &flow_rss->ctx_list, entries) {
210 if (memcmp(&ctx->conf, conf, sizeof(*conf)) != 0)
211 continue;
212
213 if (conf->nb_qid_offsets != 0) {
214 bool match_confirmed = true;
215 unsigned int i;
216
217 for (i = 0; i < conf->nb_qid_offsets; ++i) {
218 uint16_t qid_offset = sw_qids[i] - sw_qid_min;
219
220 if (ctx->qid_offsets[i] != qid_offset) {
221 match_confirmed = false;
222 break;
223 }
224 }
225
226 if (!match_confirmed)
227 continue;
228 }
229
230 sfc_dbg(sa, "flow-rss: reusing ctx=%p", ctx);
231 ++(ctx->refcnt);
232 return ctx;
233 }
234
235 return NULL;
236 }
237
238 int
sfc_flow_rss_ctx_add(struct sfc_adapter * sa,const struct sfc_flow_rss_conf * conf,uint16_t sw_qid_min,const uint16_t * sw_qids,struct sfc_flow_rss_ctx ** ctxp)239 sfc_flow_rss_ctx_add(struct sfc_adapter *sa,
240 const struct sfc_flow_rss_conf *conf, uint16_t sw_qid_min,
241 const uint16_t *sw_qids, struct sfc_flow_rss_ctx **ctxp)
242 {
243 struct sfc_flow_rss *flow_rss = &sa->flow_rss;
244 struct sfc_flow_rss_ctx *ctx;
245
246 SFC_ASSERT(sfc_adapter_is_locked(sa));
247
248 ctx = rte_zmalloc("sfc_flow_rss_ctx", sizeof(*ctx), 0);
249 if (ctx == NULL)
250 return ENOMEM;
251
252 if (conf->nb_qid_offsets != 0) {
253 unsigned int i;
254
255 ctx->qid_offsets = rte_calloc("sfc_flow_rss_ctx_qid_offsets",
256 conf->nb_qid_offsets,
257 sizeof(*ctx->qid_offsets), 0);
258 if (ctx->qid_offsets == NULL) {
259 rte_free(ctx);
260 return ENOMEM;
261 }
262
263 for (i = 0; i < conf->nb_qid_offsets; ++i)
264 ctx->qid_offsets[i] = sw_qids[i] - sw_qid_min;
265 }
266
267 ctx->conf = *conf;
268 ctx->refcnt = 1;
269
270 TAILQ_INSERT_TAIL(&flow_rss->ctx_list, ctx, entries);
271
272 *ctxp = ctx;
273
274 sfc_dbg(sa, "flow-rss: added ctx=%p", ctx);
275
276 return 0;
277 }
278
/*
 * Drop one reference to the RSS context and free it when the last
 * reference goes away. Dummy (driver-internal) contexts are never
 * deleted here. Safe to call with ctx == NULL.
 */
void
sfc_flow_rss_ctx_del(struct sfc_adapter *sa, struct sfc_flow_rss_ctx *ctx)
{
	struct sfc_flow_rss *flow_rss = &sa->flow_rss;

	if (ctx == NULL)
		return;

	SFC_ASSERT(sfc_adapter_is_locked(sa));

	/* The dummy context is not refcounted by flows; keep it alive. */
	if (ctx->dummy)
		return;

	SFC_ASSERT(ctx->refcnt != 0);

	--(ctx->refcnt);

	if (ctx->refcnt != 0)
		return;

	/*
	 * The NIC resource should have been released via
	 * sfc_flow_rss_ctx_terminate() before the last software
	 * reference is dropped; warn if that did not happen.
	 */
	if (ctx->nic_handle_refcnt != 0) {
		sfc_err(sa, "flow-rss: deleting ctx=%p abandons its NIC resource: handle=0x%08x, refcnt=%u",
			ctx, ctx->nic_handle, ctx->nic_handle_refcnt);
	}

	TAILQ_REMOVE(&flow_rss->ctx_list, ctx, entries);
	rte_free(ctx->qid_offsets);
	rte_free(ctx);

	sfc_dbg(sa, "flow-rss: deleted ctx=%p", ctx);
}
310
311 static int
sfc_flow_rss_ctx_program_tbl(struct sfc_adapter * sa,unsigned int nb_tbl_entries,const struct sfc_flow_rss_ctx * ctx)312 sfc_flow_rss_ctx_program_tbl(struct sfc_adapter *sa,
313 unsigned int nb_tbl_entries,
314 const struct sfc_flow_rss_ctx *ctx)
315 {
316 const struct sfc_flow_rss_conf *conf = &ctx->conf;
317 unsigned int *tbl = sa->flow_rss.bounce_tbl;
318 unsigned int i;
319
320 SFC_ASSERT(sfc_adapter_is_locked(sa));
321
322 if (nb_tbl_entries == 0)
323 return 0;
324
325 if (conf->nb_qid_offsets != 0) {
326 SFC_ASSERT(ctx->qid_offsets != NULL);
327
328 for (i = 0; i < nb_tbl_entries; ++i)
329 tbl[i] = ctx->qid_offsets[i % conf->nb_qid_offsets];
330 } else {
331 for (i = 0; i < nb_tbl_entries; ++i)
332 tbl[i] = i % conf->qid_span;
333 }
334
335 return efx_rx_scale_tbl_set(sa->nic, ctx->nic_handle,
336 tbl, nb_tbl_entries);
337 }
338
/*
 * Make sure the RSS context is backed by a NIC resource and fully
 * programmed (hash mode, key, indirection table). Subsequent calls on
 * an already-programmed context only bump the NIC handle refcount.
 * Safe to call with ctx == NULL. Returns 0 on success or an errno.
 */
int
sfc_flow_rss_ctx_program(struct sfc_adapter *sa, struct sfc_flow_rss_ctx *ctx)
{
	efx_rx_scale_context_type_t ctx_type = EFX_RX_SCALE_EXCLUSIVE;
	struct sfc_adapter_shared * const sas = sfc_sa2shared(sa);
	const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
	const struct sfc_flow_rss *flow_rss = &sa->flow_rss;
	struct sfc_rss *ethdev_rss = &sas->rss;
	struct sfc_flow_rss_conf *conf;
	bool allocation_done = B_FALSE;
	unsigned int nb_qid_offsets;
	unsigned int nb_tbl_entries;
	int rc;

	if (ctx == NULL)
		return 0;

	conf = &ctx->conf;

	SFC_ASSERT(sfc_adapter_is_locked(sa));

	/* Without explicit offsets, the table cycles over the queue span. */
	if (conf->nb_qid_offsets != 0)
		nb_qid_offsets = conf->nb_qid_offsets;
	else
		nb_qid_offsets = conf->qid_span;

	if (!RTE_IS_POWER_OF_2(nb_qid_offsets)) {
		/*
		 * Most likely, it pays to enlarge the indirection
		 * table to facilitate better distribution quality.
		 */
		nb_qid_offsets = flow_rss->nb_tbl_entries_max;
	}

	nb_tbl_entries = RTE_MAX(flow_rss->nb_tbl_entries_min, nb_qid_offsets);

	if (conf->rte_hash_function == RTE_ETH_HASH_FUNCTION_DEFAULT &&
	    conf->nb_qid_offsets == 0 &&
	    conf->qid_span <= encp->enc_rx_scale_even_spread_max_nqueues) {
		/*
		 * Conformance to a specific hash algorithm is a don't care to
		 * the user. The queue array is contiguous and ascending. That
		 * means that the even spread context may be requested here in
		 * order to avoid wasting precious indirection table resources.
		 */
		ctx_type = EFX_RX_SCALE_EVEN_SPREAD;
		nb_tbl_entries = 0;
	}

	if (ctx->nic_handle_refcnt == 0) {
		rc = efx_rx_scale_context_alloc_v2(sa->nic, ctx_type,
						   conf->qid_span,
						   nb_tbl_entries,
						   &ctx->nic_handle);
		if (rc != 0) {
			sfc_err(sa, "flow-rss: failed to allocate NIC resource for ctx=%p: type=%d, qid_span=%u, nb_tbl_entries=%u; rc=%d",
				ctx, ctx_type, conf->qid_span, nb_tbl_entries, rc);
			goto fail;
		}

		sfc_dbg(sa, "flow-rss: allocated NIC resource for ctx=%p: type=%d, qid_span=%u, nb_tbl_entries=%u; handle=0x%08x",
			ctx, ctx_type, conf->qid_span, nb_tbl_entries,
			ctx->nic_handle);

		++(ctx->nic_handle_refcnt);
		/* Remember to roll the allocation back on later failures. */
		allocation_done = B_TRUE;
	} else {
		/* Already programmed; just take another NIC handle ref. */
		++(ctx->nic_handle_refcnt);
		return 0;
	}

	/* Dummy contexts fall back to the port-level hash configuration. */
	rc = efx_rx_scale_mode_set(sa->nic, ctx->nic_handle,
				   ethdev_rss->hash_alg,
				   (ctx->dummy) ? ethdev_rss->hash_types :
						  conf->efx_hash_types,
				   B_TRUE);
	if (rc != 0) {
		sfc_err(sa, "flow-rss: failed to configure hash for ctx=%p: efx_hash_alg=%d, efx_hash_types=0x%08x; rc=%d",
			ctx, ethdev_rss->hash_alg,
			(ctx->dummy) ? ethdev_rss->hash_types :
				       conf->efx_hash_types,
			rc);
		goto fail;
	}

	rc = efx_rx_scale_key_set(sa->nic, ctx->nic_handle,
				  (ctx->dummy) ? ethdev_rss->key : conf->key,
				  RTE_DIM(conf->key));
	if (rc != 0) {
		sfc_err(sa, "flow-rss: failed to set key for ctx=%p; rc=%d",
			ctx, rc);
		goto fail;
	}

	rc = sfc_flow_rss_ctx_program_tbl(sa, nb_tbl_entries, ctx);
	if (rc != 0) {
		sfc_err(sa, "flow-rss: failed to program table for ctx=%p: nb_tbl_entries=%u; rc=%d",
			ctx, nb_tbl_entries, rc);
		goto fail;
	}

	return 0;

fail:
	/* Release the NIC resource allocated above, if any. */
	if (allocation_done)
		sfc_flow_rss_ctx_terminate(sa, ctx);

	return rc;
}
448
449 void
sfc_flow_rss_ctx_terminate(struct sfc_adapter * sa,struct sfc_flow_rss_ctx * ctx)450 sfc_flow_rss_ctx_terminate(struct sfc_adapter *sa, struct sfc_flow_rss_ctx *ctx)
451 {
452 if (ctx == NULL)
453 return;
454
455 SFC_ASSERT(sfc_adapter_is_locked(sa));
456
457 SFC_ASSERT(ctx->nic_handle_refcnt != 0);
458 --(ctx->nic_handle_refcnt);
459
460 if (ctx->nic_handle_refcnt == 0) {
461 int rc;
462
463 rc = efx_rx_scale_context_free(sa->nic, ctx->nic_handle);
464 if (rc != 0) {
465 sfc_err(sa, "flow-rss: failed to release NIC resource for ctx=%p: handle=0x%08x; rc=%d",
466 ctx, ctx->nic_handle, rc);
467
468 sfc_warn(sa, "flow-rss: proceeding despite the prior error");
469 }
470
471 sfc_dbg(sa, "flow-rss: released NIC resource for ctx=%p; rc=%d",
472 ctx, rc);
473 }
474 }
475