1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright 2020 Mellanox Technologies, Ltd
3 */
4
5 #include <rte_log.h>
6 #include <rte_errno.h>
7 #include <rte_malloc.h>
8 #include <rte_regexdev.h>
9 #include <rte_regexdev_core.h>
10 #include <rte_regexdev_driver.h>
11
12 #include <mlx5_glue.h>
13 #include <mlx5_devx_cmds.h>
14 #include <mlx5_prm.h>
15 #include <mlx5_common_os.h>
16
17 #include "mlx5_regex.h"
18 #include "mlx5_regex_utils.h"
19 #include "mlx5_rxp_csrs.h"
20 #include "mlx5_rxp.h"
21
22 #define MLX5_REGEX_MAX_MATCHES MLX5_RXP_MAX_MATCHES
23 #define MLX5_REGEX_MAX_PAYLOAD_SIZE MLX5_RXP_MAX_JOB_LENGTH
24 #define MLX5_REGEX_MAX_RULES_PER_GROUP UINT32_MAX
25 #define MLX5_REGEX_MAX_GROUPS MLX5_RXP_MAX_SUBSETS
26
27 /* Private Declarations */
28 static int
29 rxp_poll_csr_for_value(struct ibv_context *ctx, uint32_t *value,
30 uint32_t address, uint32_t expected_value,
31 uint32_t expected_mask, uint32_t timeout_ms, uint8_t id);
32 static int
33 mlnx_set_database(struct mlx5_regex_priv *priv, uint8_t id, uint8_t db_to_use);
34 static int
35 mlnx_resume_database(struct mlx5_regex_priv *priv, uint8_t id);
36 static int
37 mlnx_update_database(struct mlx5_regex_priv *priv, uint8_t id);
38 static int
39 program_rxp_rules(struct mlx5_regex_priv *priv,
40 struct mlx5_rxp_ctl_rules_pgm *rules, uint8_t id);
41 static int
42 rxp_init_eng(struct mlx5_regex_priv *priv, uint8_t id);
43 static int
44 write_private_rules(struct mlx5_regex_priv *priv,
45 struct mlx5_rxp_ctl_rules_pgm *rules,
46 uint8_t id);
47 static int
48 write_shared_rules(struct mlx5_regex_priv *priv,
49 struct mlx5_rxp_ctl_rules_pgm *rules, uint32_t count,
50 uint8_t db_to_program);
51 static int
52 rxp_db_setup(struct mlx5_regex_priv *priv);
53 static void
54 rxp_dump_csrs(struct ibv_context *ctx, uint8_t id);
55 static int
56 rxp_write_rules_via_cp(struct ibv_context *ctx,
57 struct mlx5_rxp_rof_entry *rules,
58 int count, uint8_t id);
59 static int
60 rxp_flush_rules(struct ibv_context *ctx, struct mlx5_rxp_rof_entry *rules,
61 int count, uint8_t id);
62 static int
63 rxp_start_engine(struct ibv_context *ctx, uint8_t id);
64 static int
65 rxp_stop_engine(struct ibv_context *ctx, uint8_t id);
66
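/*
 * Debug helper: dump the Main, RTRU and STAT CSR windows of the given RXP
 * engine to the driver log, MLX5_RXP_CSR_NUM_ENTRIES registers per window.
 */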
67 static void __rte_unused
68 rxp_dump_csrs(struct ibv_context *ctx __rte_unused, uint8_t id __rte_unused)
69 {
70 uint32_t reg, i;
71
72 /* Main CSRs*/
73 for (i = 0; i < MLX5_RXP_CSR_NUM_ENTRIES; i++) {
74 if (mlx5_devx_regex_register_read(ctx, id,
75 (MLX5_RXP_CSR_WIDTH * i) +
76 MLX5_RXP_CSR_BASE_ADDRESS,
77 &reg)) {
78 DRV_LOG(ERR, "Failed to read Main CSRs Engine %d!", id);
79 return;
80 }
81 DRV_LOG(DEBUG, "RXP Main CSRs (Eng%d) register (%d): %08x",
82 id, i, reg);
83 }
84 /* RTRU CSRs*/
85 for (i = 0; i < MLX5_RXP_CSR_NUM_ENTRIES; i++) {
86 if (mlx5_devx_regex_register_read(ctx, id,
87 (MLX5_RXP_CSR_WIDTH * i) +
88 MLX5_RXP_RTRU_CSR_BASE_ADDRESS,
89 &reg)) {
90 DRV_LOG(ERR, "Failed to read RTRU CSRs Engine %d!", id);
91 return;
92 }
93 DRV_LOG(DEBUG, "RXP RTRU CSRs (Eng%d) register (%d): %08x",
94 id, i, reg);
95 }
96 /* STAT CSRs */
97 for (i = 0; i < MLX5_RXP_CSR_NUM_ENTRIES; i++) {
98 if (mlx5_devx_regex_register_read(ctx, id,
99 (MLX5_RXP_CSR_WIDTH * i) +
100 MLX5_RXP_STATS_CSR_BASE_ADDRESS,
101 &reg)) {
102 DRV_LOG(ERR, "Failed to read STAT CSRs Engine %d!", id);
103 return;
104 }
105 DRV_LOG(DEBUG, "RXP STAT CSRs (Eng%d) register (%d): %08x",
106 id, i, reg);
107 }
108 }
109
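/*
 * Report the generic regexdev capabilities of the RXP to applications
 * (limits taken from the RXP definitions above, up to 10 queue pairs).
 */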
110 int
111 mlx5_regex_info_get(struct rte_regexdev *dev __rte_unused,
112 struct rte_regexdev_info *info)
113 {
114 info->max_matches = MLX5_REGEX_MAX_MATCHES;
115 info->max_payload_size = MLX5_REGEX_MAX_PAYLOAD_SIZE;
116 info->max_rules_per_group = MLX5_REGEX_MAX_RULES_PER_GROUP;
117 info->max_groups = MLX5_REGEX_MAX_GROUPS;
119 info->regexdev_capa = RTE_REGEXDEV_SUPP_PCRE_GREEDY_F |
120 RTE_REGEXDEV_CAPA_QUEUE_PAIR_OOS_F;
121 info->rule_flags = 0;
122 info->max_queue_pairs = 10;
123 return 0;
124 }
125
126 /**
127 * Actual writing of RXP instructions to RXP via CSRs.
128 */
129 static int
130 rxp_write_rules_via_cp(struct ibv_context *ctx,
131 struct mlx5_rxp_rof_entry *rules,
132 int count, uint8_t id)
133 {
134 int i, ret = 0;
135 uint32_t tmp;
136
137 for (i = 0; i < count; i++) {
138 tmp = (uint32_t)rules[i].value;
139 ret |= mlx5_devx_regex_register_write(ctx, id,
140 MLX5_RXP_RTRU_CSR_DATA_0,
141 tmp);
142 tmp = (uint32_t)(rules[i].value >> 32);
143 ret |= mlx5_devx_regex_register_write(ctx, id,
144 MLX5_RXP_RTRU_CSR_DATA_0 +
145 MLX5_RXP_CSR_WIDTH, tmp);
146 tmp = rules[i].addr;
147 ret |= mlx5_devx_regex_register_write(ctx, id,
148 MLX5_RXP_RTRU_CSR_ADDR,
149 tmp);
150 if (ret) {
151 DRV_LOG(ERR, "Failed to copy instructions to RXP.");
152 return -1;
153 }
154 }
155 DRV_LOG(DEBUG, "Written %d instructions", count);
156 return 0;
157 }
158
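/*
 * Flush one batch of rules into the RTRU: write the instructions through the
 * CSR copy path, wait for the RTRU FIFO to take them, pulse the GO bit and
 * poll for UPDATE_DONE before clearing GO again.
 */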
159 static int
160 rxp_flush_rules(struct ibv_context *ctx, struct mlx5_rxp_rof_entry *rules,
161 int count, uint8_t id)
162 {
163 uint32_t val, fifo_depth;
164 int ret;
165
166 ret = rxp_write_rules_via_cp(ctx, rules, count, id);
167 if (ret < 0) {
168 DRV_LOG(ERR, "Failed to write rules via CSRs.");
169 return -1;
170 }
171 ret = mlx5_devx_regex_register_read(ctx, id,
172 MLX5_RXP_RTRU_CSR_CAPABILITY,
173 &fifo_depth);
174 if (ret) {
175 DRV_LOG(ERR, "CSR read failed!");
176 return -1;
177 }
178 ret = rxp_poll_csr_for_value(ctx, &val, MLX5_RXP_RTRU_CSR_FIFO_STAT,
179 count, ~0,
180 MLX5_RXP_POLL_CSR_FOR_VALUE_TIMEOUT, id);
181 if (ret < 0) {
182 if (ret == -EBUSY)
183 DRV_LOG(ERR, "Rules not rx by RXP: credit: %d, depth:"
184 " %d", val, fifo_depth);
185 else
186 DRV_LOG(ERR, "CSR poll failed, can't read value!");
187 return ret;
188 }
189 DRV_LOG(DEBUG, "RTRU FIFO depth: 0x%x", fifo_depth);
190 ret = mlx5_devx_regex_register_read(ctx, id, MLX5_RXP_RTRU_CSR_CTRL,
191 &val);
192 if (ret) {
193 DRV_LOG(ERR, "CSR read failed!");
194 return -1;
195 }
196 val |= MLX5_RXP_RTRU_CSR_CTRL_GO;
197 ret = mlx5_devx_regex_register_write(ctx, id, MLX5_RXP_RTRU_CSR_CTRL,
198 val);
199 if (ret) {
200 DRV_LOG(ERR, "CSR write failed!");
201 return -1;
202 }
203 ret = rxp_poll_csr_for_value(ctx, &val, MLX5_RXP_RTRU_CSR_STATUS,
204 MLX5_RXP_RTRU_CSR_STATUS_UPDATE_DONE,
205 MLX5_RXP_RTRU_CSR_STATUS_UPDATE_DONE,
206 MLX5_RXP_POLL_CSR_FOR_VALUE_TIMEOUT, id);
207 if (ret < 0) {
208 if (ret == -EBUSY)
209 DRV_LOG(ERR, "Rules update timeout: 0x%08X", val);
210 else
211 DRV_LOG(ERR, "CSR poll failed, can't read value!");
212 return ret;
213 }
214 if (mlx5_devx_regex_register_read(ctx, id, MLX5_RXP_RTRU_CSR_CTRL,
215 &val)) {
216 DRV_LOG(ERR, "CSR read failed!");
217 return -1;
218 }
219 val &= ~(MLX5_RXP_RTRU_CSR_CTRL_GO);
220 if (mlx5_devx_regex_register_write(ctx, id, MLX5_RXP_RTRU_CSR_CTRL,
221 val)) {
222 DRV_LOG(ERR, "CSR write failed!");
223 return -1;
224 }
225
226 DRV_LOG(DEBUG, "RXP Flush rules finished.");
227 return 0;
228 }
229
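/*
 * Poll a CSR until (*value & expected_mask) == expected_value. Polls once per
 * millisecond for up to timeout_ms; returns 0 on match, -EBUSY on timeout or
 * -1 on a register read failure.
 */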
230 static int
231 rxp_poll_csr_for_value(struct ibv_context *ctx, uint32_t *value,
232 uint32_t address, uint32_t expected_value,
233 uint32_t expected_mask, uint32_t timeout_ms, uint8_t id)
234 {
235 unsigned int i;
236 int ret;
237
238 ret = -EBUSY;
239 for (i = 0; i < timeout_ms; i++) {
240 if (mlx5_devx_regex_register_read(ctx, id, address, value))
241 return -1;
242 if ((*value & expected_mask) == expected_value) {
243 ret = 0;
244 break;
245 }
246 rte_delay_us(1000);
247 }
248 return ret;
249 }
250
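/* Start the RXP engine: set the GO bit (the L2 cache is also disabled here). */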
251 static int
252 rxp_start_engine(struct ibv_context *ctx, uint8_t id)
253 {
254 uint32_t ctrl;
255 int ret;
256
257 ret = mlx5_devx_regex_register_read(ctx, id, MLX5_RXP_CSR_CTRL, &ctrl);
258 if (ret)
259 return ret;
260 ctrl |= MLX5_RXP_CSR_CTRL_GO;
261 ctrl |= MLX5_RXP_CSR_CTRL_DISABLE_L2C;
262 ret = mlx5_devx_regex_register_write(ctx, id, MLX5_RXP_CSR_CTRL, ctrl);
263 return ret;
264 }
265
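/* Stop the RXP engine by clearing the GO bit in the control CSR. */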
266 static int
267 rxp_stop_engine(struct ibv_context *ctx, uint8_t id)
268 {
269 uint32_t ctrl;
270 int ret;
271
272 ret = mlx5_devx_regex_register_read(ctx, id, MLX5_RXP_CSR_CTRL, &ctrl);
273 if (ret)
274 return ret;
275 ctrl &= ~MLX5_RXP_CSR_CTRL_GO;
276 ret = mlx5_devx_regex_register_write(ctx, id, MLX5_RXP_CSR_CTRL, ctrl);
277 return ret;
278 }
279
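/*
 * Initialise the RTRU memories selected by init_bits: clear any previous init
 * mode, pulse the INIT bit and poll the RTRU status CSR until the matching
 * *_INIT_DONE bits are set.
 */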
280 static int
281 rxp_init_rtru(struct ibv_context *ctx, uint8_t id, uint32_t init_bits)
282 {
283 uint32_t ctrl_value;
284 uint32_t poll_value;
285 uint32_t expected_value;
286 uint32_t expected_mask;
287 int ret;
288
289 /* Read the rtru ctrl CSR. */
290 ret = mlx5_devx_regex_register_read(ctx, id, MLX5_RXP_RTRU_CSR_CTRL,
291 &ctrl_value);
292 if (ret)
293 return -1;
294 /* Clear any previous init modes. */
295 ctrl_value &= ~(MLX5_RXP_RTRU_CSR_CTRL_INIT_MODE_MASK);
296 if (ctrl_value & MLX5_RXP_RTRU_CSR_CTRL_INIT) {
297 ctrl_value &= ~(MLX5_RXP_RTRU_CSR_CTRL_INIT);
298 mlx5_devx_regex_register_write(ctx, id, MLX5_RXP_RTRU_CSR_CTRL,
299 ctrl_value);
300 }
301 /* Set the init_mode bits in the rtru ctrl CSR. */
302 ctrl_value |= init_bits;
303 mlx5_devx_regex_register_write(ctx, id, MLX5_RXP_RTRU_CSR_CTRL,
304 ctrl_value);
305 /* Need to sleep for a short period after pulsing the rtru init bit. */
306 rte_delay_us(20000);
307 /* Poll the rtru status CSR until all the init done bits are set. */
308 DRV_LOG(DEBUG, "waiting for RXP rule memory to complete init");
309 /* Set the init bit in the rtru ctrl CSR. */
310 ctrl_value |= MLX5_RXP_RTRU_CSR_CTRL_INIT;
311 mlx5_devx_regex_register_write(ctx, id, MLX5_RXP_RTRU_CSR_CTRL,
312 ctrl_value);
313 /* Clear the init bit in the rtru ctrl CSR */
314 ctrl_value &= ~MLX5_RXP_RTRU_CSR_CTRL_INIT;
315 mlx5_devx_regex_register_write(ctx, id, MLX5_RXP_RTRU_CSR_CTRL,
316 ctrl_value);
317 /* Check that the following bits are set in the RTRU_CSR. */
318 if (init_bits == MLX5_RXP_RTRU_CSR_CTRL_INIT_MODE_L1_L2) {
319 /* Must be incremental mode */
320 expected_value = MLX5_RXP_RTRU_CSR_STATUS_L1C_INIT_DONE |
321 MLX5_RXP_RTRU_CSR_STATUS_L2C_INIT_DONE;
322 } else {
323 expected_value = MLX5_RXP_RTRU_CSR_STATUS_IM_INIT_DONE |
324 MLX5_RXP_RTRU_CSR_STATUS_L1C_INIT_DONE |
325 MLX5_RXP_RTRU_CSR_STATUS_L2C_INIT_DONE;
326 }
327 expected_mask = expected_value;
328 ret = rxp_poll_csr_for_value(ctx, &poll_value,
329 MLX5_RXP_RTRU_CSR_STATUS,
330 expected_value, expected_mask,
331 MLX5_RXP_CSR_STATUS_TRIAL_TIMEOUT, id);
332 if (ret)
333 return ret;
334 DRV_LOG(DEBUG, "rule memory initialise: 0x%08X", poll_value);
335 /* Clear the init bit in the rtru ctrl CSR */
336 ctrl_value &= ~(MLX5_RXP_RTRU_CSR_CTRL_INIT);
337 mlx5_devx_regex_register_write(ctx, id, MLX5_RXP_RTRU_CSR_CTRL,
338 ctrl_value);
339 return 0;
340 }
341
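/*
 * Parse an in-memory ROF image into an mlx5_rxp_ctl_rules_pgm structure.
 * Lines starting with '#' are comments; every other line is expected to hold
 * "<type>,<address>,<value>" with a decimal type and hexadecimal address and
 * 64-bit value, e.g. "1,0x0123,0x0000000012345678" (illustrative values only).
 * The buffer is scanned twice: once to count entries, once to fill them in.
 */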
342 static int
343 rxp_parse_rof(const char *buf, uint32_t len,
344 struct mlx5_rxp_ctl_rules_pgm **rules)
345 {
346 static const char del[] = "\n\r";
347 char *line;
348 char *tmp;
349 char *cur_pos;
350 uint32_t lines = 0;
351 uint32_t entries;
352 struct mlx5_rxp_rof_entry *curentry;
353
354 tmp = rte_malloc("", len, 0);
355 if (!tmp)
356 return -ENOMEM;
357 memcpy(tmp, buf, len);
358 line = strtok(tmp, del);
359 while (line) {
360 if (line[0] != '#' && line[0] != '\0')
361 lines++;
362 line = strtok(NULL, del);
363 }
364 *rules = rte_malloc("", lines * sizeof(*curentry) + sizeof(**rules), 0);
365 if (!(*rules)) {
366 rte_free(tmp);
367 return -ENOMEM;
368 }
369 memset(*rules, 0, lines * sizeof(*curentry) + sizeof(**rules));
370 curentry = (*rules)->rules;
371 (*rules)->hdr.cmd = MLX5_RXP_CTL_RULES_PGM;
372 entries = 0;
373 memcpy(tmp, buf, len);
374 line = strtok(tmp, del);
375 while (line) {
376 if (line[0] == '#' || line[0] == '\0') {
377 line = strtok(NULL, del);
378 continue;
379 }
380 curentry->type = strtoul(line, &cur_pos, 10);
381 if (cur_pos == line || cur_pos[0] != ',')
382 goto parse_error;
383 cur_pos++;
384 curentry->addr = strtoul(cur_pos, &cur_pos, 16);
385 if (cur_pos[0] != ',')
386 goto parse_error;
387 cur_pos++;
388 curentry->value = strtoull(cur_pos, &cur_pos, 16);
389 if (cur_pos[0] != '\0' && cur_pos[0] != '\n')
390 goto parse_error;
391 curentry++;
392 entries++;
393 if (entries > lines)
394 goto parse_error;
395 line = strtok(NULL, del);
396 }
397 (*rules)->count = entries;
398 (*rules)->hdr.len = entries * sizeof(*curentry) + sizeof(**rules);
399 rte_free(tmp);
400 return 0;
401 parse_error:
402 rte_free(tmp);
403 if (*rules)
404 rte_free(*rules);
405 return -EINVAL;
406 }
407
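/*
 * Point an RXP engine at a new rule database: stop the engine's database
 * access, then program it with the umem registered for db[db_to_use].
 */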
408 static int
409 mlnx_set_database(struct mlx5_regex_priv *priv, uint8_t id, uint8_t db_to_use)
410 {
411 int ret;
412 uint32_t umem_id;
413
414 ret = mlx5_devx_regex_database_stop(priv->ctx, id);
415 if (ret < 0) {
416 DRV_LOG(ERR, "stop engine failed!");
417 return ret;
418 }
419 umem_id = mlx5_os_get_umem_id(priv->db[db_to_use].umem.umem);
420 ret = mlx5_devx_regex_database_program(priv->ctx, id, umem_id, 0);
421 if (ret < 0) {
422 DRV_LOG(ERR, "program db failed!");
423 return ret;
424 }
425 return 0;
426 }
427
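/* Resume the engine's database access once programming has completed. */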
428 static int
429 mlnx_resume_database(struct mlx5_regex_priv *priv, uint8_t id)
430 {
431 mlx5_devx_regex_database_resume(priv->ctx, id);
432 return 0;
433 }
434
435 /*
436 * Assign db memory for RXP programming.
437 */
438 static int
439 mlnx_update_database(struct mlx5_regex_priv *priv, uint8_t id)
440 {
441 unsigned int i;
442 uint8_t db_free = MLX5_RXP_DB_NOT_ASSIGNED;
443 uint8_t eng_assigned = MLX5_RXP_DB_NOT_ASSIGNED;
444
445 /* Check which database, if any, this RXP engine is currently assigned to. */
446 for (i = 0; i < (priv->nb_engines + MLX5_RXP_EM_COUNT);
447 i++) {
448 if (priv->db[i].db_assigned_to_eng_num == id) {
449 eng_assigned = i;
450 break;
451 }
452 }
453 /*
454 * If in private mode, we can keep the same db ptr, as the RXP will
455 * program the EM itself if necessary; however, we still need to check
456 * whether it has been programmed yet.
457 */
458 if ((priv->prog_mode == MLX5_RXP_PRIVATE_PROG_MODE) &&
459 (eng_assigned != MLX5_RXP_DB_NOT_ASSIGNED))
460 return eng_assigned;
461 /* Check for inactive db memory to use. */
462 for (i = 0; i < (priv->nb_engines + MLX5_RXP_EM_COUNT);
463 i++) {
464 if (priv->db[i].active == true)
465 continue; /* Already in use, so skip db. */
466 /* Set this db to active now as free to use. */
467 priv->db[i].active = true;
468 /* Now unassign last db index in use by RXP Eng. */
469 if (eng_assigned != MLX5_RXP_DB_NOT_ASSIGNED) {
470 priv->db[eng_assigned].active = false;
471 priv->db[eng_assigned].db_assigned_to_eng_num =
472 MLX5_RXP_DB_NOT_ASSIGNED;
473
474 /* Set all DB memory to 0's before setting up DB. */
475 memset(priv->db[i].ptr, 0x00, MLX5_MAX_DB_SIZE);
476 }
477 /* Now reassign new db index with RXP Engine. */
478 priv->db[i].db_assigned_to_eng_num = id;
479 db_free = i;
480 break;
481 }
482 if (db_free == MLX5_RXP_DB_NOT_ASSIGNED)
483 return -1;
484 return db_free;
485 }
486
487 /*
488 * Program RXP instruction db to RXP engine/s.
489 */
490 static int
491 program_rxp_rules(struct mlx5_regex_priv *priv,
492 struct mlx5_rxp_ctl_rules_pgm *rules, uint8_t id)
493 {
494 int ret, db_free;
495 uint32_t rule_cnt;
496
497 rule_cnt = rules->count;
498 db_free = mlnx_update_database(priv, id);
499 if (db_free < 0) {
500 DRV_LOG(ERR, "Failed to setup db memory!");
501 return db_free;
502 }
503 if (priv->prog_mode == MLX5_RXP_PRIVATE_PROG_MODE) {
504 /* Register early to ensure RXP writes to EM use valid addr. */
505 ret = mlnx_set_database(priv, id, db_free);
506 if (ret < 0) {
507 DRV_LOG(ERR, "Failed to register db memory!");
508 return ret;
509 }
510 }
511 ret = write_private_rules(priv, rules, id);
512 if (ret < 0) {
513 DRV_LOG(ERR, "Failed to write rules!");
514 return ret;
515 }
516 if (priv->prog_mode == MLX5_RXP_SHARED_PROG_MODE) {
517 /* Write external rules directly to EM. */
518 rules->count = rule_cnt;
519 /* Now write external instructions to EM. */
520 ret = write_shared_rules(priv, rules, rules->hdr.len, db_free);
521 if (ret < 0) {
522 DRV_LOG(ERR, "Failed to write EM rules!");
523 return ret;
524 }
525 ret = mlnx_set_database(priv, id, db_free);
526 if (ret < 0) {
527 DRV_LOG(ERR, "Failed to register db memory!");
528 return ret;
529 }
530 }
531 ret = mlnx_resume_database(priv, id);
532 if (ret < 0) {
533 DRV_LOG(ERR, "Failed to resume engine!");
534 return ret;
535 }
536 DRV_LOG(DEBUG, "Programmed RXP Engine %d\n", id);
537 rules->count = rule_cnt;
538 return 0;
539 }
540
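/*
 * Full engine initialisation: pulse the CSR INIT bit and wait for INIT_DONE,
 * initialise the RTRU memories (IM plus L1/L2 caches), then program the
 * max-match/max-prefix limits advertised in capability register 5 and zero
 * the latency and priority-thread limits.
 */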
541 static int
542 rxp_init_eng(struct mlx5_regex_priv *priv, uint8_t id)
543 {
544 uint32_t ctrl;
545 uint32_t reg;
546 struct ibv_context *ctx = priv->ctx;
547 int ret;
548
549 ret = mlx5_devx_regex_register_read(ctx, id, MLX5_RXP_CSR_CTRL, &ctrl);
550 if (ret)
551 return ret;
552 if (ctrl & MLX5_RXP_CSR_CTRL_INIT) {
553 ctrl &= ~MLX5_RXP_CSR_CTRL_INIT;
554 ret = mlx5_devx_regex_register_write(ctx, id, MLX5_RXP_CSR_CTRL,
555 ctrl);
556 if (ret)
557 return ret;
558 }
559 ctrl |= MLX5_RXP_CSR_CTRL_INIT;
560 ret = mlx5_devx_regex_register_write(ctx, id, MLX5_RXP_CSR_CTRL, ctrl);
561 if (ret)
562 return ret;
563 ctrl &= ~MLX5_RXP_CSR_CTRL_INIT;
564 ret = mlx5_devx_regex_register_write(ctx, id, MLX5_RXP_CSR_CTRL, ctrl);
565 if (ret)
566 return ret;
567 rte_delay_us(20000);
568 ret = rxp_poll_csr_for_value(ctx, &ctrl, MLX5_RXP_CSR_STATUS,
569 MLX5_RXP_CSR_STATUS_INIT_DONE,
570 MLX5_RXP_CSR_STATUS_INIT_DONE,
571 MLX5_RXP_CSR_STATUS_TRIAL_TIMEOUT, id);
572 if (ret)
573 return ret;
574 ret = mlx5_devx_regex_register_read(ctx, id, MLX5_RXP_CSR_CTRL, &ctrl);
575 if (ret)
576 return ret;
577 ctrl &= ~MLX5_RXP_CSR_CTRL_INIT;
578 ret = mlx5_devx_regex_register_write(ctx, id, MLX5_RXP_CSR_CTRL,
579 ctrl);
580 if (ret)
581 return ret;
582 ret = rxp_init_rtru(ctx, id, MLX5_RXP_RTRU_CSR_CTRL_INIT_MODE_IM_L1_L2);
583 if (ret)
584 return ret;
585 ret = mlx5_devx_regex_register_read(ctx, id, MLX5_RXP_CSR_CAPABILITY_5,
586 &reg);
587 if (ret)
588 return ret;
589 DRV_LOG(DEBUG, "max matches: %d, DDOS threshold: %d", reg >> 16,
590 reg & 0xffff);
591 if ((reg >> 16) >= priv->nb_max_matches)
592 ret = mlx5_devx_regex_register_write(ctx, id,
593 MLX5_RXP_CSR_MAX_MATCH,
594 priv->nb_max_matches);
595 else
596 ret = mlx5_devx_regex_register_write(ctx, id,
597 MLX5_RXP_CSR_MAX_MATCH,
598 (reg >> 16));
599 ret |= mlx5_devx_regex_register_write(ctx, id, MLX5_RXP_CSR_MAX_PREFIX,
600 (reg & 0xFFFF));
601 ret |= mlx5_devx_regex_register_write(ctx, id,
602 MLX5_RXP_CSR_MAX_LATENCY, 0);
603 ret |= mlx5_devx_regex_register_write(ctx, id,
604 MLX5_RXP_CSR_MAX_PRI_THREAD, 0);
605 return ret;
606 }
607
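/*
 * Program the per-engine (private) instruction stream. INST/IM (and, in
 * private mode, EM) entries are batched and flushed through the RTRU, while
 * EQ/GTE/LTE/CHECKSUM entries are verification reads checked against the
 * expected register values.
 */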
608 static int
609 write_private_rules(struct mlx5_regex_priv *priv,
610 struct mlx5_rxp_ctl_rules_pgm *rules,
611 uint8_t id)
612 {
613 unsigned int pending;
614 uint32_t block, reg, val, rule_cnt, rule_offset, rtru_max_num_entries;
615 int ret = 1;
616
617 if (priv->prog_mode == MLX5_RXP_MODE_NOT_DEFINED)
618 return -EINVAL;
619 if (rules->hdr.len == 0 || rules->hdr.cmd < MLX5_RXP_CTL_RULES_PGM ||
620 rules->hdr.cmd > MLX5_RXP_CTL_RULES_PGM_INCR)
621 return -EINVAL;
622 /* For a non-incremental rules program, re-init the RXP. */
623 if (rules->hdr.cmd == MLX5_RXP_CTL_RULES_PGM) {
624 ret = rxp_init_eng(priv, id);
625 if (ret < 0)
626 return ret;
627 } else if (rules->hdr.cmd == MLX5_RXP_CTL_RULES_PGM_INCR) {
628 /* Flush RXP L1 and L2 cache by using MODE_L1_L2. */
629 ret = rxp_init_rtru(priv->ctx, id,
630 MLX5_RXP_RTRU_CSR_CTRL_INIT_MODE_L1_L2);
631 if (ret < 0)
632 return ret;
633 }
634 if (rules->count == 0)
635 return -EINVAL;
636 /* Confirm the RXP is initialised. */
637 if (mlx5_devx_regex_register_read(priv->ctx, id,
638 MLX5_RXP_CSR_STATUS, &val)) {
639 DRV_LOG(ERR, "Failed to read from RXP!");
640 return -ENODEV;
641 }
642 if (!(val & MLX5_RXP_CSR_STATUS_INIT_DONE)) {
643 DRV_LOG(ERR, "RXP not initialised...");
644 return -EBUSY;
645 }
646 /* Get the RTRU maximum number of entries allowed. */
647 if (mlx5_devx_regex_register_read(priv->ctx, id,
648 MLX5_RXP_RTRU_CSR_CAPABILITY, &rtru_max_num_entries)) {
649 DRV_LOG(ERR, "Failed to read RTRU capability!");
650 return -ENODEV;
651 }
652 rtru_max_num_entries = (rtru_max_num_entries & 0x00FF);
653 rule_cnt = 0;
654 pending = 0;
655 while (rules->count > 0) {
656 if ((rules->rules[rule_cnt].type == MLX5_RXP_ROF_ENTRY_INST) ||
657 (rules->rules[rule_cnt].type == MLX5_RXP_ROF_ENTRY_IM) ||
658 (rules->rules[rule_cnt].type == MLX5_RXP_ROF_ENTRY_EM)) {
659 if ((rules->rules[rule_cnt].type ==
660 MLX5_RXP_ROF_ENTRY_EM) &&
661 (priv->prog_mode == MLX5_RXP_SHARED_PROG_MODE)) {
662 /* Skip EM rules programming. */
663 if (pending > 0) {
664 /* Flush any rules that are pending. */
665 rule_offset = (rule_cnt - pending);
666 ret = rxp_flush_rules(priv->ctx,
667 &rules->rules[rule_offset],
668 pending, id);
669 if (ret < 0) {
670 DRV_LOG(ERR, "Flushing rules.");
671 return -ENODEV;
672 }
673 pending = 0;
674 }
675 rule_cnt++;
676 } else {
677 pending++;
678 rule_cnt++;
679 /*
680 * If parsing the last rule, or if reached the
681 * maximum number of rules for this batch, then
682 * flush the rules batch to the RXP.
683 */
684 if ((rules->count == 1) ||
685 (pending == rtru_max_num_entries)) {
686 rule_offset = (rule_cnt - pending);
687 ret = rxp_flush_rules(priv->ctx,
688 &rules->rules[rule_offset],
689 pending, id);
690 if (ret < 0) {
691 DRV_LOG(ERR, "Flushing rules.");
692 return -ENODEV;
693 }
694 pending = 0;
695 }
696 }
697 } else if ((rules->rules[rule_cnt].type ==
698 MLX5_RXP_ROF_ENTRY_EQ) ||
699 (rules->rules[rule_cnt].type ==
700 MLX5_RXP_ROF_ENTRY_GTE) ||
701 (rules->rules[rule_cnt].type ==
702 MLX5_RXP_ROF_ENTRY_LTE) ||
703 (rules->rules[rule_cnt].type ==
704 MLX5_RXP_ROF_ENTRY_CHECKSUM) ||
705 (rules->rules[rule_cnt].type ==
706 MLX5_RXP_ROF_ENTRY_CHECKSUM_EX_EM)) {
707 if (pending) {
708 /* Flush rules before checking reg values. */
709 rule_offset = (rule_cnt - pending);
710 ret = rxp_flush_rules(priv->ctx,
711 &rules->rules[rule_offset],
712 pending, id);
713 if (ret < 0) {
714 DRV_LOG(ERR, "Failed to flush rules.");
715 return -ENODEV;
716 }
717 }
718 block = (rules->rules[rule_cnt].addr >> 16) & 0xFFFF;
719 if (block == 0)
720 reg = MLX5_RXP_CSR_BASE_ADDRESS;
721 else if (block == 1)
722 reg = MLX5_RXP_RTRU_CSR_BASE_ADDRESS;
723 else {
724 DRV_LOG(ERR, "Invalid ROF register 0x%08X!",
725 rules->rules[rule_cnt].addr);
726 return -EINVAL;
727 }
728 reg += (rules->rules[rule_cnt].addr & 0xFFFF) *
729 MLX5_RXP_CSR_WIDTH;
730 ret = mlx5_devx_regex_register_read(priv->ctx, id,
731 reg, &val);
732 if (ret) {
733 DRV_LOG(ERR, "RXP CSR read failed!");
734 return ret;
735 }
736 if ((priv->prog_mode == MLX5_RXP_SHARED_PROG_MODE) &&
737 ((rules->rules[rule_cnt].type ==
738 MLX5_RXP_ROF_ENTRY_CHECKSUM_EX_EM) &&
739 (val != rules->rules[rule_cnt].value))) {
740 DRV_LOG(ERR, "Unexpected value for register:");
741 DRV_LOG(ERR, "reg %x" PRIu32 " got %x" PRIu32,
742 rules->rules[rule_cnt].addr, val);
743 DRV_LOG(ERR, "expected %" PRIx64 ".",
744 rules->rules[rule_cnt].value);
745 return -EINVAL;
746 } else if ((priv->prog_mode ==
747 MLX5_RXP_PRIVATE_PROG_MODE) &&
748 (rules->rules[rule_cnt].type ==
749 MLX5_RXP_ROF_ENTRY_CHECKSUM) &&
750 (val != rules->rules[rule_cnt].value)) {
751 DRV_LOG(ERR, "Unexpected value for register:");
752 DRV_LOG(ERR, "reg %x" PRIu32 " got %x" PRIu32,
753 rules->rules[rule_cnt].addr, val);
754 DRV_LOG(ERR, "expected %" PRIx64 ".",
755 rules->rules[rule_cnt].value);
756 return -EINVAL;
757 } else if ((rules->rules[rule_cnt].type ==
758 MLX5_RXP_ROF_ENTRY_EQ) &&
759 (val != rules->rules[rule_cnt].value)) {
760 DRV_LOG(ERR, "Unexpected value for register:");
761 DRV_LOG(ERR, "reg %x" PRIu32 " got %x" PRIu32,
762 rules->rules[rule_cnt].addr, val);
763 DRV_LOG(ERR, "expected %" PRIx64 ".",
764 rules->rules[rule_cnt].value);
765 return -EINVAL;
766 } else if ((rules->rules[rule_cnt].type ==
767 MLX5_RXP_ROF_ENTRY_GTE) &&
768 (val < rules->rules[rule_cnt].value)) {
769 DRV_LOG(ERR, "Unexpected value reg 0x%08X,",
770 rules->rules[rule_cnt].addr);
771 DRV_LOG(ERR, "got %X, expected >= %" PRIx64 ".",
772 val, rules->rules[rule_cnt].value);
773 return -EINVAL;
774 } else if ((rules->rules[rule_cnt].type ==
775 MLX5_RXP_ROF_ENTRY_LTE) &&
776 (val > rules->rules[rule_cnt].value)) {
777 DRV_LOG(ERR, "Unexpected value reg 0x%08X,",
778 rules->rules[rule_cnt].addr);
779 DRV_LOG(ERR, "got %08X expected <= %" PRIx64,
780 val, rules->rules[rule_cnt].value);
781 return -EINVAL;
782 }
783 rule_cnt++;
784 pending = 0;
785 } else {
786 DRV_LOG(ERR, "Error: Invalid rule type %d!",
787 rules->rules[rule_cnt].type);
788 return -EINVAL;
789 }
790 rules->count--;
791 }
792 return ret;
793 }
794
795 /*
796 * Shared memory programming mode: all external db instructions are
797 * written to EM via the host.
798 */
799 static int
800 write_shared_rules(struct mlx5_regex_priv *priv,
801 struct mlx5_rxp_ctl_rules_pgm *rules, uint32_t count,
802 uint8_t db_to_program)
803 {
804 uint32_t rule_cnt, rof_rule_addr;
805 uint64_t tmp_write_swap[4];
806
807 if (priv->prog_mode == MLX5_RXP_MODE_NOT_DEFINED)
808 return -EINVAL;
809 if ((rules->count == 0) || (count == 0))
810 return -EINVAL;
811 rule_cnt = 0;
812 /*
813 * Note the following section of code carries out a 32-byte swap of
814 * the instructions to coincide with the HW 32-byte swap. This may need
815 * to be removed in new variants of this programming function!
816 */
817 while (rule_cnt < rules->count) {
818 if ((rules->rules[rule_cnt].type == MLX5_RXP_ROF_ENTRY_EM) &&
819 (priv->prog_mode == MLX5_RXP_SHARED_PROG_MODE)) {
820 /*
821 * Note there are always blocks of 8 instructions for
822 * 7's written sequentially. However there is no
823 * guarantee that all blocks are sequential!
824 */
825 if (count >= (rule_cnt + MLX5_RXP_INST_BLOCK_SIZE)) {
826 /*
827 * Ensure the memory write does not exceed the DB memory boundary.
828 * This check is essential to ensure the 0x10000 offset is
829 * accounted for!
830 */
831 if ((uint8_t *)((uint8_t *)
832 priv->db[db_to_program].ptr +
833 ((rules->rules[rule_cnt + 7].addr <<
834 MLX5_RXP_INST_OFFSET))) >=
835 ((uint8_t *)((uint8_t *)
836 priv->db[db_to_program].ptr +
837 MLX5_MAX_DB_SIZE))) {
838 DRV_LOG(ERR, "DB exceeded memory!");
839 return -ENODEV;
840 }
841 /*
842 * Rule address Offset to align with RXP
843 * external instruction offset.
844 */
845 rof_rule_addr = (rules->rules[rule_cnt].addr <<
846 MLX5_RXP_INST_OFFSET);
847 /* 32 byte instruction swap (sw work around)! */
848 tmp_write_swap[0] = le64toh(
849 rules->rules[(rule_cnt + 4)].value);
850 tmp_write_swap[1] = le64toh(
851 rules->rules[(rule_cnt + 5)].value);
852 tmp_write_swap[2] = le64toh(
853 rules->rules[(rule_cnt + 6)].value);
854 tmp_write_swap[3] = le64toh(
855 rules->rules[(rule_cnt + 7)].value);
856 /* Write only 4 of the 8 instructions. */
857 memcpy((uint8_t *)((uint8_t *)
858 priv->db[db_to_program].ptr +
859 rof_rule_addr), &tmp_write_swap,
860 (sizeof(uint64_t) * 4));
861 /* Write 1st 4 rules of block after last 4. */
862 rof_rule_addr = (rules->rules[
863 (rule_cnt + 4)].addr <<
864 MLX5_RXP_INST_OFFSET);
865 tmp_write_swap[0] = le64toh(
866 rules->rules[(rule_cnt + 0)].value);
867 tmp_write_swap[1] = le64toh(
868 rules->rules[(rule_cnt + 1)].value);
869 tmp_write_swap[2] = le64toh(
870 rules->rules[(rule_cnt + 2)].value);
871 tmp_write_swap[3] = le64toh(
872 rules->rules[(rule_cnt + 3)].value);
873 memcpy((uint8_t *)((uint8_t *)
874 priv->db[db_to_program].ptr +
875 rof_rule_addr), &tmp_write_swap,
876 (sizeof(uint64_t) * 4));
877 } else
878 return -1;
879 /* Fast forward as already handled block of 8. */
880 rule_cnt += MLX5_RXP_INST_BLOCK_SIZE;
881 } else
882 rule_cnt++; /* Must be something other than EM rule. */
883 }
884 return 0;
885 }
886
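/*
 * Allocate and umem-register one database buffer per engine plus the extra
 * reprogramming buffers, and mark them all inactive and unassigned.
 */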
887 static int
888 rxp_db_setup(struct mlx5_regex_priv *priv)
889 {
890 int ret;
891 uint8_t i;
892
893 /* Setup database memories for both RXP engines + reprogram memory. */
894 for (i = 0; i < (priv->nb_engines + MLX5_RXP_EM_COUNT); i++) {
895 priv->db[i].ptr = rte_malloc("", MLX5_MAX_DB_SIZE, 0);
896 if (!priv->db[i].ptr) {
897 DRV_LOG(ERR, "Failed to alloc db memory!");
898 ret = ENODEV;
899 goto tidyup_error;
900 }
901 /* Register the memory. */
902 priv->db[i].umem.umem = mlx5_glue->devx_umem_reg(priv->ctx,
903 priv->db[i].ptr,
904 MLX5_MAX_DB_SIZE, 7);
905 if (!priv->db[i].umem.umem) {
906 DRV_LOG(ERR, "Failed to register memory!");
907 ret = ENODEV;
908 goto tidyup_error;
909 }
910 /* Ensure all DB memory is set to 0 before setting up the DB. */
911 memset(priv->db[i].ptr, 0x00, MLX5_MAX_DB_SIZE);
912 /* No data currently in database. */
913 priv->db[i].len = 0;
914 priv->db[i].active = false;
915 priv->db[i].db_assigned_to_eng_num = MLX5_RXP_DB_NOT_ASSIGNED;
916 }
917 return 0;
918 tidyup_error:
919 for (i = 0; i < (priv->nb_engines + MLX5_RXP_EM_COUNT); i++) {
920 if (priv->db[i].ptr)
921 rte_free(priv->db[i].ptr);
922 if (priv->db[i].umem.umem)
923 mlx5_glue->devx_umem_dereg(priv->db[i].umem.umem);
924 }
925 return -ret;
926 }
927
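/*
 * Import a rule database (ROF image) into every RXP engine: parse the buffer
 * once, then stop, program and restart each engine in turn.
 */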
928 int
929 mlx5_regex_rules_db_import(struct rte_regexdev *dev,
930 const char *rule_db, uint32_t rule_db_len)
931 {
932 struct mlx5_regex_priv *priv = dev->data->dev_private;
933 struct mlx5_rxp_ctl_rules_pgm *rules = NULL;
934 uint32_t id;
935 int ret;
936
937 if (priv->prog_mode == MLX5_RXP_MODE_NOT_DEFINED) {
938 DRV_LOG(ERR, "RXP programming mode not set!");
939 return -1;
940 }
941 if (rule_db == NULL) {
942 DRV_LOG(ERR, "Database empty!");
943 return -ENODEV;
944 }
945 if (rule_db_len == 0)
946 return -EINVAL;
947 ret = rxp_parse_rof(rule_db, rule_db_len, &rules);
948 if (ret) {
949 DRV_LOG(ERR, "Can't parse ROF file.");
950 return ret;
951 }
952 /* Need to ensure RXP not busy before stop! */
953 for (id = 0; id < priv->nb_engines; id++) {
954 ret = rxp_stop_engine(priv->ctx, id);
955 if (ret) {
956 DRV_LOG(ERR, "Can't stop engine.");
957 ret = -ENODEV;
958 goto tidyup_error;
959 }
960 ret = program_rxp_rules(priv, rules, id);
961 if (ret < 0) {
962 DRV_LOG(ERR, "Failed to program rxp rules.");
963 ret = -ENODEV;
964 goto tidyup_error;
965 }
966 ret = rxp_start_engine(priv->ctx, id);
967 if (ret) {
968 DRV_LOG(ERR, "Can't start engine.");
969 ret = -ENODEV;
970 goto tidyup_error;
971 }
972 }
973 rte_free(rules);
974 return 0;
975 tidyup_error:
976 rte_free(rules);
977 return ret;
978 }
979
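/*
 * Device configure callback: record the queue-pair and max-match limits,
 * allocate the queue-pair array and the RXP databases, and program the rule
 * database if one was supplied in the configuration.
 */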
980 int
981 mlx5_regex_configure(struct rte_regexdev *dev,
982 const struct rte_regexdev_config *cfg)
983 {
984 struct mlx5_regex_priv *priv = dev->data->dev_private;
985 int ret;
986
987 if (priv->prog_mode == MLX5_RXP_MODE_NOT_DEFINED)
988 return -1;
989 priv->nb_queues = cfg->nb_queue_pairs;
990 dev->data->dev_conf.nb_queue_pairs = priv->nb_queues;
991 priv->qps = rte_zmalloc(NULL, sizeof(struct mlx5_regex_qp) *
992 priv->nb_queues, 0);
993 if (!priv->qps) {
994 DRV_LOG(ERR, "can't allocate qps memory");
995 rte_errno = ENOMEM;
996 return -rte_errno;
997 }
998 priv->nb_max_matches = cfg->nb_max_matches;
999 /* Setup rxp db memories. */
1000 if (rxp_db_setup(priv)) {
1001 DRV_LOG(ERR, "Failed to setup RXP db memory");
1002 rte_errno = ENOMEM;
1003 return -rte_errno;
1004 }
1005 if (cfg->rule_db != NULL) {
1006 ret = mlx5_regex_rules_db_import(dev, cfg->rule_db,
1007 cfg->rule_db_len);
1008 if (ret < 0) {
1009 DRV_LOG(ERR, "Failed to program rxp rules.");
1010 rte_errno = ENODEV;
1011 goto configure_error;
1012 }
1013 } else
1014 DRV_LOG(DEBUG, "Regex config without rules programming!");
1015 return 0;
1016 configure_error:
1017 if (priv->qps)
1018 rte_free(priv->qps);
1019 return -rte_errno;
1020 }
1021