/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (C) 2020 Marvell International Ltd.
 */

#include <stdio.h>
#include <unistd.h>

#include <rte_malloc.h>
#include <rte_memzone.h>
#include <rte_regexdev.h>
#include <rte_regexdev_core.h>
#include <rte_regexdev_driver.h>


/* REE common headers */
#include "cn9k_regexdev.h"
#include "cn9k_regexdev_compiler.h"

/* HW matches are written at offset 0x80 from RES_PTR_ADDR, while in the
 * op structure matches start at W5 (offset 0x28). The 88 B window from
 * 0x28 to 0x80 is filled by copying matches from the tail of the HW
 * result area. Each match is 8 B, so up to 11 matches can be copied.
 */
#define REE_NUM_MATCHES_ALIGN	11
/* The REE co-processor will write up to 254 job match structures
 * (REE_MATCH_S) starting at address [RES_PTR_ADDR] + 0x80.
 */
#define REE_MATCH_OFFSET	0x80
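/* Layout arithmetic behind the two constants above, for illustration:
 * ops->matches (W5) sits at 0x28 and the HW result area at 0x80, so the
 * window in between holds 0x80 - 0x28 = 0x58 = 88 B. At 8 B per
 * REE_MATCH_S entry that is 88 / 8 = 11 entries, hence
 * REE_NUM_MATCHES_ALIGN == 11. Note also that ops->matches[11] lands at
 * 0x28 + 11 * 8 = 0x80, i.e. match slots from index 11 onwards alias the
 * HW result area directly.
 */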

#define REE_MAX_RULES_PER_GROUP 0xFFFF
#define REE_MAX_GROUPS 0xFFFF


#define REE_RULE_DB_VERSION	2
#define REE_RULE_DB_REVISION	0

struct ree_rule_db_entry {
	uint8_t type;
	uint32_t addr;
	uint64_t value;
};

struct ree_rule_db {
	uint32_t version;
	uint32_t revision;
	uint32_t number_of_entries;
	struct ree_rule_db_entry entries[];
} __rte_packed;
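/* Illustrative sizing, assuming the packed layout above: a rule DB blob
 * carrying N entries occupies sizeof(struct ree_rule_db) +
 * N * sizeof(struct ree_rule_db_entry) bytes. The import and configure
 * paths below recompute the entries' size from number_of_entries and
 * reject blobs whose declared length is smaller than that.
 */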

static void
qp_memzone_name_get(char *name, int size, int dev_id, int qp_id)
{
	snprintf(name, size, "cn9k_ree_lf_mem_%u:%u", dev_id, qp_id);
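	/* Illustration (hypothetical IDs): dev_id 0 and qp_id 1 produce
	 * the memzone name "cn9k_ree_lf_mem_0:1".
	 */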
}

static struct roc_ree_qp *
ree_qp_create(const struct rte_regexdev *dev, uint16_t qp_id)
{
	struct cn9k_ree_data *data = dev->data->dev_private;
	uint64_t pg_sz = sysconf(_SC_PAGESIZE);
	struct roc_ree_vf *vf = &data->vf;
	const struct rte_memzone *lf_mem;
	uint32_t len, iq_len, size_div2;
	char name[RTE_MEMZONE_NAMESIZE];
	uint64_t used_len, iova;
	struct roc_ree_qp *qp;
	uint8_t *va;
	int ret;

	/* Allocate queue pair */
	qp = rte_zmalloc("CN9K Regex PMD Queue Pair", sizeof(*qp),
			ROC_ALIGN);
	if (qp == NULL) {
		cn9k_err("Could not allocate queue pair");
		return NULL;
	}

	iq_len = REE_IQ_LEN;

	/*
	 * The queue size must be a power of 2, in units of 128 B, i.e.
	 * 2 * REE_INST_S (each instruction is 64 B).
	 * The effective queue size seen by software is (size - 1) * 128 B.
	 */
	size_div2 = iq_len >> 1;

	/* For pending queue */
	len = iq_len * RTE_ALIGN(sizeof(struct roc_ree_rid), 8);

	/* So that instruction queues start as pg size aligned */
	len = RTE_ALIGN(len, pg_sz);

	/* For instruction queues */
	len += REE_IQ_LEN * sizeof(union roc_ree_inst);

	/* Waste after instruction queues */
	len = RTE_ALIGN(len, pg_sz);
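	/* Resulting memzone layout (a sketch; actual sizes depend on
	 * REE_IQ_LEN and the page size):
	 *
	 *   [0]              pending queue: iq_len roc_ree_rid entries
	 *   [pad to pg_sz]
	 *   [iq offset]      instruction queue: REE_IQ_LEN roc_ree_inst
	 *   [pad to pg_sz]
	 */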

	qp_memzone_name_get(name, RTE_MEMZONE_NAMESIZE, dev->data->dev_id,
			qp_id);

	lf_mem = rte_memzone_reserve_aligned(name, len, rte_socket_id(),
			RTE_MEMZONE_SIZE_HINT_ONLY | RTE_MEMZONE_256MB,
			RTE_CACHE_LINE_SIZE);
	if (lf_mem == NULL) {
		cn9k_err("Could not allocate reserved memzone");
		goto qp_free;
	}

	va = lf_mem->addr;
	iova = lf_mem->iova;

	memset(va, 0, len);

	/* Initialize pending queue */
	qp->pend_q.rid_queue = (struct roc_ree_rid *)va;
	qp->pend_q.enq_tail = 0;
	qp->pend_q.deq_head = 0;
	qp->pend_q.pending_count = 0;

	used_len = iq_len * RTE_ALIGN(sizeof(struct roc_ree_rid), 8);
	used_len = RTE_ALIGN(used_len, pg_sz);
	iova += used_len;

	qp->iq_dma_addr = iova;
	qp->id = qp_id;
	qp->base = roc_ree_qp_get_base(vf, qp_id);
	qp->roc_regexdev_jobid = 0;
	qp->write_offset = 0;

	ret = roc_ree_iq_enable(vf, qp, REE_QUEUE_HI_PRIO, size_div2);
	if (ret) {
		cn9k_err("Could not enable instruction queue");
		goto qp_free;
	}

	return qp;

qp_free:
	rte_free(qp);
	return NULL;
}

static int
ree_qp_destroy(const struct rte_regexdev *dev, struct roc_ree_qp *qp)
{
	const struct rte_memzone *lf_mem;
	char name[RTE_MEMZONE_NAMESIZE];
	int ret;

	roc_ree_iq_disable(qp);

	qp_memzone_name_get(name, RTE_MEMZONE_NAMESIZE, dev->data->dev_id,
			qp->id);

	lf_mem = rte_memzone_lookup(name);

	ret = rte_memzone_free(lf_mem);
	if (ret)
		return ret;

	rte_free(qp);

	return 0;
}

static int
ree_queue_pair_release(struct rte_regexdev *dev, uint16_t qp_id)
{
	struct cn9k_ree_data *data = dev->data->dev_private;
	struct roc_ree_qp *qp = data->queue_pairs[qp_id];
	int ret;

	ree_func_trace("Queue=%d", qp_id);

	if (qp == NULL)
		return -EINVAL;

	ret = ree_qp_destroy(dev, qp);
	if (ret) {
		cn9k_err("Could not destroy queue pair %d", qp_id);
		return ret;
	}

	data->queue_pairs[qp_id] = NULL;

	return 0;
}

static struct rte_regexdev *
ree_dev_register(const char *name)
{
	struct rte_regexdev *dev;

	cn9k_ree_dbg("Creating regexdev %s\n", name);

	/* allocate device structure */
	dev = rte_regexdev_register(name);
	if (dev == NULL) {
		cn9k_err("Failed to allocate regex device for %s", name);
		return NULL;
	}

	/* allocate private device structure */
	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		dev->data->dev_private =
				rte_zmalloc_socket("regexdev device private",
						sizeof(struct cn9k_ree_data),
						RTE_CACHE_LINE_SIZE,
						rte_socket_id());

		if (dev->data->dev_private == NULL) {
			cn9k_err("Cannot allocate memory for dev %s private data",
					name);

			rte_regexdev_unregister(dev);
			return NULL;
		}
	}

	return dev;
}

static int
ree_dev_unregister(struct rte_regexdev *dev)
{
	cn9k_ree_dbg("Closing regex device %s", dev->device->name);

	/* free regex device */
	rte_regexdev_unregister(dev);

	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		rte_free(dev->data->dev_private);

	return 0;
}

static int
ree_dev_fini(struct rte_regexdev *dev)
{
	struct cn9k_ree_data *data = dev->data->dev_private;
	struct roc_ree_vf *vf = &data->vf;
	int i, ret;

	ree_func_trace();

	for (i = 0; i < data->nb_queue_pairs; i++) {
		ret = ree_queue_pair_release(dev, i);
		if (ret)
			return ret;
	}

	ret = roc_ree_queues_detach(vf);
	if (ret)
		cn9k_err("Could not detach queues");

	/* TEMP : should be in lib */
	rte_free(data->queue_pairs);
	rte_free(data->rules);

	roc_ree_dev_fini(vf);

	ret = ree_dev_unregister(dev);
	if (ret)
		cn9k_err("Could not destroy PMD");

	return ret;
}

static inline int
ree_enqueue(struct roc_ree_qp *qp, struct rte_regex_ops *op,
		struct roc_ree_pending_queue *pend_q)
{
	union roc_ree_inst inst;
	union ree_res *res;
	uint32_t offset;

	if (unlikely(pend_q->pending_count >= REE_DEFAULT_CMD_QLEN)) {
		cn9k_err("Pending count %" PRIu64 " has reached Q size %d",
				pend_q->pending_count, REE_DEFAULT_CMD_QLEN);
		return -EAGAIN;
	}
	if (unlikely(op->mbuf->data_len > REE_MAX_PAYLOAD_SIZE ||
			op->mbuf->data_len == 0)) {
		cn9k_err("Packet length %d is zero or greater than MAX payload %d",
				op->mbuf->data_len, REE_MAX_PAYLOAD_SIZE);
		return -EAGAIN;
	}

	/* W 0 */
	inst.cn98xx.ooj = 1;
	inst.cn98xx.dg = 0;
	inst.cn98xx.doneint = 0;
	/* W 1 */
	inst.cn98xx.inp_ptr_addr = rte_pktmbuf_mtod(op->mbuf, uint64_t);
	/* W 2 */
	inst.cn98xx.inp_ptr_ctl = op->mbuf->data_len & 0x7FFF;
	inst.cn98xx.inp_ptr_ctl = inst.cn98xx.inp_ptr_ctl << 32;

	/* W 3 */
	inst.cn98xx.res_ptr_addr = (uint64_t)op;
	/* W 4 */
	inst.cn98xx.wq_ptr = 0;
	/* W 5 */
	inst.cn98xx.ggrp = 0;
	inst.cn98xx.tt = 0;
	inst.cn98xx.tag = 0;
	/* W 6 */
	inst.cn98xx.ree_job_length = op->mbuf->data_len & 0x7FFF;
	if (op->req_flags & RTE_REGEX_OPS_REQ_STOP_ON_MATCH_F)
		inst.cn98xx.ree_job_ctrl = (0x2 << 8);
	else if (op->req_flags & RTE_REGEX_OPS_REQ_MATCH_HIGH_PRIORITY_F)
		inst.cn98xx.ree_job_ctrl = (0x1 << 8);
	else
		inst.cn98xx.ree_job_ctrl = 0;
	inst.cn98xx.ree_job_id = qp->roc_regexdev_jobid;
	/* W 7 */
	inst.cn98xx.ree_job_subset_id_0 = op->group_id0;
	if (op->req_flags & RTE_REGEX_OPS_REQ_GROUP_ID1_VALID_F)
		inst.cn98xx.ree_job_subset_id_1 = op->group_id1;
	else
		inst.cn98xx.ree_job_subset_id_1 = op->group_id0;
	if (op->req_flags & RTE_REGEX_OPS_REQ_GROUP_ID2_VALID_F)
		inst.cn98xx.ree_job_subset_id_2 = op->group_id2;
	else
		inst.cn98xx.ree_job_subset_id_2 = op->group_id0;
	if (op->req_flags & RTE_REGEX_OPS_REQ_GROUP_ID3_VALID_F)
		inst.cn98xx.ree_job_subset_id_3 = op->group_id3;
	else
		inst.cn98xx.ree_job_subset_id_3 = op->group_id0;

	/* Copy REE command to Q */
	offset = qp->write_offset * sizeof(inst);
	memcpy((void *)(qp->iq_dma_addr + offset), &inst, sizeof(inst));
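	/* Note: the copy above dereferences iq_dma_addr as a CPU pointer,
	 * which assumes an IOVA == VA mapping for the queue memzone.
	 */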

	pend_q->rid_queue[pend_q->enq_tail].rid = (uintptr_t)op;
	pend_q->rid_queue[pend_q->enq_tail].user_id = op->user_id;

	/* Mark result as not done */
	res = (union ree_res *)(op);
	res->s.done = 0;
	res->s.ree_err = 0;

	/* We will use soft queue length here to limit requests */
	REE_MOD_INC(pend_q->enq_tail, REE_DEFAULT_CMD_QLEN);
	pend_q->pending_count += 1;
	REE_MOD_INC(qp->roc_regexdev_jobid, 0xFFFFFF);
	REE_MOD_INC(qp->write_offset, REE_IQ_LEN);

	return 0;
}

static uint16_t
cn9k_ree_enqueue_burst(struct rte_regexdev *dev, uint16_t qp_id,
		struct rte_regex_ops **ops, uint16_t nb_ops)
{
	struct cn9k_ree_data *data = dev->data->dev_private;
	struct roc_ree_qp *qp = data->queue_pairs[qp_id];
	struct roc_ree_pending_queue *pend_q;
	uint16_t nb_allowed, count = 0;
	struct rte_regex_ops *op;
	int ret;

	pend_q = &qp->pend_q;

	nb_allowed = REE_DEFAULT_CMD_QLEN - pend_q->pending_count;
	if (nb_ops > nb_allowed)
		nb_ops = nb_allowed;

	for (count = 0; count < nb_ops; count++) {
		op = ops[count];
		ret = ree_enqueue(qp, op, pend_q);

		if (unlikely(ret))
			break;
	}

	/*
	 * Make sure all instructions are written before DOORBELL is activated
	 */
	rte_io_wmb();

	/* Update Doorbell */
	plt_write64(count, qp->base + REE_LF_DOORBELL);

	return count;
}

static inline void
ree_dequeue_post_process(struct rte_regex_ops *ops)
{
	uint8_t ree_res_mcnt, ree_res_dmcnt;
	int off = REE_MATCH_OFFSET;
	struct ree_res_s_98 *res;
	uint16_t ree_res_status;
	uint64_t match;

	res = (struct ree_res_s_98 *)ops;
	/* store res values on stack since ops and res
	 * are using the same memory
	 */
	ree_res_status = res->ree_res_status;
	ree_res_mcnt = res->ree_res_mcnt;
	ree_res_dmcnt = res->ree_res_dmcnt;
	ops->rsp_flags = 0;
	ops->nb_actual_matches = ree_res_dmcnt;
	ops->nb_matches = ree_res_mcnt;
	if (unlikely(res->ree_err)) {
		ops->nb_actual_matches = 0;
		ops->nb_matches = 0;
	}

	if (unlikely(ree_res_status != REE_TYPE_RESULT_DESC)) {
		if (ree_res_status & REE_STATUS_PMI_SOJ_BIT)
			ops->rsp_flags |= RTE_REGEX_OPS_RSP_PMI_SOJ_F;
		if (ree_res_status & REE_STATUS_PMI_EOJ_BIT)
			ops->rsp_flags |= RTE_REGEX_OPS_RSP_PMI_EOJ_F;
		if (ree_res_status & REE_STATUS_ML_CNT_DET_BIT)
			ops->rsp_flags |= RTE_REGEX_OPS_RSP_MAX_SCAN_TIMEOUT_F;
		if (ree_res_status & REE_STATUS_MM_CNT_DET_BIT)
			ops->rsp_flags |= RTE_REGEX_OPS_RSP_MAX_MATCH_F;
		if (ree_res_status & REE_STATUS_MP_CNT_DET_BIT)
			ops->rsp_flags |= RTE_REGEX_OPS_RSP_MAX_PREFIX_F;
	}
	if (ops->nb_matches > 0) {
		/* Move the matches to the correct offset */
		off = ((ops->nb_matches < REE_NUM_MATCHES_ALIGN) ?
				ops->nb_matches : REE_NUM_MATCHES_ALIGN);
		match = (uint64_t)ops + REE_MATCH_OFFSET;
		match += (ops->nb_matches - off) *
				sizeof(union ree_match);
		memcpy((void *)ops->matches, (void *)match,
				off * sizeof(union ree_match));
	}
}
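
/* Worked example of the tail copy above (a sketch, using the offsets
 * defined at the top of this file): with nb_matches = 15, off = 11 and
 * the source pointer starts (15 - 11) * 8 B into the HW result area, so
 * HW matches 4..14 are copied into ops->matches[0..10] at offset 0x28.
 * HW matches 0..3 stay where the HW wrote them, at 0x80 onwards, which
 * is exactly where ops->matches[11..14] live (0x28 + 11 * 8 = 0x80).
 */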

static uint16_t
cn9k_ree_dequeue_burst(struct rte_regexdev *dev, uint16_t qp_id,
		struct rte_regex_ops **ops, uint16_t nb_ops)
{
	struct cn9k_ree_data *data = dev->data->dev_private;
	struct roc_ree_qp *qp = data->queue_pairs[qp_id];
	struct roc_ree_pending_queue *pend_q;
	int i, nb_pending, nb_completed = 0;
	volatile struct ree_res_s_98 *res;
	struct roc_ree_rid *rid;

	pend_q = &qp->pend_q;

	nb_pending = pend_q->pending_count;

	if (nb_ops > nb_pending)
		nb_ops = nb_pending;

	for (i = 0; i < nb_ops; i++) {
		rid = &pend_q->rid_queue[pend_q->deq_head];
		res = (volatile struct ree_res_s_98 *)(rid->rid);

		/* Check response header done bit if completed */
		if (unlikely(!res->done))
			break;

		ops[i] = (struct rte_regex_ops *)(rid->rid);
		ops[i]->user_id = rid->user_id;

		REE_MOD_INC(pend_q->deq_head, REE_DEFAULT_CMD_QLEN);
		pend_q->pending_count -= 1;
	}

	nb_completed = i;

	for (i = 0; i < nb_completed; i++)
		ree_dequeue_post_process(ops[i]);

	return nb_completed;
}

static int
cn9k_ree_dev_info_get(struct rte_regexdev *dev, struct rte_regexdev_info *info)
{
	struct cn9k_ree_data *data = dev->data->dev_private;
	struct roc_ree_vf *vf = &data->vf;

	ree_func_trace();

	if (info == NULL)
		return -EINVAL;

	info->driver_name = dev->device->driver->name;
	info->dev = dev->device;

	info->max_queue_pairs = vf->max_queues;
	info->max_matches = vf->max_matches;
	info->max_payload_size = REE_MAX_PAYLOAD_SIZE;
	info->max_rules_per_group = data->max_rules_per_group;
	info->max_groups = data->max_groups;
	info->regexdev_capa = data->regexdev_capa;
	info->rule_flags = data->rule_flags;

	return 0;
}

static int
cn9k_ree_dev_config(struct rte_regexdev *dev,
		const struct rte_regexdev_config *cfg)
{
	struct cn9k_ree_data *data = dev->data->dev_private;
	struct roc_ree_vf *vf = &data->vf;
	const struct ree_rule_db *rule_db;
	uint32_t rule_db_len;
	int ret;

	ree_func_trace();

	if (cfg->nb_queue_pairs > vf->max_queues) {
		cn9k_err("Invalid number of queue pairs requested");
		return -EINVAL;
	}

	if (cfg->nb_max_matches != vf->max_matches) {
		cn9k_err("Invalid number of max matches requested");
		return -EINVAL;
	}

	if (cfg->dev_cfg_flags != 0) {
		cn9k_err("Invalid device configuration flags requested");
		return -EINVAL;
	}

	/* Unregister error interrupts */
	if (vf->err_intr_registered)
		roc_ree_err_intr_unregister(vf);

	/* Detach queues */
	if (vf->nb_queues) {
		ret = roc_ree_queues_detach(vf);
		if (ret) {
			cn9k_err("Could not detach REE queues");
			return ret;
		}
	}

	/* TEMP : should be in lib */
	if (data->queue_pairs == NULL) { /* first time configuration */
		data->queue_pairs = rte_zmalloc("regexdev->queue_pairs",
				sizeof(data->queue_pairs[0]) *
				cfg->nb_queue_pairs, RTE_CACHE_LINE_SIZE);

		if (data->queue_pairs == NULL) {
			data->nb_queue_pairs = 0;
			cn9k_err("Failed to get memory for qp meta data, nb_queues %u",
					cfg->nb_queue_pairs);
			return -ENOMEM;
		}
	} else { /* re-configure */
		uint16_t old_nb_queues = data->nb_queue_pairs;
		void **qp;
		unsigned int i;

		qp = data->queue_pairs;

		for (i = cfg->nb_queue_pairs; i < old_nb_queues; i++) {
			ret = ree_queue_pair_release(dev, i);
			if (ret < 0)
				return ret;
		}

		qp = rte_realloc(qp, sizeof(qp[0]) * cfg->nb_queue_pairs,
				RTE_CACHE_LINE_SIZE);
		if (qp == NULL) {
			cn9k_err("Failed to realloc qp meta data, nb_queues %u",
					cfg->nb_queue_pairs);
			return -ENOMEM;
		}

		if (cfg->nb_queue_pairs > old_nb_queues) {
			uint16_t new_qs = cfg->nb_queue_pairs - old_nb_queues;
			memset(qp + old_nb_queues, 0, sizeof(qp[0]) * new_qs);
		}

		data->queue_pairs = qp;
	}
	data->nb_queue_pairs = cfg->nb_queue_pairs;

	/* Attach queues */
	cn9k_ree_dbg("Attach %d queues", cfg->nb_queue_pairs);
	ret = roc_ree_queues_attach(vf, cfg->nb_queue_pairs);
	if (ret) {
		cn9k_err("Could not attach queues");
		return -ENODEV;
	}

	ret = roc_ree_msix_offsets_get(vf);
	if (ret) {
		cn9k_err("Could not get MSI-X offsets");
		goto queues_detach;
	}

	if (cfg->rule_db && cfg->rule_db_len) {
		cn9k_ree_dbg("rule_db length %d", cfg->rule_db_len);
		rule_db = (const struct ree_rule_db *)cfg->rule_db;
		rule_db_len = rule_db->number_of_entries *
				sizeof(struct ree_rule_db_entry);
		cn9k_ree_dbg("rule_db number of entries %d",
				rule_db->number_of_entries);
		if (rule_db_len > cfg->rule_db_len) {
			cn9k_err("Could not program rule db");
			ret = -EINVAL;
			goto queues_detach;
		}
		ret = roc_ree_rule_db_prog(vf, (const char *)rule_db->entries,
				rule_db_len, NULL, REE_NON_INC_PROG);
		if (ret) {
			cn9k_err("Could not program rule db");
			goto queues_detach;
		}
	}

	dev->enqueue = cn9k_ree_enqueue_burst;
	dev->dequeue = cn9k_ree_dequeue_burst;

	rte_mb();
	return 0;

queues_detach:
	roc_ree_queues_detach(vf);
	return ret;
}

static int
cn9k_ree_stop(struct rte_regexdev *dev)
{
	RTE_SET_USED(dev);

	ree_func_trace();
	return 0;
}

static int
cn9k_ree_start(struct rte_regexdev *dev)
{
	struct cn9k_ree_data *data = dev->data->dev_private;
	struct roc_ree_vf *vf = &data->vf;
	uint32_t rule_db_len = 0;
	int ret;

	ree_func_trace();

	ret = roc_ree_rule_db_len_get(vf, &rule_db_len, NULL);
	if (ret)
		return ret;
	if (rule_db_len == 0) {
		cn9k_err("Rule db not programmed");
		return -EFAULT;
	}

	return 0;
}

static int
cn9k_ree_close(struct rte_regexdev *dev)
{
	return ree_dev_fini(dev);
}

static int
cn9k_ree_queue_pair_setup(struct rte_regexdev *dev, uint16_t qp_id,
		const struct rte_regexdev_qp_conf *qp_conf)
{
	struct cn9k_ree_data *data = dev->data->dev_private;
	struct roc_ree_qp *qp;

	ree_func_trace("Queue=%d", qp_id);

	if (data->queue_pairs[qp_id] != NULL)
		ree_queue_pair_release(dev, qp_id);

	if (qp_conf->nb_desc > REE_DEFAULT_CMD_QLEN) {
		cn9k_err("Could not setup queue pair for %u descriptors",
				qp_conf->nb_desc);
		return -EINVAL;
	}
	if (qp_conf->qp_conf_flags != 0) {
		cn9k_err("Could not setup queue pair with configuration flags 0x%x",
				qp_conf->qp_conf_flags);
		return -EINVAL;
	}

	qp = ree_qp_create(dev, qp_id);
	if (qp == NULL) {
		cn9k_err("Could not create queue pair %d", qp_id);
		return -ENOMEM;
	}
	data->queue_pairs[qp_id] = qp;

	return 0;
}

static int
cn9k_ree_rule_db_compile_activate(struct rte_regexdev *dev)
{
	return cn9k_ree_rule_db_compile_prog(dev);
}

static int
cn9k_ree_rule_db_update(struct rte_regexdev *dev,
		const struct rte_regexdev_rule *rules, uint16_t nb_rules)
{
	struct cn9k_ree_data *data = dev->data->dev_private;
	struct rte_regexdev_rule *old_ptr;
	uint32_t i, sum_nb_rules;

	ree_func_trace("nb_rules=%d", nb_rules);

	for (i = 0; i < nb_rules; i++) {
		if (rules[i].op == RTE_REGEX_RULE_OP_REMOVE)
			break;
		if (rules[i].group_id >= data->max_groups)
			break;
		if (rules[i].rule_id >= data->max_rules_per_group)
			break;
		/* Logical implication: every flag requested by the rule (p)
		 * must be supported by the device (q).
		 * p q p -> q
		 * 0 0   1
		 * 0 1   1
		 * 1 0   0
		 * 1 1   1
		 * The implication fails only when p = 1 and q = 0, i.e. when
		 * the rule requests a flag the device does not support.
		 */
		if (rules[i].rule_flags & ~data->rule_flags)
			break;
	}
	nb_rules = i;

	if (data->nb_rules == 0) {

		data->rules = rte_malloc("rte_regexdev_rules",
				nb_rules * sizeof(struct rte_regexdev_rule), 0);
		if (data->rules == NULL)
			return -ENOMEM;

		memcpy(data->rules, rules,
				nb_rules * sizeof(struct rte_regexdev_rule));
		data->nb_rules = nb_rules;
	} else {

		old_ptr = data->rules;
		sum_nb_rules = data->nb_rules + nb_rules;
		data->rules = rte_realloc(data->rules,
				sum_nb_rules * sizeof(struct rte_regexdev_rule),
				0);
		if (data->rules == NULL) {
			data->rules = old_ptr;
			return -ENOMEM;
		}
		memcpy(&data->rules[data->nb_rules], rules,
				nb_rules * sizeof(struct rte_regexdev_rule));
		data->nb_rules = sum_nb_rules;
	}
	return nb_rules;
}

static int
cn9k_ree_rule_db_import(struct rte_regexdev *dev, const char *rule_db,
		uint32_t rule_db_len)
{
	struct cn9k_ree_data *data = dev->data->dev_private;
	struct roc_ree_vf *vf = &data->vf;
	const struct ree_rule_db *ree_rule_db;
	uint32_t ree_rule_db_len;
	int ret;

	ree_func_trace("rule_db_len=%d", rule_db_len);

	ree_rule_db = (const struct ree_rule_db *)rule_db;
	ree_rule_db_len = ree_rule_db->number_of_entries *
			sizeof(struct ree_rule_db_entry);
	if (ree_rule_db_len > rule_db_len) {
		cn9k_err("Could not program rule db");
		return -EINVAL;
	}
	ret = roc_ree_rule_db_prog(vf, (const char *)ree_rule_db->entries,
			ree_rule_db_len, NULL, REE_NON_INC_PROG);
	if (ret) {
		cn9k_err("Could not program rule db");
		return -ENOSPC;
	}
	return 0;
}

static int
cn9k_ree_rule_db_export(struct rte_regexdev *dev, char *rule_db)
{
	struct cn9k_ree_data *data = dev->data->dev_private;
	struct roc_ree_vf *vf = &data->vf;
	struct ree_rule_db *ree_rule_db;
	uint32_t rule_dbi_len;
	uint32_t rule_db_len;
	int ret;

	ree_func_trace();

	ret = roc_ree_rule_db_len_get(vf, &rule_db_len, &rule_dbi_len);
	if (ret)
		return ret;

	if (rule_db == NULL) {
		rule_db_len += sizeof(struct ree_rule_db);
		return rule_db_len;
	}

	ree_rule_db = (struct ree_rule_db *)rule_db;
	ret = roc_ree_rule_db_get(vf, (char *)ree_rule_db->entries,
			rule_db_len, NULL, 0);
	if (ret) {
		cn9k_err("Could not export rule db");
		return -EFAULT;
	}
	ree_rule_db->number_of_entries =
			rule_db_len / sizeof(struct ree_rule_db_entry);
	ree_rule_db->revision = REE_RULE_DB_REVISION;
	ree_rule_db->version = REE_RULE_DB_VERSION;

	return 0;
}

static struct rte_regexdev_ops cn9k_ree_ops = {
	.dev_info_get = cn9k_ree_dev_info_get,
	.dev_configure = cn9k_ree_dev_config,
	.dev_qp_setup = cn9k_ree_queue_pair_setup,
	.dev_start = cn9k_ree_start,
	.dev_stop = cn9k_ree_stop,
	.dev_close = cn9k_ree_close,
	.dev_attr_get = NULL,
	.dev_attr_set = NULL,
	.dev_rule_db_update = cn9k_ree_rule_db_update,
	.dev_rule_db_compile_activate =
			cn9k_ree_rule_db_compile_activate,
	.dev_db_import = cn9k_ree_rule_db_import,
	.dev_db_export = cn9k_ree_rule_db_export,
	.dev_xstats_names_get = NULL,
	.dev_xstats_get = NULL,
	.dev_xstats_by_name_get = NULL,
	.dev_xstats_reset = NULL,
	.dev_selftest = NULL,
	.dev_dump = NULL,
};
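
/* Minimal usage sketch from an application's point of view (illustrative
 * only; error handling omitted, and the rule-db blob is assumed to come
 * from a prior rte_regexdev_rule_db_export() or the compiler path):
 *
 *	struct rte_regexdev_config cfg = {
 *		.nb_queue_pairs = 1,
 *		.nb_max_matches = info.max_matches,
 *		.rule_db = db_blob,
 *		.rule_db_len = db_blob_len,
 *	};
 *	struct rte_regexdev_qp_conf qp_conf = {
 *		.nb_desc = REE_DEFAULT_CMD_QLEN,
 *	};
 *
 *	rte_regexdev_configure(dev_id, &cfg);       -> cn9k_ree_dev_config()
 *	rte_regexdev_queue_pair_setup(dev_id, 0, &qp_conf);
 *	rte_regexdev_start(dev_id);                 -> cn9k_ree_start()
 *	rte_regexdev_enqueue_burst(dev_id, 0, ops, nb_ops);
 *	rte_regexdev_dequeue_burst(dev_id, 0, ops, nb_ops);
 */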

static int
cn9k_ree_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
		struct rte_pci_device *pci_dev)
{
	char name[RTE_REGEXDEV_NAME_MAX_LEN];
	struct cn9k_ree_data *data;
	struct rte_regexdev *dev;
	struct roc_ree_vf *vf;
	int ret;

	ret = roc_plt_init();
	if (ret < 0) {
		plt_err("Failed to initialize platform model");
		return ret;
	}

	rte_pci_device_name(&pci_dev->addr, name, sizeof(name));

	dev = ree_dev_register(name);
	if (dev == NULL) {
		ret = -ENODEV;
		goto exit;
	}

	dev->dev_ops = &cn9k_ree_ops;
	dev->device = &pci_dev->device;

	/* Get private data space allocated */
	data = dev->data->dev_private;
	vf = &data->vf;
	vf->pci_dev = pci_dev;
	ret = roc_ree_dev_init(vf);
	if (ret) {
		plt_err("Failed to initialize roc ree rc=%d", ret);
		goto dev_unregister;
	}

	data->rule_flags = RTE_REGEX_PCRE_RULE_ALLOW_EMPTY_F |
			RTE_REGEX_PCRE_RULE_ANCHORED_F;
	data->regexdev_capa = 0;
	data->max_groups = REE_MAX_GROUPS;
	data->max_rules_per_group = REE_MAX_RULES_PER_GROUP;
	data->nb_rules = 0;

	dev->state = RTE_REGEXDEV_READY;
	return 0;

dev_unregister:
	ree_dev_unregister(dev);
exit:
	cn9k_err("Could not create device (vendor_id: 0x%x device_id: 0x%x)",
			pci_dev->id.vendor_id, pci_dev->id.device_id);
	return ret;
}

static int
cn9k_ree_pci_remove(struct rte_pci_device *pci_dev)
{
	char name[RTE_REGEXDEV_NAME_MAX_LEN];
	struct rte_regexdev *dev = NULL;

	if (pci_dev == NULL)
		return -EINVAL;

	rte_pci_device_name(&pci_dev->addr, name, sizeof(name));

	dev = rte_regexdev_get_device_by_name(name);

	if (dev == NULL)
		return -ENODEV;

	return ree_dev_fini(dev);
}

static struct rte_pci_id pci_id_ree_table[] = {
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM,
				PCI_DEVID_CNXK_RVU_REE_PF)
	},
	{
		.vendor_id = 0,
	}
};

static struct rte_pci_driver cn9k_regexdev_pmd = {
	.id_table = pci_id_ree_table,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING,
	.probe = cn9k_ree_pci_probe,
	.remove = cn9k_ree_pci_remove,
};


RTE_PMD_REGISTER_PCI(REGEXDEV_NAME_CN9K_PMD, cn9k_regexdev_pmd);
RTE_PMD_REGISTER_PCI_TABLE(REGEXDEV_NAME_CN9K_PMD, pci_id_ree_table);