/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2021 Marvell.
 */

#include <rte_cryptodev.h>
#include <rte_eventdev.h>
#include <rte_security.h>
#include <rte_security_driver.h>
#include <rte_pmd_cnxk.h>

#include <cn10k_ethdev.h>
#include <cnxk_security.h>
#include <roc_priv.h>

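/*
 * Crypto algorithm capabilities advertised for inline IPsec sessions on
 * CN10K. Every rte_security capability entry further below points back at
 * this table via .crypto_capabilities.
 */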
static struct rte_cryptodev_capabilities cn10k_eth_sec_crypto_caps[] = {
	{ /* AES GCM */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = {
			.xform_type = RTE_CRYPTO_SYM_XFORM_AEAD,
			{.aead = {
				.algo = RTE_CRYPTO_AEAD_AES_GCM,
				.block_size = 16,
				.key_size = {
					.min = 16,
					.max = 32,
					.increment = 8
				},
				.digest_size = {
					.min = 16,
					.max = 16,
					.increment = 0
				},
				.aad_size = {
					.min = 8,
					.max = 12,
					.increment = 4
				},
				.iv_size = {
					.min = 12,
					.max = 12,
					.increment = 0
				}
			}, }
		}, }
	},
	{ /* AES CBC */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = {
			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
			{.cipher = {
				.algo = RTE_CRYPTO_CIPHER_AES_CBC,
				.block_size = 16,
				.key_size = {
					.min = 16,
					.max = 32,
					.increment = 8
				},
				.iv_size = {
					.min = 16,
					.max = 16,
					.increment = 0
				}
			}, }
		}, }
	},
	{ /* AES CTR */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = {
			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
			{.cipher = {
				.algo = RTE_CRYPTO_CIPHER_AES_CTR,
				.block_size = 16,
				.key_size = {
					.min = 16,
					.max = 32,
					.increment = 8
				},
				.iv_size = {
					.min = 12,
					.max = 16,
					.increment = 4
				}
			}, }
		}, }
	},
	{ /* 3DES CBC */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = {
			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
			{.cipher = {
				.algo = RTE_CRYPTO_CIPHER_3DES_CBC,
				.block_size = 8,
				.key_size = {
					.min = 24,
					.max = 24,
					.increment = 0
				},
				.iv_size = {
					.min = 8,
					.max = 16,
					.increment = 8
				}
			}, }
		}, }
	},
	{ /* AES-XCBC */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = {
			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
			{.auth = {
				.algo = RTE_CRYPTO_AUTH_AES_XCBC_MAC,
				.block_size = 16,
				.key_size = {
					.min = 16,
					.max = 16,
					.increment = 0
				},
				.digest_size = {
					.min = 12,
					.max = 12,
					.increment = 0,
				},
			}, }
		}, }
	},
	{ /* SHA1 HMAC */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = {
			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
			{.auth = {
				.algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
				.block_size = 64,
				.key_size = {
					.min = 20,
					.max = 64,
					.increment = 1
				},
				.digest_size = {
					.min = 12,
					.max = 12,
					.increment = 0
				},
			}, }
		}, }
	},
	{ /* SHA256 HMAC */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = {
			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
			{.auth = {
				.algo = RTE_CRYPTO_AUTH_SHA256_HMAC,
				.block_size = 64,
				.key_size = {
					.min = 1,
					.max = 1024,
					.increment = 1
				},
				.digest_size = {
					.min = 16,
					.max = 32,
					.increment = 16
				},
			}, }
		}, }
	},
	{ /* SHA384 HMAC */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = {
			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
			{.auth = {
				.algo = RTE_CRYPTO_AUTH_SHA384_HMAC,
				.block_size = 64,
				.key_size = {
					.min = 1,
					.max = 1024,
					.increment = 1
				},
				.digest_size = {
					.min = 24,
					.max = 48,
					.increment = 24
				},
			}, }
		}, }
	},
	{ /* SHA512 HMAC */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = {
			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
			{.auth = {
				.algo = RTE_CRYPTO_AUTH_SHA512_HMAC,
				.block_size = 128,
				.key_size = {
					.min = 1,
					.max = 1024,
					.increment = 1
				},
				.digest_size = {
					.min = 32,
					.max = 64,
					.increment = 32
				},
			}, }
		}, }
	},
	{ /* AES GMAC (AUTH) */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = {
			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
			{.auth = {
				.algo = RTE_CRYPTO_AUTH_AES_GMAC,
				.block_size = 16,
				.key_size = {
					.min = 16,
					.max = 32,
					.increment = 8
				},
				.digest_size = {
					.min = 8,
					.max = 16,
					.increment = 4
				},
				.iv_size = {
					.min = 12,
					.max = 12,
					.increment = 0
				}
			}, }
		}, }
	},
	{ /* NULL (AUTH) */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = {
			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
			{.auth = {
				.algo = RTE_CRYPTO_AUTH_NULL,
				.block_size = 1,
				.key_size = {
					.min = 0,
					.max = 0,
					.increment = 0
				},
				.digest_size = {
					.min = 0,
					.max = 0,
					.increment = 0
				},
			}, },
		}, },
	},
	{ /* NULL (CIPHER) */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = {
			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
			{.cipher = {
				.algo = RTE_CRYPTO_CIPHER_NULL,
				.block_size = 1,
				.key_size = {
					.min = 0,
					.max = 0,
					.increment = 0
				},
				.iv_size = {
					.min = 0,
					.max = 0,
					.increment = 0
				}
			}, },
		}, }
	},

	RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
};

static const struct rte_security_capability cn10k_eth_sec_capabilities[] = {
	{ /* IPsec Inline Protocol ESP Tunnel Ingress */
		.action = RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL,
		.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
		.ipsec = {
			.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
			.mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL,
			.direction = RTE_SECURITY_IPSEC_SA_DIR_INGRESS,
			.replay_win_sz_max = ROC_AR_WIN_SIZE_MAX,
			.options = {
				.udp_encap = 1,
				.udp_ports_verify = 1,
				.copy_df = 1,
				.copy_dscp = 1,
				.copy_flabel = 1,
				.tunnel_hdr_verify = RTE_SECURITY_IPSEC_TUNNEL_VERIFY_SRC_DST_ADDR,
				.dec_ttl = 1,
				.ip_csum_enable = 1,
				.l4_csum_enable = 1,
				.stats = 1,
				.esn = 1,
			},
		},
		.crypto_capabilities = cn10k_eth_sec_crypto_caps,
		.ol_flags = RTE_SECURITY_TX_OLOAD_NEED_MDATA
	},
	{ /* IPsec Inline Protocol ESP Tunnel Egress */
		.action = RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL,
		.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
		.ipsec = {
			.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
			.mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL,
			.direction = RTE_SECURITY_IPSEC_SA_DIR_EGRESS,
			.replay_win_sz_max = ROC_AR_WIN_SIZE_MAX,
			.options = {
				.iv_gen_disable = 1,
				.udp_encap = 1,
				.udp_ports_verify = 1,
				.copy_df = 1,
				.copy_dscp = 1,
				.copy_flabel = 1,
				.dec_ttl = 1,
				.ip_csum_enable = 1,
				.l4_csum_enable = 1,
				.stats = 1,
				.esn = 1,
			},
		},
		.crypto_capabilities = cn10k_eth_sec_crypto_caps,
		.ol_flags = RTE_SECURITY_TX_OLOAD_NEED_MDATA
	},
	{ /* IPsec Inline Protocol ESP Transport Egress */
		.action = RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL,
		.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
		.ipsec = {
			.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
			.mode = RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT,
			.direction = RTE_SECURITY_IPSEC_SA_DIR_EGRESS,
			.replay_win_sz_max = ROC_AR_WIN_SIZE_MAX,
			.options = {
				.iv_gen_disable = 1,
				.udp_encap = 1,
				.udp_ports_verify = 1,
				.copy_df = 1,
				.copy_dscp = 1,
				.dec_ttl = 1,
				.ip_csum_enable = 1,
				.l4_csum_enable = 1,
				.stats = 1,
				.esn = 1,
			},
		},
		.crypto_capabilities = cn10k_eth_sec_crypto_caps,
		.ol_flags = RTE_SECURITY_TX_OLOAD_NEED_MDATA
	},
	{ /* IPsec Inline Protocol ESP Transport Ingress */
		.action = RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL,
		.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
		.ipsec = {
			.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
			.mode = RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT,
			.direction = RTE_SECURITY_IPSEC_SA_DIR_INGRESS,
			.replay_win_sz_max = ROC_AR_WIN_SIZE_MAX,
			.options = {
				.udp_encap = 1,
				.udp_ports_verify = 1,
				.copy_df = 1,
				.copy_dscp = 1,
				.dec_ttl = 1,
				.ip_csum_enable = 1,
				.l4_csum_enable = 1,
				.stats = 1,
				.esn = 1,
			},
		},
		.crypto_capabilities = cn10k_eth_sec_crypto_caps,
		.ol_flags = RTE_SECURITY_TX_OLOAD_NEED_MDATA
	},
	{
		.action = RTE_SECURITY_ACTION_TYPE_NONE
	}
};

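/*
 * Free an mbuf chain segment by segment directly to its NPA aura via
 * roc_npa_aura_op_free(), bypassing the mempool per-lcore cache (hence
 * "no_cache"). Useful from event/callback context where the caller may not
 * own a valid local mempool cache.
 */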
static inline void
cnxk_pktmbuf_free_no_cache(struct rte_mbuf *mbuf)
{
	struct rte_mbuf *next;

	if (!mbuf)
		return;
	do {
		next = mbuf->next;
		roc_npa_aura_op_free(mbuf->pool->pool_id, 1, (rte_iova_t)mbuf);
		mbuf = next;
	} while (mbuf != NULL);
}

void
cn10k_eth_sec_sso_work_cb(uint64_t *gw, void *args, uint32_t soft_exp_event)
{
	struct rte_eth_event_ipsec_desc desc;
	struct cn10k_sec_sess_priv sess_priv;
	struct cn10k_outb_priv_data *priv;
	struct roc_ot_ipsec_outb_sa *sa;
	struct cpt_cn10k_res_s *res;
	struct rte_eth_dev *eth_dev;
	struct cnxk_eth_dev *dev;
	static uint64_t warn_cnt;
	uint16_t dlen_adj, rlen;
	struct rte_mbuf *mbuf;
	uintptr_t sa_base;
	uintptr_t nixtx;
	uint8_t port;

	RTE_SET_USED(args);

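	/*
	 * Decode the event from the SSO get-work word pair: gw[0] carries the
	 * tag, with the event type in bits [31:28], the event subtype in bits
	 * [27:20] for CPU events, and the ethdev port in bits [7:0]. gw[1]
	 * carries the work pointer (mbuf or SA, depending on the event type).
	 */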
	switch ((gw[0] >> 28) & 0xF) {
	case RTE_EVENT_TYPE_ETHDEV:
		/* Event from inbound inline dev due to IPSEC packet bad L4 */
		mbuf = (struct rte_mbuf *)(gw[1] - sizeof(struct rte_mbuf));
		plt_nix_dbg("Received mbuf %p from inline dev inbound", mbuf);
		cnxk_pktmbuf_free_no_cache(mbuf);
		return;
	case RTE_EVENT_TYPE_CPU:
		/* Check for subtype */
		if (((gw[0] >> 20) & 0xFF) == CNXK_ETHDEV_SEC_OUTB_EV_SUB) {
			/* Event from outbound inline error */
			mbuf = (struct rte_mbuf *)gw[1];
			break;
		}
		/* Fall through */
	default:
		if (soft_exp_event & 0x1) {
			sa = (struct roc_ot_ipsec_outb_sa *)args;
			priv = roc_nix_inl_ot_ipsec_outb_sa_sw_rsvd(sa);
			desc.metadata = (uint64_t)priv->userdata;
			desc.subtype = RTE_ETH_EVENT_IPSEC_SA_TIME_EXPIRY;
			eth_dev = &rte_eth_devices[soft_exp_event >> 8];
			rte_eth_dev_callback_process(eth_dev,
						     RTE_ETH_EVENT_IPSEC, &desc);
		} else {
			plt_err("Unknown event gw[0] = 0x%016" PRIx64
				", gw[1] = 0x%016" PRIx64,
				gw[0], gw[1]);
		}
		return;
	}

	/* Get ethdev port from tag */
	port = gw[0] & 0xFF;
	eth_dev = &rte_eth_devices[port];
	dev = cnxk_eth_pmd_priv(eth_dev);

	sess_priv.u64 = *rte_security_dynfield(mbuf);
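	/*
	 * Recover the expected post-IPsec packet length from the session's
	 * precomputed roundup parameters: round (payload + roundup_len) up to
	 * the cipher block size (roundup_byte) and add the fixed per-packet
	 * ESP overhead (partial_len). Illustrative example (values assumed):
	 * payload 100B, roundup_len 2, roundup_byte 16, partial_len 30 gives
	 * rlen = roundup(102, 16) + 30 = 112 + 30 = 142.
	 */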
	/* Calculate dlen adj */
	dlen_adj = mbuf->pkt_len - mbuf->l2_len;
	rlen = (dlen_adj + sess_priv.roundup_len) +
	       (sess_priv.roundup_byte - 1);
	rlen &= ~(uint64_t)(sess_priv.roundup_byte - 1);
	rlen += sess_priv.partial_len;
	dlen_adj = rlen - dlen_adj;

	/* Find the res area residing on next cacheline after end of data */
	nixtx = rte_pktmbuf_mtod(mbuf, uintptr_t) + mbuf->pkt_len + dlen_adj;
	nixtx += BIT_ULL(7);
	nixtx = (nixtx - 1) & ~(BIT_ULL(7) - 1);
	res = (struct cpt_cn10k_res_s *)nixtx;

	plt_nix_dbg("Outbound error, mbuf %p, sa_index %u, compcode %x uc %x",
		    mbuf, sess_priv.sa_idx, res->compcode, res->uc_compcode);

	sa_base = dev->outb.sa_base;
	sa = roc_nix_inl_ot_ipsec_outb_sa(sa_base, sess_priv.sa_idx);
	priv = roc_nix_inl_ot_ipsec_outb_sa_sw_rsvd(sa);

	memset(&desc, 0, sizeof(desc));

	switch (res->uc_compcode) {
	case ROC_IE_OT_UCC_ERR_SA_OVERFLOW:
		desc.subtype = RTE_ETH_EVENT_IPSEC_ESN_OVERFLOW;
		break;
	case ROC_IE_OT_UCC_ERR_PKT_IP:
		warn_cnt++;
		if (warn_cnt % 10000 == 0)
			plt_warn("Outbound error, bad ip pkt, mbuf %p,"
				 " sa_index %u (total warnings %" PRIu64 ")",
				 mbuf, sess_priv.sa_idx, warn_cnt);
		desc.subtype = RTE_ETH_EVENT_IPSEC_UNKNOWN;
		break;
	default:
		warn_cnt++;
		if (warn_cnt % 10000 == 0)
			plt_warn("Outbound error, mbuf %p, sa_index %u,"
				 " compcode %x uc %x,"
				 " (total warnings %" PRIu64 ")",
				 mbuf, sess_priv.sa_idx, res->compcode,
				 res->uc_compcode, warn_cnt);
		desc.subtype = RTE_ETH_EVENT_IPSEC_UNKNOWN;
		break;
	}

	desc.metadata = (uint64_t)priv->userdata;
	rte_eth_dev_callback_process(eth_dev, RTE_ETH_EVENT_IPSEC, &desc);
	cnxk_pktmbuf_free_no_cache(mbuf);
}

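/*
 * Debug-only override of the per-SA IV. Parses a comma-separated list of
 * byte values (in any base accepted by strtoul(), e.g. "0x11,0x22,...")
 * from the caller-provided string and forces the SA to source its IV from
 * the SA context instead of generating one per packet. Driven by the
 * ETH_SEC_IV_OVR environment variable in the session create path below.
 */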
static void
outb_dbg_iv_update(struct roc_ot_ipsec_outb_sa *outb_sa, const char *__iv_str)
{
	uint8_t *iv_dbg = outb_sa->iv.iv_dbg;
	char *iv_str = strdup(__iv_str);
	char *iv_b = NULL;
	int len = 16;
	char *save;
	int i;

	if (!iv_str)
		return;

	if (outb_sa->w2.s.enc_type == ROC_IE_OT_SA_ENC_AES_GCM ||
	    outb_sa->w2.s.enc_type == ROC_IE_OT_SA_ENC_AES_CTR ||
	    outb_sa->w2.s.enc_type == ROC_IE_OT_SA_ENC_AES_CCM ||
	    outb_sa->w2.s.auth_type == ROC_IE_OT_SA_AUTH_AES_GMAC) {
		memset(outb_sa->iv.s.iv_dbg1, 0, sizeof(outb_sa->iv.s.iv_dbg1));
		memset(outb_sa->iv.s.iv_dbg2, 0, sizeof(outb_sa->iv.s.iv_dbg2));

		iv_dbg = outb_sa->iv.s.iv_dbg1;
		for (i = 0; i < 4; i++) {
			iv_b = strtok_r(i ? NULL : iv_str, ",", &save);
			if (!iv_b)
				break;
			iv_dbg[i] = strtoul(iv_b, NULL, 0);
		}
		*(uint32_t *)iv_dbg = rte_be_to_cpu_32(*(uint32_t *)iv_dbg);

		iv_dbg = outb_sa->iv.s.iv_dbg2;
		for (i = 0; i < 4; i++) {
			iv_b = strtok_r(NULL, ",", &save);
			if (!iv_b)
				break;
			iv_dbg[i] = strtoul(iv_b, NULL, 0);
		}
		*(uint32_t *)iv_dbg = rte_be_to_cpu_32(*(uint32_t *)iv_dbg);

	} else {
		iv_dbg = outb_sa->iv.iv_dbg;
		memset(iv_dbg, 0, sizeof(outb_sa->iv.iv_dbg));

		for (i = 0; i < len; i++) {
			iv_b = strtok_r(i ? NULL : iv_str, ",", &save);
			if (!iv_b)
				break;
			iv_dbg[i] = strtoul(iv_b, NULL, 0);
		}
		*(uint64_t *)iv_dbg = rte_be_to_cpu_64(*(uint64_t *)iv_dbg);
		*(uint64_t *)&iv_dbg[8] =
			rte_be_to_cpu_64(*(uint64_t *)&iv_dbg[8]);
	}

	/* Update source of IV */
	outb_sa->w2.s.iv_src = ROC_IE_OT_SA_IV_SRC_FROM_SA;
	free(iv_str);
}

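/*
 * When soft byte/packet lifetime limits are configured, point the SA's
 * error-control context at the soft-expiry error ring for this SA index so
 * that expiry events can be reported back to the application.
 */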
static int
cn10k_eth_sec_outb_sa_misc_fill(struct roc_nix *roc_nix,
				struct roc_ot_ipsec_outb_sa *sa, void *sa_cptr,
				struct rte_security_ipsec_xform *ipsec_xfrm,
				uint32_t sa_idx)
{
	uint64_t *ring_base, ring_addr;

	if (ipsec_xfrm->life.bytes_soft_limit |
	    ipsec_xfrm->life.packets_soft_limit) {
		ring_base = roc_nix_inl_outb_ring_base_get(roc_nix);
		if (ring_base == NULL)
			return -ENOTSUP;

		ring_addr = ring_base[sa_idx >>
				      ROC_NIX_SOFT_EXP_ERR_RING_MAX_ENTRY_LOG2];
		sa->ctx.err_ctl.s.mode = ROC_IE_OT_ERR_CTL_MODE_RING;
		sa->ctx.err_ctl.s.address = ring_addr >> 3;
		sa->w0.s.ctx_id = ((uintptr_t)sa_cptr >> 51) & 0x1ff;
	}

	return 0;
}

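/*
 * Create an inline IPsec session. The SA parameters are first filled into
 * a scratch copy (sa_dptr) and then synced into the hardware-owned SA
 * region through roc_nix_inl_ctx_write() so that the CPT context cache
 * stays coherent with the update.
 */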
static int
cn10k_eth_sec_session_create(void *device,
			     struct rte_security_session_conf *conf,
			     struct rte_security_session *sess,
			     struct rte_mempool *mempool)
{
	struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)device;
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
	struct rte_security_ipsec_xform *ipsec;
	struct cn10k_sec_sess_priv sess_priv;
	struct rte_crypto_sym_xform *crypto;
	struct cnxk_eth_sec_sess *eth_sec;
	struct roc_nix *nix = &dev->nix;
	bool inbound, inl_dev;
	rte_spinlock_t *lock;
	char tbuf[128] = {0};
	int rc = 0;

	if (conf->action_type != RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL)
		return -ENOTSUP;

	if (conf->protocol != RTE_SECURITY_PROTOCOL_IPSEC)
		return -ENOTSUP;

	if (rte_security_dynfield_register() < 0)
		return -ENOTSUP;

	if (conf->ipsec.options.ip_reassembly_en &&
	    dev->reass_dynfield_off < 0) {
		if (rte_eth_ip_reassembly_dynfield_register(&dev->reass_dynfield_off,
							    &dev->reass_dynflag_bit) < 0)
			return -rte_errno;
	}

	ipsec = &conf->ipsec;
	crypto = conf->crypto_xform;
	inbound = !!(ipsec->direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS);
	inl_dev = !!dev->inb.inl_dev;

	/* Search if a session already exists */
	if (cnxk_eth_sec_sess_get_by_spi(dev, ipsec->spi, inbound)) {
		plt_err("%s SA with SPI %u already in use",
			inbound ? "Inbound" : "Outbound", ipsec->spi);
		return -EEXIST;
	}

	if (rte_mempool_get(mempool, (void **)&eth_sec)) {
		plt_err("Could not allocate security session private data");
		return -ENOMEM;
	}

	memset(eth_sec, 0, sizeof(struct cnxk_eth_sec_sess));
	sess_priv.u64 = 0;

	lock = inbound ? &dev->inb.lock : &dev->outb.lock;
	rte_spinlock_lock(lock);

	/* Acquire lock on inline dev for inbound */
	if (inbound && inl_dev)
		roc_nix_inl_dev_lock();

	if (inbound) {
		struct roc_ot_ipsec_inb_sa *inb_sa, *inb_sa_dptr;
		struct cn10k_inb_priv_data *inb_priv;
		uint32_t spi_mask;
		uintptr_t sa;

		PLT_STATIC_ASSERT(sizeof(struct cn10k_inb_priv_data) <
				  ROC_NIX_INL_OT_IPSEC_INB_SW_RSVD);

		spi_mask = roc_nix_inl_inb_spi_range(nix, inl_dev, NULL, NULL);

		/* Get Inbound SA from NIX_RX_IPSEC_SA_BASE */
		sa = roc_nix_inl_inb_sa_get(nix, inl_dev, ipsec->spi);
		if (!sa && dev->inb.inl_dev) {
			snprintf(tbuf, sizeof(tbuf),
				 "Failed to create ingress sa, inline dev "
				 "not found or spi not in range");
			rc = -ENOTSUP;
			goto mempool_put;
		} else if (!sa) {
			snprintf(tbuf, sizeof(tbuf),
				 "Failed to create ingress sa");
			rc = -EFAULT;
			goto mempool_put;
		}

		inb_sa = (struct roc_ot_ipsec_inb_sa *)sa;

		/* Check if SA is already in use */
		if (inb_sa->w2.s.valid) {
			snprintf(tbuf, sizeof(tbuf),
				 "Inbound SA with SPI %u already in use",
				 ipsec->spi);
			rc = -EBUSY;
			goto mempool_put;
		}

		inb_sa_dptr = (struct roc_ot_ipsec_inb_sa *)dev->inb.sa_dptr;
		memset(inb_sa_dptr, 0, sizeof(struct roc_ot_ipsec_inb_sa));

		/* Fill inbound sa params */
		rc = cnxk_ot_ipsec_inb_sa_fill(inb_sa_dptr, ipsec, crypto,
					       true);
		if (rc) {
			snprintf(tbuf, sizeof(tbuf),
				 "Failed to init inbound sa, rc=%d", rc);
			goto mempool_put;
		}

		inb_priv = roc_nix_inl_ot_ipsec_inb_sa_sw_rsvd(inb_sa);
		/* Back pointer to get eth_sec */
		inb_priv->eth_sec = eth_sec;
		/* Save userdata in inb private area */
		inb_priv->userdata = conf->userdata;

		/* Save SA index/SPI in cookie for now */
		inb_sa_dptr->w1.s.cookie =
			rte_cpu_to_be_32(ipsec->spi & spi_mask);

		if (ipsec->options.stats == 1) {
			/* Enable mib counters */
			inb_sa_dptr->w0.s.count_mib_bytes = 1;
			inb_sa_dptr->w0.s.count_mib_pkts = 1;
		}
		/* Prepare session priv */
		sess_priv.inb_sa = 1;
		sess_priv.sa_idx = ipsec->spi & spi_mask;

		/* Pointer from eth_sec -> inb_sa */
		eth_sec->sa = inb_sa;
		eth_sec->sess = sess;
		eth_sec->sa_idx = ipsec->spi & spi_mask;
		eth_sec->spi = ipsec->spi;
		eth_sec->inl_dev = !!dev->inb.inl_dev;
		eth_sec->inb = true;

		TAILQ_INSERT_TAIL(&dev->inb.list, eth_sec, entry);
		dev->inb.nb_sess++;
		/* Sync session in context cache */
		rc = roc_nix_inl_ctx_write(&dev->nix, inb_sa_dptr, eth_sec->sa,
					   eth_sec->inb,
					   sizeof(struct roc_ot_ipsec_inb_sa));
		if (rc)
			goto mempool_put;

		if (conf->ipsec.options.ip_reassembly_en) {
			inb_priv->reass_dynfield_off = dev->reass_dynfield_off;
			inb_priv->reass_dynflag_bit = dev->reass_dynflag_bit;
		}

	} else {
		struct roc_ot_ipsec_outb_sa *outb_sa, *outb_sa_dptr;
		struct cn10k_outb_priv_data *outb_priv;
		struct cnxk_ipsec_outb_rlens *rlens;
		uint64_t sa_base = dev->outb.sa_base;
		const char *iv_str;
		uint32_t sa_idx;

		PLT_STATIC_ASSERT(sizeof(struct cn10k_outb_priv_data) <
				  ROC_NIX_INL_OT_IPSEC_OUTB_SW_RSVD);

		/* Alloc an sa index */
		rc = cnxk_eth_outb_sa_idx_get(dev, &sa_idx, ipsec->spi);
		if (rc)
			goto mempool_put;

		outb_sa = roc_nix_inl_ot_ipsec_outb_sa(sa_base, sa_idx);
		outb_priv = roc_nix_inl_ot_ipsec_outb_sa_sw_rsvd(outb_sa);
		rlens = &outb_priv->rlens;

		outb_sa_dptr = (struct roc_ot_ipsec_outb_sa *)dev->outb.sa_dptr;
		memset(outb_sa_dptr, 0, sizeof(struct roc_ot_ipsec_outb_sa));

		/* Fill outbound sa params */
		rc = cnxk_ot_ipsec_outb_sa_fill(outb_sa_dptr, ipsec, crypto);
		if (rc) {
			snprintf(tbuf, sizeof(tbuf),
				 "Failed to init outbound sa, rc=%d", rc);
			rc |= cnxk_eth_outb_sa_idx_put(dev, sa_idx);
			goto mempool_put;
		}

		if (conf->ipsec.options.iv_gen_disable == 1) {
			iv_str = getenv("ETH_SEC_IV_OVR");
			if (iv_str)
				outb_dbg_iv_update(outb_sa_dptr, iv_str);
		}
		/* Fill outbound sa misc params */
		rc = cn10k_eth_sec_outb_sa_misc_fill(&dev->nix, outb_sa_dptr,
						     outb_sa, ipsec, sa_idx);
		if (rc) {
			snprintf(tbuf, sizeof(tbuf),
				 "Failed to init outb sa misc params, rc=%d",
				 rc);
			rc |= cnxk_eth_outb_sa_idx_put(dev, sa_idx);
			goto mempool_put;
		}

		/* Save userdata */
		outb_priv->userdata = conf->userdata;
		outb_priv->sa_idx = sa_idx;
		outb_priv->eth_sec = eth_sec;

		/* Save rlen info */
		cnxk_ipsec_outb_rlens_get(rlens, ipsec, crypto);

		if (ipsec->options.stats == 1) {
			/* Enable mib counters */
			outb_sa_dptr->w0.s.count_mib_bytes = 1;
			outb_sa_dptr->w0.s.count_mib_pkts = 1;
		}

		/* Prepare session priv */
		sess_priv.sa_idx = outb_priv->sa_idx;
		sess_priv.roundup_byte = rlens->roundup_byte;
		sess_priv.roundup_len = rlens->roundup_len;
		sess_priv.partial_len = rlens->partial_len;
		sess_priv.mode = outb_sa_dptr->w2.s.ipsec_mode;
		sess_priv.outer_ip_ver = outb_sa_dptr->w2.s.outer_ip_ver;
		/*
		 * Propagate inner checksum enable from SA to fast path. Note
		 * the encoding is inverted: bit 1 set means inner IP checksum
		 * offload is disabled, bit 0 set means inner L4 checksum
		 * offload is disabled.
		 */
		sess_priv.chksum = (!ipsec->options.ip_csum_enable << 1 |
				    !ipsec->options.l4_csum_enable);
		sess_priv.dec_ttl = ipsec->options.dec_ttl;

		/* Pointer from eth_sec -> outb_sa */
		eth_sec->sa = outb_sa;
		eth_sec->sess = sess;
		eth_sec->sa_idx = sa_idx;
		eth_sec->spi = ipsec->spi;

		TAILQ_INSERT_TAIL(&dev->outb.list, eth_sec, entry);
		dev->outb.nb_sess++;
		/* Sync session in context cache */
		rc = roc_nix_inl_ctx_write(&dev->nix, outb_sa_dptr, eth_sec->sa,
					   eth_sec->inb,
					   sizeof(struct roc_ot_ipsec_outb_sa));
		if (rc)
			goto mempool_put;
	}
	if (inbound && inl_dev)
		roc_nix_inl_dev_unlock();
	rte_spinlock_unlock(lock);

	plt_nix_dbg("Created %s session with spi=%u, sa_idx=%u inl_dev=%u",
		    inbound ? "inbound" : "outbound", eth_sec->spi,
		    eth_sec->sa_idx, eth_sec->inl_dev);
	/*
	 * Update fast path info in priv area.
	 */
	set_sec_session_private_data(sess, (void *)sess_priv.u64);

	return 0;
mempool_put:
	if (inbound && inl_dev)
		roc_nix_inl_dev_unlock();
	rte_spinlock_unlock(lock);

	rte_mempool_put(mempool, eth_sec);
	if (rc)
		plt_err("%s", tbuf);
	return rc;
}

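/*
 * Destroy an inline IPsec session: the SA is disabled by syncing a freshly
 * initialized SA context over the live one, the outbound SA index (if any)
 * is released, and the session object is returned to its mempool.
 */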
static int
cn10k_eth_sec_session_destroy(void *device, struct rte_security_session *sess)
{
	struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)device;
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
	struct cnxk_eth_sec_sess *eth_sec;
	struct rte_mempool *mp;
	rte_spinlock_t *lock;
	void *sa_dptr;

	eth_sec = cnxk_eth_sec_sess_get_by_sess(dev, sess);
	if (!eth_sec)
		return -ENOENT;

	lock = eth_sec->inb ? &dev->inb.lock : &dev->outb.lock;
	rte_spinlock_lock(lock);

	if (eth_sec->inl_dev)
		roc_nix_inl_dev_lock();

	if (eth_sec->inb) {
		/* Disable SA */
		sa_dptr = dev->inb.sa_dptr;
		roc_ot_ipsec_inb_sa_init(sa_dptr, true);

		roc_nix_inl_ctx_write(&dev->nix, sa_dptr, eth_sec->sa,
				      eth_sec->inb,
				      sizeof(struct roc_ot_ipsec_inb_sa));
		TAILQ_REMOVE(&dev->inb.list, eth_sec, entry);
		dev->inb.nb_sess--;
	} else {
		/* Disable SA */
		sa_dptr = dev->outb.sa_dptr;
		roc_ot_ipsec_outb_sa_init(sa_dptr);

		roc_nix_inl_ctx_write(&dev->nix, sa_dptr, eth_sec->sa,
				      eth_sec->inb,
				      sizeof(struct roc_ot_ipsec_outb_sa));
		/* Release Outbound SA index */
		cnxk_eth_outb_sa_idx_put(dev, eth_sec->sa_idx);
		TAILQ_REMOVE(&dev->outb.list, eth_sec, entry);
		dev->outb.nb_sess--;
	}
	if (eth_sec->inl_dev)
		roc_nix_inl_dev_unlock();

	rte_spinlock_unlock(lock);

	plt_nix_dbg("Destroyed %s session with spi=%u, sa_idx=%u, inl_dev=%u",
		    eth_sec->inb ? "inbound" : "outbound", eth_sec->spi,
		    eth_sec->sa_idx, eth_sec->inl_dev);

	/* Put eth_sec object back to pool */
	mp = rte_mempool_from_obj(eth_sec);
	set_sec_session_private_data(sess, NULL);
	rte_mempool_put(mp, eth_sec);
	return 0;
}

static const struct rte_security_capability *
cn10k_eth_sec_capabilities_get(void *device __rte_unused)
{
	return cn10k_eth_sec_capabilities;
}

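/*
 * Update an existing inline IPsec session in place by refilling the SA
 * scratch copy from the new conf and syncing it over the live SA context.
 */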
static int
cn10k_eth_sec_session_update(void *device, struct rte_security_session *sess,
			     struct rte_security_session_conf *conf)
{
	struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)device;
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
	struct roc_ot_ipsec_inb_sa *inb_sa_dptr;
	struct rte_security_ipsec_xform *ipsec;
	struct rte_crypto_sym_xform *crypto;
	struct cnxk_eth_sec_sess *eth_sec;
	bool inbound;
	int rc;

	if (conf->action_type != RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL ||
	    conf->protocol != RTE_SECURITY_PROTOCOL_IPSEC)
		return -ENOENT;

	ipsec = &conf->ipsec;
	crypto = conf->crypto_xform;
	inbound = !!(ipsec->direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS);

	eth_sec = cnxk_eth_sec_sess_get_by_sess(dev, sess);
	if (!eth_sec)
		return -ENOENT;

	eth_sec->spi = conf->ipsec.spi;

	if (inbound) {
		inb_sa_dptr = (struct roc_ot_ipsec_inb_sa *)dev->inb.sa_dptr;
		memset(inb_sa_dptr, 0, sizeof(struct roc_ot_ipsec_inb_sa));

		rc = cnxk_ot_ipsec_inb_sa_fill(inb_sa_dptr, ipsec, crypto,
					       true);
		if (rc)
			return -EINVAL;

		rc = roc_nix_inl_ctx_write(&dev->nix, inb_sa_dptr, eth_sec->sa,
					   eth_sec->inb,
					   sizeof(struct roc_ot_ipsec_inb_sa));
		if (rc)
			return -EINVAL;
	} else {
		struct roc_ot_ipsec_outb_sa *outb_sa_dptr;

		outb_sa_dptr = (struct roc_ot_ipsec_outb_sa *)dev->outb.sa_dptr;
		memset(outb_sa_dptr, 0, sizeof(struct roc_ot_ipsec_outb_sa));

		rc = cnxk_ot_ipsec_outb_sa_fill(outb_sa_dptr, ipsec, crypto);
		if (rc)
			return -EINVAL;
		rc = roc_nix_inl_ctx_write(&dev->nix, outb_sa_dptr, eth_sec->sa,
					   eth_sec->inb,
					   sizeof(struct roc_ot_ipsec_outb_sa));
		if (rc)
			return -EINVAL;
	}

	return 0;
}

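/*
 * Read back the hardware SA contents for a session. The CPT context is
 * flushed first so the copy reflects hardware state; the 1 ms delay gives
 * the flush time to land before the SA memory is read.
 *
 * Illustrative usage (variable names assumed by this sketch):
 *
 *	struct roc_ot_ipsec_inb_sa sa_copy;
 *
 *	rc = rte_pmd_cnxk_hw_sa_read(eth_dev, sess, &sa_copy,
 *				     sizeof(sa_copy));
 */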
int
rte_pmd_cnxk_hw_sa_read(void *device, struct rte_security_session *sess,
			void *data, uint32_t len)
{
	struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)device;
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
	struct cnxk_eth_sec_sess *eth_sec;
	int rc;

	eth_sec = cnxk_eth_sec_sess_get_by_sess(dev, sess);
	if (eth_sec == NULL)
		return -EINVAL;

	rc = roc_nix_inl_sa_sync(&dev->nix, eth_sec->sa, eth_sec->inb,
				 ROC_NIX_INL_SA_OP_FLUSH);
	if (rc)
		return -EINVAL;
	rte_delay_ms(1);
	memcpy(data, eth_sec->sa, len);

	return 0;
}

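/*
 * Overwrite the hardware SA contents for a session with caller-provided
 * data, synced through the same context-write path used at session
 * create/destroy.
 */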
int
rte_pmd_cnxk_hw_sa_write(void *device, struct rte_security_session *sess,
			 void *data, uint32_t len)
{
	struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)device;
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
	struct cnxk_eth_sec_sess *eth_sec;
	int rc = -EINVAL;

	eth_sec = cnxk_eth_sec_sess_get_by_sess(dev, sess);
	if (eth_sec == NULL)
		return rc;
	rc = roc_nix_inl_ctx_write(&dev->nix, data, eth_sec->sa, eth_sec->inb,
				   len);
	if (rc)
		return rc;

	return 0;
}

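/*
 * Fetch per-SA packet/byte counters. The SA context is flushed before the
 * MIB counters are read; counters are only maintained when the session was
 * created with ipsec.options.stats set.
 */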
static int
cn10k_eth_sec_session_stats_get(void *device, struct rte_security_session *sess,
				struct rte_security_stats *stats)
{
	struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)device;
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
	struct cnxk_eth_sec_sess *eth_sec;
	int rc;

	eth_sec = cnxk_eth_sec_sess_get_by_sess(dev, sess);
	if (eth_sec == NULL)
		return -EINVAL;

	rc = roc_nix_inl_sa_sync(&dev->nix, eth_sec->sa, eth_sec->inb,
				 ROC_NIX_INL_SA_OP_FLUSH);
	if (rc)
		return -EINVAL;
	rte_delay_ms(1);

	stats->protocol = RTE_SECURITY_PROTOCOL_IPSEC;

	if (eth_sec->inb) {
		stats->ipsec.ipackets =
			((struct roc_ot_ipsec_inb_sa *)eth_sec->sa)->ctx.mib_pkts;
		stats->ipsec.ibytes =
			((struct roc_ot_ipsec_inb_sa *)eth_sec->sa)->ctx.mib_octs;
	} else {
		stats->ipsec.opackets =
			((struct roc_ot_ipsec_outb_sa *)eth_sec->sa)->ctx.mib_pkts;
		stats->ipsec.obytes =
			((struct roc_ot_ipsec_outb_sa *)eth_sec->sa)->ctx.mib_octs;
	}

	return 0;
}

void
cn10k_eth_sec_ops_override(void)
{
	static int init_once;

	if (init_once)
		return;
	init_once = 1;

	/* Update platform specific ops */
	cnxk_eth_sec_ops.session_create = cn10k_eth_sec_session_create;
	cnxk_eth_sec_ops.session_destroy = cn10k_eth_sec_session_destroy;
	cnxk_eth_sec_ops.capabilities_get = cn10k_eth_sec_capabilities_get;
	cnxk_eth_sec_ops.session_update = cn10k_eth_sec_session_update;
	cnxk_eth_sec_ops.session_stats_get = cn10k_eth_sec_session_stats_get;
}