/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2020 Marvell International Ltd.
 */

#ifndef __OTX2_ETHDEV_SEC_TX_H__
#define __OTX2_ETHDEV_SEC_TX_H__

#include <rte_security.h>
#include <rte_mbuf.h>

#include "otx2_ethdev_sec.h"
#include "otx2_security.h"
/*
 * Per-packet header inserted between the Ethernet header and the IP
 * header on the outbound fast path (see otx2_sec_event_tx below).
 * All multi-byte fields are written in big-endian byte order.
 */
struct otx2_ipsec_fp_out_hdr {
	uint32_t ip_id;	/* IP identification (big endian) */
	uint32_t seq;	/* Low 32 bits of the sequence number (big endian) */
	uint8_t iv[16];	/* Per-packet IV; GCM: 4-byte SA nonce + 12 zero bytes */
};
20 static __rte_always_inline int32_t
otx2_ipsec_fp_out_rlen_get(struct otx2_sec_session_ipsec_ip * sess,uint32_t plen)21 otx2_ipsec_fp_out_rlen_get(struct otx2_sec_session_ipsec_ip *sess,
22 uint32_t plen)
23 {
24 uint32_t enc_payload_len;
25
26 enc_payload_len = RTE_ALIGN_CEIL(plen + sess->roundup_len,
27 sess->roundup_byte);
28
29 return sess->partial_len + enc_payload_len;
30 }

static __rte_always_inline void
otx2_ssogws_head_wait(struct otx2_ssogws *ws);

/*
 * Transmit one packet through the inline IPsec outbound path: grow the
 * mbuf in place to the post-encryption size, insert the custom
 * otx2_ipsec_fp_out_hdr after the Ethernet header, build the CPT
 * instruction plus the NIX send descriptors in the mbuf headroom, and
 * submit the instruction over the session's LMT line.
 *
 * Returns 1 when the packet was handed to CPT, 0 when it was dropped
 * (or, with MBUF_NOFF and refcount > 1, merely released by this caller).
 */
static __rte_always_inline int
otx2_sec_event_tx(struct otx2_ssogws *ws, struct rte_event *ev,
		  struct rte_mbuf *m, const struct otx2_eth_txq *txq,
		  const uint32_t offload_flags)
{
	uint32_t dlen, rlen, desc_headroom, extend_head, extend_tail;
	struct otx2_sec_session_ipsec_ip *sess;
	struct otx2_ipsec_fp_out_hdr *hdr;
	struct otx2_ipsec_fp_out_sa *sa;
	uint64_t data_addr, desc_addr;
	struct otx2_sec_session *priv;
	struct otx2_cpt_inst_s inst;
	uint64_t lmt_status;
	char *data;

	/* Scratch area placed in the mbuf headroom: CPT writes its result
	 * word into cpt_res; the NIX send descriptors that follow describe
	 * the post-encryption packet for transmission.
	 */
	struct desc {
		struct otx2_cpt_res cpt_res __rte_aligned(OTX2_CPT_RES_ALIGN);
		struct nix_send_hdr_s nix_hdr
			     __rte_aligned(OTX2_NIX_SEND_DESC_ALIGN);
		union nix_send_sg_s nix_sg;
		struct nix_iova_s nix_iova;
	} *sd;

	priv = get_sec_session_private_data((void *)(*rte_security_dynfield(m)));
	sess = &priv->ipsec.ip;
	sa = &sess->out_sa;

	RTE_ASSERT(sess->cpt_lmtline != NULL);
	/* Multi-seg refcount handling and QinQ insertion are not supported
	 * on this path (debug-only assert; see also the drop: path below).
	 */
	RTE_ASSERT(!(offload_flags & (NIX_TX_OFFLOAD_MBUF_NOFF_F |
				      NIX_TX_OFFLOAD_VLAN_QINQ_F)));

	/* CPT input length: everything after the Ethernet header plus the
	 * custom header inserted below; rlen is the expected output length
	 * after encryption (padding and SA overhead included).
	 */
	dlen = rte_pktmbuf_pkt_len(m) + sizeof(*hdr) - RTE_ETHER_HDR_LEN;
	rlen = otx2_ipsec_fp_out_rlen_get(sess, dlen - sizeof(*hdr));

	/* The desc layout relies on cpt_res ending on a NIX-descriptor
	 * aligned boundary.
	 */
	RTE_BUILD_BUG_ON(OTX2_CPT_RES_ALIGN % OTX2_NIX_SEND_DESC_ALIGN);
	RTE_BUILD_BUG_ON(sizeof(sd->cpt_res) % OTX2_NIX_SEND_DESC_ALIGN);

	extend_head = sizeof(*hdr);
	extend_tail = rlen - dlen;

	/* Worst-case headroom needed for a properly aligned struct desc */
	desc_headroom = (OTX2_CPT_RES_ALIGN - 1) + sizeof(*sd);

	/* Only contiguous mbufs with room for the header insertion, the
	 * descriptor scratch area and the ciphertext growth are handled.
	 */
	if (unlikely(!rte_pktmbuf_is_contiguous(m)) ||
	    unlikely(rte_pktmbuf_headroom(m) < extend_head + desc_headroom) ||
	    unlikely(rte_pktmbuf_tailroom(m) < extend_tail)) {
		goto drop;
	}

	/*
	 * Extend mbuf data to point to the expected packet buffer for NIX.
	 * This includes the Ethernet header followed by the encrypted IPsec
	 * payload
	 */
	rte_pktmbuf_append(m, extend_tail);
	data = rte_pktmbuf_prepend(m, extend_head);
	data_addr = rte_pktmbuf_iova(m);

	/*
	 * Move the Ethernet header, to insert otx2_ipsec_fp_out_hdr prior
	 * to the IP header
	 */
	memcpy(data, data + sizeof(*hdr), RTE_ETHER_HDR_LEN);

	hdr = (struct otx2_ipsec_fp_out_hdr *)(data + RTE_ETHER_HDR_LEN);

	if (sa->ctl.enc_type == OTX2_IPSEC_FP_SA_ENC_AES_GCM) {
		/* AES-128-GCM */
		memcpy(hdr->iv, &sa->nonce, 4);
		memset(hdr->iv + 4, 0, 12); //TODO: make it random
	} else {
		/* AES-128-[CBC] + [SHA1] */
		memset(hdr->iv, 0, 16); //TODO: make it random
	}

	/* Keep CPT result and NIX send descriptors in headroom */
	sd = (void *)RTE_PTR_ALIGN(data - desc_headroom, OTX2_CPT_RES_ALIGN);
	desc_addr = data_addr - RTE_PTR_DIFF(data, sd);

	/* Prepare CPT instruction */

	/* nixtx_addr / res_addr / wqe_ptr carry IOVAs shifted per their
	 * respective hardware alignment requirements.
	 */
	inst.nixtx_addr = (desc_addr + offsetof(struct desc, nix_hdr)) >> 4;
	inst.doneint = 0;
	inst.nixtxl = 1;
	inst.res_addr = desc_addr + offsetof(struct desc, cpt_res);
	inst.u64[2] = 0;
	inst.u64[3] = 0;
	inst.wqe_ptr = desc_addr >> 3; /* FIXME: Handle errors */
	inst.qord = 1;
	inst.opcode = OTX2_CPT_OP_INLINE_IPSEC_OUTB;
	inst.dlen = dlen;
	inst.dptr = data_addr + RTE_ETHER_HDR_LEN;
	inst.u64[7] = sess->inst_w7;

	/* First word contains 8 bit completion code & 8 bit uc comp code */
	sd->cpt_res.u16[0] = 0;

	/* Prepare NIX send descriptors for output expected from CPT */

	sd->nix_hdr.w0.u = 0;
	sd->nix_hdr.w1.u = 0;
	sd->nix_hdr.w0.sq = txq->sq;
	sd->nix_hdr.w0.sizem1 = 1;	/* descriptor size in units of 2, minus 1 */
	sd->nix_hdr.w0.total = rte_pktmbuf_data_len(m);
	sd->nix_hdr.w0.aura = npa_lf_aura_handle_to_aura(m->pool->pool_id);

	sd->nix_sg.u = 0;
	sd->nix_sg.subdc = NIX_SUBDC_SG;
	sd->nix_sg.ld_type = NIX_SENDLDTYPE_LDD;
	sd->nix_sg.segs = 1;
	sd->nix_sg.seg1_size = rte_pktmbuf_data_len(m);

	sd->nix_iova.addr = rte_mbuf_data_iova(m);

	/* Mark mempool object as "put" since it is freed by NIX */
	__mempool_check_cookies(m->pool, (void **)&m, 1, 0);

	/* sched_type 0 (RTE_SCHED_TYPE_ORDERED): wait until this worker
	 * holds the head of the flow so that submission order is preserved.
	 */
	if (!ev->sched_type)
		otx2_ssogws_head_wait(ws);

	/* High bits of the sequence number (esn_hi) go via param1/param2;
	 * the low 32 bits travel in the packet header below.
	 */
	inst.param1 = sess->esn_hi >> 16;
	inst.param2 = sess->esn_hi & 0xffff;

	hdr->seq = rte_cpu_to_be_32(sess->seq);
	hdr->ip_id = rte_cpu_to_be_32(sess->ip_id);

	sess->ip_id++;
	sess->esn++;

	/* Ensure all packet/descriptor stores are visible to the device
	 * before the LMT store below.
	 */
	rte_io_wmb();

	/* An LMTST can be cancelled by an intervening context switch or
	 * interrupt; retry the copy + submit until the status is non-zero.
	 */
	do {
		otx2_lmt_mov(sess->cpt_lmtline, &inst, 2);
		lmt_status = otx2_lmt_submit(sess->cpt_nq_reg);
	} while (lmt_status == 0);

	return 1;

drop:
	if (offload_flags & NIX_TX_OFFLOAD_MBUF_NOFF_F) {
		/* Don't free if reference count > 1 */
		if (rte_pktmbuf_prefree_seg(m) == NULL)
			return 0;
	}
	rte_pktmbuf_free(m);
	return 0;
}

#endif /* __OTX2_ETHDEV_SEC_TX_H__ */