/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (C) 2019 Marvell International Ltd.
 */
#include <rte_cryptodev.h>
#include <rte_ethdev.h>

#include "otx2_cryptodev.h"
#include "otx2_cryptodev_hw_access.h"
#include "otx2_cryptodev_mbox.h"
#include "otx2_dev.h"
#include "otx2_ethdev.h"
#include "otx2_sec_idev.h"
#include "otx2_mbox.h"

#include "cpt_pmd_logs.h"

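/*
 * Fetch the CPT engine capabilities from the AF over the mailbox. The PF
 * driver's mailbox version is checked against OTX2_CPT_PMD_VERSION before
 * the per-engine-type capability words are copied into hw_caps.
 */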
int
otx2_cpt_hardware_caps_get(const struct rte_cryptodev *dev,
			   union cpt_eng_caps *hw_caps)
{
	struct otx2_cpt_vf *vf = dev->data->dev_private;
	struct otx2_dev *otx2_dev = &vf->otx2_dev;
	struct cpt_caps_rsp_msg *rsp;
	int ret;

	otx2_mbox_alloc_msg_cpt_caps_get(otx2_dev->mbox);

	ret = otx2_mbox_process_msg(otx2_dev->mbox, (void *)&rsp);
	if (ret)
		return -EIO;

	if (rsp->cpt_pf_drv_version != OTX2_CPT_PMD_VERSION) {
		otx2_err("Incompatible CPT PMD version "
			 "(Kernel: 0x%04x DPDK: 0x%04x)",
			 rsp->cpt_pf_drv_version, OTX2_CPT_PMD_VERSION);
		return -EPIPE;
	}

	memcpy(hw_caps, rsp->eng_caps,
	       sizeof(union cpt_eng_caps) * CPT_MAX_ENG_TYPES);

	return 0;
}

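/*
 * Query the AF for free resource counts and report how many CPT LFs
 * (one LF per queue pair) are still available to this VF.
 */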
int
otx2_cpt_available_queues_get(const struct rte_cryptodev *dev,
			      uint16_t *nb_queues)
{
	struct otx2_cpt_vf *vf = dev->data->dev_private;
	struct otx2_dev *otx2_dev = &vf->otx2_dev;
	struct free_rsrcs_rsp *rsp;
	int ret;

	otx2_mbox_alloc_msg_free_rsrc_cnt(otx2_dev->mbox);

	ret = otx2_mbox_process_msg(otx2_dev->mbox, (void *)&rsp);
	if (ret)
		return -EIO;

	*nb_queues = rsp->cpt;
	return 0;
}

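/* Attach 'nb_queues' CPT LFs to this VF through the AF mailbox. */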
int
otx2_cpt_queues_attach(const struct rte_cryptodev *dev, uint8_t nb_queues)
{
	struct otx2_cpt_vf *vf = dev->data->dev_private;
	struct otx2_mbox *mbox = vf->otx2_dev.mbox;
	struct rsrc_attach_req *req;

	/* Ask AF to attach required LFs */

	req = otx2_mbox_alloc_msg_attach_resources(mbox);

	/* 1 LF = 1 queue */
	req->cptlfs = nb_queues;

	if (otx2_mbox_process(mbox) < 0)
		return -EIO;

	/* Update number of attached queues */
	vf->nb_queues = nb_queues;

	return 0;
}

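/*
 * Detach only the CPT LFs of this VF; the partial flag leaves any other
 * attached resources in place.
 */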
int
otx2_cpt_queues_detach(const struct rte_cryptodev *dev)
{
	struct otx2_cpt_vf *vf = dev->data->dev_private;
	struct otx2_mbox *mbox = vf->otx2_dev.mbox;
	struct rsrc_detach_req *req;

	req = otx2_mbox_alloc_msg_detach_resources(mbox);
	req->cptlfs = true;
	req->partial = true;
	if (otx2_mbox_process(mbox) < 0)
		return -EIO;

	/* Queues have been detached */
	vf->nb_queues = 0;

	return 0;
}

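/* Record the MSI-X vector offset of every attached CPT LF. */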
int
otx2_cpt_msix_offsets_get(const struct rte_cryptodev *dev)
{
	struct otx2_cpt_vf *vf = dev->data->dev_private;
	struct otx2_mbox *mbox = vf->otx2_dev.mbox;
	struct msix_offset_rsp *rsp;
	uint32_t i;
	int ret;

	/* Get CPT MSI-X vector offsets */

	otx2_mbox_alloc_msg_msix_offset(mbox);

	ret = otx2_mbox_process_msg(mbox, (void *)&rsp);
	if (ret)
		return ret;

	for (i = 0; i < vf->nb_queues; i++)
		vf->lf_msixoff[i] = rsp->cptlf_msixoff[i];

	return 0;
}

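/* Send the message already queued in the mailbox and wait for the reply. */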
static int
otx2_cpt_send_mbox_msg(struct otx2_cpt_vf *vf)
{
	struct otx2_mbox *mbox = vf->otx2_dev.mbox;
	int ret;

	otx2_mbox_msg_send(mbox, 0);

	ret = otx2_mbox_wait_for_rsp(mbox, 0);
	if (ret < 0) {
		CPT_LOG_ERR("Could not get mailbox response");
		return ret;
	}

	return 0;
}

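/*
 * Read a CPT AF register via the mailbox. The response is picked up
 * directly from the shared mailbox region at rx_start.
 */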
int
otx2_cpt_af_reg_read(const struct rte_cryptodev *dev, uint64_t reg,
		     uint64_t *val)
{
	struct otx2_cpt_vf *vf = dev->data->dev_private;
	struct otx2_mbox *mbox = vf->otx2_dev.mbox;
	struct otx2_mbox_dev *mdev = &mbox->dev[0];
	struct cpt_rd_wr_reg_msg *msg;
	int ret, off;

	msg = (struct cpt_rd_wr_reg_msg *)
			otx2_mbox_alloc_msg_rsp(mbox, 0, sizeof(*msg),
						sizeof(*msg));
	if (msg == NULL) {
		CPT_LOG_ERR("Could not allocate mailbox message");
		return -EFAULT;
	}

	msg->hdr.id = MBOX_MSG_CPT_RD_WR_REGISTER;
	msg->hdr.sig = OTX2_MBOX_REQ_SIG;
	msg->hdr.pcifunc = vf->otx2_dev.pf_func;
	msg->is_write = 0;
	msg->reg_offset = reg;
	msg->ret_val = val;

	ret = otx2_cpt_send_mbox_msg(vf);
	if (ret < 0)
		return ret;

	off = mbox->rx_start +
			RTE_ALIGN(sizeof(struct mbox_hdr), MBOX_MSG_ALIGN);
	msg = (struct cpt_rd_wr_reg_msg *)((uintptr_t)mdev->mbase + off);

	*val = msg->val;

	return 0;
}

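/* Write a CPT AF register via the mailbox. */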
int
otx2_cpt_af_reg_write(const struct rte_cryptodev *dev, uint64_t reg,
		      uint64_t val)
{
	struct otx2_cpt_vf *vf = dev->data->dev_private;
	struct otx2_mbox *mbox = vf->otx2_dev.mbox;
	struct cpt_rd_wr_reg_msg *msg;

	msg = (struct cpt_rd_wr_reg_msg *)
			otx2_mbox_alloc_msg_rsp(mbox, 0, sizeof(*msg),
						sizeof(*msg));
	if (msg == NULL) {
		CPT_LOG_ERR("Could not allocate mailbox message");
		return -EFAULT;
	}

	msg->hdr.id = MBOX_MSG_CPT_RD_WR_REGISTER;
	msg->hdr.sig = OTX2_MBOX_REQ_SIG;
	msg->hdr.pcifunc = vf->otx2_dev.pf_func;
	msg->is_write = 1;
	msg->reg_offset = reg;
	msg->val = val;

	return otx2_cpt_send_mbox_msg(vf);
}

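/*
 * Configure the inline inbound (RX) IPsec LF in the AF, passing the SSO
 * PF function handle reported by the event device layer.
 */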
int
otx2_cpt_inline_init(const struct rte_cryptodev *dev)
{
	struct otx2_cpt_vf *vf = dev->data->dev_private;
	struct otx2_mbox *mbox = vf->otx2_dev.mbox;
	struct cpt_rx_inline_lf_cfg_msg *msg;
	int ret;

	msg = otx2_mbox_alloc_msg_cpt_rx_inline_lf_cfg(mbox);
	msg->sso_pf_func = otx2_sso_pf_func_get();

	otx2_mbox_msg_send(mbox, 0);
	ret = otx2_mbox_process(mbox);
	if (ret < 0)
		return -EIO;

	return 0;
}

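/*
 * Bind a CPT queue pair to an ethdev port for inline outbound IPsec by
 * programming the queue's slot with the NIX PF function of that port.
 * The port must be security capable.
 */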
int
otx2_cpt_qp_ethdev_bind(const struct rte_cryptodev *dev, struct otx2_cpt_qp *qp,
			uint16_t port_id)
{
	struct rte_eth_dev *eth_dev = &rte_eth_devices[port_id];
	struct otx2_cpt_vf *vf = dev->data->dev_private;
	struct otx2_mbox *mbox = vf->otx2_dev.mbox;
	struct cpt_inline_ipsec_cfg_msg *msg;
	struct otx2_eth_dev *otx2_eth_dev;
	int ret;

	if (!otx2_eth_dev_is_sec_capable(eth_dev))
		return -EINVAL;

	otx2_eth_dev = otx2_eth_pmd_priv(eth_dev);

	msg = otx2_mbox_alloc_msg_cpt_inline_ipsec_cfg(mbox);
	msg->dir = CPT_INLINE_OUTBOUND;
	msg->enable = 1;
	msg->slot = qp->id;

	msg->nix_pf_func = otx2_eth_dev->pf_func;

	otx2_mbox_msg_send(mbox, 0);
	ret = otx2_mbox_process(mbox);
	if (ret < 0)
		return -EIO;

	return 0;
}