1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2018 Cavium, Inc
3 */
4 #ifndef _OTX_CRYPTODEV_HW_ACCESS_H_
5 #define _OTX_CRYPTODEV_HW_ACCESS_H_
6
7 #include <stdbool.h>
8
9 #include <rte_branch_prediction.h>
10 #include <rte_cryptodev.h>
11 #include <rte_cycles.h>
12 #include <rte_io.h>
13 #include <rte_memory.h>
14 #include <rte_prefetch.h>
15
16 #include "otx_cryptodev.h"
17
18 #include "cpt_common.h"
19 #include "cpt_hw_types.h"
20 #include "cpt_mcode_defines.h"
21 #include "cpt_pmd_logs.h"
22
23 #define CPT_INTR_POLL_INTERVAL_MS (50)
24
25 /* Default command queue length */
26 #define DEFAULT_CMD_QCHUNKS 2
27 #define DEFAULT_CMD_QCHUNK_SIZE 1023
28 #define DEFAULT_CMD_QLEN \
29 (DEFAULT_CMD_QCHUNK_SIZE * DEFAULT_CMD_QCHUNKS)
30
31 #define CPT_CSR_REG_BASE(cpt) ((cpt)->reg_base)
32
/* Read hw register.
 * Note: parameters are fully parenthesized so that compound expressions
 * (e.g. `base + off`) expand correctly (macro hygiene, CERT PRE01-C).
 */
#define CPT_READ_CSR(__hw_addr, __offset)			\
	rte_read64_relaxed((uint8_t *)(__hw_addr) + (__offset))

/* Write hw register */
#define CPT_WRITE_CSR(__hw_addr, __offset, __val)		\
	rte_write64_relaxed((__val), ((uint8_t *)(__hw_addr) + (__offset)))
40
/* cpt instance: per-queue-pair handle embedded at the start of cpt_vf,
 * so a cpt_instance pointer can be cast to cpt_vf (see fill_cpt_inst).
 */
struct cpt_instance {
	/** Queue id of this instance */
	uint32_t queue_id;
	/** Reserved; keeps structure layout stable */
	uintptr_t rsvd;
	/** Mempool for session objects */
	struct rte_mempool *sess_mp;
	/** Mempool for session private data */
	struct rte_mempool *sess_mp_priv;
	/** Metadata info for this queue pair */
	struct cpt_qp_meta_info meta_info;
};
49
/* One physically-contiguous chunk of the command queue */
struct command_chunk {
	/** 128-byte aligned real_vaddr */
	uint8_t *head;
	/** 128-byte aligned real_dma_addr */
	phys_addr_t dma_addr;
};
56
/**
 * Command queue structure
 *
 * The queue is a ring of DEFAULT_CMD_QCHUNKS chunks of
 * DEFAULT_CMD_QCHUNK_SIZE instructions each; (cchunk, idx) together
 * locate the next free instruction slot (see get_cpt_inst/mark_cpt_inst).
 */
struct command_queue {
	/** Command queue host write idx (slot within current chunk) */
	uint32_t idx;
	/** Command queue chunk (index into chead[]) */
	uint32_t cchunk;
	/** Command queue head; instructions are inserted here */
	uint8_t *qhead;
	/** Command chunk list head */
	struct command_chunk chead[DEFAULT_CMD_QCHUNKS];
};
70
/**
 * CPT VF device structure
 *
 * `instance` must stay the first member: data-path helpers cast
 * cpt_instance pointers back to cpt_vf.
 */
struct cpt_vf {
	/** CPT instance */
	struct cpt_instance instance;
	/** Register start address */
	uint8_t *reg_base;
	/** Command queue information */
	struct command_queue cqueue;
	/** Pending queue information */
	struct pending_queue pqueue;

	/** Below fields are accessed only in control path */

	/** Env specific pdev representing the pci dev */
	void *pdev;
	/** Calculated queue size */
	uint32_t qsize;
	/** Device index (0...CPT_MAX_VQ_NUM) */
	uint8_t vfid;
	/** VF type of cpt_vf_type_t (SE_TYPE(2) or AE_TYPE(1)) */
	uint8_t vftype;
	/** VF group (0 - 8) */
	uint8_t vfgrp;
	/** Operating node: Bits (46:44) in BAR0 address */
	uint8_t node;

	/** VF-PF mailbox communication */

	/** Flag if acked */
	bool pf_acked;
	/** Flag if not acked */
	bool pf_nacked;

	/** Device name */
	char dev_name[32];
} __rte_cache_aligned;
109
/*
 * CPT Registers map for 81xx
 *
 * Offsets follow the pattern:
 *   reg_offset + 0x1000000000 * block(a) + 0x100000 * vq(b)
 * NOTE(review): (a) is masked with 0x0 in some macros and 0x1 in others;
 * this matches the upstream register strides for this silicon — confirm
 * against the 81xx HRM before changing.
 */

/* VF registers */
#define CPTX_VQX_CTL(a, b) (0x0000100ll + 0x1000000000ll * \
			    ((a) & 0x0) + 0x100000ll * (b))
#define CPTX_VQX_SADDR(a, b) (0x0000200ll + 0x1000000000ll * \
			      ((a) & 0x0) + 0x100000ll * (b))
#define CPTX_VQX_DONE_WAIT(a, b) (0x0000400ll + 0x1000000000ll * \
				  ((a) & 0x0) + 0x100000ll * (b))
#define CPTX_VQX_INPROG(a, b) (0x0000410ll + 0x1000000000ll * \
			       ((a) & 0x0) + 0x100000ll * (b))
#define CPTX_VQX_DONE(a, b) (0x0000420ll + 0x1000000000ll * \
			     ((a) & 0x1) + 0x100000ll * (b))
#define CPTX_VQX_DONE_ACK(a, b) (0x0000440ll + 0x1000000000ll * \
				 ((a) & 0x1) + 0x100000ll * (b))
#define CPTX_VQX_DONE_INT_W1S(a, b) (0x0000460ll + 0x1000000000ll * \
				     ((a) & 0x1) + 0x100000ll * (b))
#define CPTX_VQX_DONE_INT_W1C(a, b) (0x0000468ll + 0x1000000000ll * \
				     ((a) & 0x1) + 0x100000ll * (b))
#define CPTX_VQX_DONE_ENA_W1S(a, b) (0x0000470ll + 0x1000000000ll * \
				     ((a) & 0x1) + 0x100000ll * (b))
#define CPTX_VQX_DONE_ENA_W1C(a, b) (0x0000478ll + 0x1000000000ll * \
				     ((a) & 0x1) + 0x100000ll * (b))
#define CPTX_VQX_MISC_INT(a, b) (0x0000500ll + 0x1000000000ll * \
				 ((a) & 0x1) + 0x100000ll * (b))
#define CPTX_VQX_MISC_INT_W1S(a, b) (0x0000508ll + 0x1000000000ll * \
				     ((a) & 0x1) + 0x100000ll * (b))
#define CPTX_VQX_MISC_ENA_W1S(a, b) (0x0000510ll + 0x1000000000ll * \
				     ((a) & 0x1) + 0x100000ll * (b))
#define CPTX_VQX_MISC_ENA_W1C(a, b) (0x0000518ll + 0x1000000000ll * \
				     ((a) & 0x1) + 0x100000ll * (b))
#define CPTX_VQX_DOORBELL(a, b) (0x0000600ll + 0x1000000000ll * \
				 ((a) & 0x1) + 0x100000ll * (b))
#define CPTX_VFX_PF_MBOXX(a, b, c) (0x0001000ll + 0x1000000000ll * \
				    ((a) & 0x1) + 0x100000ll * (b) + \
				    8ll * ((c) & 0x1))
148
/* VF HAL functions */

/** Poll/service misc (mailbox/error) interrupt status on the VF —
 * TODO confirm exact scope against the implementation. */
void
otx_cpt_poll_misc(struct cpt_vf *cptvf);

/** Initialize VF hardware state; @reg_base is the mapped BAR,
 * @name is copied into cptvf->dev_name. Returns 0 on success. */
int
otx_cpt_hw_init(struct cpt_vf *cptvf, void *pdev, void *reg_base, char *name);

/** Tear down a VF device previously set up by otx_cpt_hw_init(). */
int
otx_cpt_deinit_device(void *dev);

/** Acquire the cpt_instance for queue pair @qp_id in group @group. */
int
otx_cpt_get_resource(const struct rte_cryptodev *dev, uint8_t group,
		     struct cpt_instance **instance, uint16_t qp_id);

/** Release an instance obtained via otx_cpt_get_resource(). */
int
otx_cpt_put_resource(struct cpt_instance *instance);

/** Start the VF queue(s) for @cptvf (a struct cpt_vf pointer). */
int
otx_cpt_start_device(void *cptvf);

/** Stop the VF queue(s) for @cptvf (a struct cpt_vf pointer). */
void
otx_cpt_stop_device(void *cptvf);
172
173 /* Write to VQX_DOORBELL register
174 */
175 static __rte_always_inline void
otx_cpt_write_vq_doorbell(struct cpt_vf * cptvf,uint32_t val)176 otx_cpt_write_vq_doorbell(struct cpt_vf *cptvf, uint32_t val)
177 {
178 cptx_vqx_doorbell_t vqx_dbell;
179
180 vqx_dbell.u = 0;
181 vqx_dbell.s.dbell_cnt = val * 8; /* Num of Instructions * 8 words */
182 CPT_WRITE_CSR(CPT_CSR_REG_BASE(cptvf),
183 CPTX_VQX_DOORBELL(0, 0), vqx_dbell.u);
184 }
185
186 static __rte_always_inline uint32_t
otx_cpt_read_vq_doorbell(struct cpt_vf * cptvf)187 otx_cpt_read_vq_doorbell(struct cpt_vf *cptvf)
188 {
189 cptx_vqx_doorbell_t vqx_dbell;
190
191 vqx_dbell.u = CPT_READ_CSR(CPT_CSR_REG_BASE(cptvf),
192 CPTX_VQX_DOORBELL(0, 0));
193 return vqx_dbell.s.dbell_cnt;
194 }
195
196 static __rte_always_inline void
otx_cpt_ring_dbell(struct cpt_instance * instance,uint16_t count)197 otx_cpt_ring_dbell(struct cpt_instance *instance, uint16_t count)
198 {
199 struct cpt_vf *cptvf = (struct cpt_vf *)instance;
200 /* Memory barrier to flush pending writes */
201 rte_smp_wmb();
202 otx_cpt_write_vq_doorbell(cptvf, count);
203 }
204
205 static __rte_always_inline void *
get_cpt_inst(struct command_queue * cqueue)206 get_cpt_inst(struct command_queue *cqueue)
207 {
208 CPT_LOG_DP_DEBUG("CPT queue idx %u\n", cqueue->idx);
209 return &cqueue->qhead[cqueue->idx * CPT_INST_SIZE];
210 }
211
212 static __rte_always_inline void
fill_cpt_inst(struct cpt_instance * instance,void * req,uint64_t ucmd_w3)213 fill_cpt_inst(struct cpt_instance *instance, void *req, uint64_t ucmd_w3)
214 {
215 struct command_queue *cqueue;
216 cpt_inst_s_t *cpt_ist_p;
217 struct cpt_vf *cptvf = (struct cpt_vf *)instance;
218 struct cpt_request_info *user_req = (struct cpt_request_info *)req;
219 cqueue = &cptvf->cqueue;
220 cpt_ist_p = get_cpt_inst(cqueue);
221 rte_prefetch_non_temporal(cpt_ist_p);
222
223 /* EI0, EI1, EI2, EI3 are already prepared */
224 /* HW W0 */
225 cpt_ist_p->u[0] = 0;
226 /* HW W1 */
227 cpt_ist_p->s8x.res_addr = user_req->comp_baddr;
228 /* HW W2 */
229 cpt_ist_p->u[2] = 0;
230 /* HW W3 */
231 cpt_ist_p->s8x.wq_ptr = 0;
232
233 /* MC EI0 */
234 cpt_ist_p->s8x.ei0 = user_req->ist.ei0;
235 /* MC EI1 */
236 cpt_ist_p->s8x.ei1 = user_req->ist.ei1;
237 /* MC EI2 */
238 cpt_ist_p->s8x.ei2 = user_req->ist.ei2;
239 /* MC EI3 */
240 cpt_ist_p->s8x.ei3 = ucmd_w3;
241 }
242
243 static __rte_always_inline void
mark_cpt_inst(struct cpt_instance * instance)244 mark_cpt_inst(struct cpt_instance *instance)
245 {
246 struct cpt_vf *cptvf = (struct cpt_vf *)instance;
247 struct command_queue *queue = &cptvf->cqueue;
248 if (unlikely(++queue->idx >= DEFAULT_CMD_QCHUNK_SIZE)) {
249 uint32_t cchunk = queue->cchunk;
250 MOD_INC(cchunk, DEFAULT_CMD_QCHUNKS);
251 queue->qhead = queue->chead[cchunk].head;
252 queue->idx = 0;
253 queue->cchunk = cchunk;
254 }
255 }
256
/*
 * Check completion status of a submitted request.
 *
 * Reads the hardware completion word (volatile — hardware updates it
 * asynchronously) and returns:
 *   0                - success
 *   ERR_REQ_PENDING  - not done yet, caller should retry
 *   ERR_REQ_TIMEOUT  - gave up waiting
 *   other non-zero   - microcode/hardware error code
 */
static __rte_always_inline uint8_t
check_nb_command_id(struct cpt_request_info *user_req,
		    struct cpt_instance *instance)
{
	uint8_t ret = ERR_REQ_PENDING;
	struct cpt_vf *cptvf = (struct cpt_vf *)instance;
	volatile cpt_res_s_t *cptres;

	cptres = (volatile cpt_res_s_t *)user_req->completion_addr;

	if (unlikely(cptres->s8x.compcode == CPT_8X_COMP_E_NOTDONE)) {
		/*
		 * Wait for some time for this command to get completed
		 * before timing out
		 */
		if (rte_get_timer_cycles() < user_req->time_out)
			return ret;
		/*
		 * TODO: See if alternate caddr can be used to not loop
		 * longer than needed.
		 */
		/* Grace period: allow extra polls past the deadline
		 * (compcode is re-read — hardware may have finished since
		 * the check above).
		 */
		if ((cptres->s8x.compcode == CPT_8X_COMP_E_NOTDONE) &&
		    (user_req->extra_time < TIME_IN_RESET_COUNT)) {
			user_req->extra_time++;
			return ret;
		}

		/* Completed between the two volatile reads above */
		if (cptres->s8x.compcode != CPT_8X_COMP_E_NOTDONE)
			goto complete;

		ret = ERR_REQ_TIMEOUT;
		CPT_LOG_DP_ERR("Request %p timedout", user_req);
		/* Service pending misc interrupts to surface any HW error */
		otx_cpt_poll_misc(cptvf);
		goto exit;
	}

complete:
	if (likely(cptres->s8x.compcode == CPT_8X_COMP_E_GOOD)) {
		ret = 0; /* success */
		/* HW says good, but microcode may still report an error
		 * via the alternate completion address.
		 */
		if (unlikely((uint8_t)*user_req->alternate_caddr)) {
			ret = (uint8_t)*user_req->alternate_caddr;
			CPT_LOG_DP_ERR("Request %p : failed with microcode"
				" error, MC completion code : 0x%x", user_req,
				ret);
		}
		CPT_LOG_DP_DEBUG("MC status %.8x\n",
			   *((volatile uint32_t *)user_req->alternate_caddr));
		CPT_LOG_DP_DEBUG("HW status %.8x\n",
			   *((volatile uint32_t *)user_req->completion_addr));
	} else if ((cptres->s8x.compcode == CPT_8X_COMP_E_SWERR) ||
		   (cptres->s8x.compcode == CPT_8X_COMP_E_FAULT)) {
		/* Prefer the microcode error code; substitute a generic
		 * one when the alternate completion word is zero.
		 */
		ret = (uint8_t)*user_req->alternate_caddr;
		if (!ret)
			ret = ERR_BAD_ALT_CCODE;
		CPT_LOG_DP_DEBUG("Request %p : failed with %s : err code :%x",
			   user_req,
			   (cptres->s8x.compcode == CPT_8X_COMP_E_FAULT) ?
			   "DMA Fault" : "Software error", ret);
	} else {
		CPT_LOG_DP_ERR("Request %p : unexpected completion code %d",
			   user_req, cptres->s8x.compcode);
		ret = (uint8_t)*user_req->alternate_caddr;
	}

exit:
	return ret;
}
324
325 #endif /* _OTX_CRYPTODEV_HW_ACCESS_H_ */
326