/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2021 Marvell.
 */

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#include "roc_api.h"
#include "roc_priv.h"

#define RVU_AF_AFPF_MBOX0 (0x02000)
#define RVU_AF_AFPF_MBOX1 (0x02008)

#define RVU_PF_PFAF_MBOX0 (0xC00)
#define RVU_PF_PFAF_MBOX1 (0xC08)

#define RVU_PF_VFX_PFVF_MBOX0 (0x0000)
#define RVU_PF_VFX_PFVF_MBOX1 (0x0008)

#define RVU_VF_VFPF_MBOX0 (0x0000)
#define RVU_VF_VFPF_MBOX1 (0x0008)

/* RCLK, SCLK in MHz */
uint16_t dev_rclk_freq;
uint16_t dev_sclk_freq;

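/* Offset of the first message in a mailbox region, i.e. the aligned
 * size of the region header.
 */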
static inline uint16_t
msgs_offset(void)
{
	return PLT_ALIGN(sizeof(struct mbox_hdr), MBOX_MSG_ALIGN);
}

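/**
 * @internal
 * Release mailbox device state and clear the base addresses
 */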
void
mbox_fini(struct mbox *mbox)
{
	mbox->reg_base = 0;
	mbox->hwbase = 0;
	plt_free(mbox->dev);
	mbox->dev = NULL;
}

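/**
 * @internal
 * Reset the TX/RX headers and bookkeeping of the given mailbox device
 */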
void
mbox_reset(struct mbox *mbox, int devid)
{
	struct mbox_dev *mdev = &mbox->dev[devid];
	struct mbox_hdr *tx_hdr =
		(struct mbox_hdr *)((uintptr_t)mdev->mbase + mbox->tx_start);
	struct mbox_hdr *rx_hdr =
		(struct mbox_hdr *)((uintptr_t)mdev->mbase + mbox->rx_start);

	plt_spinlock_lock(&mdev->mbox_lock);
	mdev->msg_size = 0;
	mdev->rsp_size = 0;
	tx_hdr->msg_size = 0;
	tx_hdr->num_msgs = 0;
	rx_hdr->msg_size = 0;
	rx_hdr->num_msgs = 0;
	plt_spinlock_unlock(&mdev->mbox_lock);
}

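/**
 * @internal
 * Set up a mailbox for the given direction: select the TX/RX regions and
 * trigger register, allocate per-device state and pick the response timeout
 * (overridable through the ROC_MBOX_TIMEOUT environment variable)
 */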
int
mbox_init(struct mbox *mbox, uintptr_t hwbase, uintptr_t reg_base,
	  int direction, int ndevs, uint64_t intr_offset)
{
	struct mbox_dev *mdev;
	char *var, *var_to;
	int devid;

	mbox->intr_offset = intr_offset;
	mbox->reg_base = reg_base;
	mbox->hwbase = hwbase;

	switch (direction) {
	case MBOX_DIR_AFPF:
	case MBOX_DIR_PFVF:
		mbox->tx_start = MBOX_DOWN_TX_START;
		mbox->rx_start = MBOX_DOWN_RX_START;
		mbox->tx_size = MBOX_DOWN_TX_SIZE;
		mbox->rx_size = MBOX_DOWN_RX_SIZE;
		break;
	case MBOX_DIR_PFAF:
	case MBOX_DIR_VFPF:
		mbox->tx_start = MBOX_DOWN_RX_START;
		mbox->rx_start = MBOX_DOWN_TX_START;
		mbox->tx_size = MBOX_DOWN_RX_SIZE;
		mbox->rx_size = MBOX_DOWN_TX_SIZE;
		break;
	case MBOX_DIR_AFPF_UP:
	case MBOX_DIR_PFVF_UP:
		mbox->tx_start = MBOX_UP_TX_START;
		mbox->rx_start = MBOX_UP_RX_START;
		mbox->tx_size = MBOX_UP_TX_SIZE;
		mbox->rx_size = MBOX_UP_RX_SIZE;
		break;
	case MBOX_DIR_PFAF_UP:
	case MBOX_DIR_VFPF_UP:
		mbox->tx_start = MBOX_UP_RX_START;
		mbox->rx_start = MBOX_UP_TX_START;
		mbox->tx_size = MBOX_UP_RX_SIZE;
		mbox->rx_size = MBOX_UP_TX_SIZE;
		break;
	default:
		return -ENODEV;
	}

	switch (direction) {
	case MBOX_DIR_AFPF:
	case MBOX_DIR_AFPF_UP:
		mbox->trigger = RVU_AF_AFPF_MBOX0;
		mbox->tr_shift = 4;
		break;
	case MBOX_DIR_PFAF:
	case MBOX_DIR_PFAF_UP:
		mbox->trigger = RVU_PF_PFAF_MBOX1;
		mbox->tr_shift = 0;
		break;
	case MBOX_DIR_PFVF:
	case MBOX_DIR_PFVF_UP:
		mbox->trigger = RVU_PF_VFX_PFVF_MBOX0;
		mbox->tr_shift = 12;
		break;
	case MBOX_DIR_VFPF:
	case MBOX_DIR_VFPF_UP:
		mbox->trigger = RVU_VF_VFPF_MBOX1;
		mbox->tr_shift = 0;
		break;
	default:
		return -ENODEV;
	}

	mbox->dev = plt_zmalloc(ndevs * sizeof(struct mbox_dev), ROC_ALIGN);
	if (!mbox->dev) {
		mbox_fini(mbox);
		return -ENOMEM;
	}
	mbox->ndevs = ndevs;
	for (devid = 0; devid < ndevs; devid++) {
		mdev = &mbox->dev[devid];
		mdev->mbase = (void *)(mbox->hwbase + (devid * MBOX_SIZE));
		plt_spinlock_init(&mdev->mbox_lock);
		/* Init header to reset value */
		mbox_reset(mbox, devid);
	}

	var = getenv("ROC_CN10K_MBOX_TIMEOUT");
	var_to = getenv("ROC_MBOX_TIMEOUT");

	if (var)
		mbox->rsp_tmo = atoi(var);
	else if (var_to)
		mbox->rsp_tmo = atoi(var_to);
	else
		mbox->rsp_tmo = MBOX_RSP_TIMEOUT;

	return 0;
}

/**
 * @internal
 * Allocate a message response
 */
struct mbox_msghdr *
mbox_alloc_msg_rsp(struct mbox *mbox, int devid, int size, int size_rsp)
{
	struct mbox_dev *mdev = &mbox->dev[devid];
	struct mbox_msghdr *msghdr = NULL;

	plt_spinlock_lock(&mdev->mbox_lock);
	size = PLT_ALIGN(size, MBOX_MSG_ALIGN);
	size_rsp = PLT_ALIGN(size_rsp, MBOX_MSG_ALIGN);
	/* Check if there is space in mailbox */
	if ((mdev->msg_size + size) > mbox->tx_size - msgs_offset())
		goto exit;
	if ((mdev->rsp_size + size_rsp) > mbox->rx_size - msgs_offset())
		goto exit;
	if (mdev->msg_size == 0)
		mdev->num_msgs = 0;
	mdev->num_msgs++;

	msghdr = (struct mbox_msghdr *)(((uintptr_t)mdev->mbase +
					 mbox->tx_start + msgs_offset() +
					 mdev->msg_size));

	/* Clear the whole msg region */
	mbox_memset(msghdr, 0, sizeof(*msghdr) + size);
	/* Init message header with reset values */
	msghdr->ver = MBOX_VERSION;
	mdev->msg_size += size;
	mdev->rsp_size += size_rsp;
	msghdr->next_msgoff = mdev->msg_size + msgs_offset();
exit:
	plt_spinlock_unlock(&mdev->mbox_lock);

	return msghdr;
}

/**
 * @internal
 * Send a mailbox message
 */
void
mbox_msg_send(struct mbox *mbox, int devid)
{
	struct mbox_dev *mdev = &mbox->dev[devid];
	struct mbox_hdr *tx_hdr =
		(struct mbox_hdr *)((uintptr_t)mdev->mbase + mbox->tx_start);
	struct mbox_hdr *rx_hdr =
		(struct mbox_hdr *)((uintptr_t)mdev->mbase + mbox->rx_start);

	/* Reset header for next messages */
	tx_hdr->msg_size = mdev->msg_size;
	mdev->msg_size = 0;
	mdev->rsp_size = 0;
	mdev->msgs_acked = 0;

	/* num_msgs != 0 signals to the peer that the buffer has a number of
	 * messages. So this should be written after copying txmem
	 */
	tx_hdr->num_msgs = mdev->num_msgs;
	rx_hdr->num_msgs = 0;

	/* Sync mbox data into memory */
	plt_wmb();

	/* The interrupt should be fired after num_msgs is written
	 * to the shared memory
	 */
	plt_write64(1, (volatile void *)(mbox->reg_base +
					 (mbox->trigger |
					  (devid << mbox->tr_shift))));
}

/**
 * @internal
 * Wait and get mailbox response
 */
int
mbox_get_rsp(struct mbox *mbox, int devid, void **msg)
{
	struct mbox_dev *mdev = &mbox->dev[devid];
	struct mbox_msghdr *msghdr;
	uint64_t offset;
	int rc;

	rc = mbox_wait_for_rsp(mbox, devid);
	if (rc < 0)
		return -EIO;

	plt_rmb();

	offset = mbox->rx_start +
		 PLT_ALIGN(sizeof(struct mbox_hdr), MBOX_MSG_ALIGN);
	msghdr = (struct mbox_msghdr *)((uintptr_t)mdev->mbase + offset);
	if (msg != NULL)
		*msg = msghdr;

	return msghdr->rc;
}

/**
 * Poll for the given wait time (in milliseconds) to get a mailbox response
 */
static int
mbox_poll(struct mbox *mbox, uint32_t wait)
{
	uint32_t timeout = 0, sleep = 1;
	uint32_t wait_us = wait * 1000;
	uint64_t rsp_reg = 0;
	uintptr_t reg_addr;

	reg_addr = mbox->reg_base + mbox->intr_offset;
	do {
		rsp_reg = plt_read64(reg_addr);

		if (timeout >= wait_us)
			return -ETIMEDOUT;

		plt_delay_us(sleep);
		timeout += sleep;
	} while (!rsp_reg);

	plt_rmb();

	/* Clear interrupt */
	plt_write64(rsp_reg, reg_addr);

	/* Reset mbox */
	mbox_reset(mbox, 0);

	return 0;
}

/**
 * @internal
 * Wait and get mailbox response with timeout
 */
int
mbox_get_rsp_tmo(struct mbox *mbox, int devid, void **msg, uint32_t tmo)
{
	struct mbox_dev *mdev = &mbox->dev[devid];
	struct mbox_msghdr *msghdr;
	uint64_t offset;
	int rc;

	rc = mbox_wait_for_rsp_tmo(mbox, devid, tmo);
	if (rc != 1)
		return -EIO;

	plt_rmb();

	offset = mbox->rx_start +
		 PLT_ALIGN(sizeof(struct mbox_hdr), MBOX_MSG_ALIGN);
	msghdr = (struct mbox_msghdr *)((uintptr_t)mdev->mbase + offset);
	if (msg != NULL)
		*msg = msghdr;

	return msghdr->rc;
}

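/* Busy-wait until the peer acks all queued messages or the timeout
 * (in milliseconds) expires.
 */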
static int
mbox_wait(struct mbox *mbox, int devid, uint32_t rst_timo)
{
	volatile struct mbox_dev *mdev = &mbox->dev[devid];
	uint32_t timeout = 0, sleep = 1;

	rst_timo = rst_timo * 1000; /* Milliseconds to microseconds */
	while (mdev->num_msgs > mdev->msgs_acked) {
		plt_delay_us(sleep);
		timeout += sleep;
		if (timeout >= rst_timo) {
			struct mbox_hdr *tx_hdr =
				(struct mbox_hdr *)((uintptr_t)mdev->mbase +
						    mbox->tx_start);
			struct mbox_hdr *rx_hdr =
				(struct mbox_hdr *)((uintptr_t)mdev->mbase +
						    mbox->rx_start);

			plt_err("MBOX[devid: %d] message wait timeout %d, "
				"num_msgs: %d, msgs_acked: %d "
				"(tx/rx num_msgs: %d/%d), msg_size: %d, "
				"rsp_size: %d",
				devid, timeout, mdev->num_msgs,
				mdev->msgs_acked, tx_hdr->num_msgs,
				rx_hdr->num_msgs, mdev->msg_size,
				mdev->rsp_size);

			return -EIO;
		}
		plt_rmb();
	}
	return 0;
}

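/**
 * @internal
 * Wait for a mailbox response with an explicit timeout (in milliseconds).
 * Returns the number of messages received, or a negative errno on failure.
 */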
int
mbox_wait_for_rsp_tmo(struct mbox *mbox, int devid, uint32_t tmo)
{
	struct mbox_dev *mdev = &mbox->dev[devid];
	int rc = 0;

	/* Sync with mbox region */
	plt_rmb();

	if (mbox->trigger == RVU_PF_VFX_PFVF_MBOX1 ||
	    mbox->trigger == RVU_PF_VFX_PFVF_MBOX0) {
		/* In case of VF, wait a bit more to account for the
		 * round-trip delay
		 */
		tmo = tmo * 2;
	}

	/* Wait for message */
	if (plt_thread_is_intr())
		rc = mbox_poll(mbox, tmo);
	else
		rc = mbox_wait(mbox, devid, tmo);

	if (!rc)
		rc = mdev->num_msgs;

	return rc;
}

/**
 * @internal
 * Wait for the mailbox response
 */
int
mbox_wait_for_rsp(struct mbox *mbox, int devid)
{
	return mbox_wait_for_rsp_tmo(mbox, devid, mbox->rsp_tmo);
}

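/**
 * @internal
 * Return the number of bytes still free for requests in the TX region
 */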
int
mbox_get_availmem(struct mbox *mbox, int devid)
{
	struct mbox_dev *mdev = &mbox->dev[devid];
	int avail;

	plt_spinlock_lock(&mdev->mbox_lock);
	avail = mbox->tx_size - mdev->msg_size - msgs_offset();
	plt_spinlock_unlock(&mdev->mbox_lock);

	return avail;
}

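/**
 * @internal
 * Send a READY message to AF, check mailbox version compatibility and
 * return the PCI function number; also latch the RCLK/SCLK frequencies
 */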
int
send_ready_msg(struct mbox *mbox, uint16_t *pcifunc)
{
	struct ready_msg_rsp *rsp;
	int rc;

	mbox_alloc_msg_ready(mbox);

	rc = mbox_process_msg(mbox, (void *)&rsp);
	if (rc)
		return rc;

	if (rsp->hdr.ver != MBOX_VERSION) {
		plt_err("Incompatible MBox versions(AF: 0x%04x Client: 0x%04x)",
			rsp->hdr.ver, MBOX_VERSION);
		return -EPIPE;
	}

	if (pcifunc)
		*pcifunc = rsp->hdr.pcifunc;

	/* Save rclk & sclk freq */
	if (!dev_rclk_freq || !dev_sclk_freq) {
		dev_rclk_freq = rsp->rclk_freq;
		dev_sclk_freq = rsp->sclk_freq;
	}
	return 0;
}

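/**
 * @internal
 * Queue a MBOX_MSG_INVALID response for an unsupported request ID
 */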
int
reply_invalid_msg(struct mbox *mbox, int devid, uint16_t pcifunc, uint16_t id)
{
	struct msg_rsp *rsp;

	rsp = (struct msg_rsp *)mbox_alloc_msg(mbox, devid, sizeof(*rsp));
	if (!rsp)
		return -ENOMEM;
	rsp->hdr.id = id;
	rsp->hdr.sig = MBOX_RSP_SIG;
	rsp->hdr.rc = MBOX_MSG_INVALID;
	rsp->hdr.pcifunc = pcifunc;

	return 0;
}

/**
 * @internal
 * Convert mail box ID to name
 */
const char *
mbox_id2name(uint16_t id)
{
	switch (id) {
	default:
		return "INVALID ID";
#define M(_name, _id, _1, _2, _3) \
	case _id: \
		return #_name;
		MBOX_MESSAGES
		MBOX_UP_CGX_MESSAGES
#undef M
	}
}

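/**
 * @internal
 * Convert mail box ID to request size
 */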
int
mbox_id2size(uint16_t id)
{
	switch (id) {
	default:
		return 0;
#define M(_1, _id, _2, _req_type, _3) \
	case _id: \
		return sizeof(struct _req_type);
		MBOX_MESSAGES
		MBOX_UP_CGX_MESSAGES
#undef M
	}
}