/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2021 Marvell.
 */

#include <fcntl.h>
#include <inttypes.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

#include "roc_api.h"
#include "roc_priv.h"

/* PCI Extended capability ID */
#define ROC_PCI_EXT_CAP_ID_SRIOV 0x10 /* SRIOV cap */

/* Single Root I/O Virtualization */
#define ROC_PCI_SRIOV_TOTAL_VF 0x0e /* Total VFs */

static void *
mbox_mem_map(off_t off, size_t size)
{
	void *va = MAP_FAILED;
	int mem_fd;

	if (size <= 0 || !off) {
		plt_err("Invalid mbox area off 0x%lx size %lu", off, size);
		goto error;
	}

	mem_fd = open("/dev/mem", O_RDWR);
	if (mem_fd < 0)
		goto error;

	va = plt_mmap(NULL, size, PLT_PROT_READ | PLT_PROT_WRITE,
		      PLT_MAP_SHARED, mem_fd, off);
	close(mem_fd);

	if (va == MAP_FAILED)
		plt_err("Failed to mmap sz=0x%zx, fd=%d, off=%jd", size, mem_fd,
			(intmax_t)off);
error:
	return va;
}

static void
mbox_mem_unmap(void *va, size_t size)
{
	if (va)
		munmap(va, size);
}
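
/*
 * Usage sketch (illustrative only): dev_vf_mbase_get()/dev_vf_mbase_put()
 * below are the in-tree callers, mapping the per-VF mailbox window on CN9K:
 *
 *	size_t sz = MBOX_SIZE * pci_dev->max_vfs;
 *	void *va = mbox_mem_map(pa, sz);	// pa from RVU_PF_VF_BAR4_ADDR
 *	if (va == MAP_FAILED)
 *		return -ENODEV;
 *	...
 *	mbox_mem_unmap(va, sz);
 */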

static int
pf_af_sync_msg(struct dev *dev, struct mbox_msghdr **rsp)
{
	uint32_t timeout = 0, sleep = 1;
	struct mbox *mbox = dev->mbox;
	struct mbox_dev *mdev = &mbox->dev[0];

	volatile uint64_t int_status = 0;
	struct mbox_msghdr *msghdr;
	uint64_t off;
	int rc = 0;

	/* We need to disable PF interrupts; we are in interrupt context */
	plt_write64(~0ull, dev->bar2 + RVU_PF_INT_ENA_W1C);

	/* Send message */
	mbox_msg_send(mbox, 0);

	do {
		plt_delay_ms(sleep);
		timeout += sleep;
		if (timeout >= mbox->rsp_tmo) {
			plt_err("Message timeout: %dms", mbox->rsp_tmo);
			rc = -EIO;
			break;
		}
		int_status = plt_read64(dev->bar2 + RVU_PF_INT);
	} while ((int_status & 0x1) != 0x1);

	/* Clear the interrupt */
	plt_write64(int_status, dev->bar2 + RVU_PF_INT);

	/* Enable interrupts */
	plt_write64(~0ull, dev->bar2 + RVU_PF_INT_ENA_W1S);

	if (rc == 0) {
		/* Get message */
		off = mbox->rx_start +
		      PLT_ALIGN(sizeof(struct mbox_hdr), MBOX_MSG_ALIGN);
		msghdr = (struct mbox_msghdr *)((uintptr_t)mdev->mbase + off);
		if (rsp)
			*rsp = msghdr;
		rc = msghdr->rc;
	}

	return rc;
}

static int
af_pf_wait_msg(struct dev *dev, uint16_t vf, int num_msg)
{
	uint32_t timeout = 0, sleep = 1;
	struct mbox *mbox = dev->mbox;
	struct mbox_dev *mdev = &mbox->dev[0];
	volatile uint64_t int_status;
	struct mbox_hdr *req_hdr;
	struct mbox_msghdr *msg;
	struct mbox_msghdr *rsp;
	uint64_t offset;
	size_t size;
	int i;

	/* We need to disable PF interrupts; we are in a timer interrupt */
	plt_write64(~0ull, dev->bar2 + RVU_PF_INT_ENA_W1C);

	/* Send message */
	mbox_msg_send(mbox, 0);

	do {
		plt_delay_ms(sleep);
		timeout++;
		if (timeout >= mbox->rsp_tmo) {
			plt_err("Routed messages %d timeout: %dms", num_msg,
				mbox->rsp_tmo);
			break;
		}
		int_status = plt_read64(dev->bar2 + RVU_PF_INT);
	} while ((int_status & 0x1) != 0x1);

	/* Clear the interrupt */
	plt_write64(~0ull, dev->bar2 + RVU_PF_INT);

	/* Enable interrupts */
	plt_write64(~0ull, dev->bar2 + RVU_PF_INT_ENA_W1S);

	plt_spinlock_lock(&mdev->mbox_lock);

	req_hdr = (struct mbox_hdr *)((uintptr_t)mdev->mbase + mbox->rx_start);
	if (req_hdr->num_msgs != num_msg)
		plt_err("Routed messages: %d received: %d", num_msg,
			req_hdr->num_msgs);

	/* Get messages from mbox */
	offset = mbox->rx_start +
		 PLT_ALIGN(sizeof(struct mbox_hdr), MBOX_MSG_ALIGN);
	for (i = 0; i < req_hdr->num_msgs; i++) {
		msg = (struct mbox_msghdr *)((uintptr_t)mdev->mbase + offset);
		size = mbox->rx_start + msg->next_msgoff - offset;

		/* Reserve PF/VF mbox message */
		size = PLT_ALIGN(size, MBOX_MSG_ALIGN);
		rsp = mbox_alloc_msg(&dev->mbox_vfpf, vf, size);
		if (!rsp) {
			plt_err("Failed to reserve VF%d message", vf);
			continue;
		}

		mbox_rsp_init(msg->id, rsp);

		/* Copy message from AF<->PF mbox to PF<->VF mbox */
		mbox_memcpy((uint8_t *)rsp + sizeof(struct mbox_msghdr),
			    (uint8_t *)msg + sizeof(struct mbox_msghdr),
			    size - sizeof(struct mbox_msghdr));

		/* Set status and sender pf_func data */
		rsp->rc = msg->rc;
		rsp->pcifunc = msg->pcifunc;

		/* Whenever a PF comes up, the AF sends the link status to it,
		 * but when a VF comes up no such event is sent to that VF.
		 * Use the MBOX_MSG_NIX_LF_START_RX response from the AF for
		 * this purpose and forward the PF's link status to the VF.
		 */
		if (msg->id == MBOX_MSG_NIX_LF_START_RX) {
			/* Send link status to VF */
			struct cgx_link_user_info linfo;
			struct mbox_msghdr *vf_msg;
			size_t sz;

			/* Get the link status */
			memset(&linfo, 0, sizeof(struct cgx_link_user_info));
			if (dev->ops && dev->ops->link_status_get)
				dev->ops->link_status_get(dev->roc_nix, &linfo);

			sz = PLT_ALIGN(mbox_id2size(MBOX_MSG_CGX_LINK_EVENT),
				       MBOX_MSG_ALIGN);
			/* Prepare the message to be sent */
			vf_msg = mbox_alloc_msg(&dev->mbox_vfpf_up, vf, sz);
			if (vf_msg) {
				mbox_req_init(MBOX_MSG_CGX_LINK_EVENT, vf_msg);
				memcpy((uint8_t *)vf_msg +
				       sizeof(struct mbox_msghdr), &linfo,
				       sizeof(struct cgx_link_user_info));

				vf_msg->rc = msg->rc;
				vf_msg->pcifunc = msg->pcifunc;
				/* Send to VF */
				mbox_msg_send(&dev->mbox_vfpf_up, vf);
			}
		}

		offset = mbox->rx_start + msg->next_msgoff;
	}
	plt_spinlock_unlock(&mdev->mbox_lock);

	return req_hdr->num_msgs;
}
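
/*
 * Note on the walk above: every mailbox direction shares the same frame
 * layout, so iterating the queued messages is always the same arithmetic
 * (also used by the process_msgs*() handlers below):
 *
 *	off = mbox->rx_start + PLT_ALIGN(sizeof(struct mbox_hdr), MBOX_MSG_ALIGN);
 *	for (i = 0; i < req_hdr->num_msgs; i++) {
 *		msg = (struct mbox_msghdr *)((uintptr_t)mdev->mbase + off);
 *		...				// payload follows the msghdr
 *		off = mbox->rx_start + msg->next_msgoff;
 *	}
 */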

static int
vf_pf_process_msgs(struct dev *dev, uint16_t vf)
{
	struct mbox *mbox = &dev->mbox_vfpf;
	struct mbox_dev *mdev = &mbox->dev[vf];
	struct mbox_hdr *req_hdr;
	struct mbox_msghdr *msg;
	int offset, routed = 0;
	size_t size;
	uint16_t i;

	req_hdr = (struct mbox_hdr *)((uintptr_t)mdev->mbase + mbox->rx_start);
	if (!req_hdr->num_msgs)
		return 0;

	offset = mbox->rx_start + PLT_ALIGN(sizeof(*req_hdr), MBOX_MSG_ALIGN);

	for (i = 0; i < req_hdr->num_msgs; i++) {
		msg = (struct mbox_msghdr *)((uintptr_t)mdev->mbase + offset);
		size = mbox->rx_start + msg->next_msgoff - offset;

		/* RVU_PF_FUNC_S */
		msg->pcifunc = dev_pf_func(dev->pf, vf);

		if (msg->id == MBOX_MSG_READY) {
			struct ready_msg_rsp *rsp;
			uint16_t max_bits = sizeof(dev->active_vfs[0]) * 8;

			/* Handle READY message in PF */
			dev->active_vfs[vf / max_bits] |=
				BIT_ULL(vf % max_bits);
			rsp = (struct ready_msg_rsp *)mbox_alloc_msg(
				mbox, vf, sizeof(*rsp));
			if (!rsp) {
				plt_err("Failed to alloc VF%d READY message",
					vf);
				continue;
			}

			mbox_rsp_init(msg->id, rsp);

			/* PF/VF function ID */
			rsp->hdr.pcifunc = msg->pcifunc;
			rsp->hdr.rc = 0;
		} else {
			struct mbox_msghdr *af_req;
			/* Reserve AF/PF mbox message */
			size = PLT_ALIGN(size, MBOX_MSG_ALIGN);
			af_req = mbox_alloc_msg(dev->mbox, 0, size);
			if (af_req == NULL)
				return -ENOSPC;
			mbox_req_init(msg->id, af_req);

			/* Copy message from VF<->PF mbox to PF<->AF mbox */
			mbox_memcpy((uint8_t *)af_req +
				    sizeof(struct mbox_msghdr),
				    (uint8_t *)msg + sizeof(struct mbox_msghdr),
				    size - sizeof(struct mbox_msghdr));
			af_req->pcifunc = msg->pcifunc;
			routed++;
		}
		offset = mbox->rx_start + msg->next_msgoff;
	}

	if (routed > 0) {
		plt_base_dbg("pf:%d routed %d messages from vf:%d to AF",
			     dev->pf, routed, vf);
		af_pf_wait_msg(dev, vf, routed);
		mbox_reset(dev->mbox, 0);
	}

	/* Send mbox responses to VF */
	if (mdev->num_msgs) {
		plt_base_dbg("pf:%d reply %d messages to vf:%d", dev->pf,
			     mdev->num_msgs, vf);
		mbox_msg_send(mbox, vf);
	}

	return i;
}
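
/*
 * Routing overview: only the PF owns a mailbox to the AF, so it proxies
 * requests and responses for its VFs:
 *
 *	VF --(VF<->PF mbox)--> vf_pf_process_msgs() --(PF<->AF mbox)--> AF
 *	VF <-(VF<->PF mbox)--- af_pf_wait_msg() <----(PF<->AF mbox)---- AF
 */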

static int
vf_pf_process_up_msgs(struct dev *dev, uint16_t vf)
{
	struct mbox *mbox = &dev->mbox_vfpf_up;
	struct mbox_dev *mdev = &mbox->dev[vf];
	struct mbox_hdr *req_hdr;
	struct mbox_msghdr *msg;
	int msgs_acked = 0;
	int offset;
	uint16_t i;

	req_hdr = (struct mbox_hdr *)((uintptr_t)mdev->mbase + mbox->rx_start);
	if (req_hdr->num_msgs == 0)
		return 0;

	offset = mbox->rx_start + PLT_ALIGN(sizeof(*req_hdr), MBOX_MSG_ALIGN);

	for (i = 0; i < req_hdr->num_msgs; i++) {
		msg = (struct mbox_msghdr *)((uintptr_t)mdev->mbase + offset);

		msgs_acked++;
		/* RVU_PF_FUNC_S */
		msg->pcifunc = dev_pf_func(dev->pf, vf);

		switch (msg->id) {
		case MBOX_MSG_CGX_LINK_EVENT:
			plt_base_dbg("PF: Msg 0x%x (%s) fn:0x%x (pf:%d,vf:%d)",
				     msg->id, mbox_id2name(msg->id),
				     msg->pcifunc, dev_get_pf(msg->pcifunc),
				     dev_get_vf(msg->pcifunc));
			break;
		case MBOX_MSG_CGX_PTP_RX_INFO:
			plt_base_dbg("PF: Msg 0x%x (%s) fn:0x%x (pf:%d,vf:%d)",
				     msg->id, mbox_id2name(msg->id),
				     msg->pcifunc, dev_get_pf(msg->pcifunc),
				     dev_get_vf(msg->pcifunc));
			break;
		default:
			plt_err("Not handled UP msg 0x%x (%s) func:0x%x",
				msg->id, mbox_id2name(msg->id), msg->pcifunc);
		}
		offset = mbox->rx_start + msg->next_msgoff;
	}
	mbox_reset(mbox, vf);
	mdev->msgs_acked = msgs_acked;
	plt_wmb();

	return i;
}

static void
roc_vf_pf_mbox_handle_msg(void *param)
{
	uint16_t vf, max_vf, max_bits;
	struct dev *dev = param;

	max_bits = sizeof(dev->intr.bits[0]) * 8;
	max_vf = max_bits * MAX_VFPF_DWORD_BITS;

	for (vf = 0; vf < max_vf; vf++) {
		if (dev->intr.bits[vf / max_bits] & BIT_ULL(vf % max_bits)) {
			plt_base_dbg("Process vf:%d request (pf:%d, vf:%d)", vf,
				     dev->pf, dev->vf);
			vf_pf_process_msgs(dev, vf);
			/* UP messages */
			vf_pf_process_up_msgs(dev, vf);
			dev->intr.bits[vf / max_bits] &=
				~(BIT_ULL(vf % max_bits));
		}
	}
	dev->timer_set = 0;
}
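
/*
 * Bitmap indexing example: with 64-bit words, vf = 70 maps to
 * dev->intr.bits[70 / 64] == bits[1], and the bit tested/cleared is
 * BIT_ULL(70 % 64) == BIT_ULL(6).
 */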

static void
roc_vf_pf_mbox_irq(void *param)
{
	struct dev *dev = param;
	bool alarm_set = false;
	uint64_t intr;
	int vfpf;

	for (vfpf = 0; vfpf < MAX_VFPF_DWORD_BITS; ++vfpf) {
		intr = plt_read64(dev->bar2 + RVU_PF_VFPF_MBOX_INTX(vfpf));
		if (!intr)
			continue;

		plt_base_dbg("vfpf: %d intr: 0x%" PRIx64 " (pf:%d, vf:%d)",
			     vfpf, intr, dev->pf, dev->vf);

		/* Save and clear intr bits */
		dev->intr.bits[vfpf] |= intr;
		plt_write64(intr, dev->bar2 + RVU_PF_VFPF_MBOX_INTX(vfpf));
		alarm_set = true;
	}

	if (!dev->timer_set && alarm_set) {
		dev->timer_set = 1;
		/* Start timer to handle messages */
		plt_alarm_set(VF_PF_MBOX_TIMER_MS, roc_vf_pf_mbox_handle_msg,
			      dev);
	}
}

static void
process_msgs(struct dev *dev, struct mbox *mbox)
{
	struct mbox_dev *mdev = &mbox->dev[0];
	struct mbox_hdr *req_hdr;
	struct mbox_msghdr *msg;
	int msgs_acked = 0;
	int offset;
	uint16_t i;

	req_hdr = (struct mbox_hdr *)((uintptr_t)mdev->mbase + mbox->rx_start);
	if (req_hdr->num_msgs == 0)
		return;

	offset = mbox->rx_start + PLT_ALIGN(sizeof(*req_hdr), MBOX_MSG_ALIGN);
	for (i = 0; i < req_hdr->num_msgs; i++) {
		msg = (struct mbox_msghdr *)((uintptr_t)mdev->mbase + offset);

		msgs_acked++;
		plt_base_dbg("Message 0x%x (%s) pf:%d/vf:%d", msg->id,
			     mbox_id2name(msg->id), dev_get_pf(msg->pcifunc),
			     dev_get_vf(msg->pcifunc));

		switch (msg->id) {
		/* Add message IDs that are handled here */
		case MBOX_MSG_READY:
			/* Get our identity */
			dev->pf_func = msg->pcifunc;
			break;

		default:
			if (msg->rc)
				plt_err("Message (%s) response has err=%d",
					mbox_id2name(msg->id), msg->rc);
			break;
		}
		offset = mbox->rx_start + msg->next_msgoff;
	}

	mbox_reset(mbox, 0);
	/* Update acked in case someone is waiting for a message */
	mdev->msgs_acked = msgs_acked;
	plt_wmb();
}
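
/*
 * The blocking request path in roc_mbox.c (mbox_process() and friends)
 * pairs with the msgs_acked update above: the sender queues requests,
 * rings the doorbell and then polls until this handler acknowledges them.
 * A minimal sketch of that wait, assuming the roc_mbox.c poll loop:
 *
 *	uint16_t sent = 1;		// requests queued before mbox_msg_send()
 *	while (mdev->msgs_acked != sent)
 *		plt_delay_ms(1);	// this IRQ path bumps msgs_acked
 */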

/* Copies the message received from AF and sends it to VF */
static void
pf_vf_mbox_send_up_msg(struct dev *dev, void *rec_msg)
{
	uint16_t max_bits = sizeof(dev->active_vfs[0]) * 8;
	struct mbox *vf_mbox = &dev->mbox_vfpf_up;
	struct msg_req *msg = rec_msg;
	struct mbox_msghdr *vf_msg;
	uint16_t vf;
	size_t size;

	size = PLT_ALIGN(mbox_id2size(msg->hdr.id), MBOX_MSG_ALIGN);
	/* Send UP message to all VFs */
	for (vf = 0; vf < vf_mbox->ndevs; vf++) {
		/* VF active */
		if (!(dev->active_vfs[vf / max_bits] & BIT_ULL(vf % max_bits)))
			continue;

		plt_base_dbg("(%s) size: %zx to VF: %d",
			     mbox_id2name(msg->hdr.id), size, vf);

		/* Reserve PF/VF mbox message */
		vf_msg = mbox_alloc_msg(vf_mbox, vf, size);
		if (!vf_msg) {
			plt_err("Failed to alloc VF%d UP message", vf);
			continue;
		}
		mbox_req_init(msg->hdr.id, vf_msg);

		/*
		 * Copy message from AF<->PF UP mbox
		 * to PF<->VF UP mbox
		 */
		mbox_memcpy((uint8_t *)vf_msg + sizeof(struct mbox_msghdr),
			    (uint8_t *)msg + sizeof(struct mbox_msghdr),
			    size - sizeof(struct mbox_msghdr));

		vf_msg->rc = msg->hdr.rc;
		/* Set PF to be the sender */
		vf_msg->pcifunc = dev->pf_func;

		/* Send to VF */
		mbox_msg_send(vf_mbox, vf);
	}
}

static int
mbox_up_handler_cgx_link_event(struct dev *dev, struct cgx_link_info_msg *msg,
			       struct msg_rsp *rsp)
{
	struct cgx_link_user_info *linfo = &msg->link_info;
	void *roc_nix = dev->roc_nix;

	plt_base_dbg("pf:%d/vf:%d NIC Link %s --> 0x%x (%s) from: pf:%d/vf:%d",
		     dev_get_pf(dev->pf_func), dev_get_vf(dev->pf_func),
		     linfo->link_up ? "UP" : "DOWN", msg->hdr.id,
		     mbox_id2name(msg->hdr.id), dev_get_pf(msg->hdr.pcifunc),
		     dev_get_vf(msg->hdr.pcifunc));

	/* PF gets link notification from AF */
	if (dev_get_pf(msg->hdr.pcifunc) == 0) {
		if (dev->ops && dev->ops->link_status_update)
			dev->ops->link_status_update(roc_nix, linfo);

		/* Forward the same message as received from AF to VF */
		pf_vf_mbox_send_up_msg(dev, msg);
	} else {
		/* VF gets link up notification */
		if (dev->ops && dev->ops->link_status_update)
			dev->ops->link_status_update(roc_nix, linfo);
	}

	rsp->hdr.rc = 0;
	return 0;
}

static int
mbox_up_handler_cgx_ptp_rx_info(struct dev *dev,
				struct cgx_ptp_rx_info_msg *msg,
				struct msg_rsp *rsp)
{
	void *roc_nix = dev->roc_nix;

	plt_base_dbg("pf:%d/vf:%d PTP mode %s --> 0x%x (%s) from: pf:%d/vf:%d",
		     dev_get_pf(dev->pf_func), dev_get_vf(dev->pf_func),
		     msg->ptp_en ? "ENABLED" : "DISABLED", msg->hdr.id,
		     mbox_id2name(msg->hdr.id), dev_get_pf(msg->hdr.pcifunc),
		     dev_get_vf(msg->hdr.pcifunc));

	/* PF gets PTP notification from AF */
	if (dev_get_pf(msg->hdr.pcifunc) == 0) {
		if (dev->ops && dev->ops->ptp_info_update)
			dev->ops->ptp_info_update(roc_nix, msg->ptp_en);

		/* Forward the same message as received from AF to VF */
		pf_vf_mbox_send_up_msg(dev, msg);
	} else {
		/* VF gets PTP notification */
		if (dev->ops && dev->ops->ptp_info_update)
			dev->ops->ptp_info_update(roc_nix, msg->ptp_en);
	}

	rsp->hdr.rc = 0;
	return 0;
}

static int
mbox_process_msgs_up(struct dev *dev, struct mbox_msghdr *req)
{
	/* Check if valid; if not, reply with an invalid msg */
	if (req->sig != MBOX_REQ_SIG)
		return -EIO;

	switch (req->id) {
	default:
		reply_invalid_msg(&dev->mbox_up, 0, 0, req->id);
		break;
#define M(_name, _id, _fn_name, _req_type, _rsp_type)                          \
	case _id: {                                                            \
		struct _rsp_type *rsp;                                         \
		int err;                                                       \
		rsp = (struct _rsp_type *)mbox_alloc_msg(                      \
			&dev->mbox_up, 0, sizeof(struct _rsp_type));           \
		if (!rsp)                                                      \
			return -ENOMEM;                                        \
		rsp->hdr.id = _id;                                             \
		rsp->hdr.sig = MBOX_RSP_SIG;                                   \
		rsp->hdr.pcifunc = dev->pf_func;                               \
		rsp->hdr.rc = 0;                                               \
		err = mbox_up_handler_##_fn_name(dev, (struct _req_type *)req, \
						 rsp);                         \
		return err;                                                    \
	}
		MBOX_UP_CGX_MESSAGES
#undef M
	}

	return -ENODEV;
}
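
/*
 * For reference, M() expands each entry of MBOX_UP_CGX_MESSAGES into a
 * case statement. A sketch of the expansion for the CGX link event entry
 * (types taken from the handler above; the numeric ID value is elided):
 *
 *	case MBOX_MSG_CGX_LINK_EVENT: {
 *		struct msg_rsp *rsp;
 *		int err;
 *
 *		rsp = (struct msg_rsp *)mbox_alloc_msg(
 *			&dev->mbox_up, 0, sizeof(struct msg_rsp));
 *		if (!rsp)
 *			return -ENOMEM;
 *		...
 *		err = mbox_up_handler_cgx_link_event(
 *			dev, (struct cgx_link_info_msg *)req, rsp);
 *		return err;
 *	}
 */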

static void
process_msgs_up(struct dev *dev, struct mbox *mbox)
{
	struct mbox_dev *mdev = &mbox->dev[0];
	struct mbox_hdr *req_hdr;
	struct mbox_msghdr *msg;
	int i, err, offset;

	req_hdr = (struct mbox_hdr *)((uintptr_t)mdev->mbase + mbox->rx_start);
	if (req_hdr->num_msgs == 0)
		return;

	offset = mbox->rx_start + PLT_ALIGN(sizeof(*req_hdr), MBOX_MSG_ALIGN);
	for (i = 0; i < req_hdr->num_msgs; i++) {
		msg = (struct mbox_msghdr *)((uintptr_t)mdev->mbase + offset);

		plt_base_dbg("Message 0x%x (%s) pf:%d/vf:%d", msg->id,
			     mbox_id2name(msg->id), dev_get_pf(msg->pcifunc),
			     dev_get_vf(msg->pcifunc));
		err = mbox_process_msgs_up(dev, msg);
		if (err)
			plt_err("Error %d handling 0x%x (%s)", err, msg->id,
				mbox_id2name(msg->id));
		offset = mbox->rx_start + msg->next_msgoff;
	}
	/* Send mbox responses */
	if (mdev->num_msgs) {
		plt_base_dbg("Reply num_msgs:%d", mdev->num_msgs);
		mbox_msg_send(mbox, 0);
	}
}

static void
roc_pf_vf_mbox_irq(void *param)
{
	struct dev *dev = param;
	uint64_t intr;

	intr = plt_read64(dev->bar2 + RVU_VF_INT);
	if (intr == 0)
		plt_base_dbg("Proceeding to check mbox UP messages if any");

	plt_write64(intr, dev->bar2 + RVU_VF_INT);
	plt_base_dbg("Irq 0x%" PRIx64 "(pf:%d,vf:%d)", intr, dev->pf, dev->vf);

	/* First process all configuration messages */
	process_msgs(dev, dev->mbox);

	/* Process Uplink messages */
	process_msgs_up(dev, &dev->mbox_up);
}

static void
roc_af_pf_mbox_irq(void *param)
{
	struct dev *dev = param;
	uint64_t intr;

	intr = plt_read64(dev->bar2 + RVU_PF_INT);
	if (intr == 0)
		plt_base_dbg("Proceeding to check mbox UP messages if any");

	plt_write64(intr, dev->bar2 + RVU_PF_INT);
	plt_base_dbg("Irq 0x%" PRIx64 "(pf:%d,vf:%d)", intr, dev->pf, dev->vf);

	/* First process all configuration messages */
	process_msgs(dev, dev->mbox);

	/* Process Uplink messages */
	process_msgs_up(dev, &dev->mbox_up);
}

static int
mbox_register_pf_irq(struct plt_pci_device *pci_dev, struct dev *dev)
{
	struct plt_intr_handle *intr_handle = pci_dev->intr_handle;
	int i, rc;

	/* HW clear irq */
	for (i = 0; i < MAX_VFPF_DWORD_BITS; ++i)
		plt_write64(~0ull,
			    dev->bar2 + RVU_PF_VFPF_MBOX_INT_ENA_W1CX(i));

	plt_write64(~0ull, dev->bar2 + RVU_PF_INT_ENA_W1C);

	dev->timer_set = 0;

	/* MBOX interrupt for VF(0...63) <-> PF */
	rc = dev_irq_register(intr_handle, roc_vf_pf_mbox_irq, dev,
			      RVU_PF_INT_VEC_VFPF_MBOX0);

	if (rc) {
		plt_err("Failed to register PF(VF0-63) mbox irq");
		return rc;
	}
	/* MBOX interrupt for VF(64...127) <-> PF */
	rc = dev_irq_register(intr_handle, roc_vf_pf_mbox_irq, dev,
			      RVU_PF_INT_VEC_VFPF_MBOX1);

	if (rc) {
		plt_err("Failed to register PF(VF64-127) mbox irq");
		return rc;
	}
	/* MBOX interrupt AF <-> PF */
	rc = dev_irq_register(intr_handle, roc_af_pf_mbox_irq, dev,
			      RVU_PF_INT_VEC_AFPF_MBOX);
	if (rc) {
		plt_err("Failed to register AF<->PF mbox irq");
		return rc;
	}

	/* HW enable intr */
	for (i = 0; i < MAX_VFPF_DWORD_BITS; ++i)
		plt_write64(~0ull,
			    dev->bar2 + RVU_PF_VFPF_MBOX_INT_ENA_W1SX(i));

	plt_write64(~0ull, dev->bar2 + RVU_PF_INT);
	plt_write64(~0ull, dev->bar2 + RVU_PF_INT_ENA_W1S);

	return rc;
}

static int
mbox_register_vf_irq(struct plt_pci_device *pci_dev, struct dev *dev)
{
	struct plt_intr_handle *intr_handle = pci_dev->intr_handle;
	int rc;

	/* Clear irq */
	plt_write64(~0ull, dev->bar2 + RVU_VF_INT_ENA_W1C);

	/* MBOX interrupt PF <-> VF */
	rc = dev_irq_register(intr_handle, roc_pf_vf_mbox_irq, dev,
			      RVU_VF_INT_VEC_MBOX);
	if (rc) {
		plt_err("Failed to register PF<->VF mbox irq");
		return rc;
	}

	/* HW enable intr */
	plt_write64(~0ull, dev->bar2 + RVU_VF_INT);
	plt_write64(~0ull, dev->bar2 + RVU_VF_INT_ENA_W1S);

	return rc;
}

static int
mbox_register_irq(struct plt_pci_device *pci_dev, struct dev *dev)
{
	if (dev_is_vf(dev))
		return mbox_register_vf_irq(pci_dev, dev);
	else
		return mbox_register_pf_irq(pci_dev, dev);
}

static void
mbox_unregister_pf_irq(struct plt_pci_device *pci_dev, struct dev *dev)
{
	struct plt_intr_handle *intr_handle = pci_dev->intr_handle;
	int i;

	/* HW clear irq */
	for (i = 0; i < MAX_VFPF_DWORD_BITS; ++i)
		plt_write64(~0ull,
			    dev->bar2 + RVU_PF_VFPF_MBOX_INT_ENA_W1CX(i));

	plt_write64(~0ull, dev->bar2 + RVU_PF_INT_ENA_W1C);

	dev->timer_set = 0;

	plt_alarm_cancel(roc_vf_pf_mbox_handle_msg, dev);

	/* Unregister the interrupt handler for each vector */
	/* MBOX interrupt for VF(0...63) <-> PF */
	dev_irq_unregister(intr_handle, roc_vf_pf_mbox_irq, dev,
			   RVU_PF_INT_VEC_VFPF_MBOX0);

	/* MBOX interrupt for VF(64...127) <-> PF */
	dev_irq_unregister(intr_handle, roc_vf_pf_mbox_irq, dev,
			   RVU_PF_INT_VEC_VFPF_MBOX1);

	/* MBOX interrupt AF <-> PF */
	dev_irq_unregister(intr_handle, roc_af_pf_mbox_irq, dev,
			   RVU_PF_INT_VEC_AFPF_MBOX);
}

static void
mbox_unregister_vf_irq(struct plt_pci_device *pci_dev, struct dev *dev)
{
	struct plt_intr_handle *intr_handle = pci_dev->intr_handle;

	/* Clear irq */
	plt_write64(~0ull, dev->bar2 + RVU_VF_INT_ENA_W1C);

	/* Unregister the interrupt handler */
	dev_irq_unregister(intr_handle, roc_pf_vf_mbox_irq, dev,
			   RVU_VF_INT_VEC_MBOX);
}

static void
mbox_unregister_irq(struct plt_pci_device *pci_dev, struct dev *dev)
{
	if (dev_is_vf(dev))
		mbox_unregister_vf_irq(pci_dev, dev);
	else
		mbox_unregister_pf_irq(pci_dev, dev);
}

static int
vf_flr_send_msg(struct dev *dev, uint16_t vf)
{
	struct mbox *mbox = dev->mbox;
	struct msg_req *req;
	int rc;

	req = mbox_alloc_msg_vf_flr(mbox);
	if (req == NULL)
		return -ENOSPC;
	/* Overwrite pcifunc to indicate VF */
	req->hdr.pcifunc = dev_pf_func(dev->pf, vf);

	/* Sync message in interrupt context */
	rc = pf_af_sync_msg(dev, NULL);
	if (rc)
		plt_err("Failed to send VF FLR mbox msg, rc=%d", rc);

	return rc;
}

static void
roc_pf_vf_flr_irq(void *param)
{
	struct dev *dev = (struct dev *)param;
	uint16_t max_vf, vf;
	uintptr_t bar2;
	uint64_t intr;
	int i;

	max_vf = (dev->maxvf > 0) ? dev->maxvf : 64;
	bar2 = dev->bar2;

	plt_base_dbg("FLR VF interrupt: max_vf: %d", max_vf);

	for (i = 0; i < MAX_VFPF_DWORD_BITS; ++i) {
		intr = plt_read64(bar2 + RVU_PF_VFFLR_INTX(i));
		if (!intr)
			continue;

		for (vf = 0; vf < max_vf; vf++) {
			if (!(intr & (1ULL << vf)))
				continue;

			plt_base_dbg("FLR: i :%d intr: 0x%" PRIx64 ", vf-%d", i,
				     intr, (64 * i + vf));
			/* Clear interrupt */
			plt_write64(BIT_ULL(vf), bar2 + RVU_PF_VFFLR_INTX(i));
			/* Disable the interrupt */
			plt_write64(BIT_ULL(vf),
				    bar2 + RVU_PF_VFFLR_INT_ENA_W1CX(i));
			/* Inform AF about VF reset */
			vf_flr_send_msg(dev, vf);

			/* Signal FLR finish */
			plt_write64(BIT_ULL(vf), bar2 + RVU_PF_VFTRPENDX(i));
			/* Enable interrupt */
			plt_write64(~0ull, bar2 + RVU_PF_VFFLR_INT_ENA_W1SX(i));
		}
	}
}

static int
vf_flr_unregister_irqs(struct plt_pci_device *pci_dev, struct dev *dev)
{
	struct plt_intr_handle *intr_handle = pci_dev->intr_handle;
	int i;

	plt_base_dbg("Unregister VF FLR interrupts for %s", pci_dev->name);

	/* HW clear irq */
	for (i = 0; i < MAX_VFPF_DWORD_BITS; i++)
		plt_write64(~0ull, dev->bar2 + RVU_PF_VFFLR_INT_ENA_W1CX(i));

	dev_irq_unregister(intr_handle, roc_pf_vf_flr_irq, dev,
			   RVU_PF_INT_VEC_VFFLR0);

	dev_irq_unregister(intr_handle, roc_pf_vf_flr_irq, dev,
			   RVU_PF_INT_VEC_VFFLR1);

	return 0;
}

static int
vf_flr_register_irqs(struct plt_pci_device *pci_dev, struct dev *dev)
{
	struct plt_intr_handle *handle = pci_dev->intr_handle;
	int i, rc;

	plt_base_dbg("Register VF FLR interrupts for %s", pci_dev->name);

	rc = dev_irq_register(handle, roc_pf_vf_flr_irq, dev,
			      RVU_PF_INT_VEC_VFFLR0);
	if (rc)
		plt_err("Failed to init RVU_PF_INT_VEC_VFFLR0 rc=%d", rc);

	rc = dev_irq_register(handle, roc_pf_vf_flr_irq, dev,
			      RVU_PF_INT_VEC_VFFLR1);
	if (rc)
		plt_err("Failed to init RVU_PF_INT_VEC_VFFLR1 rc=%d", rc);

	/* Enable HW interrupt */
	for (i = 0; i < MAX_VFPF_DWORD_BITS; ++i) {
		plt_write64(~0ull, dev->bar2 + RVU_PF_VFFLR_INTX(i));
		plt_write64(~0ull, dev->bar2 + RVU_PF_VFTRPENDX(i));
		plt_write64(~0ull, dev->bar2 + RVU_PF_VFFLR_INT_ENA_W1SX(i));
	}
	return 0;
}

static void
clear_rvum_interrupts(struct dev *dev)
{
	uint64_t intr;
	int i;

	if (dev_is_vf(dev)) {
		/* Clear VF mbox interrupt */
		intr = plt_read64(dev->bar2 + RVU_VF_INT);
		if (intr)
			plt_write64(intr, dev->bar2 + RVU_VF_INT);
	} else {
		/* Clear AF PF interrupt line */
		intr = plt_read64(dev->bar2 + RVU_PF_INT);
		if (intr)
			plt_write64(intr, dev->bar2 + RVU_PF_INT);
		for (i = 0; i < MAX_VFPF_DWORD_BITS; ++i) {
			/* Clear MBOX interrupts */
			intr = plt_read64(dev->bar2 + RVU_PF_VFPF_MBOX_INTX(i));
			if (intr)
				plt_write64(intr,
					    dev->bar2 +
					    RVU_PF_VFPF_MBOX_INTX(i));
			/* Clear VF FLR interrupts */
			intr = plt_read64(dev->bar2 + RVU_PF_VFFLR_INTX(i));
			if (intr)
				plt_write64(intr,
					    dev->bar2 + RVU_PF_VFFLR_INTX(i));
		}
	}
}

int
dev_active_vfs(struct dev *dev)
{
	int i, count = 0;

	for (i = 0; i < MAX_VFPF_DWORD_BITS; i++)
		count += __builtin_popcountll(dev->active_vfs[i]);

	return count;
}
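
/*
 * Example: with MAX_VFPF_DWORD_BITS == 2 and
 * active_vfs = { 0x5, 0x1 } (VF0, VF2 and VF64 marked ready),
 * dev_active_vfs() returns 3.
 */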

static void
dev_vf_hwcap_update(struct plt_pci_device *pci_dev, struct dev *dev)
{
	switch (pci_dev->id.device_id) {
	case PCI_DEVID_CNXK_RVU_PF:
		break;
	case PCI_DEVID_CNXK_RVU_SSO_TIM_VF:
	case PCI_DEVID_CNXK_RVU_NPA_VF:
	case PCI_DEVID_CN10K_RVU_CPT_VF:
	case PCI_DEVID_CN9K_RVU_CPT_VF:
	case PCI_DEVID_CNXK_RVU_AF_VF:
	case PCI_DEVID_CNXK_RVU_VF:
	case PCI_DEVID_CNXK_RVU_SDP_VF:
		dev->hwcap |= DEV_HWCAP_F_VF;
		break;
	}
}

static uintptr_t
dev_vf_mbase_get(struct plt_pci_device *pci_dev, struct dev *dev)
{
	void *vf_mbase = NULL;
	uintptr_t pa;

	if (dev_is_vf(dev))
		return 0;

	/* For CN10K onwards, it is just after PF MBOX */
	if (!roc_model_is_cn9k())
		return dev->bar4 + MBOX_SIZE;

	pa = plt_read64(dev->bar2 + RVU_PF_VF_BAR4_ADDR);
	if (!pa) {
		plt_err("Invalid VF mbox base pa");
		return pa;
	}

	vf_mbase = mbox_mem_map(pa, MBOX_SIZE * pci_dev->max_vfs);
	if (vf_mbase == MAP_FAILED) {
		plt_err("Failed to mmap vf mbase at pa 0x%lx, rc=%d", pa,
			errno);
		return 0;
	}
	return (uintptr_t)vf_mbase;
}

static void
dev_vf_mbase_put(struct plt_pci_device *pci_dev, uintptr_t vf_mbase)
{
	if (!vf_mbase || !pci_dev->max_vfs || !roc_model_is_cn9k())
		return;

	mbox_mem_unmap((void *)vf_mbase, MBOX_SIZE * pci_dev->max_vfs);
}

static int
dev_setup_shared_lmt_region(struct mbox *mbox, bool valid_iova, uint64_t iova)
{
	struct lmtst_tbl_setup_req *req;

	req = mbox_alloc_msg_lmtst_tbl_setup(mbox);
	if (!req)
		return -ENOSPC;

	/* The pcifunc field selects the primary pcifunc whose LMT address
	 * is to be shared. When the call carries a valid IOVA, the pcifunc
	 * field is unused.
	 */
	req->pcifunc = valid_iova ? 0 : idev_lmt_pffunc_get();
	req->use_local_lmt_region = valid_iova;
	req->lmt_iova = iova;

	return mbox_process(mbox);
}
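
/*
 * The two call modes, as used by dev_lmt_setup() below (sketch):
 *
 *	// Secondary pf_func: adopt the primary pf_func's shared LMT region
 *	rc = dev_setup_shared_lmt_region(dev->mbox, false, 0);
 *
 *	// Local region: hand our own IOVA to the kernel AF driver
 *	rc = dev_setup_shared_lmt_region(dev->mbox, true, mz->iova);
 */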

/* Total no. of lines * size of each LMT line */
#define LMT_REGION_SIZE (ROC_NUM_LMT_LINES * ROC_LMT_LINE_SZ)
static int
dev_lmt_setup(struct dev *dev)
{
	char name[PLT_MEMZONE_NAMESIZE];
	const struct plt_memzone *mz;
	struct idev_cfg *idev;
	int rc;

	if (roc_model_is_cn9k()) {
		dev->lmt_base = dev->bar2 + (RVU_BLOCK_ADDR_LMT << 20);
		return 0;
	}

	/* [CN10K, .) */

	/* Set up the common LMT region from the second pf_func onwards. */
	if (!dev->disable_shared_lmt && idev_lmt_pffunc_get() &&
	    dev->pf_func != idev_lmt_pffunc_get()) {
		rc = dev_setup_shared_lmt_region(dev->mbox, false, 0);
		if (!rc) {
			/* On success, update the lmt base of the secondary
			 * pf_func with the primary pf_func's lmt base.
			 */
			dev->lmt_base = roc_idev_lmt_base_addr_get();
			return rc;
		}
		plt_err("Failed to setup shared lmt region, pf_func %d err %d; "
			"using a separate LMT region per pf_func",
			dev->pf_func, rc);
	}

	/* Allocate memory for the LMT region */
	sprintf(name, "LMT_MAP%x", dev->pf_func);

	/* The alignment ensures correct masking when resetting to the lmt
	 * base of a core after all lmt lines under that core are used.
	 * An alignment of LMT_REGION_SIZE handles the case where all lines
	 * are used by one core.
	 */
	mz = plt_lmt_region_reserve_aligned(name, LMT_REGION_SIZE,
					    LMT_REGION_SIZE);
	if (!mz) {
		plt_err("Memory alloc failed: %s", strerror(errno));
		goto fail;
	}

	/* Share the IOVA address with the kernel */
	rc = dev_setup_shared_lmt_region(dev->mbox, true, mz->iova);
	if (rc) {
		errno = rc;
		goto free;
	}

	dev->lmt_base = mz->iova;
	dev->lmt_mz = mz;
	/* The base LMT address should be chosen only from those pci funcs
	 * which participate in LMT shared mode.
	 */
	if (!dev->disable_shared_lmt) {
		idev = idev_get_cfg();
		if (!idev) {
			errno = EFAULT;
			goto free;
		}

		if (!__atomic_load_n(&idev->lmt_pf_func, __ATOMIC_ACQUIRE)) {
			idev->lmt_base_addr = dev->lmt_base;
			idev->lmt_pf_func = dev->pf_func;
			idev->num_lmtlines = RVU_LMT_LINE_MAX;
		}
	}

	return 0;
free:
	plt_memzone_free(mz);
fail:
	return -errno;
}
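
/*
 * Why the LMT_REGION_SIZE alignment matters (illustrative sketch under the
 * comment above, not a helper from this driver): with the base aligned to
 * the region size, wrapping back to the first LMT line is a single mask:
 *
 *	static inline uint64_t
 *	lmt_region_base(uint64_t lmt_addr)
 *	{
 *		return lmt_addr & ~((uint64_t)LMT_REGION_SIZE - 1);
 *	}
 */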

int
dev_init(struct dev *dev, struct plt_pci_device *pci_dev)
{
	int direction, up_direction, rc;
	uintptr_t bar2, bar4, mbox;
	uintptr_t vf_mbase = 0;
	uint64_t intr_offset;

	bar2 = (uintptr_t)pci_dev->mem_resource[2].addr;
	bar4 = (uintptr_t)pci_dev->mem_resource[4].addr;
	if (bar2 == 0 || bar4 == 0) {
		plt_err("Failed to get PCI bars");
		rc = -ENODEV;
		goto error;
	}

	/* Trigger a fault on the bar2 and bar4 regions
	 * to avoid a BUG_ON in remap_pfn_range()
	 * in recent kernels.
	 */
	*(volatile uint64_t *)bar2;
	*(volatile uint64_t *)bar4;

	/* Check that the ROC model is supported */
	if (roc_model->flag == 0) {
		rc = UTIL_ERR_INVALID_MODEL;
		goto error;
	}

	dev->maxvf = pci_dev->max_vfs;
	dev->bar2 = bar2;
	dev->bar4 = bar4;
	dev_vf_hwcap_update(pci_dev, dev);

	if (dev_is_vf(dev)) {
		mbox = (roc_model_is_cn9k() ?
			bar4 : (bar2 + RVU_VF_MBOX_REGION));
		direction = MBOX_DIR_VFPF;
		up_direction = MBOX_DIR_VFPF_UP;
		intr_offset = RVU_VF_INT;
	} else {
		mbox = bar4;
		direction = MBOX_DIR_PFAF;
		up_direction = MBOX_DIR_PFAF_UP;
		intr_offset = RVU_PF_INT;
	}

	/* Clear all RVUM interrupts */
	clear_rvum_interrupts(dev);

	/* Initialize the local mbox */
	rc = mbox_init(&dev->mbox_local, mbox, bar2, direction, 1, intr_offset);
	if (rc)
		goto error;
	dev->mbox = &dev->mbox_local;

	rc = mbox_init(&dev->mbox_up, mbox, bar2, up_direction, 1, intr_offset);
	if (rc)
		goto mbox_fini;

	/* Register mbox interrupts */
	rc = mbox_register_irq(pci_dev, dev);
	if (rc)
		goto mbox_fini;

	/* Check the readiness of PF/VF */
	rc = send_ready_msg(dev->mbox, &dev->pf_func);
	if (rc)
		goto mbox_unregister;

	dev->pf = dev_get_pf(dev->pf_func);
	dev->vf = dev_get_vf(dev->pf_func);
	memset(&dev->active_vfs, 0, sizeof(dev->active_vfs));

	/* Allocate memory for device ops */
	dev->ops = plt_zmalloc(sizeof(struct dev_ops), 0);
	if (dev->ops == NULL) {
		rc = -ENOMEM;
		goto mbox_unregister;
	}

	/* VF devices found under this PF device */
	if (pci_dev->max_vfs > 0) {
		/* Remap mbox area for all VFs */
		vf_mbase = dev_vf_mbase_get(pci_dev, dev);
		if (!vf_mbase) {
			rc = -ENODEV;
			goto mbox_unregister;
		}
		/* Init mbox object */
		rc = mbox_init(&dev->mbox_vfpf, vf_mbase, bar2, MBOX_DIR_PFVF,
			       pci_dev->max_vfs, intr_offset);
		if (rc)
			goto iounmap;

		/* PF -> VF UP messages */
		rc = mbox_init(&dev->mbox_vfpf_up, vf_mbase, bar2,
			       MBOX_DIR_PFVF_UP, pci_dev->max_vfs, intr_offset);
		if (rc)
			goto iounmap;
	}

	/* Register VF-FLR irq handlers */
	if (!dev_is_vf(dev)) {
		rc = vf_flr_register_irqs(pci_dev, dev);
		if (rc)
			goto iounmap;
	}
	dev->mbox_active = 1;

	rc = npa_lf_init(dev, pci_dev);
	if (rc)
		goto iounmap;

	/* Setup LMT line base */
	rc = dev_lmt_setup(dev);
	if (rc)
		goto iounmap;

	return rc;
iounmap:
	dev_vf_mbase_put(pci_dev, vf_mbase);
mbox_unregister:
	mbox_unregister_irq(pci_dev, dev);
	if (dev->ops)
		plt_free(dev->ops);
mbox_fini:
	mbox_fini(dev->mbox);
	mbox_fini(&dev->mbox_up);
error:
	return rc;
}
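
/*
 * Typical probe/remove pairing (sketch; the wrapper struct and flow are
 * assumptions for illustration, not code from this file):
 *
 *	struct my_priv {
 *		struct dev dev;
 *	};
 *
 *	rc = dev_init(&priv->dev, pci_dev);	// maps bars, inits mboxes,
 *	if (rc)					// irqs, NPA LF and LMT base
 *		return rc;
 *	...
 *	rc = dev_fini(&priv->dev, pci_dev);	// -EAGAIN while the NPA LF
 *						// still has other references
 */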

int
dev_fini(struct dev *dev, struct plt_pci_device *pci_dev)
{
	struct plt_intr_handle *intr_handle = pci_dev->intr_handle;
	struct mbox *mbox;

	/* Check if this dev hosts npalf and has 1+ refs */
	if (idev_npa_lf_active(dev) > 1)
		return -EAGAIN;

	/* Clear references to this pci dev */
	npa_lf_fini();

	/* Release memory allocated for the lmt region */
	if (dev->lmt_mz)
		plt_memzone_free(dev->lmt_mz);

	mbox_unregister_irq(pci_dev, dev);

	if (!dev_is_vf(dev))
		vf_flr_unregister_irqs(pci_dev, dev);
	/* Release PF - VF */
	mbox = &dev->mbox_vfpf;
	if (mbox->hwbase && mbox->dev)
		dev_vf_mbase_put(pci_dev, mbox->hwbase);

	if (dev->ops)
		plt_free(dev->ops);

	mbox_fini(mbox);
	mbox = &dev->mbox_vfpf_up;
	mbox_fini(mbox);

	/* Release PF - AF */
	mbox = dev->mbox;
	mbox_fini(mbox);
	mbox = &dev->mbox_up;
	mbox_fini(mbox);
	dev->mbox_active = 0;

	/* Disable MSIX vectors */
	dev_irqs_disable(intr_handle);
	return 0;
}