/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2019 Marvell International Ltd.
 */

#include <fcntl.h>
#include <inttypes.h>
#include <sys/mman.h>
#include <unistd.h>

#include <rte_alarm.h>
#include <rte_common.h>
#include <rte_eal.h>
#include <rte_memcpy.h>
#include <rte_eal_paging.h>

#include "otx2_dev.h"
#include "otx2_mbox.h"

#define RVU_MAX_VF		64 /* RVU_PF_VFPF_MBOX_INT(0..1) */
#define RVU_MAX_INT_RETRY	3

/* PF/VF message handling timer period, in microseconds as passed to
 * rte_eal_alarm_set() (i.e. 20 ms).
 */
#define VF_PF_MBOX_TIMER_MS	(20 * 1000)

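/*
 * Map the VF mailbox region (derived from RVU_PF_VF_BAR4_ADDR) into this
 * process through /dev/mem. Returns MAP_FAILED on any failure so callers
 * can test against a single sentinel value.
 */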
static void *
mbox_mem_map(off_t off, size_t size)
{
	void *va = MAP_FAILED;
	int mem_fd;

	if (size == 0)
		goto error;

	mem_fd = open("/dev/mem", O_RDWR);
	if (mem_fd < 0)
		goto error;

	va = rte_mem_map(NULL, size, RTE_PROT_READ | RTE_PROT_WRITE,
			 RTE_MAP_SHARED, mem_fd, off);
	close(mem_fd);

	if (va == NULL) {
		otx2_err("Failed to mmap sz=0x%zx, fd=%d, off=%jd",
			 size, mem_fd, (intmax_t)off);
		/* Normalize rte_mem_map()'s NULL failure to MAP_FAILED,
		 * which is what the callers check for.
		 */
		va = MAP_FAILED;
	}
error:
	return va;
}

static void
mbox_mem_unmap(void *va, size_t size)
{
	if (va)
		rte_mem_unmap(va, size);
}

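/*
 * Send a single message to the AF and busy-wait for its response. This runs
 * from interrupt/alarm context, so the PF mailbox interrupt is masked for
 * the duration and the RVU_PF_INT bit is polled instead.
 */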
static int
pf_af_sync_msg(struct otx2_dev *dev, struct mbox_msghdr **rsp)
{
	uint32_t timeout = 0, sleep = 1;
	struct otx2_mbox *mbox = dev->mbox;
	struct otx2_mbox_dev *mdev = &mbox->dev[0];
	volatile uint64_t int_status;
	struct mbox_msghdr *msghdr;
	uint64_t off;
	int rc = 0;

	/* Disable PF interrupts as this may run in timer interrupt context */
	otx2_write64(~0ull, dev->bar2 + RVU_PF_INT_ENA_W1C);

	/* Send message */
	otx2_mbox_msg_send(mbox, 0);

	do {
		rte_delay_ms(sleep);
		timeout += sleep;
		if (timeout >= MBOX_RSP_TIMEOUT) {
			otx2_err("Message timeout: %dms", MBOX_RSP_TIMEOUT);
			rc = -EIO;
			break;
		}
		int_status = otx2_read64(dev->bar2 + RVU_PF_INT);
	} while ((int_status & 0x1) != 0x1);

	/* Clear interrupt */
	otx2_write64(int_status, dev->bar2 + RVU_PF_INT);

	/* Enable interrupts */
	otx2_write64(~0ull, dev->bar2 + RVU_PF_INT_ENA_W1S);

	if (rc == 0) {
		/* Get message */
		off = mbox->rx_start +
			RTE_ALIGN(sizeof(struct mbox_hdr), MBOX_MSG_ALIGN);
		msghdr = (struct mbox_msghdr *)((uintptr_t)mdev->mbase + off);
		if (rsp)
			*rsp = msghdr;
		rc = msghdr->rc;
	}

	return rc;
}

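/*
 * Forward already-queued messages to the AF, wait for the responses and copy
 * each routed response into the matching PF/VF mailbox slot for the given VF.
 * Returns the number of responses received from the AF.
 */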
static int
af_pf_wait_msg(struct otx2_dev *dev, uint16_t vf, int num_msg)
{
	uint32_t timeout = 0, sleep = 1;
	struct otx2_mbox *mbox = dev->mbox;
	struct otx2_mbox_dev *mdev = &mbox->dev[0];
	volatile uint64_t int_status;
	struct mbox_hdr *req_hdr;
	struct mbox_msghdr *msg;
	struct mbox_msghdr *rsp;
	uint64_t offset;
	size_t size;
	int i;

	/* Disable PF interrupts as this may run in timer interrupt context */
	otx2_write64(~0ull, dev->bar2 + RVU_PF_INT_ENA_W1C);

	/* Send message */
	otx2_mbox_msg_send(mbox, 0);

	do {
		rte_delay_ms(sleep);
		timeout += sleep;
		if (timeout >= MBOX_RSP_TIMEOUT) {
			otx2_err("Routed messages %d timeout: %dms",
				 num_msg, MBOX_RSP_TIMEOUT);
			break;
		}
		int_status = otx2_read64(dev->bar2 + RVU_PF_INT);
	} while ((int_status & 0x1) != 0x1);

	/* Clear interrupt */
	otx2_write64(~0ull, dev->bar2 + RVU_PF_INT);

	/* Enable interrupts */
	otx2_write64(~0ull, dev->bar2 + RVU_PF_INT_ENA_W1S);

	rte_spinlock_lock(&mdev->mbox_lock);

	req_hdr = (struct mbox_hdr *)((uintptr_t)mdev->mbase + mbox->rx_start);
	if (req_hdr->num_msgs != num_msg)
		otx2_err("Routed messages: %d received: %d", num_msg,
			 req_hdr->num_msgs);

	/* Get messages from mbox */
	offset = mbox->rx_start +
			RTE_ALIGN(sizeof(struct mbox_hdr), MBOX_MSG_ALIGN);
	for (i = 0; i < req_hdr->num_msgs; i++) {
		msg = (struct mbox_msghdr *)((uintptr_t)mdev->mbase + offset);
		size = mbox->rx_start + msg->next_msgoff - offset;

		/* Reserve PF/VF mbox message */
		size = RTE_ALIGN(size, MBOX_MSG_ALIGN);
		rsp = otx2_mbox_alloc_msg(&dev->mbox_vfpf, vf, size);
		otx2_mbox_rsp_init(msg->id, rsp);

		/* Copy message from AF<->PF mbox to PF<->VF mbox */
		otx2_mbox_memcpy((uint8_t *)rsp + sizeof(struct mbox_msghdr),
				 (uint8_t *)msg + sizeof(struct mbox_msghdr),
				 size - sizeof(struct mbox_msghdr));

		/* Set status and sender pf_func data */
		rsp->rc = msg->rc;
		rsp->pcifunc = msg->pcifunc;

		offset = mbox->rx_start + msg->next_msgoff;
	}
	rte_spinlock_unlock(&mdev->mbox_lock);

	return req_hdr->num_msgs;
}

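/*
 * Drain the VF -> PF mailbox for one VF: READY messages are answered
 * directly by the PF, everything else is re-queued to the AF and the
 * responses are routed back before replying to the VF.
 */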
static int
vf_pf_process_msgs(struct otx2_dev *dev, uint16_t vf)
{
	int offset, routed = 0;
	struct otx2_mbox *mbox = &dev->mbox_vfpf;
	struct otx2_mbox_dev *mdev = &mbox->dev[vf];
	struct mbox_hdr *req_hdr;
	struct mbox_msghdr *msg;
	size_t size;
	uint16_t i;

	req_hdr = (struct mbox_hdr *)((uintptr_t)mdev->mbase + mbox->rx_start);
	if (!req_hdr->num_msgs)
		return 0;

	offset = mbox->rx_start + RTE_ALIGN(sizeof(*req_hdr), MBOX_MSG_ALIGN);

	for (i = 0; i < req_hdr->num_msgs; i++) {
		msg = (struct mbox_msghdr *)((uintptr_t)mdev->mbase + offset);
		size = mbox->rx_start + msg->next_msgoff - offset;

		/* RVU_PF_FUNC_S */
		msg->pcifunc = otx2_pfvf_func(dev->pf, vf);

		if (msg->id == MBOX_MSG_READY) {
			struct ready_msg_rsp *rsp;
			uint16_t max_bits = sizeof(dev->active_vfs[0]) * 8;

			/* Handle READY message in PF */
			dev->active_vfs[vf / max_bits] |=
						BIT_ULL(vf % max_bits);
			rsp = (struct ready_msg_rsp *)
			       otx2_mbox_alloc_msg(mbox, vf, sizeof(*rsp));
			otx2_mbox_rsp_init(msg->id, rsp);

			/* PF/VF function ID */
			rsp->hdr.pcifunc = msg->pcifunc;
			rsp->hdr.rc = 0;
		} else {
			struct mbox_msghdr *af_req;

			/* Reserve AF/PF mbox message */
			size = RTE_ALIGN(size, MBOX_MSG_ALIGN);
			af_req = otx2_mbox_alloc_msg(dev->mbox, 0, size);
			otx2_mbox_req_init(msg->id, af_req);

			/* Copy message from VF<->PF mbox to PF<->AF mbox */
			otx2_mbox_memcpy((uint8_t *)af_req +
				   sizeof(struct mbox_msghdr),
				   (uint8_t *)msg + sizeof(struct mbox_msghdr),
				   size - sizeof(struct mbox_msghdr));
			af_req->pcifunc = msg->pcifunc;
			routed++;
		}
		offset = mbox->rx_start + msg->next_msgoff;
	}

	if (routed > 0) {
		otx2_base_dbg("pf:%d routed %d messages from vf:%d to AF",
			      dev->pf, routed, vf);
		af_pf_wait_msg(dev, vf, routed);
		otx2_mbox_reset(dev->mbox, 0);
	}

	/* Send mbox responses to VF */
	if (mdev->num_msgs) {
		otx2_base_dbg("pf:%d reply %d messages to vf:%d",
			      dev->pf, mdev->num_msgs, vf);
		otx2_mbox_msg_send(mbox, vf);
	}

	return i;
}

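/*
 * Process messages on the PF <-> VF UP channel for one VF. CGX link and PTP
 * notifications need no action beyond logging; everything processed is
 * acknowledged through mdev->msgs_acked.
 */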
static int
vf_pf_process_up_msgs(struct otx2_dev *dev, uint16_t vf)
{
	struct otx2_mbox *mbox = &dev->mbox_vfpf_up;
	struct otx2_mbox_dev *mdev = &mbox->dev[vf];
	struct mbox_hdr *req_hdr;
	struct mbox_msghdr *msg;
	int msgs_acked = 0;
	int offset;
	uint16_t i;

	req_hdr = (struct mbox_hdr *)((uintptr_t)mdev->mbase + mbox->rx_start);
	if (req_hdr->num_msgs == 0)
		return 0;

	offset = mbox->rx_start + RTE_ALIGN(sizeof(*req_hdr), MBOX_MSG_ALIGN);

	for (i = 0; i < req_hdr->num_msgs; i++) {
		msg = (struct mbox_msghdr *)((uintptr_t)mdev->mbase + offset);

		msgs_acked++;
		/* RVU_PF_FUNC_S */
		msg->pcifunc = otx2_pfvf_func(dev->pf, vf);

		switch (msg->id) {
		case MBOX_MSG_CGX_LINK_EVENT:
			otx2_base_dbg("PF: Msg 0x%x (%s) fn:0x%x (pf:%d,vf:%d)",
				      msg->id, otx2_mbox_id2name(msg->id),
				      msg->pcifunc, otx2_get_pf(msg->pcifunc),
				      otx2_get_vf(msg->pcifunc));
			break;
		case MBOX_MSG_CGX_PTP_RX_INFO:
			otx2_base_dbg("PF: Msg 0x%x (%s) fn:0x%x (pf:%d,vf:%d)",
				      msg->id, otx2_mbox_id2name(msg->id),
				      msg->pcifunc, otx2_get_pf(msg->pcifunc),
				      otx2_get_vf(msg->pcifunc));
			break;
		default:
			otx2_err("Unhandled UP msg 0x%x (%s) func:0x%x",
				 msg->id, otx2_mbox_id2name(msg->id),
				 msg->pcifunc);
		}
		offset = mbox->rx_start + msg->next_msgoff;
	}
	otx2_mbox_reset(mbox, vf);
	mdev->msgs_acked = msgs_acked;
	rte_wmb();

	return i;
}

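/*
 * Alarm callback armed by the VF -> PF mbox IRQ handler: walk the saved
 * interrupt bitmap and service both the request and UP mailboxes of every
 * pending VF.
 */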
static void
otx2_vf_pf_mbox_handle_msg(void *param)
{
	uint16_t vf, max_vf, max_bits;
	struct otx2_dev *dev = param;

	max_bits = sizeof(dev->intr.bits[0]) * 8;
	max_vf = max_bits * MAX_VFPF_DWORD_BITS;

	for (vf = 0; vf < max_vf; vf++) {
		if (dev->intr.bits[vf / max_bits] & BIT_ULL(vf % max_bits)) {
			otx2_base_dbg("Process vf:%d request (pf:%d, vf:%d)",
				       vf, dev->pf, dev->vf);
			vf_pf_process_msgs(dev, vf);
			/* UP messages */
			vf_pf_process_up_msgs(dev, vf);
			dev->intr.bits[vf / max_bits] &=
						~(BIT_ULL(vf % max_bits));
		}
	}
	dev->timer_set = 0;
}

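/*
 * VF -> PF mailbox IRQ handler: latch and clear the per-DWORD interrupt
 * bits, then defer the actual message processing to an EAL alarm so the
 * mailbox is not serviced from the interrupt thread itself.
 */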
static void
otx2_vf_pf_mbox_irq(void *param)
{
	struct otx2_dev *dev = param;
	bool alarm_set = false;
	uint64_t intr;
	int vfpf;

	for (vfpf = 0; vfpf < MAX_VFPF_DWORD_BITS; ++vfpf) {
		intr = otx2_read64(dev->bar2 + RVU_PF_VFPF_MBOX_INTX(vfpf));
		if (!intr)
			continue;

		otx2_base_dbg("vfpf: %d intr: 0x%" PRIx64 " (pf:%d, vf:%d)",
			      vfpf, intr, dev->pf, dev->vf);

		/* Save and clear intr bits */
		dev->intr.bits[vfpf] |= intr;
		otx2_write64(intr, dev->bar2 + RVU_PF_VFPF_MBOX_INTX(vfpf));
		alarm_set = true;
	}

	if (!dev->timer_set && alarm_set) {
		dev->timer_set = 1;
		/* Start timer to handle messages */
		rte_eal_alarm_set(VF_PF_MBOX_TIMER_MS,
				  otx2_vf_pf_mbox_handle_msg, dev);
	}
}

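/*
 * Process responses/notifications arriving on a DOWN mailbox.
 * MBOX_MSG_READY carries our own pf_func identity; all other IDs only have
 * their return codes checked here.
 */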
static void
otx2_process_msgs(struct otx2_dev *dev, struct otx2_mbox *mbox)
{
	struct otx2_mbox_dev *mdev = &mbox->dev[0];
	struct mbox_hdr *req_hdr;
	struct mbox_msghdr *msg;
	int msgs_acked = 0;
	int offset;
	uint16_t i;

	req_hdr = (struct mbox_hdr *)((uintptr_t)mdev->mbase + mbox->rx_start);
	if (req_hdr->num_msgs == 0)
		return;

	offset = mbox->rx_start + RTE_ALIGN(sizeof(*req_hdr), MBOX_MSG_ALIGN);
	for (i = 0; i < req_hdr->num_msgs; i++) {
		msg = (struct mbox_msghdr *)((uintptr_t)mdev->mbase + offset);

		msgs_acked++;
		otx2_base_dbg("Message 0x%x (%s) pf:%d/vf:%d",
			      msg->id, otx2_mbox_id2name(msg->id),
			      otx2_get_pf(msg->pcifunc),
			      otx2_get_vf(msg->pcifunc));

		switch (msg->id) {
			/* Add message IDs that are handled here */
		case MBOX_MSG_READY:
			/* Get our identity */
			dev->pf_func = msg->pcifunc;
			break;

		default:
			if (msg->rc)
				otx2_err("Message (%s) response has err=%d",
					 otx2_mbox_id2name(msg->id), msg->rc);
			break;
		}
		offset = mbox->rx_start + msg->next_msgoff;
	}

	otx2_mbox_reset(mbox, 0);
	/* Update acked so that anyone waiting for a response can proceed */
	mdev->msgs_acked = msgs_acked;
	rte_wmb();
}

/* Copies the message received from AF and sends it to all active VFs */
static void
pf_vf_mbox_send_up_msg(struct otx2_dev *dev, void *rec_msg)
{
	uint16_t max_bits = sizeof(dev->active_vfs[0]) * 8;
	struct otx2_mbox *vf_mbox = &dev->mbox_vfpf_up;
	struct msg_req *msg = rec_msg;
	struct mbox_msghdr *vf_msg;
	uint16_t vf;
	size_t size;

	size = RTE_ALIGN(otx2_mbox_id2size(msg->hdr.id), MBOX_MSG_ALIGN);
	/* Send UP message to all VFs */
	for (vf = 0; vf < vf_mbox->ndevs; vf++) {
		/* Skip inactive VFs */
		if (!(dev->active_vfs[vf / max_bits] & BIT_ULL(vf % max_bits)))
			continue;

		otx2_base_dbg("(%s) size: %zx to VF: %d",
			      otx2_mbox_id2name(msg->hdr.id), size, vf);

		/* Reserve PF/VF mbox message */
		vf_msg = otx2_mbox_alloc_msg(vf_mbox, vf, size);
		if (!vf_msg) {
			otx2_err("Failed to alloc VF%d UP message", vf);
			continue;
		}
		otx2_mbox_req_init(msg->hdr.id, vf_msg);

		/*
		 * Copy message from AF<->PF UP mbox
		 * to PF<->VF UP mbox
		 */
		otx2_mbox_memcpy((uint8_t *)vf_msg +
				 sizeof(struct mbox_msghdr),
				 (uint8_t *)msg + sizeof(struct mbox_msghdr),
				 size - sizeof(struct mbox_msghdr));

		vf_msg->rc = msg->hdr.rc;
		/* Set PF to be the sender */
		vf_msg->pcifunc = dev->pf_func;

		/* Send to VF */
		otx2_mbox_msg_send(vf_mbox, vf);
	}
}

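/*
 * UP handler for CGX link state changes. The PF notifies its driver through
 * the link_status_update callback and mirrors the event to all active VFs;
 * a VF only notifies its own driver.
 */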
static int
otx2_mbox_up_handler_cgx_link_event(struct otx2_dev *dev,
				    struct cgx_link_info_msg *msg,
				    struct msg_rsp *rsp)
{
	struct cgx_link_user_info *linfo = &msg->link_info;

	otx2_base_dbg("pf:%d/vf:%d NIC Link %s --> 0x%x (%s) from: pf:%d/vf:%d",
		      otx2_get_pf(dev->pf_func), otx2_get_vf(dev->pf_func),
		      linfo->link_up ? "UP" : "DOWN", msg->hdr.id,
		      otx2_mbox_id2name(msg->hdr.id),
		      otx2_get_pf(msg->hdr.pcifunc),
		      otx2_get_vf(msg->hdr.pcifunc));

	/* PF gets link notification from AF */
	if (otx2_get_pf(msg->hdr.pcifunc) == 0) {
		if (dev->ops && dev->ops->link_status_update)
			dev->ops->link_status_update(dev, linfo);

		/* Forward the same message as received from AF to VF */
		pf_vf_mbox_send_up_msg(dev, msg);
	} else {
		/* VF gets link up notification */
		if (dev->ops && dev->ops->link_status_update)
			dev->ops->link_status_update(dev, linfo);
	}

	rsp->hdr.rc = 0;
	return 0;
}

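/*
 * UP handler for CGX PTP Rx mode changes; mirrors the link-event flow above
 * but updates PTP state through the ptp_info_update callback.
 */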
static int
otx2_mbox_up_handler_cgx_ptp_rx_info(struct otx2_dev *dev,
				     struct cgx_ptp_rx_info_msg *msg,
				     struct msg_rsp *rsp)
{
	otx2_nix_dbg("pf:%d/vf:%d PTP mode %s --> 0x%x (%s) from: pf:%d/vf:%d",
		     otx2_get_pf(dev->pf_func),
		     otx2_get_vf(dev->pf_func),
		     msg->ptp_en ? "ENABLED" : "DISABLED",
		     msg->hdr.id, otx2_mbox_id2name(msg->hdr.id),
		     otx2_get_pf(msg->hdr.pcifunc),
		     otx2_get_vf(msg->hdr.pcifunc));

	/* PF gets PTP notification from AF */
	if (otx2_get_pf(msg->hdr.pcifunc) == 0) {
		if (dev->ops && dev->ops->ptp_info_update)
			dev->ops->ptp_info_update(dev, msg->ptp_en);

		/* Forward the same message as received from AF to VF */
		pf_vf_mbox_send_up_msg(dev, msg);
	} else {
		/* VF gets PTP notification */
		if (dev->ops && dev->ops->ptp_info_update)
			dev->ops->ptp_info_update(dev, msg->ptp_en);
	}

	rsp->hdr.rc = 0;
	return 0;
}

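/*
 * Dispatch one UP request to its handler. The M() expansion below generates
 * a case per MBOX_UP_CGX_MESSAGES entry that allocates the typed response,
 * pre-fills its header and invokes otx2_mbox_up_handler_<name>().
 */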
static int
mbox_process_msgs_up(struct otx2_dev *dev, struct mbox_msghdr *req)
{
	/* Check if valid, if not reply with an invalid msg */
	if (req->sig != OTX2_MBOX_REQ_SIG)
		return -EIO;

	switch (req->id) {
#define M(_name, _id, _fn_name, _req_type, _rsp_type)		\
	case _id: {						\
		struct _rsp_type *rsp;				\
		int err;					\
								\
		rsp = (struct _rsp_type *)otx2_mbox_alloc_msg(	\
			&dev->mbox_up, 0,			\
			sizeof(struct _rsp_type));		\
		if (!rsp)					\
			return -ENOMEM;				\
								\
		rsp->hdr.id = _id;				\
		rsp->hdr.sig = OTX2_MBOX_RSP_SIG;		\
		rsp->hdr.pcifunc = dev->pf_func;		\
		rsp->hdr.rc = 0;				\
								\
		err = otx2_mbox_up_handler_ ## _fn_name(	\
			dev, (struct _req_type *)req, rsp);	\
		return err;					\
	}
MBOX_UP_CGX_MESSAGES
#undef M

	default:
		otx2_reply_invalid_msg(&dev->mbox_up, 0, 0, req->id);
	}

	return -ENODEV;
}

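/*
 * Walk all pending UP requests in the mailbox, dispatch each one and push
 * the batched responses back to the sender.
 */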
static void
otx2_process_msgs_up(struct otx2_dev *dev, struct otx2_mbox *mbox)
{
	struct otx2_mbox_dev *mdev = &mbox->dev[0];
	struct mbox_hdr *req_hdr;
	struct mbox_msghdr *msg;
	int i, err, offset;

	req_hdr = (struct mbox_hdr *)((uintptr_t)mdev->mbase + mbox->rx_start);
	if (req_hdr->num_msgs == 0)
		return;

	offset = mbox->rx_start + RTE_ALIGN(sizeof(*req_hdr), MBOX_MSG_ALIGN);
	for (i = 0; i < req_hdr->num_msgs; i++) {
		msg = (struct mbox_msghdr *)((uintptr_t)mdev->mbase + offset);

		otx2_base_dbg("Message 0x%x (%s) pf:%d/vf:%d",
			      msg->id, otx2_mbox_id2name(msg->id),
			      otx2_get_pf(msg->pcifunc),
			      otx2_get_vf(msg->pcifunc));
		err = mbox_process_msgs_up(dev, msg);
		if (err)
			otx2_err("Error %d handling 0x%x (%s)",
				 err, msg->id, otx2_mbox_id2name(msg->id));
		offset = mbox->rx_start + msg->next_msgoff;
	}
	/* Send mbox responses */
	if (mdev->num_msgs) {
		otx2_base_dbg("Reply num_msgs:%d", mdev->num_msgs);
		otx2_mbox_msg_send(mbox, 0);
	}
}

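/*
 * Mailbox IRQ handlers for the VF side (PF <-> VF) and PF side (AF <-> PF):
 * acknowledge the interrupt, then drain DOWN responses followed by any
 * pending UP notifications. UP messages may be pending even when no new
 * interrupt bit is set, hence the unconditional processing.
 */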
static void
otx2_pf_vf_mbox_irq(void *param)
{
	struct otx2_dev *dev = param;
	uint64_t intr;

	intr = otx2_read64(dev->bar2 + RVU_VF_INT);
	if (intr == 0)
		otx2_base_dbg("Proceeding to check mbox UP messages if any");

	otx2_write64(intr, dev->bar2 + RVU_VF_INT);
	otx2_base_dbg("Irq 0x%" PRIx64 " (pf:%d,vf:%d)",
		      intr, dev->pf, dev->vf);

	/* First process all configuration messages */
	otx2_process_msgs(dev, dev->mbox);

	/* Process Uplink messages */
	otx2_process_msgs_up(dev, &dev->mbox_up);
}

static void
otx2_af_pf_mbox_irq(void *param)
{
	struct otx2_dev *dev = param;
	uint64_t intr;

	intr = otx2_read64(dev->bar2 + RVU_PF_INT);
	if (intr == 0)
		otx2_base_dbg("Proceeding to check mbox UP messages if any");

	otx2_write64(intr, dev->bar2 + RVU_PF_INT);
	otx2_base_dbg("Irq 0x%" PRIx64 " (pf:%d,vf:%d)",
		      intr, dev->pf, dev->vf);

	/* First process all configuration messages */
	otx2_process_msgs(dev, dev->mbox);

	/* Process Uplink messages */
	otx2_process_msgs_up(dev, &dev->mbox_up);
}

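/*
 * Register the PF-side mailbox interrupts: two vectors for the VF -> PF
 * mailboxes (VF0-63 and VF64-127) plus one for the AF -> PF mailbox, then
 * unmask them in hardware.
 */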
static int
mbox_register_pf_irq(struct rte_pci_device *pci_dev, struct otx2_dev *dev)
{
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	int i, rc;

	/* HW clear irq */
	for (i = 0; i < MAX_VFPF_DWORD_BITS; ++i)
		otx2_write64(~0ull, dev->bar2 +
			     RVU_PF_VFPF_MBOX_INT_ENA_W1CX(i));

	otx2_write64(~0ull, dev->bar2 + RVU_PF_INT_ENA_W1C);

	dev->timer_set = 0;

	/* MBOX interrupt for VF(0..63) <-> PF */
	rc = otx2_register_irq(intr_handle, otx2_vf_pf_mbox_irq, dev,
			       RVU_PF_INT_VEC_VFPF_MBOX0);
	if (rc) {
		otx2_err("Failed to register PF(VF0-63) mbox irq");
		return rc;
	}

	/* MBOX interrupt for VF(64..127) <-> PF */
	rc = otx2_register_irq(intr_handle, otx2_vf_pf_mbox_irq, dev,
			       RVU_PF_INT_VEC_VFPF_MBOX1);
	if (rc) {
		otx2_err("Failed to register PF(VF64-127) mbox irq");
		return rc;
	}

	/* MBOX interrupt AF <-> PF */
	rc = otx2_register_irq(intr_handle, otx2_af_pf_mbox_irq,
			       dev, RVU_PF_INT_VEC_AFPF_MBOX);
	if (rc) {
		otx2_err("Failed to register AF<->PF mbox irq");
		return rc;
	}

	/* HW enable intr */
	for (i = 0; i < MAX_VFPF_DWORD_BITS; ++i)
		otx2_write64(~0ull, dev->bar2 +
			RVU_PF_VFPF_MBOX_INT_ENA_W1SX(i));

	otx2_write64(~0ull, dev->bar2 + RVU_PF_INT);
	otx2_write64(~0ull, dev->bar2 + RVU_PF_INT_ENA_W1S);

	return rc;
}

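/*
 * Register and unmask the single mailbox interrupt a VF owns for the
 * PF <-> VF channel.
 */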
static int
mbox_register_vf_irq(struct rte_pci_device *pci_dev, struct otx2_dev *dev)
{
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	int rc;

	/* Clear irq */
	otx2_write64(~0ull, dev->bar2 + RVU_VF_INT_ENA_W1C);

	/* MBOX interrupt PF <-> VF */
	rc = otx2_register_irq(intr_handle, otx2_pf_vf_mbox_irq,
			       dev, RVU_VF_INT_VEC_MBOX);
	if (rc) {
		otx2_err("Failed to register PF<->VF mbox irq");
		return rc;
	}

	/* HW enable intr */
	otx2_write64(~0ull, dev->bar2 + RVU_VF_INT);
	otx2_write64(~0ull, dev->bar2 + RVU_VF_INT_ENA_W1S);

	return rc;
}

static int
mbox_register_irq(struct rte_pci_device *pci_dev, struct otx2_dev *dev)
{
	if (otx2_dev_is_vf(dev))
		return mbox_register_vf_irq(pci_dev, dev);
	else
		return mbox_register_pf_irq(pci_dev, dev);
}

static void
mbox_unregister_pf_irq(struct rte_pci_device *pci_dev, struct otx2_dev *dev)
{
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	int i;

	/* HW clear irq */
	for (i = 0; i < MAX_VFPF_DWORD_BITS; ++i)
		otx2_write64(~0ull, dev->bar2 +
			     RVU_PF_VFPF_MBOX_INT_ENA_W1CX(i));

	otx2_write64(~0ull, dev->bar2 + RVU_PF_INT_ENA_W1C);

	dev->timer_set = 0;

	rte_eal_alarm_cancel(otx2_vf_pf_mbox_handle_msg, dev);

	/* Unregister the interrupt handler for each vector */
	/* MBOX interrupt for VF(0..63) <-> PF */
	otx2_unregister_irq(intr_handle, otx2_vf_pf_mbox_irq, dev,
			    RVU_PF_INT_VEC_VFPF_MBOX0);

	/* MBOX interrupt for VF(64..127) <-> PF */
	otx2_unregister_irq(intr_handle, otx2_vf_pf_mbox_irq, dev,
			    RVU_PF_INT_VEC_VFPF_MBOX1);

	/* MBOX interrupt AF <-> PF */
	otx2_unregister_irq(intr_handle, otx2_af_pf_mbox_irq, dev,
			    RVU_PF_INT_VEC_AFPF_MBOX);
}

static void
mbox_unregister_vf_irq(struct rte_pci_device *pci_dev, struct otx2_dev *dev)
{
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;

	/* Clear irq */
	otx2_write64(~0ull, dev->bar2 + RVU_VF_INT_ENA_W1C);

	/* Unregister the interrupt handler */
	otx2_unregister_irq(intr_handle, otx2_pf_vf_mbox_irq, dev,
			    RVU_VF_INT_VEC_MBOX);
}

static void
mbox_unregister_irq(struct rte_pci_device *pci_dev, struct otx2_dev *dev)
{
	if (otx2_dev_is_vf(dev))
		mbox_unregister_vf_irq(pci_dev, dev);
	else
		mbox_unregister_pf_irq(pci_dev, dev);
}

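/*
 * Notify the AF that a VF has gone through FLR; sent synchronously because
 * this is called from the FLR interrupt handler.
 */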
static int
vf_flr_send_msg(struct otx2_dev *dev, uint16_t vf)
{
	struct otx2_mbox *mbox = dev->mbox;
	struct msg_req *req;
	int rc;

	req = otx2_mbox_alloc_msg_vf_flr(mbox);
	/* Overwrite pcifunc to indicate VF */
	req->hdr.pcifunc = otx2_pfvf_func(dev->pf, vf);

	/* Sync message in interrupt context */
	rc = pf_af_sync_msg(dev, NULL);
	if (rc)
		otx2_err("Failed to send VF FLR mbox msg, rc=%d", rc);

	return rc;
}

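/*
 * VF FLR interrupt handler: for every VF with a pending FLR, acknowledge and
 * mask the interrupt, tell the AF about the reset, then signal FLR
 * completion through RVU_PF_VFTRPENDX and re-enable the interrupt.
 */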
static void
otx2_pf_vf_flr_irq(void *param)
{
	struct otx2_dev *dev = (struct otx2_dev *)param;
	uint16_t max_vf, vf;
	uintptr_t bar2;
	uint64_t intr;
	int i;

	max_vf = (dev->maxvf > 0) ? dev->maxvf : 64;
	bar2 = dev->bar2;

	otx2_base_dbg("FLR VF interrupt: max_vf: %d", max_vf);

	for (i = 0; i < MAX_VFPF_DWORD_BITS; ++i) {
		intr = otx2_read64(bar2 + RVU_PF_VFFLR_INTX(i));
		if (!intr)
			continue;

		/* Each VFFLR_INTX register covers 64 VFs */
		for (vf = 0; vf < 64 && (64 * i + vf) < max_vf; vf++) {
			if (!(intr & BIT_ULL(vf)))
				continue;

			otx2_base_dbg("FLR: i:%d intr: 0x%" PRIx64 ", vf-%d",
				      i, intr, (64 * i + vf));
			/* Clear interrupt */
			otx2_write64(BIT_ULL(vf), bar2 + RVU_PF_VFFLR_INTX(i));
			/* Disable the interrupt */
			otx2_write64(BIT_ULL(vf),
				     bar2 + RVU_PF_VFFLR_INT_ENA_W1CX(i));
			/* Inform AF about VF reset */
			vf_flr_send_msg(dev, 64 * i + vf);

			/* Signal FLR finish */
			otx2_write64(BIT_ULL(vf), bar2 + RVU_PF_VFTRPENDX(i));
			/* Enable interrupt */
			otx2_write64(~0ull,
				     bar2 + RVU_PF_VFFLR_INT_ENA_W1SX(i));
		}
	}
}

static int
vf_flr_unregister_irqs(struct rte_pci_device *pci_dev, struct otx2_dev *dev)
{
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	int i;

	otx2_base_dbg("Unregister VF FLR interrupts for %s", pci_dev->name);

	/* HW clear irq */
	for (i = 0; i < MAX_VFPF_DWORD_BITS; i++)
		otx2_write64(~0ull, dev->bar2 + RVU_PF_VFFLR_INT_ENA_W1CX(i));

	otx2_unregister_irq(intr_handle, otx2_pf_vf_flr_irq, dev,
			    RVU_PF_INT_VEC_VFFLR0);

	otx2_unregister_irq(intr_handle, otx2_pf_vf_flr_irq, dev,
			    RVU_PF_INT_VEC_VFFLR1);

	return 0;
}

static int
vf_flr_register_irqs(struct rte_pci_device *pci_dev, struct otx2_dev *dev)
{
	struct rte_intr_handle *handle = &pci_dev->intr_handle;
	int i, rc;

	otx2_base_dbg("Register VF FLR interrupts for %s", pci_dev->name);

	rc = otx2_register_irq(handle, otx2_pf_vf_flr_irq, dev,
			       RVU_PF_INT_VEC_VFFLR0);
	if (rc) {
		otx2_err("Failed to init RVU_PF_INT_VEC_VFFLR0 rc=%d", rc);
		return rc;
	}

	rc = otx2_register_irq(handle, otx2_pf_vf_flr_irq, dev,
			       RVU_PF_INT_VEC_VFFLR1);
	if (rc) {
		otx2_err("Failed to init RVU_PF_INT_VEC_VFFLR1 rc=%d", rc);
		return rc;
	}

	/* Enable HW interrupt */
	for (i = 0; i < MAX_VFPF_DWORD_BITS; ++i) {
		otx2_write64(~0ull, dev->bar2 + RVU_PF_VFFLR_INTX(i));
		otx2_write64(~0ull, dev->bar2 + RVU_PF_VFTRPENDX(i));
		otx2_write64(~0ull, dev->bar2 + RVU_PF_VFFLR_INT_ENA_W1SX(i));
	}
	return 0;
}

/**
 * @internal
 * Get number of active VFs for the given PF device.
 */
int
otx2_dev_active_vfs(void *otx2_dev)
{
	struct otx2_dev *dev = otx2_dev;
	int i, count = 0;

	for (i = 0; i < MAX_VFPF_DWORD_BITS; i++)
		count += __builtin_popcountll(dev->active_vfs[i]);

	return count;
}

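/*
 * Minimal usage sketch (hypothetical caller; assumes "dev" points to an
 * otx2_dev that completed otx2_dev_init() successfully):
 *
 *	struct otx2_dev *dev = ...;
 *	int nvfs = otx2_dev_active_vfs(dev);
 *
 *	otx2_base_dbg("PF has %d active VFs", nvfs);
 */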
static void
otx2_update_vf_hwcap(struct rte_pci_device *pci_dev, struct otx2_dev *dev)
{
	switch (pci_dev->id.device_id) {
	case PCI_DEVID_OCTEONTX2_RVU_PF:
		break;
	case PCI_DEVID_OCTEONTX2_RVU_SSO_TIM_VF:
	case PCI_DEVID_OCTEONTX2_RVU_NPA_VF:
	case PCI_DEVID_OCTEONTX2_RVU_CPT_VF:
	case PCI_DEVID_OCTEONTX2_RVU_AF_VF:
	case PCI_DEVID_OCTEONTX2_RVU_VF:
	case PCI_DEVID_OCTEONTX2_RVU_SDP_VF:
		dev->hwcap |= OTX2_HWCAP_F_VF;
		break;
	}
}

/**
 * @internal
 * Initialize the otx2 device
 */
int
otx2_dev_priv_init(struct rte_pci_device *pci_dev, void *otx2_dev)
{
	int up_direction = MBOX_DIR_PFAF_UP;
	int rc, direction = MBOX_DIR_PFAF;
	uint64_t intr_offset = RVU_PF_INT;
	struct otx2_dev *dev = otx2_dev;
	void *hwbase = NULL;
	uintptr_t bar2, bar4;
	uint64_t bar4_addr;

	bar2 = (uintptr_t)pci_dev->mem_resource[2].addr;
	bar4 = (uintptr_t)pci_dev->mem_resource[4].addr;

	if (bar2 == 0 || bar4 == 0) {
		otx2_err("Failed to get pci bars");
		rc = -ENODEV;
		goto error;
	}

	dev->node = pci_dev->device.numa_node;
	dev->maxvf = pci_dev->max_vfs;
	dev->bar2 = bar2;
	dev->bar4 = bar4;

	otx2_update_vf_hwcap(pci_dev, dev);

	if (otx2_dev_is_vf(dev)) {
		direction = MBOX_DIR_VFPF;
		up_direction = MBOX_DIR_VFPF_UP;
		intr_offset = RVU_VF_INT;
	}

	/* Initialize the local mbox */
	rc = otx2_mbox_init(&dev->mbox_local, bar4, bar2, direction, 1,
			    intr_offset);
	if (rc)
		goto error;
	dev->mbox = &dev->mbox_local;

	rc = otx2_mbox_init(&dev->mbox_up, bar4, bar2, up_direction, 1,
			    intr_offset);
	if (rc)
		goto error;

	/* Register mbox interrupts */
	rc = mbox_register_irq(pci_dev, dev);
	if (rc)
		goto mbox_fini;

	/* Check the readiness of PF/VF */
	rc = otx2_send_ready_msg(dev->mbox, &dev->pf_func);
	if (rc)
		goto mbox_unregister;

	dev->pf = otx2_get_pf(dev->pf_func);
	dev->vf = otx2_get_vf(dev->pf_func);
	memset(&dev->active_vfs, 0, sizeof(dev->active_vfs));

	/* Found VF devices in a PF device */
	if (pci_dev->max_vfs > 0) {
		/* Remap mbox area for all VFs */
		bar4_addr = otx2_read64(bar2 + RVU_PF_VF_BAR4_ADDR);
		if (bar4_addr == 0) {
			rc = -ENODEV;
			goto mbox_unregister;
		}

		hwbase = mbox_mem_map(bar4_addr, MBOX_SIZE * pci_dev->max_vfs);
		if (hwbase == MAP_FAILED) {
			rc = -ENOMEM;
			goto mbox_unregister;
		}

		/* Init mbox object */
		rc = otx2_mbox_init(&dev->mbox_vfpf, (uintptr_t)hwbase,
				    bar2, MBOX_DIR_PFVF, pci_dev->max_vfs,
				    intr_offset);
		if (rc)
			goto iounmap;

		/* PF -> VF UP messages */
		rc = otx2_mbox_init(&dev->mbox_vfpf_up, (uintptr_t)hwbase,
				    bar2, MBOX_DIR_PFVF_UP, pci_dev->max_vfs,
				    intr_offset);
		if (rc)
			goto iounmap;
	}

	/* Register VF-FLR irq handlers */
	if (otx2_dev_is_pf(dev)) {
		rc = vf_flr_register_irqs(pci_dev, dev);
		if (rc)
			goto iounmap;
	}
	dev->mbox_active = 1;
	return rc;

iounmap:
	mbox_mem_unmap(hwbase, MBOX_SIZE * pci_dev->max_vfs);
mbox_unregister:
	mbox_unregister_irq(pci_dev, dev);
mbox_fini:
	otx2_mbox_fini(dev->mbox);
	otx2_mbox_fini(&dev->mbox_up);
error:
	return rc;
}

/**
 * @internal
 * Finalize the otx2 device
 */
void
otx2_dev_fini(struct rte_pci_device *pci_dev, void *otx2_dev)
{
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	struct otx2_dev *dev = otx2_dev;
	struct otx2_idev_cfg *idev;
	struct otx2_mbox *mbox;

	/* Clear references to this pci dev */
	idev = otx2_intra_dev_get_cfg();
	if (idev->npa_lf && idev->npa_lf->pci_dev == pci_dev)
		idev->npa_lf = NULL;

	mbox_unregister_irq(pci_dev, dev);

	if (otx2_dev_is_pf(dev))
		vf_flr_unregister_irqs(pci_dev, dev);

	/* Release PF - VF */
	mbox = &dev->mbox_vfpf;
	if (mbox->hwbase && mbox->dev)
		mbox_mem_unmap((void *)mbox->hwbase,
			       MBOX_SIZE * pci_dev->max_vfs);
	otx2_mbox_fini(mbox);
	mbox = &dev->mbox_vfpf_up;
	otx2_mbox_fini(mbox);

	/* Release PF - AF */
	mbox = dev->mbox;
	otx2_mbox_fini(mbox);
	mbox = &dev->mbox_up;
	otx2_mbox_fini(mbox);
	dev->mbox_active = 0;

	/* Disable MSIX vectors */
	otx2_disable_irqs(intr_handle);
}