/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2019 Mellanox Technologies, Ltd
 */
#include <unistd.h>
#include <stdint.h>
#include <fcntl.h>
#include <sys/eventfd.h>

#include <rte_malloc.h>
#include <rte_errno.h>
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_common.h>
#include <rte_io.h>
#include <rte_alarm.h>

#include <mlx5_common.h>
#include <mlx5_glue.h>

#include "mlx5_vdpa_utils.h"
#include "mlx5_vdpa.h"


#define MLX5_VDPA_ERROR_TIME_SEC 3u

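/*
 * Release the global event resources: free the UAR, drain and destroy the
 * DevX event channel and reset the cached EQ number.
 */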
void
mlx5_vdpa_event_qp_global_release(struct mlx5_vdpa_priv *priv)
{
	if (priv->uar) {
		mlx5_glue->devx_free_uar(priv->uar);
		priv->uar = NULL;
	}
#ifdef HAVE_IBV_DEVX_EVENT
	if (priv->eventc) {
		union {
			struct mlx5dv_devx_async_event_hdr event_resp;
			uint8_t buf[sizeof(struct mlx5dv_devx_async_event_hdr)
								 + 128];
		} out;

		/* Clean all pending events. */
		while (mlx5_glue->devx_get_event(priv->eventc, &out.event_resp,
		       sizeof(out.buf)) >=
		       (ssize_t)sizeof(out.event_resp.cookie))
			;
		mlx5_glue->devx_destroy_event_channel(priv->eventc);
		priv->eventc = NULL;
	}
#endif
	priv->eqn = 0;
}

/* Prepare all the global resources for all the event objects. */
static int
mlx5_vdpa_event_qp_global_prepare(struct mlx5_vdpa_priv *priv)
{
	int flags, ret;

	if (priv->eventc)
		return 0;
	if (mlx5_glue->devx_query_eqn(priv->ctx, 0, &priv->eqn)) {
		rte_errno = errno;
		DRV_LOG(ERR, "Failed to query EQ number %d.", rte_errno);
		return -1;
	}
	priv->eventc = mlx5_glue->devx_create_event_channel(priv->ctx,
			   MLX5DV_DEVX_CREATE_EVENT_CHANNEL_FLAGS_OMIT_EV_DATA);
	if (!priv->eventc) {
		rte_errno = errno;
		DRV_LOG(ERR, "Failed to create event channel %d.",
			rte_errno);
		goto error;
	}
	flags = fcntl(priv->eventc->fd, F_GETFL);
	ret = fcntl(priv->eventc->fd, F_SETFL, flags | O_NONBLOCK);
	if (ret) {
		DRV_LOG(ERR, "Failed to change event channel FD.");
		goto error;
	}
	/*
	 * This PMD always issues a write memory barrier before UAR register
	 * writes, so it is safe to allocate the UAR with any memory mapping
	 * type.
	 */
	priv->uar = mlx5_devx_alloc_uar(priv->ctx, -1);
	if (!priv->uar) {
		rte_errno = errno;
		DRV_LOG(ERR, "Failed to allocate UAR.");
		goto error;
	}
	return 0;
error:
	mlx5_vdpa_event_qp_global_release(priv);
	return -1;
}

static void
mlx5_vdpa_cq_destroy(struct mlx5_vdpa_cq *cq)
{
	if (cq->cq)
		claim_zero(mlx5_devx_cmd_destroy(cq->cq));
	if (cq->umem_obj)
		claim_zero(mlx5_glue->devx_umem_dereg(cq->umem_obj));
	if (cq->umem_buf)
		rte_free((void *)(uintptr_t)cq->umem_buf);
	memset(cq, 0, sizeof(*cq));
}

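/*
 * Arm the CQ for the next completion event: update the CQ arm doorbell
 * record and ring the UAR CQ doorbell with the arm sequence number, the
 * doorbell command and the current CQ consumer index.
 */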
static inline void __rte_unused
mlx5_vdpa_cq_arm(struct mlx5_vdpa_priv *priv, struct mlx5_vdpa_cq *cq)
{
	uint32_t arm_sn = cq->arm_sn << MLX5_CQ_SQN_OFFSET;
	uint32_t cq_ci = cq->cq_ci & MLX5_CI_MASK;
	uint32_t doorbell_hi = arm_sn | MLX5_CQ_DBR_CMD_ALL | cq_ci;
	uint64_t doorbell = ((uint64_t)doorbell_hi << 32) | cq->cq->id;
	uint64_t db_be = rte_cpu_to_be_64(doorbell);
	uint32_t *addr = RTE_PTR_ADD(priv->uar->base_addr, MLX5_CQ_DOORBELL);

	rte_io_wmb();
	cq->db_rec[MLX5_CQ_ARM_DB] = rte_cpu_to_be_32(doorbell_hi);
	rte_wmb();
#ifdef RTE_ARCH_64
	*(uint64_t *)addr = db_be;
#else
	*(uint32_t *)addr = db_be;
	rte_io_wmb();
	*((uint32_t *)addr + 1) = db_be >> 32;
#endif
	cq->arm_sn++;
	cq->armed = 1;
}

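/*
 * Create a CQ on a registered umem buffer holding the CQEs followed by the
 * doorbell record, subscribe its completion event to the driver event
 * channel and arm it for the first time.
 */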
static int
mlx5_vdpa_cq_create(struct mlx5_vdpa_priv *priv, uint16_t log_desc_n,
		    int callfd, struct mlx5_vdpa_cq *cq)
{
	struct mlx5_devx_cq_attr attr = {0};
	size_t pgsize = sysconf(_SC_PAGESIZE);
	uint32_t umem_size;
	uint16_t event_nums[1] = {0};
	uint16_t cq_size = 1 << log_desc_n;
	int ret;

	cq->log_desc_n = log_desc_n;
	umem_size = sizeof(struct mlx5_cqe) * cq_size + sizeof(*cq->db_rec) * 2;
	cq->umem_buf = rte_zmalloc(__func__, umem_size, 4096);
	if (!cq->umem_buf) {
		DRV_LOG(ERR, "Failed to allocate memory for CQ.");
		rte_errno = ENOMEM;
		return -ENOMEM;
	}
	cq->umem_obj = mlx5_glue->devx_umem_reg(priv->ctx,
						(void *)(uintptr_t)cq->umem_buf,
						umem_size,
						IBV_ACCESS_LOCAL_WRITE);
	if (!cq->umem_obj) {
		DRV_LOG(ERR, "Failed to register umem for CQ.");
		goto error;
	}
	attr.q_umem_valid = 1;
	attr.db_umem_valid = 1;
	attr.use_first_only = 1;
	attr.overrun_ignore = 0;
	attr.uar_page_id = priv->uar->page_id;
	attr.q_umem_id = cq->umem_obj->umem_id;
	attr.q_umem_offset = 0;
	attr.db_umem_id = cq->umem_obj->umem_id;
	attr.db_umem_offset = sizeof(struct mlx5_cqe) * cq_size;
	attr.eqn = priv->eqn;
	attr.log_cq_size = log_desc_n;
	attr.log_page_size = rte_log2_u32(pgsize);
	cq->cq = mlx5_devx_cmd_create_cq(priv->ctx, &attr);
	if (!cq->cq)
		goto error;
	cq->db_rec = RTE_PTR_ADD(cq->umem_buf, (uintptr_t)attr.db_umem_offset);
	cq->cq_ci = 0;
	rte_spinlock_init(&cq->sl);
	/* Subscribe CQ event to the event channel controlled by the driver. */
	ret = mlx5_glue->devx_subscribe_devx_event(priv->eventc, cq->cq->obj,
						   sizeof(event_nums),
						   event_nums,
						   (uint64_t)(uintptr_t)cq);
	if (ret) {
		DRV_LOG(ERR, "Failed to subscribe CQE event.");
		rte_errno = errno;
		goto error;
	}
	cq->callfd = callfd;
	/* Initialize the first CQE fields to ones so HW owns it at start. */
	cq->cqes[0].op_own = MLX5_CQE_OWNER_MASK;
	cq->cqes[0].wqe_counter = rte_cpu_to_be_16(UINT16_MAX);
	/* First arming. */
	mlx5_vdpa_cq_arm(priv, cq);
	return 0;
error:
	mlx5_vdpa_cq_destroy(cq);
	return -1;
}

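/*
 * Poll the CQ. The CQ was created with use_first_only set, so the HW
 * overwrites only the first CQE; the difference between its wqe_counter and
 * the SW consumer index gives the number of new completions.
 */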
static inline uint32_t
mlx5_vdpa_cq_poll(struct mlx5_vdpa_cq *cq)
{
	struct mlx5_vdpa_event_qp *eqp =
				container_of(cq, struct mlx5_vdpa_event_qp, cq);
	const unsigned int cq_size = 1 << cq->log_desc_n;
	union {
		struct {
			uint16_t wqe_counter;
			uint8_t rsvd5;
			uint8_t op_own;
		};
		uint32_t word;
	} last_word;
	uint16_t next_wqe_counter = cq->cq_ci;
	uint16_t cur_wqe_counter;
	uint16_t comp;

	last_word.word = rte_read32(&cq->cqes[0].wqe_counter);
	cur_wqe_counter = rte_be_to_cpu_16(last_word.wqe_counter);
	comp = cur_wqe_counter + (uint16_t)1 - next_wqe_counter;
	if (comp) {
		cq->cq_ci += comp;
		MLX5_ASSERT(MLX5_CQE_OPCODE(last_word.op_own) !=
			    MLX5_CQE_INVALID);
		if (unlikely(!(MLX5_CQE_OPCODE(last_word.op_own) ==
			       MLX5_CQE_RESP_ERR ||
			       MLX5_CQE_OPCODE(last_word.op_own) ==
			       MLX5_CQE_REQ_ERR)))
			cq->errors++;
		rte_io_wmb();
		/* Ring CQ doorbell record. */
		cq->db_rec[0] = rte_cpu_to_be_32(cq->cq_ci);
		rte_io_wmb();
		/* Ring SW QP doorbell record. */
		eqp->db_rec[0] = rte_cpu_to_be_32(cq->cq_ci + cq_size);
	}
	return comp;
}

static void
mlx5_vdpa_arm_all_cqs(struct mlx5_vdpa_priv *priv)
{
	struct mlx5_vdpa_cq *cq;
	int i;

	for (i = 0; i < priv->nr_virtqs; i++) {
		cq = &priv->virtqs[i].eqp.cq;
		if (cq->cq && !cq->armed)
			mlx5_vdpa_cq_arm(priv, cq);
	}
}

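/*
 * Sleep between polling cycles. In dynamic timer mode the delay grows by
 * event_us when no completion was found and shrinks in proportion to the
 * largest completion burst otherwise.
 */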
static void
mlx5_vdpa_timer_sleep(struct mlx5_vdpa_priv *priv, uint32_t max)
{
	if (priv->event_mode == MLX5_VDPA_EVENT_MODE_DYNAMIC_TIMER) {
		switch (max) {
		case 0:
			priv->timer_delay_us += priv->event_us;
			break;
		case 1:
			break;
		default:
			priv->timer_delay_us /= max;
			break;
		}
	}
	usleep(priv->timer_delay_us);
}

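/*
 * Dedicated polling thread: wait until the timer is turned on, then poll all
 * virtq CQs in a loop and notify the guest on completions. When no traffic is
 * seen for no_traffic_time_s seconds, re-arm the CQs and go back to waiting
 * on the timer condition.
 */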
static void *
mlx5_vdpa_poll_handle(void *arg)
{
	struct mlx5_vdpa_priv *priv = arg;
	int i;
	struct mlx5_vdpa_cq *cq;
	uint32_t max;
	uint64_t current_tic;

	pthread_mutex_lock(&priv->timer_lock);
	while (!priv->timer_on)
		pthread_cond_wait(&priv->timer_cond, &priv->timer_lock);
	pthread_mutex_unlock(&priv->timer_lock);
	priv->timer_delay_us = priv->event_mode ==
					    MLX5_VDPA_EVENT_MODE_DYNAMIC_TIMER ?
					      MLX5_VDPA_DEFAULT_TIMER_DELAY_US :
								 priv->event_us;
	while (1) {
		max = 0;
		pthread_mutex_lock(&priv->vq_config_lock);
		for (i = 0; i < priv->nr_virtqs; i++) {
			cq = &priv->virtqs[i].eqp.cq;
			if (cq->cq && !cq->armed) {
				uint32_t comp = mlx5_vdpa_cq_poll(cq);

				if (comp) {
					/* Notify guest of consumed descs. */
					if (cq->callfd != -1)
						eventfd_write(cq->callfd,
							      (eventfd_t)1);
					if (comp > max)
						max = comp;
				}
			}
		}
		current_tic = rte_rdtsc();
		if (!max) {
			/* No traffic: stop timer and rely on interrupts. */
			if (current_tic - priv->last_traffic_tic >=
			    rte_get_timer_hz() * priv->no_traffic_time_s) {
				DRV_LOG(DEBUG, "Device %s traffic was stopped.",
					priv->vdev->device->name);
				mlx5_vdpa_arm_all_cqs(priv);
				pthread_mutex_unlock(&priv->vq_config_lock);
				pthread_mutex_lock(&priv->timer_lock);
				priv->timer_on = 0;
				while (!priv->timer_on)
					pthread_cond_wait(&priv->timer_cond,
							  &priv->timer_lock);
				pthread_mutex_unlock(&priv->timer_lock);
				priv->timer_delay_us = priv->event_mode ==
					    MLX5_VDPA_EVENT_MODE_DYNAMIC_TIMER ?
					      MLX5_VDPA_DEFAULT_TIMER_DELAY_US :
								 priv->event_us;
				continue;
			}
		} else {
			priv->last_traffic_tic = current_tic;
		}
		pthread_mutex_unlock(&priv->vq_config_lock);
		mlx5_vdpa_timer_sleep(priv, max);
	}
	return NULL;
}

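/*
 * CQ event channel handler: drain all pending DevX events, poll the matching
 * CQs and notify the guest. In interrupt-only mode the CQ is re-armed here,
 * otherwise the polling timer takes over.
 */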
static void
mlx5_vdpa_interrupt_handler(void *cb_arg)
{
	struct mlx5_vdpa_priv *priv = cb_arg;
#ifdef HAVE_IBV_DEVX_EVENT
	union {
		struct mlx5dv_devx_async_event_hdr event_resp;
		uint8_t buf[sizeof(struct mlx5dv_devx_async_event_hdr) + 128];
	} out;

	pthread_mutex_lock(&priv->vq_config_lock);
	while (mlx5_glue->devx_get_event(priv->eventc, &out.event_resp,
					 sizeof(out.buf)) >=
				       (ssize_t)sizeof(out.event_resp.cookie)) {
		struct mlx5_vdpa_cq *cq = (struct mlx5_vdpa_cq *)
					       (uintptr_t)out.event_resp.cookie;
		struct mlx5_vdpa_event_qp *eqp = container_of(cq,
						 struct mlx5_vdpa_event_qp, cq);
		struct mlx5_vdpa_virtq *virtq = container_of(eqp,
						   struct mlx5_vdpa_virtq, eqp);

		if (!virtq->enable)
			continue;
		mlx5_vdpa_cq_poll(cq);
		/* Notify guest of consumed descs. */
		if (cq->callfd != -1)
			eventfd_write(cq->callfd, (eventfd_t)1);
		if (priv->event_mode == MLX5_VDPA_EVENT_MODE_ONLY_INTERRUPT) {
			mlx5_vdpa_cq_arm(priv, cq);
			pthread_mutex_unlock(&priv->vq_config_lock);
			return;
		}
		/* Don't arm again - timer will take control. */
		DRV_LOG(DEBUG, "Device %s virtq %d cq %d event was captured."
			" Timer is %s, cq ci is %u.\n",
			priv->vdev->device->name,
			(int)virtq->index, cq->cq->id,
			priv->timer_on ? "on" : "off", cq->cq_ci);
		cq->armed = 0;
	}
#endif

	/* Traffic detected: make sure timer is on. */
	priv->last_traffic_tic = rte_rdtsc();
	pthread_mutex_lock(&priv->timer_lock);
	if (!priv->timer_on) {
		priv->timer_on = 1;
		pthread_cond_signal(&priv->timer_cond);
	}
	pthread_mutex_unlock(&priv->timer_lock);
	pthread_mutex_unlock(&priv->vq_config_lock);
}

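/*
 * Virtq error event handler: for each error event query the error
 * information, disable the virtq and try to re-enable it, giving up when
 * errors repeat too often within MLX5_VDPA_ERROR_TIME_SEC seconds.
 */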
static void
mlx5_vdpa_err_interrupt_handler(void *cb_arg __rte_unused)
{
#ifdef HAVE_IBV_DEVX_EVENT
	struct mlx5_vdpa_priv *priv = cb_arg;
	union {
		struct mlx5dv_devx_async_event_hdr event_resp;
		uint8_t buf[sizeof(struct mlx5dv_devx_async_event_hdr) + 128];
	} out;
	uint32_t vq_index, i, version;
	struct mlx5_vdpa_virtq *virtq;
	uint64_t sec;

	pthread_mutex_lock(&priv->vq_config_lock);
	while (mlx5_glue->devx_get_event(priv->err_chnl, &out.event_resp,
					 sizeof(out.buf)) >=
				       (ssize_t)sizeof(out.event_resp.cookie)) {
		vq_index = out.event_resp.cookie & UINT32_MAX;
		version = out.event_resp.cookie >> 32;
		if (vq_index >= priv->nr_virtqs) {
			DRV_LOG(ERR, "Invalid device %s error event virtq %d.",
				priv->vdev->device->name, vq_index);
			continue;
		}
		virtq = &priv->virtqs[vq_index];
		if (!virtq->enable || virtq->version != version)
			continue;
		if (rte_rdtsc() / rte_get_tsc_hz() < MLX5_VDPA_ERROR_TIME_SEC)
			continue;
		virtq->stopped = true;
		/* Query error info. */
		if (mlx5_vdpa_virtq_query(priv, vq_index))
			goto log;
		/* Disable vq. */
		if (mlx5_vdpa_virtq_enable(priv, vq_index, 0)) {
			DRV_LOG(ERR, "Failed to disable virtq %d.", vq_index);
			goto log;
		}
		/* Retry if the error happened less than N times in 3 secs. */
		sec = (rte_rdtsc() - virtq->err_time[0]) / rte_get_tsc_hz();
		if (sec > MLX5_VDPA_ERROR_TIME_SEC) {
			/* Retry. */
			if (mlx5_vdpa_virtq_enable(priv, vq_index, 1))
				DRV_LOG(ERR, "Failed to enable virtq %d.",
					vq_index);
			else
				DRV_LOG(WARNING, "Recover virtq %d: %u.",
					vq_index, ++virtq->n_retry);
		} else {
			/* Retry timeout, give up. */
			DRV_LOG(ERR, "Device %s virtq %d failed to recover.",
				priv->vdev->device->name, vq_index);
		}
log:
		/* Shift the error time log and append the current time. */
		for (i = 1; i < RTE_DIM(virtq->err_time); i++)
			virtq->err_time[i - 1] = virtq->err_time[i];
		virtq->err_time[RTE_DIM(virtq->err_time) - 1] = rte_rdtsc();
	}
	pthread_mutex_unlock(&priv->vq_config_lock);
#endif
}

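/*
 * Create the device error event channel, make its FD non-blocking and
 * register the error interrupt handler on it.
 */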
int
mlx5_vdpa_err_event_setup(struct mlx5_vdpa_priv *priv)
{
	int ret;
	int flags;

	/* Setup device event channel. */
	priv->err_chnl = mlx5_glue->devx_create_event_channel(priv->ctx, 0);
	if (!priv->err_chnl) {
		rte_errno = errno;
		DRV_LOG(ERR, "Failed to create device event channel %d.",
			rte_errno);
		goto error;
	}
	flags = fcntl(priv->err_chnl->fd, F_GETFL);
	ret = fcntl(priv->err_chnl->fd, F_SETFL, flags | O_NONBLOCK);
	if (ret) {
		DRV_LOG(ERR, "Failed to change device event channel FD.");
		goto error;
	}
	priv->err_intr_handle.fd = priv->err_chnl->fd;
	priv->err_intr_handle.type = RTE_INTR_HANDLE_EXT;
	if (rte_intr_callback_register(&priv->err_intr_handle,
				       mlx5_vdpa_err_interrupt_handler,
				       priv)) {
		priv->err_intr_handle.fd = 0;
		DRV_LOG(ERR, "Failed to register error interrupt for device %d.",
			priv->vid);
		goto error;
	} else {
		DRV_LOG(DEBUG, "Registered error interrupt for device %d.",
			priv->vid);
	}
	return 0;
error:
	mlx5_vdpa_err_event_unset(priv);
	return -1;
}

void
mlx5_vdpa_err_event_unset(struct mlx5_vdpa_priv *priv)
{
	int retries = MLX5_VDPA_INTR_RETRIES;
	int ret = -EAGAIN;

	if (!priv->err_intr_handle.fd)
		return;
	while (retries-- && ret == -EAGAIN) {
		ret = rte_intr_callback_unregister(&priv->err_intr_handle,
					    mlx5_vdpa_err_interrupt_handler,
					    priv);
		if (ret == -EAGAIN) {
			DRV_LOG(DEBUG, "Try again to unregister fd %d "
				"of error interrupt, retries = %d.",
				priv->err_intr_handle.fd, retries);
			rte_pause();
		}
	}
	memset(&priv->err_intr_handle, 0, sizeof(priv->err_intr_handle));
	if (priv->err_chnl) {
#ifdef HAVE_IBV_DEVX_EVENT
		union {
			struct mlx5dv_devx_async_event_hdr event_resp;
			uint8_t buf[sizeof(struct mlx5dv_devx_async_event_hdr) +
				    128];
		} out;

		/* Clean all pending events. */
		while (mlx5_glue->devx_get_event(priv->err_chnl,
		       &out.event_resp, sizeof(out.buf)) >=
		       (ssize_t)sizeof(out.event_resp.cookie))
			;
#endif
		mlx5_glue->devx_destroy_event_channel(priv->err_chnl);
		priv->err_chnl = NULL;
	}
}

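/*
 * Set up CQE event handling: start the polling timer thread (unless in
 * interrupt-only mode) and register the CQ event channel interrupt handler.
 */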
int
mlx5_vdpa_cqe_event_setup(struct mlx5_vdpa_priv *priv)
{
	int ret;

	if (!priv->eventc)
		/* All virtqs are in poll mode. */
		return 0;
	if (priv->event_mode != MLX5_VDPA_EVENT_MODE_ONLY_INTERRUPT) {
		pthread_mutex_init(&priv->timer_lock, NULL);
		pthread_cond_init(&priv->timer_cond, NULL);
		priv->timer_on = 0;
		ret = pthread_create(&priv->timer_tid, NULL,
				     mlx5_vdpa_poll_handle, (void *)priv);
		if (ret) {
			DRV_LOG(ERR, "Failed to create timer thread.");
			return -1;
		}
	}
	priv->intr_handle.fd = priv->eventc->fd;
	priv->intr_handle.type = RTE_INTR_HANDLE_EXT;
	if (rte_intr_callback_register(&priv->intr_handle,
				       mlx5_vdpa_interrupt_handler, priv)) {
		priv->intr_handle.fd = 0;
		DRV_LOG(ERR, "Failed to register CQE interrupt %d.", rte_errno);
		goto error;
	}
	return 0;
error:
	mlx5_vdpa_cqe_event_unset(priv);
	return -1;
}

void
mlx5_vdpa_cqe_event_unset(struct mlx5_vdpa_priv *priv)
{
	int retries = MLX5_VDPA_INTR_RETRIES;
	int ret = -EAGAIN;
	void *status;

	if (priv->intr_handle.fd) {
		while (retries-- && ret == -EAGAIN) {
			ret = rte_intr_callback_unregister(&priv->intr_handle,
						    mlx5_vdpa_interrupt_handler,
						    priv);
			if (ret == -EAGAIN) {
				DRV_LOG(DEBUG, "Try again to unregister fd %d "
					"of CQ interrupt, retries = %d.",
					priv->intr_handle.fd, retries);
				rte_pause();
			}
		}
		memset(&priv->intr_handle, 0, sizeof(priv->intr_handle));
	}
	if (priv->timer_tid) {
		pthread_cancel(priv->timer_tid);
		pthread_join(priv->timer_tid, &status);
	}
	priv->timer_tid = 0;
}

void
mlx5_vdpa_event_qp_destroy(struct mlx5_vdpa_event_qp *eqp)
{
	if (eqp->sw_qp)
		claim_zero(mlx5_devx_cmd_destroy(eqp->sw_qp));
	if (eqp->umem_obj)
		claim_zero(mlx5_glue->devx_umem_dereg(eqp->umem_obj));
	if (eqp->umem_buf)
		rte_free(eqp->umem_buf);
	if (eqp->fw_qp)
		claim_zero(mlx5_devx_cmd_destroy(eqp->fw_qp));
	mlx5_vdpa_cq_destroy(&eqp->cq);
	memset(eqp, 0, sizeof(*eqp));
}

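/*
 * Move the FW QP and the SW QP, each using the other as its remote peer,
 * through the RST->INIT->RTR->RTS state transitions.
 */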
static int
mlx5_vdpa_qps2rts(struct mlx5_vdpa_event_qp *eqp)
{
	if (mlx5_devx_cmd_modify_qp_state(eqp->fw_qp, MLX5_CMD_OP_RST2INIT_QP,
					  eqp->sw_qp->id)) {
		DRV_LOG(ERR, "Failed to modify FW QP to INIT state(%u).",
			rte_errno);
		return -1;
	}
	if (mlx5_devx_cmd_modify_qp_state(eqp->sw_qp, MLX5_CMD_OP_RST2INIT_QP,
					  eqp->fw_qp->id)) {
		DRV_LOG(ERR, "Failed to modify SW QP to INIT state(%u).",
			rte_errno);
		return -1;
	}
	if (mlx5_devx_cmd_modify_qp_state(eqp->fw_qp, MLX5_CMD_OP_INIT2RTR_QP,
					  eqp->sw_qp->id)) {
		DRV_LOG(ERR, "Failed to modify FW QP to RTR state(%u).",
			rte_errno);
		return -1;
	}
	if (mlx5_devx_cmd_modify_qp_state(eqp->sw_qp, MLX5_CMD_OP_INIT2RTR_QP,
					  eqp->fw_qp->id)) {
		DRV_LOG(ERR, "Failed to modify SW QP to RTR state(%u).",
			rte_errno);
		return -1;
	}
	if (mlx5_devx_cmd_modify_qp_state(eqp->fw_qp, MLX5_CMD_OP_RTR2RTS_QP,
					  eqp->sw_qp->id)) {
		DRV_LOG(ERR, "Failed to modify FW QP to RTS state(%u).",
			rte_errno);
		return -1;
	}
	if (mlx5_devx_cmd_modify_qp_state(eqp->sw_qp, MLX5_CMD_OP_RTR2RTS_QP,
					  eqp->fw_qp->id)) {
		DRV_LOG(ERR, "Failed to modify SW QP to RTS state(%u).",
			rte_errno);
		return -1;
	}
	return 0;
}

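/*
 * Create an event QP pair bound to the event CQ: a FW-owned QP and a SW QP
 * sharing one umem that holds the RQ WQEs followed by the doorbell record,
 * then connect them and move both to RTS.
 */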
int
mlx5_vdpa_event_qp_create(struct mlx5_vdpa_priv *priv, uint16_t desc_n,
			  int callfd, struct mlx5_vdpa_event_qp *eqp)
{
	struct mlx5_devx_qp_attr attr = {0};
	uint16_t log_desc_n = rte_log2_u32(desc_n);
	uint32_t umem_size = (1 << log_desc_n) * MLX5_WSEG_SIZE +
						       sizeof(*eqp->db_rec) * 2;

	if (mlx5_vdpa_event_qp_global_prepare(priv))
		return -1;
	if (mlx5_vdpa_cq_create(priv, log_desc_n, callfd, &eqp->cq))
		return -1;
	attr.pd = priv->pdn;
	eqp->fw_qp = mlx5_devx_cmd_create_qp(priv->ctx, &attr);
	if (!eqp->fw_qp) {
		DRV_LOG(ERR, "Failed to create FW QP(%u).", rte_errno);
		goto error;
	}
	eqp->umem_buf = rte_zmalloc(__func__, umem_size, 4096);
	if (!eqp->umem_buf) {
		DRV_LOG(ERR, "Failed to allocate memory for SW QP.");
		rte_errno = ENOMEM;
		goto error;
	}
	eqp->umem_obj = mlx5_glue->devx_umem_reg(priv->ctx,
					       (void *)(uintptr_t)eqp->umem_buf,
					       umem_size,
					       IBV_ACCESS_LOCAL_WRITE);
	if (!eqp->umem_obj) {
		DRV_LOG(ERR, "Failed to register umem for SW QP.");
		goto error;
	}
	attr.uar_index = priv->uar->page_id;
	attr.cqn = eqp->cq.cq->id;
	attr.log_page_size = rte_log2_u32(sysconf(_SC_PAGESIZE));
	attr.rq_size = 1 << log_desc_n;
	attr.log_rq_stride = rte_log2_u32(MLX5_WSEG_SIZE);
	attr.sq_size = 0; /* No SQ is needed. */
	attr.dbr_umem_valid = 1;
	attr.wq_umem_id = eqp->umem_obj->umem_id;
	attr.wq_umem_offset = 0;
	attr.dbr_umem_id = eqp->umem_obj->umem_id;
	attr.dbr_address = (1 << log_desc_n) * MLX5_WSEG_SIZE;
	eqp->sw_qp = mlx5_devx_cmd_create_qp(priv->ctx, &attr);
	if (!eqp->sw_qp) {
		DRV_LOG(ERR, "Failed to create SW QP(%u).", rte_errno);
		goto error;
	}
	eqp->db_rec = RTE_PTR_ADD(eqp->umem_buf, (uintptr_t)attr.dbr_address);
	if (mlx5_vdpa_qps2rts(eqp))
		goto error;
	/* First ringing. */
	rte_write32(rte_cpu_to_be_32(1 << log_desc_n), &eqp->db_rec[0]);
	return 0;
error:
	mlx5_vdpa_event_qp_destroy(eqp);
	return -1;
}