/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2019 Mellanox Technologies, Ltd
 */

#ifndef RTE_PMD_MLX5_VDPA_H_
#define RTE_PMD_MLX5_VDPA_H_

#include <linux/virtio_net.h>
#include <sys/queue.h>

#ifdef PEDANTIC
#pragma GCC diagnostic ignored "-Wpedantic"
#endif
#include <rte_vdpa.h>
#include <rte_vdpa_dev.h>
#include <rte_vhost.h>
#ifdef PEDANTIC
#pragma GCC diagnostic error "-Wpedantic"
#endif
#include <rte_spinlock.h>
#include <rte_interrupts.h>

#include <mlx5_glue.h>
#include <mlx5_devx_cmds.h>
#include <mlx5_prm.h>


#define MLX5_VDPA_INTR_RETRIES 256
#define MLX5_VDPA_INTR_RETRIES_USEC 1000

#ifndef VIRTIO_F_ORDER_PLATFORM
#define VIRTIO_F_ORDER_PLATFORM 36
#endif

#ifndef VIRTIO_F_RING_PACKED
#define VIRTIO_F_RING_PACKED 34
#endif

#define MLX5_VDPA_DEFAULT_TIMER_DELAY_US 100u
#define MLX5_VDPA_DEFAULT_TIMER_STEP_US 1u

struct mlx5_vdpa_cq {
	uint16_t log_desc_n;
	uint32_t cq_ci:24;
	uint32_t arm_sn:2;
	uint32_t armed:1;
	int callfd;
	rte_spinlock_t sl;
	struct mlx5_devx_obj *cq;
	struct mlx5dv_devx_umem *umem_obj;
	union {
		volatile void *umem_buf;
		volatile struct mlx5_cqe *cqes;
	};
	volatile uint32_t *db_rec;
	uint64_t errors;
};

struct mlx5_vdpa_event_qp {
	struct mlx5_vdpa_cq cq;
	struct mlx5_devx_obj *fw_qp;
	struct mlx5_devx_obj *sw_qp;
	struct mlx5dv_devx_umem *umem_obj;
	void *umem_buf;
	volatile uint32_t *db_rec;
};

struct mlx5_vdpa_query_mr {
	SLIST_ENTRY(mlx5_vdpa_query_mr) next;
	void *addr;
	uint64_t length;
	struct mlx5dv_devx_umem *umem;
	struct mlx5_devx_obj *mkey;
	int is_indirect;
};

enum {
	MLX5_VDPA_NOTIFIER_STATE_DISABLED,
	MLX5_VDPA_NOTIFIER_STATE_ENABLED,
	MLX5_VDPA_NOTIFIER_STATE_ERR
};

struct mlx5_vdpa_virtq {
	SLIST_ENTRY(mlx5_vdpa_virtq) next;
	uint8_t enable;
	uint16_t index;
	uint16_t vq_size;
	uint8_t notifier_state;
	bool stopped;
	uint32_t version;
	struct mlx5_vdpa_priv *priv;
	struct mlx5_devx_obj *virtq;
	struct mlx5_devx_obj *counters;
	struct mlx5_vdpa_event_qp eqp;
	struct {
		struct mlx5dv_devx_umem *obj;
		void *buf;
		uint32_t size;
	} umems[3];
	struct rte_intr_handle intr_handle;
	uint64_t err_time[3]; /* RDTSC time of recent errors. */
	uint32_t n_retry;
	struct mlx5_devx_virtio_q_couners_attr reset;
};

struct mlx5_vdpa_steer {
	struct mlx5_devx_obj *rqt;
	void *domain;
	void *tbl;
	struct {
		struct mlx5dv_flow_matcher *matcher;
		struct mlx5_devx_obj *tir;
		void *tir_action;
		void *flow;
	} rss[7];
};

enum {
	MLX5_VDPA_EVENT_MODE_DYNAMIC_TIMER,
	MLX5_VDPA_EVENT_MODE_FIXED_TIMER,
	MLX5_VDPA_EVENT_MODE_ONLY_INTERRUPT
};
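
/*
 * Note (illustrative sketch, not taken from this header): the event mode is
 * normally selected through the driver devargs documented in the DPDK mlx5
 * vdpa guide, i.e. "event_mode", "event_us" and "no_traffic_time", which feed
 * the event_mode, event_us and no_traffic_time_s fields of struct
 * mlx5_vdpa_priv below. For example (the PCI address is only a placeholder):
 *
 *   dpdk-testpmd -a 0000:01:00.2,class=vdpa,event_mode=2 -- -i
 *
 * would select MLX5_VDPA_EVENT_MODE_ONLY_INTERRUPT.
 */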

struct mlx5_vdpa_priv {
	TAILQ_ENTRY(mlx5_vdpa_priv) next;
	uint8_t configured;
	pthread_mutex_t vq_config_lock;
	uint64_t last_traffic_tic;
	pthread_t timer_tid;
	pthread_mutex_t timer_lock;
	pthread_cond_t timer_cond;
	volatile uint8_t timer_on;
	int event_mode;
	uint32_t event_us;
	uint32_t timer_delay_us;
	uint32_t no_traffic_time_s;
	struct rte_vdpa_device *vdev; /* vDPA device. */
	int vid; /* vhost device id. */
	struct ibv_context *ctx; /* Device context. */
	struct rte_pci_device *pci_dev;
	struct mlx5_hca_vdpa_attr caps;
	uint32_t pdn; /* Protection Domain number. */
	struct ibv_pd *pd;
	uint32_t gpa_mkey_index;
	struct ibv_mr *null_mr;
	struct rte_vhost_memory *vmem;
	uint32_t eqn;
	struct mlx5dv_devx_event_channel *eventc;
	struct mlx5dv_devx_event_channel *err_chnl;
	struct mlx5dv_devx_uar *uar;
	struct rte_intr_handle intr_handle;
	struct rte_intr_handle err_intr_handle;
	struct mlx5_devx_obj *td;
	struct mlx5_devx_obj *tiss[16]; /* TIS list for each LAG port. */
	uint16_t nr_virtqs;
	uint8_t num_lag_ports;
	uint64_t features; /* Negotiated features. */
	uint16_t log_max_rqt_size;
	struct mlx5_vdpa_steer steer;
	struct mlx5dv_var *var;
	void *virtq_db_addr;
	SLIST_HEAD(mr_list, mlx5_vdpa_query_mr) mr_list;
	struct mlx5_vdpa_virtq virtqs[];
};

enum {
	MLX5_VDPA_STATS_RECEIVED_DESCRIPTORS,
	MLX5_VDPA_STATS_COMPLETED_DESCRIPTORS,
	MLX5_VDPA_STATS_BAD_DESCRIPTOR_ERRORS,
	MLX5_VDPA_STATS_EXCEED_MAX_CHAIN,
	MLX5_VDPA_STATS_INVALID_BUFFER,
	MLX5_VDPA_STATS_COMPLETION_ERRORS,
	MLX5_VDPA_STATS_MAX
};

/*
 * Check whether a virtq is a receive queue.
 * According to the VIRTIO_NET specification, the virtqueue index identifies
 * its type as follows:
 * 0 receiveq1
 * 1 transmitq1
 * ...
 * 2(N-1) receiveqN
 * 2(N-1)+1 transmitqN
 * 2N controlq
 */
static inline uint8_t
is_virtq_recvq(int virtq_index, int nr_vring)
{
	if (virtq_index % 2 == 0 && virtq_index != nr_vring - 1)
		return 1;
	return 0;
}
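
/*
 * Worked example: with two virtio-net queue pairs plus a control queue,
 * nr_vring is 5 and the indices map as:
 *   0 -> receiveq1, 1 -> transmitq1, 2 -> receiveq2, 3 -> transmitq2,
 *   4 -> controlq.
 * Hence is_virtq_recvq(0, 5) and is_virtq_recvq(2, 5) return 1, while
 * is_virtq_recvq(1, 5), is_virtq_recvq(3, 5) and is_virtq_recvq(4, 5)
 * return 0.
 */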

/**
 * Release all the prepared memory regions and all their related resources.
 *
 * @param[in] priv
 *   The vdpa driver private structure.
 */
void mlx5_vdpa_mem_dereg(struct mlx5_vdpa_priv *priv);

/**
 * Register all the memory regions of the virtio device to the HW and allocate
 * all their related resources.
 *
 * @param[in] priv
 *   The vdpa driver private structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int mlx5_vdpa_mem_register(struct mlx5_vdpa_priv *priv);


/**
 * Create an event QP and all its related resources.
 *
 * @param[in] priv
 *   The vdpa driver private structure.
 * @param[in] desc_n
 *   Number of descriptors.
 * @param[in] callfd
 *   The guest notification file descriptor.
 * @param[in/out] eqp
 *   Pointer to the event QP structure.
 *
 * @return
 *   0 on success, -1 otherwise and rte_errno is set.
 */
int mlx5_vdpa_event_qp_create(struct mlx5_vdpa_priv *priv, uint16_t desc_n,
			      int callfd, struct mlx5_vdpa_event_qp *eqp);
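
/*
 * Usage sketch (illustrative only; error handling trimmed): an event QP is
 * typically created per virtq with the ring size and guest callfd taken from
 * the vhost vring, and released with mlx5_vdpa_event_qp_destroy(). The "vq"
 * below is assumed to be a struct rte_vhost_vring obtained via
 * rte_vhost_get_vhost_vring():
 *
 *   if (mlx5_vdpa_event_qp_create(priv, vq.size, vq.callfd, &virtq->eqp))
 *           return -1;
 *   ...
 *   mlx5_vdpa_event_qp_destroy(&virtq->eqp);
 */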

/**
 * Destroy an event QP and all its related resources.
 *
 * @param[in/out] eqp
 *   Pointer to the event QP structure.
 */
void mlx5_vdpa_event_qp_destroy(struct mlx5_vdpa_event_qp *eqp);

/**
 * Release all the event global resources.
 *
 * @param[in] priv
 *   The vdpa driver private structure.
 */
void mlx5_vdpa_event_qp_global_release(struct mlx5_vdpa_priv *priv);

/**
 * Setup CQE event.
 *
 * @param[in] priv
 *   The vdpa driver private structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int mlx5_vdpa_cqe_event_setup(struct mlx5_vdpa_priv *priv);

/**
 * Unset CQE event.
 *
 * @param[in] priv
 *   The vdpa driver private structure.
 */
void mlx5_vdpa_cqe_event_unset(struct mlx5_vdpa_priv *priv);

/**
 * Setup error interrupt handler.
 *
 * @param[in] priv
 *   The vdpa driver private structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int mlx5_vdpa_err_event_setup(struct mlx5_vdpa_priv *priv);

/**
 * Unset error event handler.
 *
 * @param[in] priv
 *   The vdpa driver private structure.
 */
void mlx5_vdpa_err_event_unset(struct mlx5_vdpa_priv *priv);

/**
 * Release all the virtqs and all their related resources.
 *
 * @param[in] priv
 *   The vdpa driver private structure.
 */
void mlx5_vdpa_virtqs_release(struct mlx5_vdpa_priv *priv);

/**
 * Create all the HW virtq resources and all their related resources.
 *
 * @param[in] priv
 *   The vdpa driver private structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int mlx5_vdpa_virtqs_prepare(struct mlx5_vdpa_priv *priv);

/**
 * Enable/disable a virtq.
 *
 * @param[in] priv
 *   The vdpa driver private structure.
 * @param[in] index
 *   The virtq index.
 * @param[in] enable
 *   Set to enable, otherwise disable.
 *
 * @return
 *   0 on success, a negative value otherwise.
 */
int mlx5_vdpa_virtq_enable(struct mlx5_vdpa_priv *priv, int index, int enable);

/**
 * Unset steering and release all its related resources (stop traffic).
 *
 * @param[in] priv
 *   The vdpa driver private structure.
 */
void mlx5_vdpa_steer_unset(struct mlx5_vdpa_priv *priv);

/**
 * Update steering according to the receive queues' state.
 *
 * @param[in] priv
 *   The vdpa driver private structure.
 *
 * @return
 *   0 on success, a negative value otherwise.
 */
int mlx5_vdpa_steer_update(struct mlx5_vdpa_priv *priv);

/**
 * Setup steering and all its related resources to enable RSS traffic from the
 * device to all the Rx host queues.
 *
 * @param[in] priv
 *   The vdpa driver private structure.
 *
 * @return
 *   0 on success, a negative value otherwise.
 */
int mlx5_vdpa_steer_setup(struct mlx5_vdpa_priv *priv);

/**
 * Enable/disable live migration logging.
 *
 * @param[in] priv
 *   The vdpa driver private structure.
 * @param[in] enable
 *   Set for enable, unset for disable.
 *
 * @return
 *   0 on success, a negative value otherwise.
 */
int mlx5_vdpa_logging_enable(struct mlx5_vdpa_priv *priv, int enable);

/**
 * Set dirty bitmap logging to allow live migration.
 *
 * @param[in] priv
 *   The vdpa driver private structure.
 * @param[in] log_base
 *   Vhost log base.
 * @param[in] log_size
 *   Vhost log size.
 *
 * @return
 *   0 on success, a negative value otherwise.
 */
int mlx5_vdpa_dirty_bitmap_set(struct mlx5_vdpa_priv *priv, uint64_t log_base,
			       uint64_t log_size);
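
/*
 * Usage sketch (illustrative; based on the generic vhost live migration flow,
 * not on code in this header): once VHOST_F_LOG_ALL is negotiated, the vhost
 * dirty log is fetched with rte_vhost_get_log_base() and handed to the device
 * before logging is enabled:
 *
 *   uint64_t log_base, log_size;
 *
 *   if (rte_vhost_get_log_base(priv->vid, &log_base, &log_size) == 0 &&
 *       mlx5_vdpa_dirty_bitmap_set(priv, log_base, log_size) == 0)
 *           mlx5_vdpa_logging_enable(priv, 1);
 */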

/**
 * Log all the virtqs' information for live migration.
 *
 * @param[in] priv
 *   The vdpa driver private structure.
 *
 * @return
 *   0 on success, a negative value otherwise.
 */
int mlx5_vdpa_lm_log(struct mlx5_vdpa_priv *priv);

/**
 * Modify the virtq state to ready or suspend.
 *
 * @param[in] virtq
 *   The vdpa driver private virtq structure.
 * @param[in] state
 *   Set for ready, otherwise suspend.
 *
 * @return
 *   0 on success, a negative value otherwise.
 */
int mlx5_vdpa_virtq_modify(struct mlx5_vdpa_virtq *virtq, int state);

/**
 * Stop a virtq before destroying it.
 *
 * @param[in] priv
 *   The vdpa driver private structure.
 * @param[in] index
 *   The virtq index.
 *
 * @return
 *   0 on success, a negative value otherwise.
 */
int mlx5_vdpa_virtq_stop(struct mlx5_vdpa_priv *priv, int index);

/**
 * Query virtq information.
 *
 * @param[in] priv
 *   The vdpa driver private structure.
 * @param[in] index
 *   The virtq index.
 *
 * @return
 *   0 on success, a negative value otherwise.
 */
int mlx5_vdpa_virtq_query(struct mlx5_vdpa_priv *priv, int index);

/**
 * Get virtq statistics.
 *
 * @param[in] priv
 *   The vdpa driver private structure.
 * @param[in] qid
 *   The virtq index.
 * @param stats
 *   The virtq statistics array to fill.
 * @param n
 *   The number of elements in @p stats array.
 *
 * @return
 *   A negative value on error, otherwise the number of entries filled in the
 *   @p stats array.
 */
int
mlx5_vdpa_virtq_stats_get(struct mlx5_vdpa_priv *priv, int qid,
			  struct rte_vdpa_stat *stats, unsigned int n);
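
/*
 * Usage sketch (illustrative): the caller passes an array of up to
 * MLX5_VDPA_STATS_MAX entries and gets back the number of entries filled,
 * or a negative value on error:
 *
 *   struct rte_vdpa_stat stats[MLX5_VDPA_STATS_MAX];
 *   int n = mlx5_vdpa_virtq_stats_get(priv, qid, stats, MLX5_VDPA_STATS_MAX);
 *
 *   if (n < 0)
 *           return n;
 */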

/**
 * Reset virtq statistics.
 *
 * @param[in] priv
 *   The vdpa driver private structure.
 * @param[in] qid
 *   The virtq index.
 *
 * @return
 *   A negative value on error, otherwise 0.
 */
int
mlx5_vdpa_virtq_stats_reset(struct mlx5_vdpa_priv *priv, int qid);
#endif /* RTE_PMD_MLX5_VDPA_H_ */