/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2018 Mellanox Technologies, Ltd
 */

#include <unistd.h>

#include <rte_errno.h>
#include <rte_malloc.h>
#include <rte_eal_paging.h>

#include "mlx5_prm.h"
#include "mlx5_devx_cmds.h"
#include "mlx5_common_log.h"
#include "mlx5_malloc.h"

static void *
mlx5_devx_get_hca_cap(void *ctx, uint32_t *in, uint32_t *out,
		      int *err, uint32_t flags)
{
	const size_t size_in = MLX5_ST_SZ_DW(query_hca_cap_in) * sizeof(int);
	const size_t size_out = MLX5_ST_SZ_DW(query_hca_cap_out) * sizeof(int);
	int status, syndrome, rc;

	if (err)
		*err = 0;
	memset(in, 0, size_in);
	memset(out, 0, size_out);
	MLX5_SET(query_hca_cap_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_CAP);
	MLX5_SET(query_hca_cap_in, in, op_mod, flags);
	rc = mlx5_glue->devx_general_cmd(ctx, in, size_in, out, size_out);
	if (rc) {
		DRV_LOG(ERR,
			"Failed to query devx HCA capabilities func %#02x",
			flags >> 1);
		if (err)
			*err = rc > 0 ? -rc : rc;
		return NULL;
	}
	status = MLX5_GET(query_hca_cap_out, out, status);
	syndrome = MLX5_GET(query_hca_cap_out, out, syndrome);
	if (status) {
		DRV_LOG(ERR,
			"Failed to query devx HCA capabilities func %#02x status %x, syndrome = %x",
			flags >> 1, status, syndrome);
		if (err)
			*err = -1;
		return NULL;
	}
	return MLX5_ADDR_OF(query_hca_cap_out, out, capability);
}

/**
 * Perform read access to the registers. Reads data from register
 * and writes them to the specified buffer.
 *
 * @param[in] ctx
 *   Context returned from mlx5 open_device() glue function.
 * @param[in] reg_id
 *   Register identifier according to the PRM.
 * @param[in] arg
 *   Register access auxiliary parameter according to the PRM.
 * @param[out] data
 *   Pointer to the buffer to store read data.
 * @param[in] dw_cnt
 *   Buffer size in double words.
 *
 * @return
 *   0 on success, a negative value otherwise.
 */
int
mlx5_devx_cmd_register_read(void *ctx, uint16_t reg_id, uint32_t arg,
			    uint32_t *data, uint32_t dw_cnt)
{
	uint32_t in[MLX5_ST_SZ_DW(access_register_in)] = {0};
	uint32_t out[MLX5_ST_SZ_DW(access_register_out) +
		     MLX5_ACCESS_REGISTER_DATA_DWORD_MAX] = {0};
	int status, rc;

	MLX5_ASSERT(data && dw_cnt);
	MLX5_ASSERT(dw_cnt <= MLX5_ACCESS_REGISTER_DATA_DWORD_MAX);
	if (dw_cnt > MLX5_ACCESS_REGISTER_DATA_DWORD_MAX) {
		DRV_LOG(ERR, "Not enough buffer for register read data");
		return -1;
	}
	MLX5_SET(access_register_in, in, opcode,
		 MLX5_CMD_OP_ACCESS_REGISTER_USER);
	MLX5_SET(access_register_in, in, op_mod,
		 MLX5_ACCESS_REGISTER_IN_OP_MOD_READ);
	MLX5_SET(access_register_in, in, register_id, reg_id);
	MLX5_SET(access_register_in, in, argument, arg);
	rc = mlx5_glue->devx_general_cmd(ctx, in, sizeof(in), out,
					 MLX5_ST_SZ_BYTES(access_register_out) +
					 sizeof(uint32_t) * dw_cnt);
	if (rc)
		goto error;
	status = MLX5_GET(access_register_out, out, status);
	if (status) {
		int syndrome = MLX5_GET(access_register_out, out, syndrome);

		DRV_LOG(DEBUG, "Failed to read access NIC register 0x%X, "
			"status %x, syndrome = %x",
			reg_id, status, syndrome);
		return -1;
	}
	memcpy(data, &out[MLX5_ST_SZ_DW(access_register_out)],
	       dw_cnt * sizeof(uint32_t));
	return 0;
error:
	rc = (rc > 0) ? -rc : rc;
	return rc;
}
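
#ifdef MLX5_DEVX_CMDS_EXAMPLES /* Hypothetical guard - never defined. */
/*
 * Usage sketch for mlx5_devx_cmd_register_read(). This is an illustrative
 * caller, not part of the driver: the register id and the one-dword payload
 * layout are assumptions that depend on the PRM register being accessed.
 */
static int
mlx5_example_register_read(void *ctx, uint16_t reg_id)
{
	uint32_t data[1] = {0};
	int ret;

	/* Read a single dword; "arg" is 0 for registers with no argument. */
	ret = mlx5_devx_cmd_register_read(ctx, reg_id, 0, data, 1);
	if (ret) {
		DRV_LOG(ERR, "Example register read failed: %d", ret);
		return ret;
	}
	DRV_LOG(DEBUG, "Register 0x%X first dword: 0x%08X", reg_id, data[0]);
	return 0;
}
#endif /* MLX5_DEVX_CMDS_EXAMPLES */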

/**
 * Perform write access to the registers.
 *
 * @param[in] ctx
 *   Context returned from mlx5 open_device() glue function.
 * @param[in] reg_id
 *   Register identifier according to the PRM.
 * @param[in] arg
 *   Register access auxiliary parameter according to the PRM.
 * @param[in] data
 *   Pointer to the buffer containing data to write.
 * @param[in] dw_cnt
 *   Buffer size in double words (32bit units).
 *
 * @return
 *   0 on success, a negative value otherwise.
 */
int
mlx5_devx_cmd_register_write(void *ctx, uint16_t reg_id, uint32_t arg,
			     uint32_t *data, uint32_t dw_cnt)
{
	uint32_t in[MLX5_ST_SZ_DW(access_register_in) +
		    MLX5_ACCESS_REGISTER_DATA_DWORD_MAX] = {0};
	uint32_t out[MLX5_ST_SZ_DW(access_register_out)] = {0};
	int status, rc;
	void *ptr;

	MLX5_ASSERT(data && dw_cnt);
	MLX5_ASSERT(dw_cnt <= MLX5_ACCESS_REGISTER_DATA_DWORD_MAX);
	if (dw_cnt > MLX5_ACCESS_REGISTER_DATA_DWORD_MAX) {
		DRV_LOG(ERR, "Data to write exceeds max size");
		return -1;
	}
	MLX5_SET(access_register_in, in, opcode,
		 MLX5_CMD_OP_ACCESS_REGISTER_USER);
	MLX5_SET(access_register_in, in, op_mod,
		 MLX5_ACCESS_REGISTER_IN_OP_MOD_WRITE);
	MLX5_SET(access_register_in, in, register_id, reg_id);
	MLX5_SET(access_register_in, in, argument, arg);
	ptr = MLX5_ADDR_OF(access_register_in, in, register_data);
	memcpy(ptr, data, dw_cnt * sizeof(uint32_t));
	rc = mlx5_glue->devx_general_cmd(ctx, in,
					 MLX5_ST_SZ_BYTES(access_register_in) +
					 dw_cnt * sizeof(uint32_t),
					 out, sizeof(out));
	if (rc)
		goto error;
	status = MLX5_GET(access_register_out, out, status);
	if (status) {
		int syndrome = MLX5_GET(access_register_out, out, syndrome);

		DRV_LOG(DEBUG, "Failed to write access NIC register 0x%X, "
			"status %x, syndrome = %x",
			reg_id, status, syndrome);
		return -1;
	}
	return 0;
error:
	rc = (rc > 0) ? -rc : rc;
	return rc;
}

/**
 * Allocate flow counters via devx interface.
 *
 * @param[in] ctx
 *   Context returned from mlx5 open_device() glue function.
 * @param bulk_n_128
 *   Bulk counter numbers in 128 counters units.
 *
 * @return
 *   Pointer to counter object on success, NULL otherwise and
 *   rte_errno is set.
 */
struct mlx5_devx_obj *
mlx5_devx_cmd_flow_counter_alloc(void *ctx, uint32_t bulk_n_128)
{
	struct mlx5_devx_obj *dcs = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*dcs),
						0, SOCKET_ID_ANY);
	uint32_t in[MLX5_ST_SZ_DW(alloc_flow_counter_in)] = {0};
	uint32_t out[MLX5_ST_SZ_DW(alloc_flow_counter_out)] = {0};

	if (!dcs) {
		rte_errno = ENOMEM;
		return NULL;
	}
	MLX5_SET(alloc_flow_counter_in, in, opcode,
		 MLX5_CMD_OP_ALLOC_FLOW_COUNTER);
	MLX5_SET(alloc_flow_counter_in, in, flow_counter_bulk, bulk_n_128);
	dcs->obj = mlx5_glue->devx_obj_create(ctx, in,
					      sizeof(in), out, sizeof(out));
	if (!dcs->obj) {
		DRV_LOG(ERR, "Can't allocate counters - error %d", errno);
		rte_errno = errno;
		mlx5_free(dcs);
		return NULL;
	}
	dcs->id = MLX5_GET(alloc_flow_counter_out, out, flow_counter_id);
	return dcs;
}
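
#ifdef MLX5_DEVX_CMDS_EXAMPLES /* Hypothetical guard - never defined. */
/*
 * Usage sketch: allocate a bulk of 4 * 128 = 512 flow counters and release
 * them again. Illustrative only; error handling is reduced to the minimum.
 */
static int
mlx5_example_counter_bulk(void *ctx)
{
	struct mlx5_devx_obj *dcs;

	dcs = mlx5_devx_cmd_flow_counter_alloc(ctx, 4);
	if (!dcs)
		return -rte_errno;
	/* dcs->id is the first counter index of the allocated bulk. */
	DRV_LOG(DEBUG, "First counter id: %u", dcs->id);
	return mlx5_devx_cmd_destroy(dcs);
}
#endif /* MLX5_DEVX_CMDS_EXAMPLES */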

/**
 * Query flow counters values.
 *
 * @param[in] dcs
 *   devx object that was obtained from mlx5_devx_cmd_fc_alloc.
 * @param[in] clear
 *   Whether hardware should clear the counters after the query or not.
 * @param[in] n_counters
 *   0 in case of 1 counter to read, otherwise the counter number to read.
 * @param pkts
 *   The number of packets that matched the flow.
 * @param bytes
 *   The number of bytes that matched the flow.
 * @param mkey
 *   The mkey key for batch query.
 * @param addr
 *   The address in the mkey range for batch query.
 * @param cmd_comp
 *   The completion object for asynchronous batch query.
 * @param async_id
 *   The ID to be returned in the asynchronous batch query response.
 *
 * @return
 *   0 on success, a negative value otherwise.
 */
int
mlx5_devx_cmd_flow_counter_query(struct mlx5_devx_obj *dcs,
				 int clear, uint32_t n_counters,
				 uint64_t *pkts, uint64_t *bytes,
				 uint32_t mkey, void *addr,
				 void *cmd_comp,
				 uint64_t async_id)
{
	int out_len = MLX5_ST_SZ_BYTES(query_flow_counter_out) +
			MLX5_ST_SZ_BYTES(traffic_counter);
	uint32_t out[out_len];
	uint32_t in[MLX5_ST_SZ_DW(query_flow_counter_in)] = {0};
	void *stats;
	int rc;

	MLX5_SET(query_flow_counter_in, in, opcode,
		 MLX5_CMD_OP_QUERY_FLOW_COUNTER);
	MLX5_SET(query_flow_counter_in, in, op_mod, 0);
	MLX5_SET(query_flow_counter_in, in, flow_counter_id, dcs->id);
	MLX5_SET(query_flow_counter_in, in, clear, !!clear);

	if (n_counters) {
		MLX5_SET(query_flow_counter_in, in, num_of_counters,
			 n_counters);
		MLX5_SET(query_flow_counter_in, in, dump_to_memory, 1);
		MLX5_SET(query_flow_counter_in, in, mkey, mkey);
		MLX5_SET64(query_flow_counter_in, in, address,
			   (uint64_t)(uintptr_t)addr);
	}
	if (!cmd_comp)
		rc = mlx5_glue->devx_obj_query(dcs->obj, in, sizeof(in), out,
					       out_len);
	else
		rc = mlx5_glue->devx_obj_query_async(dcs->obj, in, sizeof(in),
						     out_len, async_id,
						     cmd_comp);
	if (rc) {
		DRV_LOG(ERR, "Failed to query devx counters with rc %d", rc);
		rte_errno = rc;
		return -rc;
	}
	if (!n_counters) {
		stats = MLX5_ADDR_OF(query_flow_counter_out,
				     out, flow_statistics);
		*pkts = MLX5_GET64(traffic_counter, stats, packets);
		*bytes = MLX5_GET64(traffic_counter, stats, octets);
	}
	return 0;
}
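
#ifdef MLX5_DEVX_CMDS_EXAMPLES /* Hypothetical guard - never defined. */
/*
 * Usage sketch: synchronous, single-counter read with clear-on-read.
 * Passing n_counters = 0 and cmd_comp = NULL selects the simple inline
 * query path, so no mkey/address/async id is needed.
 */
static int
mlx5_example_counter_query(struct mlx5_devx_obj *dcs)
{
	uint64_t pkts = 0;
	uint64_t bytes = 0;
	int ret;

	ret = mlx5_devx_cmd_flow_counter_query(dcs, 1, 0, &pkts, &bytes,
					       0, NULL, NULL, 0);
	if (ret)
		return ret;
	DRV_LOG(DEBUG, "Counter %u: %llu packets, %llu bytes", dcs->id,
		(unsigned long long)pkts, (unsigned long long)bytes);
	return 0;
}
#endif /* MLX5_DEVX_CMDS_EXAMPLES */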

/**
 * Create a new mkey.
 *
 * @param[in] ctx
 *   Context returned from mlx5 open_device() glue function.
 * @param[in] attr
 *   Attributes of the requested mkey.
 *
 * @return
 *   Pointer to Devx mkey on success, NULL otherwise and rte_errno
 *   is set.
 */
struct mlx5_devx_obj *
mlx5_devx_cmd_mkey_create(void *ctx,
			  struct mlx5_devx_mkey_attr *attr)
{
	struct mlx5_klm *klm_array = attr->klm_array;
	int klm_num = attr->klm_num;
	int in_size_dw = MLX5_ST_SZ_DW(create_mkey_in) +
		     (klm_num ? RTE_ALIGN(klm_num, 4) : 0) * MLX5_ST_SZ_DW(klm);
	uint32_t in[in_size_dw];
	uint32_t out[MLX5_ST_SZ_DW(create_mkey_out)] = {0};
	void *mkc;
	struct mlx5_devx_obj *mkey = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*mkey),
						 0, SOCKET_ID_ANY);
	size_t pgsize;
	uint32_t translation_size;

	if (!mkey) {
		rte_errno = ENOMEM;
		return NULL;
	}
	memset(in, 0, in_size_dw * 4);
	pgsize = rte_mem_page_size();
	if (pgsize == (size_t)-1) {
		mlx5_free(mkey);
		DRV_LOG(ERR, "Failed to get page size");
		rte_errno = ENOMEM;
		return NULL;
	}
	MLX5_SET(create_mkey_in, in, opcode, MLX5_CMD_OP_CREATE_MKEY);
	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
	if (klm_num > 0) {
		int i;
		uint8_t *klm = (uint8_t *)MLX5_ADDR_OF(create_mkey_in, in,
						       klm_pas_mtt);
		translation_size = RTE_ALIGN(klm_num, 4);
		for (i = 0; i < klm_num; i++) {
			MLX5_SET(klm, klm, byte_count, klm_array[i].byte_count);
			MLX5_SET(klm, klm, mkey, klm_array[i].mkey);
			MLX5_SET64(klm, klm, address, klm_array[i].address);
			klm += MLX5_ST_SZ_BYTES(klm);
		}
		for (; i < (int)translation_size; i++) {
			MLX5_SET(klm, klm, mkey, 0x0);
			MLX5_SET64(klm, klm, address, 0x0);
			klm += MLX5_ST_SZ_BYTES(klm);
		}
		MLX5_SET(mkc, mkc, access_mode_1_0, attr->log_entity_size ?
			 MLX5_MKC_ACCESS_MODE_KLM_FBS :
			 MLX5_MKC_ACCESS_MODE_KLM);
		MLX5_SET(mkc, mkc, log_page_size, attr->log_entity_size);
	} else {
		translation_size = (RTE_ALIGN(attr->size, pgsize) * 8) / 16;
		MLX5_SET(mkc, mkc, access_mode_1_0, MLX5_MKC_ACCESS_MODE_MTT);
		MLX5_SET(mkc, mkc, log_page_size, rte_log2_u32(pgsize));
	}
	MLX5_SET(create_mkey_in, in, translations_octword_actual_size,
		 translation_size);
	MLX5_SET(create_mkey_in, in, mkey_umem_id, attr->umem_id);
	MLX5_SET(create_mkey_in, in, pg_access, attr->pg_access);
	MLX5_SET(mkc, mkc, lw, 0x1);
	MLX5_SET(mkc, mkc, lr, 0x1);
	if (attr->set_remote_rw) {
		MLX5_SET(mkc, mkc, rw, 0x1);
		MLX5_SET(mkc, mkc, rr, 0x1);
	}
	MLX5_SET(mkc, mkc, qpn, 0xffffff);
	MLX5_SET(mkc, mkc, pd, attr->pd);
	MLX5_SET(mkc, mkc, mkey_7_0, attr->umem_id & 0xFF);
	MLX5_SET(mkc, mkc, umr_en, attr->umr_en);
	MLX5_SET(mkc, mkc, translations_octword_size, translation_size);
	MLX5_SET(mkc, mkc, relaxed_ordering_write,
		 attr->relaxed_ordering_write);
	MLX5_SET(mkc, mkc, relaxed_ordering_read, attr->relaxed_ordering_read);
	MLX5_SET64(mkc, mkc, start_addr, attr->addr);
	MLX5_SET64(mkc, mkc, len, attr->size);
	MLX5_SET(mkc, mkc, crypto_en, attr->crypto_en);
	if (attr->crypto_en) {
		MLX5_SET(mkc, mkc, bsf_en, attr->crypto_en);
		MLX5_SET(mkc, mkc, bsf_octword_size, 4);
	}
	mkey->obj = mlx5_glue->devx_obj_create(ctx, in, in_size_dw * 4, out,
					       sizeof(out));
	if (!mkey->obj) {
		DRV_LOG(ERR, "Can't create %sdirect mkey - error %d",
			klm_num ? "an in" : "a ", errno);
		rte_errno = errno;
		mlx5_free(mkey);
		return NULL;
	}
	mkey->id = MLX5_GET(create_mkey_out, out, mkey_index);
	mkey->id = (mkey->id << 8) | (attr->umem_id & 0xFF);
	return mkey;
}
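
#ifdef MLX5_DEVX_CMDS_EXAMPLES /* Hypothetical guard - never defined. */
/*
 * Usage sketch: create a direct (MTT) mkey over a registered umem and
 * destroy it again. The umem_id/pd values are assumed to come from earlier
 * glue calls; all other attributes stay zeroed, which selects the MTT path
 * above (klm_num == 0).
 */
static int
mlx5_example_mkey(void *ctx, uint32_t umem_id, uint32_t pd,
		  void *buf, size_t size)
{
	struct mlx5_devx_mkey_attr attr = {
		.addr = (uintptr_t)buf,
		.size = size,
		.umem_id = umem_id,
		.pd = pd,
	};
	struct mlx5_devx_obj *mkey;

	mkey = mlx5_devx_cmd_mkey_create(ctx, &attr);
	if (!mkey)
		return -rte_errno;
	DRV_LOG(DEBUG, "Created mkey 0x%08X", mkey->id);
	return mlx5_devx_cmd_destroy(mkey);
}
#endif /* MLX5_DEVX_CMDS_EXAMPLES */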

/**
 * Get status of devx command response.
 * Mainly used for asynchronous commands.
 *
 * @param[in] out
 *   The out response buffer.
 *
 * @return
 *   0 on success, non-zero value otherwise.
 */
int
mlx5_devx_get_out_command_status(void *out)
{
	int status;

	if (!out)
		return -EINVAL;
	status = MLX5_GET(query_flow_counter_out, out, status);
	if (status) {
		int syndrome = MLX5_GET(query_flow_counter_out, out, syndrome);

		DRV_LOG(ERR, "Bad DevX status %x, syndrome = %x", status,
			syndrome);
	}
	return status;
}

/**
 * Destroy any object allocated by a Devx API.
 *
 * @param[in] obj
 *   Pointer to a general object.
 *
 * @return
 *   0 on success, a negative value otherwise.
 */
int
mlx5_devx_cmd_destroy(struct mlx5_devx_obj *obj)
{
	int ret;

	if (!obj)
		return 0;
	ret = mlx5_glue->devx_obj_destroy(obj->obj);
	mlx5_free(obj);
	return ret;
}

/**
 * Query NIC vport context.
 * Fills minimal inline attribute.
 *
 * @param[in] ctx
 *   ibv context returned from mlx5dv_open_device.
 * @param[in] vport
 *   vport index
 * @param[out] attr
 *   Attributes device values.
 *
 * @return
 *   0 on success, a negative value otherwise.
 */
static int
mlx5_devx_cmd_query_nic_vport_context(void *ctx,
				      unsigned int vport,
				      struct mlx5_hca_attr *attr)
{
	uint32_t in[MLX5_ST_SZ_DW(query_nic_vport_context_in)] = {0};
	uint32_t out[MLX5_ST_SZ_DW(query_nic_vport_context_out)] = {0};
	void *vctx;
	int status, syndrome, rc;

	/* Query NIC vport context to determine inline mode. */
	MLX5_SET(query_nic_vport_context_in, in, opcode,
		 MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT);
	MLX5_SET(query_nic_vport_context_in, in, vport_number, vport);
	if (vport)
		MLX5_SET(query_nic_vport_context_in, in, other_vport, 1);
	rc = mlx5_glue->devx_general_cmd(ctx,
					 in, sizeof(in),
					 out, sizeof(out));
	if (rc)
		goto error;
	status = MLX5_GET(query_nic_vport_context_out, out, status);
	syndrome = MLX5_GET(query_nic_vport_context_out, out, syndrome);
	if (status) {
		DRV_LOG(DEBUG, "Failed to query NIC vport context, "
			"status %x, syndrome = %x", status, syndrome);
		return -1;
	}
	vctx = MLX5_ADDR_OF(query_nic_vport_context_out, out,
			    nic_vport_context);
	attr->vport_inline_mode = MLX5_GET(nic_vport_context, vctx,
					   min_wqe_inline_mode);
	return 0;
error:
	rc = (rc > 0) ? -rc : rc;
	return rc;
}

/**
 * Query NIC vDPA attributes.
 *
 * @param[in] ctx
 *   Context returned from mlx5 open_device() glue function.
 * @param[out] vdpa_attr
 *   vDPA Attributes structure to fill.
 */
static void
mlx5_devx_cmd_query_hca_vdpa_attr(void *ctx,
				  struct mlx5_hca_vdpa_attr *vdpa_attr)
{
	uint32_t in[MLX5_ST_SZ_DW(query_hca_cap_in)];
	uint32_t out[MLX5_ST_SZ_DW(query_hca_cap_out)];
	void *hcattr;

	hcattr = mlx5_devx_get_hca_cap(ctx, in, out, NULL,
			MLX5_GET_HCA_CAP_OP_MOD_VDPA_EMULATION |
			MLX5_HCA_CAP_OPMOD_GET_CUR);
	if (!hcattr) {
		RTE_LOG(DEBUG, PMD, "Failed to query devx VDPA capabilities");
		vdpa_attr->valid = 0;
	} else {
		vdpa_attr->valid = 1;
		vdpa_attr->desc_tunnel_offload_type =
			MLX5_GET(virtio_emulation_cap, hcattr,
				 desc_tunnel_offload_type);
		vdpa_attr->eth_frame_offload_type =
			MLX5_GET(virtio_emulation_cap, hcattr,
				 eth_frame_offload_type);
		vdpa_attr->virtio_version_1_0 =
			MLX5_GET(virtio_emulation_cap, hcattr,
				 virtio_version_1_0);
		vdpa_attr->tso_ipv4 = MLX5_GET(virtio_emulation_cap, hcattr,
					       tso_ipv4);
		vdpa_attr->tso_ipv6 = MLX5_GET(virtio_emulation_cap, hcattr,
					       tso_ipv6);
		vdpa_attr->tx_csum = MLX5_GET(virtio_emulation_cap, hcattr,
					      tx_csum);
		vdpa_attr->rx_csum = MLX5_GET(virtio_emulation_cap, hcattr,
					      rx_csum);
		vdpa_attr->event_mode = MLX5_GET(virtio_emulation_cap, hcattr,
						 event_mode);
		vdpa_attr->virtio_queue_type =
			MLX5_GET(virtio_emulation_cap, hcattr,
				 virtio_queue_type);
		vdpa_attr->log_doorbell_stride =
			MLX5_GET(virtio_emulation_cap, hcattr,
				 log_doorbell_stride);
		vdpa_attr->log_doorbell_bar_size =
			MLX5_GET(virtio_emulation_cap, hcattr,
				 log_doorbell_bar_size);
		vdpa_attr->doorbell_bar_offset =
			MLX5_GET64(virtio_emulation_cap, hcattr,
				   doorbell_bar_offset);
		vdpa_attr->max_num_virtio_queues =
			MLX5_GET(virtio_emulation_cap, hcattr,
				 max_num_virtio_queues);
		vdpa_attr->umems[0].a = MLX5_GET(virtio_emulation_cap, hcattr,
						 umem_1_buffer_param_a);
		vdpa_attr->umems[0].b = MLX5_GET(virtio_emulation_cap, hcattr,
						 umem_1_buffer_param_b);
		vdpa_attr->umems[1].a = MLX5_GET(virtio_emulation_cap, hcattr,
						 umem_2_buffer_param_a);
		vdpa_attr->umems[1].b = MLX5_GET(virtio_emulation_cap, hcattr,
						 umem_2_buffer_param_b);
		vdpa_attr->umems[2].a = MLX5_GET(virtio_emulation_cap, hcattr,
						 umem_3_buffer_param_a);
		vdpa_attr->umems[2].b = MLX5_GET(virtio_emulation_cap, hcattr,
						 umem_3_buffer_param_b);
	}
}

int
mlx5_devx_cmd_query_parse_samples(struct mlx5_devx_obj *flex_obj,
				  uint32_t ids[], uint32_t num)
{
	uint32_t in[MLX5_ST_SZ_DW(general_obj_in_cmd_hdr)] = {0};
	uint32_t out[MLX5_ST_SZ_DW(create_flex_parser_out)] = {0};
	void *hdr = MLX5_ADDR_OF(create_flex_parser_out, in, hdr);
	void *flex = MLX5_ADDR_OF(create_flex_parser_out, out, flex);
	void *sample = MLX5_ADDR_OF(parse_graph_flex, flex, sample_table);
	int ret;
	uint32_t idx = 0;
	uint32_t i;

	if (num > MLX5_GRAPH_NODE_SAMPLE_NUM) {
		rte_errno = EINVAL;
		DRV_LOG(ERR, "Too many sample IDs to be fetched.");
		return -rte_errno;
	}
	MLX5_SET(general_obj_in_cmd_hdr, hdr, opcode,
		 MLX5_CMD_OP_QUERY_GENERAL_OBJECT);
	MLX5_SET(general_obj_in_cmd_hdr, hdr, obj_type,
		 MLX5_GENERAL_OBJ_TYPE_FLEX_PARSE_GRAPH);
	MLX5_SET(general_obj_in_cmd_hdr, hdr, obj_id, flex_obj->id);
	ret = mlx5_glue->devx_obj_query(flex_obj->obj, in, sizeof(in),
					out, sizeof(out));
	if (ret) {
		rte_errno = ret;
		DRV_LOG(ERR, "Failed to query sample IDs with object %p.",
			(void *)flex_obj);
		return -rte_errno;
	}
	for (i = 0; i < MLX5_GRAPH_NODE_SAMPLE_NUM; i++) {
		void *s_off = (void *)((char *)sample + i *
			      MLX5_ST_SZ_BYTES(parse_graph_flow_match_sample));
		uint32_t en;

		en = MLX5_GET(parse_graph_flow_match_sample, s_off,
			      flow_match_sample_en);
		if (!en)
			continue;
		ids[idx++] = MLX5_GET(parse_graph_flow_match_sample, s_off,
				      flow_match_sample_field_id);
	}
	if (num != idx) {
		rte_errno = EINVAL;
		DRV_LOG(ERR, "Number of sample IDs is not as expected.");
		return -rte_errno;
	}
	return ret;
}
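
#ifdef MLX5_DEVX_CMDS_EXAMPLES /* Hypothetical guard - never defined. */
/*
 * Usage sketch: fetch the sample field ids of a flex parser node created
 * by mlx5_devx_cmd_create_flex_parser(). "expected" must match the number
 * of enabled samples in the node, otherwise the query fails with EINVAL.
 */
static int
mlx5_example_parse_samples(struct mlx5_devx_obj *flex_obj, uint32_t expected)
{
	uint32_t ids[MLX5_GRAPH_NODE_SAMPLE_NUM] = {0};

	if (expected > MLX5_GRAPH_NODE_SAMPLE_NUM)
		return -EINVAL;
	return mlx5_devx_cmd_query_parse_samples(flex_obj, ids, expected);
}
#endif /* MLX5_DEVX_CMDS_EXAMPLES */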

struct mlx5_devx_obj *
mlx5_devx_cmd_create_flex_parser(void *ctx,
				 struct mlx5_devx_graph_node_attr *data)
{
	uint32_t in[MLX5_ST_SZ_DW(create_flex_parser_in)] = {0};
	uint32_t out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0};
	void *hdr = MLX5_ADDR_OF(create_flex_parser_in, in, hdr);
	void *flex = MLX5_ADDR_OF(create_flex_parser_in, in, flex);
	void *sample = MLX5_ADDR_OF(parse_graph_flex, flex, sample_table);
	void *in_arc = MLX5_ADDR_OF(parse_graph_flex, flex, input_arc);
	void *out_arc = MLX5_ADDR_OF(parse_graph_flex, flex, output_arc);
	struct mlx5_devx_obj *parse_flex_obj = mlx5_malloc
		(MLX5_MEM_ZERO, sizeof(*parse_flex_obj), 0, SOCKET_ID_ANY);
	uint32_t i;

	if (!parse_flex_obj) {
		DRV_LOG(ERR, "Failed to allocate flex parser data.");
		rte_errno = ENOMEM;
		return NULL;
	}
	MLX5_SET(general_obj_in_cmd_hdr, hdr, opcode,
		 MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
	MLX5_SET(general_obj_in_cmd_hdr, hdr, obj_type,
		 MLX5_GENERAL_OBJ_TYPE_FLEX_PARSE_GRAPH);
	MLX5_SET(parse_graph_flex, flex, header_length_mode,
		 data->header_length_mode);
	MLX5_SET64(parse_graph_flex, flex, modify_field_select,
		   data->modify_field_select);
	MLX5_SET(parse_graph_flex, flex, header_length_base_value,
		 data->header_length_base_value);
	MLX5_SET(parse_graph_flex, flex, header_length_field_offset,
		 data->header_length_field_offset);
	MLX5_SET(parse_graph_flex, flex, header_length_field_shift,
		 data->header_length_field_shift);
	MLX5_SET(parse_graph_flex, flex, next_header_field_offset,
		 data->next_header_field_offset);
	MLX5_SET(parse_graph_flex, flex, next_header_field_size,
		 data->next_header_field_size);
	MLX5_SET(parse_graph_flex, flex, header_length_field_mask,
		 data->header_length_field_mask);
	for (i = 0; i < MLX5_GRAPH_NODE_SAMPLE_NUM; i++) {
		struct mlx5_devx_match_sample_attr *s = &data->sample[i];
		void *s_off = (void *)((char *)sample + i *
			      MLX5_ST_SZ_BYTES(parse_graph_flow_match_sample));

		if (!s->flow_match_sample_en)
			continue;
		MLX5_SET(parse_graph_flow_match_sample, s_off,
			 flow_match_sample_en, !!s->flow_match_sample_en);
		MLX5_SET(parse_graph_flow_match_sample, s_off,
			 flow_match_sample_field_offset,
			 s->flow_match_sample_field_offset);
		MLX5_SET(parse_graph_flow_match_sample, s_off,
			 flow_match_sample_offset_mode,
			 s->flow_match_sample_offset_mode);
		MLX5_SET(parse_graph_flow_match_sample, s_off,
			 flow_match_sample_field_offset_mask,
			 s->flow_match_sample_field_offset_mask);
		MLX5_SET(parse_graph_flow_match_sample, s_off,
			 flow_match_sample_field_offset_shift,
			 s->flow_match_sample_field_offset_shift);
		MLX5_SET(parse_graph_flow_match_sample, s_off,
			 flow_match_sample_field_base_offset,
			 s->flow_match_sample_field_base_offset);
		MLX5_SET(parse_graph_flow_match_sample, s_off,
			 flow_match_sample_tunnel_mode,
			 s->flow_match_sample_tunnel_mode);
	}
	for (i = 0; i < MLX5_GRAPH_NODE_ARC_NUM; i++) {
		struct mlx5_devx_graph_arc_attr *ia = &data->in[i];
		struct mlx5_devx_graph_arc_attr *oa = &data->out[i];
		void *in_off = (void *)((char *)in_arc + i *
			      MLX5_ST_SZ_BYTES(parse_graph_arc));
		void *out_off = (void *)((char *)out_arc + i *
			      MLX5_ST_SZ_BYTES(parse_graph_arc));

		if (ia->arc_parse_graph_node != 0) {
			MLX5_SET(parse_graph_arc, in_off,
				 compare_condition_value,
				 ia->compare_condition_value);
			MLX5_SET(parse_graph_arc, in_off, start_inner_tunnel,
				 ia->start_inner_tunnel);
			MLX5_SET(parse_graph_arc, in_off, arc_parse_graph_node,
				 ia->arc_parse_graph_node);
			MLX5_SET(parse_graph_arc, in_off,
				 parse_graph_node_handle,
				 ia->parse_graph_node_handle);
		}
		if (oa->arc_parse_graph_node != 0) {
			MLX5_SET(parse_graph_arc, out_off,
				 compare_condition_value,
				 oa->compare_condition_value);
			MLX5_SET(parse_graph_arc, out_off, start_inner_tunnel,
				 oa->start_inner_tunnel);
			MLX5_SET(parse_graph_arc, out_off, arc_parse_graph_node,
				 oa->arc_parse_graph_node);
			MLX5_SET(parse_graph_arc, out_off,
				 parse_graph_node_handle,
				 oa->parse_graph_node_handle);
		}
	}
	parse_flex_obj->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in),
							 out, sizeof(out));
	if (!parse_flex_obj->obj) {
		rte_errno = errno;
		DRV_LOG(ERR, "Failed to create FLEX PARSE GRAPH object "
			"by using DevX.");
		mlx5_free(parse_flex_obj);
		return NULL;
	}
	parse_flex_obj->id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
	return parse_flex_obj;
}
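
#ifdef MLX5_DEVX_CMDS_EXAMPLES /* Hypothetical guard - never defined. */
/*
 * Usage sketch: create a minimal flex parser node with one enabled sample.
 * The zeroed attributes (header length mode 0, no arcs) and the fixed
 * 4-byte base length are illustrative assumptions; real nodes need
 * PRM-conformant values for the target protocol.
 */
static struct mlx5_devx_obj *
mlx5_example_flex_parser(void *ctx)
{
	struct mlx5_devx_graph_node_attr node = {
		.header_length_base_value = 4, /* Hypothetical fixed length. */
	};

	node.sample[0].flow_match_sample_en = 1;
	node.sample[0].flow_match_sample_field_offset = 0;
	return mlx5_devx_cmd_create_flex_parser(ctx, &node);
}
#endif /* MLX5_DEVX_CMDS_EXAMPLES */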

static int
mlx5_devx_cmd_query_hca_parse_graph_node_cap
	(void *ctx, struct mlx5_hca_flex_attr *attr)
{
	uint32_t in[MLX5_ST_SZ_DW(query_hca_cap_in)];
	uint32_t out[MLX5_ST_SZ_DW(query_hca_cap_out)];
	void *hcattr;
	int rc;

	hcattr = mlx5_devx_get_hca_cap(ctx, in, out, &rc,
			MLX5_GET_HCA_CAP_OP_MOD_PARSE_GRAPH_NODE_CAP |
			MLX5_HCA_CAP_OPMOD_GET_CUR);
	if (!hcattr)
		return rc;
	attr->node_in = MLX5_GET(parse_graph_node_cap, hcattr, node_in);
	attr->node_out = MLX5_GET(parse_graph_node_cap, hcattr, node_out);
	attr->header_length_mode = MLX5_GET(parse_graph_node_cap, hcattr,
					    header_length_mode);
	attr->sample_offset_mode = MLX5_GET(parse_graph_node_cap, hcattr,
					    sample_offset_mode);
	attr->max_num_arc_in = MLX5_GET(parse_graph_node_cap, hcattr,
					max_num_arc_in);
	attr->max_num_arc_out = MLX5_GET(parse_graph_node_cap, hcattr,
					 max_num_arc_out);
	attr->max_num_sample = MLX5_GET(parse_graph_node_cap, hcattr,
					max_num_sample);
	attr->sample_id_in_out = MLX5_GET(parse_graph_node_cap, hcattr,
					  sample_id_in_out);
	attr->max_base_header_length = MLX5_GET(parse_graph_node_cap, hcattr,
						max_base_header_length);
	attr->max_sample_base_offset = MLX5_GET(parse_graph_node_cap, hcattr,
						max_sample_base_offset);
	attr->max_next_header_offset = MLX5_GET(parse_graph_node_cap, hcattr,
						max_next_header_offset);
	attr->header_length_mask_width = MLX5_GET(parse_graph_node_cap, hcattr,
						  header_length_mask_width);
	/* Get the max supported samples from HCA CAP 2 */
	hcattr = mlx5_devx_get_hca_cap(ctx, in, out, &rc,
			MLX5_GET_HCA_CAP_OP_MOD_GENERAL_DEVICE_2 |
			MLX5_HCA_CAP_OPMOD_GET_CUR);
	if (!hcattr)
		return rc;
	attr->max_num_prog_sample =
		MLX5_GET(cmd_hca_cap_2, hcattr, max_num_prog_sample_field);
	return 0;
}

static int
mlx5_devx_query_pkt_integrity_match(void *hcattr)
{
	return MLX5_GET(flow_table_nic_cap, hcattr,
			ft_field_support_2_nic_receive.inner_l3_ok) &&
	       MLX5_GET(flow_table_nic_cap, hcattr,
			ft_field_support_2_nic_receive.inner_l4_ok) &&
	       MLX5_GET(flow_table_nic_cap, hcattr,
			ft_field_support_2_nic_receive.outer_l3_ok) &&
	       MLX5_GET(flow_table_nic_cap, hcattr,
			ft_field_support_2_nic_receive.outer_l4_ok) &&
	       MLX5_GET(flow_table_nic_cap, hcattr,
			ft_field_support_2_nic_receive
				.inner_ipv4_checksum_ok) &&
	       MLX5_GET(flow_table_nic_cap, hcattr,
			ft_field_support_2_nic_receive.inner_l4_checksum_ok) &&
	       MLX5_GET(flow_table_nic_cap, hcattr,
			ft_field_support_2_nic_receive
				.outer_ipv4_checksum_ok) &&
	       MLX5_GET(flow_table_nic_cap, hcattr,
			ft_field_support_2_nic_receive.outer_l4_checksum_ok);
}

/**
 * Query HCA attributes.
 * Using those attributes we can check on run time if the device
 * supports the required capabilities.
 *
 * @param[in] ctx
 *   Context returned from mlx5 open_device() glue function.
 * @param[out] attr
 *   Attributes device values.
 *
 * @return
 *   0 on success, a negative value otherwise.
 */
int
mlx5_devx_cmd_query_hca_attr(void *ctx,
			     struct mlx5_hca_attr *attr)
{
	uint32_t in[MLX5_ST_SZ_DW(query_hca_cap_in)] = {0};
	uint32_t out[MLX5_ST_SZ_DW(query_hca_cap_out)] = {0};
	bool hca_cap_2_sup;
	uint64_t general_obj_types_supported = 0;
	void *hcattr;
	int rc, i;

	hcattr = mlx5_devx_get_hca_cap(ctx, in, out, &rc,
			MLX5_GET_HCA_CAP_OP_MOD_GENERAL_DEVICE |
			MLX5_HCA_CAP_OPMOD_GET_CUR);
	if (!hcattr)
		return rc;
	hca_cap_2_sup = MLX5_GET(cmd_hca_cap, hcattr, hca_cap_2);
	attr->max_wqe_sz_sq = MLX5_GET(cmd_hca_cap, hcattr, max_wqe_sz_sq);
	attr->flow_counter_bulk_alloc_bitmap =
			MLX5_GET(cmd_hca_cap, hcattr, flow_counter_bulk_alloc);
	attr->flow_counters_dump = MLX5_GET(cmd_hca_cap, hcattr,
					    flow_counters_dump);
	attr->log_max_rmp = MLX5_GET(cmd_hca_cap, hcattr, log_max_rmp);
	attr->mem_rq_rmp = MLX5_GET(cmd_hca_cap, hcattr, mem_rq_rmp);
	attr->log_max_rqt_size = MLX5_GET(cmd_hca_cap, hcattr,
					  log_max_rqt_size);
	attr->eswitch_manager = MLX5_GET(cmd_hca_cap, hcattr, eswitch_manager);
	attr->hairpin = MLX5_GET(cmd_hca_cap, hcattr, hairpin);
	attr->log_max_hairpin_queues = MLX5_GET(cmd_hca_cap, hcattr,
						log_max_hairpin_queues);
	attr->log_max_hairpin_wq_data_sz = MLX5_GET(cmd_hca_cap, hcattr,
						    log_max_hairpin_wq_data_sz);
	attr->log_max_hairpin_num_packets = MLX5_GET
		(cmd_hca_cap, hcattr, log_min_hairpin_wq_data_sz);
	attr->vhca_id = MLX5_GET(cmd_hca_cap, hcattr, vhca_id);
	attr->relaxed_ordering_write = MLX5_GET(cmd_hca_cap, hcattr,
						relaxed_ordering_write);
	attr->relaxed_ordering_read = MLX5_GET(cmd_hca_cap, hcattr,
					       relaxed_ordering_read);
	attr->access_register_user = MLX5_GET(cmd_hca_cap, hcattr,
					      access_register_user);
	attr->eth_net_offloads = MLX5_GET(cmd_hca_cap, hcattr,
					  eth_net_offloads);
	attr->eth_virt = MLX5_GET(cmd_hca_cap, hcattr, eth_virt);
	attr->flex_parser_protocols = MLX5_GET(cmd_hca_cap, hcattr,
					       flex_parser_protocols);
	attr->max_geneve_tlv_options = MLX5_GET(cmd_hca_cap, hcattr,
						max_geneve_tlv_options);
	attr->max_geneve_tlv_option_data_len = MLX5_GET(cmd_hca_cap, hcattr,
						max_geneve_tlv_option_data_len);
	attr->qos.sup = MLX5_GET(cmd_hca_cap, hcattr, qos);
	attr->qos.flow_meter_aso_sup = !!(MLX5_GET64(cmd_hca_cap, hcattr,
					  general_obj_types) &
				MLX5_GENERAL_OBJ_TYPES_CAP_FLOW_METER_ASO);
	attr->wqe_index_ignore = MLX5_GET(cmd_hca_cap, hcattr,
					  wqe_index_ignore_cap);
	attr->cross_channel = MLX5_GET(cmd_hca_cap, hcattr, cd);
	attr->non_wire_sq = MLX5_GET(cmd_hca_cap, hcattr, non_wire_sq);
	attr->log_max_static_sq_wq = MLX5_GET(cmd_hca_cap, hcattr,
					      log_max_static_sq_wq);
	attr->num_lag_ports = MLX5_GET(cmd_hca_cap, hcattr, num_lag_ports);
	attr->dev_freq_khz = MLX5_GET(cmd_hca_cap, hcattr,
				      device_frequency_khz);
	attr->scatter_fcs_w_decap_disable =
		MLX5_GET(cmd_hca_cap, hcattr, scatter_fcs_w_decap_disable);
	attr->roce = MLX5_GET(cmd_hca_cap, hcattr, roce);
	attr->rq_ts_format = MLX5_GET(cmd_hca_cap, hcattr, rq_ts_format);
	attr->sq_ts_format = MLX5_GET(cmd_hca_cap, hcattr, sq_ts_format);
	attr->steering_format_version =
		MLX5_GET(cmd_hca_cap, hcattr, steering_format_version);
	attr->regexp_params = MLX5_GET(cmd_hca_cap, hcattr, regexp_params);
	attr->regexp_version = MLX5_GET(cmd_hca_cap, hcattr, regexp_version);
	attr->regexp_num_of_engines = MLX5_GET(cmd_hca_cap, hcattr,
					       regexp_num_of_engines);
	/* Read the general_obj_types bitmap and extract the relevant bits. */
	general_obj_types_supported = MLX5_GET64(cmd_hca_cap, hcattr,
						 general_obj_types);
	attr->vdpa.valid = !!(general_obj_types_supported &
			      MLX5_GENERAL_OBJ_TYPES_CAP_VIRTQ_NET_Q);
	attr->vdpa.queue_counters_valid =
			!!(general_obj_types_supported &
			   MLX5_GENERAL_OBJ_TYPES_CAP_VIRTIO_Q_COUNTERS);
	attr->parse_graph_flex_node =
			!!(general_obj_types_supported &
			   MLX5_GENERAL_OBJ_TYPES_CAP_PARSE_GRAPH_FLEX_NODE);
	attr->flow_hit_aso = !!(general_obj_types_supported &
				MLX5_GENERAL_OBJ_TYPES_CAP_FLOW_HIT_ASO);
	attr->geneve_tlv_opt = !!(general_obj_types_supported &
				  MLX5_GENERAL_OBJ_TYPES_CAP_GENEVE_TLV_OPT);
	attr->dek = !!(general_obj_types_supported &
		       MLX5_GENERAL_OBJ_TYPES_CAP_DEK);
	attr->import_kek = !!(general_obj_types_supported &
			      MLX5_GENERAL_OBJ_TYPES_CAP_IMPORT_KEK);
	attr->credential = !!(general_obj_types_supported &
			      MLX5_GENERAL_OBJ_TYPES_CAP_CREDENTIAL);
	attr->crypto_login = !!(general_obj_types_supported &
				MLX5_GENERAL_OBJ_TYPES_CAP_CRYPTO_LOGIN);
	/* Add reading of other GENERAL_OBJ_TYPES_CAP bits above this line. */
	attr->log_max_cq = MLX5_GET(cmd_hca_cap, hcattr, log_max_cq);
	attr->log_max_qp = MLX5_GET(cmd_hca_cap, hcattr, log_max_qp);
	attr->log_max_cq_sz = MLX5_GET(cmd_hca_cap, hcattr, log_max_cq_sz);
	attr->log_max_qp_sz = MLX5_GET(cmd_hca_cap, hcattr, log_max_qp_sz);
	attr->log_max_mrw_sz = MLX5_GET(cmd_hca_cap, hcattr, log_max_mrw_sz);
	attr->log_max_pd = MLX5_GET(cmd_hca_cap, hcattr, log_max_pd);
	attr->log_max_srq = MLX5_GET(cmd_hca_cap, hcattr, log_max_srq);
	attr->log_max_srq_sz = MLX5_GET(cmd_hca_cap, hcattr, log_max_srq_sz);
	attr->reg_c_preserve =
		MLX5_GET(cmd_hca_cap, hcattr, reg_c_preserve);
	attr->mmo_regex_qp_en = MLX5_GET(cmd_hca_cap, hcattr, regexp_mmo_qp);
	attr->mmo_regex_sq_en = MLX5_GET(cmd_hca_cap, hcattr, regexp_mmo_sq);
	attr->mmo_dma_sq_en = MLX5_GET(cmd_hca_cap, hcattr, dma_mmo_sq);
	attr->mmo_compress_sq_en = MLX5_GET(cmd_hca_cap, hcattr,
					    compress_mmo_sq);
	attr->mmo_decompress_sq_en = MLX5_GET(cmd_hca_cap, hcattr,
					      decompress_mmo_sq);
	attr->mmo_dma_qp_en = MLX5_GET(cmd_hca_cap, hcattr, dma_mmo_qp);
	attr->mmo_compress_qp_en = MLX5_GET(cmd_hca_cap, hcattr,
					    compress_mmo_qp);
	attr->mmo_decompress_qp_en = MLX5_GET(cmd_hca_cap, hcattr,
					      decompress_mmo_qp);
	attr->compress_min_block_size = MLX5_GET(cmd_hca_cap, hcattr,
						 compress_min_block_size);
	attr->log_max_mmo_dma = MLX5_GET(cmd_hca_cap, hcattr, log_dma_mmo_size);
	attr->log_max_mmo_compress = MLX5_GET(cmd_hca_cap, hcattr,
					      log_compress_mmo_size);
	attr->log_max_mmo_decompress = MLX5_GET(cmd_hca_cap, hcattr,
						log_decompress_mmo_size);
	attr->cqe_compression = MLX5_GET(cmd_hca_cap, hcattr, cqe_compression);
	attr->mini_cqe_resp_flow_tag = MLX5_GET(cmd_hca_cap, hcattr,
						mini_cqe_resp_flow_tag);
	attr->mini_cqe_resp_l3_l4_tag = MLX5_GET(cmd_hca_cap, hcattr,
						 mini_cqe_resp_l3_l4_tag);
	attr->umr_indirect_mkey_disabled =
		MLX5_GET(cmd_hca_cap, hcattr, umr_indirect_mkey_disabled);
	attr->umr_modify_entity_size_disabled =
		MLX5_GET(cmd_hca_cap, hcattr, umr_modify_entity_size_disabled);
	attr->wait_on_time = MLX5_GET(cmd_hca_cap, hcattr, wait_on_time);
	attr->crypto = MLX5_GET(cmd_hca_cap, hcattr, crypto);
	if (attr->crypto)
		attr->aes_xts = MLX5_GET(cmd_hca_cap, hcattr, aes_xts);
	attr->ct_offload = !!(MLX5_GET64(cmd_hca_cap, hcattr,
					 general_obj_types) &
			      MLX5_GENERAL_OBJ_TYPES_CAP_CONN_TRACK_OFFLOAD);
	attr->rq_delay_drop = MLX5_GET(cmd_hca_cap, hcattr, rq_delay_drop);
	if (hca_cap_2_sup) {
		hcattr = mlx5_devx_get_hca_cap(ctx, in, out, &rc,
				MLX5_GET_HCA_CAP_OP_MOD_GENERAL_DEVICE_2 |
				MLX5_HCA_CAP_OPMOD_GET_CUR);
		if (!hcattr) {
			DRV_LOG(DEBUG,
				"Failed to query DevX HCA capabilities 2.");
			return rc;
		}
		attr->log_min_stride_wqe_sz = MLX5_GET(cmd_hca_cap_2, hcattr,
						       log_min_stride_wqe_sz);
	}
	if (attr->log_min_stride_wqe_sz == 0)
		attr->log_min_stride_wqe_sz = MLX5_MPRQ_LOG_MIN_STRIDE_WQE_SIZE;
	if (attr->qos.sup) {
		hcattr = mlx5_devx_get_hca_cap(ctx, in, out, &rc,
				MLX5_GET_HCA_CAP_OP_MOD_QOS_CAP |
				MLX5_HCA_CAP_OPMOD_GET_CUR);
		if (!hcattr) {
			DRV_LOG(DEBUG, "Failed to query devx QOS capabilities");
			return rc;
		}
		attr->qos.flow_meter_old =
				MLX5_GET(qos_cap, hcattr, flow_meter_old);
		attr->qos.log_max_flow_meter =
				MLX5_GET(qos_cap, hcattr, log_max_flow_meter);
		attr->qos.flow_meter_reg_c_ids =
				MLX5_GET(qos_cap, hcattr, flow_meter_reg_id);
		attr->qos.flow_meter =
				MLX5_GET(qos_cap, hcattr, flow_meter);
		attr->qos.packet_pacing =
				MLX5_GET(qos_cap, hcattr, packet_pacing);
		attr->qos.wqe_rate_pp =
				MLX5_GET(qos_cap, hcattr, wqe_rate_pp);
		if (attr->qos.flow_meter_aso_sup) {
			attr->qos.log_meter_aso_granularity =
				MLX5_GET(qos_cap, hcattr,
					 log_meter_aso_granularity);
			attr->qos.log_meter_aso_max_alloc =
				MLX5_GET(qos_cap, hcattr,
					 log_meter_aso_max_alloc);
			attr->qos.log_max_num_meter_aso =
				MLX5_GET(qos_cap, hcattr,
					 log_max_num_meter_aso);
		}
	}
	/*
	 * Flex item support needs max_num_prog_sample_field
	 * from the Capabilities 2 table for PARSE_GRAPH_NODE
	 */
	if (attr->parse_graph_flex_node) {
		rc = mlx5_devx_cmd_query_hca_parse_graph_node_cap
			(ctx, &attr->flex);
		if (rc)
			return -1;
	}
	if (attr->vdpa.valid)
		mlx5_devx_cmd_query_hca_vdpa_attr(ctx, &attr->vdpa);
	if (!attr->eth_net_offloads)
		return 0;
	/* Query Flow Sampler Capability From Flow Table Properties Layout. */
	hcattr = mlx5_devx_get_hca_cap(ctx, in, out, &rc,
			MLX5_GET_HCA_CAP_OP_MOD_NIC_FLOW_TABLE |
			MLX5_HCA_CAP_OPMOD_GET_CUR);
	if (!hcattr) {
		attr->log_max_ft_sampler_num = 0;
		return rc;
	}
	attr->log_max_ft_sampler_num = MLX5_GET
		(flow_table_nic_cap, hcattr,
		 flow_table_properties_nic_receive.log_max_ft_sampler_num);
	attr->flow.tunnel_header_0_1 = MLX5_GET
		(flow_table_nic_cap, hcattr,
		 ft_field_support_2_nic_receive.tunnel_header_0_1);
	attr->flow.tunnel_header_2_3 = MLX5_GET
		(flow_table_nic_cap, hcattr,
		 ft_field_support_2_nic_receive.tunnel_header_2_3);
	attr->pkt_integrity_match = mlx5_devx_query_pkt_integrity_match(hcattr);
	attr->inner_ipv4_ihl = MLX5_GET
		(flow_table_nic_cap, hcattr,
		 ft_field_support_2_nic_receive.inner_ipv4_ihl);
	attr->outer_ipv4_ihl = MLX5_GET
		(flow_table_nic_cap, hcattr,
		 ft_field_support_2_nic_receive.outer_ipv4_ihl);
	/* Query HCA offloads for Ethernet protocol. */
	hcattr = mlx5_devx_get_hca_cap(ctx, in, out, &rc,
			MLX5_GET_HCA_CAP_OP_MOD_ETHERNET_OFFLOAD_CAPS |
			MLX5_HCA_CAP_OPMOD_GET_CUR);
	if (!hcattr) {
		attr->eth_net_offloads = 0;
		return rc;
	}
	attr->wqe_vlan_insert = MLX5_GET(per_protocol_networking_offload_caps,
					 hcattr, wqe_vlan_insert);
	attr->csum_cap = MLX5_GET(per_protocol_networking_offload_caps,
				  hcattr, csum_cap);
	attr->vlan_cap = MLX5_GET(per_protocol_networking_offload_caps,
				  hcattr, vlan_cap);
	attr->lro_cap = MLX5_GET(per_protocol_networking_offload_caps, hcattr,
				 lro_cap);
	attr->max_lso_cap = MLX5_GET(per_protocol_networking_offload_caps,
				     hcattr, max_lso_cap);
	attr->scatter_fcs = MLX5_GET(per_protocol_networking_offload_caps,
				     hcattr, scatter_fcs);
	attr->tunnel_lro_gre = MLX5_GET(per_protocol_networking_offload_caps,
					hcattr, tunnel_lro_gre);
	attr->tunnel_lro_vxlan = MLX5_GET(per_protocol_networking_offload_caps,
					  hcattr, tunnel_lro_vxlan);
	attr->swp = MLX5_GET(per_protocol_networking_offload_caps,
			     hcattr, swp);
	attr->tunnel_stateless_gre =
			MLX5_GET(per_protocol_networking_offload_caps,
				 hcattr, tunnel_stateless_gre);
	attr->tunnel_stateless_vxlan =
			MLX5_GET(per_protocol_networking_offload_caps,
				 hcattr, tunnel_stateless_vxlan);
	attr->swp_csum = MLX5_GET(per_protocol_networking_offload_caps,
				  hcattr, swp_csum);
	attr->swp_lso = MLX5_GET(per_protocol_networking_offload_caps,
				 hcattr, swp_lso);
	attr->lro_max_msg_sz_mode = MLX5_GET
					(per_protocol_networking_offload_caps,
					 hcattr, lro_max_msg_sz_mode);
	for (i = 0; i < MLX5_LRO_NUM_SUPP_PERIODS; i++) {
		attr->lro_timer_supported_periods[i] =
			MLX5_GET(per_protocol_networking_offload_caps, hcattr,
				 lro_timer_supported_periods[i]);
	}
	attr->lro_min_mss_size = MLX5_GET(per_protocol_networking_offload_caps,
					  hcattr, lro_min_mss_size);
	attr->tunnel_stateless_geneve_rx =
			MLX5_GET(per_protocol_networking_offload_caps,
				 hcattr, tunnel_stateless_geneve_rx);
	attr->geneve_max_opt_len =
			MLX5_GET(per_protocol_networking_offload_caps,
				 hcattr, max_geneve_opt_len);
	attr->wqe_inline_mode = MLX5_GET(per_protocol_networking_offload_caps,
					 hcattr, wqe_inline_mode);
	attr->tunnel_stateless_gtp = MLX5_GET
					(per_protocol_networking_offload_caps,
					 hcattr, tunnel_stateless_gtp);
	attr->rss_ind_tbl_cap = MLX5_GET
					(per_protocol_networking_offload_caps,
					 hcattr, rss_ind_tbl_cap);
	/* Query HCA attribute for ROCE. */
	if (attr->roce) {
		hcattr = mlx5_devx_get_hca_cap(ctx, in, out, &rc,
				MLX5_GET_HCA_CAP_OP_MOD_ROCE |
				MLX5_HCA_CAP_OPMOD_GET_CUR);
		if (!hcattr) {
			DRV_LOG(DEBUG,
				"Failed to query devx HCA ROCE capabilities");
			return rc;
		}
		attr->qp_ts_format = MLX5_GET(roce_caps, hcattr, qp_ts_format);
	}
	if (attr->eth_virt &&
	    attr->wqe_inline_mode == MLX5_CAP_INLINE_MODE_VPORT_CONTEXT) {
		rc = mlx5_devx_cmd_query_nic_vport_context(ctx, 0, attr);
		if (rc) {
			attr->eth_virt = 0;
			goto error;
		}
	}
	if (attr->eswitch_manager) {
		hcattr = mlx5_devx_get_hca_cap(ctx, in, out, &rc,
				MLX5_SET_HCA_CAP_OP_MOD_ESW |
				MLX5_HCA_CAP_OPMOD_GET_CUR);
		if (!hcattr)
			return rc;
		attr->esw_mgr_vport_id_valid =
			MLX5_GET(esw_cap, hcattr,
				 esw_manager_vport_number_valid);
		attr->esw_mgr_vport_id =
			MLX5_GET(esw_cap, hcattr, esw_manager_vport_number);
	}
	return 0;
error:
	rc = (rc > 0) ? -rc : rc;
	return rc;
}
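
#ifdef MLX5_DEVX_CMDS_EXAMPLES /* Hypothetical guard - never defined. */
/*
 * Usage sketch: probe a device and branch on a few of the returned
 * capability bits. Illustrative only; which fields matter is entirely
 * caller specific.
 */
static int
mlx5_example_probe_caps(void *ctx)
{
	struct mlx5_hca_attr attr = {0};
	int ret;

	ret = mlx5_devx_cmd_query_hca_attr(ctx, &attr);
	if (ret)
		return ret;
	if (!attr.eth_net_offloads)
		DRV_LOG(DEBUG, "No Ethernet offload capabilities");
	if (attr.lro_cap)
		DRV_LOG(DEBUG, "LRO supported, min MSS %u",
			attr.lro_min_mss_size);
	return 0;
}
#endif /* MLX5_DEVX_CMDS_EXAMPLES */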

/**
 * Query TIS transport domain from QP verbs object using DevX API.
 *
 * @param[in] qp
 *   Pointer to verbs QP returned by ibv_create_qp.
 * @param[in] tis_num
 *   TIS number of TIS to query.
 * @param[out] tis_td
 *   Pointer to TIS transport domain variable, to be set by the routine.
 *
 * @return
 *   0 on success, a negative value otherwise.
 */
int
mlx5_devx_cmd_qp_query_tis_td(void *qp, uint32_t tis_num,
			      uint32_t *tis_td)
{
#ifdef HAVE_IBV_FLOW_DV_SUPPORT
	uint32_t in[MLX5_ST_SZ_DW(query_tis_in)] = {0};
	uint32_t out[MLX5_ST_SZ_DW(query_tis_out)] = {0};
	int rc;
	void *tis_ctx;

	MLX5_SET(query_tis_in, in, opcode, MLX5_CMD_OP_QUERY_TIS);
	MLX5_SET(query_tis_in, in, tisn, tis_num);
	rc = mlx5_glue->devx_qp_query(qp, in, sizeof(in), out, sizeof(out));
	if (rc) {
		DRV_LOG(ERR, "Failed to query QP using DevX");
		return -rc;
	}
	tis_ctx = MLX5_ADDR_OF(query_tis_out, out, tis_context);
	*tis_td = MLX5_GET(tisc, tis_ctx, transport_domain);
	return 0;
#else
	(void)qp;
	(void)tis_num;
	(void)tis_td;
	return -ENOTSUP;
#endif
}
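
#ifdef MLX5_DEVX_CMDS_EXAMPLES /* Hypothetical guard - never defined. */
/*
 * Usage sketch: resolve the transport domain of a TIS attached to a verbs
 * QP. "qp" is assumed to be a valid ibv_qp pointer and "tisn" a TIS number
 * taken from the QP creation path.
 */
static int
mlx5_example_tis_td(void *qp, uint32_t tisn)
{
	uint32_t td = 0;
	int ret;

	ret = mlx5_devx_cmd_qp_query_tis_td(qp, tisn, &td);
	if (ret)
		return ret;
	DRV_LOG(DEBUG, "TIS %u transport domain: %u", tisn, td);
	return 0;
}
#endif /* MLX5_DEVX_CMDS_EXAMPLES */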

/**
 * Fill WQ data for DevX API command.
 * Utility function for use when creating DevX objects containing a WQ.
 *
 * @param[in] wq_ctx
 *   Pointer to WQ context to fill with data.
 * @param [in] wq_attr
 *   Pointer to WQ attributes structure to fill in WQ context.
 */
static void
devx_cmd_fill_wq_data(void *wq_ctx, struct mlx5_devx_wq_attr *wq_attr)
{
	MLX5_SET(wq, wq_ctx, wq_type, wq_attr->wq_type);
	MLX5_SET(wq, wq_ctx, wq_signature, wq_attr->wq_signature);
	MLX5_SET(wq, wq_ctx, end_padding_mode, wq_attr->end_padding_mode);
	MLX5_SET(wq, wq_ctx, cd_slave, wq_attr->cd_slave);
	MLX5_SET(wq, wq_ctx, hds_skip_first_sge, wq_attr->hds_skip_first_sge);
	MLX5_SET(wq, wq_ctx, log2_hds_buf_size, wq_attr->log2_hds_buf_size);
	MLX5_SET(wq, wq_ctx, page_offset, wq_attr->page_offset);
	MLX5_SET(wq, wq_ctx, lwm, wq_attr->lwm);
	MLX5_SET(wq, wq_ctx, pd, wq_attr->pd);
	MLX5_SET(wq, wq_ctx, uar_page, wq_attr->uar_page);
	MLX5_SET64(wq, wq_ctx, dbr_addr, wq_attr->dbr_addr);
	MLX5_SET(wq, wq_ctx, hw_counter, wq_attr->hw_counter);
	MLX5_SET(wq, wq_ctx, sw_counter, wq_attr->sw_counter);
	MLX5_SET(wq, wq_ctx, log_wq_stride, wq_attr->log_wq_stride);
	if (wq_attr->log_wq_pg_sz > MLX5_ADAPTER_PAGE_SHIFT)
		MLX5_SET(wq, wq_ctx, log_wq_pg_sz,
			 wq_attr->log_wq_pg_sz - MLX5_ADAPTER_PAGE_SHIFT);
	MLX5_SET(wq, wq_ctx, log_wq_sz, wq_attr->log_wq_sz);
	MLX5_SET(wq, wq_ctx, dbr_umem_valid, wq_attr->dbr_umem_valid);
	MLX5_SET(wq, wq_ctx, wq_umem_valid, wq_attr->wq_umem_valid);
	MLX5_SET(wq, wq_ctx, log_hairpin_num_packets,
		 wq_attr->log_hairpin_num_packets);
	MLX5_SET(wq, wq_ctx, log_hairpin_data_sz, wq_attr->log_hairpin_data_sz);
	MLX5_SET(wq, wq_ctx, single_wqe_log_num_of_strides,
		 wq_attr->single_wqe_log_num_of_strides);
	MLX5_SET(wq, wq_ctx, two_byte_shift_en, wq_attr->two_byte_shift_en);
	MLX5_SET(wq, wq_ctx, single_stride_log_num_of_bytes,
		 wq_attr->single_stride_log_num_of_bytes);
	MLX5_SET(wq, wq_ctx, dbr_umem_id, wq_attr->dbr_umem_id);
	MLX5_SET(wq, wq_ctx, wq_umem_id, wq_attr->wq_umem_id);
	MLX5_SET64(wq, wq_ctx, wq_umem_offset, wq_attr->wq_umem_offset);
}

/**
 * Create RQ using DevX API.
 *
 * @param[in] ctx
 *   Context returned from mlx5 open_device() glue function.
 * @param [in] rq_attr
 *   Pointer to create RQ attributes structure.
 * @param [in] socket
 *   CPU socket ID for allocations.
 *
 * @return
 *   The DevX object created, NULL otherwise and rte_errno is set.
 */
struct mlx5_devx_obj *
mlx5_devx_cmd_create_rq(void *ctx,
			struct mlx5_devx_create_rq_attr *rq_attr,
			int socket)
{
	uint32_t in[MLX5_ST_SZ_DW(create_rq_in)] = {0};
	uint32_t out[MLX5_ST_SZ_DW(create_rq_out)] = {0};
	void *rq_ctx, *wq_ctx;
	struct mlx5_devx_wq_attr *wq_attr;
	struct mlx5_devx_obj *rq = NULL;

	rq = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*rq), 0, socket);
	if (!rq) {
		DRV_LOG(ERR, "Failed to allocate RQ data");
		rte_errno = ENOMEM;
		return NULL;
	}
	MLX5_SET(create_rq_in, in, opcode, MLX5_CMD_OP_CREATE_RQ);
	rq_ctx = MLX5_ADDR_OF(create_rq_in, in, ctx);
	MLX5_SET(rqc, rq_ctx, rlky, rq_attr->rlky);
	MLX5_SET(rqc, rq_ctx, delay_drop_en, rq_attr->delay_drop_en);
	MLX5_SET(rqc, rq_ctx, scatter_fcs, rq_attr->scatter_fcs);
	MLX5_SET(rqc, rq_ctx, vsd, rq_attr->vsd);
	MLX5_SET(rqc, rq_ctx, mem_rq_type, rq_attr->mem_rq_type);
	MLX5_SET(rqc, rq_ctx, state, rq_attr->state);
	MLX5_SET(rqc, rq_ctx, flush_in_error_en, rq_attr->flush_in_error_en);
	MLX5_SET(rqc, rq_ctx, hairpin, rq_attr->hairpin);
	MLX5_SET(rqc, rq_ctx, user_index, rq_attr->user_index);
	MLX5_SET(rqc, rq_ctx, cqn, rq_attr->cqn);
	MLX5_SET(rqc, rq_ctx, counter_set_id, rq_attr->counter_set_id);
	MLX5_SET(rqc, rq_ctx, rmpn, rq_attr->rmpn);
	MLX5_SET(sqc, rq_ctx, ts_format, rq_attr->ts_format);
	wq_ctx = MLX5_ADDR_OF(rqc, rq_ctx, wq);
	wq_attr = &rq_attr->wq_attr;
	devx_cmd_fill_wq_data(wq_ctx, wq_attr);
	rq->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in),
					     out, sizeof(out));
	if (!rq->obj) {
		DRV_LOG(ERR, "Failed to create RQ using DevX");
		rte_errno = errno;
		mlx5_free(rq);
		return NULL;
	}
	rq->id = MLX5_GET(create_rq_out, out, rqn);
	return rq;
}
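
#ifdef MLX5_DEVX_CMDS_EXAMPLES /* Hypothetical guard - never defined. */
/*
 * Usage sketch: create a cyclic RQ bound to an existing CQ. The umem and
 * doorbell ids are assumed to come from earlier umem registrations, and
 * the stride/depth values are hypothetical placeholders for a real queue
 * layout.
 */
static struct mlx5_devx_obj *
mlx5_example_create_rq(void *ctx, uint32_t cqn, uint32_t pd,
		       uint32_t wq_umem_id, uint32_t dbr_umem_id,
		       uint64_t dbr_addr)
{
	struct mlx5_devx_create_rq_attr rq_attr = {
		.flush_in_error_en = 1,
		.cqn = cqn,
	};

	rq_attr.wq_attr.wq_type = MLX5_WQ_TYPE_CYCLIC;
	rq_attr.wq_attr.pd = pd;
	rq_attr.wq_attr.wq_umem_id = wq_umem_id;
	rq_attr.wq_attr.wq_umem_valid = 1;
	rq_attr.wq_attr.dbr_umem_id = dbr_umem_id;
	rq_attr.wq_attr.dbr_umem_valid = 1;
	rq_attr.wq_attr.dbr_addr = dbr_addr;
	rq_attr.wq_attr.log_wq_stride = 4;	/* Hypothetical stride. */
	rq_attr.wq_attr.log_wq_sz = 10;		/* Hypothetical depth. */
	return mlx5_devx_cmd_create_rq(ctx, &rq_attr, SOCKET_ID_ANY);
}
#endif /* MLX5_DEVX_CMDS_EXAMPLES */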

/**
 * Modify RQ using DevX API.
 *
 * @param[in] rq
 *   Pointer to RQ object structure.
 * @param [in] rq_attr
 *   Pointer to modify RQ attributes structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_devx_cmd_modify_rq(struct mlx5_devx_obj *rq,
			struct mlx5_devx_modify_rq_attr *rq_attr)
{
	uint32_t in[MLX5_ST_SZ_DW(modify_rq_in)] = {0};
	uint32_t out[MLX5_ST_SZ_DW(modify_rq_out)] = {0};
	void *rq_ctx, *wq_ctx;
	int ret;

	MLX5_SET(modify_rq_in, in, opcode, MLX5_CMD_OP_MODIFY_RQ);
	MLX5_SET(modify_rq_in, in, rq_state, rq_attr->rq_state);
	MLX5_SET(modify_rq_in, in, rqn, rq->id);
	MLX5_SET64(modify_rq_in, in, modify_bitmask, rq_attr->modify_bitmask);
	rq_ctx = MLX5_ADDR_OF(modify_rq_in, in, ctx);
	MLX5_SET(rqc, rq_ctx, state, rq_attr->state);
	if (rq_attr->modify_bitmask &
			MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_SCATTER_FCS)
		MLX5_SET(rqc, rq_ctx, scatter_fcs, rq_attr->scatter_fcs);
	if (rq_attr->modify_bitmask & MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_VSD)
		MLX5_SET(rqc, rq_ctx, vsd, rq_attr->vsd);
	if (rq_attr->modify_bitmask &
			MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_RQ_COUNTER_SET_ID)
		MLX5_SET(rqc, rq_ctx, counter_set_id, rq_attr->counter_set_id);
	MLX5_SET(rqc, rq_ctx, hairpin_peer_sq, rq_attr->hairpin_peer_sq);
	MLX5_SET(rqc, rq_ctx, hairpin_peer_vhca, rq_attr->hairpin_peer_vhca);
	if (rq_attr->modify_bitmask & MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_WQ_LWM) {
		wq_ctx = MLX5_ADDR_OF(rqc, rq_ctx, wq);
		MLX5_SET(wq, wq_ctx, lwm, rq_attr->lwm);
	}
	ret = mlx5_glue->devx_obj_modify(rq->obj, in, sizeof(in),
					 out, sizeof(out));
	if (ret) {
		DRV_LOG(ERR, "Failed to modify RQ using DevX");
		rte_errno = errno;
		return -errno;
	}
	return ret;
}
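
#ifdef MLX5_DEVX_CMDS_EXAMPLES /* Hypothetical guard - never defined. */
/*
 * Usage sketch: move an RQ from RST to RDY, the usual step after creation.
 * Assumes the MLX5_RQC_STATE_* PRM state values from mlx5_prm.h.
 */
static int
mlx5_example_rq_to_ready(struct mlx5_devx_obj *rq)
{
	struct mlx5_devx_modify_rq_attr rq_attr = {
		.rq_state = MLX5_RQC_STATE_RST,
		.state = MLX5_RQC_STATE_RDY,
	};

	return mlx5_devx_cmd_modify_rq(rq, &rq_attr);
}
#endif /* MLX5_DEVX_CMDS_EXAMPLES */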

/**
 * Create RMP using DevX API.
 *
 * @param[in] ctx
 *   Context returned from mlx5 open_device() glue function.
 * @param [in] rmp_attr
 *   Pointer to create RMP attributes structure.
 * @param [in] socket
 *   CPU socket ID for allocations.
 *
 * @return
 *   The DevX object created, NULL otherwise and rte_errno is set.
 */
struct mlx5_devx_obj *
mlx5_devx_cmd_create_rmp(void *ctx,
			 struct mlx5_devx_create_rmp_attr *rmp_attr,
			 int socket)
{
	uint32_t in[MLX5_ST_SZ_DW(create_rmp_in)] = {0};
	uint32_t out[MLX5_ST_SZ_DW(create_rmp_out)] = {0};
	void *rmp_ctx, *wq_ctx;
	struct mlx5_devx_wq_attr *wq_attr;
	struct mlx5_devx_obj *rmp = NULL;

	rmp = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*rmp), 0, socket);
	if (!rmp) {
		DRV_LOG(ERR, "Failed to allocate RMP data");
		rte_errno = ENOMEM;
		return NULL;
	}
	MLX5_SET(create_rmp_in, in, opcode, MLX5_CMD_OP_CREATE_RMP);
	rmp_ctx = MLX5_ADDR_OF(create_rmp_in, in, ctx);
	MLX5_SET(rmpc, rmp_ctx, state, rmp_attr->state);
	MLX5_SET(rmpc, rmp_ctx, basic_cyclic_rcv_wqe,
		 rmp_attr->basic_cyclic_rcv_wqe);
	wq_ctx = MLX5_ADDR_OF(rmpc, rmp_ctx, wq);
	wq_attr = &rmp_attr->wq_attr;
	devx_cmd_fill_wq_data(wq_ctx, wq_attr);
	rmp->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in), out,
					      sizeof(out));
	if (!rmp->obj) {
		DRV_LOG(ERR, "Failed to create RMP using DevX");
		rte_errno = errno;
		mlx5_free(rmp);
		return NULL;
	}
	rmp->id = MLX5_GET(create_rmp_out, out, rmpn);
	return rmp;
}

/**
 * Create TIR using DevX API.
 *
 * @param[in] ctx
 *   Context returned from mlx5 open_device() glue function.
 * @param [in] tir_attr
 *   Pointer to TIR attributes structure.
 *
 * @return
 *   The DevX object created, NULL otherwise and rte_errno is set.
 */
struct mlx5_devx_obj *
mlx5_devx_cmd_create_tir(void *ctx,
			 struct mlx5_devx_tir_attr *tir_attr)
{
	uint32_t in[MLX5_ST_SZ_DW(create_tir_in)] = {0};
	uint32_t out[MLX5_ST_SZ_DW(create_tir_out)] = {0};
	void *tir_ctx, *outer, *inner, *rss_key;
	struct mlx5_devx_obj *tir = NULL;

	tir = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*tir), 0, SOCKET_ID_ANY);
	if (!tir) {
		DRV_LOG(ERR, "Failed to allocate TIR data");
		rte_errno = ENOMEM;
		return NULL;
	}
	MLX5_SET(create_tir_in, in, opcode, MLX5_CMD_OP_CREATE_TIR);
	tir_ctx = MLX5_ADDR_OF(create_tir_in, in, ctx);
	MLX5_SET(tirc, tir_ctx, disp_type, tir_attr->disp_type);
	MLX5_SET(tirc, tir_ctx, lro_timeout_period_usecs,
		 tir_attr->lro_timeout_period_usecs);
	MLX5_SET(tirc, tir_ctx, lro_enable_mask, tir_attr->lro_enable_mask);
	MLX5_SET(tirc, tir_ctx, lro_max_msg_sz, tir_attr->lro_max_msg_sz);
	MLX5_SET(tirc, tir_ctx, inline_rqn, tir_attr->inline_rqn);
	MLX5_SET(tirc, tir_ctx, rx_hash_symmetric, tir_attr->rx_hash_symmetric);
	MLX5_SET(tirc, tir_ctx, tunneled_offload_en,
		 tir_attr->tunneled_offload_en);
	MLX5_SET(tirc, tir_ctx, indirect_table, tir_attr->indirect_table);
	MLX5_SET(tirc, tir_ctx, rx_hash_fn, tir_attr->rx_hash_fn);
	MLX5_SET(tirc, tir_ctx, self_lb_block, tir_attr->self_lb_block);
	MLX5_SET(tirc, tir_ctx, transport_domain, tir_attr->transport_domain);
	rss_key = MLX5_ADDR_OF(tirc, tir_ctx, rx_hash_toeplitz_key);
	memcpy(rss_key, tir_attr->rx_hash_toeplitz_key, MLX5_RSS_HASH_KEY_LEN);
	outer = MLX5_ADDR_OF(tirc, tir_ctx, rx_hash_field_selector_outer);
	MLX5_SET(rx_hash_field_select, outer, l3_prot_type,
		 tir_attr->rx_hash_field_selector_outer.l3_prot_type);
	MLX5_SET(rx_hash_field_select, outer, l4_prot_type,
		 tir_attr->rx_hash_field_selector_outer.l4_prot_type);
	MLX5_SET(rx_hash_field_select, outer, selected_fields,
		 tir_attr->rx_hash_field_selector_outer.selected_fields);
	inner = MLX5_ADDR_OF(tirc, tir_ctx, rx_hash_field_selector_inner);
	MLX5_SET(rx_hash_field_select, inner, l3_prot_type,
		 tir_attr->rx_hash_field_selector_inner.l3_prot_type);
	MLX5_SET(rx_hash_field_select, inner, l4_prot_type,
		 tir_attr->rx_hash_field_selector_inner.l4_prot_type);
	MLX5_SET(rx_hash_field_select, inner, selected_fields,
		 tir_attr->rx_hash_field_selector_inner.selected_fields);
	tir->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in),
					      out, sizeof(out));
	if (!tir->obj) {
		DRV_LOG(ERR, "Failed to create TIR using DevX");
		rte_errno = errno;
		mlx5_free(tir);
		return NULL;
	}
	tir->id = MLX5_GET(create_tir_out, out, tirn);
	return tir;
}
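
#ifdef MLX5_DEVX_CMDS_EXAMPLES /* Hypothetical guard - never defined. */
/*
 * Usage sketch: create an indirectly-dispatched TIR hashing over an RQT.
 * The disp_type/rx_hash_fn encodings (assumed PRM values for indirect
 * dispatching and Toeplitz hashing) and the dummy key are illustrative
 * assumptions only.
 */
static struct mlx5_devx_obj *
mlx5_example_create_tir(void *ctx, uint32_t rqtn, uint32_t td)
{
	struct mlx5_devx_tir_attr tir_attr = {
		.disp_type = 1,		/* Assumed: indirect dispatching. */
		.rx_hash_fn = 2,	/* Assumed: Toeplitz. */
		.indirect_table = rqtn,
		.transport_domain = td,
	};

	/* Dummy key, illustrative only - never use a constant key in real code. */
	memset(tir_attr.rx_hash_toeplitz_key, 0x55, MLX5_RSS_HASH_KEY_LEN);
	return mlx5_devx_cmd_create_tir(ctx, &tir_attr);
}
#endif /* MLX5_DEVX_CMDS_EXAMPLES */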
1469
1470 /**
1471 * Modify TIR using DevX API.
1472 *
1473 * @param[in] tir
1474 * Pointer to TIR DevX object structure.
1475 * @param [in] modify_tir_attr
1476 * Pointer to TIR modification attributes structure.
1477 *
1478 * @return
1479 * 0 on success, a negative errno value otherwise and rte_errno is set.
1480 */
1481 int
mlx5_devx_cmd_modify_tir(struct mlx5_devx_obj * tir,struct mlx5_devx_modify_tir_attr * modify_tir_attr)1482 mlx5_devx_cmd_modify_tir(struct mlx5_devx_obj *tir,
1483 struct mlx5_devx_modify_tir_attr *modify_tir_attr)
1484 {
1485 struct mlx5_devx_tir_attr *tir_attr = &modify_tir_attr->tir;
1486 uint32_t in[MLX5_ST_SZ_DW(modify_tir_in)] = {0};
1487 uint32_t out[MLX5_ST_SZ_DW(modify_tir_out)] = {0};
1488 void *tir_ctx;
1489 int ret;
1490
1491 MLX5_SET(modify_tir_in, in, opcode, MLX5_CMD_OP_MODIFY_TIR);
1492 MLX5_SET(modify_tir_in, in, tirn, modify_tir_attr->tirn);
1493 MLX5_SET64(modify_tir_in, in, modify_bitmask,
1494 modify_tir_attr->modify_bitmask);
1495 tir_ctx = MLX5_ADDR_OF(modify_tir_in, in, ctx);
1496 if (modify_tir_attr->modify_bitmask &
1497 MLX5_MODIFY_TIR_IN_MODIFY_BITMASK_LRO) {
1498 MLX5_SET(tirc, tir_ctx, lro_timeout_period_usecs,
1499 tir_attr->lro_timeout_period_usecs);
1500 MLX5_SET(tirc, tir_ctx, lro_enable_mask,
1501 tir_attr->lro_enable_mask);
1502 MLX5_SET(tirc, tir_ctx, lro_max_msg_sz,
1503 tir_attr->lro_max_msg_sz);
1504 }
1505 if (modify_tir_attr->modify_bitmask &
1506 MLX5_MODIFY_TIR_IN_MODIFY_BITMASK_INDIRECT_TABLE)
1507 MLX5_SET(tirc, tir_ctx, indirect_table,
1508 tir_attr->indirect_table);
1509 if (modify_tir_attr->modify_bitmask &
1510 MLX5_MODIFY_TIR_IN_MODIFY_BITMASK_HASH) {
1511 int i;
1512 void *outer, *inner;
1513
1514 MLX5_SET(tirc, tir_ctx, rx_hash_symmetric,
1515 tir_attr->rx_hash_symmetric);
1516 MLX5_SET(tirc, tir_ctx, rx_hash_fn, tir_attr->rx_hash_fn);
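/* The 40-byte Toeplitz key is written as 10 32-bit words. */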
1517 for (i = 0; i < 10; i++) {
1518 MLX5_SET(tirc, tir_ctx, rx_hash_toeplitz_key[i],
1519 tir_attr->rx_hash_toeplitz_key[i]);
1520 }
1521 outer = MLX5_ADDR_OF(tirc, tir_ctx,
1522 rx_hash_field_selector_outer);
1523 MLX5_SET(rx_hash_field_select, outer, l3_prot_type,
1524 tir_attr->rx_hash_field_selector_outer.l3_prot_type);
1525 MLX5_SET(rx_hash_field_select, outer, l4_prot_type,
1526 tir_attr->rx_hash_field_selector_outer.l4_prot_type);
1527 MLX5_SET
1528 (rx_hash_field_select, outer, selected_fields,
1529 tir_attr->rx_hash_field_selector_outer.selected_fields);
1530 inner = MLX5_ADDR_OF(tirc, tir_ctx,
1531 rx_hash_field_selector_inner);
1532 MLX5_SET(rx_hash_field_select, inner, l3_prot_type,
1533 tir_attr->rx_hash_field_selector_inner.l3_prot_type);
1534 MLX5_SET(rx_hash_field_select, inner, l4_prot_type,
1535 tir_attr->rx_hash_field_selector_inner.l4_prot_type);
1536 MLX5_SET
1537 (rx_hash_field_select, inner, selected_fields,
1538 tir_attr->rx_hash_field_selector_inner.selected_fields);
1539 }
1540 if (modify_tir_attr->modify_bitmask &
1541 MLX5_MODIFY_TIR_IN_MODIFY_BITMASK_SELF_LB_EN) {
1542 MLX5_SET(tirc, tir_ctx, self_lb_block, tir_attr->self_lb_block);
1543 }
1544 ret = mlx5_glue->devx_obj_modify(tir->obj, in, sizeof(in),
1545 out, sizeof(out));
1546 if (ret) {
1547 DRV_LOG(ERR, "Failed to modify TIR using DevX");
1548 rte_errno = errno;
1549 return -errno;
1550 }
1551 return ret;
1552 }
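/*
 * Usage sketch (illustrative only, not part of the driver): retarget an
 * existing TIR to a new RQT. Only the INDIRECT_TABLE bit is set in the
 * modification bitmask, so every other TIR field is left untouched by
 * firmware. The "tir" object and "rqtn" value are assumed to come from
 * the caller.
 */
static __rte_unused int
mlx5_devx_example_retarget_tir(struct mlx5_devx_obj *tir, uint32_t rqtn)
{
	struct mlx5_devx_modify_tir_attr attr = {
		.tirn = tir->id,
		.modify_bitmask =
			MLX5_MODIFY_TIR_IN_MODIFY_BITMASK_INDIRECT_TABLE,
	};

	attr.tir.indirect_table = rqtn;
	return mlx5_devx_cmd_modify_tir(tir, &attr);
}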
1553
1554 /**
1555 * Create RQT using DevX API.
1556 *
1557 * @param[in] ctx
1558 * Context returned from mlx5 open_device() glue function.
1559 * @param [in] rqt_attr
1560 * Pointer to RQT attributes structure.
1561 *
1562 * @return
1563 * The DevX object created, NULL otherwise and rte_errno is set.
1564 */
1565 struct mlx5_devx_obj *
1566 mlx5_devx_cmd_create_rqt(void *ctx,
1567 struct mlx5_devx_rqt_attr *rqt_attr)
1568 {
1569 uint32_t *in = NULL;
1570 uint32_t inlen = MLX5_ST_SZ_BYTES(create_rqt_in) +
1571 rqt_attr->rqt_actual_size * sizeof(uint32_t);
1572 uint32_t out[MLX5_ST_SZ_DW(create_rqt_out)] = {0};
1573 void *rqt_ctx;
1574 struct mlx5_devx_obj *rqt = NULL;
1575 int i;
1576
1577 in = mlx5_malloc(MLX5_MEM_ZERO, inlen, 0, SOCKET_ID_ANY);
1578 if (!in) {
1579 DRV_LOG(ERR, "Failed to allocate RQT IN data");
1580 rte_errno = ENOMEM;
1581 return NULL;
1582 }
1583 rqt = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*rqt), 0, SOCKET_ID_ANY);
1584 if (!rqt) {
1585 DRV_LOG(ERR, "Failed to allocate RQT data");
1586 rte_errno = ENOMEM;
1587 mlx5_free(in);
1588 return NULL;
1589 }
1590 MLX5_SET(create_rqt_in, in, opcode, MLX5_CMD_OP_CREATE_RQT);
1591 rqt_ctx = MLX5_ADDR_OF(create_rqt_in, in, rqt_context);
1592 MLX5_SET(rqtc, rqt_ctx, list_q_type, rqt_attr->rq_type);
1593 MLX5_SET(rqtc, rqt_ctx, rqt_max_size, rqt_attr->rqt_max_size);
1594 MLX5_SET(rqtc, rqt_ctx, rqt_actual_size, rqt_attr->rqt_actual_size);
1595 for (i = 0; i < rqt_attr->rqt_actual_size; i++)
1596 MLX5_SET(rqtc, rqt_ctx, rq_num[i], rqt_attr->rq_list[i]);
1597 rqt->obj = mlx5_glue->devx_obj_create(ctx, in, inlen, out, sizeof(out));
1598 mlx5_free(in);
1599 if (!rqt->obj) {
1600 DRV_LOG(ERR, "Failed to create RQT using DevX");
1601 rte_errno = errno;
1602 mlx5_free(rqt);
1603 return NULL;
1604 }
1605 rqt->id = MLX5_GET(create_rqt_out, out, rqtn);
1606 return rqt;
1607 }
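/*
 * Usage sketch (illustrative): build an RQT that spreads traffic over a
 * caller-supplied list of RQ numbers. This assumes rq_list is the
 * trailing flexible array it is used as above, so the attribute
 * structure is sized for the entry count.
 */
static __rte_unused struct mlx5_devx_obj *
mlx5_devx_example_create_rqt(void *ctx, const uint32_t *rqns, unsigned int n)
{
	struct mlx5_devx_rqt_attr *attr;
	struct mlx5_devx_obj *rqt;
	unsigned int i;

	attr = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*attr) + n * sizeof(uint32_t),
			   0, SOCKET_ID_ANY);
	if (!attr) {
		rte_errno = ENOMEM;
		return NULL;
	}
	attr->rqt_max_size = n;
	attr->rqt_actual_size = n;
	for (i = 0; i < n; i++)
		attr->rq_list[i] = rqns[i];
	rqt = mlx5_devx_cmd_create_rqt(ctx, attr);
	mlx5_free(attr);
	return rqt;
}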
1608
1609 /**
1610 * Modify RQT using DevX API.
1611 *
1612 * @param[in] rqt
1613 * Pointer to RQT DevX object structure.
1614 * @param [in] rqt_attr
1615 * Pointer to RQT attributes structure.
1616 *
1617 * @return
1618 * 0 on success, a negative errno value otherwise and rte_errno is set.
1619 */
1620 int
1621 mlx5_devx_cmd_modify_rqt(struct mlx5_devx_obj *rqt,
1622 struct mlx5_devx_rqt_attr *rqt_attr)
1623 {
1624 uint32_t inlen = MLX5_ST_SZ_BYTES(modify_rqt_in) +
1625 rqt_attr->rqt_actual_size * sizeof(uint32_t);
1626 uint32_t out[MLX5_ST_SZ_DW(modify_rqt_out)] = {0};
1627 uint32_t *in = mlx5_malloc(MLX5_MEM_ZERO, inlen, 0, SOCKET_ID_ANY);
1628 void *rqt_ctx;
1629 int i;
1630 int ret;
1631
1632 if (!in) {
1633 DRV_LOG(ERR, "Failed to allocate RQT modify IN data.");
1634 rte_errno = ENOMEM;
1635 return -ENOMEM;
1636 }
1637 MLX5_SET(modify_rqt_in, in, opcode, MLX5_CMD_OP_MODIFY_RQT);
1638 MLX5_SET(modify_rqt_in, in, rqtn, rqt->id);
1639 MLX5_SET64(modify_rqt_in, in, modify_bitmask, 0x1);
1640 rqt_ctx = MLX5_ADDR_OF(modify_rqt_in, in, rqt_context);
1641 MLX5_SET(rqtc, rqt_ctx, list_q_type, rqt_attr->rq_type);
1642 MLX5_SET(rqtc, rqt_ctx, rqt_max_size, rqt_attr->rqt_max_size);
1643 MLX5_SET(rqtc, rqt_ctx, rqt_actual_size, rqt_attr->rqt_actual_size);
1644 for (i = 0; i < rqt_attr->rqt_actual_size; i++)
1645 MLX5_SET(rqtc, rqt_ctx, rq_num[i], rqt_attr->rq_list[i]);
1646 ret = mlx5_glue->devx_obj_modify(rqt->obj, in, inlen, out, sizeof(out));
1647 mlx5_free(in);
1648 if (ret) {
1649 DRV_LOG(ERR, "Failed to modify RQT using DevX.");
1650 rte_errno = errno;
1651 return -rte_errno;
1652 }
1653 return ret;
1654 }
1655
1656 /**
1657 * Create SQ using DevX API.
1658 *
1659 * @param[in] ctx
1660 * Context returned from mlx5 open_device() glue function.
1661 * @param [in] sq_attr
1662 * Pointer to SQ attributes structure.
1663 * @param [in] socket
1664 * CPU socket ID for allocations.
1665 *
1666 * @return
1667 * The DevX object created, NULL otherwise and rte_errno is set.
1668 */
1669 struct mlx5_devx_obj *
1670 mlx5_devx_cmd_create_sq(void *ctx,
1671 struct mlx5_devx_create_sq_attr *sq_attr)
1672 {
1673 uint32_t in[MLX5_ST_SZ_DW(create_sq_in)] = {0};
1674 uint32_t out[MLX5_ST_SZ_DW(create_sq_out)] = {0};
1675 void *sq_ctx;
1676 void *wq_ctx;
1677 struct mlx5_devx_wq_attr *wq_attr;
1678 struct mlx5_devx_obj *sq = NULL;
1679
1680 sq = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*sq), 0, SOCKET_ID_ANY);
1681 if (!sq) {
1682 DRV_LOG(ERR, "Failed to allocate SQ data");
1683 rte_errno = ENOMEM;
1684 return NULL;
1685 }
1686 MLX5_SET(create_sq_in, in, opcode, MLX5_CMD_OP_CREATE_SQ);
1687 sq_ctx = MLX5_ADDR_OF(create_sq_in, in, ctx);
1688 MLX5_SET(sqc, sq_ctx, rlky, sq_attr->rlky);
1689 MLX5_SET(sqc, sq_ctx, cd_master, sq_attr->cd_master);
1690 MLX5_SET(sqc, sq_ctx, fre, sq_attr->fre);
1691 MLX5_SET(sqc, sq_ctx, flush_in_error_en, sq_attr->flush_in_error_en);
1692 MLX5_SET(sqc, sq_ctx, allow_multi_pkt_send_wqe,
1693 sq_attr->allow_multi_pkt_send_wqe);
1694 MLX5_SET(sqc, sq_ctx, min_wqe_inline_mode,
1695 sq_attr->min_wqe_inline_mode);
1696 MLX5_SET(sqc, sq_ctx, state, sq_attr->state);
1697 MLX5_SET(sqc, sq_ctx, reg_umr, sq_attr->reg_umr);
1698 MLX5_SET(sqc, sq_ctx, allow_swp, sq_attr->allow_swp);
1699 MLX5_SET(sqc, sq_ctx, hairpin, sq_attr->hairpin);
1700 MLX5_SET(sqc, sq_ctx, non_wire, sq_attr->non_wire);
1701 MLX5_SET(sqc, sq_ctx, static_sq_wq, sq_attr->static_sq_wq);
1702 MLX5_SET(sqc, sq_ctx, user_index, sq_attr->user_index);
1703 MLX5_SET(sqc, sq_ctx, cqn, sq_attr->cqn);
1704 MLX5_SET(sqc, sq_ctx, packet_pacing_rate_limit_index,
1705 sq_attr->packet_pacing_rate_limit_index);
1706 MLX5_SET(sqc, sq_ctx, tis_lst_sz, sq_attr->tis_lst_sz);
1707 MLX5_SET(sqc, sq_ctx, tis_num_0, sq_attr->tis_num);
1708 MLX5_SET(sqc, sq_ctx, ts_format, sq_attr->ts_format);
1709 wq_ctx = MLX5_ADDR_OF(sqc, sq_ctx, wq);
1710 wq_attr = &sq_attr->wq_attr;
1711 devx_cmd_fill_wq_data(wq_ctx, wq_attr);
1712 sq->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in),
1713 out, sizeof(out));
1714 if (!sq->obj) {
1715 DRV_LOG(ERR, "Failed to create SQ using DevX");
1716 rte_errno = errno;
1717 mlx5_free(sq);
1718 return NULL;
1719 }
1720 sq->id = MLX5_GET(create_sq_out, out, sqn);
1721 return sq;
1722 }
1723
1724 /**
1725 * Modify SQ using DevX API.
1726 *
1727 * @param[in] sq
1728 * Pointer to SQ object structure.
1729 * @param [in] sq_attr
1730 * Pointer to SQ attributes structure.
1731 *
1732 * @return
1733 * 0 on success, a negative errno value otherwise and rte_errno is set.
1734 */
1735 int
1736 mlx5_devx_cmd_modify_sq(struct mlx5_devx_obj *sq,
1737 struct mlx5_devx_modify_sq_attr *sq_attr)
1738 {
1739 uint32_t in[MLX5_ST_SZ_DW(modify_sq_in)] = {0};
1740 uint32_t out[MLX5_ST_SZ_DW(modify_sq_out)] = {0};
1741 void *sq_ctx;
1742 int ret;
1743
1744 MLX5_SET(modify_sq_in, in, opcode, MLX5_CMD_OP_MODIFY_SQ);
1745 MLX5_SET(modify_sq_in, in, sq_state, sq_attr->sq_state);
1746 MLX5_SET(modify_sq_in, in, sqn, sq->id);
1747 sq_ctx = MLX5_ADDR_OF(modify_sq_in, in, ctx);
1748 MLX5_SET(sqc, sq_ctx, state, sq_attr->state);
1749 MLX5_SET(sqc, sq_ctx, hairpin_peer_rq, sq_attr->hairpin_peer_rq);
1750 MLX5_SET(sqc, sq_ctx, hairpin_peer_vhca, sq_attr->hairpin_peer_vhca);
1751 ret = mlx5_glue->devx_obj_modify(sq->obj, in, sizeof(in),
1752 out, sizeof(out));
1753 if (ret) {
1754 DRV_LOG(ERR, "Failed to modify SQ using DevX");
1755 rte_errno = errno;
1756 return -rte_errno;
1757 }
1758 return ret;
1759 }
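/*
 * Usage sketch (illustrative): a freshly created SQ starts in RST state
 * and must be moved to RDY before WQEs can be posted. The
 * MLX5_SQC_STATE_* values are taken from mlx5_prm.h.
 */
static __rte_unused int
mlx5_devx_example_sq_rst2rdy(struct mlx5_devx_obj *sq)
{
	struct mlx5_devx_modify_sq_attr attr = {
		.sq_state = MLX5_SQC_STATE_RST,
		.state = MLX5_SQC_STATE_RDY,
	};

	return mlx5_devx_cmd_modify_sq(sq, &attr);
}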
1760
1761 /**
1762 * Create TIS using DevX API.
1763 *
1764 * @param[in] ctx
1765 * Context returned from mlx5 open_device() glue function.
1766 * @param [in] tis_attr
1767 * Pointer to TIS attributes structure.
1768 *
1769 * @return
1770 * The DevX object created, NULL otherwise and rte_errno is set.
1771 */
1772 struct mlx5_devx_obj *
1773 mlx5_devx_cmd_create_tis(void *ctx,
1774 struct mlx5_devx_tis_attr *tis_attr)
1775 {
1776 uint32_t in[MLX5_ST_SZ_DW(create_tis_in)] = {0};
1777 uint32_t out[MLX5_ST_SZ_DW(create_tis_out)] = {0};
1778 struct mlx5_devx_obj *tis = NULL;
1779 void *tis_ctx;
1780
1781 tis = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*tis), 0, SOCKET_ID_ANY);
1782 if (!tis) {
1783 DRV_LOG(ERR, "Failed to allocate TIS object");
1784 rte_errno = ENOMEM;
1785 return NULL;
1786 }
1787 MLX5_SET(create_tis_in, in, opcode, MLX5_CMD_OP_CREATE_TIS);
1788 tis_ctx = MLX5_ADDR_OF(create_tis_in, in, ctx);
1789 MLX5_SET(tisc, tis_ctx, strict_lag_tx_port_affinity,
1790 tis_attr->strict_lag_tx_port_affinity);
1791 MLX5_SET(tisc, tis_ctx, lag_tx_port_affinity,
1792 tis_attr->lag_tx_port_affinity);
1793 MLX5_SET(tisc, tis_ctx, prio, tis_attr->prio);
1794 MLX5_SET(tisc, tis_ctx, transport_domain,
1795 tis_attr->transport_domain);
1796 tis->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in),
1797 out, sizeof(out));
1798 if (!tis->obj) {
1799 DRV_LOG(ERR, "Failed to create TIS using DevX");
1800 rte_errno = errno;
1801 mlx5_free(tis);
1802 return NULL;
1803 }
1804 tis->id = MLX5_GET(create_tis_out, out, tisn);
1805 return tis;
1806 }
1807
1808 /**
1809 * Create transport domain using DevX API.
1810 *
1811 * @param[in] ctx
1812 * Context returned from mlx5 open_device() glue function.
1813 * @return
1814 * The DevX object created, NULL otherwise and rte_errno is set.
1815 */
1816 struct mlx5_devx_obj *
1817 mlx5_devx_cmd_create_td(void *ctx)
1818 {
1819 uint32_t in[MLX5_ST_SZ_DW(alloc_transport_domain_in)] = {0};
1820 uint32_t out[MLX5_ST_SZ_DW(alloc_transport_domain_out)] = {0};
1821 struct mlx5_devx_obj *td = NULL;
1822
1823 td = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*td), 0, SOCKET_ID_ANY);
1824 if (!td) {
1825 DRV_LOG(ERR, "Failed to allocate TD object");
1826 rte_errno = ENOMEM;
1827 return NULL;
1828 }
1829 MLX5_SET(alloc_transport_domain_in, in, opcode,
1830 MLX5_CMD_OP_ALLOC_TRANSPORT_DOMAIN);
1831 td->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in),
1832 out, sizeof(out));
1833 if (!td->obj) {
1834 DRV_LOG(ERR, "Failed to create TD using DevX");
1835 rte_errno = errno;
1836 mlx5_free(td);
1837 return NULL;
1838 }
1839 td->id = MLX5_GET(alloc_transport_domain_out, out,
1840 transport_domain);
1841 return td;
1842 }
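/*
 * Usage sketch (illustrative): the usual Tx object ordering - allocate a
 * transport domain first, then create a TIS inside it. The caller keeps
 * ownership of both objects and destroys them in reverse order.
 */
static __rte_unused struct mlx5_devx_obj *
mlx5_devx_example_tis_in_td(void *ctx, struct mlx5_devx_obj **td)
{
	struct mlx5_devx_tis_attr tis_attr = { 0 };

	*td = mlx5_devx_cmd_create_td(ctx);
	if (!*td)
		return NULL;
	tis_attr.transport_domain = (*td)->id;
	return mlx5_devx_cmd_create_tis(ctx, &tis_attr);
}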
1843
1844 /**
1845 * Dump all flows to file.
1846 *
1847 * @param[in] fdb_domain
1848 * FDB domain.
1849 * @param[in] rx_domain
1850 * RX domain.
1851 * @param[in] tx_domain
1852 * TX domain.
1853 * @param[in] file
1854 * Pointer to file stream.
1855 *
1856 * @return
1857 * 0 on success, a negative value otherwise.
1858 */
1859 int
1860 mlx5_devx_cmd_flow_dump(void *fdb_domain __rte_unused,
1861 void *rx_domain __rte_unused,
1862 void *tx_domain __rte_unused, FILE *file __rte_unused)
1863 {
1864 int ret = 0;
1865
1866 #ifdef HAVE_MLX5_DR_FLOW_DUMP
1867 if (fdb_domain) {
1868 ret = mlx5_glue->dr_dump_domain(file, fdb_domain);
1869 if (ret)
1870 return ret;
1871 }
1872 MLX5_ASSERT(rx_domain);
1873 ret = mlx5_glue->dr_dump_domain(file, rx_domain);
1874 if (ret)
1875 return ret;
1876 MLX5_ASSERT(tx_domain);
1877 ret = mlx5_glue->dr_dump_domain(file, tx_domain);
1878 #else
1879 ret = ENOTSUP;
1880 #endif
1881 return -ret;
1882 }
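/*
 * Usage sketch (illustrative): dump all steering domains to a file. A
 * NULL FDB domain is tolerated above, while the Rx and Tx domains are
 * asserted and must be valid. The "path" argument is an assumption.
 */
static __rte_unused int
mlx5_devx_example_flow_dump(void *fdb, void *rx, void *tx, const char *path)
{
	FILE *f = fopen(path, "w");
	int ret;

	if (!f)
		return -errno;
	ret = mlx5_devx_cmd_flow_dump(fdb, rx, tx, f);
	fclose(f);
	return ret;
}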
1883
1884 int
1885 mlx5_devx_cmd_flow_single_dump(void *rule_info __rte_unused,
1886 FILE *file __rte_unused)
1887 {
1888 int ret = 0;
1889 #ifdef HAVE_MLX5_DR_FLOW_DUMP_RULE
1890 if (rule_info)
1891 ret = mlx5_glue->dr_dump_rule(file, rule_info);
1892 #else
1893 ret = ENOTSUP;
1894 #endif
1895 return -ret;
1896 }
1897
1898 /**
1899 * Create CQ using DevX API.
1900 *
1901 * @param[in] ctx
1902 * Context returned from mlx5 open_device() glue function.
1903 * @param [in] attr
1904 * Pointer to CQ attributes structure.
1905 *
1906 * @return
1907 * The DevX object created, NULL otherwise and rte_errno is set.
1908 */
1909 struct mlx5_devx_obj *
1910 mlx5_devx_cmd_create_cq(void *ctx, struct mlx5_devx_cq_attr *attr)
1911 {
1912 uint32_t in[MLX5_ST_SZ_DW(create_cq_in)] = {0};
1913 uint32_t out[MLX5_ST_SZ_DW(create_cq_out)] = {0};
1914 struct mlx5_devx_obj *cq_obj = mlx5_malloc(MLX5_MEM_ZERO,
1915 sizeof(*cq_obj),
1916 0, SOCKET_ID_ANY);
1917 void *cqctx = MLX5_ADDR_OF(create_cq_in, in, cq_context);
1918
1919 if (!cq_obj) {
1920 DRV_LOG(ERR, "Failed to allocate CQ object memory.");
1921 rte_errno = ENOMEM;
1922 return NULL;
1923 }
1924 MLX5_SET(create_cq_in, in, opcode, MLX5_CMD_OP_CREATE_CQ);
1925 if (attr->db_umem_valid) {
1926 MLX5_SET(cqc, cqctx, dbr_umem_valid, attr->db_umem_valid);
1927 MLX5_SET(cqc, cqctx, dbr_umem_id, attr->db_umem_id);
1928 MLX5_SET64(cqc, cqctx, dbr_addr, attr->db_umem_offset);
1929 } else {
1930 MLX5_SET64(cqc, cqctx, dbr_addr, attr->db_addr);
1931 }
1932 MLX5_SET(cqc, cqctx, cqe_sz, (RTE_CACHE_LINE_SIZE == 128) ?
1933 MLX5_CQE_SIZE_128B : MLX5_CQE_SIZE_64B);
1934 MLX5_SET(cqc, cqctx, cc, attr->use_first_only);
1935 MLX5_SET(cqc, cqctx, oi, attr->overrun_ignore);
1936 MLX5_SET(cqc, cqctx, log_cq_size, attr->log_cq_size);
1937 if (attr->log_page_size > MLX5_ADAPTER_PAGE_SHIFT)
1938 MLX5_SET(cqc, cqctx, log_page_size,
1939 attr->log_page_size - MLX5_ADAPTER_PAGE_SHIFT);
1940 MLX5_SET(cqc, cqctx, c_eqn, attr->eqn);
1941 MLX5_SET(cqc, cqctx, uar_page, attr->uar_page_id);
1942 MLX5_SET(cqc, cqctx, cqe_comp_en, !!attr->cqe_comp_en);
1943 MLX5_SET(cqc, cqctx, mini_cqe_res_format, attr->mini_cqe_res_format);
1944 MLX5_SET(cqc, cqctx, mini_cqe_res_format_ext,
1945 attr->mini_cqe_res_format_ext);
1946 if (attr->q_umem_valid) {
1947 MLX5_SET(create_cq_in, in, cq_umem_valid, attr->q_umem_valid);
1948 MLX5_SET(create_cq_in, in, cq_umem_id, attr->q_umem_id);
1949 MLX5_SET64(create_cq_in, in, cq_umem_offset,
1950 attr->q_umem_offset);
1951 }
1952 cq_obj->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in), out,
1953 sizeof(out));
1954 if (!cq_obj->obj) {
1955 rte_errno = errno;
1956 DRV_LOG(ERR, "Failed to create CQ using DevX errno=%d.", errno);
1957 mlx5_free(cq_obj);
1958 return NULL;
1959 }
1960 cq_obj->id = MLX5_GET(create_cq_out, out, cqn);
1961 return cq_obj;
1962 }
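/*
 * Usage sketch (illustrative): create a 256-entry CQ whose queue and
 * doorbell memory were previously registered as umem objects. All ids
 * (EQ number, UAR page, umem ids) are assumptions supplied by the
 * caller; the umem offsets are left at zero for brevity.
 */
static __rte_unused struct mlx5_devx_obj *
mlx5_devx_example_create_cq(void *ctx, uint32_t eqn, uint32_t uar_id,
			    uint32_t q_umem_id, uint32_t db_umem_id)
{
	struct mlx5_devx_cq_attr attr = {
		.log_cq_size = 8, /* 2^8 = 256 CQEs. */
		.eqn = eqn,
		.uar_page_id = uar_id,
		.q_umem_valid = 1,
		.q_umem_id = q_umem_id,
		.db_umem_valid = 1,
		.db_umem_id = db_umem_id,
	};

	return mlx5_devx_cmd_create_cq(ctx, &attr);
}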
1963
1964 /**
1965 * Create VIRTQ using DevX API.
1966 *
1967 * @param[in] ctx
1968 * Context returned from mlx5 open_device() glue function.
1969 * @param [in] attr
1970 * Pointer to VIRTQ attributes structure.
1971 *
1972 * @return
1973 * The DevX object created, NULL otherwise and rte_errno is set.
1974 */
1975 struct mlx5_devx_obj *
1976 mlx5_devx_cmd_create_virtq(void *ctx,
1977 struct mlx5_devx_virtq_attr *attr)
1978 {
1979 uint32_t in[MLX5_ST_SZ_DW(create_virtq_in)] = {0};
1980 uint32_t out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0};
1981 struct mlx5_devx_obj *virtq_obj = mlx5_malloc(MLX5_MEM_ZERO,
1982 sizeof(*virtq_obj),
1983 0, SOCKET_ID_ANY);
1984 void *virtq = MLX5_ADDR_OF(create_virtq_in, in, virtq);
1985 void *hdr = MLX5_ADDR_OF(create_virtq_in, in, hdr);
1986 void *virtctx = MLX5_ADDR_OF(virtio_net_q, virtq, virtio_q_context);
1987
1988 if (!virtq_obj) {
1989 DRV_LOG(ERR, "Failed to allocate virtq data.");
1990 rte_errno = ENOMEM;
1991 return NULL;
1992 }
1993 MLX5_SET(general_obj_in_cmd_hdr, hdr, opcode,
1994 MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
1995 MLX5_SET(general_obj_in_cmd_hdr, hdr, obj_type,
1996 MLX5_GENERAL_OBJ_TYPE_VIRTQ);
1997 MLX5_SET16(virtio_net_q, virtq, hw_available_index,
1998 attr->hw_available_index);
1999 MLX5_SET16(virtio_net_q, virtq, hw_used_index, attr->hw_used_index);
2000 MLX5_SET16(virtio_net_q, virtq, tso_ipv4, attr->tso_ipv4);
2001 MLX5_SET16(virtio_net_q, virtq, tso_ipv6, attr->tso_ipv6);
2002 MLX5_SET16(virtio_net_q, virtq, tx_csum, attr->tx_csum);
2003 MLX5_SET16(virtio_net_q, virtq, rx_csum, attr->rx_csum);
2004 MLX5_SET16(virtio_q, virtctx, virtio_version_1_0,
2005 attr->virtio_version_1_0);
2006 MLX5_SET16(virtio_q, virtctx, event_mode, attr->event_mode);
2007 MLX5_SET(virtio_q, virtctx, event_qpn_or_msix, attr->qp_id);
2008 MLX5_SET64(virtio_q, virtctx, desc_addr, attr->desc_addr);
2009 MLX5_SET64(virtio_q, virtctx, used_addr, attr->used_addr);
2010 MLX5_SET64(virtio_q, virtctx, available_addr, attr->available_addr);
2011 MLX5_SET16(virtio_q, virtctx, queue_index, attr->queue_index);
2012 MLX5_SET16(virtio_q, virtctx, queue_size, attr->q_size);
2013 MLX5_SET(virtio_q, virtctx, virtio_q_mkey, attr->mkey);
2014 MLX5_SET(virtio_q, virtctx, umem_1_id, attr->umems[0].id);
2015 MLX5_SET(virtio_q, virtctx, umem_1_size, attr->umems[0].size);
2016 MLX5_SET64(virtio_q, virtctx, umem_1_offset, attr->umems[0].offset);
2017 MLX5_SET(virtio_q, virtctx, umem_2_id, attr->umems[1].id);
2018 MLX5_SET(virtio_q, virtctx, umem_2_size, attr->umems[1].size);
2019 MLX5_SET64(virtio_q, virtctx, umem_2_offset, attr->umems[1].offset);
2020 MLX5_SET(virtio_q, virtctx, umem_3_id, attr->umems[2].id);
2021 MLX5_SET(virtio_q, virtctx, umem_3_size, attr->umems[2].size);
2022 MLX5_SET64(virtio_q, virtctx, umem_3_offset, attr->umems[2].offset);
2023 MLX5_SET(virtio_q, virtctx, counter_set_id, attr->counters_obj_id);
2024 MLX5_SET(virtio_q, virtctx, pd, attr->pd);
2025 MLX5_SET(virtio_q, virtctx, queue_period_mode, attr->hw_latency_mode);
2026 MLX5_SET(virtio_q, virtctx, queue_period_us, attr->hw_max_latency_us);
2027 MLX5_SET(virtio_q, virtctx, queue_max_count, attr->hw_max_pending_comp);
2028 MLX5_SET(virtio_net_q, virtq, tisn_or_qpn, attr->tis_id);
2029 virtq_obj->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in), out,
2030 sizeof(out));
2031 if (!virtq_obj->obj) {
2032 rte_errno = errno;
2033 DRV_LOG(ERR, "Failed to create VIRTQ Obj using DevX.");
2034 mlx5_free(virtq_obj);
2035 return NULL;
2036 }
2037 virtq_obj->id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
2038 return virtq_obj;
2039 }
2040
2041 /**
2042 * Modify VIRTQ using DevX API.
2043 *
2044 * @param[in] virtq_obj
2045 * Pointer to virtq object structure.
2046 * @param [in] attr
2047 * Pointer to modify virtq attributes structure.
2048 *
2049 * @return
2050 * 0 on success, a negative errno value otherwise and rte_errno is set.
2051 */
2052 int
2053 mlx5_devx_cmd_modify_virtq(struct mlx5_devx_obj *virtq_obj,
2054 struct mlx5_devx_virtq_attr *attr)
2055 {
2056 uint32_t in[MLX5_ST_SZ_DW(create_virtq_in)] = {0};
2057 uint32_t out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0};
2058 void *virtq = MLX5_ADDR_OF(create_virtq_in, in, virtq);
2059 void *hdr = MLX5_ADDR_OF(create_virtq_in, in, hdr);
2060 void *virtctx = MLX5_ADDR_OF(virtio_net_q, virtq, virtio_q_context);
2061 int ret;
2062
2063 MLX5_SET(general_obj_in_cmd_hdr, hdr, opcode,
2064 MLX5_CMD_OP_MODIFY_GENERAL_OBJECT);
2065 MLX5_SET(general_obj_in_cmd_hdr, hdr, obj_type,
2066 MLX5_GENERAL_OBJ_TYPE_VIRTQ);
2067 MLX5_SET(general_obj_in_cmd_hdr, hdr, obj_id, virtq_obj->id);
2068 MLX5_SET64(virtio_net_q, virtq, modify_field_select, attr->type);
2069 MLX5_SET16(virtio_q, virtctx, queue_index, attr->queue_index);
2070 switch (attr->type) {
2071 case MLX5_VIRTQ_MODIFY_TYPE_STATE:
2072 MLX5_SET16(virtio_net_q, virtq, state, attr->state);
2073 break;
2074 case MLX5_VIRTQ_MODIFY_TYPE_DIRTY_BITMAP_PARAMS:
2075 MLX5_SET(virtio_net_q, virtq, dirty_bitmap_mkey,
2076 attr->dirty_bitmap_mkey);
2077 MLX5_SET64(virtio_net_q, virtq, dirty_bitmap_addr,
2078 attr->dirty_bitmap_addr);
2079 MLX5_SET(virtio_net_q, virtq, dirty_bitmap_size,
2080 attr->dirty_bitmap_size);
2081 break;
2082 case MLX5_VIRTQ_MODIFY_TYPE_DIRTY_BITMAP_DUMP_ENABLE:
2083 MLX5_SET(virtio_net_q, virtq, dirty_bitmap_dump_enable,
2084 attr->dirty_bitmap_dump_enable);
2085 break;
2086 default:
2087 rte_errno = EINVAL;
2088 return -rte_errno;
2089 }
2090 ret = mlx5_glue->devx_obj_modify(virtq_obj->obj, in, sizeof(in),
2091 out, sizeof(out));
2092 if (ret) {
2093 DRV_LOG(ERR, "Failed to modify VIRTQ using DevX.");
2094 rte_errno = errno;
2095 return -rte_errno;
2096 }
2097 return ret;
2098 }
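/*
 * Usage sketch (illustrative): suspend or resume a virtq by modifying
 * only its state. The MLX5_VIRTQ_STATE_* values come from mlx5_prm.h;
 * the "virtq" object and queue index are assumptions for the example.
 */
static __rte_unused int
mlx5_devx_example_virtq_set_state(struct mlx5_devx_obj *virtq,
				  uint16_t queue_index, int ready)
{
	struct mlx5_devx_virtq_attr attr = {
		.type = MLX5_VIRTQ_MODIFY_TYPE_STATE,
		.queue_index = queue_index,
		.state = ready ? MLX5_VIRTQ_STATE_RDY :
				 MLX5_VIRTQ_STATE_SUSPEND,
	};

	return mlx5_devx_cmd_modify_virtq(virtq, &attr);
}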
2099
2100 /**
2101 * Query VIRTQ using DevX API.
2102 *
2103 * @param[in] virtq_obj
2104 * Pointer to virtq object structure.
2105 * @param [in,out] attr
2106 * Pointer to virtq attributes structure.
2107 *
2108 * @return
2109 * 0 on success, a negative errno value otherwise and rte_errno is set.
2110 */
2111 int
2112 mlx5_devx_cmd_query_virtq(struct mlx5_devx_obj *virtq_obj,
2113 struct mlx5_devx_virtq_attr *attr)
2114 {
2115 uint32_t in[MLX5_ST_SZ_DW(general_obj_in_cmd_hdr)] = {0};
2116 uint32_t out[MLX5_ST_SZ_DW(query_virtq_out)] = {0};
2117 void *hdr = MLX5_ADDR_OF(query_virtq_out, in, hdr);
2118 void *virtq = MLX5_ADDR_OF(query_virtq_out, out, virtq);
2119 int ret;
2120
2121 MLX5_SET(general_obj_in_cmd_hdr, hdr, opcode,
2122 MLX5_CMD_OP_QUERY_GENERAL_OBJECT);
2123 MLX5_SET(general_obj_in_cmd_hdr, hdr, obj_type,
2124 MLX5_GENERAL_OBJ_TYPE_VIRTQ);
2125 MLX5_SET(general_obj_in_cmd_hdr, hdr, obj_id, virtq_obj->id);
2126 ret = mlx5_glue->devx_obj_query(virtq_obj->obj, in, sizeof(in),
2127 out, sizeof(out));
2128 if (ret) {
2129 DRV_LOG(ERR, "Failed to query VIRTQ using DevX.");
2130 rte_errno = errno;
2131 return -errno;
2132 }
2133 attr->hw_available_index = MLX5_GET16(virtio_net_q, virtq,
2134 hw_available_index);
2135 attr->hw_used_index = MLX5_GET16(virtio_net_q, virtq, hw_used_index);
2136 attr->state = MLX5_GET16(virtio_net_q, virtq, state);
2137 attr->error_type = MLX5_GET16(virtio_net_q, virtq,
2138 virtio_q_context.error_type);
2139 return ret;
2140 }
2141
2142 /**
2143 * Create QP using DevX API.
2144 *
2145 * @param[in] ctx
2146 * Context returned from mlx5 open_device() glue function.
2147 * @param [in] attr
2148 * Pointer to QP attributes structure.
2149 *
2150 * @return
2151 * The DevX object created, NULL otherwise and rte_errno is set.
2152 */
2153 struct mlx5_devx_obj *
2154 mlx5_devx_cmd_create_qp(void *ctx,
2155 struct mlx5_devx_qp_attr *attr)
2156 {
2157 uint32_t in[MLX5_ST_SZ_DW(create_qp_in)] = {0};
2158 uint32_t out[MLX5_ST_SZ_DW(create_qp_out)] = {0};
2159 struct mlx5_devx_obj *qp_obj = mlx5_malloc(MLX5_MEM_ZERO,
2160 sizeof(*qp_obj),
2161 0, SOCKET_ID_ANY);
2162 void *qpc = MLX5_ADDR_OF(create_qp_in, in, qpc);
2163
2164 if (!qp_obj) {
2165 DRV_LOG(ERR, "Failed to allocate QP data.");
2166 rte_errno = ENOMEM;
2167 return NULL;
2168 }
2169 MLX5_SET(create_qp_in, in, opcode, MLX5_CMD_OP_CREATE_QP);
2170 MLX5_SET(qpc, qpc, st, MLX5_QP_ST_RC);
2171 MLX5_SET(qpc, qpc, pd, attr->pd);
2172 MLX5_SET(qpc, qpc, ts_format, attr->ts_format);
2173 MLX5_SET(qpc, qpc, user_index, attr->user_index);
2174 if (attr->uar_index) {
2175 if (attr->mmo) {
2176 void *qpc_ext_and_pas_list = MLX5_ADDR_OF(create_qp_in,
2177 in, qpc_extension_and_pas_list);
2178 void *qpc_ext = MLX5_ADDR_OF(qpc_extension_and_pas_list,
2179 qpc_ext_and_pas_list, qpc_data_extension);
2180
2181 MLX5_SET(create_qp_in, in, qpc_ext, 1);
2182 MLX5_SET(qpc_extension, qpc_ext, mmo, 1);
2183 }
2184 MLX5_SET(qpc, qpc, pm_state, MLX5_QP_PM_MIGRATED);
2185 MLX5_SET(qpc, qpc, uar_page, attr->uar_index);
2186 if (attr->log_page_size > MLX5_ADAPTER_PAGE_SHIFT)
2187 MLX5_SET(qpc, qpc, log_page_size,
2188 attr->log_page_size - MLX5_ADAPTER_PAGE_SHIFT);
2189 if (attr->num_of_send_wqbbs) {
2190 MLX5_ASSERT(RTE_IS_POWER_OF_2(attr->num_of_send_wqbbs));
2191 MLX5_SET(qpc, qpc, cqn_snd, attr->cqn);
2192 MLX5_SET(qpc, qpc, log_sq_size,
2193 rte_log2_u32(attr->num_of_send_wqbbs));
2194 } else {
2195 MLX5_SET(qpc, qpc, no_sq, 1);
2196 }
2197 if (attr->num_of_receive_wqes) {
2198 MLX5_ASSERT(RTE_IS_POWER_OF_2(
2199 attr->num_of_receive_wqes));
2200 MLX5_SET(qpc, qpc, cqn_rcv, attr->cqn);
2201 MLX5_SET(qpc, qpc, log_rq_stride, attr->log_rq_stride -
2202 MLX5_LOG_RQ_STRIDE_SHIFT);
2203 MLX5_SET(qpc, qpc, log_rq_size,
2204 rte_log2_u32(attr->num_of_receive_wqes));
2205 MLX5_SET(qpc, qpc, rq_type, MLX5_NON_ZERO_RQ);
2206 } else {
2207 MLX5_SET(qpc, qpc, rq_type, MLX5_ZERO_LEN_RQ);
2208 }
2209 if (attr->dbr_umem_valid) {
2210 MLX5_SET(qpc, qpc, dbr_umem_valid,
2211 attr->dbr_umem_valid);
2212 MLX5_SET(qpc, qpc, dbr_umem_id, attr->dbr_umem_id);
2213 }
2214 MLX5_SET64(qpc, qpc, dbr_addr, attr->dbr_address);
2215 MLX5_SET64(create_qp_in, in, wq_umem_offset,
2216 attr->wq_umem_offset);
2217 MLX5_SET(create_qp_in, in, wq_umem_id, attr->wq_umem_id);
2218 MLX5_SET(create_qp_in, in, wq_umem_valid, 1);
2219 } else {
2220 /* Special QP to be managed by FW - no SQ/RQ/CQ/UAR/DB record. */
2221 MLX5_SET(qpc, qpc, rq_type, MLX5_ZERO_LEN_RQ);
2222 MLX5_SET(qpc, qpc, no_sq, 1);
2223 }
2224 qp_obj->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in), out,
2225 sizeof(out));
2226 if (!qp_obj->obj) {
2227 rte_errno = errno;
2228 DRV_LOG(ERR, "Failed to create QP Obj using DevX.");
2229 mlx5_free(qp_obj);
2230 return NULL;
2231 }
2232 qp_obj->id = MLX5_GET(create_qp_out, out, qpn);
2233 return qp_obj;
2234 }
2235
2236 /**
2237 * Modify QP using DevX API.
2238 * Currently supports only force loop-back QP.
2239 *
2240 * @param[in] qp
2241 * Pointer to QP object structure.
2242 * @param [in] qp_st_mod_op
2243 * The QP state modification operation.
2244 * @param [in] remote_qp_id
2245 * The remote QP ID for MLX5_CMD_OP_INIT2RTR_QP operation.
2246 *
2247 * @return
2248 * 0 on success, a negative errno value otherwise and rte_errno is set.
2249 */
2250 int
2251 mlx5_devx_cmd_modify_qp_state(struct mlx5_devx_obj *qp, uint32_t qp_st_mod_op,
2252 uint32_t remote_qp_id)
2253 {
2254 union {
2255 uint32_t rst2init[MLX5_ST_SZ_DW(rst2init_qp_in)];
2256 uint32_t init2rtr[MLX5_ST_SZ_DW(init2rtr_qp_in)];
2257 uint32_t rtr2rts[MLX5_ST_SZ_DW(rtr2rts_qp_in)];
2258 } in;
2259 union {
2260 uint32_t rst2init[MLX5_ST_SZ_DW(rst2init_qp_out)];
2261 uint32_t init2rtr[MLX5_ST_SZ_DW(init2rtr_qp_out)];
2262 uint32_t rtr2rts[MLX5_ST_SZ_DW(rtr2rts_qp_out)];
2263 } out;
2264 void *qpc;
2265 int ret;
2266 unsigned int inlen;
2267 unsigned int outlen;
2268
2269 memset(&in, 0, sizeof(in));
2270 memset(&out, 0, sizeof(out));
2271 MLX5_SET(rst2init_qp_in, &in, opcode, qp_st_mod_op);
2272 switch (qp_st_mod_op) {
2273 case MLX5_CMD_OP_RST2INIT_QP:
2274 MLX5_SET(rst2init_qp_in, &in, qpn, qp->id);
2275 qpc = MLX5_ADDR_OF(rst2init_qp_in, &in, qpc);
2276 MLX5_SET(qpc, qpc, primary_address_path.vhca_port_num, 1);
2277 MLX5_SET(qpc, qpc, rre, 1);
2278 MLX5_SET(qpc, qpc, rwe, 1);
2279 MLX5_SET(qpc, qpc, pm_state, MLX5_QP_PM_MIGRATED);
2280 inlen = sizeof(in.rst2init);
2281 outlen = sizeof(out.rst2init);
2282 break;
2283 case MLX5_CMD_OP_INIT2RTR_QP:
2284 MLX5_SET(init2rtr_qp_in, &in, qpn, qp->id);
2285 qpc = MLX5_ADDR_OF(init2rtr_qp_in, &in, qpc);
2286 MLX5_SET(qpc, qpc, primary_address_path.fl, 1);
2287 MLX5_SET(qpc, qpc, primary_address_path.vhca_port_num, 1);
2288 MLX5_SET(qpc, qpc, mtu, 1);
2289 MLX5_SET(qpc, qpc, log_msg_max, 30);
2290 MLX5_SET(qpc, qpc, remote_qpn, remote_qp_id);
2291 MLX5_SET(qpc, qpc, min_rnr_nak, 0);
2292 inlen = sizeof(in.init2rtr);
2293 outlen = sizeof(out.init2rtr);
2294 break;
2295 case MLX5_CMD_OP_RTR2RTS_QP:
2296 qpc = MLX5_ADDR_OF(rtr2rts_qp_in, &in, qpc);
2297 MLX5_SET(rtr2rts_qp_in, &in, qpn, qp->id);
2298 MLX5_SET(qpc, qpc, primary_address_path.ack_timeout, 16);
2299 MLX5_SET(qpc, qpc, log_ack_req_freq, 0);
2300 MLX5_SET(qpc, qpc, retry_count, 7);
2301 MLX5_SET(qpc, qpc, rnr_retry, 7);
2302 inlen = sizeof(in.rtr2rts);
2303 outlen = sizeof(out.rtr2rts);
2304 break;
2305 default:
2306 DRV_LOG(ERR, "Invalid or unsupported QP modify op %u.",
2307 qp_st_mod_op);
2308 rte_errno = EINVAL;
2309 return -rte_errno;
2310 }
2311 ret = mlx5_glue->devx_obj_modify(qp->obj, &in, inlen, &out, outlen);
2312 if (ret) {
2313 DRV_LOG(ERR, "Failed to modify QP using DevX.");
2314 rte_errno = errno;
2315 return -rte_errno;
2316 }
2317 return ret;
2318 }
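/*
 * Usage sketch (illustrative): drive a loopback RC QP created by
 * mlx5_devx_cmd_create_qp() through the full RST -> INIT -> RTR -> RTS
 * sequence using the three transitions supported above. The remote QP
 * id is only consumed by the INIT2RTR step; for loopback it is
 * typically the QP itself or its pair.
 */
static __rte_unused int
mlx5_devx_example_qp_to_rts(struct mlx5_devx_obj *qp, uint32_t remote_qpn)
{
	int ret;

	ret = mlx5_devx_cmd_modify_qp_state(qp, MLX5_CMD_OP_RST2INIT_QP,
					    remote_qpn);
	if (ret)
		return ret;
	ret = mlx5_devx_cmd_modify_qp_state(qp, MLX5_CMD_OP_INIT2RTR_QP,
					    remote_qpn);
	if (ret)
		return ret;
	return mlx5_devx_cmd_modify_qp_state(qp, MLX5_CMD_OP_RTR2RTS_QP,
					     remote_qpn);
}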
2319
2320 struct mlx5_devx_obj *
2321 mlx5_devx_cmd_create_virtio_q_counters(void *ctx)
2322 {
2323 uint32_t in[MLX5_ST_SZ_DW(create_virtio_q_counters_in)] = {0};
2324 uint32_t out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0};
2325 struct mlx5_devx_obj *counters_obj = mlx5_malloc(MLX5_MEM_ZERO,
2326 sizeof(*counters_obj), 0,
2327 SOCKET_ID_ANY);
2328 void *hdr = MLX5_ADDR_OF(create_virtio_q_counters_in, in, hdr);
2329
2330 if (!counters_obj) {
2331 DRV_LOG(ERR, "Failed to allocate virtio queue counters data.");
2332 rte_errno = ENOMEM;
2333 return NULL;
2334 }
2335 MLX5_SET(general_obj_in_cmd_hdr, hdr, opcode,
2336 MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
2337 MLX5_SET(general_obj_in_cmd_hdr, hdr, obj_type,
2338 MLX5_GENERAL_OBJ_TYPE_VIRTIO_Q_COUNTERS);
2339 counters_obj->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in), out,
2340 sizeof(out));
2341 if (!counters_obj->obj) {
2342 rte_errno = errno;
2343 DRV_LOG(ERR, "Failed to create virtio queue counters Obj using"
2344 " DevX.");
2345 mlx5_free(counters_obj);
2346 return NULL;
2347 }
2348 counters_obj->id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
2349 return counters_obj;
2350 }
2351
2352 int
2353 mlx5_devx_cmd_query_virtio_q_counters(struct mlx5_devx_obj *counters_obj,
2354 struct mlx5_devx_virtio_q_couners_attr *attr)
2355 {
2356 uint32_t in[MLX5_ST_SZ_DW(general_obj_in_cmd_hdr)] = {0};
2357 uint32_t out[MLX5_ST_SZ_DW(query_virtio_q_counters_out)] = {0};
2358 void *hdr = MLX5_ADDR_OF(query_virtio_q_counters_out, in, hdr);
2359 void *virtio_q_counters = MLX5_ADDR_OF(query_virtio_q_counters_out, out,
2360 virtio_q_counters);
2361 int ret;
2362
2363 MLX5_SET(general_obj_in_cmd_hdr, hdr, opcode,
2364 MLX5_CMD_OP_QUERY_GENERAL_OBJECT);
2365 MLX5_SET(general_obj_in_cmd_hdr, hdr, obj_type,
2366 MLX5_GENERAL_OBJ_TYPE_VIRTIO_Q_COUNTERS);
2367 MLX5_SET(general_obj_in_cmd_hdr, hdr, obj_id, counters_obj->id);
2368 ret = mlx5_glue->devx_obj_query(counters_obj->obj, in, sizeof(in), out,
2369 sizeof(out));
2370 if (ret) {
2371 DRV_LOG(ERR, "Failed to query virtio q counters using DevX.");
2372 rte_errno = errno;
2373 return -errno;
2374 }
2375 attr->received_desc = MLX5_GET64(virtio_q_counters, virtio_q_counters,
2376 received_desc);
2377 attr->completed_desc = MLX5_GET64(virtio_q_counters, virtio_q_counters,
2378 completed_desc);
2379 attr->error_cqes = MLX5_GET(virtio_q_counters, virtio_q_counters,
2380 error_cqes);
2381 attr->bad_desc_errors = MLX5_GET(virtio_q_counters, virtio_q_counters,
2382 bad_desc_errors);
2383 attr->exceed_max_chain = MLX5_GET(virtio_q_counters, virtio_q_counters,
2384 exceed_max_chain);
2385 attr->invalid_buffer = MLX5_GET(virtio_q_counters, virtio_q_counters,
2386 invalid_buffer);
2387 return ret;
2388 }
2389
2390 /**
2391 * Create general object of type FLOW_HIT_ASO using DevX API.
2392 *
2393 * @param[in] ctx
2394 * Context returned from mlx5 open_device() glue function.
2395 * @param [in] pd
2396 * PD value to associate the FLOW_HIT_ASO object with.
2397 *
2398 * @return
2399 * The DevX object created, NULL otherwise and rte_errno is set.
2400 */
2401 struct mlx5_devx_obj *
2402 mlx5_devx_cmd_create_flow_hit_aso_obj(void *ctx, uint32_t pd)
2403 {
2404 uint32_t in[MLX5_ST_SZ_DW(create_flow_hit_aso_in)] = {0};
2405 uint32_t out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0};
2406 struct mlx5_devx_obj *flow_hit_aso_obj = NULL;
2407 void *ptr = NULL;
2408
2409 flow_hit_aso_obj = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*flow_hit_aso_obj),
2410 0, SOCKET_ID_ANY);
2411 if (!flow_hit_aso_obj) {
2412 DRV_LOG(ERR, "Failed to allocate FLOW_HIT_ASO object data");
2413 rte_errno = ENOMEM;
2414 return NULL;
2415 }
2416 ptr = MLX5_ADDR_OF(create_flow_hit_aso_in, in, hdr);
2417 MLX5_SET(general_obj_in_cmd_hdr, ptr, opcode,
2418 MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
2419 MLX5_SET(general_obj_in_cmd_hdr, ptr, obj_type,
2420 MLX5_GENERAL_OBJ_TYPE_FLOW_HIT_ASO);
2421 ptr = MLX5_ADDR_OF(create_flow_hit_aso_in, in, flow_hit_aso);
2422 MLX5_SET(flow_hit_aso, ptr, access_pd, pd);
2423 flow_hit_aso_obj->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in),
2424 out, sizeof(out));
2425 if (!flow_hit_aso_obj->obj) {
2426 rte_errno = errno;
2427 DRV_LOG(ERR, "Failed to create FLOW_HIT_ASO obj using DevX.");
2428 mlx5_free(flow_hit_aso_obj);
2429 return NULL;
2430 }
2431 flow_hit_aso_obj->id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
2432 return flow_hit_aso_obj;
2433 }
2434
2435 /**
2436 * Create PD using DevX API.
2437 *
2438 * @param[in] ctx
2439 * Context returned from mlx5 open_device() glue function.
2440 *
2441 * @return
2442 * The DevX object created, NULL otherwise and rte_errno is set.
2443 */
2444 struct mlx5_devx_obj *
2445 mlx5_devx_cmd_alloc_pd(void *ctx)
2446 {
2447 struct mlx5_devx_obj *ppd =
2448 mlx5_malloc(MLX5_MEM_ZERO, sizeof(*ppd), 0, SOCKET_ID_ANY);
2449 u32 in[MLX5_ST_SZ_DW(alloc_pd_in)] = {0};
2450 u32 out[MLX5_ST_SZ_DW(alloc_pd_out)] = {0};
2451
2452 if (!ppd) {
2453 DRV_LOG(ERR, "Failed to allocate PD data.");
2454 rte_errno = ENOMEM;
2455 return NULL;
2456 }
2457 MLX5_SET(alloc_pd_in, in, opcode, MLX5_CMD_OP_ALLOC_PD);
2458 ppd->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in),
2459 out, sizeof(out));
2460 if (!ppd->obj) {
2461 rte_errno = errno;
2462 DRV_LOG(ERR, "Failed to allocate PD Obj using DevX.");
2463 mlx5_free(ppd);
2464 return NULL;
2465 }
2466 ppd->id = MLX5_GET(alloc_pd_out, out, pd);
2467 return ppd;
2468 }
2469
2470 /**
2471 * Create general object of type FLOW_METER_ASO using DevX API.
2472 *
2473 * @param[in] ctx
2474 * Context returned from mlx5 open_device() glue function.
2475 * @param [in] pd
2476 * PD value to associate the FLOW_METER_ASO object with.
2477 * @param [in] log_obj_size
2478 * Log (base 2) of the number of FLOW_METER_ASO objects to allocate
2479 * in one bulk; each object holds two flow meters.
2480 *
2481 * @return
2482 * The DevX object created, NULL otherwise and rte_errno is set.
2483 */
2484 struct mlx5_devx_obj *
2485 mlx5_devx_cmd_create_flow_meter_aso_obj(void *ctx, uint32_t pd,
2486 uint32_t log_obj_size)
2487 {
2488 uint32_t in[MLX5_ST_SZ_DW(create_flow_meter_aso_in)] = {0};
2489 uint32_t out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0};
2490 struct mlx5_devx_obj *flow_meter_aso_obj;
2491 void *ptr;
2492
2493 flow_meter_aso_obj = mlx5_malloc(MLX5_MEM_ZERO,
2494 sizeof(*flow_meter_aso_obj),
2495 0, SOCKET_ID_ANY);
2496 if (!flow_meter_aso_obj) {
2497 DRV_LOG(ERR, "Failed to allocate FLOW_METER_ASO object data");
2498 rte_errno = ENOMEM;
2499 return NULL;
2500 }
2501 ptr = MLX5_ADDR_OF(create_flow_meter_aso_in, in, hdr);
2502 MLX5_SET(general_obj_in_cmd_hdr, ptr, opcode,
2503 MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
2504 MLX5_SET(general_obj_in_cmd_hdr, ptr, obj_type,
2505 MLX5_GENERAL_OBJ_TYPE_FLOW_METER_ASO);
2506 MLX5_SET(general_obj_in_cmd_hdr, ptr, log_obj_range,
2507 log_obj_size);
2508 ptr = MLX5_ADDR_OF(create_flow_meter_aso_in, in, flow_meter_aso);
2509 MLX5_SET(flow_meter_aso, ptr, access_pd, pd);
2510 flow_meter_aso_obj->obj = mlx5_glue->devx_obj_create(
2511 ctx, in, sizeof(in),
2512 out, sizeof(out));
2513 if (!flow_meter_aso_obj->obj) {
2514 rte_errno = errno;
2515 DRV_LOG(ERR, "Failed to create FLOW_METER_ASO obj using DevX.");
2516 mlx5_free(flow_meter_aso_obj);
2517 return NULL;
2518 }
2519 flow_meter_aso_obj->id = MLX5_GET(general_obj_out_cmd_hdr,
2520 out, obj_id);
2521 return flow_meter_aso_obj;
2522 }
2523
2524 /**
2525 * Create general object of type CONN_TRACK_OFFLOAD using DevX API.
2526 *
2527 * @param[in] ctx
2528 * Context returned from mlx5 open_device() glue function.
2529 * @param [in] pd
2530 * PD value to associate the CONN_TRACK_OFFLOAD ASO object with.
2531 * @param [in] log_obj_size
2532 * Log (base 2) of the number of objects to allocate in one
2533 * CONN_TRACK_OFFLOAD bulk allocation.
2534 *
2535 * @return
2536 * The DevX object created, NULL otherwise and rte_errno is set.
2537 */
2538 struct mlx5_devx_obj *
2539 mlx5_devx_cmd_create_conn_track_offload_obj(void *ctx, uint32_t pd,
2540 uint32_t log_obj_size)
2541 {
2542 uint32_t in[MLX5_ST_SZ_DW(create_conn_track_aso_in)] = {0};
2543 uint32_t out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0};
2544 struct mlx5_devx_obj *ct_aso_obj;
2545 void *ptr;
2546
2547 ct_aso_obj = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*ct_aso_obj),
2548 0, SOCKET_ID_ANY);
2549 if (!ct_aso_obj) {
2550 DRV_LOG(ERR, "Failed to allocate CONN_TRACK_OFFLOAD object.");
2551 rte_errno = ENOMEM;
2552 return NULL;
2553 }
2554 ptr = MLX5_ADDR_OF(create_conn_track_aso_in, in, hdr);
2555 MLX5_SET(general_obj_in_cmd_hdr, ptr, opcode,
2556 MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
2557 MLX5_SET(general_obj_in_cmd_hdr, ptr, obj_type,
2558 MLX5_GENERAL_OBJ_TYPE_CONN_TRACK_OFFLOAD);
2559 MLX5_SET(general_obj_in_cmd_hdr, ptr, log_obj_range, log_obj_size);
2560 ptr = MLX5_ADDR_OF(create_conn_track_aso_in, in, conn_track_offload);
2561 MLX5_SET(conn_track_offload, ptr, conn_track_aso_access_pd, pd);
2562 ct_aso_obj->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in),
2563 out, sizeof(out));
2564 if (!ct_aso_obj->obj) {
2565 rte_errno = errno;
2566 DRV_LOG(ERR, "Failed to create CONN_TRACK_OFFLOAD obj by using DevX.");
2567 mlx5_free(ct_aso_obj);
2568 return NULL;
2569 }
2570 ct_aso_obj->id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
2571 return ct_aso_obj;
2572 }
2573
2574 /**
2575 * Create general object of type GENEVE TLV option using DevX API.
2576 *
2577 * @param[in] ctx
2578 * Context returned from mlx5 open_device() glue function.
2579 * @param [in] class
2580 * TLV option class, passed in network byte order.
2581 * @param [in] type
2582 * TLV option type.
2583 * @param [in] len
2584 * TLV option data length.
2585 *
2586 * @return
2587 * The DevX object created, NULL otherwise and rte_errno is set.
2588 */
2589 struct mlx5_devx_obj *
2590 mlx5_devx_cmd_create_geneve_tlv_option(void *ctx,
2591 uint16_t class, uint8_t type, uint8_t len)
2592 {
2593 uint32_t in[MLX5_ST_SZ_DW(create_geneve_tlv_option_in)] = {0};
2594 uint32_t out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0};
2595 struct mlx5_devx_obj *geneve_tlv_opt_obj = mlx5_malloc(MLX5_MEM_ZERO,
2596 sizeof(*geneve_tlv_opt_obj),
2597 0, SOCKET_ID_ANY);
2598
2599 if (!geneve_tlv_opt_obj) {
2600 DRV_LOG(ERR, "Failed to allocate geneve tlv option object.");
2601 rte_errno = ENOMEM;
2602 return NULL;
2603 }
2604 void *hdr = MLX5_ADDR_OF(create_geneve_tlv_option_in, in, hdr);
2605 void *opt = MLX5_ADDR_OF(create_geneve_tlv_option_in, in,
2606 geneve_tlv_opt);
2607 MLX5_SET(general_obj_in_cmd_hdr, hdr, opcode,
2608 MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
2609 MLX5_SET(general_obj_in_cmd_hdr, hdr, obj_type,
2610 MLX5_GENERAL_OBJ_TYPE_GENEVE_TLV_OPT);
2611 MLX5_SET(geneve_tlv_option, opt, option_class,
2612 rte_be_to_cpu_16(class));
2613 MLX5_SET(geneve_tlv_option, opt, option_type, type);
2614 MLX5_SET(geneve_tlv_option, opt, option_data_length, len);
2615 geneve_tlv_opt_obj->obj = mlx5_glue->devx_obj_create(ctx, in,
2616 sizeof(in), out, sizeof(out));
2617 if (!geneve_tlv_opt_obj->obj) {
2618 rte_errno = errno;
2619 DRV_LOG(ERR, "Failed to create Geneve tlv option "
2620 "Obj using DevX.");
2621 mlx5_free(geneve_tlv_opt_obj);
2622 return NULL;
2623 }
2624 geneve_tlv_opt_obj->id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
2625 return geneve_tlv_opt_obj;
2626 }
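/*
 * Usage sketch (illustrative): register a GENEVE TLV option object so
 * the option data can later be matched by flow rules. The class must be
 * passed in network byte order (it is converted back above); the class,
 * type and length values below are arbitrary examples.
 */
static __rte_unused struct mlx5_devx_obj *
mlx5_devx_example_geneve_opt(void *ctx)
{
	return mlx5_devx_cmd_create_geneve_tlv_option(ctx,
						      rte_cpu_to_be_16(0x0107),
						      1 /* type */,
						      2 /* len */);
}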
2627
2628 int
2629 mlx5_devx_cmd_wq_query(void *wq, uint32_t *counter_set_id)
2630 {
2631 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
2632 uint32_t in[MLX5_ST_SZ_DW(query_rq_in)] = {0};
2633 uint32_t out[MLX5_ST_SZ_DW(query_rq_out)] = {0};
2634 int rc;
2635 void *rq_ctx;
2636
2637 MLX5_SET(query_rq_in, in, opcode, MLX5_CMD_OP_QUERY_RQ);
2638 MLX5_SET(query_rq_in, in, rqn, ((struct ibv_wq *)wq)->wq_num);
2639 rc = mlx5_glue->devx_wq_query(wq, in, sizeof(in), out, sizeof(out));
2640 if (rc) {
2641 rte_errno = errno;
2642 DRV_LOG(ERR, "Failed to query WQ counter set ID using DevX - "
2643 "rc = %d, errno = %d.", rc, errno);
2644 return -rc;
2645 }
2646 rq_ctx = MLX5_ADDR_OF(query_rq_out, out, rq_context);
2647 *counter_set_id = MLX5_GET(rqc, rq_ctx, counter_set_id);
2648 return 0;
2649 #else
2650 (void)wq;
2651 (void)counter_set_id;
2652 return -ENOTSUP;
2653 #endif
2654 }
2655
2656 /**
2657 * Allocate queue counters via devx interface.
2658 *
2659 * @param[in] ctx
2660 * Context returned from mlx5 open_device() glue function.
2661 *
2662 * @return
2663 * Pointer to counter object on success, a NULL value otherwise and
2664 * rte_errno is set.
2665 */
2666 struct mlx5_devx_obj *
2667 mlx5_devx_cmd_queue_counter_alloc(void *ctx)
2668 {
2669 struct mlx5_devx_obj *dcs = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*dcs), 0,
2670 SOCKET_ID_ANY);
2671 uint32_t in[MLX5_ST_SZ_DW(alloc_q_counter_in)] = {0};
2672 uint32_t out[MLX5_ST_SZ_DW(alloc_q_counter_out)] = {0};
2673
2674 if (!dcs) {
2675 rte_errno = ENOMEM;
2676 return NULL;
2677 }
2678 MLX5_SET(alloc_q_counter_in, in, opcode, MLX5_CMD_OP_ALLOC_Q_COUNTER);
2679 dcs->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in), out,
2680 sizeof(out));
2681 if (!dcs->obj) {
2682 DRV_LOG(DEBUG, "Can't allocate q counter set by DevX - error "
2683 "%d.", errno);
2684 rte_errno = errno;
2685 mlx5_free(dcs);
2686 return NULL;
2687 }
2688 dcs->id = MLX5_GET(alloc_q_counter_out, out, counter_set_id);
2689 return dcs;
2690 }
2691
2692 /**
2693 * Query queue counters values.
2694 *
2695 * @param[in] dcs
2696 * DevX object of the queue counter set.
2697 * @param[in] clear
2698 * Whether hardware should clear the counters after the query or not.
2699 * @param[out] out_of_buffers
2700 * Number of packets dropped due to lack of WQEs for the associated QPs/RQs.
2701 *
2702 * @return
2703 * 0 on success, a negative value otherwise.
2704 */
2705 int
2706 mlx5_devx_cmd_queue_counter_query(struct mlx5_devx_obj *dcs, int clear,
2707 uint32_t *out_of_buffers)
2708 {
2709 uint32_t out[MLX5_ST_SZ_BYTES(query_q_counter_out)] = {0};
2710 uint32_t in[MLX5_ST_SZ_DW(query_q_counter_in)] = {0};
2711 int rc;
2712
2713 MLX5_SET(query_q_counter_in, in, opcode,
2714 MLX5_CMD_OP_QUERY_Q_COUNTER);
2715 MLX5_SET(query_q_counter_in, in, op_mod, 0);
2716 MLX5_SET(query_q_counter_in, in, counter_set_id, dcs->id);
2717 MLX5_SET(query_q_counter_in, in, clear, !!clear);
2718 rc = mlx5_glue->devx_obj_query(dcs->obj, in, sizeof(in), out,
2719 sizeof(out));
2720 if (rc) {
2721 DRV_LOG(ERR, "Failed to query devx q counter set - rc %d", rc);
2722 rte_errno = rc;
2723 return -rc;
2724 }
2725 *out_of_buffers = MLX5_GET(query_q_counter_out, out, out_of_buffer);
2726 return 0;
2727 }
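/*
 * Usage sketch (illustrative): read and clear the out_of_buffer drop
 * counter once per statistics interval. The counter set id (dcs->id) is
 * assumed to have been attached to the RQs/QPs of interest at their
 * creation time.
 */
static __rte_unused int
mlx5_devx_example_poll_drops(struct mlx5_devx_obj *dcs, uint32_t *drops)
{
	/* clear=1 makes every query return the delta since the last one. */
	return mlx5_devx_cmd_queue_counter_query(dcs, 1, drops);
}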
2728
2729 /**
2730 * Create general object of type DEK using DevX API.
2731 *
2732 * @param[in] ctx
2733 * Context returned from mlx5 open_device() glue function.
2734 * @param [in] attr
2735 * Pointer to DEK attributes structure.
2736 *
2737 * @return
2738 * The DevX object created, NULL otherwise and rte_errno is set.
2739 */
2740 struct mlx5_devx_obj *
2741 mlx5_devx_cmd_create_dek_obj(void *ctx, struct mlx5_devx_dek_attr *attr)
2742 {
2743 uint32_t in[MLX5_ST_SZ_DW(create_dek_in)] = {0};
2744 uint32_t out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0};
2745 struct mlx5_devx_obj *dek_obj = NULL;
2746 void *ptr = NULL, *key_addr = NULL;
2747
2748 dek_obj = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*dek_obj),
2749 0, SOCKET_ID_ANY);
2750 if (dek_obj == NULL) {
2751 DRV_LOG(ERR, "Failed to allocate DEK object data");
2752 rte_errno = ENOMEM;
2753 return NULL;
2754 }
2755 ptr = MLX5_ADDR_OF(create_dek_in, in, hdr);
2756 MLX5_SET(general_obj_in_cmd_hdr, ptr, opcode,
2757 MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
2758 MLX5_SET(general_obj_in_cmd_hdr, ptr, obj_type,
2759 MLX5_GENERAL_OBJ_TYPE_DEK);
2760 ptr = MLX5_ADDR_OF(create_dek_in, in, dek);
2761 MLX5_SET(dek, ptr, key_size, attr->key_size);
2762 MLX5_SET(dek, ptr, has_keytag, attr->has_keytag);
2763 MLX5_SET(dek, ptr, key_purpose, attr->key_purpose);
2764 MLX5_SET(dek, ptr, pd, attr->pd);
2765 MLX5_SET64(dek, ptr, opaque, attr->opaque);
2766 key_addr = MLX5_ADDR_OF(dek, ptr, key);
2767 memcpy(key_addr, (void *)(attr->key), MLX5_CRYPTO_KEY_MAX_SIZE);
2768 dek_obj->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in),
2769 out, sizeof(out));
2770 if (dek_obj->obj == NULL) {
2771 rte_errno = errno;
2772 DRV_LOG(ERR, "Failed to create DEK obj using DevX.");
2773 mlx5_free(dek_obj);
2774 return NULL;
2775 }
2776 dek_obj->id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
2777 return dek_obj;
2778 }
2779
2780 /**
2781 * Create general object of type IMPORT_KEK using DevX API.
2782 *
2783 * @param[in] ctx
2784 * Context returned from mlx5 open_device() glue function.
2785 * @param [in] attr
2786 * Pointer to IMPORT_KEK attributes structure.
2787 *
2788 * @return
2789 * The DevX object created, NULL otherwise and rte_errno is set.
2790 */
2791 struct mlx5_devx_obj *
2792 mlx5_devx_cmd_create_import_kek_obj(void *ctx,
2793 struct mlx5_devx_import_kek_attr *attr)
2794 {
2795 uint32_t in[MLX5_ST_SZ_DW(create_import_kek_in)] = {0};
2796 uint32_t out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0};
2797 struct mlx5_devx_obj *import_kek_obj = NULL;
2798 void *ptr = NULL, *key_addr = NULL;
2799
2800 import_kek_obj = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*import_kek_obj),
2801 0, SOCKET_ID_ANY);
2802 if (import_kek_obj == NULL) {
2803 DRV_LOG(ERR, "Failed to allocate IMPORT_KEK object data");
2804 rte_errno = ENOMEM;
2805 return NULL;
2806 }
2807 ptr = MLX5_ADDR_OF(create_import_kek_in, in, hdr);
2808 MLX5_SET(general_obj_in_cmd_hdr, ptr, opcode,
2809 MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
2810 MLX5_SET(general_obj_in_cmd_hdr, ptr, obj_type,
2811 MLX5_GENERAL_OBJ_TYPE_IMPORT_KEK);
2812 ptr = MLX5_ADDR_OF(create_import_kek_in, in, import_kek);
2813 MLX5_SET(import_kek, ptr, key_size, attr->key_size);
2814 key_addr = MLX5_ADDR_OF(import_kek, ptr, key);
2815 memcpy(key_addr, (void *)(attr->key), MLX5_CRYPTO_KEY_MAX_SIZE);
2816 import_kek_obj->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in),
2817 out, sizeof(out));
2818 if (import_kek_obj->obj == NULL) {
2819 rte_errno = errno;
2820 DRV_LOG(ERR, "Failed to create IMPORT_KEK object using DevX.");
2821 mlx5_free(import_kek_obj);
2822 return NULL;
2823 }
2824 import_kek_obj->id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
2825 return import_kek_obj;
2826 }
2827
2828 /**
2829 * Create general object of type CREDENTIAL using DevX API.
2830 *
2831 * @param[in] ctx
2832 * Context returned from mlx5 open_device() glue function.
2833 * @param [in] attr
2834 * Pointer to CREDENTIAL attributes structure.
2835 *
2836 * @return
2837 * The DevX object created, NULL otherwise and rte_errno is set.
2838 */
2839 struct mlx5_devx_obj *
2840 mlx5_devx_cmd_create_credential_obj(void *ctx,
2841 struct mlx5_devx_credential_attr *attr)
2842 {
2843 uint32_t in[MLX5_ST_SZ_DW(create_credential_in)] = {0};
2844 uint32_t out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0};
2845 struct mlx5_devx_obj *credential_obj = NULL;
2846 void *ptr = NULL, *credential_addr = NULL;
2847
2848 credential_obj = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*credential_obj),
2849 0, SOCKET_ID_ANY);
2850 if (credential_obj == NULL) {
2851 DRV_LOG(ERR, "Failed to allocate CREDENTIAL object data");
2852 rte_errno = ENOMEM;
2853 return NULL;
2854 }
2855 ptr = MLX5_ADDR_OF(create_credential_in, in, hdr);
2856 MLX5_SET(general_obj_in_cmd_hdr, ptr, opcode,
2857 MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
2858 MLX5_SET(general_obj_in_cmd_hdr, ptr, obj_type,
2859 MLX5_GENERAL_OBJ_TYPE_CREDENTIAL);
2860 ptr = MLX5_ADDR_OF(create_credential_in, in, credential);
2861 MLX5_SET(credential, ptr, credential_role, attr->credential_role);
2862 credential_addr = MLX5_ADDR_OF(credential, ptr, credential);
2863 memcpy(credential_addr, (void *)(attr->credential),
2864 MLX5_CRYPTO_CREDENTIAL_SIZE);
2865 credential_obj->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in),
2866 out, sizeof(out));
2867 if (credential_obj->obj == NULL) {
2868 rte_errno = errno;
2869 DRV_LOG(ERR, "Failed to create CREDENTIAL object using DevX.");
2870 mlx5_free(credential_obj);
2871 return NULL;
2872 }
2873 credential_obj->id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
2874 return credential_obj;
2875 }
2876
2877 /**
2878 * Create general object of type CRYPTO_LOGIN using DevX API.
2879 *
2880 * @param[in] ctx
2881 * Context returned from mlx5 open_device() glue function.
2882 * @param [in] attr
2883 * Pointer to CRYPTO_LOGIN attributes structure.
2884 *
2885 * @return
2886 * The DevX object created, NULL otherwise and rte_errno is set.
2887 */
2888 struct mlx5_devx_obj *
2889 mlx5_devx_cmd_create_crypto_login_obj(void *ctx,
2890 struct mlx5_devx_crypto_login_attr *attr)
2891 {
2892 uint32_t in[MLX5_ST_SZ_DW(create_crypto_login_in)] = {0};
2893 uint32_t out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0};
2894 struct mlx5_devx_obj *crypto_login_obj = NULL;
2895 void *ptr = NULL, *credential_addr = NULL;
2896
2897 crypto_login_obj = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*crypto_login_obj),
2898 0, SOCKET_ID_ANY);
2899 if (crypto_login_obj == NULL) {
2900 DRV_LOG(ERR, "Failed to allocate CRYPTO_LOGIN object data");
2901 rte_errno = ENOMEM;
2902 return NULL;
2903 }
2904 ptr = MLX5_ADDR_OF(create_crypto_login_in, in, hdr);
2905 MLX5_SET(general_obj_in_cmd_hdr, ptr, opcode,
2906 MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
2907 MLX5_SET(general_obj_in_cmd_hdr, ptr, obj_type,
2908 MLX5_GENERAL_OBJ_TYPE_CRYPTO_LOGIN);
2909 ptr = MLX5_ADDR_OF(create_crypto_login_in, in, crypto_login);
2910 MLX5_SET(crypto_login, ptr, credential_pointer,
2911 attr->credential_pointer);
2912 MLX5_SET(crypto_login, ptr, session_import_kek_ptr,
2913 attr->session_import_kek_ptr);
2914 credential_addr = MLX5_ADDR_OF(crypto_login, ptr, credential);
2915 memcpy(credential_addr, (void *)(attr->credential),
2916 MLX5_CRYPTO_CREDENTIAL_SIZE);
2917 crypto_login_obj->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in),
2918 out, sizeof(out));
2919 if (crypto_login_obj->obj == NULL) {
2920 rte_errno = errno;
2921 DRV_LOG(ERR, "Failed to create CRYPTO_LOGIN obj using DevX.");
2922 mlx5_free(crypto_login_obj);
2923 return NULL;
2924 }
2925 crypto_login_obj->id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
2926 return crypto_login_obj;
2927 }
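/*
 * Usage sketch (illustrative): the crypto objects above are created as
 * a chain - import KEK and credential first, then a crypto login
 * session that references both by index. The pointer indices and the
 * credential buffer are assumptions owned by the caller.
 */
static __rte_unused struct mlx5_devx_obj *
mlx5_devx_example_crypto_login(void *ctx, uint32_t credential_ptr,
			       uint32_t kek_ptr, const uint8_t *credential)
{
	struct mlx5_devx_crypto_login_attr attr = {
		.credential_pointer = credential_ptr,
		.session_import_kek_ptr = kek_ptr,
	};

	memcpy(attr.credential, credential, MLX5_CRYPTO_CREDENTIAL_SIZE);
	return mlx5_devx_cmd_create_crypto_login_obj(ctx, &attr);
}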
2928
2929 /**
2930 * Query LAG context.
2931 *
2932 * @param[in] ctx
2933 * Pointer to ibv_context, returned from mlx5dv_open_device.
2934 * @param[out] lag_ctx
2935 * Pointer to struct mlx5_devx_lag_context, to be set by the routine.
2936 *
2937 * @return
2938 * 0 on success, a negative value otherwise.
2939 */
2940 int
2941 mlx5_devx_cmd_query_lag(void *ctx,
2942 struct mlx5_devx_lag_context *lag_ctx)
2943 {
2944 uint32_t in[MLX5_ST_SZ_DW(query_lag_in)] = {0};
2945 uint32_t out[MLX5_ST_SZ_DW(query_lag_out)] = {0};
2946 void *lctx;
2947 int rc;
2948
2949 MLX5_SET(query_lag_in, in, opcode, MLX5_CMD_OP_QUERY_LAG);
2950 rc = mlx5_glue->devx_general_cmd(ctx, in, sizeof(in), out, sizeof(out));
2951 if (rc)
2952 goto error;
2953 lctx = MLX5_ADDR_OF(query_lag_out, out, context);
2954 lag_ctx->fdb_selection_mode = MLX5_GET(lag_context, lctx,
2955 fdb_selection_mode);
2956 lag_ctx->port_select_mode = MLX5_GET(lag_context, lctx,
2957 port_select_mode);
2958 lag_ctx->lag_state = MLX5_GET(lag_context, lctx, lag_state);
2959 lag_ctx->tx_remap_affinity_2 = MLX5_GET(lag_context, lctx,
2960 tx_remap_affinity_2);
2961 lag_ctx->tx_remap_affinity_1 = MLX5_GET(lag_context, lctx,
2962 tx_remap_affinity_1);
2963 return 0;
2964 error:
2965 rc = (rc > 0) ? -rc : rc;
2966 return rc;
2967 }
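/*
 * Usage sketch (illustrative): query the LAG context after a bonding
 * reconfiguration and log the resulting state and Tx port affinities.
 */
static __rte_unused void
mlx5_devx_example_log_lag(void *ctx)
{
	struct mlx5_devx_lag_context lag = { 0 };

	if (mlx5_devx_cmd_query_lag(ctx, &lag))
		return;
	DRV_LOG(DEBUG, "LAG state %u, Tx affinity %u/%u.",
		lag.lag_state, lag.tx_remap_affinity_1,
		lag.tx_remap_affinity_2);
}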
2968