1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright (c) 2015-2020 Amazon.com, Inc. or its affiliates.
3 * All rights reserved.
4 */
5
6 #include "ena_com.h"
7
8 /*****************************************************************************/
9 /*****************************************************************************/
10
11 /* Timeout in micro-sec */
12 #define ADMIN_CMD_TIMEOUT_US (3000000)
13
14 #define ENA_ASYNC_QUEUE_DEPTH 16
15 #define ENA_ADMIN_QUEUE_DEPTH 32
16
17 #define ENA_CTRL_MAJOR 0
18 #define ENA_CTRL_MINOR 0
19 #define ENA_CTRL_SUB_MINOR 1
20
21 #define MIN_ENA_CTRL_VER \
22 (((ENA_CTRL_MAJOR) << \
23 (ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_SHIFT)) | \
24 ((ENA_CTRL_MINOR) << \
25 (ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_SHIFT)) | \
26 (ENA_CTRL_SUB_MINOR))
27
28 #define ENA_DMA_ADDR_TO_UINT32_LOW(x) ((u32)((u64)(x)))
29 #define ENA_DMA_ADDR_TO_UINT32_HIGH(x) ((u32)(((u64)(x)) >> 32))
30
31 #define ENA_MMIO_READ_TIMEOUT 0xFFFFFFFF
32
33 #define ENA_COM_BOUNCE_BUFFER_CNTRL_CNT 4
34
35 #define ENA_REGS_ADMIN_INTR_MASK 1
36
37 #define ENA_MIN_ADMIN_POLL_US 100
38
39 #define ENA_MAX_ADMIN_POLL_US 5000
40
41 /*****************************************************************************/
42 /*****************************************************************************/
43 /*****************************************************************************/
44
45 enum ena_cmd_status {
46 ENA_CMD_SUBMITTED,
47 ENA_CMD_COMPLETED,
48 /* Abort - canceled by the driver */
49 ENA_CMD_ABORTED,
50 };
51
52 struct ena_comp_ctx {
53 ena_wait_event_t wait_event;
54 struct ena_admin_acq_entry *user_cqe;
55 u32 comp_size;
56 enum ena_cmd_status status;
57 /* status from the device */
58 u8 comp_status;
59 u8 cmd_opcode;
60 bool occupied;
61 };
62
63 struct ena_com_stats_ctx {
64 struct ena_admin_aq_get_stats_cmd get_cmd;
65 struct ena_admin_acq_get_stats_resp get_resp;
66 };
67
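/* Split a DMA address into the low/high fields of an ena_common_mem_addr
 * descriptor, after verifying that the address fits within the DMA address
 * width reported by the device.
 */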
68 static int ena_com_mem_addr_set(struct ena_com_dev *ena_dev,
69 struct ena_common_mem_addr *ena_addr,
70 dma_addr_t addr)
71 {
72 if ((addr & GENMASK_ULL(ena_dev->dma_addr_bits - 1, 0)) != addr) {
73 ena_trc_err("dma address has more bits that the device supports\n");
74 return ENA_COM_INVAL;
75 }
76
77 ena_addr->mem_addr_low = lower_32_bits(addr);
78 ena_addr->mem_addr_high = (u16)upper_32_bits(addr);
79
80 return 0;
81 }
82
83 static int ena_com_admin_init_sq(struct ena_com_admin_queue *queue)
84 {
85 struct ena_com_admin_sq *sq = &queue->sq;
86 u16 size = ADMIN_SQ_SIZE(queue->q_depth);
87
88 ENA_MEM_ALLOC_COHERENT(queue->q_dmadev, size, sq->entries, sq->dma_addr,
89 sq->mem_handle);
90
91 if (!sq->entries) {
92 ena_trc_err("memory allocation failed\n");
93 return ENA_COM_NO_MEM;
94 }
95
96 sq->head = 0;
97 sq->tail = 0;
98 sq->phase = 1;
99
100 sq->db_addr = NULL;
101
102 return 0;
103 }
104
105 static int ena_com_admin_init_cq(struct ena_com_admin_queue *queue)
106 {
107 struct ena_com_admin_cq *cq = &queue->cq;
108 u16 size = ADMIN_CQ_SIZE(queue->q_depth);
109
110 ENA_MEM_ALLOC_COHERENT(queue->q_dmadev, size, cq->entries, cq->dma_addr,
111 cq->mem_handle);
112
113 if (!cq->entries) {
114 ena_trc_err("memory allocation failed\n");
115 return ENA_COM_NO_MEM;
116 }
117
118 cq->head = 0;
119 cq->phase = 1;
120
121 return 0;
122 }
123
124 static int ena_com_admin_init_aenq(struct ena_com_dev *dev,
125 struct ena_aenq_handlers *aenq_handlers)
126 {
127 struct ena_com_aenq *aenq = &dev->aenq;
128 u32 addr_low, addr_high, aenq_caps;
129 u16 size;
130
131 dev->aenq.q_depth = ENA_ASYNC_QUEUE_DEPTH;
132 size = ADMIN_AENQ_SIZE(ENA_ASYNC_QUEUE_DEPTH);
133 ENA_MEM_ALLOC_COHERENT(dev->dmadev, size,
134 aenq->entries,
135 aenq->dma_addr,
136 aenq->mem_handle);
137
138 if (!aenq->entries) {
139 ena_trc_err("memory allocation failed\n");
140 return ENA_COM_NO_MEM;
141 }
142
143 aenq->head = aenq->q_depth;
144 aenq->phase = 1;
145
146 addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(aenq->dma_addr);
147 addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(aenq->dma_addr);
148
149 ENA_REG_WRITE32(dev->bus, addr_low, dev->reg_bar + ENA_REGS_AENQ_BASE_LO_OFF);
150 ENA_REG_WRITE32(dev->bus, addr_high, dev->reg_bar + ENA_REGS_AENQ_BASE_HI_OFF);
151
152 aenq_caps = 0;
153 aenq_caps |= dev->aenq.q_depth & ENA_REGS_AENQ_CAPS_AENQ_DEPTH_MASK;
154 aenq_caps |= (sizeof(struct ena_admin_aenq_entry) <<
155 ENA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_SHIFT) &
156 ENA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_MASK;
157 ENA_REG_WRITE32(dev->bus, aenq_caps, dev->reg_bar + ENA_REGS_AENQ_CAPS_OFF);
158
159 if (unlikely(!aenq_handlers)) {
160 ena_trc_err("aenq handlers pointer is NULL\n");
161 return ENA_COM_INVAL;
162 }
163
164 aenq->aenq_handlers = aenq_handlers;
165
166 return 0;
167 }
168
169 static void comp_ctxt_release(struct ena_com_admin_queue *queue,
170 struct ena_comp_ctx *comp_ctx)
171 {
172 comp_ctx->occupied = false;
173 ATOMIC32_DEC(&queue->outstanding_cmds);
174 }
175
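/* Look up the completion context of a given command id. When capture is
 * true the context is marked occupied and the outstanding command counter
 * is incremented; the caller must later release it via comp_ctxt_release().
 */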
176 static struct ena_comp_ctx *get_comp_ctxt(struct ena_com_admin_queue *queue,
177 u16 command_id, bool capture)
178 {
179 if (unlikely(command_id >= queue->q_depth)) {
180 ena_trc_err("command id is larger than the queue size. cmd_id: %u queue size %d\n",
181 command_id, queue->q_depth);
182 return NULL;
183 }
184
185 if (unlikely(!queue->comp_ctx)) {
186 ena_trc_err("Completion context is NULL\n");
187 return NULL;
188 }
189
190 if (unlikely(queue->comp_ctx[command_id].occupied && capture)) {
191 ena_trc_err("Completion context is occupied\n");
192 return NULL;
193 }
194
195 if (capture) {
196 ATOMIC32_INC(&queue->outstanding_cmds);
197 queue->comp_ctx[command_id].occupied = true;
198 }
199
200 return &queue->comp_ctx[command_id];
201 }
202
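/* Copy an admin command into the next free admin SQ entry (tagging it with
 * the current phase bit and command id), reserve a completion context for
 * it, advance the tail and ring the SQ doorbell.
 * Must be called with the admin queue lock held.
 */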
203 static struct ena_comp_ctx *__ena_com_submit_admin_cmd(struct ena_com_admin_queue *admin_queue,
204 struct ena_admin_aq_entry *cmd,
205 size_t cmd_size_in_bytes,
206 struct ena_admin_acq_entry *comp,
207 size_t comp_size_in_bytes)
208 {
209 struct ena_comp_ctx *comp_ctx;
210 u16 tail_masked, cmd_id;
211 u16 queue_size_mask;
212 u16 cnt;
213
214 queue_size_mask = admin_queue->q_depth - 1;
215
216 tail_masked = admin_queue->sq.tail & queue_size_mask;
217
218 /* In case of queue FULL */
219 cnt = (u16)ATOMIC32_READ(&admin_queue->outstanding_cmds);
220 if (cnt >= admin_queue->q_depth) {
221 ena_trc_dbg("admin queue is full.\n");
222 admin_queue->stats.out_of_space++;
223 return ERR_PTR(ENA_COM_NO_SPACE);
224 }
225
226 cmd_id = admin_queue->curr_cmd_id;
227
228 cmd->aq_common_descriptor.flags |= admin_queue->sq.phase &
229 ENA_ADMIN_AQ_COMMON_DESC_PHASE_MASK;
230
231 cmd->aq_common_descriptor.command_id |= cmd_id &
232 ENA_ADMIN_AQ_COMMON_DESC_COMMAND_ID_MASK;
233
234 comp_ctx = get_comp_ctxt(admin_queue, cmd_id, true);
235 if (unlikely(!comp_ctx))
236 return ERR_PTR(ENA_COM_INVAL);
237
238 comp_ctx->status = ENA_CMD_SUBMITTED;
239 comp_ctx->comp_size = (u32)comp_size_in_bytes;
240 comp_ctx->user_cqe = comp;
241 comp_ctx->cmd_opcode = cmd->aq_common_descriptor.opcode;
242
243 ENA_WAIT_EVENT_CLEAR(comp_ctx->wait_event);
244
245 memcpy(&admin_queue->sq.entries[tail_masked], cmd, cmd_size_in_bytes);
246
247 admin_queue->curr_cmd_id = (admin_queue->curr_cmd_id + 1) &
248 queue_size_mask;
249
250 admin_queue->sq.tail++;
251 admin_queue->stats.submitted_cmd++;
252
253 if (unlikely((admin_queue->sq.tail & queue_size_mask) == 0))
254 admin_queue->sq.phase = !admin_queue->sq.phase;
255
256 ENA_DB_SYNC(&admin_queue->sq.mem_handle);
257 ENA_REG_WRITE32(admin_queue->bus, admin_queue->sq.tail,
258 admin_queue->sq.db_addr);
259
260 return comp_ctx;
261 }
262
263 static int ena_com_init_comp_ctxt(struct ena_com_admin_queue *queue)
264 {
265 size_t size = queue->q_depth * sizeof(struct ena_comp_ctx);
266 struct ena_comp_ctx *comp_ctx;
267 u16 i;
268
269 queue->comp_ctx = ENA_MEM_ALLOC(queue->q_dmadev, size);
270 if (unlikely(!queue->comp_ctx)) {
271 ena_trc_err("memory allocation failed\n");
272 return ENA_COM_NO_MEM;
273 }
274
275 for (i = 0; i < queue->q_depth; i++) {
276 comp_ctx = get_comp_ctxt(queue, i, false);
277 if (comp_ctx)
278 ENA_WAIT_EVENT_INIT(comp_ctx->wait_event);
279 }
280
281 return 0;
282 }
283
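/* Locked wrapper around __ena_com_submit_admin_cmd(): takes the admin queue
 * lock, refuses new commands when the queue is not in the running state and
 * marks the queue as not running if the submission itself fails.
 */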
284 static struct ena_comp_ctx *ena_com_submit_admin_cmd(struct ena_com_admin_queue *admin_queue,
285 struct ena_admin_aq_entry *cmd,
286 size_t cmd_size_in_bytes,
287 struct ena_admin_acq_entry *comp,
288 size_t comp_size_in_bytes)
289 {
290 unsigned long flags = 0;
291 struct ena_comp_ctx *comp_ctx;
292
293 ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags);
294 if (unlikely(!admin_queue->running_state)) {
295 ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);
296 return ERR_PTR(ENA_COM_NO_DEVICE);
297 }
298 comp_ctx = __ena_com_submit_admin_cmd(admin_queue, cmd,
299 cmd_size_in_bytes,
300 comp,
301 comp_size_in_bytes);
302 if (IS_ERR(comp_ctx))
303 admin_queue->running_state = false;
304 ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);
305
306 return comp_ctx;
307 }
308
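/* Allocate the resources of an IO submission queue. For host-memory queues
 * the descriptor ring is allocated (preferably on the requested NUMA node);
 * for device-memory (LLQ) queues a set of bounce buffers is allocated and
 * the LLQ configuration is copied into the queue.
 */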
309 static int ena_com_init_io_sq(struct ena_com_dev *ena_dev,
310 struct ena_com_create_io_ctx *ctx,
311 struct ena_com_io_sq *io_sq)
312 {
313 size_t size;
314 int dev_node = 0;
315
316 memset(&io_sq->desc_addr, 0x0, sizeof(io_sq->desc_addr));
317
318 io_sq->dma_addr_bits = (u8)ena_dev->dma_addr_bits;
319 io_sq->desc_entry_size =
320 (io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX) ?
321 sizeof(struct ena_eth_io_tx_desc) :
322 sizeof(struct ena_eth_io_rx_desc);
323
324 size = io_sq->desc_entry_size * io_sq->q_depth;
325 io_sq->bus = ena_dev->bus;
326
327 if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST) {
328 ENA_MEM_ALLOC_COHERENT_NODE(ena_dev->dmadev,
329 size,
330 io_sq->desc_addr.virt_addr,
331 io_sq->desc_addr.phys_addr,
332 io_sq->desc_addr.mem_handle,
333 ctx->numa_node,
334 dev_node);
335 if (!io_sq->desc_addr.virt_addr) {
336 ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev,
337 size,
338 io_sq->desc_addr.virt_addr,
339 io_sq->desc_addr.phys_addr,
340 io_sq->desc_addr.mem_handle);
341 }
342
343 if (!io_sq->desc_addr.virt_addr) {
344 ena_trc_err("memory allocation failed\n");
345 return ENA_COM_NO_MEM;
346 }
347 }
348
349 if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
350 /* Allocate bounce buffers */
351 io_sq->bounce_buf_ctrl.buffer_size =
352 ena_dev->llq_info.desc_list_entry_size;
353 io_sq->bounce_buf_ctrl.buffers_num =
354 ENA_COM_BOUNCE_BUFFER_CNTRL_CNT;
355 io_sq->bounce_buf_ctrl.next_to_use = 0;
356
357 size = io_sq->bounce_buf_ctrl.buffer_size *
358 io_sq->bounce_buf_ctrl.buffers_num;
359
360 ENA_MEM_ALLOC_NODE(ena_dev->dmadev,
361 size,
362 io_sq->bounce_buf_ctrl.base_buffer,
363 ctx->numa_node,
364 dev_node);
365 if (!io_sq->bounce_buf_ctrl.base_buffer)
366 io_sq->bounce_buf_ctrl.base_buffer = ENA_MEM_ALLOC(ena_dev->dmadev, size);
367
368 if (!io_sq->bounce_buf_ctrl.base_buffer) {
369 ena_trc_err("bounce buffer memory allocation failed\n");
370 return ENA_COM_NO_MEM;
371 }
372
373 memcpy(&io_sq->llq_info, &ena_dev->llq_info,
374 sizeof(io_sq->llq_info));
375
376 /* Initialize the first bounce buffer */
377 io_sq->llq_buf_ctrl.curr_bounce_buf =
378 ena_com_get_next_bounce_buffer(&io_sq->bounce_buf_ctrl);
379 memset(io_sq->llq_buf_ctrl.curr_bounce_buf,
380 0x0, io_sq->llq_info.desc_list_entry_size);
381 io_sq->llq_buf_ctrl.descs_left_in_line =
382 io_sq->llq_info.descs_num_before_header;
383 io_sq->disable_meta_caching =
384 io_sq->llq_info.disable_meta_caching;
385
386 if (io_sq->llq_info.max_entries_in_tx_burst > 0)
387 io_sq->entries_in_tx_burst_left =
388 io_sq->llq_info.max_entries_in_tx_burst;
389 }
390
391 io_sq->tail = 0;
392 io_sq->next_to_comp = 0;
393 io_sq->phase = 1;
394
395 return 0;
396 }
397
398 static int ena_com_init_io_cq(struct ena_com_dev *ena_dev,
399 struct ena_com_create_io_ctx *ctx,
400 struct ena_com_io_cq *io_cq)
401 {
402 size_t size;
403 int prev_node = 0;
404
405 memset(&io_cq->cdesc_addr, 0x0, sizeof(io_cq->cdesc_addr));
406
407 /* Use the basic completion descriptor for Rx */
408 io_cq->cdesc_entry_size_in_bytes =
409 (io_cq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX) ?
410 sizeof(struct ena_eth_io_tx_cdesc) :
411 sizeof(struct ena_eth_io_rx_cdesc_base);
412
413 size = io_cq->cdesc_entry_size_in_bytes * io_cq->q_depth;
414 io_cq->bus = ena_dev->bus;
415
416 ENA_MEM_ALLOC_COHERENT_NODE_ALIGNED(ena_dev->dmadev,
417 size,
418 io_cq->cdesc_addr.virt_addr,
419 io_cq->cdesc_addr.phys_addr,
420 io_cq->cdesc_addr.mem_handle,
421 ctx->numa_node,
422 prev_node,
423 ENA_CDESC_RING_SIZE_ALIGNMENT);
424 if (!io_cq->cdesc_addr.virt_addr) {
425 ENA_MEM_ALLOC_COHERENT_ALIGNED(ena_dev->dmadev,
426 size,
427 io_cq->cdesc_addr.virt_addr,
428 io_cq->cdesc_addr.phys_addr,
429 io_cq->cdesc_addr.mem_handle,
430 ENA_CDESC_RING_SIZE_ALIGNMENT);
431 }
432
433 if (!io_cq->cdesc_addr.virt_addr) {
434 ena_trc_err("memory allocation failed\n");
435 return ENA_COM_NO_MEM;
436 }
437
438 io_cq->phase = 1;
439 io_cq->head = 0;
440
441 return 0;
442 }
443
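/* Process a single admin completion entry: locate its completion context by
 * command id, record the completion status, copy the entry to the caller's
 * buffer if one was provided and, in interrupt mode, wake up the waiter.
 */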
444 static void ena_com_handle_single_admin_completion(struct ena_com_admin_queue *admin_queue,
445 struct ena_admin_acq_entry *cqe)
446 {
447 struct ena_comp_ctx *comp_ctx;
448 u16 cmd_id;
449
450 cmd_id = cqe->acq_common_descriptor.command &
451 ENA_ADMIN_ACQ_COMMON_DESC_COMMAND_ID_MASK;
452
453 comp_ctx = get_comp_ctxt(admin_queue, cmd_id, false);
454 if (unlikely(!comp_ctx)) {
455 ena_trc_err("comp_ctx is NULL. Changing the admin queue running state\n");
456 admin_queue->running_state = false;
457 return;
458 }
459
460 comp_ctx->status = ENA_CMD_COMPLETED;
461 comp_ctx->comp_status = cqe->acq_common_descriptor.status;
462
463 if (comp_ctx->user_cqe)
464 memcpy(comp_ctx->user_cqe, (void *)cqe, comp_ctx->comp_size);
465
466 if (!admin_queue->polling)
467 ENA_WAIT_EVENT_SIGNAL(comp_ctx->wait_event);
468 }
469
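/* Walk the admin completion queue and handle every entry whose phase bit
 * matches the expected phase, then advance the CQ/SQ head counters by the
 * number of completions that were consumed.
 */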
470 static void ena_com_handle_admin_completion(struct ena_com_admin_queue *admin_queue)
471 {
472 struct ena_admin_acq_entry *cqe = NULL;
473 u16 comp_num = 0;
474 u16 head_masked;
475 u8 phase;
476
477 head_masked = admin_queue->cq.head & (admin_queue->q_depth - 1);
478 phase = admin_queue->cq.phase;
479
480 cqe = &admin_queue->cq.entries[head_masked];
481
482 /* Go over all the completions */
483 while ((READ_ONCE8(cqe->acq_common_descriptor.flags) &
484 ENA_ADMIN_ACQ_COMMON_DESC_PHASE_MASK) == phase) {
485 /* Do not read the rest of the completion entry before the
486 * phase bit has been validated
487 */
488 dma_rmb();
489 ena_com_handle_single_admin_completion(admin_queue, cqe);
490
491 head_masked++;
492 comp_num++;
493 if (unlikely(head_masked == admin_queue->q_depth)) {
494 head_masked = 0;
495 phase = !phase;
496 }
497
498 cqe = &admin_queue->cq.entries[head_masked];
499 }
500
501 admin_queue->cq.head += comp_num;
502 admin_queue->cq.phase = phase;
503 admin_queue->sq.head += comp_num;
504 admin_queue->stats.completed_cmd += comp_num;
505 }
506
507 static int ena_com_comp_status_to_errno(u8 comp_status)
508 {
509 if (unlikely(comp_status != 0))
510 ena_trc_err("admin command failed[%u]\n", comp_status);
511
512 switch (comp_status) {
513 case ENA_ADMIN_SUCCESS:
514 return ENA_COM_OK;
515 case ENA_ADMIN_RESOURCE_ALLOCATION_FAILURE:
516 return ENA_COM_NO_MEM;
517 case ENA_ADMIN_UNSUPPORTED_OPCODE:
518 return ENA_COM_UNSUPPORTED;
519 case ENA_ADMIN_BAD_OPCODE:
520 case ENA_ADMIN_MALFORMED_REQUEST:
521 case ENA_ADMIN_ILLEGAL_PARAMETER:
522 case ENA_ADMIN_UNKNOWN_ERROR:
523 return ENA_COM_INVAL;
524 case ENA_ADMIN_RESOURCE_BUSY:
525 return ENA_COM_TRY_AGAIN;
526 }
527
528 return ENA_COM_INVAL;
529 }
530
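/* Sleep for an exponentially growing interval: delay_us (at least
 * ENA_MIN_ADMIN_POLL_US) multiplied by 2^exp, capped at
 * ENA_MAX_ADMIN_POLL_US.
 */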
531 static void ena_delay_exponential_backoff_us(u32 exp, u32 delay_us)
532 {
533 delay_us = ENA_MAX32(ENA_MIN_ADMIN_POLL_US, delay_us);
534 delay_us = ENA_MIN32(delay_us * (1U << exp), ENA_MAX_ADMIN_POLL_US);
535 ENA_USLEEP(delay_us);
536 }
537
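/* Poll the admin completion queue until the command completes, the command
 * is aborted or the admin completion timeout expires, backing off
 * exponentially between polls. Releases the completion context on exit.
 */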
538 static int ena_com_wait_and_process_admin_cq_polling(struct ena_comp_ctx *comp_ctx,
539 struct ena_com_admin_queue *admin_queue)
540 {
541 unsigned long flags = 0;
542 ena_time_t timeout;
543 int ret;
544 u32 exp = 0;
545
546 timeout = ENA_GET_SYSTEM_TIMEOUT(admin_queue->completion_timeout);
547
548 while (1) {
549 ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags);
550 ena_com_handle_admin_completion(admin_queue);
551 ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);
552
553 if (comp_ctx->status != ENA_CMD_SUBMITTED)
554 break;
555
556 if (ENA_TIME_EXPIRE(timeout)) {
557 ena_trc_err("Wait for completion (polling) timeout\n");
558 /* ENA didn't have any completion */
559 ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags);
560 admin_queue->stats.no_completion++;
561 admin_queue->running_state = false;
562 ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);
563
564 ret = ENA_COM_TIMER_EXPIRED;
565 goto err;
566 }
567
568 ena_delay_exponential_backoff_us(exp++,
569 admin_queue->ena_dev->ena_min_poll_delay_us);
570 }
571
572 if (unlikely(comp_ctx->status == ENA_CMD_ABORTED)) {
573 ena_trc_err("Command was aborted\n");
574 ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags);
575 admin_queue->stats.aborted_cmd++;
576 ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);
577 ret = ENA_COM_NO_DEVICE;
578 goto err;
579 }
580
581 ENA_WARN(comp_ctx->status != ENA_CMD_COMPLETED,
582 "Invalid comp status %d\n", comp_ctx->status);
583
584 ret = ena_com_comp_status_to_errno(comp_ctx->comp_status);
585 err:
586 comp_ctxt_release(admin_queue, comp_ctx);
587 return ret;
588 }
589
590 /**
591 * Set the LLQ configurations of the firmware
592 *
593 * The driver provides only the enabled feature values to the device,
594 * which in turn, checks if they are supported.
595 */
596 static int ena_com_set_llq(struct ena_com_dev *ena_dev)
597 {
598 struct ena_com_admin_queue *admin_queue;
599 struct ena_admin_set_feat_cmd cmd;
600 struct ena_admin_set_feat_resp resp;
601 struct ena_com_llq_info *llq_info = &ena_dev->llq_info;
602 int ret;
603
604 memset(&cmd, 0x0, sizeof(cmd));
605 admin_queue = &ena_dev->admin_queue;
606
607 cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
608 cmd.feat_common.feature_id = ENA_ADMIN_LLQ;
609
610 cmd.u.llq.header_location_ctrl_enabled = llq_info->header_location_ctrl;
611 cmd.u.llq.entry_size_ctrl_enabled = llq_info->desc_list_entry_size_ctrl;
612 cmd.u.llq.desc_num_before_header_enabled = llq_info->descs_num_before_header;
613 cmd.u.llq.descriptors_stride_ctrl_enabled = llq_info->desc_stride_ctrl;
614
615 cmd.u.llq.accel_mode.u.set.enabled_flags =
616 BIT(ENA_ADMIN_DISABLE_META_CACHING) |
617 BIT(ENA_ADMIN_LIMIT_TX_BURST);
618
619 ret = ena_com_execute_admin_command(admin_queue,
620 (struct ena_admin_aq_entry *)&cmd,
621 sizeof(cmd),
622 (struct ena_admin_acq_entry *)&resp,
623 sizeof(resp));
624
625 if (unlikely(ret))
626 ena_trc_err("Failed to set LLQ configurations: %d\n", ret);
627
628 return ret;
629 }
630
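/* Resolve the LLQ configuration: for each field (header location, stride
 * control, entry size, descriptors before header) use the default requested
 * by the driver if the device supports it, otherwise fall back to a
 * supported value, then push the result to the device via ena_com_set_llq().
 */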
631 static int ena_com_config_llq_info(struct ena_com_dev *ena_dev,
632 struct ena_admin_feature_llq_desc *llq_features,
633 struct ena_llq_configurations *llq_default_cfg)
634 {
635 struct ena_com_llq_info *llq_info = &ena_dev->llq_info;
636 struct ena_admin_accel_mode_get llq_accel_mode_get;
637 u16 supported_feat;
638 int rc;
639
640 memset(llq_info, 0, sizeof(*llq_info));
641
642 supported_feat = llq_features->header_location_ctrl_supported;
643
644 if (likely(supported_feat & llq_default_cfg->llq_header_location)) {
645 llq_info->header_location_ctrl =
646 llq_default_cfg->llq_header_location;
647 } else {
648 ena_trc_err("Invalid header location control, supported: 0x%x\n",
649 supported_feat);
650 return -EINVAL;
651 }
652
653 if (likely(llq_info->header_location_ctrl == ENA_ADMIN_INLINE_HEADER)) {
654 supported_feat = llq_features->descriptors_stride_ctrl_supported;
655 if (likely(supported_feat & llq_default_cfg->llq_stride_ctrl)) {
656 llq_info->desc_stride_ctrl = llq_default_cfg->llq_stride_ctrl;
657 } else {
658 if (supported_feat & ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY) {
659 llq_info->desc_stride_ctrl = ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY;
660 } else if (supported_feat & ENA_ADMIN_SINGLE_DESC_PER_ENTRY) {
661 llq_info->desc_stride_ctrl = ENA_ADMIN_SINGLE_DESC_PER_ENTRY;
662 } else {
663 ena_trc_err("Invalid desc_stride_ctrl, supported: 0x%x\n",
664 supported_feat);
665 return -EINVAL;
666 }
667
668 ena_trc_err("Default llq stride ctrl is not supported, performing fallback, default: 0x%x, supported: 0x%x, used: 0x%x\n",
669 llq_default_cfg->llq_stride_ctrl,
670 supported_feat,
671 llq_info->desc_stride_ctrl);
672 }
673 } else {
674 llq_info->desc_stride_ctrl = 0;
675 }
676
677 supported_feat = llq_features->entry_size_ctrl_supported;
678 if (likely(supported_feat & llq_default_cfg->llq_ring_entry_size)) {
679 llq_info->desc_list_entry_size_ctrl = llq_default_cfg->llq_ring_entry_size;
680 llq_info->desc_list_entry_size = llq_default_cfg->llq_ring_entry_size_value;
681 } else {
682 if (supported_feat & ENA_ADMIN_LIST_ENTRY_SIZE_128B) {
683 llq_info->desc_list_entry_size_ctrl = ENA_ADMIN_LIST_ENTRY_SIZE_128B;
684 llq_info->desc_list_entry_size = 128;
685 } else if (supported_feat & ENA_ADMIN_LIST_ENTRY_SIZE_192B) {
686 llq_info->desc_list_entry_size_ctrl = ENA_ADMIN_LIST_ENTRY_SIZE_192B;
687 llq_info->desc_list_entry_size = 192;
688 } else if (supported_feat & ENA_ADMIN_LIST_ENTRY_SIZE_256B) {
689 llq_info->desc_list_entry_size_ctrl = ENA_ADMIN_LIST_ENTRY_SIZE_256B;
690 llq_info->desc_list_entry_size = 256;
691 } else {
692 ena_trc_err("Invalid entry_size_ctrl, supported: 0x%x\n", supported_feat);
693 return -EINVAL;
694 }
695
696 ena_trc_err("Default llq ring entry size is not supported, performing fallback, default: 0x%x, supported: 0x%x, used: 0x%x\n",
697 llq_default_cfg->llq_ring_entry_size,
698 supported_feat,
699 llq_info->desc_list_entry_size);
700 }
701 if (unlikely(llq_info->desc_list_entry_size & 0x7)) {
702 /* The desc list entry size should be a whole multiple of 8.
703 * This requirement comes from __iowrite64_copy()
704 */
705 ena_trc_err("illegal entry size %d\n",
706 llq_info->desc_list_entry_size);
707 return -EINVAL;
708 }
709
710 if (llq_info->desc_stride_ctrl == ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY)
711 llq_info->descs_per_entry = llq_info->desc_list_entry_size /
712 sizeof(struct ena_eth_io_tx_desc);
713 else
714 llq_info->descs_per_entry = 1;
715
716 supported_feat = llq_features->desc_num_before_header_supported;
717 if (likely(supported_feat & llq_default_cfg->llq_num_decs_before_header)) {
718 llq_info->descs_num_before_header = llq_default_cfg->llq_num_decs_before_header;
719 } else {
720 if (supported_feat & ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_2) {
721 llq_info->descs_num_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_2;
722 } else if (supported_feat & ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_1) {
723 llq_info->descs_num_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_1;
724 } else if (supported_feat & ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_4) {
725 llq_info->descs_num_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_4;
726 } else if (supported_feat & ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_8) {
727 llq_info->descs_num_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_8;
728 } else {
729 ena_trc_err("Invalid descs_num_before_header, supported: 0x%x\n",
730 supported_feat);
731 return -EINVAL;
732 }
733
734 ena_trc_err("Default llq num descs before header is not supported, performing fallback, default: 0x%x, supported: 0x%x, used: 0x%x\n",
735 llq_default_cfg->llq_num_decs_before_header,
736 supported_feat,
737 llq_info->descs_num_before_header);
738 }
739 /* Check whether the accelerated queue features are supported */
740 llq_accel_mode_get = llq_features->accel_mode.u.get;
741
742 llq_info->disable_meta_caching =
743 !!(llq_accel_mode_get.supported_flags &
744 BIT(ENA_ADMIN_DISABLE_META_CACHING));
745
746 if (llq_accel_mode_get.supported_flags & BIT(ENA_ADMIN_LIMIT_TX_BURST))
747 llq_info->max_entries_in_tx_burst =
748 llq_accel_mode_get.max_tx_burst_size /
749 llq_default_cfg->llq_ring_entry_size_value;
750
751 rc = ena_com_set_llq(ena_dev);
752 if (rc)
753 ena_trc_err("Cannot set LLQ configuration: %d\n", rc);
754
755 return rc;
756 }
757
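/* Wait on the completion context's wait event for the admin command to be
 * completed by the interrupt handler. On timeout, poll the completion queue
 * once: if a completion did arrive without an interrupt and auto-polling is
 * enabled, fall back to polling mode; otherwise mark the queue as not
 * running and return a timeout error.
 */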
758 static int ena_com_wait_and_process_admin_cq_interrupts(struct ena_comp_ctx *comp_ctx,
759 struct ena_com_admin_queue *admin_queue)
760 {
761 unsigned long flags = 0;
762 int ret;
763
764 ENA_WAIT_EVENT_WAIT(comp_ctx->wait_event,
765 admin_queue->completion_timeout);
766
767 /* In case the command wasn't completed, find out the root cause.
768 * There might be 2 kinds of errors:
769 * 1) No completion (timeout reached)
770 * 2) There is a completion but the driver didn't receive the MSI-X interrupt.
771 */
772 if (unlikely(comp_ctx->status == ENA_CMD_SUBMITTED)) {
773 ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags);
774 ena_com_handle_admin_completion(admin_queue);
775 admin_queue->stats.no_completion++;
776 ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);
777
778 if (comp_ctx->status == ENA_CMD_COMPLETED) {
779 ena_trc_err("The ena device sent a completion but the driver didn't receive a MSI-X interrupt (cmd %d), autopolling mode is %s\n",
780 comp_ctx->cmd_opcode, admin_queue->auto_polling ? "ON" : "OFF");
781 /* Check if fallback to polling is enabled */
782 if (admin_queue->auto_polling)
783 admin_queue->polling = true;
784 } else {
785 ena_trc_err("The ena device didn't send a completion for the admin cmd %d status %d\n",
786 comp_ctx->cmd_opcode, comp_ctx->status);
787 }
788 /* Check if the queue shifted to polling mode.
789 * This happens when there is a completion without an interrupt
790 * and auto-polling mode is enabled. Continue normal execution in that case.
791 */
792 if (!admin_queue->polling) {
793 admin_queue->running_state = false;
794 ret = ENA_COM_TIMER_EXPIRED;
795 goto err;
796 }
797 }
798
799 ret = ena_com_comp_status_to_errno(comp_ctx->comp_status);
800 err:
801 comp_ctxt_release(admin_queue, comp_ctx);
802 return ret;
803 }
804
805 /* This method reads a hardware device register by posting a write
806 * and waiting for the response.
807 * On timeout the function returns ENA_MMIO_READ_TIMEOUT.
808 */
809 static u32 ena_com_reg_bar_read32(struct ena_com_dev *ena_dev, u16 offset)
810 {
811 struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;
812 volatile struct ena_admin_ena_mmio_req_read_less_resp *read_resp =
813 mmio_read->read_resp;
814 u32 mmio_read_reg, ret, i;
815 unsigned long flags = 0;
816 u32 timeout = mmio_read->reg_read_to;
817
818 ENA_MIGHT_SLEEP();
819
820 if (timeout == 0)
821 timeout = ENA_REG_READ_TIMEOUT;
822
823 /* If readless is disabled, perform regular read */
824 if (!mmio_read->readless_supported)
825 return ENA_REG_READ32(ena_dev->bus, ena_dev->reg_bar + offset);
826
827 ENA_SPINLOCK_LOCK(mmio_read->lock, flags);
828 mmio_read->seq_num++;
829
830 read_resp->req_id = mmio_read->seq_num + 0xDEAD;
831 mmio_read_reg = (offset << ENA_REGS_MMIO_REG_READ_REG_OFF_SHIFT) &
832 ENA_REGS_MMIO_REG_READ_REG_OFF_MASK;
833 mmio_read_reg |= mmio_read->seq_num &
834 ENA_REGS_MMIO_REG_READ_REQ_ID_MASK;
835
836 ENA_REG_WRITE32(ena_dev->bus, mmio_read_reg,
837 ena_dev->reg_bar + ENA_REGS_MMIO_REG_READ_OFF);
838
839 for (i = 0; i < timeout; i++) {
840 if (READ_ONCE16(read_resp->req_id) == mmio_read->seq_num)
841 break;
842
843 ENA_UDELAY(1);
844 }
845
846 if (unlikely(i == timeout)) {
847 ena_trc_err("reading reg failed for timeout. expected: req id[%hu] offset[%hu] actual: req id[%hu] offset[%hu]\n",
848 mmio_read->seq_num,
849 offset,
850 read_resp->req_id,
851 read_resp->reg_off);
852 ret = ENA_MMIO_READ_TIMEOUT;
853 goto err;
854 }
855
856 if (read_resp->reg_off != offset) {
857 ena_trc_err("Read failure: wrong offset provided\n");
858 ret = ENA_MMIO_READ_TIMEOUT;
859 } else {
860 ret = read_resp->reg_val;
861 }
862 err:
863 ENA_SPINLOCK_UNLOCK(mmio_read->lock, flags);
864
865 return ret;
866 }
867
868 /* There are two ways to wait for a completion.
869 * Polling mode - wait until the completion is available.
870 * Async mode - wait on a wait queue until the completion is ready
871 * (or the timeout expires).
872 * In async mode the IRQ handler is expected to call
873 * ena_com_handle_admin_completion() to mark the completions.
874 */
875 static int ena_com_wait_and_process_admin_cq(struct ena_comp_ctx *comp_ctx,
876 struct ena_com_admin_queue *admin_queue)
877 {
878 if (admin_queue->polling)
879 return ena_com_wait_and_process_admin_cq_polling(comp_ctx,
880 admin_queue);
881
882 return ena_com_wait_and_process_admin_cq_interrupts(comp_ctx,
883 admin_queue);
884 }
885
886 static int ena_com_destroy_io_sq(struct ena_com_dev *ena_dev,
887 struct ena_com_io_sq *io_sq)
888 {
889 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
890 struct ena_admin_aq_destroy_sq_cmd destroy_cmd;
891 struct ena_admin_acq_destroy_sq_resp_desc destroy_resp;
892 u8 direction;
893 int ret;
894
895 memset(&destroy_cmd, 0x0, sizeof(destroy_cmd));
896
897 if (io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX)
898 direction = ENA_ADMIN_SQ_DIRECTION_TX;
899 else
900 direction = ENA_ADMIN_SQ_DIRECTION_RX;
901
902 destroy_cmd.sq.sq_identity |= (direction <<
903 ENA_ADMIN_SQ_SQ_DIRECTION_SHIFT) &
904 ENA_ADMIN_SQ_SQ_DIRECTION_MASK;
905
906 destroy_cmd.sq.sq_idx = io_sq->idx;
907 destroy_cmd.aq_common_descriptor.opcode = ENA_ADMIN_DESTROY_SQ;
908
909 ret = ena_com_execute_admin_command(admin_queue,
910 (struct ena_admin_aq_entry *)&destroy_cmd,
911 sizeof(destroy_cmd),
912 (struct ena_admin_acq_entry *)&destroy_resp,
913 sizeof(destroy_resp));
914
915 if (unlikely(ret && (ret != ENA_COM_NO_DEVICE)))
916 ena_trc_err("failed to destroy io sq error: %d\n", ret);
917
918 return ret;
919 }
920
921 static void ena_com_io_queue_free(struct ena_com_dev *ena_dev,
922 struct ena_com_io_sq *io_sq,
923 struct ena_com_io_cq *io_cq)
924 {
925 size_t size;
926
927 if (io_cq->cdesc_addr.virt_addr) {
928 size = io_cq->cdesc_entry_size_in_bytes * io_cq->q_depth;
929
930 ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
931 size,
932 io_cq->cdesc_addr.virt_addr,
933 io_cq->cdesc_addr.phys_addr,
934 io_cq->cdesc_addr.mem_handle);
935
936 io_cq->cdesc_addr.virt_addr = NULL;
937 }
938
939 if (io_sq->desc_addr.virt_addr) {
940 size = io_sq->desc_entry_size * io_sq->q_depth;
941
942 ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
943 size,
944 io_sq->desc_addr.virt_addr,
945 io_sq->desc_addr.phys_addr,
946 io_sq->desc_addr.mem_handle);
947
948 io_sq->desc_addr.virt_addr = NULL;
949 }
950
951 if (io_sq->bounce_buf_ctrl.base_buffer) {
952 ENA_MEM_FREE(ena_dev->dmadev,
953 io_sq->bounce_buf_ctrl.base_buffer,
954 (io_sq->llq_info.desc_list_entry_size * ENA_COM_BOUNCE_BUFFER_CNTRL_CNT));
955 io_sq->bounce_buf_ctrl.base_buffer = NULL;
956 }
957 }
958
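/* Poll the device status register until the reset-in-progress bit matches
 * the expected state or the timeout (given in units of 100ms) expires.
 */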
959 static int wait_for_reset_state(struct ena_com_dev *ena_dev, u32 timeout,
960 u16 exp_state)
961 {
962 u32 val, exp = 0;
963 ena_time_t timeout_stamp;
964
965 /* Convert timeout from resolution of 100ms to us resolution. */
966 timeout_stamp = ENA_GET_SYSTEM_TIMEOUT(100 * 1000 * timeout);
967
968 while (1) {
969 val = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF);
970
971 if (unlikely(val == ENA_MMIO_READ_TIMEOUT)) {
972 ena_trc_err("Reg read timeout occurred\n");
973 return ENA_COM_TIMER_EXPIRED;
974 }
975
976 if ((val & ENA_REGS_DEV_STS_RESET_IN_PROGRESS_MASK) ==
977 exp_state)
978 return 0;
979
980 if (ENA_TIME_EXPIRE(timeout_stamp))
981 return ENA_COM_TIMER_EXPIRED;
982
983 ena_delay_exponential_backoff_us(exp++, ena_dev->ena_min_poll_delay_us);
984 }
985 }
986
987 static bool ena_com_check_supported_feature_id(struct ena_com_dev *ena_dev,
988 enum ena_admin_aq_feature_id feature_id)
989 {
990 u32 feature_mask = 1 << feature_id;
991
992 /* Device attributes are always supported */
993 if ((feature_id != ENA_ADMIN_DEVICE_ATTRIBUTES) &&
994 !(ena_dev->supported_features & feature_mask))
995 return false;
996
997 return true;
998 }
999
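/* Build and execute an ENA_ADMIN_GET_FEATURE admin command for the given
 * feature id/version, optionally attaching an indirect control buffer
 * (control_buf_dma_addr/control_buff_size) when the feature requires one.
 */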
1000 static int ena_com_get_feature_ex(struct ena_com_dev *ena_dev,
1001 struct ena_admin_get_feat_resp *get_resp,
1002 enum ena_admin_aq_feature_id feature_id,
1003 dma_addr_t control_buf_dma_addr,
1004 u32 control_buff_size,
1005 u8 feature_ver)
1006 {
1007 struct ena_com_admin_queue *admin_queue;
1008 struct ena_admin_get_feat_cmd get_cmd;
1009 int ret;
1010
1011 if (!ena_com_check_supported_feature_id(ena_dev, feature_id)) {
1012 ena_trc_dbg("Feature %d isn't supported\n", feature_id);
1013 return ENA_COM_UNSUPPORTED;
1014 }
1015
1016 memset(&get_cmd, 0x0, sizeof(get_cmd));
1017 admin_queue = &ena_dev->admin_queue;
1018
1019 get_cmd.aq_common_descriptor.opcode = ENA_ADMIN_GET_FEATURE;
1020
1021 if (control_buff_size)
1022 get_cmd.aq_common_descriptor.flags =
1023 ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
1024 else
1025 get_cmd.aq_common_descriptor.flags = 0;
1026
1027 ret = ena_com_mem_addr_set(ena_dev,
1028 &get_cmd.control_buffer.address,
1029 control_buf_dma_addr);
1030 if (unlikely(ret)) {
1031 ena_trc_err("memory address set failed\n");
1032 return ret;
1033 }
1034
1035 get_cmd.control_buffer.length = control_buff_size;
1036 get_cmd.feat_common.feature_version = feature_ver;
1037 get_cmd.feat_common.feature_id = feature_id;
1038
1039 ret = ena_com_execute_admin_command(admin_queue,
1040 (struct ena_admin_aq_entry *)
1041 &get_cmd,
1042 sizeof(get_cmd),
1043 (struct ena_admin_acq_entry *)
1044 get_resp,
1045 sizeof(*get_resp));
1046
1047 if (unlikely(ret))
1048 ena_trc_err("Failed to submit get_feature command %d error: %d\n",
1049 feature_id, ret);
1050
1051 return ret;
1052 }
1053
1054 static int ena_com_get_feature(struct ena_com_dev *ena_dev,
1055 struct ena_admin_get_feat_resp *get_resp,
1056 enum ena_admin_aq_feature_id feature_id,
1057 u8 feature_ver)
1058 {
1059 return ena_com_get_feature_ex(ena_dev,
1060 get_resp,
1061 feature_id,
1062 0,
1063 0,
1064 feature_ver);
1065 }
1066
1067 int ena_com_get_current_hash_function(struct ena_com_dev *ena_dev)
1068 {
1069 return ena_dev->rss.hash_func;
1070 }
1071
1072 static void ena_com_hash_key_fill_default_key(struct ena_com_dev *ena_dev)
1073 {
1074 struct ena_admin_feature_rss_flow_hash_control *hash_key =
1075 (ena_dev->rss).hash_key;
1076
1077 ENA_RSS_FILL_KEY(&hash_key->key, sizeof(hash_key->key));
1078 /* The key buffer is stored in the device in an array of
1079 * uint32 elements.
1080 */
1081 hash_key->keys_num = ENA_ADMIN_RSS_KEY_PARTS;
1082 }
1083
1084 static int ena_com_hash_key_allocate(struct ena_com_dev *ena_dev)
1085 {
1086 struct ena_rss *rss = &ena_dev->rss;
1087
1088 if (!ena_com_check_supported_feature_id(ena_dev, ENA_ADMIN_RSS_HASH_FUNCTION))
1089 return ENA_COM_UNSUPPORTED;
1090
1091 ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev,
1092 sizeof(*rss->hash_key),
1093 rss->hash_key,
1094 rss->hash_key_dma_addr,
1095 rss->hash_key_mem_handle);
1096
1097 if (unlikely(!rss->hash_key))
1098 return ENA_COM_NO_MEM;
1099
1100 return 0;
1101 }
1102
1103 static void ena_com_hash_key_destroy(struct ena_com_dev *ena_dev)
1104 {
1105 struct ena_rss *rss = &ena_dev->rss;
1106
1107 if (rss->hash_key)
1108 ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
1109 sizeof(*rss->hash_key),
1110 rss->hash_key,
1111 rss->hash_key_dma_addr,
1112 rss->hash_key_mem_handle);
1113 rss->hash_key = NULL;
1114 }
1115
1116 static int ena_com_hash_ctrl_init(struct ena_com_dev *ena_dev)
1117 {
1118 struct ena_rss *rss = &ena_dev->rss;
1119
1120 ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev,
1121 sizeof(*rss->hash_ctrl),
1122 rss->hash_ctrl,
1123 rss->hash_ctrl_dma_addr,
1124 rss->hash_ctrl_mem_handle);
1125
1126 if (unlikely(!rss->hash_ctrl))
1127 return ENA_COM_NO_MEM;
1128
1129 return 0;
1130 }
1131
1132 static void ena_com_hash_ctrl_destroy(struct ena_com_dev *ena_dev)
1133 {
1134 struct ena_rss *rss = &ena_dev->rss;
1135
1136 if (rss->hash_ctrl)
1137 ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
1138 sizeof(*rss->hash_ctrl),
1139 rss->hash_ctrl,
1140 rss->hash_ctrl_dma_addr,
1141 rss->hash_ctrl_mem_handle);
1142 rss->hash_ctrl = NULL;
1143 }
1144
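/* Allocate the RSS indirection tables: query the supported table size range
 * from the device, validate the requested log size against it and allocate
 * both the DMA-able device table and the host-side shadow table.
 */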
1145 static int ena_com_indirect_table_allocate(struct ena_com_dev *ena_dev,
1146 u16 log_size)
1147 {
1148 struct ena_rss *rss = &ena_dev->rss;
1149 struct ena_admin_get_feat_resp get_resp;
1150 size_t tbl_size;
1151 int ret;
1152
1153 ret = ena_com_get_feature(ena_dev, &get_resp,
1154 ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG, 0);
1155 if (unlikely(ret))
1156 return ret;
1157
1158 if ((get_resp.u.ind_table.min_size > log_size) ||
1159 (get_resp.u.ind_table.max_size < log_size)) {
1160 ena_trc_err("indirect table size doesn't fit. requested size: %d while min is:%d and max %d\n",
1161 1 << log_size,
1162 1 << get_resp.u.ind_table.min_size,
1163 1 << get_resp.u.ind_table.max_size);
1164 return ENA_COM_INVAL;
1165 }
1166
1167 tbl_size = (1ULL << log_size) *
1168 sizeof(struct ena_admin_rss_ind_table_entry);
1169
1170 ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev,
1171 tbl_size,
1172 rss->rss_ind_tbl,
1173 rss->rss_ind_tbl_dma_addr,
1174 rss->rss_ind_tbl_mem_handle);
1175 if (unlikely(!rss->rss_ind_tbl))
1176 goto mem_err1;
1177
1178 tbl_size = (1ULL << log_size) * sizeof(u16);
1179 rss->host_rss_ind_tbl =
1180 ENA_MEM_ALLOC(ena_dev->dmadev, tbl_size);
1181 if (unlikely(!rss->host_rss_ind_tbl))
1182 goto mem_err2;
1183
1184 rss->tbl_log_size = log_size;
1185
1186 return 0;
1187
1188 mem_err2:
1189 tbl_size = (1ULL << log_size) *
1190 sizeof(struct ena_admin_rss_ind_table_entry);
1191
1192 ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
1193 tbl_size,
1194 rss->rss_ind_tbl,
1195 rss->rss_ind_tbl_dma_addr,
1196 rss->rss_ind_tbl_mem_handle);
1197 rss->rss_ind_tbl = NULL;
1198 mem_err1:
1199 rss->tbl_log_size = 0;
1200 return ENA_COM_NO_MEM;
1201 }
1202
1203 static void ena_com_indirect_table_destroy(struct ena_com_dev *ena_dev)
1204 {
1205 struct ena_rss *rss = &ena_dev->rss;
1206 size_t tbl_size = (1ULL << rss->tbl_log_size) *
1207 sizeof(struct ena_admin_rss_ind_table_entry);
1208
1209 if (rss->rss_ind_tbl)
1210 ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
1211 tbl_size,
1212 rss->rss_ind_tbl,
1213 rss->rss_ind_tbl_dma_addr,
1214 rss->rss_ind_tbl_mem_handle);
1215 rss->rss_ind_tbl = NULL;
1216
1217 if (rss->host_rss_ind_tbl)
1218 ENA_MEM_FREE(ena_dev->dmadev,
1219 rss->host_rss_ind_tbl,
1220 ((1ULL << rss->tbl_log_size) * sizeof(u16)));
1221 rss->host_rss_ind_tbl = NULL;
1222 }
1223
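/* Issue an ENA_ADMIN_CREATE_SQ admin command for the given IO SQ and, on
 * success, record the SQ index, doorbell address and (for LLQ queues) the
 * device-memory header/descriptor offsets returned by the device.
 */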
1224 static int ena_com_create_io_sq(struct ena_com_dev *ena_dev,
1225 struct ena_com_io_sq *io_sq, u16 cq_idx)
1226 {
1227 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
1228 struct ena_admin_aq_create_sq_cmd create_cmd;
1229 struct ena_admin_acq_create_sq_resp_desc cmd_completion;
1230 u8 direction;
1231 int ret;
1232
1233 memset(&create_cmd, 0x0, sizeof(create_cmd));
1234
1235 create_cmd.aq_common_descriptor.opcode = ENA_ADMIN_CREATE_SQ;
1236
1237 if (io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX)
1238 direction = ENA_ADMIN_SQ_DIRECTION_TX;
1239 else
1240 direction = ENA_ADMIN_SQ_DIRECTION_RX;
1241
1242 create_cmd.sq_identity |= (direction <<
1243 ENA_ADMIN_AQ_CREATE_SQ_CMD_SQ_DIRECTION_SHIFT) &
1244 ENA_ADMIN_AQ_CREATE_SQ_CMD_SQ_DIRECTION_MASK;
1245
1246 create_cmd.sq_caps_2 |= io_sq->mem_queue_type &
1247 ENA_ADMIN_AQ_CREATE_SQ_CMD_PLACEMENT_POLICY_MASK;
1248
1249 create_cmd.sq_caps_2 |= (ENA_ADMIN_COMPLETION_POLICY_DESC <<
1250 ENA_ADMIN_AQ_CREATE_SQ_CMD_COMPLETION_POLICY_SHIFT) &
1251 ENA_ADMIN_AQ_CREATE_SQ_CMD_COMPLETION_POLICY_MASK;
1252
1253 create_cmd.sq_caps_3 |=
1254 ENA_ADMIN_AQ_CREATE_SQ_CMD_IS_PHYSICALLY_CONTIGUOUS_MASK;
1255
1256 create_cmd.cq_idx = cq_idx;
1257 create_cmd.sq_depth = io_sq->q_depth;
1258
1259 if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST) {
1260 ret = ena_com_mem_addr_set(ena_dev,
1261 &create_cmd.sq_ba,
1262 io_sq->desc_addr.phys_addr);
1263 if (unlikely(ret)) {
1264 ena_trc_err("memory address set failed\n");
1265 return ret;
1266 }
1267 }
1268
1269 ret = ena_com_execute_admin_command(admin_queue,
1270 (struct ena_admin_aq_entry *)&create_cmd,
1271 sizeof(create_cmd),
1272 (struct ena_admin_acq_entry *)&cmd_completion,
1273 sizeof(cmd_completion));
1274 if (unlikely(ret)) {
1275 ena_trc_err("Failed to create IO SQ. error: %d\n", ret);
1276 return ret;
1277 }
1278
1279 io_sq->idx = cmd_completion.sq_idx;
1280
1281 io_sq->db_addr = (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
1282 (uintptr_t)cmd_completion.sq_doorbell_offset);
1283
1284 if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
1285 io_sq->header_addr = (u8 __iomem *)((uintptr_t)ena_dev->mem_bar
1286 + cmd_completion.llq_headers_offset);
1287
1288 io_sq->desc_addr.pbuf_dev_addr =
1289 (u8 __iomem *)((uintptr_t)ena_dev->mem_bar +
1290 cmd_completion.llq_descriptors_offset);
1291 }
1292
1293 ena_trc_dbg("created sq[%u], depth[%u]\n", io_sq->idx, io_sq->q_depth);
1294
1295 return ret;
1296 }
1297
1298 static int ena_com_ind_tbl_convert_to_device(struct ena_com_dev *ena_dev)
1299 {
1300 struct ena_rss *rss = &ena_dev->rss;
1301 struct ena_com_io_sq *io_sq;
1302 u16 qid;
1303 int i;
1304
1305 for (i = 0; i < 1 << rss->tbl_log_size; i++) {
1306 qid = rss->host_rss_ind_tbl[i];
1307 if (qid >= ENA_TOTAL_NUM_QUEUES)
1308 return ENA_COM_INVAL;
1309
1310 io_sq = &ena_dev->io_sq_queues[qid];
1311
1312 if (io_sq->direction != ENA_COM_IO_QUEUE_DIRECTION_RX)
1313 return ENA_COM_INVAL;
1314
1315 rss->rss_ind_tbl[i].cq_idx = io_sq->idx;
1316 }
1317
1318 return 0;
1319 }
1320
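/* Rescale the stored Rx/Tx interrupt moderation intervals from the previous
 * interrupt delay resolution to the new one reported by the device, falling
 * back to the default resolution if the device reports zero.
 */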
1321 static void ena_com_update_intr_delay_resolution(struct ena_com_dev *ena_dev,
1322 u16 intr_delay_resolution)
1323 {
1324 u16 prev_intr_delay_resolution = ena_dev->intr_delay_resolution;
1325
1326 if (unlikely(!intr_delay_resolution)) {
1327 ena_trc_err("Illegal intr_delay_resolution provided. Going to use default 1 usec resolution\n");
1328 intr_delay_resolution = ENA_DEFAULT_INTR_DELAY_RESOLUTION;
1329 }
1330
1331 /* update Rx */
1332 ena_dev->intr_moder_rx_interval =
1333 ena_dev->intr_moder_rx_interval *
1334 prev_intr_delay_resolution /
1335 intr_delay_resolution;
1336
1337 /* update Tx */
1338 ena_dev->intr_moder_tx_interval =
1339 ena_dev->intr_moder_tx_interval *
1340 prev_intr_delay_resolution /
1341 intr_delay_resolution;
1342
1343 ena_dev->intr_delay_resolution = intr_delay_resolution;
1344 }
1345
1346 /*****************************************************************************/
1347 /******************************* API ******************************/
1348 /*****************************************************************************/
1349
1350 int ena_com_execute_admin_command(struct ena_com_admin_queue *admin_queue,
1351 struct ena_admin_aq_entry *cmd,
1352 size_t cmd_size,
1353 struct ena_admin_acq_entry *comp,
1354 size_t comp_size)
1355 {
1356 struct ena_comp_ctx *comp_ctx;
1357 int ret;
1358
1359 comp_ctx = ena_com_submit_admin_cmd(admin_queue, cmd, cmd_size,
1360 comp, comp_size);
1361 if (IS_ERR(comp_ctx)) {
1362 if (comp_ctx == ERR_PTR(ENA_COM_NO_DEVICE))
1363 ena_trc_dbg("Failed to submit command [%ld]\n",
1364 PTR_ERR(comp_ctx));
1365 else
1366 ena_trc_err("Failed to submit command [%ld]\n",
1367 PTR_ERR(comp_ctx));
1368
1369 return PTR_ERR(comp_ctx);
1370 }
1371
1372 ret = ena_com_wait_and_process_admin_cq(comp_ctx, admin_queue);
1373 if (unlikely(ret)) {
1374 if (admin_queue->running_state)
1375 ena_trc_err("Failed to process command. ret = %d\n",
1376 ret);
1377 else
1378 ena_trc_dbg("Failed to process command. ret = %d\n",
1379 ret);
1380 }
1381 return ret;
1382 }
1383
1384 int ena_com_create_io_cq(struct ena_com_dev *ena_dev,
1385 struct ena_com_io_cq *io_cq)
1386 {
1387 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
1388 struct ena_admin_aq_create_cq_cmd create_cmd;
1389 struct ena_admin_acq_create_cq_resp_desc cmd_completion;
1390 int ret;
1391
1392 memset(&create_cmd, 0x0, sizeof(create_cmd));
1393
1394 create_cmd.aq_common_descriptor.opcode = ENA_ADMIN_CREATE_CQ;
1395
1396 create_cmd.cq_caps_2 |= (io_cq->cdesc_entry_size_in_bytes / 4) &
1397 ENA_ADMIN_AQ_CREATE_CQ_CMD_CQ_ENTRY_SIZE_WORDS_MASK;
1398 create_cmd.cq_caps_1 |=
1399 ENA_ADMIN_AQ_CREATE_CQ_CMD_INTERRUPT_MODE_ENABLED_MASK;
1400
1401 create_cmd.msix_vector = io_cq->msix_vector;
1402 create_cmd.cq_depth = io_cq->q_depth;
1403
1404 ret = ena_com_mem_addr_set(ena_dev,
1405 &create_cmd.cq_ba,
1406 io_cq->cdesc_addr.phys_addr);
1407 if (unlikely(ret)) {
1408 ena_trc_err("memory address set failed\n");
1409 return ret;
1410 }
1411
1412 ret = ena_com_execute_admin_command(admin_queue,
1413 (struct ena_admin_aq_entry *)&create_cmd,
1414 sizeof(create_cmd),
1415 (struct ena_admin_acq_entry *)&cmd_completion,
1416 sizeof(cmd_completion));
1417 if (unlikely(ret)) {
1418 ena_trc_err("Failed to create IO CQ. error: %d\n", ret);
1419 return ret;
1420 }
1421
1422 io_cq->idx = cmd_completion.cq_idx;
1423
1424 io_cq->unmask_reg = (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
1425 cmd_completion.cq_interrupt_unmask_register_offset);
1426
1427 if (cmd_completion.cq_head_db_register_offset)
1428 io_cq->cq_head_db_reg =
1429 (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
1430 cmd_completion.cq_head_db_register_offset);
1431
1432 if (cmd_completion.numa_node_register_offset)
1433 io_cq->numa_node_cfg_reg =
1434 (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
1435 cmd_completion.numa_node_register_offset);
1436
1437 ena_trc_dbg("created cq[%u], depth[%u]\n", io_cq->idx, io_cq->q_depth);
1438
1439 return ret;
1440 }
1441
1442 int ena_com_get_io_handlers(struct ena_com_dev *ena_dev, u16 qid,
1443 struct ena_com_io_sq **io_sq,
1444 struct ena_com_io_cq **io_cq)
1445 {
1446 if (qid >= ENA_TOTAL_NUM_QUEUES) {
1447 ena_trc_err("Invalid queue number %d but the max is %d\n",
1448 qid, ENA_TOTAL_NUM_QUEUES);
1449 return ENA_COM_INVAL;
1450 }
1451
1452 *io_sq = &ena_dev->io_sq_queues[qid];
1453 *io_cq = &ena_dev->io_cq_queues[qid];
1454
1455 return 0;
1456 }
1457
1458 void ena_com_abort_admin_commands(struct ena_com_dev *ena_dev)
1459 {
1460 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
1461 struct ena_comp_ctx *comp_ctx;
1462 u16 i;
1463
1464 if (!admin_queue->comp_ctx)
1465 return;
1466
1467 for (i = 0; i < admin_queue->q_depth; i++) {
1468 comp_ctx = get_comp_ctxt(admin_queue, i, false);
1469 if (unlikely(!comp_ctx))
1470 break;
1471
1472 comp_ctx->status = ENA_CMD_ABORTED;
1473
1474 ENA_WAIT_EVENT_SIGNAL(comp_ctx->wait_event);
1475 }
1476 }
1477
1478 void ena_com_wait_for_abort_completion(struct ena_com_dev *ena_dev)
1479 {
1480 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
1481 unsigned long flags = 0;
1482 u32 exp = 0;
1483
1484 ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags);
1485 while (ATOMIC32_READ(&admin_queue->outstanding_cmds) != 0) {
1486 ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);
1487 ena_delay_exponential_backoff_us(exp++, ena_dev->ena_min_poll_delay_us);
1488 ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags);
1489 }
1490 ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);
1491 }
1492
1493 int ena_com_destroy_io_cq(struct ena_com_dev *ena_dev,
1494 struct ena_com_io_cq *io_cq)
1495 {
1496 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
1497 struct ena_admin_aq_destroy_cq_cmd destroy_cmd;
1498 struct ena_admin_acq_destroy_cq_resp_desc destroy_resp;
1499 int ret;
1500
1501 memset(&destroy_cmd, 0x0, sizeof(destroy_cmd));
1502
1503 destroy_cmd.cq_idx = io_cq->idx;
1504 destroy_cmd.aq_common_descriptor.opcode = ENA_ADMIN_DESTROY_CQ;
1505
1506 ret = ena_com_execute_admin_command(admin_queue,
1507 (struct ena_admin_aq_entry *)&destroy_cmd,
1508 sizeof(destroy_cmd),
1509 (struct ena_admin_acq_entry *)&destroy_resp,
1510 sizeof(destroy_resp));
1511
1512 if (unlikely(ret && (ret != ENA_COM_NO_DEVICE)))
1513 ena_trc_err("Failed to destroy IO CQ. error: %d\n", ret);
1514
1515 return ret;
1516 }
1517
1518 bool ena_com_get_admin_running_state(struct ena_com_dev *ena_dev)
1519 {
1520 return ena_dev->admin_queue.running_state;
1521 }
1522
1523 void ena_com_set_admin_running_state(struct ena_com_dev *ena_dev, bool state)
1524 {
1525 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
1526 unsigned long flags = 0;
1527
1528 ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags);
1529 ena_dev->admin_queue.running_state = state;
1530 ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);
1531 }
1532
1533 void ena_com_admin_aenq_enable(struct ena_com_dev *ena_dev)
1534 {
1535 u16 depth = ena_dev->aenq.q_depth;
1536
1537 ENA_WARN(ena_dev->aenq.head != depth, "Invalid AENQ state\n");
1538
1539 /* Init head_db to mark that all entries in the queue
1540 * are initially available
1541 */
1542 ENA_REG_WRITE32(ena_dev->bus, depth, ena_dev->reg_bar + ENA_REGS_AENQ_HEAD_DB_OFF);
1543 }
1544
1545 int ena_com_set_aenq_config(struct ena_com_dev *ena_dev, u32 groups_flag)
1546 {
1547 struct ena_com_admin_queue *admin_queue;
1548 struct ena_admin_set_feat_cmd cmd;
1549 struct ena_admin_set_feat_resp resp;
1550 struct ena_admin_get_feat_resp get_resp;
1551 int ret;
1552
1553 ret = ena_com_get_feature(ena_dev, &get_resp, ENA_ADMIN_AENQ_CONFIG, 0);
1554 if (ret) {
1555 ena_trc_info("Can't get aenq configuration\n");
1556 return ret;
1557 }
1558
1559 if ((get_resp.u.aenq.supported_groups & groups_flag) != groups_flag) {
1560 ena_trc_warn("Trying to set unsupported aenq events. supported flag: 0x%x asked flag: 0x%x\n",
1561 get_resp.u.aenq.supported_groups,
1562 groups_flag);
1563 return ENA_COM_UNSUPPORTED;
1564 }
1565
1566 memset(&cmd, 0x0, sizeof(cmd));
1567 admin_queue = &ena_dev->admin_queue;
1568
1569 cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
1570 cmd.aq_common_descriptor.flags = 0;
1571 cmd.feat_common.feature_id = ENA_ADMIN_AENQ_CONFIG;
1572 cmd.u.aenq.enabled_groups = groups_flag;
1573
1574 ret = ena_com_execute_admin_command(admin_queue,
1575 (struct ena_admin_aq_entry *)&cmd,
1576 sizeof(cmd),
1577 (struct ena_admin_acq_entry *)&resp,
1578 sizeof(resp));
1579
1580 if (unlikely(ret))
1581 ena_trc_err("Failed to config AENQ ret: %d\n", ret);
1582
1583 return ret;
1584 }
1585
1586 int ena_com_get_dma_width(struct ena_com_dev *ena_dev)
1587 {
1588 u32 caps = ena_com_reg_bar_read32(ena_dev, ENA_REGS_CAPS_OFF);
1589 int width;
1590
1591 if (unlikely(caps == ENA_MMIO_READ_TIMEOUT)) {
1592 ena_trc_err("Reg read timeout occurred\n");
1593 return ENA_COM_TIMER_EXPIRED;
1594 }
1595
1596 width = (caps & ENA_REGS_CAPS_DMA_ADDR_WIDTH_MASK) >>
1597 ENA_REGS_CAPS_DMA_ADDR_WIDTH_SHIFT;
1598
1599 ena_trc_dbg("ENA dma width: %d\n", width);
1600
1601 if ((width < 32) || width > ENA_MAX_PHYS_ADDR_SIZE_BITS) {
1602 ena_trc_err("DMA width illegal value: %d\n", width);
1603 return ENA_COM_INVAL;
1604 }
1605
1606 ena_dev->dma_addr_bits = width;
1607
1608 return width;
1609 }
1610
1611 int ena_com_validate_version(struct ena_com_dev *ena_dev)
1612 {
1613 u32 ver;
1614 u32 ctrl_ver;
1615 u32 ctrl_ver_masked;
1616
1617 /* Make sure the ENA version and the controller version are at least
1618 * the versions the driver expects
1619 */
1620 ver = ena_com_reg_bar_read32(ena_dev, ENA_REGS_VERSION_OFF);
1621 ctrl_ver = ena_com_reg_bar_read32(ena_dev,
1622 ENA_REGS_CONTROLLER_VERSION_OFF);
1623
1624 if (unlikely((ver == ENA_MMIO_READ_TIMEOUT) ||
1625 (ctrl_ver == ENA_MMIO_READ_TIMEOUT))) {
1626 ena_trc_err("Reg read timeout occurred\n");
1627 return ENA_COM_TIMER_EXPIRED;
1628 }
1629
1630 ena_trc_info("ena device version: %d.%d\n",
1631 (ver & ENA_REGS_VERSION_MAJOR_VERSION_MASK) >>
1632 ENA_REGS_VERSION_MAJOR_VERSION_SHIFT,
1633 ver & ENA_REGS_VERSION_MINOR_VERSION_MASK);
1634
1635 ena_trc_info("ena controller version: %d.%d.%d implementation version %d\n",
1636 (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_MASK)
1637 >> ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_SHIFT,
1638 (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_MASK)
1639 >> ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_SHIFT,
1640 (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_SUBMINOR_VERSION_MASK),
1641 (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_IMPL_ID_MASK) >>
1642 ENA_REGS_CONTROLLER_VERSION_IMPL_ID_SHIFT);
1643
1644 ctrl_ver_masked =
1645 (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_MASK) |
1646 (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_MASK) |
1647 (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_SUBMINOR_VERSION_MASK);
1648
1649 /* Validate the ctrl version without the implementation ID */
1650 if (ctrl_ver_masked < MIN_ENA_CTRL_VER) {
1651 ena_trc_err("ENA ctrl version is lower than the minimal ctrl version the driver supports\n");
1652 return -1;
1653 }
1654
1655 return 0;
1656 }
1657
1658 void ena_com_admin_destroy(struct ena_com_dev *ena_dev)
1659 {
1660 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
1661 struct ena_com_admin_cq *cq = &admin_queue->cq;
1662 struct ena_com_admin_sq *sq = &admin_queue->sq;
1663 struct ena_com_aenq *aenq = &ena_dev->aenq;
1664 u16 size;
1665
1666 if (admin_queue->comp_ctx) {
1667 ENA_WAIT_EVENT_DESTROY(admin_queue->comp_ctx->wait_event);
1668 ENA_MEM_FREE(ena_dev->dmadev,
1669 admin_queue->comp_ctx,
1670 (admin_queue->q_depth * sizeof(struct ena_comp_ctx)));
1671 }
1672
1673 admin_queue->comp_ctx = NULL;
1674 size = ADMIN_SQ_SIZE(admin_queue->q_depth);
1675 if (sq->entries)
1676 ENA_MEM_FREE_COHERENT(ena_dev->dmadev, size, sq->entries,
1677 sq->dma_addr, sq->mem_handle);
1678 sq->entries = NULL;
1679
1680 size = ADMIN_CQ_SIZE(admin_queue->q_depth);
1681 if (cq->entries)
1682 ENA_MEM_FREE_COHERENT(ena_dev->dmadev, size, cq->entries,
1683 cq->dma_addr, cq->mem_handle);
1684 cq->entries = NULL;
1685
1686 size = ADMIN_AENQ_SIZE(aenq->q_depth);
1687 if (ena_dev->aenq.entries)
1688 ENA_MEM_FREE_COHERENT(ena_dev->dmadev, size, aenq->entries,
1689 aenq->dma_addr, aenq->mem_handle);
1690 aenq->entries = NULL;
1691 ENA_SPINLOCK_DESTROY(admin_queue->q_lock);
1692 }
1693
1694 void ena_com_set_admin_polling_mode(struct ena_com_dev *ena_dev, bool polling)
1695 {
1696 u32 mask_value = 0;
1697
1698 if (polling)
1699 mask_value = ENA_REGS_ADMIN_INTR_MASK;
1700
1701 ENA_REG_WRITE32(ena_dev->bus, mask_value,
1702 ena_dev->reg_bar + ENA_REGS_INTR_MASK_OFF);
1703 ena_dev->admin_queue.polling = polling;
1704 }
1705
1706 bool ena_com_get_admin_polling_mode(struct ena_com_dev *ena_dev)
1707 {
1708 return ena_dev->admin_queue.polling;
1709 }
1710
1711 void ena_com_set_admin_auto_polling_mode(struct ena_com_dev *ena_dev,
1712 bool polling)
1713 {
1714 ena_dev->admin_queue.auto_polling = polling;
1715 }
1716
1717 int ena_com_mmio_reg_read_request_init(struct ena_com_dev *ena_dev)
1718 {
1719 struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;
1720
1721 ENA_SPINLOCK_INIT(mmio_read->lock);
1722 ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev,
1723 sizeof(*mmio_read->read_resp),
1724 mmio_read->read_resp,
1725 mmio_read->read_resp_dma_addr,
1726 mmio_read->read_resp_mem_handle);
1727 if (unlikely(!mmio_read->read_resp))
1728 goto err;
1729
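/* Tell the device where to DMA register read responses, so registers can be
 * read without relying on PCI read transactions ("readless" mode).
 */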
1730 ena_com_mmio_reg_read_request_write_dev_addr(ena_dev);
1731
1732 mmio_read->read_resp->req_id = 0x0;
1733 mmio_read->seq_num = 0x0;
1734 mmio_read->readless_supported = true;
1735
1736 return 0;
1737
1738 err:
1739 ENA_SPINLOCK_DESTROY(mmio_read->lock);
1740 return ENA_COM_NO_MEM;
1741 }
1742
1743 void ena_com_set_mmio_read_mode(struct ena_com_dev *ena_dev, bool readless_supported)
1744 {
1745 struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;
1746
1747 mmio_read->readless_supported = readless_supported;
1748 }
1749
1750 void ena_com_mmio_reg_read_request_destroy(struct ena_com_dev *ena_dev)
1751 {
1752 struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;
1753
1754 ENA_REG_WRITE32(ena_dev->bus, 0x0, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_LO_OFF);
1755 ENA_REG_WRITE32(ena_dev->bus, 0x0, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_HI_OFF);
1756
1757 ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
1758 sizeof(*mmio_read->read_resp),
1759 mmio_read->read_resp,
1760 mmio_read->read_resp_dma_addr,
1761 mmio_read->read_resp_mem_handle);
1762
1763 mmio_read->read_resp = NULL;
1764 ENA_SPINLOCK_DESTROY(mmio_read->lock);
1765 }
1766
1767 void ena_com_mmio_reg_read_request_write_dev_addr(struct ena_com_dev *ena_dev)
1768 {
1769 struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;
1770 u32 addr_low, addr_high;
1771
1772 addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(mmio_read->read_resp_dma_addr);
1773 addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(mmio_read->read_resp_dma_addr);
1774
1775 ENA_REG_WRITE32(ena_dev->bus, addr_low, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_LO_OFF);
1776 ENA_REG_WRITE32(ena_dev->bus, addr_high, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_HI_OFF);
1777 }
1778
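/* A minimal bring-up sketch of how these routines are typically ordered by a
 * caller (not taken from this file; the aenq_handlers and get_feat_ctx
 * variables and all error handling are assumed to live in the driver glue):
 *
 *	rc = ena_com_mmio_reg_read_request_init(ena_dev);
 *	if (!rc)
 *		rc = ena_com_validate_version(ena_dev);
 *	if (!rc)
 *		rc = ena_com_admin_init(ena_dev, &aenq_handlers);
 *	if (!rc)
 *		rc = ena_com_get_dev_attr_feat(ena_dev, &get_feat_ctx);
 */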
1779 int ena_com_admin_init(struct ena_com_dev *ena_dev,
1780 struct ena_aenq_handlers *aenq_handlers)
1781 {
1782 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
1783 u32 aq_caps, acq_caps, dev_sts, addr_low, addr_high;
1784 int ret;
1785
1786 dev_sts = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF);
1787
1788 if (unlikely(dev_sts == ENA_MMIO_READ_TIMEOUT)) {
1789 ena_trc_err("Reg read timeout occurred\n");
1790 return ENA_COM_TIMER_EXPIRED;
1791 }
1792
1793 if (!(dev_sts & ENA_REGS_DEV_STS_READY_MASK)) {
1794 ena_trc_err("Device isn't ready, abort com init\n");
1795 return ENA_COM_NO_DEVICE;
1796 }
1797
1798 admin_queue->q_depth = ENA_ADMIN_QUEUE_DEPTH;
1799
1800 admin_queue->bus = ena_dev->bus;
1801 admin_queue->q_dmadev = ena_dev->dmadev;
1802 admin_queue->polling = false;
1803 admin_queue->curr_cmd_id = 0;
1804
1805 ATOMIC32_SET(&admin_queue->outstanding_cmds, 0);
1806
1807 ENA_SPINLOCK_INIT(admin_queue->q_lock);
1808
1809 ret = ena_com_init_comp_ctxt(admin_queue);
1810 if (ret)
1811 goto error;
1812
1813 ret = ena_com_admin_init_sq(admin_queue);
1814 if (ret)
1815 goto error;
1816
1817 ret = ena_com_admin_init_cq(admin_queue);
1818 if (ret)
1819 goto error;
1820
1821 admin_queue->sq.db_addr = (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
1822 ENA_REGS_AQ_DB_OFF);
1823
1824 addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(admin_queue->sq.dma_addr);
1825 addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(admin_queue->sq.dma_addr);
1826
1827 ENA_REG_WRITE32(ena_dev->bus, addr_low, ena_dev->reg_bar + ENA_REGS_AQ_BASE_LO_OFF);
1828 ENA_REG_WRITE32(ena_dev->bus, addr_high, ena_dev->reg_bar + ENA_REGS_AQ_BASE_HI_OFF);
1829
1830 addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(admin_queue->cq.dma_addr);
1831 addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(admin_queue->cq.dma_addr);
1832
1833 ENA_REG_WRITE32(ena_dev->bus, addr_low, ena_dev->reg_bar + ENA_REGS_ACQ_BASE_LO_OFF);
1834 ENA_REG_WRITE32(ena_dev->bus, addr_high, ena_dev->reg_bar + ENA_REGS_ACQ_BASE_HI_OFF);
1835
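/* Report the admin submission/completion queue depth and entry size to the
 * device through the AQ/ACQ capabilities registers.
 */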
1836 aq_caps = 0;
1837 aq_caps |= admin_queue->q_depth & ENA_REGS_AQ_CAPS_AQ_DEPTH_MASK;
1838 aq_caps |= (sizeof(struct ena_admin_aq_entry) <<
1839 ENA_REGS_AQ_CAPS_AQ_ENTRY_SIZE_SHIFT) &
1840 ENA_REGS_AQ_CAPS_AQ_ENTRY_SIZE_MASK;
1841
1842 acq_caps = 0;
1843 acq_caps |= admin_queue->q_depth & ENA_REGS_ACQ_CAPS_ACQ_DEPTH_MASK;
1844 acq_caps |= (sizeof(struct ena_admin_acq_entry) <<
1845 ENA_REGS_ACQ_CAPS_ACQ_ENTRY_SIZE_SHIFT) &
1846 ENA_REGS_ACQ_CAPS_ACQ_ENTRY_SIZE_MASK;
1847
1848 ENA_REG_WRITE32(ena_dev->bus, aq_caps, ena_dev->reg_bar + ENA_REGS_AQ_CAPS_OFF);
1849 ENA_REG_WRITE32(ena_dev->bus, acq_caps, ena_dev->reg_bar + ENA_REGS_ACQ_CAPS_OFF);
1850 ret = ena_com_admin_init_aenq(ena_dev, aenq_handlers);
1851 if (ret)
1852 goto error;
1853
1854 admin_queue->ena_dev = ena_dev;
1855 admin_queue->running_state = true;
1856
1857 return 0;
1858 error:
1859 ena_com_admin_destroy(ena_dev);
1860
1861 return ret;
1862 }
1863
1864 int ena_com_create_io_queue(struct ena_com_dev *ena_dev,
1865 struct ena_com_create_io_ctx *ctx)
1866 {
1867 struct ena_com_io_sq *io_sq;
1868 struct ena_com_io_cq *io_cq;
1869 int ret;
1870
1871 if (ctx->qid >= ENA_TOTAL_NUM_QUEUES) {
1872 ena_trc_err("Qid (%d) is bigger than max num of queues (%d)\n",
1873 ctx->qid, ENA_TOTAL_NUM_QUEUES);
1874 return ENA_COM_INVAL;
1875 }
1876
1877 io_sq = &ena_dev->io_sq_queues[ctx->qid];
1878 io_cq = &ena_dev->io_cq_queues[ctx->qid];
1879
1880 memset(io_sq, 0x0, sizeof(*io_sq));
1881 memset(io_cq, 0x0, sizeof(*io_cq));
1882
1883 /* Init CQ */
1884 io_cq->q_depth = ctx->queue_size;
1885 io_cq->direction = ctx->direction;
1886 io_cq->qid = ctx->qid;
1887
1888 io_cq->msix_vector = ctx->msix_vector;
1889
1890 io_sq->q_depth = ctx->queue_size;
1891 io_sq->direction = ctx->direction;
1892 io_sq->qid = ctx->qid;
1893
1894 io_sq->mem_queue_type = ctx->mem_queue_type;
1895
1896 if (ctx->direction == ENA_COM_IO_QUEUE_DIRECTION_TX)
1897 /* header length is limited to 8 bits */
1898 io_sq->tx_max_header_size =
1899 ENA_MIN32(ena_dev->tx_max_header_size, SZ_256);
1900
1901 ret = ena_com_init_io_sq(ena_dev, ctx, io_sq);
1902 if (ret)
1903 goto error;
1904 ret = ena_com_init_io_cq(ena_dev, ctx, io_cq);
1905 if (ret)
1906 goto error;
1907
1908 ret = ena_com_create_io_cq(ena_dev, io_cq);
1909 if (ret)
1910 goto error;
1911
1912 ret = ena_com_create_io_sq(ena_dev, io_sq, io_cq->idx);
1913 if (ret)
1914 goto destroy_io_cq;
1915
1916 return 0;
1917
1918 destroy_io_cq:
1919 ena_com_destroy_io_cq(ena_dev, io_cq);
1920 error:
1921 ena_com_io_queue_free(ena_dev, io_sq, io_cq);
1922 return ret;
1923 }
1924
1925 void ena_com_destroy_io_queue(struct ena_com_dev *ena_dev, u16 qid)
1926 {
1927 struct ena_com_io_sq *io_sq;
1928 struct ena_com_io_cq *io_cq;
1929
1930 if (qid >= ENA_TOTAL_NUM_QUEUES) {
1931 ena_trc_err("Qid (%d) is bigger than max num of queues (%d)\n",
1932 qid, ENA_TOTAL_NUM_QUEUES);
1933 return;
1934 }
1935
1936 io_sq = &ena_dev->io_sq_queues[qid];
1937 io_cq = &ena_dev->io_cq_queues[qid];
1938
1939 ena_com_destroy_io_sq(ena_dev, io_sq);
1940 ena_com_destroy_io_cq(ena_dev, io_cq);
1941
1942 ena_com_io_queue_free(ena_dev, io_sq, io_cq);
1943 }
1944
1945 int ena_com_get_link_params(struct ena_com_dev *ena_dev,
1946 struct ena_admin_get_feat_resp *resp)
1947 {
1948 return ena_com_get_feature(ena_dev, resp, ENA_ADMIN_LINK_CONFIG, 0);
1949 }
1950
1951 int ena_com_get_dev_attr_feat(struct ena_com_dev *ena_dev,
1952 struct ena_com_dev_get_features_ctx *get_feat_ctx)
1953 {
1954 struct ena_admin_get_feat_resp get_resp;
1955 int rc;
1956
1957 rc = ena_com_get_feature(ena_dev, &get_resp,
1958 ENA_ADMIN_DEVICE_ATTRIBUTES, 0);
1959 if (rc)
1960 return rc;
1961
1962 memcpy(&get_feat_ctx->dev_attr, &get_resp.u.dev_attr,
1963 sizeof(get_resp.u.dev_attr));
1964 ena_dev->supported_features = get_resp.u.dev_attr.supported_features;
1965
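/* Newer devices expose their queue limits through the MAX_QUEUES_EXT feature;
 * fall back to the legacy MAX_QUEUES_NUM feature otherwise.
 */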
1966 if (ena_dev->supported_features & BIT(ENA_ADMIN_MAX_QUEUES_EXT)) {
1967 rc = ena_com_get_feature(ena_dev, &get_resp,
1968 ENA_ADMIN_MAX_QUEUES_EXT,
1969 ENA_FEATURE_MAX_QUEUE_EXT_VER);
1970 if (rc)
1971 return rc;
1972
1973 if (get_resp.u.max_queue_ext.version != ENA_FEATURE_MAX_QUEUE_EXT_VER)
1974 return -EINVAL;
1975
1976 memcpy(&get_feat_ctx->max_queue_ext, &get_resp.u.max_queue_ext,
1977 sizeof(get_resp.u.max_queue_ext));
1978 ena_dev->tx_max_header_size =
1979 get_resp.u.max_queue_ext.max_queue_ext.max_tx_header_size;
1980 } else {
1981 rc = ena_com_get_feature(ena_dev, &get_resp,
1982 ENA_ADMIN_MAX_QUEUES_NUM, 0);
1983 memcpy(&get_feat_ctx->max_queues, &get_resp.u.max_queue,
1984 sizeof(get_resp.u.max_queue));
1985 ena_dev->tx_max_header_size =
1986 get_resp.u.max_queue.max_header_size;
1987
1988 if (rc)
1989 return rc;
1990 }
1991
1992 rc = ena_com_get_feature(ena_dev, &get_resp,
1993 ENA_ADMIN_AENQ_CONFIG, 0);
1994 if (rc)
1995 return rc;
1996
1997 memcpy(&get_feat_ctx->aenq, &get_resp.u.aenq,
1998 sizeof(get_resp.u.aenq));
1999
2000 rc = ena_com_get_feature(ena_dev, &get_resp,
2001 ENA_ADMIN_STATELESS_OFFLOAD_CONFIG, 0);
2002 if (rc)
2003 return rc;
2004
2005 memcpy(&get_feat_ctx->offload, &get_resp.u.offload,
2006 sizeof(get_resp.u.offload));
2007
2008 	/* Driver hints isn't a mandatory admin command, so in case the
2009 	 * command isn't supported, set driver hints to 0.
2010 */
2011 rc = ena_com_get_feature(ena_dev, &get_resp, ENA_ADMIN_HW_HINTS, 0);
2012
2013 if (!rc)
2014 memcpy(&get_feat_ctx->hw_hints, &get_resp.u.hw_hints,
2015 sizeof(get_resp.u.hw_hints));
2016 else if (rc == ENA_COM_UNSUPPORTED)
2017 memset(&get_feat_ctx->hw_hints, 0x0, sizeof(get_feat_ctx->hw_hints));
2018 else
2019 return rc;
2020
2021 rc = ena_com_get_feature(ena_dev, &get_resp, ENA_ADMIN_LLQ, 0);
2022 if (!rc)
2023 memcpy(&get_feat_ctx->llq, &get_resp.u.llq,
2024 sizeof(get_resp.u.llq));
2025 else if (rc == ENA_COM_UNSUPPORTED)
2026 memset(&get_feat_ctx->llq, 0x0, sizeof(get_feat_ctx->llq));
2027 else
2028 return rc;
2029
2030 rc = ena_com_get_feature(ena_dev, &get_resp,
2031 ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG, 0);
2032 if (!rc)
2033 memcpy(&get_feat_ctx->ind_table, &get_resp.u.ind_table,
2034 sizeof(get_resp.u.ind_table));
2035 else if (rc == ENA_COM_UNSUPPORTED)
2036 memset(&get_feat_ctx->ind_table, 0x0,
2037 sizeof(get_feat_ctx->ind_table));
2038 else
2039 return rc;
2040
2041 return 0;
2042 }
2043
2044 void ena_com_admin_q_comp_intr_handler(struct ena_com_dev *ena_dev)
2045 {
2046 ena_com_handle_admin_completion(&ena_dev->admin_queue);
2047 }
2048
2049 /* ena_com_get_specific_aenq_cb:
2050 * return the handler that is relevant to the specific event group
2051 */
2052 static ena_aenq_handler ena_com_get_specific_aenq_cb(struct ena_com_dev *dev,
2053 u16 group)
2054 {
2055 struct ena_aenq_handlers *aenq_handlers = dev->aenq.aenq_handlers;
2056
2057 if ((group < ENA_MAX_HANDLERS) && aenq_handlers->handlers[group])
2058 return aenq_handlers->handlers[group];
2059
2060 return aenq_handlers->unimplemented_handler;
2061 }
2062
2063 /* ena_com_aenq_intr_handler:
2064  * handles the incoming AENQ events.
2065 * pop events from the queue and apply the specific handler
2066 */
2067 void ena_com_aenq_intr_handler(struct ena_com_dev *dev, void *data)
2068 {
2069 struct ena_admin_aenq_entry *aenq_e;
2070 struct ena_admin_aenq_common_desc *aenq_common;
2071 struct ena_com_aenq *aenq = &dev->aenq;
2072 u64 timestamp;
2073 ena_aenq_handler handler_cb;
2074 u16 masked_head, processed = 0;
2075 u8 phase;
2076
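/* aenq->head is a free-running counter; mask it with the queue depth
 * (a power of two) to get the current ring index.
 */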
2077 masked_head = aenq->head & (aenq->q_depth - 1);
2078 phase = aenq->phase;
2079 aenq_e = &aenq->entries[masked_head]; /* Get first entry */
2080 aenq_common = &aenq_e->aenq_common_desc;
2081
2082 /* Go over all the events */
2083 while ((READ_ONCE8(aenq_common->flags) &
2084 ENA_ADMIN_AENQ_COMMON_DESC_PHASE_MASK) == phase) {
2085 /* Make sure the phase bit (ownership) is as expected before
2086 * reading the rest of the descriptor.
2087 */
2088 dma_rmb();
2089
2090 timestamp = (u64)aenq_common->timestamp_low |
2091 ((u64)aenq_common->timestamp_high << 32);
2092 ENA_TOUCH(timestamp); /* In case debug is disabled */
2093 ena_trc_dbg("AENQ! Group[%x] Syndrom[%x] timestamp: [%" ENA_PRIu64 "s]\n",
2094 aenq_common->group,
2095 aenq_common->syndrom,
2096 timestamp);
2097
2098 		/* Handle specific event */
2099 handler_cb = ena_com_get_specific_aenq_cb(dev,
2100 aenq_common->group);
2101 		handler_cb(data, aenq_e); /* call the actual event handler */
2102
2103 /* Get next event entry */
2104 masked_head++;
2105 processed++;
2106
2107 if (unlikely(masked_head == aenq->q_depth)) {
2108 masked_head = 0;
2109 phase = !phase;
2110 }
2111 aenq_e = &aenq->entries[masked_head];
2112 aenq_common = &aenq_e->aenq_common_desc;
2113 }
2114
2115 aenq->head += processed;
2116 aenq->phase = phase;
2117
2118 /* Don't update aenq doorbell if there weren't any processed events */
2119 if (!processed)
2120 return;
2121
2122 /* write the aenq doorbell after all AENQ descriptors were read */
2123 mb();
2124 ENA_REG_WRITE32_RELAXED(dev->bus, (u32)aenq->head,
2125 dev->reg_bar + ENA_REGS_AENQ_HEAD_DB_OFF);
2126 mmiowb();
2127 }
2128
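/* Reset flow: set the DEV_CTL reset bit, wait for RESET_IN_PROGRESS to be
 * raised, clear DEV_CTL, then wait for RESET_IN_PROGRESS to drop again.
 * The admin command completion timeout is re-derived from the CAPS register
 * once the reset completes.
 */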
2129 int ena_com_dev_reset(struct ena_com_dev *ena_dev,
2130 enum ena_regs_reset_reason_types reset_reason)
2131 {
2132 u32 stat, timeout, cap, reset_val;
2133 int rc;
2134
2135 stat = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF);
2136 cap = ena_com_reg_bar_read32(ena_dev, ENA_REGS_CAPS_OFF);
2137
2138 if (unlikely((stat == ENA_MMIO_READ_TIMEOUT) ||
2139 (cap == ENA_MMIO_READ_TIMEOUT))) {
2140 ena_trc_err("Reg read32 timeout occurred\n");
2141 return ENA_COM_TIMER_EXPIRED;
2142 }
2143
2144 if ((stat & ENA_REGS_DEV_STS_READY_MASK) == 0) {
2145 ena_trc_err("Device isn't ready, can't reset device\n");
2146 return ENA_COM_INVAL;
2147 }
2148
2149 timeout = (cap & ENA_REGS_CAPS_RESET_TIMEOUT_MASK) >>
2150 ENA_REGS_CAPS_RESET_TIMEOUT_SHIFT;
2151 if (timeout == 0) {
2152 ena_trc_err("Invalid timeout value\n");
2153 return ENA_COM_INVAL;
2154 }
2155
2156 /* start reset */
2157 reset_val = ENA_REGS_DEV_CTL_DEV_RESET_MASK;
2158 reset_val |= (reset_reason << ENA_REGS_DEV_CTL_RESET_REASON_SHIFT) &
2159 ENA_REGS_DEV_CTL_RESET_REASON_MASK;
2160 ENA_REG_WRITE32(ena_dev->bus, reset_val, ena_dev->reg_bar + ENA_REGS_DEV_CTL_OFF);
2161
2162 /* Write again the MMIO read request address */
2163 ena_com_mmio_reg_read_request_write_dev_addr(ena_dev);
2164
2165 rc = wait_for_reset_state(ena_dev, timeout,
2166 ENA_REGS_DEV_STS_RESET_IN_PROGRESS_MASK);
2167 if (rc != 0) {
2168 ena_trc_err("Reset indication didn't turn on\n");
2169 return rc;
2170 }
2171
2172 /* reset done */
2173 ENA_REG_WRITE32(ena_dev->bus, 0, ena_dev->reg_bar + ENA_REGS_DEV_CTL_OFF);
2174 rc = wait_for_reset_state(ena_dev, timeout, 0);
2175 if (rc != 0) {
2176 ena_trc_err("Reset indication didn't turn off\n");
2177 return rc;
2178 }
2179
2180 timeout = (cap & ENA_REGS_CAPS_ADMIN_CMD_TO_MASK) >>
2181 ENA_REGS_CAPS_ADMIN_CMD_TO_SHIFT;
2182 if (timeout)
2183 /* the resolution of timeout reg is 100ms */
2184 ena_dev->admin_queue.completion_timeout = timeout * 100000;
2185 else
2186 ena_dev->admin_queue.completion_timeout = ADMIN_CMD_TIMEOUT_US;
2187
2188 return 0;
2189 }
2190
2191 static int ena_get_dev_stats(struct ena_com_dev *ena_dev,
2192 struct ena_com_stats_ctx *ctx,
2193 enum ena_admin_get_stats_type type)
2194 {
2195 struct ena_admin_aq_get_stats_cmd *get_cmd = &ctx->get_cmd;
2196 struct ena_admin_acq_get_stats_resp *get_resp = &ctx->get_resp;
2197 struct ena_com_admin_queue *admin_queue;
2198 int ret;
2199
2200 admin_queue = &ena_dev->admin_queue;
2201
2202 get_cmd->aq_common_descriptor.opcode = ENA_ADMIN_GET_STATS;
2203 get_cmd->aq_common_descriptor.flags = 0;
2204 get_cmd->type = type;
2205
2206 ret = ena_com_execute_admin_command(admin_queue,
2207 (struct ena_admin_aq_entry *)get_cmd,
2208 sizeof(*get_cmd),
2209 (struct ena_admin_acq_entry *)get_resp,
2210 sizeof(*get_resp));
2211
2212 if (unlikely(ret))
2213 ena_trc_err("Failed to get stats. error: %d\n", ret);
2214
2215 return ret;
2216 }
2217
2218 int ena_com_get_eni_stats(struct ena_com_dev *ena_dev,
2219 struct ena_admin_eni_stats *stats)
2220 {
2221 struct ena_com_stats_ctx ctx;
2222 int ret;
2223
2224 memset(&ctx, 0x0, sizeof(ctx));
2225 ret = ena_get_dev_stats(ena_dev, &ctx, ENA_ADMIN_GET_STATS_TYPE_ENI);
2226 if (likely(ret == 0))
2227 memcpy(stats, &ctx.get_resp.u.eni_stats,
2228 sizeof(ctx.get_resp.u.eni_stats));
2229
2230 return ret;
2231 }
2232
2233 int ena_com_get_dev_basic_stats(struct ena_com_dev *ena_dev,
2234 struct ena_admin_basic_stats *stats)
2235 {
2236 struct ena_com_stats_ctx ctx;
2237 int ret;
2238
2239 memset(&ctx, 0x0, sizeof(ctx));
2240 ret = ena_get_dev_stats(ena_dev, &ctx, ENA_ADMIN_GET_STATS_TYPE_BASIC);
2241 if (likely(ret == 0))
2242 memcpy(stats, &ctx.get_resp.u.basic_stats,
2243 sizeof(ctx.get_resp.u.basic_stats));
2244
2245 return ret;
2246 }
2247
2248 int ena_com_set_dev_mtu(struct ena_com_dev *ena_dev, int mtu)
2249 {
2250 struct ena_com_admin_queue *admin_queue;
2251 struct ena_admin_set_feat_cmd cmd;
2252 struct ena_admin_set_feat_resp resp;
2253 int ret;
2254
2255 if (!ena_com_check_supported_feature_id(ena_dev, ENA_ADMIN_MTU)) {
2256 ena_trc_dbg("Feature %d isn't supported\n", ENA_ADMIN_MTU);
2257 return ENA_COM_UNSUPPORTED;
2258 }
2259
2260 memset(&cmd, 0x0, sizeof(cmd));
2261 admin_queue = &ena_dev->admin_queue;
2262
2263 cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
2264 cmd.aq_common_descriptor.flags = 0;
2265 cmd.feat_common.feature_id = ENA_ADMIN_MTU;
2266 cmd.u.mtu.mtu = mtu;
2267
2268 ret = ena_com_execute_admin_command(admin_queue,
2269 (struct ena_admin_aq_entry *)&cmd,
2270 sizeof(cmd),
2271 (struct ena_admin_acq_entry *)&resp,
2272 sizeof(resp));
2273
2274 if (unlikely(ret))
2275 ena_trc_err("Failed to set mtu %d. error: %d\n", mtu, ret);
2276
2277 return ret;
2278 }
2279
2280 int ena_com_get_offload_settings(struct ena_com_dev *ena_dev,
2281 struct ena_admin_feature_offload_desc *offload)
2282 {
2283 int ret;
2284 struct ena_admin_get_feat_resp resp;
2285
2286 ret = ena_com_get_feature(ena_dev, &resp,
2287 ENA_ADMIN_STATELESS_OFFLOAD_CONFIG, 0);
2288 if (unlikely(ret)) {
2289 ena_trc_err("Failed to get offload capabilities %d\n", ret);
2290 return ret;
2291 }
2292
2293 memcpy(offload, &resp.u.offload, sizeof(resp.u.offload));
2294
2295 return 0;
2296 }
2297
2298 int ena_com_set_hash_function(struct ena_com_dev *ena_dev)
2299 {
2300 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
2301 struct ena_rss *rss = &ena_dev->rss;
2302 struct ena_admin_set_feat_cmd cmd;
2303 struct ena_admin_set_feat_resp resp;
2304 struct ena_admin_get_feat_resp get_resp;
2305 int ret;
2306
2307 if (!ena_com_check_supported_feature_id(ena_dev,
2308 ENA_ADMIN_RSS_HASH_FUNCTION)) {
2309 ena_trc_dbg("Feature %d isn't supported\n",
2310 ENA_ADMIN_RSS_HASH_FUNCTION);
2311 return ENA_COM_UNSUPPORTED;
2312 }
2313
2314 /* Validate hash function is supported */
2315 ret = ena_com_get_feature(ena_dev, &get_resp,
2316 ENA_ADMIN_RSS_HASH_FUNCTION, 0);
2317 if (unlikely(ret))
2318 return ret;
2319
2320 if (!(get_resp.u.flow_hash_func.supported_func & BIT(rss->hash_func))) {
2321 ena_trc_err("Func hash %d isn't supported by device, abort\n",
2322 rss->hash_func);
2323 return ENA_COM_UNSUPPORTED;
2324 }
2325
2326 memset(&cmd, 0x0, sizeof(cmd));
2327
2328 cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
2329 cmd.aq_common_descriptor.flags =
2330 ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
2331 cmd.feat_common.feature_id = ENA_ADMIN_RSS_HASH_FUNCTION;
2332 cmd.u.flow_hash_func.init_val = rss->hash_init_val;
2333 cmd.u.flow_hash_func.selected_func = 1 << rss->hash_func;
2334
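/* The hash key itself is passed out-of-band: the command only carries the DMA
 * address and length of the pre-allocated key buffer (DATA_INDIRECT).
 */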
2335 ret = ena_com_mem_addr_set(ena_dev,
2336 &cmd.control_buffer.address,
2337 rss->hash_key_dma_addr);
2338 if (unlikely(ret)) {
2339 ena_trc_err("memory address set failed\n");
2340 return ret;
2341 }
2342
2343 cmd.control_buffer.length = sizeof(*rss->hash_key);
2344
2345 ret = ena_com_execute_admin_command(admin_queue,
2346 (struct ena_admin_aq_entry *)&cmd,
2347 sizeof(cmd),
2348 (struct ena_admin_acq_entry *)&resp,
2349 sizeof(resp));
2350 if (unlikely(ret)) {
2351 ena_trc_err("Failed to set hash function %d. error: %d\n",
2352 rss->hash_func, ret);
2353 return ENA_COM_INVAL;
2354 }
2355
2356 return 0;
2357 }
2358
2359 int ena_com_fill_hash_function(struct ena_com_dev *ena_dev,
2360 enum ena_admin_hash_functions func,
2361 const u8 *key, u16 key_len, u32 init_val)
2362 {
2363 struct ena_admin_feature_rss_flow_hash_control *hash_key;
2364 struct ena_admin_get_feat_resp get_resp;
2365 enum ena_admin_hash_functions old_func;
2366 struct ena_rss *rss = &ena_dev->rss;
2367 int rc;
2368
2369 hash_key = rss->hash_key;
2370
2371 	/* Make sure the key length is a multiple of DWORDs */
2372 if (unlikely(key_len & 0x3))
2373 return ENA_COM_INVAL;
2374
2375 rc = ena_com_get_feature_ex(ena_dev, &get_resp,
2376 ENA_ADMIN_RSS_HASH_FUNCTION,
2377 rss->hash_key_dma_addr,
2378 sizeof(*rss->hash_key), 0);
2379 if (unlikely(rc))
2380 return rc;
2381
2382 if (!(BIT(func) & get_resp.u.flow_hash_func.supported_func)) {
2383 ena_trc_err("Flow hash function %d isn't supported\n", func);
2384 return ENA_COM_UNSUPPORTED;
2385 }
2386
2387 switch (func) {
2388 case ENA_ADMIN_TOEPLITZ:
2389 if (key) {
2390 if (key_len != sizeof(hash_key->key)) {
2391 ena_trc_err("key len (%hu) doesn't equal the supported size (%zu)\n",
2392 key_len, sizeof(hash_key->key));
2393 return ENA_COM_INVAL;
2394 }
2395 memcpy(hash_key->key, key, key_len);
2396 rss->hash_init_val = init_val;
2397 hash_key->keys_num = key_len / sizeof(u32);
2398 }
2399 break;
2400 case ENA_ADMIN_CRC32:
2401 rss->hash_init_val = init_val;
2402 break;
2403 default:
2404 ena_trc_err("Invalid hash function (%d)\n", func);
2405 return ENA_COM_INVAL;
2406 }
2407
2408 old_func = rss->hash_func;
2409 rss->hash_func = func;
2410 rc = ena_com_set_hash_function(ena_dev);
2411
2412 /* Restore the old function */
2413 if (unlikely(rc))
2414 rss->hash_func = old_func;
2415
2416 return rc;
2417 }
2418
2419 int ena_com_get_hash_function(struct ena_com_dev *ena_dev,
2420 enum ena_admin_hash_functions *func)
2421 {
2422 struct ena_rss *rss = &ena_dev->rss;
2423 struct ena_admin_get_feat_resp get_resp;
2424 int rc;
2425
2426 if (unlikely(!func))
2427 return ENA_COM_INVAL;
2428
2429 rc = ena_com_get_feature_ex(ena_dev, &get_resp,
2430 ENA_ADMIN_RSS_HASH_FUNCTION,
2431 rss->hash_key_dma_addr,
2432 sizeof(*rss->hash_key), 0);
2433 if (unlikely(rc))
2434 return rc;
2435
2436 /* ENA_FFS() returns 1 in case the lsb is set */
2437 rss->hash_func = ENA_FFS(get_resp.u.flow_hash_func.selected_func);
2438 if (rss->hash_func)
2439 rss->hash_func--;
2440
2441 *func = rss->hash_func;
2442
2443 return 0;
2444 }
2445
2446 int ena_com_get_hash_key(struct ena_com_dev *ena_dev, u8 *key)
2447 {
2448 struct ena_admin_feature_rss_flow_hash_control *hash_key =
2449 ena_dev->rss.hash_key;
2450
2451 if (key)
2452 memcpy(key, hash_key->key, (size_t)(hash_key->keys_num) << 2);
2453
2454 return 0;
2455 }
2456
2457 int ena_com_get_hash_ctrl(struct ena_com_dev *ena_dev,
2458 enum ena_admin_flow_hash_proto proto,
2459 u16 *fields)
2460 {
2461 struct ena_rss *rss = &ena_dev->rss;
2462 struct ena_admin_get_feat_resp get_resp;
2463 int rc;
2464
2465 rc = ena_com_get_feature_ex(ena_dev, &get_resp,
2466 ENA_ADMIN_RSS_HASH_INPUT,
2467 rss->hash_ctrl_dma_addr,
2468 sizeof(*rss->hash_ctrl), 0);
2469 if (unlikely(rc))
2470 return rc;
2471
2472 if (fields)
2473 *fields = rss->hash_ctrl->selected_fields[proto].fields;
2474
2475 return 0;
2476 }
2477
2478 int ena_com_set_hash_ctrl(struct ena_com_dev *ena_dev)
2479 {
2480 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
2481 struct ena_rss *rss = &ena_dev->rss;
2482 struct ena_admin_feature_rss_hash_control *hash_ctrl = rss->hash_ctrl;
2483 struct ena_admin_set_feat_cmd cmd;
2484 struct ena_admin_set_feat_resp resp;
2485 int ret;
2486
2487 if (!ena_com_check_supported_feature_id(ena_dev,
2488 ENA_ADMIN_RSS_HASH_INPUT)) {
2489 ena_trc_dbg("Feature %d isn't supported\n",
2490 ENA_ADMIN_RSS_HASH_INPUT);
2491 return ENA_COM_UNSUPPORTED;
2492 }
2493
2494 memset(&cmd, 0x0, sizeof(cmd));
2495
2496 cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
2497 cmd.aq_common_descriptor.flags =
2498 ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
2499 cmd.feat_common.feature_id = ENA_ADMIN_RSS_HASH_INPUT;
2500 cmd.u.flow_hash_input.enabled_input_sort =
2501 ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L3_SORT_MASK |
2502 ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L4_SORT_MASK;
2503
2504 ret = ena_com_mem_addr_set(ena_dev,
2505 &cmd.control_buffer.address,
2506 rss->hash_ctrl_dma_addr);
2507 if (unlikely(ret)) {
2508 ena_trc_err("memory address set failed\n");
2509 return ret;
2510 }
2511 cmd.control_buffer.length = sizeof(*hash_ctrl);
2512
2513 ret = ena_com_execute_admin_command(admin_queue,
2514 (struct ena_admin_aq_entry *)&cmd,
2515 sizeof(cmd),
2516 (struct ena_admin_acq_entry *)&resp,
2517 sizeof(resp));
2518 if (unlikely(ret))
2519 ena_trc_err("Failed to set hash input. error: %d\n", ret);
2520
2521 return ret;
2522 }
2523
2524 int ena_com_set_default_hash_ctrl(struct ena_com_dev *ena_dev)
2525 {
2526 struct ena_rss *rss = &ena_dev->rss;
2527 struct ena_admin_feature_rss_hash_control *hash_ctrl =
2528 rss->hash_ctrl;
2529 u16 available_fields = 0;
2530 int rc, i;
2531
2532 /* Get the supported hash input */
2533 rc = ena_com_get_hash_ctrl(ena_dev, 0, NULL);
2534 if (unlikely(rc))
2535 return rc;
2536
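/* Default hash input: L3 source/destination addresses (plus L4 source and
 * destination ports for TCP/UDP); non-IP traffic falls back to L2 addresses.
 */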
2537 hash_ctrl->selected_fields[ENA_ADMIN_RSS_TCP4].fields =
2538 ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
2539 ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP;
2540
2541 hash_ctrl->selected_fields[ENA_ADMIN_RSS_UDP4].fields =
2542 ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
2543 ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP;
2544
2545 hash_ctrl->selected_fields[ENA_ADMIN_RSS_TCP6].fields =
2546 ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
2547 ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP;
2548
2549 hash_ctrl->selected_fields[ENA_ADMIN_RSS_UDP6].fields =
2550 ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
2551 ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP;
2552
2553 hash_ctrl->selected_fields[ENA_ADMIN_RSS_IP4].fields =
2554 ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA;
2555
2556 hash_ctrl->selected_fields[ENA_ADMIN_RSS_IP6].fields =
2557 ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA;
2558
2559 hash_ctrl->selected_fields[ENA_ADMIN_RSS_IP4_FRAG].fields =
2560 ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA;
2561
2562 hash_ctrl->selected_fields[ENA_ADMIN_RSS_NOT_IP].fields =
2563 ENA_ADMIN_RSS_L2_DA | ENA_ADMIN_RSS_L2_SA;
2564
2565 for (i = 0; i < ENA_ADMIN_RSS_PROTO_NUM; i++) {
2566 available_fields = hash_ctrl->selected_fields[i].fields &
2567 hash_ctrl->supported_fields[i].fields;
2568 if (available_fields != hash_ctrl->selected_fields[i].fields) {
2569 ena_trc_err("hash control doesn't support all the desire configuration. proto %x supported %x selected %x\n",
2570 i, hash_ctrl->supported_fields[i].fields,
2571 hash_ctrl->selected_fields[i].fields);
2572 return ENA_COM_UNSUPPORTED;
2573 }
2574 }
2575
2576 rc = ena_com_set_hash_ctrl(ena_dev);
2577
2578 /* In case of failure, restore the old hash ctrl */
2579 if (unlikely(rc))
2580 ena_com_get_hash_ctrl(ena_dev, 0, NULL);
2581
2582 return rc;
2583 }
2584
2585 int ena_com_fill_hash_ctrl(struct ena_com_dev *ena_dev,
2586 enum ena_admin_flow_hash_proto proto,
2587 u16 hash_fields)
2588 {
2589 struct ena_rss *rss = &ena_dev->rss;
2590 struct ena_admin_feature_rss_hash_control *hash_ctrl = rss->hash_ctrl;
2591 u16 supported_fields;
2592 int rc;
2593
2594 if (proto >= ENA_ADMIN_RSS_PROTO_NUM) {
2595 ena_trc_err("Invalid proto num (%u)\n", proto);
2596 return ENA_COM_INVAL;
2597 }
2598
2599 /* Get the ctrl table */
2600 rc = ena_com_get_hash_ctrl(ena_dev, proto, NULL);
2601 if (unlikely(rc))
2602 return rc;
2603
2604 /* Make sure all the fields are supported */
2605 supported_fields = hash_ctrl->supported_fields[proto].fields;
2606 if ((hash_fields & supported_fields) != hash_fields) {
2607 ena_trc_err("proto %d doesn't support the required fields %x. supports only: %x\n",
2608 proto, hash_fields, supported_fields);
2609 }
2610
2611 hash_ctrl->selected_fields[proto].fields = hash_fields;
2612
2613 rc = ena_com_set_hash_ctrl(ena_dev);
2614
2615 /* In case of failure, restore the old hash ctrl */
2616 if (unlikely(rc))
2617 ena_com_get_hash_ctrl(ena_dev, 0, NULL);
2618
2619 	return rc;
2620 }
2621
2622 int ena_com_indirect_table_fill_entry(struct ena_com_dev *ena_dev,
2623 u16 entry_idx, u16 entry_value)
2624 {
2625 struct ena_rss *rss = &ena_dev->rss;
2626
2627 if (unlikely(entry_idx >= (1 << rss->tbl_log_size)))
2628 return ENA_COM_INVAL;
2629
2630 	if (unlikely(entry_value > ENA_TOTAL_NUM_QUEUES))
2631 return ENA_COM_INVAL;
2632
2633 rss->host_rss_ind_tbl[entry_idx] = entry_value;
2634
2635 return 0;
2636 }
2637
2638 int ena_com_indirect_table_set(struct ena_com_dev *ena_dev)
2639 {
2640 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
2641 struct ena_rss *rss = &ena_dev->rss;
2642 struct ena_admin_set_feat_cmd cmd;
2643 struct ena_admin_set_feat_resp resp;
2644 int ret;
2645
2646 if (!ena_com_check_supported_feature_id(ena_dev,
2647 ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG)) {
2648 ena_trc_dbg("Feature %d isn't supported\n",
2649 ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG);
2650 return ENA_COM_UNSUPPORTED;
2651 }
2652
2653 ret = ena_com_ind_tbl_convert_to_device(ena_dev);
2654 if (ret) {
2655 ena_trc_err("Failed to convert host indirection table to device table\n");
2656 return ret;
2657 }
2658
2659 memset(&cmd, 0x0, sizeof(cmd));
2660
2661 cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
2662 cmd.aq_common_descriptor.flags =
2663 ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
2664 cmd.feat_common.feature_id = ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG;
2665 cmd.u.ind_table.size = rss->tbl_log_size;
2666 cmd.u.ind_table.inline_index = 0xFFFFFFFF;
2667
2668 ret = ena_com_mem_addr_set(ena_dev,
2669 &cmd.control_buffer.address,
2670 rss->rss_ind_tbl_dma_addr);
2671 if (unlikely(ret)) {
2672 ena_trc_err("memory address set failed\n");
2673 return ret;
2674 }
2675
2676 cmd.control_buffer.length = (1ULL << rss->tbl_log_size) *
2677 sizeof(struct ena_admin_rss_ind_table_entry);
2678
2679 ret = ena_com_execute_admin_command(admin_queue,
2680 (struct ena_admin_aq_entry *)&cmd,
2681 sizeof(cmd),
2682 (struct ena_admin_acq_entry *)&resp,
2683 sizeof(resp));
2684
2685 if (unlikely(ret))
2686 ena_trc_err("Failed to set indirect table. error: %d\n", ret);
2687
2688 return ret;
2689 }
2690
2691 int ena_com_indirect_table_get(struct ena_com_dev *ena_dev, u32 *ind_tbl)
2692 {
2693 struct ena_rss *rss = &ena_dev->rss;
2694 struct ena_admin_get_feat_resp get_resp;
2695 u32 tbl_size;
2696 int i, rc;
2697
2698 tbl_size = (1ULL << rss->tbl_log_size) *
2699 sizeof(struct ena_admin_rss_ind_table_entry);
2700
2701 rc = ena_com_get_feature_ex(ena_dev, &get_resp,
2702 ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG,
2703 rss->rss_ind_tbl_dma_addr,
2704 tbl_size, 0);
2705 if (unlikely(rc))
2706 return rc;
2707
2708 if (!ind_tbl)
2709 return 0;
2710
2711 for (i = 0; i < (1 << rss->tbl_log_size); i++)
2712 ind_tbl[i] = rss->host_rss_ind_tbl[i];
2713
2714 return 0;
2715 }
2716
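/* A minimal RSS bring-up sketch (not from this file). The table log size,
 * the number of RX queues, and the mapping from table entry to queue value
 * are driver-specific assumptions of the caller:
 *
 *	rc = ena_com_rss_init(ena_dev, tbl_log_size);
 *	for (i = 0; !rc && i < (1 << tbl_log_size); i++)
 *		rc = ena_com_indirect_table_fill_entry(ena_dev, i,
 *						       i % num_rx_queues);
 *	if (!rc)
 *		rc = ena_com_indirect_table_set(ena_dev);
 */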
2717 int ena_com_rss_init(struct ena_com_dev *ena_dev, u16 indr_tbl_log_size)
2718 {
2719 int rc;
2720
2721 memset(&ena_dev->rss, 0x0, sizeof(ena_dev->rss));
2722
2723 rc = ena_com_indirect_table_allocate(ena_dev, indr_tbl_log_size);
2724 if (unlikely(rc))
2725 goto err_indr_tbl;
2726
2727 /* The following function might return unsupported in case the
2728 * device doesn't support setting the key / hash function. We can safely
2729 * ignore this error and have indirection table support only.
2730 */
2731 rc = ena_com_hash_key_allocate(ena_dev);
2732 if (likely(!rc))
2733 ena_com_hash_key_fill_default_key(ena_dev);
2734 else if (rc != ENA_COM_UNSUPPORTED)
2735 goto err_hash_key;
2736
2737 rc = ena_com_hash_ctrl_init(ena_dev);
2738 if (unlikely(rc))
2739 goto err_hash_ctrl;
2740
2741 return 0;
2742
2743 err_hash_ctrl:
2744 ena_com_hash_key_destroy(ena_dev);
2745 err_hash_key:
2746 ena_com_indirect_table_destroy(ena_dev);
2747 err_indr_tbl:
2748
2749 return rc;
2750 }
2751
2752 void ena_com_rss_destroy(struct ena_com_dev *ena_dev)
2753 {
2754 ena_com_indirect_table_destroy(ena_dev);
2755 ena_com_hash_key_destroy(ena_dev);
2756 ena_com_hash_ctrl_destroy(ena_dev);
2757
2758 memset(&ena_dev->rss, 0x0, sizeof(ena_dev->rss));
2759 }
2760
2761 int ena_com_allocate_host_info(struct ena_com_dev *ena_dev)
2762 {
2763 struct ena_host_attribute *host_attr = &ena_dev->host_attr;
2764
2765 ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev,
2766 SZ_4K,
2767 host_attr->host_info,
2768 host_attr->host_info_dma_addr,
2769 host_attr->host_info_dma_handle);
2770 if (unlikely(!host_attr->host_info))
2771 return ENA_COM_NO_MEM;
2772
2773 host_attr->host_info->ena_spec_version = ((ENA_COMMON_SPEC_VERSION_MAJOR <<
2774 ENA_REGS_VERSION_MAJOR_VERSION_SHIFT) |
2775 (ENA_COMMON_SPEC_VERSION_MINOR));
2776
2777 return 0;
2778 }
2779
2780 int ena_com_allocate_debug_area(struct ena_com_dev *ena_dev,
2781 u32 debug_area_size)
2782 {
2783 struct ena_host_attribute *host_attr = &ena_dev->host_attr;
2784
2785 ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev,
2786 debug_area_size,
2787 host_attr->debug_area_virt_addr,
2788 host_attr->debug_area_dma_addr,
2789 host_attr->debug_area_dma_handle);
2790 if (unlikely(!host_attr->debug_area_virt_addr)) {
2791 host_attr->debug_area_size = 0;
2792 return ENA_COM_NO_MEM;
2793 }
2794
2795 host_attr->debug_area_size = debug_area_size;
2796
2797 return 0;
2798 }
2799
2800 void ena_com_delete_host_info(struct ena_com_dev *ena_dev)
2801 {
2802 struct ena_host_attribute *host_attr = &ena_dev->host_attr;
2803
2804 if (host_attr->host_info) {
2805 ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
2806 SZ_4K,
2807 host_attr->host_info,
2808 host_attr->host_info_dma_addr,
2809 host_attr->host_info_dma_handle);
2810 host_attr->host_info = NULL;
2811 }
2812 }
2813
2814 void ena_com_delete_debug_area(struct ena_com_dev *ena_dev)
2815 {
2816 struct ena_host_attribute *host_attr = &ena_dev->host_attr;
2817
2818 if (host_attr->debug_area_virt_addr) {
2819 ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
2820 host_attr->debug_area_size,
2821 host_attr->debug_area_virt_addr,
2822 host_attr->debug_area_dma_addr,
2823 host_attr->debug_area_dma_handle);
2824 host_attr->debug_area_virt_addr = NULL;
2825 }
2826 }
2827
2828 int ena_com_set_host_attributes(struct ena_com_dev *ena_dev)
2829 {
2830 struct ena_host_attribute *host_attr = &ena_dev->host_attr;
2831 struct ena_com_admin_queue *admin_queue;
2832 struct ena_admin_set_feat_cmd cmd;
2833 struct ena_admin_set_feat_resp resp;
2834
2835 int ret;
2836
2837 /* Host attribute config is called before ena_com_get_dev_attr_feat
2838 * so ena_com can't check if the feature is supported.
2839 */
2840
2841 memset(&cmd, 0x0, sizeof(cmd));
2842 admin_queue = &ena_dev->admin_queue;
2843
2844 cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
2845 cmd.feat_common.feature_id = ENA_ADMIN_HOST_ATTR_CONFIG;
2846
2847 ret = ena_com_mem_addr_set(ena_dev,
2848 &cmd.u.host_attr.debug_ba,
2849 host_attr->debug_area_dma_addr);
2850 if (unlikely(ret)) {
2851 ena_trc_err("memory address set failed\n");
2852 return ret;
2853 }
2854
2855 ret = ena_com_mem_addr_set(ena_dev,
2856 &cmd.u.host_attr.os_info_ba,
2857 host_attr->host_info_dma_addr);
2858 if (unlikely(ret)) {
2859 ena_trc_err("memory address set failed\n");
2860 return ret;
2861 }
2862
2863 cmd.u.host_attr.debug_area_size = host_attr->debug_area_size;
2864
2865 ret = ena_com_execute_admin_command(admin_queue,
2866 (struct ena_admin_aq_entry *)&cmd,
2867 sizeof(cmd),
2868 (struct ena_admin_acq_entry *)&resp,
2869 sizeof(resp));
2870
2871 if (unlikely(ret))
2872 ena_trc_err("Failed to set host attributes: %d\n", ret);
2873
2874 return ret;
2875 }
2876
2877 /* Interrupt moderation */
2878 bool ena_com_interrupt_moderation_supported(struct ena_com_dev *ena_dev)
2879 {
2880 return ena_com_check_supported_feature_id(ena_dev,
2881 ENA_ADMIN_INTERRUPT_MODERATION);
2882 }
2883
2884 static int ena_com_update_nonadaptive_moderation_interval(u32 coalesce_usecs,
2885 u32 intr_delay_resolution,
2886 u32 *intr_moder_interval)
2887 {
2888 if (!intr_delay_resolution) {
2889 ena_trc_err("Illegal interrupt delay granularity value\n");
2890 return ENA_COM_FAULT;
2891 }
2892
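/* Convert the requested coalescing time (in usecs) into device units of
 * intr_delay_resolution usecs each.
 */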
2893 *intr_moder_interval = coalesce_usecs / intr_delay_resolution;
2894
2895 return 0;
2896 }
2897
2898
2899 int ena_com_update_nonadaptive_moderation_interval_tx(struct ena_com_dev *ena_dev,
2900 u32 tx_coalesce_usecs)
2901 {
2902 return ena_com_update_nonadaptive_moderation_interval(tx_coalesce_usecs,
2903 ena_dev->intr_delay_resolution,
2904 &ena_dev->intr_moder_tx_interval);
2905 }
2906
2907 int ena_com_update_nonadaptive_moderation_interval_rx(struct ena_com_dev *ena_dev,
2908 u32 rx_coalesce_usecs)
2909 {
2910 return ena_com_update_nonadaptive_moderation_interval(rx_coalesce_usecs,
2911 ena_dev->intr_delay_resolution,
2912 &ena_dev->intr_moder_rx_interval);
2913 }
2914
2915 int ena_com_init_interrupt_moderation(struct ena_com_dev *ena_dev)
2916 {
2917 struct ena_admin_get_feat_resp get_resp;
2918 u16 delay_resolution;
2919 int rc;
2920
2921 rc = ena_com_get_feature(ena_dev, &get_resp,
2922 ENA_ADMIN_INTERRUPT_MODERATION, 0);
2923
2924 if (rc) {
2925 if (rc == ENA_COM_UNSUPPORTED) {
2926 ena_trc_dbg("Feature %d isn't supported\n",
2927 ENA_ADMIN_INTERRUPT_MODERATION);
2928 rc = 0;
2929 } else {
2930 ena_trc_err("Failed to get interrupt moderation admin cmd. rc: %d\n",
2931 rc);
2932 }
2933
2934 /* no moderation supported, disable adaptive support */
2935 ena_com_disable_adaptive_moderation(ena_dev);
2936 return rc;
2937 }
2938
2939 	/* If moderation is supported by the device, update the interrupt delay resolution */
2940 delay_resolution = get_resp.u.intr_moderation.intr_delay_resolution;
2941 ena_com_update_intr_delay_resolution(ena_dev, delay_resolution);
2942
2943 /* Disable adaptive moderation by default - can be enabled later */
2944 ena_com_disable_adaptive_moderation(ena_dev);
2945
2946 return 0;
2947 }
2948
2949 unsigned int ena_com_get_nonadaptive_moderation_interval_tx(struct ena_com_dev *ena_dev)
2950 {
2951 return ena_dev->intr_moder_tx_interval;
2952 }
2953
2954 unsigned int ena_com_get_nonadaptive_moderation_interval_rx(struct ena_com_dev *ena_dev)
2955 {
2956 return ena_dev->intr_moder_rx_interval;
2957 }
2958
2959 int ena_com_config_dev_mode(struct ena_com_dev *ena_dev,
2960 struct ena_admin_feature_llq_desc *llq_features,
2961 struct ena_llq_configurations *llq_default_cfg)
2962 {
2963 struct ena_com_llq_info *llq_info = &ena_dev->llq_info;
2964 int rc;
2965
2966 if (!llq_features->max_llq_num) {
2967 ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
2968 return 0;
2969 }
2970
2971 rc = ena_com_config_llq_info(ena_dev, llq_features, llq_default_cfg);
2972 if (rc)
2973 return rc;
2974
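/* Whatever space is left in an LLQ descriptor list entry after the TX
 * descriptors themselves is usable as inline packet header space.
 */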
2975 ena_dev->tx_max_header_size = llq_info->desc_list_entry_size -
2976 (llq_info->descs_num_before_header * sizeof(struct ena_eth_io_tx_desc));
2977
2978 if (unlikely(ena_dev->tx_max_header_size == 0)) {
2979 ena_trc_err("the size of the LLQ entry is smaller than needed\n");
2980 return -EINVAL;
2981 }
2982
2983 ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_DEV;
2984
2985 return 0;
2986 }
2987