1 /*-
2 * SPDX-License-Identifier: BSD-3-Clause
3 *
4 * Copyright (C) 2013 Emulex
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions are met:
9 *
10 * 1. Redistributions of source code must retain the above copyright notice,
11 * this list of conditions and the following disclaimer.
12 *
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 *
17 * 3. Neither the name of the Emulex Corporation nor the names of its
18 * contributors may be used to endorse or promote products derived from
19 * this software without specific prior written permission.
20 *
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
22 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
25 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
26 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
27 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
28 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
29 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
30 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
31 * POSSIBILITY OF SUCH DAMAGE.
32 *
33 * Contact Information:
34 * [email protected]
35 *
36 * Emulex
37 * 3333 Susan Street
38 * Costa Mesa, CA 92626
39 */
40
41 /* $FreeBSD$ */
42
43 #include "oce_if.h"
44
45 /*****************************************************
46 * local queue functions
47 *****************************************************/
48
/* Tx work queue (WQ) lifecycle helpers */
static struct oce_wq *oce_wq_init(POCE_SOFTC sc,
					uint32_t q_len, uint32_t wq_type);
static int oce_wq_create(struct oce_wq *wq, struct oce_eq *eq);
static void oce_wq_free(struct oce_wq *wq);
static void oce_wq_del(struct oce_wq *wq);
/* Rx queue (RQ) lifecycle helpers */
static struct oce_rq *oce_rq_init(POCE_SOFTC sc,
					uint32_t q_len,
					uint32_t frag_size,
					uint32_t mtu, uint32_t rss);
static int oce_rq_create(struct oce_rq *rq, uint32_t if_id, struct oce_eq *eq);
static void oce_rq_free(struct oce_rq *rq);
static void oce_rq_del(struct oce_rq *rq);
/* Event queue (EQ) lifecycle helpers */
static struct oce_eq *oce_eq_create(POCE_SOFTC sc,
				    uint32_t q_len,
				    uint32_t item_size,
				    uint32_t eq_delay,
				    uint32_t vector);
static void oce_eq_del(struct oce_eq *eq);
/* Mailbox queue (MQ) lifecycle helpers */
static struct oce_mq *oce_mq_create(POCE_SOFTC sc,
				    struct oce_eq *eq, uint32_t q_len);
static void oce_mq_free(struct oce_mq *mq);
/* Generic firmware command used to destroy an EQ, CQ, MQ, WQ or RQ */
static int oce_destroy_q(POCE_SOFTC sc, struct oce_mbx
			 *mbx, size_t req_size, enum qtype qtype, int version);
/* Completion queue (CQ) lifecycle helpers; oce_cq_create is non-static */
struct oce_cq *oce_cq_create(POCE_SOFTC sc,
			     struct oce_eq *eq,
			     uint32_t q_len,
			     uint32_t item_size,
			     uint32_t sol_event,
			     uint32_t is_eventable,
			     uint32_t nodelay, uint32_t ncoalesce);
static void oce_cq_del(POCE_SOFTC sc, struct oce_cq *cq);
80
81 /**
82 * @brief Create and initialize all the queues on the board
83 * @param sc software handle to the device
84 * @returns 0 if successful, or error
85 **/
int
oce_queue_init_all(POCE_SOFTC sc)
{
	int rc = 0, i, vector;
	struct oce_wq *wq;
	struct oce_rq *rq;
	struct oce_aic_obj *aic;

	/* alloc TX/RX queues (software state only; hardware objects are
	 * created further below once the interface exists) */
	for_all_wq_queues(sc, wq, i) {
		sc->wq[i] = oce_wq_init(sc, sc->tx_ring_size,
					NIC_WQ_TYPE_STANDARD);
		if (!sc->wq[i])
			goto error;

	}

	for_all_rq_queues(sc, rq, i) {
		/* RQ 0 is the default (non-RSS) receive queue */
		sc->rq[i] = oce_rq_init(sc, sc->rx_ring_size, sc->rq_frag_size,
					OCE_MAX_JUMBO_FRAME_SIZE,
					(i == 0) ? 0 : is_rss_enabled(sc));
		if (!sc->rq[i])
			goto error;
	}

	/* Create network interface on card */
	if (oce_create_nw_interface(sc))
		goto error;

	/* create all of the event queues */
	for (vector = 0; vector < sc->intr_count; vector++) {
		/* setup aic defaults for each event queue */
		aic = &sc->aic_obj[vector];
		aic->max_eqd = OCE_MAX_EQD;
		aic->min_eqd = OCE_MIN_EQD;
		aic->et_eqd = OCE_MIN_EQD;
		aic->enable = TRUE;

		/* hwlro needs the deeper EQ to absorb coalesced traffic */
		sc->eq[vector] = oce_eq_create(sc, sc->enable_hwlro ? EQ_LEN_2048 : EQ_LEN_1024,
						EQE_SIZE_4,0, vector);

		if (!sc->eq[vector])
			goto error;
	}

	/* create Tx, Rx and mcc queues */
	for_all_wq_queues(sc, wq, i) {
		/* WQ i is serviced by EQ i */
		rc = oce_wq_create(wq, sc->eq[i]);
		if (rc)
			goto error;
		wq->queue_index = i;
		TASK_INIT(&wq->txtask, 1, oce_tx_task, wq);
	}

	for_all_rq_queues(sc, rq, i) {
		/* RQ 0 shares EQ 0; RSS RQ i maps to EQ i-1 */
		rc = oce_rq_create(rq, sc->if_id,
				   sc->eq[(i == 0) ? 0:(i-1)]);
		if (rc)
			goto error;
		rq->queue_index = i;
	}

	sc->mq = oce_mq_create(sc, sc->eq[0], 64);
	if (!sc->mq)
		goto error;

	return rc;

error:
	/* unwind everything that was created so far */
	oce_queue_release_all(sc);
	return 1;
}
158
159 /**
160 * @brief Releases all mailbox queues created
161 * @param sc software handle to the device
162 */
void
oce_queue_release_all(POCE_SOFTC sc)
{
	int i = 0;
	struct oce_wq *wq;
	struct oce_rq *rq;
	struct oce_eq *eq;

	/* before deleting lro queues, we have to disable hwlro */
	if(sc->enable_hwlro)
		oce_mbox_nic_set_iface_lro_config(sc, 0);

	/* Tear down in reverse order of creation: RQs and WQs first (each
	 * deletes its own CQ), then the MQ, and finally the EQs. */
	for_all_rq_queues(sc, rq, i) {
		if (rq) {
			oce_rq_del(sc->rq[i]);
			oce_rq_free(sc->rq[i]);
		}
	}

	for_all_wq_queues(sc, wq, i) {
		if (wq) {
			oce_wq_del(sc->wq[i]);
			oce_wq_free(sc->wq[i]);
		}
	}

	if (sc->mq)
		oce_mq_free(sc->mq);

	for_all_evnt_queues(sc, eq, i) {
		if (eq)
			oce_eq_del(sc->eq[i]);
	}
}
197
198 /**
199 * @brief Function to create a WQ for NIC Tx
200 * @param sc software handle to the device
201 * @param qlen number of entries in the queue
202 * @param wq_type work queue type
203 * @returns the pointer to the WQ created or NULL on failure
204 */
205 static struct
oce_wq_init(POCE_SOFTC sc,uint32_t q_len,uint32_t wq_type)206 oce_wq *oce_wq_init(POCE_SOFTC sc, uint32_t q_len, uint32_t wq_type)
207 {
208 struct oce_wq *wq;
209 int rc = 0, i;
210
211 /* q_len must be min 256 and max 2k */
212 if (q_len < 256 || q_len > 2048) {
213 device_printf(sc->dev,
214 "Invalid q length. Must be "
215 "[256, 2000]: 0x%x\n", q_len);
216 return NULL;
217 }
218
219 /* allocate wq */
220 wq = malloc(sizeof(struct oce_wq), M_DEVBUF, M_NOWAIT | M_ZERO);
221 if (!wq)
222 return NULL;
223
224 /* Set the wq config */
225 wq->cfg.q_len = q_len;
226 wq->cfg.wq_type = (uint8_t) wq_type;
227 wq->cfg.eqd = OCE_DEFAULT_WQ_EQD;
228 wq->cfg.nbufs = 2 * wq->cfg.q_len;
229 wq->cfg.nhdl = 2 * wq->cfg.q_len;
230
231 wq->parent = (void *)sc;
232
233 rc = bus_dma_tag_create(bus_get_dma_tag(sc->dev),
234 1, 0,
235 BUS_SPACE_MAXADDR,
236 BUS_SPACE_MAXADDR,
237 NULL, NULL,
238 OCE_MAX_TX_SIZE,
239 OCE_MAX_TX_ELEMENTS,
240 PAGE_SIZE, 0, NULL, NULL, &wq->tag);
241
242 if (rc)
243 goto free_wq;
244
245 for (i = 0; i < OCE_WQ_PACKET_ARRAY_SIZE; i++) {
246 rc = bus_dmamap_create(wq->tag, 0, &wq->pckts[i].map);
247 if (rc)
248 goto free_wq;
249 }
250
251 wq->ring = oce_create_ring_buffer(sc, q_len, NIC_WQE_SIZE);
252 if (!wq->ring)
253 goto free_wq;
254
255 LOCK_CREATE(&wq->tx_lock, "TX_lock");
256 LOCK_CREATE(&wq->tx_compl_lock, "WQ_HANDLER_LOCK");
257
258 /* Allocate buf ring for multiqueue*/
259 wq->br = buf_ring_alloc(4096, M_DEVBUF,
260 M_WAITOK, &wq->tx_lock.mutex);
261 if (!wq->br)
262 goto free_wq;
263 return wq;
264
265 free_wq:
266 device_printf(sc->dev, "Create WQ failed\n");
267 oce_wq_free(wq);
268 return NULL;
269 }
270
271 /**
272 * @brief Frees the work queue
273 * @param wq pointer to work queue to free
274 */
static void
oce_wq_free(struct oce_wq *wq)
{
	POCE_SOFTC sc = (POCE_SOFTC) wq->parent;
	int i;

	/* make sure no deferred Tx task is still running before teardown */
	taskqueue_drain(taskqueue_swi, &wq->txtask);

	if (wq->ring != NULL) {
		oce_destroy_ring_buffer(sc, wq->ring);
		wq->ring = NULL;
	}

	/* release the per-packet DMA maps created in oce_wq_init() */
	for (i = 0; i < OCE_WQ_PACKET_ARRAY_SIZE; i++) {
		if (wq->pckts[i].map != NULL) {
			bus_dmamap_unload(wq->tag, wq->pckts[i].map);
			bus_dmamap_destroy(wq->tag, wq->pckts[i].map);
			wq->pckts[i].map = NULL;
		}
	}

	if (wq->tag != NULL)
		bus_dma_tag_destroy(wq->tag);
	if (wq->br != NULL)
		buf_ring_free(wq->br, M_DEVBUF);

	LOCK_DESTROY(&wq->tx_lock);
	LOCK_DESTROY(&wq->tx_compl_lock);
	free(wq, M_DEVBUF);
}
305
306 /**
307 * @brief Create a work queue
308 * @param wq pointer to work queue
309 * @param eq pointer to associated event queue
310 */
static int
oce_wq_create(struct oce_wq *wq, struct oce_eq *eq)
{
	POCE_SOFTC sc = wq->parent;
	struct oce_cq *cq;
	int rc = 0;

	/* create the CQ that this WQ reports Tx completions to */
	cq = oce_cq_create(sc,
			   eq,
			   CQ_LEN_1024,
			   sizeof(struct oce_nic_tx_cqe), 0, 1, 0, 3);
	if (!cq)
		return ENXIO;

	wq->cq = cq;

	/* ask firmware to instantiate the WQ */
	rc = oce_mbox_create_wq(wq);
	if (rc)
		goto error;

	wq->qstate = QCREATED;
	wq->wq_free = wq->cfg.q_len;
	wq->ring->cidx = 0;
	wq->ring->pidx = 0;

	/* register the CQ with its EQ and hook up the completion handler */
	eq->cq[eq->cq_valid] = cq;
	eq->cq_valid++;
	cq->cb_arg = wq;
	cq->cq_handler = oce_wq_handler;

	return 0;

error:
	/* oce_wq_del() also deletes the CQ created above */
	device_printf(sc->dev, "WQ create failed\n");
	oce_wq_del(wq);
	return rc;
}
349
350 /**
351 * @brief Delete a work queue
352 * @param wq pointer to work queue
353 */
static void
oce_wq_del(struct oce_wq *wq)
{
	struct oce_mbx mbx;
	struct mbx_delete_nic_wq *fwcmd;
	POCE_SOFTC sc = (POCE_SOFTC) wq->parent;

	/* only send a delete command if firmware actually created the WQ */
	if (wq->qstate == QCREATED) {
		bzero(&mbx, sizeof(struct oce_mbx));
		/* now fill the command */
		fwcmd = (struct mbx_delete_nic_wq *)&mbx.payload;
		fwcmd->params.req.wq_id = wq->wq_id;
		(void)oce_destroy_q(sc, &mbx,
				sizeof(struct mbx_delete_nic_wq), QTYPE_WQ, 0);
		wq->qstate = QDELETED;
	}

	/* the WQ owns its completion queue; delete it too */
	if (wq->cq != NULL) {
		oce_cq_del(sc, wq->cq);
		wq->cq = NULL;
	}
}
376
377 /**
378 * @brief function to allocate receive queue resources
379 * @param sc software handle to the device
380 * @param q_len length of receive queue
381 * @param frag_size size of an receive queue fragment
382 * @param mtu maximum transmission unit
383 * @param rss is-rss-queue flag
384 * @returns the pointer to the RQ created or NULL on failure
385 */
static struct
oce_rq *oce_rq_init(POCE_SOFTC sc,
				uint32_t q_len,
				uint32_t frag_size,
				uint32_t mtu, uint32_t rss)
{
	struct oce_rq *rq;
	int rc = 0, i;

	/* reject fragment sizes OCE_LOG2() cannot represent */
	if (OCE_LOG2(frag_size) <= 0)
		return NULL;

	/* ring length must be in (0, 1024] */
	if ((q_len == 0) || (q_len > 1024))
		return NULL;

	/* allocate the rq */
	rq = malloc(sizeof(struct oce_rq), M_DEVBUF, M_NOWAIT | M_ZERO);
	if (!rq)
		return NULL;

	rq->cfg.q_len = q_len;
	rq->cfg.frag_size = frag_size;
	rq->cfg.mtu = mtu;
	rq->cfg.eqd = 0;
	rq->lro_pkts_queued = 0;
	rq->cfg.is_rss_queue = rss;
	rq->pending = 0;

	rq->parent = (void *)sc;

	/* DMA tag for Rx buffers: single segment of oce_rq_buf_size */
	rc = bus_dma_tag_create(bus_get_dma_tag(sc->dev),
				1, 0,
				BUS_SPACE_MAXADDR,
				BUS_SPACE_MAXADDR,
				NULL, NULL,
				oce_rq_buf_size,
				1, oce_rq_buf_size, 0, NULL, NULL, &rq->tag);
	if (rc)
		goto free_rq;

	/* one DMA map per posted Rx buffer slot */
	for (i = 0; i < OCE_RQ_PACKET_ARRAY_SIZE; i++) {
		rc = bus_dmamap_create(rq->tag, 0, &rq->pckts[i].map);
		if (rc)
			goto free_rq;
	}

	/* create the ring buffer */
	rq->ring = oce_create_ring_buffer(sc, q_len,
				 sizeof(struct oce_nic_rqe));
	if (!rq->ring)
		goto free_rq;

	LOCK_CREATE(&rq->rx_lock, "RX_lock");

	return rq;

free_rq:
	/* oce_rq_free() tolerates the partially-initialized state */
	device_printf(sc->dev, "Create RQ failed\n");
	oce_rq_free(rq);
	return NULL;
}
447
448 /**
449 * @brief Free a receive queue
450 * @param rq pointer to receive queue
451 */
static void
oce_rq_free(struct oce_rq *rq)
{
	POCE_SOFTC sc = (POCE_SOFTC) rq->parent;
	int i = 0 ;

	if (rq->ring != NULL) {
		oce_destroy_ring_buffer(sc, rq->ring);
		rq->ring = NULL;
	}
	/* release DMA maps and any mbufs still attached to buffer slots */
	for (i = 0; i < OCE_RQ_PACKET_ARRAY_SIZE; i++) {
		if (rq->pckts[i].map != NULL) {
			bus_dmamap_unload(rq->tag, rq->pckts[i].map);
			bus_dmamap_destroy(rq->tag, rq->pckts[i].map);
			rq->pckts[i].map = NULL;
		}
		if (rq->pckts[i].mbuf) {
			m_free(rq->pckts[i].mbuf);
			rq->pckts[i].mbuf = NULL;
		}
	}

	if (rq->tag != NULL)
		bus_dma_tag_destroy(rq->tag);

	LOCK_DESTROY(&rq->rx_lock);
	free(rq, M_DEVBUF);
}
480
481 /**
482 * @brief Create a receive queue
483 * @param rq receive queue
484 * @param if_id interface identifier index`
485 * @param eq pointer to event queue
486 */
static int
oce_rq_create(struct oce_rq *rq, uint32_t if_id, struct oce_eq *eq)
{
	POCE_SOFTC sc = rq->parent;
	struct oce_cq *cq;

	/* hwlro coalesced completions need the deeper CQ */
	cq = oce_cq_create(sc, eq,
		       	sc->enable_hwlro ? CQ_LEN_2048 : CQ_LEN_1024,
			sizeof(struct oce_nic_rx_cqe), 0, 1, 0, 3);

	if (!cq)
		return ENXIO;

	rq->cq = cq;
	rq->cfg.if_id = if_id;

	/* Dont create RQ here. Create in if_activate */
	rq->qstate = 0;
	rq->ring->cidx = 0;
	rq->ring->pidx = 0;
	/* register the CQ with its EQ and hook up the completion handler */
	eq->cq[eq->cq_valid] = cq;
	eq->cq_valid++;
	cq->cb_arg = rq;
	cq->cq_handler = oce_rq_handler;

	return 0;

}
515
516 /**
517 * @brief Delete a receive queue
518 * @param rq receive queue
519 */
static void
oce_rq_del(struct oce_rq *rq)
{
	POCE_SOFTC sc = (POCE_SOFTC) rq->parent;
	struct oce_mbx mbx;
	struct mbx_delete_nic_rq *fwcmd;
	struct mbx_delete_nic_rq_v1 *fwcmd1;

	/* only send a delete command if firmware actually created the RQ */
	if (rq->qstate == QCREATED) {
		bzero(&mbx, sizeof(mbx));
		if(!rq->islro) {
			fwcmd = (struct mbx_delete_nic_rq *)&mbx.payload;
			fwcmd->params.req.rq_id = rq->rq_id;
			(void)oce_destroy_q(sc, &mbx, sizeof(struct mbx_delete_nic_rq), QTYPE_RQ, 0);
		}else {
			/* LRO RQs need the v1 form with explicit RQ flags */
			fwcmd1 = (struct mbx_delete_nic_rq_v1 *)&mbx.payload;
			fwcmd1->params.req.rq_id = rq->rq_id;
			fwcmd1->params.req.rq_flags = (NIC_RQ_FLAGS_RSS | NIC_RQ_FLAGS_LRO);
			(void)oce_destroy_q(sc, &mbx, sizeof(struct mbx_delete_nic_rq_v1), QTYPE_RQ, 1);
		}
		rq->qstate = QDELETED;
	}

	/* the RQ owns its completion queue; delete it too */
	if (rq->cq != NULL) {
		oce_cq_del(sc, rq->cq);
		rq->cq = NULL;
	}
}
548
549 /**
550 * @brief function to create an event queue
551 * @param sc software handle to the device
552 * @param q_len length of event queue
553 * @param item_size size of an event queue item
554 * @param eq_delay event queue delay
555 * @retval eq success, pointer to event queue
556 * @retval NULL failure
557 */
static struct
oce_eq *oce_eq_create(POCE_SOFTC sc, uint32_t q_len,
				uint32_t item_size,
				uint32_t eq_delay,
				uint32_t vector)
{
	struct oce_eq *eq;
	int rc = 0;

	/* allocate an eq */
	eq = malloc(sizeof(struct oce_eq), M_DEVBUF, M_NOWAIT | M_ZERO);
	if (eq == NULL)
		return NULL;

	eq->parent = (void *)sc;
	/* 0xffff marks "no hardware EQ yet"; oce_eq_del() checks this */
	eq->eq_id = 0xffff;
	eq->ring = oce_create_ring_buffer(sc, q_len, item_size);
	if (!eq->ring)
		goto free_eq;

	eq->eq_cfg.q_len = q_len;
	eq->eq_cfg.item_size = item_size;
	eq->eq_cfg.cur_eqd = (uint8_t) eq_delay;

	/* ask firmware to instantiate the EQ (fills in eq->eq_id) */
	rc = oce_mbox_create_eq(eq);
	if (rc)
		goto free_eq;

	/* record the EQ against the next interrupt slot */
	sc->intrs[sc->neqs++].eq = eq;

	return eq;

free_eq:
	oce_eq_del(eq);
	return NULL;
}
594
595 /**
596 * @brief Function to delete an event queue
597 * @param eq pointer to an event queue
598 */
static void
oce_eq_del(struct oce_eq *eq)
{
	struct oce_mbx mbx;
	struct mbx_destroy_common_eq *fwcmd;
	POCE_SOFTC sc = (POCE_SOFTC) eq->parent;

	/* only destroy in firmware if the EQ was actually created
	 * (0xffff is the "never created" sentinel set in oce_eq_create) */
	if (eq->eq_id != 0xffff) {
		bzero(&mbx, sizeof(mbx));
		fwcmd = (struct mbx_destroy_common_eq *)&mbx.payload;
		fwcmd->params.req.id = eq->eq_id;
		(void)oce_destroy_q(sc, &mbx,
			sizeof(struct mbx_destroy_common_eq), QTYPE_EQ, 0);
	}

	if (eq->ring != NULL) {
		oce_destroy_ring_buffer(sc, eq->ring);
		eq->ring = NULL;
	}

	free(eq, M_DEVBUF);

}
622
623 /**
624 * @brief Function to create an MQ
625 * @param sc software handle to the device
626 * @param eq the EQ to associate with the MQ for event notification
627 * @param q_len the number of entries to create in the MQ
628 * @returns pointer to the created MQ, failure otherwise
629 */
630 static struct oce_mq *
oce_mq_create(POCE_SOFTC sc,struct oce_eq * eq,uint32_t q_len)631 oce_mq_create(POCE_SOFTC sc, struct oce_eq *eq, uint32_t q_len)
632 {
633 struct oce_mbx mbx;
634 struct mbx_create_common_mq_ex *fwcmd = NULL;
635 struct oce_mq *mq = NULL;
636 int rc = 0;
637 struct oce_cq *cq;
638 oce_mq_ext_ctx_t *ctx;
639 uint32_t num_pages;
640 uint32_t page_size;
641 int version;
642
643 cq = oce_cq_create(sc, eq, CQ_LEN_256,
644 sizeof(struct oce_mq_cqe), 1, 1, 0, 0);
645 if (!cq)
646 return NULL;
647
648 /* allocate the mq */
649 mq = malloc(sizeof(struct oce_mq), M_DEVBUF, M_NOWAIT | M_ZERO);
650 if (!mq) {
651 oce_cq_del(sc, cq);
652 goto error;
653 }
654
655 mq->parent = sc;
656
657 mq->ring = oce_create_ring_buffer(sc, q_len, sizeof(struct oce_mbx));
658 if (!mq->ring)
659 goto error;
660
661 bzero(&mbx, sizeof(struct oce_mbx));
662
663 IS_XE201(sc) ? (version = OCE_MBX_VER_V1) : (version = OCE_MBX_VER_V0);
664 fwcmd = (struct mbx_create_common_mq_ex *)&mbx.payload;
665 mbx_common_req_hdr_init(&fwcmd->hdr, 0, 0,
666 MBX_SUBSYSTEM_COMMON,
667 OPCODE_COMMON_CREATE_MQ_EXT,
668 MBX_TIMEOUT_SEC,
669 sizeof(struct mbx_create_common_mq_ex),
670 version);
671
672 num_pages = oce_page_list(mq->ring, &fwcmd->params.req.pages[0]);
673 page_size = mq->ring->num_items * mq->ring->item_size;
674
675 ctx = &fwcmd->params.req.context;
676
677 if (IS_XE201(sc)) {
678 ctx->v1.num_pages = num_pages;
679 ctx->v1.ring_size = OCE_LOG2(q_len) + 1;
680 ctx->v1.cq_id = cq->cq_id;
681 ctx->v1.valid = 1;
682 ctx->v1.async_cq_id = cq->cq_id;
683 ctx->v1.async_cq_valid = 1;
684 /* Subscribe to Link State and Group 5 Events(bits 1 & 5 set) */
685 ctx->v1.async_evt_bitmap |= LE_32(0x00000022);
686 ctx->v1.async_evt_bitmap |= LE_32(1 << ASYNC_EVENT_CODE_DEBUG);
687 ctx->v1.async_evt_bitmap |=
688 LE_32(1 << ASYNC_EVENT_CODE_SLIPORT);
689 }
690 else {
691 ctx->v0.num_pages = num_pages;
692 ctx->v0.cq_id = cq->cq_id;
693 ctx->v0.ring_size = OCE_LOG2(q_len) + 1;
694 ctx->v0.valid = 1;
695 /* Subscribe to Link State and Group5 Events(bits 1 & 5 set) */
696 ctx->v0.async_evt_bitmap = 0xffffffff;
697 }
698
699 mbx.u0.s.embedded = 1;
700 mbx.payload_length = sizeof(struct mbx_create_common_mq_ex);
701 DW_SWAP(u32ptr(&mbx), mbx.payload_length + OCE_BMBX_RHDR_SZ);
702
703 rc = oce_mbox_post(sc, &mbx, NULL);
704 if (!rc)
705 rc = fwcmd->hdr.u0.rsp.status;
706 if (rc) {
707 device_printf(sc->dev,"%s failed - cmd status: %d\n",
708 __FUNCTION__, rc);
709 goto error;
710 }
711 mq->mq_id = LE_16(fwcmd->params.rsp.mq_id);
712 mq->cq = cq;
713 eq->cq[eq->cq_valid] = cq;
714 eq->cq_valid++;
715 mq->cq->eq = eq;
716 mq->cfg.q_len = (uint8_t) q_len;
717 mq->cfg.eqd = 0;
718 mq->qstate = QCREATED;
719
720 mq->cq->cb_arg = mq;
721 mq->cq->cq_handler = oce_mq_handler;
722
723 return mq;
724
725 error:
726 device_printf(sc->dev, "MQ create failed\n");
727 oce_mq_free(mq);
728 mq = NULL;
729 return mq;
730 }
731
732 /**
733 * @brief Function to free a mailbox queue
734 * @param mq pointer to a mailbox queue
735 */
736 static void
oce_mq_free(struct oce_mq * mq)737 oce_mq_free(struct oce_mq *mq)
738 {
739 POCE_SOFTC sc = (POCE_SOFTC) mq->parent;
740 struct oce_mbx mbx;
741 struct mbx_destroy_common_mq *fwcmd;
742
743 if (!mq)
744 return;
745
746 if (mq->ring != NULL) {
747 oce_destroy_ring_buffer(sc, mq->ring);
748 mq->ring = NULL;
749 if (mq->qstate == QCREATED) {
750 bzero(&mbx, sizeof (struct oce_mbx));
751 fwcmd = (struct mbx_destroy_common_mq *)&mbx.payload;
752 fwcmd->params.req.id = mq->mq_id;
753 (void) oce_destroy_q(sc, &mbx,
754 sizeof (struct mbx_destroy_common_mq),
755 QTYPE_MQ, 0);
756 }
757 mq->qstate = QDELETED;
758 }
759
760 if (mq->cq != NULL) {
761 oce_cq_del(sc, mq->cq);
762 mq->cq = NULL;
763 }
764
765 free(mq, M_DEVBUF);
766 mq = NULL;
767 }
768
769 /**
770 * @brief Function to delete a EQ, CQ, MQ, WQ or RQ
771 * @param sc sofware handle to the device
772 * @param mbx mailbox command to send to the fw to delete the queue
773 * (mbx contains the queue information to delete)
774 * @param req_size the size of the mbx payload dependent on the qtype
775 * @param qtype the type of queue i.e. EQ, CQ, MQ, WQ or RQ
776 * @returns 0 on success, failure otherwise
777 */
static int
oce_destroy_q(POCE_SOFTC sc, struct oce_mbx *mbx, size_t req_size,
		enum qtype qtype, int version)
{
	struct mbx_hdr *hdr = (struct mbx_hdr *)&mbx->payload;
	int opcode;
	int subsys;
	int rc = 0;

	/* map the queue type to the firmware opcode/subsystem pair */
	switch (qtype) {
	case QTYPE_EQ:
		opcode = OPCODE_COMMON_DESTROY_EQ;
		subsys = MBX_SUBSYSTEM_COMMON;
		break;
	case QTYPE_CQ:
		opcode = OPCODE_COMMON_DESTROY_CQ;
		subsys = MBX_SUBSYSTEM_COMMON;
		break;
	case QTYPE_MQ:
		opcode = OPCODE_COMMON_DESTROY_MQ;
		subsys = MBX_SUBSYSTEM_COMMON;
		break;
	case QTYPE_WQ:
		opcode = NIC_DELETE_WQ;
		subsys = MBX_SUBSYSTEM_NIC;
		break;
	case QTYPE_RQ:
		opcode = NIC_DELETE_RQ;
		subsys = MBX_SUBSYSTEM_NIC;
		break;
	default:
		return EINVAL;
	}

	mbx_common_req_hdr_init(hdr, 0, 0, subsys,
				opcode, MBX_TIMEOUT_SEC, req_size,
				version);

	/* caller-supplied payload is already filled in; finish the mbx */
	mbx->u0.s.embedded = 1;
	mbx->payload_length = (uint32_t) req_size;
	DW_SWAP(u32ptr(mbx), mbx->payload_length + OCE_BMBX_RHDR_SZ);

	rc = oce_mbox_post(sc, mbx, NULL);
	if (!rc)
		rc = hdr->u0.rsp.status;
	if (rc)
		device_printf(sc->dev,"%s failed - cmd status: %d\n",
			      __FUNCTION__, rc);
	return rc;
}
828
829 /**
830 * @brief Function to create a completion queue
831 * @param sc software handle to the device
832 * @param eq optional eq to be associated with to the cq
833 * @param q_len length of completion queue
834 * @param item_size size of completion queue items
835 * @param sol_event command context event
836 * @param is_eventable event table
837 * @param nodelay no delay flag
838 * @param ncoalesce no coalescence flag
839 * @returns pointer to the cq created, NULL on failure
840 */
struct oce_cq *
oce_cq_create(POCE_SOFTC sc, struct oce_eq *eq,
				 uint32_t q_len,
				 uint32_t item_size,
				 uint32_t sol_event,
				 uint32_t is_eventable,
				 uint32_t nodelay, uint32_t ncoalesce)
{
	struct oce_cq *cq = NULL;
	int rc = 0;

	cq = malloc(sizeof(struct oce_cq), M_DEVBUF, M_NOWAIT | M_ZERO);
	if (!cq)
		return NULL;

	cq->ring = oce_create_ring_buffer(sc, q_len, item_size);
	if (!cq->ring)
		goto error;

	cq->parent = sc;
	cq->eq = eq;
	cq->cq_cfg.q_len = q_len;
	cq->cq_cfg.item_size = item_size;
	cq->cq_cfg.nodelay = (uint8_t) nodelay;

	/* ask firmware to instantiate the CQ (fills in cq->cq_id) */
	rc = oce_mbox_cq_create(cq, ncoalesce, is_eventable);
	if (rc)
		goto error;

	/* track every CQ in the softc for later lookup/teardown */
	sc->cq[sc->ncqs++] = cq;

	return cq;

error:
	/* oce_cq_del() tolerates the partially-initialized state */
	device_printf(sc->dev, "CQ create failed\n");
	oce_cq_del(sc, cq);
	return NULL;
}
879
880 /**
881 * @brief Deletes the completion queue
882 * @param sc software handle to the device
883 * @param cq pointer to a completion queue
884 */
static void
oce_cq_del(POCE_SOFTC sc, struct oce_cq *cq)
{
	struct oce_mbx mbx;
	struct mbx_destroy_common_cq *fwcmd;

	/* a non-NULL ring implies firmware creation was attempted */
	if (cq->ring != NULL) {
		bzero(&mbx, sizeof(struct oce_mbx));
		/* now fill the command */
		fwcmd = (struct mbx_destroy_common_cq *)&mbx.payload;
		fwcmd->params.req.id = cq->cq_id;
		(void)oce_destroy_q(sc, &mbx,
			sizeof(struct mbx_destroy_common_cq), QTYPE_CQ, 0);
		/*NOW destroy the ring */
		oce_destroy_ring_buffer(sc, cq->ring);
		cq->ring = NULL;
	}

	free(cq, M_DEVBUF);
	cq = NULL;
}
906
907 /**
908 * @brief Start a receive queue
909 * @param rq pointer to a receive queue
910 */
911 int
oce_start_rq(struct oce_rq * rq)912 oce_start_rq(struct oce_rq *rq)
913 {
914 POCE_SOFTC sc = (POCE_SOFTC) rq->parent;
915 int rc;
916
917 if(sc->enable_hwlro)
918 rc = oce_alloc_rx_bufs(rq, 960);
919 else
920 rc = oce_alloc_rx_bufs(rq, rq->cfg.q_len - 1);
921
922 if (rc == 0)
923 oce_arm_cq(rq->parent, rq->cq->cq_id, 0, TRUE);
924
925 return rc;
926 }
927
928 /**
929 * @brief Start a work queue
930 * @param wq pointer to a work queue
931 */
932 int
oce_start_wq(struct oce_wq * wq)933 oce_start_wq(struct oce_wq *wq)
934 {
935 oce_arm_cq(wq->parent, wq->cq->cq_id, 0, TRUE);
936 return 0;
937 }
938
939 /**
940 * @brief Start a mailbox queue
941 * @param mq pointer to a mailbox queue
942 */
943 int
oce_start_mq(struct oce_mq * mq)944 oce_start_mq(struct oce_mq *mq)
945 {
946 oce_arm_cq(mq->parent, mq->cq->cq_id, 0, TRUE);
947 return 0;
948 }
949
950 /**
951 * @brief Function to arm an EQ so that it can generate events
952 * @param sc software handle to the device
953 * @param qid id of the EQ returned by the fw at the time of creation
954 * @param npopped number of EQEs to arm
955 * @param rearm rearm bit enable/disable
956 * @param clearint bit to clear the interrupt condition because of which
957 * EQEs are generated
958 */
959 void
oce_arm_eq(POCE_SOFTC sc,int16_t qid,int npopped,uint32_t rearm,uint32_t clearint)960 oce_arm_eq(POCE_SOFTC sc,
961 int16_t qid, int npopped, uint32_t rearm, uint32_t clearint)
962 {
963 eq_db_t eq_db = { 0 };
964
965 eq_db.bits.rearm = rearm;
966 eq_db.bits.event = 1;
967 eq_db.bits.num_popped = npopped;
968 eq_db.bits.clrint = clearint;
969 eq_db.bits.qid = qid;
970 OCE_WRITE_REG32(sc, db, PD_EQ_DB, eq_db.dw0);
971
972 }
973
974 /**
975 * @brief Function to arm a CQ with CQEs
976 * @param sc software handle to the device
977 * @param qid id of the CQ returned by the fw at the time of creation
978 * @param npopped number of CQEs to arm
979 * @param rearm rearm bit enable/disable
980 */
oce_arm_cq(POCE_SOFTC sc,int16_t qid,int npopped,uint32_t rearm)981 void oce_arm_cq(POCE_SOFTC sc, int16_t qid, int npopped, uint32_t rearm)
982 {
983 cq_db_t cq_db = { 0 };
984
985 cq_db.bits.rearm = rearm;
986 cq_db.bits.num_popped = npopped;
987 cq_db.bits.event = 0;
988 cq_db.bits.qid = qid;
989 OCE_WRITE_REG32(sc, db, PD_CQ_DB, cq_db.dw0);
990
991 }
992
993 /*
994 * @brief function to cleanup the eqs used during stop
995 * @param eq pointer to event queue structure
996 * @returns the number of EQs processed
997 */
void
oce_drain_eq(struct oce_eq *eq)
{

	struct oce_eqe *eqe;
	uint16_t num_eqe = 0;
	POCE_SOFTC sc = eq->parent;

	/* consume every pending EQE (evnt != 0 marks a valid entry),
	 * clearing each one as it is consumed */
	do {
		eqe = RING_GET_CONSUMER_ITEM_VA(eq->ring, struct oce_eqe);
		if (eqe->evnt == 0)
			break;
		eqe->evnt = 0;
		bus_dmamap_sync(eq->ring->dma.tag, eq->ring->dma.map,
					BUS_DMASYNC_POSTWRITE);
		num_eqe++;
		RING_GET(eq->ring, 1);

	} while (TRUE);

	/* acknowledge the consumed entries without rearming (FALSE),
	 * and clear the pending interrupt condition (TRUE) */
	oce_arm_eq(sc, eq->eq_id, num_eqe, FALSE, TRUE);

}
1021
void
oce_drain_wq_cq(struct oce_wq *wq)
{
	POCE_SOFTC sc = wq->parent;
	struct oce_cq *cq = wq->cq;
	struct oce_nic_tx_cqe *cqe;
	int num_cqes = 0;

	bus_dmamap_sync(cq->ring->dma.tag, cq->ring->dma.map,
				 BUS_DMASYNC_POSTWRITE);

	/* consume every pending Tx CQE; dword 3 == 0 marks an invalid
	 * (already-consumed) entry, and clearing it invalidates the CQE */
	do {
		cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_nic_tx_cqe);
		if (cqe->u0.dw[3] == 0)
			break;
		cqe->u0.dw[3] = 0;
		bus_dmamap_sync(cq->ring->dma.tag, cq->ring->dma.map,
				 BUS_DMASYNC_POSTWRITE);
		RING_GET(cq->ring, 1);
		num_cqes++;

	} while (TRUE);

	/* acknowledge the consumed entries without rearming the CQ */
	oce_arm_cq(sc, cq->cq_id, num_cqes, FALSE);

}
1048
1049 /*
1050 * @brief function to drain a MCQ and process its CQEs
1051 * @param dev software handle to the device
1052 * @param cq pointer to the cq to drain
1053 * @returns the number of CQEs processed
1054 */
void
oce_drain_mq_cq(void *arg)
{
	/* Intentionally a no-op: MCQ draining is not implemented yet.
	 * TODO: additional code. */
}
1061
1062 /**
1063 * @brief function to process a Recieve queue
1064 * @param arg pointer to the RQ to charge
1065 * @return number of cqes processed
1066 */
void
oce_drain_rq_cq(struct oce_rq *rq)
{
	struct oce_nic_rx_cqe *cqe;
	uint16_t num_cqe = 0;
	struct oce_cq *cq;
	POCE_SOFTC sc;

	sc = rq->parent;
	cq = rq->cq;
	cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_nic_rx_cqe);
	/* dequeue till you reach an invalid cqe */
	while (RQ_CQE_VALID(cqe)) {
		RQ_CQE_INVALIDATE(cqe);
		RING_GET(cq->ring, 1);
		cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring,
						struct oce_nic_rx_cqe);
		num_cqe++;
	}
	/* acknowledge the consumed entries without rearming the CQ */
	oce_arm_cq(sc, cq->cq_id, num_cqe, FALSE);

	return;
}
1090
void
oce_free_posted_rxbuf(struct oce_rq *rq)
{
	struct oce_packet_desc *pd;

	/* walk the ring from the consumer index, unloading the DMA map and
	 * freeing the mbuf for every buffer still posted to hardware */
	while (rq->pending) {
		pd = &rq->pckts[rq->ring->cidx];
		bus_dmamap_sync(rq->tag, pd->map, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(rq->tag, pd->map);
		if (pd->mbuf != NULL) {
			m_freem(pd->mbuf);
			pd->mbuf = NULL;
		}

		RING_GET(rq->ring,1);
		rq->pending--;
	}

}
1110
void
oce_rx_cq_clean_hwlro(struct oce_rq *rq)
{
	struct oce_cq *cq = rq->cq;
	POCE_SOFTC sc = rq->parent;
	struct nic_hwlro_singleton_cqe *cqe;
	struct nic_hwlro_cqe_part2 *cqe2;
	int flush_wait = 0;
	int flush_compl = 0;
	int num_frags = 0;

	/* Drain the hwlro CQ until the firmware's flush completion arrives.
	 * hwlro completions come either as a singleton CQE (type 0) or as a
	 * cqe1/cqe2 pair (types 0x1/0x2); rq->cqe_firstpart tracks a
	 * pending cqe1 awaiting its cqe2. */
	for (;;) {
		bus_dmamap_sync(cq->ring->dma.tag,cq->ring->dma.map, BUS_DMASYNC_POSTWRITE);
		cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct nic_hwlro_singleton_cqe);
		if(cqe->valid) {
			if(cqe->cqe_type == 0) { /* singleton cqe */
				/* we should not get singleton cqe after cqe1 on same rq */
				if(rq->cqe_firstpart != NULL) {
					device_printf(sc->dev, "Got singleton cqe after cqe1 \n");
					goto exit_rx_cq_clean_hwlro;
				}
				/* discard the Rx fragments this CQE covers */
				num_frags = cqe->pkt_size / rq->cfg.frag_size;
				if(cqe->pkt_size % rq->cfg.frag_size)
					num_frags++;
				oce_discard_rx_comp(rq, num_frags);
				/* Check if CQE is flush completion */
				if(!cqe->pkt_size)
					flush_compl = 1;
				cqe->valid = 0;
				RING_GET(cq->ring, 1);
			}else if(cqe->cqe_type == 0x1) { /* first part */
				/* we should not get cqe1 after cqe1 on same rq */
				if(rq->cqe_firstpart != NULL) {
					device_printf(sc->dev, "Got cqe1 after cqe1 \n");
					goto exit_rx_cq_clean_hwlro;
				}
				/* remember cqe1; it is invalidated when cqe2 arrives */
				rq->cqe_firstpart = (struct nic_hwlro_cqe_part1 *)cqe;
				RING_GET(cq->ring, 1);
			}else if(cqe->cqe_type == 0x2) { /* second part */
				cqe2 = (struct nic_hwlro_cqe_part2 *)cqe;
				/* We should not get cqe2 without cqe1 */
				if(rq->cqe_firstpart == NULL) {
					device_printf(sc->dev, "Got cqe2 without cqe1 \n");
					goto exit_rx_cq_clean_hwlro;
				}
				num_frags = cqe2->coalesced_size / rq->cfg.frag_size;
				if(cqe2->coalesced_size % rq->cfg.frag_size)
					num_frags++;

				/* Flush completion will always come in singleton CQE */
				oce_discard_rx_comp(rq, num_frags);

				rq->cqe_firstpart->valid = 0;
				cqe2->valid = 0;
				rq->cqe_firstpart = NULL;
				RING_GET(cq->ring, 1);
			}
			/* ack the consumed entry; stop once flushed */
			oce_arm_cq(sc, cq->cq_id, 1, FALSE);
			if(flush_compl)
				break;
		}else {
			/* no valid CQE yet: rearm and wait up to ~100ms */
			if (flush_wait++ > 100) {
				device_printf(sc->dev, "did not receive hwlro flush compl\n");
				break;
			}
			oce_arm_cq(sc, cq->cq_id, 0, TRUE);
			DELAY(1000);
		}
	}

	/* After cleanup, leave the CQ in unarmed state */
	oce_arm_cq(sc, cq->cq_id, 0, FALSE);

exit_rx_cq_clean_hwlro:
	return;
}
1187
/*
 * Drain the RX completion queue of a non-LRO RQ after the queue has
 * been deleted in firmware.  Valid CQEs are consumed and their buffer
 * fragments discarded until the flush completion (num_fragments,
 * pkt_size and error all zero) arrives.  Gives up after ~100 empty
 * polls.  Caller holds rq->rx_lock (see oce_stop_rx()).
 */
void
oce_rx_cq_clean(struct oce_rq *rq)
{
	struct oce_nic_rx_cqe *cqe;
	struct oce_cq *cq;
	POCE_SOFTC sc;
	int flush_wait = 0;	/* number of empty polls so far */
	int flush_compl = 0;	/* set once the flush completion is seen */
	sc = rq->parent;
	cq = rq->cq;

	for (;;) {
		/* Make the device-written CQE visible to the CPU before reading it */
		bus_dmamap_sync(cq->ring->dma.tag,
			cq->ring->dma.map, BUS_DMASYNC_POSTWRITE);
		cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_nic_rx_cqe);
		if(RQ_CQE_VALID(cqe)) {
			/* Byte-swap the CQE in place on big-endian hosts */
			DW_SWAP((uint32_t *) cqe, sizeof(oce_rq_cqe));
			oce_discard_rx_comp(rq, cqe->u0.s.num_fragments);
			/* Check if CQE is flush completion */
			if((cqe->u0.s.num_fragments==0)&&(cqe->u0.s.pkt_size == 0)&&(cqe->u0.s.error == 0))
				flush_compl = 1;

			RQ_CQE_INVALIDATE(cqe);
			RING_GET(cq->ring, 1);
#if defined(INET6) || defined(INET)
			/* Push out anything pending in the software LRO unit */
			if (IF_LRO_ENABLED(sc))
				oce_rx_flush_lro(rq);
#endif
			/* Credit the consumed CQE back to hardware (no rearm) */
			oce_arm_cq(sc, cq->cq_id, 1, FALSE);
			if(flush_compl)
				break;
		}else {
			if (flush_wait++ > 100) {
				device_printf(sc->dev, "did not receive flush compl\n");
				break;
			}
			/* Nothing valid yet: rearm the CQ and poll again after a delay */
			oce_arm_cq(sc, cq->cq_id, 0, TRUE);
			DELAY(1000);
		}
	}

	/* After cleanup, leave the CQ in unarmed state */
	oce_arm_cq(sc, cq->cq_id, 0, FALSE);
}
1232
/*
 * Tear down all RX queues: disable hardware LRO on the interface (if
 * enabled), then for each created RQ delete the queue in firmware
 * (plain or v1 delete command depending on whether it is an LRO
 * queue), drain its completion queue until the flush completion, and
 * free any RX buffers still posted to hardware.
 */
void
oce_stop_rx(POCE_SOFTC sc)
{
	struct epoch_tracker et;
	struct oce_mbx mbx;
	struct mbx_delete_nic_rq *fwcmd;
	struct mbx_delete_nic_rq_v1 *fwcmd1;
	struct oce_rq *rq;
	int i = 0;

	NET_EPOCH_ENTER(et);
	/* before deleting disable hwlro */
	if(sc->enable_hwlro)
		oce_mbox_nic_set_iface_lro_config(sc, 0);

	for_all_rq_queues(sc, rq, i) {
		if (rq->qstate == QCREATED) {
			/* Delete rxq in firmware */
			LOCK(&rq->rx_lock);

			bzero(&mbx, sizeof(mbx));
			if(!rq->islro) {
				fwcmd = (struct mbx_delete_nic_rq *)&mbx.payload;
				fwcmd->params.req.rq_id = rq->rq_id;
				(void)oce_destroy_q(sc, &mbx, sizeof(struct mbx_delete_nic_rq), QTYPE_RQ, 0);
			}else {
				/* LRO queues take the v1 delete command with RSS|LRO flags */
				fwcmd1 = (struct mbx_delete_nic_rq_v1 *)&mbx.payload;
				fwcmd1->params.req.rq_id = rq->rq_id;
				fwcmd1->params.req.rq_flags = (NIC_RQ_FLAGS_RSS | NIC_RQ_FLAGS_LRO);

				(void)oce_destroy_q(sc,&mbx,sizeof(struct mbx_delete_nic_rq_v1),QTYPE_RQ,1);
			}
			rq->qstate = QDELETED;

			/* delay before draining the CQ; presumably lets firmware
			 * post the flush completion — TODO confirm against spec */
			DELAY(1000);

			/* Drain the CQ with the variant matching the queue type */
			if(!rq->islro)
				oce_rx_cq_clean(rq);
			else
				oce_rx_cq_clean_hwlro(rq);

			/* Free posted RX buffers that are not used */
			oce_free_posted_rxbuf(rq);
			UNLOCK(&rq->rx_lock);
		}
	}
	NET_EPOCH_EXIT(et);
}
1281
1282 int
oce_start_rx(POCE_SOFTC sc)1283 oce_start_rx(POCE_SOFTC sc)
1284 {
1285 struct oce_rq *rq;
1286 int rc = 0, i;
1287
1288 for_all_rq_queues(sc, rq, i) {
1289 if (rq->qstate == QCREATED)
1290 continue;
1291 if((i == 0) || (!sc->enable_hwlro)) {
1292 rc = oce_mbox_create_rq(rq);
1293 if (rc)
1294 goto error;
1295 rq->islro = 0;
1296 }else {
1297 rc = oce_mbox_create_rq_v2(rq);
1298 if (rc)
1299 goto error;
1300 rq->islro = 1;
1301 }
1302 /* reset queue pointers */
1303 rq->qstate = QCREATED;
1304 rq->pending = 0;
1305 rq->ring->cidx = 0;
1306 rq->ring->pidx = 0;
1307 }
1308
1309 if(sc->enable_hwlro) {
1310 rc = oce_mbox_nic_set_iface_lro_config(sc, 1);
1311 if (rc)
1312 goto error;
1313 }
1314
1315 DELAY(1);
1316
1317 /* RSS config */
1318 if (is_rss_enabled(sc)) {
1319 rc = oce_config_nic_rss(sc, (uint8_t) sc->if_id, RSS_ENABLE);
1320 if (rc)
1321 goto error;
1322 }
1323
1324 DELAY(1);
1325 return rc;
1326 error:
1327 device_printf(sc->dev, "Start RX failed\n");
1328 return rc;
1329
1330 }
1331