1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2016-2018 Microsoft Corporation
3 * Copyright(c) 2013-2016 Brocade Communications Systems, Inc.
4 * All rights reserved.
5 */
6
7 #include <stdint.h>
8 #include <string.h>
9 #include <stdio.h>
10 #include <errno.h>
11 #include <unistd.h>
12 #include <strings.h>
13 #include <malloc.h>
14
15 #include <rte_ethdev.h>
16 #include <rte_memcpy.h>
17 #include <rte_string_fns.h>
18 #include <rte_memzone.h>
19 #include <rte_malloc.h>
20 #include <rte_atomic.h>
21 #include <rte_bitmap.h>
22 #include <rte_branch_prediction.h>
23 #include <rte_ether.h>
24 #include <rte_common.h>
25 #include <rte_errno.h>
26 #include <rte_memory.h>
27 #include <rte_eal.h>
28 #include <rte_dev.h>
29 #include <rte_net.h>
30 #include <rte_bus_vmbus.h>
31 #include <rte_spinlock.h>
32
33 #include "hn_logs.h"
34 #include "hn_var.h"
35 #include "hn_rndis.h"
36 #include "hn_nvs.h"
37 #include "ndis.h"
38
39 #define HN_NVS_SEND_MSG_SIZE \
40 (sizeof(struct vmbus_chanpkt_hdr) + sizeof(struct hn_nvs_rndis))
41
42 #define HN_TXD_CACHE_SIZE 32 /* per cpu tx_descriptor pool cache */
43 #define HN_RXQ_EVENT_DEFAULT 2048
44
45 struct hn_rxinfo {
46 uint32_t vlan_info;
47 uint32_t csum_info;
48 uint32_t hash_info;
49 uint32_t hash_value;
50 };
51
52 #define HN_RXINFO_VLAN 0x0001
53 #define HN_RXINFO_CSUM 0x0002
54 #define HN_RXINFO_HASHINF 0x0004
55 #define HN_RXINFO_HASHVAL 0x0008
56 #define HN_RXINFO_ALL \
57 (HN_RXINFO_VLAN | \
58 HN_RXINFO_CSUM | \
59 HN_RXINFO_HASHINF | \
60 HN_RXINFO_HASHVAL)
61
62 #define HN_NDIS_VLAN_INFO_INVALID 0xffffffff
63 #define HN_NDIS_RXCSUM_INFO_INVALID 0
64 #define HN_NDIS_HASH_INFO_INVALID 0
65
66 /*
* Per-transmit bookkeeping.
* A slot in the transmit ring (chim_index) is reserved for each transmit.
*
* There are two types of transmit:
* - buffered transmit where the chimney buffer is used and the RNDIS
* header is in the buffer. mbuf == NULL for this case.
*
* - direct transmit where the RNDIS header is in rndis_pkt;
* the mbuf is freed after transmit.
*
* Descriptors come from a per-queue pool which is used
* to limit the number of outstanding requests.
79 */
80 struct hn_txdesc {
81 struct rte_mbuf *m;
82
83 uint16_t queue_id;
84 uint32_t chim_index;
85 uint32_t chim_size;
86 uint32_t data_size;
87 uint32_t packets;
88
89 struct rndis_packet_msg *rndis_pkt;
90 };
91
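/*
 * Worst-case RNDIS packet message size: the fixed header plus
 * per-packet info for hash value, VLAN, LSOv2 and transmit checksum.
 */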
92 #define HN_RNDIS_PKT_LEN \
93 (sizeof(struct rndis_packet_msg) + \
94 RNDIS_PKTINFO_SIZE(NDIS_HASH_VALUE_SIZE) + \
95 RNDIS_PKTINFO_SIZE(NDIS_VLAN_INFO_SIZE) + \
96 RNDIS_PKTINFO_SIZE(NDIS_LSO2_INFO_SIZE) + \
97 RNDIS_PKTINFO_SIZE(NDIS_TXCSUM_INFO_SIZE))
98
99 #define HN_RNDIS_PKT_ALIGNED RTE_ALIGN(HN_RNDIS_PKT_LEN, RTE_CACHE_LINE_SIZE)
100
101 /* Minimum space required for a packet */
102 #define HN_PKTSIZE_MIN(align) \
103 RTE_ALIGN(RTE_ETHER_MIN_LEN + HN_RNDIS_PKT_LEN, align)
104
105 #define DEFAULT_TX_FREE_THRESH 32
106
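/*
 * Update the per-queue size histogram (bins for <64, 64, powers of two
 * up to 1023, 1024-1518 and 1519+) and multicast/broadcast counters.
 */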
107 static void
hn_update_packet_stats(struct hn_stats *stats, const struct rte_mbuf *m)
109 {
110 uint32_t s = m->pkt_len;
111 const struct rte_ether_addr *ea;
112
113 if (s == 64) {
114 stats->size_bins[1]++;
115 } else if (s > 64 && s < 1024) {
116 uint32_t bin;
117
/* count leading zeros to index the correct power-of-two size bin */
119 bin = (sizeof(s) * 8) - __builtin_clz(s) - 5;
120 stats->size_bins[bin]++;
121 } else {
122 if (s < 64)
123 stats->size_bins[0]++;
124 else if (s < 1519)
125 stats->size_bins[6]++;
126 else
127 stats->size_bins[7]++;
128 }
129
130 ea = rte_pktmbuf_mtod(m, const struct rte_ether_addr *);
131 if (rte_is_multicast_ether_addr(ea)) {
132 if (rte_is_broadcast_ether_addr(ea))
133 stats->broadcast++;
134 else
135 stats->multicast++;
136 }
137 }
138
static inline unsigned int hn_rndis_pktlen(const struct rndis_packet_msg *pkt)
140 {
141 return pkt->pktinfooffset + pkt->pktinfolen;
142 }
143
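/*
 * RNDIS offsets inside a packet message are measured from the start of
 * the dataoffset field, not from the start of the message; convert an
 * absolute offset into that representation.
 */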
144 static inline uint32_t
hn_rndis_pktmsg_offset(uint32_t ofs)
146 {
147 return ofs - offsetof(struct rndis_packet_msg, dataoffset);
148 }
149
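/*
 * Mempool object constructor: point each descriptor at its own slice
 * of the per-queue RNDIS header area (tx_rndis).
 */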
static void hn_txd_init(struct rte_mempool *mp __rte_unused,
151 void *opaque, void *obj, unsigned int idx)
152 {
153 struct hn_tx_queue *txq = opaque;
154 struct hn_txdesc *txd = obj;
155
156 memset(txd, 0, sizeof(*txd));
157
158 txd->queue_id = txq->queue_id;
159 txd->chim_index = NVS_CHIM_IDX_INVALID;
160 txd->rndis_pkt = (struct rndis_packet_msg *)((char *)txq->tx_rndis
161 + idx * HN_RNDIS_PKT_ALIGNED);
162 }
163
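/*
 * The chimney send buffer is tracked with a bitmap: one bit per slot,
 * a set bit meaning the slot is free. All slots start out free.
 */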
164 int
hn_chim_init(struct rte_eth_dev *dev)
166 {
167 struct hn_data *hv = dev->data->dev_private;
168 uint32_t i, chim_bmp_size;
169
170 rte_spinlock_init(&hv->chim_lock);
171 chim_bmp_size = rte_bitmap_get_memory_footprint(hv->chim_cnt);
172 hv->chim_bmem = rte_zmalloc("hn_chim_bitmap", chim_bmp_size,
173 RTE_CACHE_LINE_SIZE);
174 if (hv->chim_bmem == NULL) {
175 PMD_INIT_LOG(ERR, "failed to allocate bitmap size %u",
176 chim_bmp_size);
177 return -1;
178 }
179
180 hv->chim_bmap = rte_bitmap_init(hv->chim_cnt,
181 hv->chim_bmem, chim_bmp_size);
182 if (hv->chim_bmap == NULL) {
183 PMD_INIT_LOG(ERR, "failed to init chim bitmap");
184 return -1;
185 }
186
187 for (i = 0; i < hv->chim_cnt; i++)
188 rte_bitmap_set(hv->chim_bmap, i);
189
190 return 0;
191 }
192
193 void
hn_chim_uninit(struct rte_eth_dev *dev)
195 {
196 struct hn_data *hv = dev->data->dev_private;
197
198 rte_bitmap_free(hv->chim_bmap);
199 rte_free(hv->chim_bmem);
200 hv->chim_bmem = NULL;
201 }
202
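/*
 * Claim a free chimney slot: scan the bitmap for a set bit, clear it and
 * return its index, or NVS_CHIM_IDX_INVALID if no slot is available.
 */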
static uint32_t hn_chim_alloc(struct hn_data *hv)
204 {
205 uint32_t index = NVS_CHIM_IDX_INVALID;
206 uint64_t slab = 0;
207
208 rte_spinlock_lock(&hv->chim_lock);
209 if (rte_bitmap_scan(hv->chim_bmap, &index, &slab)) {
210 index += rte_bsf64(slab);
211 rte_bitmap_clear(hv->chim_bmap, index);
212 }
213 rte_spinlock_unlock(&hv->chim_lock);
214
215 return index;
216 }
217
static void hn_chim_free(struct hn_data *hv, uint32_t chim_idx)
219 {
220 if (chim_idx >= hv->chim_cnt) {
221 PMD_DRV_LOG(ERR, "Invalid chimney index %u", chim_idx);
222 } else {
223 rte_spinlock_lock(&hv->chim_lock);
224 rte_bitmap_set(hv->chim_bmap, chim_idx);
225 rte_spinlock_unlock(&hv->chim_lock);
226 }
227 }
228
static void hn_reset_txagg(struct hn_tx_queue *txq)
230 {
231 txq->agg_szleft = txq->agg_szmax;
232 txq->agg_pktleft = txq->agg_pktmax;
233 txq->agg_txd = NULL;
234 txq->agg_prevpkt = NULL;
235 }
236
237 int
hn_dev_tx_queue_setup(struct rte_eth_dev *dev,
239 uint16_t queue_idx, uint16_t nb_desc,
240 unsigned int socket_id,
241 const struct rte_eth_txconf *tx_conf)
242
243 {
244 struct hn_data *hv = dev->data->dev_private;
245 struct hn_tx_queue *txq;
246 char name[RTE_MEMPOOL_NAMESIZE];
247 uint32_t tx_free_thresh;
248 int err = -ENOMEM;
249
250 PMD_INIT_FUNC_TRACE();
251
252 tx_free_thresh = tx_conf->tx_free_thresh;
253 if (tx_free_thresh == 0)
254 tx_free_thresh = RTE_MIN(nb_desc / 4,
255 DEFAULT_TX_FREE_THRESH);
256
257 if (tx_free_thresh + 3 >= nb_desc) {
258 PMD_INIT_LOG(ERR,
259 "tx_free_thresh must be less than the number of TX entries minus 3(%u)."
260 " (tx_free_thresh=%u port=%u queue=%u)\n",
261 nb_desc - 3,
262 tx_free_thresh, dev->data->port_id, queue_idx);
263 return -EINVAL;
264 }
265
266 txq = rte_zmalloc_socket("HN_TXQ", sizeof(*txq), RTE_CACHE_LINE_SIZE,
267 socket_id);
268 if (!txq)
269 return -ENOMEM;
270
271 txq->hv = hv;
272 txq->chan = hv->channels[queue_idx];
273 txq->port_id = dev->data->port_id;
274 txq->queue_id = queue_idx;
275 txq->free_thresh = tx_free_thresh;
276
277 snprintf(name, sizeof(name),
278 "hn_txd_%u_%u", dev->data->port_id, queue_idx);
279
280 PMD_INIT_LOG(DEBUG, "TX descriptor pool %s n=%u size=%zu",
281 name, nb_desc, sizeof(struct hn_txdesc));
282
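/*
 * One IOVA-contiguous memzone holds the per-descriptor RNDIS headers so
 * they can be referenced directly from the transmit scatter-gather list.
 */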
283 txq->tx_rndis_mz = rte_memzone_reserve_aligned(name,
284 nb_desc * HN_RNDIS_PKT_ALIGNED, rte_socket_id(),
285 RTE_MEMZONE_IOVA_CONTIG, HN_RNDIS_PKT_ALIGNED);
286 if (!txq->tx_rndis_mz) {
287 err = -rte_errno;
288 goto error;
289 }
290 txq->tx_rndis = txq->tx_rndis_mz->addr;
291 txq->tx_rndis_iova = txq->tx_rndis_mz->iova;
292
293 txq->txdesc_pool = rte_mempool_create(name, nb_desc,
294 sizeof(struct hn_txdesc),
295 0, 0, NULL, NULL,
296 hn_txd_init, txq,
297 dev->device->numa_node, 0);
298 if (txq->txdesc_pool == NULL) {
299 PMD_DRV_LOG(ERR,
300 "mempool %s create failed: %d", name, rte_errno);
301 goto error;
302 }
303
304 txq->agg_szmax = RTE_MIN(hv->chim_szmax, hv->rndis_agg_size);
305 txq->agg_pktmax = hv->rndis_agg_pkts;
306 txq->agg_align = hv->rndis_agg_align;
307
308 hn_reset_txagg(txq);
309
310 err = hn_vf_tx_queue_setup(dev, queue_idx, nb_desc,
311 socket_id, tx_conf);
312 if (err == 0) {
313 dev->data->tx_queues[queue_idx] = txq;
314 return 0;
315 }
316
317 error:
318 rte_mempool_free(txq->txdesc_pool);
319 rte_memzone_free(txq->tx_rndis_mz);
320 rte_free(txq);
321 return err;
322 }
323
324 void
hn_dev_tx_queue_info(struct rte_eth_dev *dev, uint16_t queue_id,
326 struct rte_eth_txq_info *qinfo)
327 {
328 struct hn_tx_queue *txq = dev->data->tx_queues[queue_id];
329
330 qinfo->nb_desc = txq->txdesc_pool->size;
331 qinfo->conf.offloads = dev->data->dev_conf.txmode.offloads;
332 }
333
static struct hn_txdesc *hn_txd_get(struct hn_tx_queue *txq)
335 {
336 struct hn_txdesc *txd;
337
338 if (rte_mempool_get(txq->txdesc_pool, (void **)&txd)) {
339 ++txq->stats.ring_full;
340 PMD_TX_LOG(DEBUG, "tx pool exhausted!");
341 return NULL;
342 }
343
344 txd->m = NULL;
345 txd->packets = 0;
346 txd->data_size = 0;
347 txd->chim_size = 0;
348
349 return txd;
350 }
351
static void hn_txd_put(struct hn_tx_queue *txq, struct hn_txdesc *txd)
353 {
354 rte_mempool_put(txq->txdesc_pool, txd);
355 }
356
357 void
hn_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
359 {
360 struct hn_tx_queue *txq = dev->data->tx_queues[qid];
361
362 PMD_INIT_FUNC_TRACE();
363
364 if (!txq)
365 return;
366
367 rte_mempool_free(txq->txdesc_pool);
368
369 rte_memzone_free(txq->tx_rndis_mz);
370 rte_free(txq);
371 }
372
373 /*
374 * Check the status of a Tx descriptor in the queue.
375 *
376 * returns:
377 * - -EINVAL - offset outside of tx_descriptor pool.
378 * - RTE_ETH_TX_DESC_FULL - descriptor is not acknowledged by host.
379 * - RTE_ETH_TX_DESC_DONE - descriptor is available.
380 */
int hn_dev_tx_descriptor_status(void *arg, uint16_t offset)
382 {
383 const struct hn_tx_queue *txq = arg;
384
385 hn_process_events(txq->hv, txq->queue_id, 0);
386
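/*
 * Map the offset onto the descriptor mempool: offsets below the in-use
 * count are still awaiting completion, the remainder are done.
 */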
387 if (offset >= rte_mempool_avail_count(txq->txdesc_pool))
388 return -EINVAL;
389
390 if (offset < rte_mempool_in_use_count(txq->txdesc_pool))
391 return RTE_ETH_TX_DESC_FULL;
392 else
393 return RTE_ETH_TX_DESC_DONE;
394 }
395
396 static void
hn_nvs_send_completed(struct rte_eth_dev *dev, uint16_t queue_id,
398 unsigned long xactid, const struct hn_nvs_rndis_ack *ack)
399 {
400 struct hn_data *hv = dev->data->dev_private;
401 struct hn_txdesc *txd = (struct hn_txdesc *)xactid;
402 struct hn_tx_queue *txq;
403
/* Control packets are sent with xactid == 0 */
405 if (!txd)
406 return;
407
408 txq = dev->data->tx_queues[queue_id];
409 if (likely(ack->status == NVS_STATUS_OK)) {
410 PMD_TX_LOG(DEBUG, "port %u:%u complete tx %u packets %u bytes %u",
411 txq->port_id, txq->queue_id, txd->chim_index,
412 txd->packets, txd->data_size);
413 txq->stats.bytes += txd->data_size;
414 txq->stats.packets += txd->packets;
415 } else {
416 PMD_DRV_LOG(NOTICE, "port %u:%u complete tx %u failed status %u",
417 txq->port_id, txq->queue_id, txd->chim_index, ack->status);
418 ++txq->stats.errors;
419 }
420
421 if (txd->chim_index != NVS_CHIM_IDX_INVALID) {
422 hn_chim_free(hv, txd->chim_index);
423 txd->chim_index = NVS_CHIM_IDX_INVALID;
424 }
425
426 rte_pktmbuf_free(txd->m);
427 hn_txd_put(txq, txd);
428 }
429
430 /* Handle transmit completion events */
431 static void
hn_nvs_handle_comp(struct rte_eth_dev *dev, uint16_t queue_id,
433 const struct vmbus_chanpkt_hdr *pkt,
434 const void *data)
435 {
436 const struct hn_nvs_hdr *hdr = data;
437
438 switch (hdr->type) {
439 case NVS_TYPE_RNDIS_ACK:
440 hn_nvs_send_completed(dev, queue_id, pkt->xactid, data);
441 break;
442
443 default:
444 PMD_DRV_LOG(NOTICE, "unexpected send completion type %u",
445 hdr->type);
446 }
447 }
448
/* Parse per-packet info (metadata) */
450 static int
hn_rndis_rxinfo(const void *info_data, unsigned int info_dlen,
452 struct hn_rxinfo *info)
453 {
454 const struct rndis_pktinfo *pi = info_data;
455 uint32_t mask = 0;
456
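/* Walk the chain of pktinfo records; each record carries its own size, type and data offset */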
457 while (info_dlen != 0) {
458 const void *data;
459 uint32_t dlen;
460
461 if (unlikely(info_dlen < sizeof(*pi)))
462 return -EINVAL;
463
464 if (unlikely(info_dlen < pi->size))
465 return -EINVAL;
466 info_dlen -= pi->size;
467
468 if (unlikely(pi->size & RNDIS_PKTINFO_SIZE_ALIGNMASK))
469 return -EINVAL;
470 if (unlikely(pi->size < pi->offset))
471 return -EINVAL;
472
473 dlen = pi->size - pi->offset;
474 data = pi->data;
475
476 switch (pi->type) {
477 case NDIS_PKTINFO_TYPE_VLAN:
478 if (unlikely(dlen < NDIS_VLAN_INFO_SIZE))
479 return -EINVAL;
480 info->vlan_info = *((const uint32_t *)data);
481 mask |= HN_RXINFO_VLAN;
482 break;
483
484 case NDIS_PKTINFO_TYPE_CSUM:
485 if (unlikely(dlen < NDIS_RXCSUM_INFO_SIZE))
486 return -EINVAL;
487 info->csum_info = *((const uint32_t *)data);
488 mask |= HN_RXINFO_CSUM;
489 break;
490
491 case NDIS_PKTINFO_TYPE_HASHVAL:
492 if (unlikely(dlen < NDIS_HASH_VALUE_SIZE))
493 return -EINVAL;
494 info->hash_value = *((const uint32_t *)data);
495 mask |= HN_RXINFO_HASHVAL;
496 break;
497
498 case NDIS_PKTINFO_TYPE_HASHINF:
499 if (unlikely(dlen < NDIS_HASH_INFO_SIZE))
500 return -EINVAL;
501 info->hash_info = *((const uint32_t *)data);
502 mask |= HN_RXINFO_HASHINF;
503 break;
504
505 default:
506 goto next;
507 }
508
509 if (mask == HN_RXINFO_ALL)
510 break; /* All found; done */
511 next:
512 pi = (const struct rndis_pktinfo *)
513 ((const uint8_t *)pi + pi->size);
514 }
515
516 /*
517 * Final fixup.
518 * - If there is no hash value, invalidate the hash info.
519 */
520 if (!(mask & HN_RXINFO_HASHVAL))
521 info->hash_info = HN_NDIS_HASH_INFO_INVALID;
522 return 0;
523 }
524
static void hn_rx_buf_free_cb(void *buf __rte_unused, void *opaque)
526 {
527 struct hn_rx_bufinfo *rxb = opaque;
528 struct hn_rx_queue *rxq = rxb->rxq;
529
530 rte_atomic32_dec(&rxq->rxbuf_outstanding);
531 hn_nvs_ack_rxbuf(rxb->chan, rxb->xactid);
532 }
533
static struct hn_rx_bufinfo *hn_rx_buf_init(struct hn_rx_queue *rxq,
535 const struct vmbus_chanpkt_rxbuf *pkt)
536 {
537 struct hn_rx_bufinfo *rxb;
538
539 rxb = rxq->rxbuf_info + pkt->hdr.xactid;
540 rxb->chan = rxq->chan;
541 rxb->xactid = pkt->hdr.xactid;
542 rxb->rxq = rxq;
543
544 rxb->shinfo.free_cb = hn_rx_buf_free_cb;
545 rxb->shinfo.fcb_opaque = rxb;
546 rte_mbuf_ext_refcnt_set(&rxb->shinfo, 1);
547 return rxb;
548 }
549
static void hn_rxpkt(struct hn_rx_queue *rxq, struct hn_rx_bufinfo *rxb,
551 uint8_t *data, unsigned int headroom, unsigned int dlen,
552 const struct hn_rxinfo *info)
553 {
554 struct hn_data *hv = rxq->hv;
555 struct rte_mbuf *m;
556 bool use_extbuf = false;
557
558 m = rte_pktmbuf_alloc(rxq->mb_pool);
559 if (unlikely(!m)) {
560 struct rte_eth_dev *dev =
561 &rte_eth_devices[rxq->port_id];
562
563 dev->data->rx_mbuf_alloc_failed++;
564 return;
565 }
566
567 /*
* For large packets, avoid the copy if possible, but keep some
* space available in the receive area for later packets.
570 */
571 if (hv->rx_extmbuf_enable && dlen > hv->rx_copybreak &&
572 (uint32_t)rte_atomic32_read(&rxq->rxbuf_outstanding) <
573 hv->rxbuf_section_cnt / 2) {
574 struct rte_mbuf_ext_shared_info *shinfo;
575 const void *rxbuf;
576 rte_iova_t iova;
577
578 /*
579 * Build an external mbuf that points to receive area.
580 * Use refcount to handle multiple packets in same
581 * receive buffer section.
582 */
583 rxbuf = hv->rxbuf_res->addr;
584 iova = rte_mem_virt2iova(rxbuf) + RTE_PTR_DIFF(data, rxbuf);
585 shinfo = &rxb->shinfo;
586
587 /* shinfo is already set to 1 by the caller */
588 if (rte_mbuf_ext_refcnt_update(shinfo, 1) == 2)
589 rte_atomic32_inc(&rxq->rxbuf_outstanding);
590
591 rte_pktmbuf_attach_extbuf(m, data, iova,
592 dlen + headroom, shinfo);
593 m->data_off = headroom;
594 use_extbuf = true;
595 } else {
/* Mbufs in the pool must be large enough to hold small packets */
597 if (unlikely(rte_pktmbuf_tailroom(m) < dlen)) {
598 rte_pktmbuf_free_seg(m);
599 ++rxq->stats.errors;
600 return;
601 }
602 rte_memcpy(rte_pktmbuf_mtod(m, void *),
603 data + headroom, dlen);
604 }
605
606 m->port = rxq->port_id;
607 m->pkt_len = dlen;
608 m->data_len = dlen;
609 m->packet_type = rte_net_get_ptype(m, NULL,
610 RTE_PTYPE_L2_MASK |
611 RTE_PTYPE_L3_MASK |
612 RTE_PTYPE_L4_MASK);
613
614 if (info->vlan_info != HN_NDIS_VLAN_INFO_INVALID) {
615 m->vlan_tci = info->vlan_info;
616 m->ol_flags |= RTE_MBUF_F_RX_VLAN_STRIPPED | RTE_MBUF_F_RX_VLAN;
617
618 /* NDIS always strips tag, put it back if necessary */
619 if (!hv->vlan_strip && rte_vlan_insert(&m)) {
620 PMD_DRV_LOG(DEBUG, "vlan insert failed");
621 ++rxq->stats.errors;
622 if (use_extbuf)
623 rte_pktmbuf_detach_extbuf(m);
624 rte_pktmbuf_free(m);
625 return;
626 }
627 }
628
629 if (info->csum_info != HN_NDIS_RXCSUM_INFO_INVALID) {
630 if (info->csum_info & NDIS_RXCSUM_INFO_IPCS_OK)
631 m->ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_GOOD;
632
633 if (info->csum_info & (NDIS_RXCSUM_INFO_UDPCS_OK
634 | NDIS_RXCSUM_INFO_TCPCS_OK))
635 m->ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_GOOD;
636 else if (info->csum_info & (NDIS_RXCSUM_INFO_TCPCS_FAILED
637 | NDIS_RXCSUM_INFO_UDPCS_FAILED))
638 m->ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_BAD;
639 }
640
641 if (info->hash_info != HN_NDIS_HASH_INFO_INVALID) {
642 m->ol_flags |= RTE_MBUF_F_RX_RSS_HASH;
643 m->hash.rss = info->hash_value;
644 }
645
646 PMD_RX_LOG(DEBUG,
647 "port %u:%u RX id %"PRIu64" size %u type %#x ol_flags %#"PRIx64,
648 rxq->port_id, rxq->queue_id, rxb->xactid,
649 m->pkt_len, m->packet_type, m->ol_flags);
650
651 ++rxq->stats.packets;
652 rxq->stats.bytes += m->pkt_len;
653 hn_update_packet_stats(&rxq->stats, m);
654
655 if (unlikely(rte_ring_sp_enqueue(rxq->rx_ring, m) != 0)) {
656 ++rxq->stats.ring_full;
657 PMD_RX_LOG(DEBUG, "rx ring full");
658 if (use_extbuf)
659 rte_pktmbuf_detach_extbuf(m);
660 rte_pktmbuf_free(m);
661 }
662 }
663
static void hn_rndis_rx_data(struct hn_rx_queue *rxq,
665 struct hn_rx_bufinfo *rxb,
666 void *data, uint32_t dlen)
667 {
668 unsigned int data_off, data_len;
669 unsigned int pktinfo_off, pktinfo_len;
670 const struct rndis_packet_msg *pkt = data;
671 struct hn_rxinfo info = {
672 .vlan_info = HN_NDIS_VLAN_INFO_INVALID,
673 .csum_info = HN_NDIS_RXCSUM_INFO_INVALID,
674 .hash_info = HN_NDIS_HASH_INFO_INVALID,
675 };
676 int err;
677
678 hn_rndis_dump(pkt);
679
680 if (unlikely(dlen < sizeof(*pkt)))
681 goto error;
682
683 if (unlikely(dlen < pkt->len))
684 goto error; /* truncated RNDIS from host */
685
686 if (unlikely(pkt->len < pkt->datalen
687 + pkt->oobdatalen + pkt->pktinfolen))
688 goto error;
689
690 if (unlikely(pkt->datalen == 0))
691 goto error;
692
693 /* Check offsets. */
694 if (unlikely(pkt->dataoffset < RNDIS_PACKET_MSG_OFFSET_MIN))
695 goto error;
696
697 if (likely(pkt->pktinfooffset > 0) &&
698 unlikely(pkt->pktinfooffset < RNDIS_PACKET_MSG_OFFSET_MIN ||
699 (pkt->pktinfooffset & RNDIS_PACKET_MSG_OFFSET_ALIGNMASK)))
700 goto error;
701
702 data_off = RNDIS_PACKET_MSG_OFFSET_ABS(pkt->dataoffset);
703 data_len = pkt->datalen;
704 pktinfo_off = RNDIS_PACKET_MSG_OFFSET_ABS(pkt->pktinfooffset);
705 pktinfo_len = pkt->pktinfolen;
706
707 if (likely(pktinfo_len > 0)) {
708 err = hn_rndis_rxinfo((const uint8_t *)pkt + pktinfo_off,
709 pktinfo_len, &info);
710 if (err)
711 goto error;
712 }
713
714 /* overflow check */
715 if (data_len > data_len + data_off || data_len + data_off > pkt->len)
716 goto error;
717
718 if (unlikely(data_len < RTE_ETHER_HDR_LEN))
719 goto error;
720
721 hn_rxpkt(rxq, rxb, data, data_off, data_len, &info);
722 return;
723 error:
724 ++rxq->stats.errors;
725 }
726
727 static void
hn_rndis_receive(struct rte_eth_dev *dev, struct hn_rx_queue *rxq,
729 struct hn_rx_bufinfo *rxb, void *buf, uint32_t len)
730 {
731 const struct rndis_msghdr *hdr = buf;
732
733 switch (hdr->type) {
734 case RNDIS_PACKET_MSG:
735 if (dev->data->dev_started)
736 hn_rndis_rx_data(rxq, rxb, buf, len);
737 break;
738
739 case RNDIS_INDICATE_STATUS_MSG:
740 hn_rndis_link_status(dev, buf);
741 break;
742
743 case RNDIS_INITIALIZE_CMPLT:
744 case RNDIS_QUERY_CMPLT:
745 case RNDIS_SET_CMPLT:
746 hn_rndis_receive_response(rxq->hv, buf, len);
747 break;
748
749 default:
750 PMD_DRV_LOG(NOTICE,
751 "unexpected RNDIS message (type %#x len %u)",
752 hdr->type, len);
753 break;
754 }
755 }
756
757 static void
hn_nvs_handle_rxbuf(struct rte_eth_dev *dev,
759 struct hn_data *hv,
760 struct hn_rx_queue *rxq,
761 const struct vmbus_chanpkt_hdr *hdr,
762 const void *buf)
763 {
764 const struct vmbus_chanpkt_rxbuf *pkt;
765 const struct hn_nvs_hdr *nvs_hdr = buf;
766 uint32_t rxbuf_sz = hv->rxbuf_res->len;
767 char *rxbuf = hv->rxbuf_res->addr;
768 unsigned int i, hlen, count;
769 struct hn_rx_bufinfo *rxb;
770
771 /* At minimum we need type header */
772 if (unlikely(vmbus_chanpkt_datalen(hdr) < sizeof(*nvs_hdr))) {
773 PMD_RX_LOG(ERR, "invalid receive nvs RNDIS");
774 return;
775 }
776
777 /* Make sure that this is a RNDIS message. */
778 if (unlikely(nvs_hdr->type != NVS_TYPE_RNDIS)) {
779 PMD_RX_LOG(ERR, "nvs type %u, not RNDIS",
780 nvs_hdr->type);
781 return;
782 }
783
784 hlen = vmbus_chanpkt_getlen(hdr->hlen);
785 if (unlikely(hlen < sizeof(*pkt))) {
786 PMD_RX_LOG(ERR, "invalid rxbuf chanpkt");
787 return;
788 }
789
790 pkt = container_of(hdr, const struct vmbus_chanpkt_rxbuf, hdr);
791 if (unlikely(pkt->rxbuf_id != NVS_RXBUF_SIG)) {
792 PMD_RX_LOG(ERR, "invalid rxbuf_id 0x%08x",
793 pkt->rxbuf_id);
794 return;
795 }
796
797 count = pkt->rxbuf_cnt;
798 if (unlikely(hlen < offsetof(struct vmbus_chanpkt_rxbuf,
799 rxbuf[count]))) {
800 PMD_RX_LOG(ERR, "invalid rxbuf_cnt %u", count);
801 return;
802 }
803
804 if (pkt->hdr.xactid > hv->rxbuf_section_cnt) {
805 PMD_RX_LOG(ERR, "invalid rxbuf section id %" PRIx64,
806 pkt->hdr.xactid);
807 return;
808 }
809
810 /* Setup receive buffer info to allow for callback */
811 rxb = hn_rx_buf_init(rxq, pkt);
812
813 /* Each range represents 1 RNDIS pkt that contains 1 Ethernet frame */
814 for (i = 0; i < count; ++i) {
815 unsigned int ofs, len;
816
817 ofs = pkt->rxbuf[i].ofs;
818 len = pkt->rxbuf[i].len;
819
820 if (unlikely(ofs + len > rxbuf_sz)) {
821 PMD_RX_LOG(ERR,
822 "%uth RNDIS msg overflow ofs %u, len %u",
823 i, ofs, len);
824 continue;
825 }
826
827 if (unlikely(len == 0)) {
828 PMD_RX_LOG(ERR, "%uth RNDIS msg len %u", i, len);
829 continue;
830 }
831
832 hn_rndis_receive(dev, rxq, rxb,
833 rxbuf + ofs, len);
834 }
835
836 /* Send ACK now if external mbuf not used */
837 if (rte_mbuf_ext_refcnt_update(&rxb->shinfo, -1) == 0)
838 hn_nvs_ack_rxbuf(rxb->chan, rxb->xactid);
839 }
840
841 /*
842 * Called when NVS inband events are received.
* Send up a two-part message with the port_id and the NVS message
* through the pipe to the netvsc-vf-event control thread.
845 */
static void hn_nvs_handle_notify(struct rte_eth_dev *dev,
847 const struct vmbus_chanpkt_hdr *pkt,
848 const void *data)
849 {
850 const struct hn_nvs_hdr *hdr = data;
851
852 switch (hdr->type) {
853 case NVS_TYPE_TXTBL_NOTE:
/* The transmit indirection table has locking problems
* in DPDK and is therefore not implemented
856 */
857 PMD_DRV_LOG(DEBUG, "host notify of transmit indirection table");
858 break;
859
860 case NVS_TYPE_VFASSOC_NOTE:
861 hn_nvs_handle_vfassoc(dev, pkt, data);
862 break;
863
864 default:
865 PMD_DRV_LOG(INFO,
866 "got notify, nvs type %u", hdr->type);
867 }
868 }
869
struct hn_rx_queue *hn_rx_queue_alloc(struct hn_data *hv,
871 uint16_t queue_id,
872 unsigned int socket_id)
873 {
874 struct hn_rx_queue *rxq;
875
876 rxq = rte_zmalloc_socket("HN_RXQ", sizeof(*rxq),
877 RTE_CACHE_LINE_SIZE, socket_id);
878 if (!rxq)
879 return NULL;
880
881 rxq->hv = hv;
882 rxq->chan = hv->channels[queue_id];
883 rte_spinlock_init(&rxq->ring_lock);
884 rxq->port_id = hv->port_id;
885 rxq->queue_id = queue_id;
886 rxq->event_sz = HN_RXQ_EVENT_DEFAULT;
887 rxq->event_buf = rte_malloc_socket("HN_EVENTS", HN_RXQ_EVENT_DEFAULT,
888 RTE_CACHE_LINE_SIZE, socket_id);
889 if (!rxq->event_buf) {
890 rte_free(rxq);
891 return NULL;
892 }
893
894 /* setup rxbuf_info for non-primary queue */
895 if (queue_id) {
896 rxq->rxbuf_info = rte_calloc("HN_RXBUF_INFO",
897 hv->rxbuf_section_cnt,
898 sizeof(*rxq->rxbuf_info),
899 RTE_CACHE_LINE_SIZE);
900
901 if (!rxq->rxbuf_info) {
902 PMD_DRV_LOG(ERR,
903 "Could not allocate rxbuf info for queue %d\n",
904 queue_id);
905 rte_free(rxq->event_buf);
906 rte_free(rxq);
907 return NULL;
908 }
909 }
910
911 return rxq;
912 }
913
914 void
hn_dev_rx_queue_info(struct rte_eth_dev *dev, uint16_t queue_id,
916 struct rte_eth_rxq_info *qinfo)
917 {
918 struct hn_rx_queue *rxq = dev->data->rx_queues[queue_id];
919
920 qinfo->mp = rxq->mb_pool;
921 qinfo->nb_desc = rxq->rx_ring->size;
922 qinfo->conf.offloads = dev->data->dev_conf.rxmode.offloads;
923 }
924
925 int
hn_dev_rx_queue_setup(struct rte_eth_dev *dev,
927 uint16_t queue_idx, uint16_t nb_desc,
928 unsigned int socket_id,
929 const struct rte_eth_rxconf *rx_conf,
930 struct rte_mempool *mp)
931 {
932 struct hn_data *hv = dev->data->dev_private;
933 char ring_name[RTE_RING_NAMESIZE];
934 struct hn_rx_queue *rxq;
935 unsigned int count;
936 int error = -ENOMEM;
937
938 PMD_INIT_FUNC_TRACE();
939
940 if (queue_idx == 0) {
941 rxq = hv->primary;
942 } else {
943 rxq = hn_rx_queue_alloc(hv, queue_idx, socket_id);
944 if (!rxq)
945 return -ENOMEM;
946 }
947
948 rxq->mb_pool = mp;
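/* Cap the ring size at an even share of the mbuf pool across Rx queues */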
949 count = rte_mempool_avail_count(mp) / dev->data->nb_rx_queues;
950 if (nb_desc == 0 || nb_desc > count)
951 nb_desc = count;
952
953 /*
* Staging ring from the receive event logic to rx_pkts.
* rx_pkts assumes the caller handles any multi-threading issues;
* the event logic has its own locking.
957 */
958 snprintf(ring_name, sizeof(ring_name),
959 "hn_rx_%u_%u", dev->data->port_id, queue_idx);
960 rxq->rx_ring = rte_ring_create(ring_name,
961 rte_align32pow2(nb_desc),
962 socket_id, 0);
963 if (!rxq->rx_ring)
964 goto fail;
965
966 error = hn_vf_rx_queue_setup(dev, queue_idx, nb_desc,
967 socket_id, rx_conf, mp);
968 if (error)
969 goto fail;
970
971 dev->data->rx_queues[queue_idx] = rxq;
972 return 0;
973
974 fail:
975 rte_ring_free(rxq->rx_ring);
976 rte_free(rxq->rxbuf_info);
977 rte_free(rxq->event_buf);
978 rte_free(rxq);
979 return error;
980 }
981
982 static void
hn_rx_queue_free(struct hn_rx_queue *rxq, bool keep_primary)
984 {
985
986 if (!rxq)
987 return;
988
989 rte_ring_free(rxq->rx_ring);
990 rxq->rx_ring = NULL;
991 rxq->mb_pool = NULL;
992
993 hn_vf_rx_queue_release(rxq->hv, rxq->queue_id);
994
995 /* Keep primary queue to allow for control operations */
996 if (keep_primary && rxq == rxq->hv->primary)
997 return;
998
999 rte_free(rxq->rxbuf_info);
1000 rte_free(rxq->event_buf);
1001 rte_free(rxq);
1002 }
1003
1004 void
hn_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
1006 {
1007 struct hn_rx_queue *rxq = dev->data->rx_queues[qid];
1008
1009 PMD_INIT_FUNC_TRACE();
1010
1011 hn_rx_queue_free(rxq, true);
1012 }
1013
1014 /*
* Get the number of used descriptors in an Rx queue.
1016 * For this device that means how many packets are pending in the ring.
1017 */
1018 uint32_t
hn_dev_rx_queue_count(void *rx_queue)
1020 {
1021 struct hn_rx_queue *rxq = rx_queue;
1022
1023 return rte_ring_count(rxq->rx_ring);
1024 }
1025
1026 /*
1027 * Check the status of a Rx descriptor in the queue
1028 *
1029 * returns:
1030 * - -EINVAL - offset outside of ring
1031 * - RTE_ETH_RX_DESC_AVAIL - no data available yet
1032 * - RTE_ETH_RX_DESC_DONE - data is waiting in staging ring
1033 */
int hn_dev_rx_queue_status(void *arg, uint16_t offset)
1035 {
1036 const struct hn_rx_queue *rxq = arg;
1037
1038 hn_process_events(rxq->hv, rxq->queue_id, 0);
1039 if (offset >= rxq->rx_ring->capacity)
1040 return -EINVAL;
1041
1042 if (offset < rte_ring_count(rxq->rx_ring))
1043 return RTE_ETH_RX_DESC_DONE;
1044 else
1045 return RTE_ETH_RX_DESC_AVAIL;
1046 }
1047
1048 int
hn_dev_tx_done_cleanup(void *arg, uint32_t free_cnt)
1050 {
1051 struct hn_tx_queue *txq = arg;
1052
1053 return hn_process_events(txq->hv, txq->queue_id, free_cnt);
1054 }
1055
1056 /*
1057 * Process pending events on the channel.
1058 * Called from both Rx queue poll and Tx cleanup
1059 */
uint32_t hn_process_events(struct hn_data *hv, uint16_t queue_id,
1061 uint32_t tx_limit)
1062 {
1063 struct rte_eth_dev *dev = &rte_eth_devices[hv->port_id];
1064 struct hn_rx_queue *rxq;
1065 uint32_t bytes_read = 0;
1066 uint32_t tx_done = 0;
1067 int ret = 0;
1068
1069 rxq = queue_id == 0 ? hv->primary : dev->data->rx_queues[queue_id];
1070
1071 /*
* The channel is shared between the Rx and Tx queues, so a lock is
* needed because DPDK does not force the same CPU to be used for Rx/Tx.
1074 */
1075 if (unlikely(!rte_spinlock_trylock(&rxq->ring_lock)))
1076 return 0;
1077
1078 for (;;) {
1079 const struct vmbus_chanpkt_hdr *pkt;
1080 uint32_t len = rxq->event_sz;
1081 const void *data;
1082
1083 retry:
1084 ret = rte_vmbus_chan_recv_raw(rxq->chan, rxq->event_buf, &len);
1085 if (ret == -EAGAIN)
1086 break; /* ring is empty */
1087
1088 if (unlikely(ret == -ENOBUFS)) {
1089 /* event buffer not large enough to read ring */
1090
1091 PMD_DRV_LOG(DEBUG,
1092 "event buffer expansion (need %u)", len);
1093 rxq->event_sz = len + len / 4;
1094 rxq->event_buf = rte_realloc(rxq->event_buf, rxq->event_sz,
1095 RTE_CACHE_LINE_SIZE);
1096 if (rxq->event_buf)
1097 goto retry;
1098 /* out of memory, no more events now */
1099 rxq->event_sz = 0;
1100 break;
1101 }
1102
1103 if (unlikely(ret <= 0)) {
1104 /* This indicates a failure to communicate (or worse) */
1105 rte_exit(EXIT_FAILURE,
1106 "vmbus ring buffer error: %d", ret);
1107 }
1108
1109 bytes_read += ret;
1110 pkt = (const struct vmbus_chanpkt_hdr *)rxq->event_buf;
1111 data = (char *)rxq->event_buf + vmbus_chanpkt_getlen(pkt->hlen);
1112
1113 switch (pkt->type) {
1114 case VMBUS_CHANPKT_TYPE_COMP:
1115 ++tx_done;
1116 hn_nvs_handle_comp(dev, queue_id, pkt, data);
1117 break;
1118
1119 case VMBUS_CHANPKT_TYPE_RXBUF:
1120 hn_nvs_handle_rxbuf(dev, hv, rxq, pkt, data);
1121 break;
1122
1123 case VMBUS_CHANPKT_TYPE_INBAND:
1124 hn_nvs_handle_notify(dev, pkt, data);
1125 break;
1126
1127 default:
1128 PMD_DRV_LOG(ERR, "unknown chan pkt %u", pkt->type);
1129 break;
1130 }
1131
1132 if (tx_limit && tx_done >= tx_limit)
1133 break;
1134 }
1135
1136 if (bytes_read > 0)
1137 rte_vmbus_chan_signal_read(rxq->chan, bytes_read);
1138
1139 rte_spinlock_unlock(&rxq->ring_lock);
1140
1141 return tx_done;
1142 }
1143
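/*
 * Copy an mbuf chain into the chimney buffer just after its RNDIS header
 * and update the aggregation byte/packet accounting.
 */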
static void hn_append_to_chim(struct hn_tx_queue *txq,
1145 struct rndis_packet_msg *pkt,
1146 const struct rte_mbuf *m)
1147 {
1148 struct hn_txdesc *txd = txq->agg_txd;
1149 uint8_t *buf = (uint8_t *)pkt;
1150 unsigned int data_offs;
1151
1152 hn_rndis_dump(pkt);
1153
1154 data_offs = RNDIS_PACKET_MSG_OFFSET_ABS(pkt->dataoffset);
1155 txd->chim_size += pkt->len;
1156 txd->data_size += m->pkt_len;
1157 ++txd->packets;
1158 hn_update_packet_stats(&txq->stats, m);
1159
1160 for (; m; m = m->next) {
1161 uint16_t len = rte_pktmbuf_data_len(m);
1162
1163 rte_memcpy(buf + data_offs,
1164 rte_pktmbuf_mtod(m, const char *), len);
1165 data_offs += len;
1166 }
1167 }
1168
1169 /*
1170 * Send pending aggregated data in chimney buffer (if any).
1171 * Returns error if send was unsuccessful because channel ring buffer
1172 * was full.
1173 */
static int hn_flush_txagg(struct hn_tx_queue *txq, bool *need_sig)
1175
1176 {
1177 struct hn_txdesc *txd = txq->agg_txd;
1178 struct hn_nvs_rndis rndis;
1179 int ret;
1180
1181 if (!txd)
1182 return 0;
1183
1184 rndis = (struct hn_nvs_rndis) {
1185 .type = NVS_TYPE_RNDIS,
1186 .rndis_mtype = NVS_RNDIS_MTYPE_DATA,
1187 .chim_idx = txd->chim_index,
1188 .chim_sz = txd->chim_size,
1189 };
1190
1191 PMD_TX_LOG(DEBUG, "port %u:%u tx %u size %u",
1192 txq->port_id, txq->queue_id, txd->chim_index, txd->chim_size);
1193
1194 ret = hn_nvs_send(txq->chan, VMBUS_CHANPKT_FLAG_RC,
1195 &rndis, sizeof(rndis), (uintptr_t)txd, need_sig);
1196
1197 if (likely(ret == 0))
1198 hn_reset_txagg(txq);
1199 else if (ret == -EAGAIN) {
1200 PMD_TX_LOG(DEBUG, "port %u:%u channel full",
1201 txq->port_id, txq->queue_id);
1202 ++txq->stats.channel_full;
1203 } else {
1204 ++txq->stats.errors;
1205
1206 PMD_DRV_LOG(NOTICE, "port %u:%u send failed: %d",
1207 txq->port_id, txq->queue_id, ret);
1208 }
1209 return ret;
1210 }
1211
1212 /*
* Try to find a place in a send chimney buffer to put
* the small packet. If space is available, this routine
* returns a pointer to where to place the data.
1216 * If no space, caller should try direct transmit.
1217 */
1218 static void *
hn_try_txagg(struct hn_data *hv, struct hn_tx_queue *txq,
1220 struct hn_txdesc *txd, uint32_t pktsize)
1221 {
1222 struct hn_txdesc *agg_txd = txq->agg_txd;
1223 struct rndis_packet_msg *pkt;
1224 void *chim;
1225
1226 if (agg_txd) {
1227 unsigned int padding, olen;
1228
1229 /*
1230 * Update the previous RNDIS packet's total length,
1231 * it can be increased due to the mandatory alignment
1232 * padding for this RNDIS packet. And update the
1233 * aggregating txdesc's chimney sending buffer size
1234 * accordingly.
1235 *
1236 * Zero-out the padding, as required by the RNDIS spec.
1237 */
1238 pkt = txq->agg_prevpkt;
1239 olen = pkt->len;
1240 padding = RTE_ALIGN(olen, txq->agg_align) - olen;
1241 if (padding > 0) {
1242 agg_txd->chim_size += padding;
1243 pkt->len += padding;
1244 memset((uint8_t *)pkt + olen, 0, padding);
1245 }
1246
1247 chim = (uint8_t *)pkt + pkt->len;
1248 txq->agg_prevpkt = chim;
1249 txq->agg_pktleft--;
1250 txq->agg_szleft -= pktsize;
1251 if (txq->agg_szleft < HN_PKTSIZE_MIN(txq->agg_align)) {
1252 /*
1253 * Probably can't aggregate more packets,
1254 * flush this aggregating txdesc proactively.
1255 */
1256 txq->agg_pktleft = 0;
1257 }
1258
1259 hn_txd_put(txq, txd);
1260 return chim;
1261 }
1262
1263 txd->chim_index = hn_chim_alloc(hv);
1264 if (txd->chim_index == NVS_CHIM_IDX_INVALID)
1265 return NULL;
1266
1267 chim = (uint8_t *)hv->chim_res->addr
1268 + txd->chim_index * hv->chim_szmax;
1269
1270 txq->agg_txd = txd;
1271 txq->agg_pktleft = txq->agg_pktmax - 1;
1272 txq->agg_szleft = txq->agg_szmax - pktsize;
1273 txq->agg_prevpkt = chim;
1274
1275 return chim;
1276 }
1277
1278 static inline void *
hn_rndis_pktinfo_append(struct rndis_packet_msg *pkt,
1280 uint32_t pi_dlen, uint32_t pi_type)
1281 {
1282 const uint32_t pi_size = RNDIS_PKTINFO_SIZE(pi_dlen);
1283 struct rndis_pktinfo *pi;
1284
1285 /*
1286 * Per-packet-info does not move; it only grows.
1287 *
1288 * NOTE:
1289 * pktinfooffset in this phase counts from the beginning
1290 * of rndis_packet_msg.
1291 */
1292 pi = (struct rndis_pktinfo *)((uint8_t *)pkt + hn_rndis_pktlen(pkt));
1293
1294 pkt->pktinfolen += pi_size;
1295
1296 pi->size = pi_size;
1297 pi->type = pi_type;
1298 pi->offset = RNDIS_PKTINFO_OFFSET;
1299
1300 return pi->data;
1301 }
1302
1303 /* Put RNDIS header and packet info on packet */
static void hn_encap(struct rndis_packet_msg *pkt,
1305 uint16_t queue_id,
1306 const struct rte_mbuf *m)
1307 {
1308 unsigned int hlen = m->l2_len + m->l3_len;
1309 uint32_t *pi_data;
1310 uint32_t pkt_hlen;
1311
1312 pkt->type = RNDIS_PACKET_MSG;
1313 pkt->len = m->pkt_len;
1314 pkt->dataoffset = 0;
1315 pkt->datalen = m->pkt_len;
1316 pkt->oobdataoffset = 0;
1317 pkt->oobdatalen = 0;
1318 pkt->oobdataelements = 0;
1319 pkt->pktinfooffset = sizeof(*pkt);
1320 pkt->pktinfolen = 0;
1321 pkt->vchandle = 0;
1322 pkt->reserved = 0;
1323
1324 /*
* Set the hash value for this packet to the queue_id so that the
* TX done event for this packet arrives on the right channel.
1327 */
1328 pi_data = hn_rndis_pktinfo_append(pkt, NDIS_HASH_VALUE_SIZE,
1329 NDIS_PKTINFO_TYPE_HASHVAL);
1330 *pi_data = queue_id;
1331
1332 if (m->ol_flags & RTE_MBUF_F_TX_VLAN) {
1333 pi_data = hn_rndis_pktinfo_append(pkt, NDIS_VLAN_INFO_SIZE,
1334 NDIS_PKTINFO_TYPE_VLAN);
1335 *pi_data = m->vlan_tci;
1336 }
1337
1338 if (m->ol_flags & RTE_MBUF_F_TX_TCP_SEG) {
1339 pi_data = hn_rndis_pktinfo_append(pkt, NDIS_LSO2_INFO_SIZE,
1340 NDIS_PKTINFO_TYPE_LSO);
1341
1342 if (m->ol_flags & RTE_MBUF_F_TX_IPV6) {
1343 *pi_data = NDIS_LSO2_INFO_MAKEIPV6(hlen,
1344 m->tso_segsz);
1345 } else {
1346 *pi_data = NDIS_LSO2_INFO_MAKEIPV4(hlen,
1347 m->tso_segsz);
1348 }
1349 } else if ((m->ol_flags & RTE_MBUF_F_TX_L4_MASK) ==
1350 RTE_MBUF_F_TX_TCP_CKSUM ||
1351 (m->ol_flags & RTE_MBUF_F_TX_L4_MASK) ==
1352 RTE_MBUF_F_TX_UDP_CKSUM ||
1353 (m->ol_flags & RTE_MBUF_F_TX_IP_CKSUM)) {
1354 pi_data = hn_rndis_pktinfo_append(pkt, NDIS_TXCSUM_INFO_SIZE,
1355 NDIS_PKTINFO_TYPE_CSUM);
1356 *pi_data = 0;
1357
1358 if (m->ol_flags & RTE_MBUF_F_TX_IPV6)
1359 *pi_data |= NDIS_TXCSUM_INFO_IPV6;
1360 if (m->ol_flags & RTE_MBUF_F_TX_IPV4) {
1361 *pi_data |= NDIS_TXCSUM_INFO_IPV4;
1362
1363 if (m->ol_flags & RTE_MBUF_F_TX_IP_CKSUM)
1364 *pi_data |= NDIS_TXCSUM_INFO_IPCS;
1365 }
1366
1367 if ((m->ol_flags & RTE_MBUF_F_TX_L4_MASK) ==
1368 RTE_MBUF_F_TX_TCP_CKSUM)
1369 *pi_data |= NDIS_TXCSUM_INFO_MKTCPCS(hlen);
1370 else if ((m->ol_flags & RTE_MBUF_F_TX_L4_MASK) ==
1371 RTE_MBUF_F_TX_UDP_CKSUM)
1372 *pi_data |= NDIS_TXCSUM_INFO_MKUDPCS(hlen);
1373 }
1374
1375 pkt_hlen = pkt->pktinfooffset + pkt->pktinfolen;
1376 /* Fixup RNDIS packet message total length */
1377 pkt->len += pkt_hlen;
1378
1379 /* Convert RNDIS packet message offsets */
1380 pkt->dataoffset = hn_rndis_pktmsg_offset(pkt_hlen);
1381 pkt->pktinfooffset = hn_rndis_pktmsg_offset(pkt->pktinfooffset);
1382 }
1383
/* How many scatter-gather list elements are needed */
static unsigned int hn_get_slots(const struct rte_mbuf *m)
1386 {
1387 unsigned int slots = 1; /* for RNDIS header */
1388
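/* An mbuf segment may straddle page boundaries; one slot per page touched */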
1389 while (m) {
1390 unsigned int size = rte_pktmbuf_data_len(m);
1391 unsigned int offs = rte_mbuf_data_iova(m) & PAGE_MASK;
1392
1393 slots += (offs + size + rte_mem_page_size() - 1) /
1394 rte_mem_page_size();
1395 m = m->next;
1396 }
1397
1398 return slots;
1399 }
1400
1401 /* Build scatter gather list from chained mbuf */
static unsigned int hn_fill_sg(struct vmbus_gpa *sg,
1403 const struct rte_mbuf *m)
1404 {
1405 unsigned int segs = 0;
1406
1407 while (m) {
1408 rte_iova_t addr = rte_mbuf_data_iova(m);
1409 unsigned int page = addr / rte_mem_page_size();
1410 unsigned int offset = addr & PAGE_MASK;
1411 unsigned int len = rte_pktmbuf_data_len(m);
1412
1413 while (len > 0) {
1414 unsigned int bytes = RTE_MIN(len,
1415 rte_mem_page_size() - offset);
1416
1417 sg[segs].page = page;
1418 sg[segs].ofs = offset;
1419 sg[segs].len = bytes;
1420 segs++;
1421
1422 ++page;
1423 offset = 0;
1424 len -= bytes;
1425 }
1426 m = m->next;
1427 }
1428
1429 return segs;
1430 }
1431
1432 /* Transmit directly from mbuf */
static int hn_xmit_sg(struct hn_tx_queue *txq,
1434 const struct hn_txdesc *txd, const struct rte_mbuf *m,
1435 bool *need_sig)
1436 {
1437 struct vmbus_gpa sg[hn_get_slots(m)];
1438 struct hn_nvs_rndis nvs_rndis = {
1439 .type = NVS_TYPE_RNDIS,
1440 .rndis_mtype = NVS_RNDIS_MTYPE_DATA,
1441 .chim_sz = txd->chim_size,
1442 };
1443 rte_iova_t addr;
1444 unsigned int segs;
1445
1446 /* attach aggregation data if present */
1447 if (txd->chim_size > 0)
1448 nvs_rndis.chim_idx = txd->chim_index;
1449 else
1450 nvs_rndis.chim_idx = NVS_CHIM_IDX_INVALID;
1451
1452 hn_rndis_dump(txd->rndis_pkt);
1453
1454 /* pass IOVA of rndis header in first segment */
1455 addr = txq->tx_rndis_iova +
1456 ((char *)txd->rndis_pkt - (char *)txq->tx_rndis);
1457
1458 sg[0].page = addr / rte_mem_page_size();
1459 sg[0].ofs = addr & PAGE_MASK;
1460 sg[0].len = RNDIS_PACKET_MSG_OFFSET_ABS(hn_rndis_pktlen(txd->rndis_pkt));
1461 segs = 1;
1462
1463 hn_update_packet_stats(&txq->stats, m);
1464
1465 segs += hn_fill_sg(sg + 1, m);
1466
1467 PMD_TX_LOG(DEBUG, "port %u:%u tx %u segs %u size %u",
1468 txq->port_id, txq->queue_id, txd->chim_index,
1469 segs, nvs_rndis.chim_sz);
1470
1471 return hn_nvs_send_sglist(txq->chan, sg, segs,
1472 &nvs_rndis, sizeof(nvs_rndis),
1473 (uintptr_t)txd, need_sig);
1474 }
1475
1476 uint16_t
hn_xmit_pkts(void *ptxq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
1478 {
1479 struct hn_tx_queue *txq = ptxq;
1480 uint16_t queue_id = txq->queue_id;
1481 struct hn_data *hv = txq->hv;
1482 struct rte_eth_dev *vf_dev;
1483 bool need_sig = false;
1484 uint16_t nb_tx, tx_thresh;
1485 int ret;
1486
1487 if (unlikely(hv->closed))
1488 return 0;
1489
1490 /*
1491 * Always check for events on the primary channel
1492 * because that is where hotplug notifications occur.
1493 */
1494 tx_thresh = RTE_MAX(txq->free_thresh, nb_pkts);
1495 if (txq->queue_id == 0 ||
1496 rte_mempool_avail_count(txq->txdesc_pool) < tx_thresh)
1497 hn_process_events(hv, txq->queue_id, 0);
1498
1499 /* Transmit over VF if present and up */
1500 if (hv->vf_ctx.vf_vsc_switched) {
1501 rte_rwlock_read_lock(&hv->vf_lock);
1502 vf_dev = hn_get_vf_dev(hv);
1503 if (hv->vf_ctx.vf_vsc_switched && vf_dev &&
1504 vf_dev->data->dev_started) {
1505 void *sub_q = vf_dev->data->tx_queues[queue_id];
1506
1507 nb_tx = (*vf_dev->tx_pkt_burst)
1508 (sub_q, tx_pkts, nb_pkts);
1509 rte_rwlock_read_unlock(&hv->vf_lock);
1510 return nb_tx;
1511 }
1512 rte_rwlock_read_unlock(&hv->vf_lock);
1513 }
1514
1515 for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
1516 struct rte_mbuf *m = tx_pkts[nb_tx];
1517 uint32_t pkt_size = m->pkt_len + HN_RNDIS_PKT_LEN;
1518 struct rndis_packet_msg *pkt;
1519 struct hn_txdesc *txd;
1520
1521 txd = hn_txd_get(txq);
1522 if (txd == NULL)
1523 break;
1524
1525 /* For small packets aggregate them in chimney buffer */
1526 if (m->pkt_len <= hv->tx_copybreak &&
1527 pkt_size <= txq->agg_szmax) {
1528 /* If this packet will not fit, then flush */
1529 if (txq->agg_pktleft == 0 ||
1530 RTE_ALIGN(pkt_size, txq->agg_align) > txq->agg_szleft) {
1531 if (hn_flush_txagg(txq, &need_sig))
1532 goto fail;
1533 }
1534
1535
1536 pkt = hn_try_txagg(hv, txq, txd, pkt_size);
1537 if (unlikely(!pkt))
1538 break;
1539
1540 hn_encap(pkt, queue_id, m);
1541 hn_append_to_chim(txq, pkt, m);
1542
1543 rte_pktmbuf_free(m);
1544
1545 /* if buffer is full, flush */
1546 if (txq->agg_pktleft == 0 &&
1547 hn_flush_txagg(txq, &need_sig))
1548 goto fail;
1549 } else {
1550 /* Send any outstanding packets in buffer */
1551 if (txq->agg_txd && hn_flush_txagg(txq, &need_sig))
1552 goto fail;
1553
1554 pkt = txd->rndis_pkt;
1555 txd->m = m;
1556 txd->data_size = m->pkt_len;
1557 ++txd->packets;
1558
1559 hn_encap(pkt, queue_id, m);
1560
1561 ret = hn_xmit_sg(txq, txd, m, &need_sig);
1562 if (unlikely(ret != 0)) {
1563 if (ret == -EAGAIN) {
1564 PMD_TX_LOG(DEBUG, "sg channel full");
1565 ++txq->stats.channel_full;
1566 } else {
1567 PMD_DRV_LOG(NOTICE, "sg send failed: %d", ret);
1568 ++txq->stats.errors;
1569 }
1570 hn_txd_put(txq, txd);
1571 goto fail;
1572 }
1573 }
1574 }
1575
/* If a partial buffer is left, try to send it.
* If that fails, reuse it on the next send.
1578 */
1579 hn_flush_txagg(txq, &need_sig);
1580
1581 fail:
1582 if (need_sig)
1583 rte_vmbus_chan_signal_tx(txq->chan);
1584
1585 return nb_tx;
1586 }
1587
1588 static uint16_t
hn_recv_vf(uint16_t vf_port, const struct hn_rx_queue *rxq,
1590 struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
1591 {
1592 uint16_t i, n;
1593
1594 if (unlikely(nb_pkts == 0))
1595 return 0;
1596
1597 n = rte_eth_rx_burst(vf_port, rxq->queue_id, rx_pkts, nb_pkts);
1598
1599 /* relabel the received mbufs */
1600 for (i = 0; i < n; i++)
1601 rx_pkts[i]->port = rxq->port_id;
1602
1603 return n;
1604 }
1605
1606 uint16_t
hn_recv_pkts(void *prxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
1608 {
1609 struct hn_rx_queue *rxq = prxq;
1610 struct hn_data *hv = rxq->hv;
1611 struct rte_eth_dev *vf_dev;
1612 uint16_t nb_rcv;
1613
1614 if (unlikely(hv->closed))
1615 return 0;
1616
1617 /* Check for new completions (and hotplug) */
1618 if (likely(rte_ring_count(rxq->rx_ring) < nb_pkts))
1619 hn_process_events(hv, rxq->queue_id, 0);
1620
1621 /* Always check the vmbus path for multicast and new flows */
1622 nb_rcv = rte_ring_sc_dequeue_burst(rxq->rx_ring,
1623 (void **)rx_pkts, nb_pkts, NULL);
1624
1625 /* If VF is available, check that as well */
1626 if (hv->vf_ctx.vf_vsc_switched) {
1627 rte_rwlock_read_lock(&hv->vf_lock);
1628 vf_dev = hn_get_vf_dev(hv);
1629 if (hv->vf_ctx.vf_vsc_switched && vf_dev &&
1630 vf_dev->data->dev_started)
1631 nb_rcv += hn_recv_vf(vf_dev->data->port_id, rxq,
1632 rx_pkts + nb_rcv,
1633 nb_pkts - nb_rcv);
1634
1635 rte_rwlock_read_unlock(&hv->vf_lock);
1636 }
1637 return nb_rcv;
1638 }
1639
1640 void
hn_dev_free_queues(struct rte_eth_dev *dev)
1642 {
1643 unsigned int i;
1644
1645 for (i = 0; i < dev->data->nb_rx_queues; i++) {
1646 struct hn_rx_queue *rxq = dev->data->rx_queues[i];
1647
1648 hn_rx_queue_free(rxq, false);
1649 dev->data->rx_queues[i] = NULL;
1650 }
1651 dev->data->nb_rx_queues = 0;
1652
1653 for (i = 0; i < dev->data->nb_tx_queues; i++) {
1654 hn_dev_tx_queue_release(dev, i);
1655 dev->data->tx_queues[i] = NULL;
1656 }
1657 dev->data->nb_tx_queues = 0;
1658 }
1659