/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (c) 2009-2018 Microsoft Corp.
 * Copyright (c) 2016 Brocade Communications Systems, Inc.
 * Copyright (c) 2012 NetApp Inc.
 * Copyright (c) 2012 Citrix Inc.
 * All rights reserved.
 */

/*
 * Tunable ethdev params
 */
#define HN_MIN_RX_BUF_SIZE 1024
#define HN_MAX_XFER_LEN 2048
#define HN_MAX_MAC_ADDRS 1
#define HN_MAX_CHANNELS 64

/* Host claims to support 12232B; cap the MTU at 9K */
#define HN_MTU_MAX (9 * 1024)

/* Retry interval */
#define HN_CHAN_INTERVAL_US 100

/* Host monitor interval */
#define HN_CHAN_LATENCY_NS 50000

#define HN_TXCOPY_THRESHOLD 512
#define HN_RXCOPY_THRESHOLD 256

#define HN_RX_EXTMBUF_ENABLE 0

/* Buffers need to be page aligned */
#ifndef PAGE_SIZE
#define PAGE_SIZE 4096
#endif

#ifndef PAGE_MASK
#define PAGE_MASK (PAGE_SIZE - 1)
#endif
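
/*
 * Illustrative sketch only: how PAGE_MASK above is typically used to
 * round a buffer length up to the next page boundary.  The helper name
 * is hypothetical and is not part of the driver.
 */
static inline uint32_t
hn_example_page_align(uint32_t len)
{
	/* Add PAGE_SIZE - 1 then clear the low bits to round up. */
	return (len + PAGE_MASK) & ~PAGE_MASK;
}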

struct hn_data;
struct hn_txdesc;

struct hn_stats {
	uint64_t packets;
	uint64_t bytes;
	uint64_t errors;
	uint64_t ring_full;
	uint64_t channel_full;
	uint64_t multicast;
	uint64_t broadcast;
	/* Size bins in array as RFC 2819, undersized [0], 64 [1], etc */
	uint64_t size_bins[8];
};
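
/*
 * Illustrative sketch only: how a packet length maps onto the RFC 2819
 * style size_bins[] above (undersized, 64, 65-127, 128-255, 256-511,
 * 512-1023, 1024-1518, oversized).  The helper name is hypothetical;
 * the driver's own accounting lives in the datapath.
 */
static inline unsigned int
hn_example_size_bin(uint32_t pkt_len)
{
	if (pkt_len < 64)
		return 0;	/* undersized */
	if (pkt_len == 64)
		return 1;
	if (pkt_len <= 127)
		return 2;
	if (pkt_len <= 255)
		return 3;
	if (pkt_len <= 511)
		return 4;
	if (pkt_len <= 1023)
		return 5;
	if (pkt_len <= 1518)
		return 6;
	return 7;		/* oversized */
}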

struct hn_tx_queue {
	struct hn_data *hv;
	struct vmbus_channel *chan;
	uint16_t port_id;
	uint16_t queue_id;
	uint32_t free_thresh;
	struct rte_mempool *txdesc_pool;
	const struct rte_memzone *tx_rndis_mz;
	void *tx_rndis;
	rte_iova_t tx_rndis_iova;

	/* Applied packet transmission aggregation limits. */
	uint32_t agg_szmax;
	uint32_t agg_pktmax;
	uint32_t agg_align;

	/* Packet transmission aggregation states */
	struct hn_txdesc *agg_txd;
	uint32_t agg_pktleft;
	uint32_t agg_szleft;
	struct rndis_packet_msg *agg_prevpkt;

	struct hn_stats stats;
};
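
/*
 * Illustrative sketch only: the aggregation fields above track how many
 * more packets (agg_pktleft) and bytes (agg_szleft) the currently open
 * aggregation buffer can still take.  A new packet fits if both limits
 * hold once its size is rounded up to agg_align (assumed here to be a
 * power of two).  The helper name is hypothetical and is not the
 * driver's actual aggregation path.
 */
static inline bool
hn_example_agg_fits(const struct hn_tx_queue *txq, uint32_t pkt_size)
{
	uint32_t aligned = (pkt_size + txq->agg_align - 1) &
			   ~(txq->agg_align - 1);

	return txq->agg_txd != NULL &&
	       txq->agg_pktleft > 0 &&
	       aligned <= txq->agg_szleft;
}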

struct hn_rx_queue {
	struct hn_data *hv;
	struct vmbus_channel *chan;
	struct rte_mempool *mb_pool;
	struct rte_ring *rx_ring;

	rte_spinlock_t ring_lock;
	uint32_t event_sz;
	uint16_t port_id;
	uint16_t queue_id;
	struct hn_stats stats;

	void *event_buf;
	struct hn_rx_bufinfo *rxbuf_info;
	rte_atomic32_t rxbuf_outstanding;
};

/* multi-packet data from host */
struct hn_rx_bufinfo {
	struct vmbus_channel *chan;
	struct hn_rx_queue *rxq;
	uint64_t xactid;
	struct rte_mbuf_ext_shared_info shinfo;
} __rte_cache_aligned;
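
/*
 * Illustrative sketch only (assumes rte_mbuf.h has already been pulled
 * in by the consumer of this header): how the shinfo field above can be
 * used to hand host receive-buffer data to an mbuf as an external
 * buffer.  The callback and helper names are hypothetical, not the
 * driver's.
 */
static inline void
hn_example_free_cb(void *addr __rte_unused, void *opaque __rte_unused)
{
	/* Real code would release the host buffer section here. */
}

static inline void
hn_example_attach_extbuf(struct rte_mbuf *m, struct hn_rx_bufinfo *rxb,
			 void *data, rte_iova_t iova, uint16_t len)
{
	rxb->shinfo.free_cb = hn_example_free_cb;
	rxb->shinfo.fcb_opaque = rxb;
	rte_mbuf_ext_refcnt_set(&rxb->shinfo, 1);

	/* Attach the host data as an external buffer; real code would
	 * also set the mbuf data offset/length for the payload.
	 */
	rte_pktmbuf_attach_extbuf(m, data, iova, len, &rxb->shinfo);
}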

#define HN_INVALID_PORT UINT16_MAX

struct hn_data {
	struct rte_vmbus_device *vmbus;
	struct hn_rx_queue *primary;
	rte_rwlock_t vf_lock;
	uint16_t port_id;
	uint16_t vf_port;

	uint8_t vf_present;
	uint8_t closed;
	uint8_t vlan_strip;

	uint32_t link_status;
	uint32_t link_speed;

	struct rte_mem_resource *rxbuf_res;	/* UIO resource for Rx */
	uint32_t rxbuf_section_cnt;		/* # of Rx sections */
	uint32_t rx_copybreak;
	uint32_t rx_extmbuf_enable;
	uint16_t max_queues;			/* Max available queues */
	uint16_t num_queues;
	uint64_t rss_offloads;

	rte_spinlock_t chim_lock;
	struct rte_mem_resource *chim_res;	/* UIO resource for Tx */
	struct rte_bitmap *chim_bmap;		/* Send buffer map */
	void *chim_bmem;
	uint32_t tx_copybreak;
	uint32_t chim_szmax;			/* Max size per buffer */
	uint32_t chim_cnt;			/* Max packets per buffer */

	uint32_t latency;
	uint32_t nvs_ver;
	uint32_t ndis_ver;
	uint32_t rndis_agg_size;
	uint32_t rndis_agg_pkts;
	uint32_t rndis_agg_align;

	volatile uint32_t rndis_pending;
	rte_atomic32_t rndis_req_id;
	uint8_t rndis_resp[256];

	uint32_t rss_hash;
	uint8_t rss_key[40];
	uint16_t rss_ind[128];

	struct rte_eth_dev_owner owner;

	struct vmbus_channel *channels[HN_MAX_CHANNELS];
};
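
/*
 * Illustrative sketch only (assumes rte_bitmap.h and rte_spinlock.h are
 * already included): chim_bmap above tracks which of the chim_cnt send
 * ("chimney") buffer sections are free.  In this sketch a set bit is
 * treated as an available section; claiming one clears the bit under
 * chim_lock.  The helper name and the simple linear scan are
 * hypothetical simplifications, not the driver's allocator.
 */
static inline uint32_t
hn_example_chim_alloc(struct hn_data *hv)
{
	uint32_t idx;

	rte_spinlock_lock(&hv->chim_lock);
	for (idx = 0; idx < hv->chim_cnt; idx++) {
		if (rte_bitmap_get(hv->chim_bmap, idx)) {
			rte_bitmap_clear(hv->chim_bmap, idx);
			rte_spinlock_unlock(&hv->chim_lock);
			return idx;
		}
	}
	rte_spinlock_unlock(&hv->chim_lock);

	return UINT32_MAX;	/* no free section */
}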

static inline struct vmbus_channel *
hn_primary_chan(const struct hn_data *hv)
{
	return hv->channels[0];
}

uint32_t hn_process_events(struct hn_data *hv, uint16_t queue_id,
			   uint32_t tx_limit);

uint16_t hn_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
		      uint16_t nb_pkts);
uint16_t hn_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
		      uint16_t nb_pkts);

int hn_chim_init(struct rte_eth_dev *dev);
void hn_chim_uninit(struct rte_eth_dev *dev);
int hn_dev_link_update(struct rte_eth_dev *dev, int wait);
int hn_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
			  uint16_t nb_desc, unsigned int socket_id,
			  const struct rte_eth_txconf *tx_conf);
void hn_dev_tx_queue_release(void *arg);
void hn_dev_tx_queue_info(struct rte_eth_dev *dev, uint16_t queue_idx,
			  struct rte_eth_txq_info *qinfo);
int hn_dev_tx_done_cleanup(void *arg, uint32_t free_cnt);
int hn_dev_tx_descriptor_status(void *arg, uint16_t offset);

struct hn_rx_queue *hn_rx_queue_alloc(struct hn_data *hv,
				      uint16_t queue_id,
				      unsigned int socket_id);
int hn_dev_rx_queue_setup(struct rte_eth_dev *dev,
			  uint16_t queue_idx, uint16_t nb_desc,
			  unsigned int socket_id,
			  const struct rte_eth_rxconf *rx_conf,
			  struct rte_mempool *mp);
void hn_dev_rx_queue_info(struct rte_eth_dev *dev, uint16_t queue_id,
			  struct rte_eth_rxq_info *qinfo);
void hn_dev_rx_queue_release(void *arg);
uint32_t hn_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t queue_id);
int hn_dev_rx_queue_status(void *rxq, uint16_t offset);
void hn_dev_free_queues(struct rte_eth_dev *dev);
/* Check if VF is attached */
static inline bool
hn_vf_attached(const struct hn_data *hv)
{
	return hv->vf_port != HN_INVALID_PORT;
}

/*
 * Get VF device for existing netvsc device
 * Assumes vf_lock is held.
 */
static inline struct rte_eth_dev *
hn_get_vf_dev(const struct hn_data *hv)
{
	uint16_t vf_port = hv->vf_port;

	if (vf_port == HN_INVALID_PORT)
		return NULL;
	else
		return &rte_eth_devices[vf_port];
}
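
/*
 * Illustrative sketch only: hn_get_vf_dev() above must be called with
 * vf_lock held (typically for read) so the VF port cannot be removed
 * underneath the caller.  The helper name is hypothetical; it only
 * shows the expected locking pattern.
 */
static inline void
hn_example_with_vf(struct hn_data *hv)
{
	struct rte_eth_dev *vf_dev;

	rte_rwlock_read_lock(&hv->vf_lock);
	vf_dev = hn_get_vf_dev(hv);
	if (vf_dev) {
		/* ... call into the VF port while the lock is held ... */
	}
	rte_rwlock_read_unlock(&hv->vf_lock);
}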

int hn_vf_info_get(struct hn_data *hv,
		   struct rte_eth_dev_info *info);
int hn_vf_add(struct rte_eth_dev *dev, struct hn_data *hv);
int hn_vf_configure(struct rte_eth_dev *dev,
		    const struct rte_eth_conf *dev_conf);
const uint32_t *hn_vf_supported_ptypes(struct rte_eth_dev *dev);
int hn_vf_start(struct rte_eth_dev *dev);
void hn_vf_reset(struct rte_eth_dev *dev);
int hn_vf_close(struct rte_eth_dev *dev);
int hn_vf_stop(struct rte_eth_dev *dev);

int hn_vf_allmulticast_enable(struct rte_eth_dev *dev);
int hn_vf_allmulticast_disable(struct rte_eth_dev *dev);
int hn_vf_promiscuous_enable(struct rte_eth_dev *dev);
int hn_vf_promiscuous_disable(struct rte_eth_dev *dev);
int hn_vf_mc_addr_list(struct rte_eth_dev *dev,
		       struct rte_ether_addr *mc_addr_set,
		       uint32_t nb_mc_addr);

int hn_vf_tx_queue_setup(struct rte_eth_dev *dev,
			 uint16_t queue_idx, uint16_t nb_desc,
			 unsigned int socket_id,
			 const struct rte_eth_txconf *tx_conf);
void hn_vf_tx_queue_release(struct hn_data *hv, uint16_t queue_id);
int hn_vf_tx_queue_status(struct hn_data *hv, uint16_t queue_id, uint16_t offset);

int hn_vf_rx_queue_setup(struct rte_eth_dev *dev,
			 uint16_t queue_idx, uint16_t nb_desc,
			 unsigned int socket_id,
			 const struct rte_eth_rxconf *rx_conf,
			 struct rte_mempool *mp);
void hn_vf_rx_queue_release(struct hn_data *hv, uint16_t queue_id);

int hn_vf_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats);
int hn_vf_stats_reset(struct rte_eth_dev *dev);
int hn_vf_xstats_get_names(struct rte_eth_dev *dev,
			   struct rte_eth_xstat_name *xstats_names,
			   unsigned int size);
int hn_vf_xstats_get(struct rte_eth_dev *dev,
		     struct rte_eth_xstat *xstats,
		     unsigned int offset, unsigned int n);
int hn_vf_xstats_reset(struct rte_eth_dev *dev);
int hn_vf_rss_hash_update(struct rte_eth_dev *dev,
			  struct rte_eth_rss_conf *rss_conf);
int hn_vf_reta_hash_update(struct rte_eth_dev *dev,
			   struct rte_eth_rss_reta_entry64 *reta_conf,
			   uint16_t reta_size);