xref: /f-stack/dpdk/drivers/net/netvsc/hn_nvs.c (revision 0c6bd470)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright (c) 2018 Microsoft Corp.
3  * Copyright (c) 2010-2012 Citrix Inc.
4  * Copyright (c) 2012 NetApp Inc.
5  * All rights reserved.
6  */
7 
8 /*
9  * Network Virtualization Service.
10  */
11 
12 
13 #include <stdint.h>
14 #include <string.h>
15 #include <stdio.h>
16 #include <errno.h>
17 #include <unistd.h>
18 
19 #include <rte_ethdev.h>
20 #include <rte_string_fns.h>
21 #include <rte_memzone.h>
22 #include <rte_malloc.h>
23 #include <rte_atomic.h>
24 #include <rte_branch_prediction.h>
25 #include <rte_ether.h>
26 #include <rte_common.h>
27 #include <rte_errno.h>
28 #include <rte_cycles.h>
29 #include <rte_memory.h>
30 #include <rte_eal.h>
31 #include <rte_dev.h>
32 #include <rte_bus_vmbus.h>
33 
34 #include "hn_logs.h"
35 #include "hn_var.h"
36 #include "hn_nvs.h"
37 
/*
 * NVS protocol versions to probe, tried in order from newest to
 * oldest by hn_nvs_init() until the host accepts one.
 */
static const uint32_t hn_nvs_version[] = {
	NVS_VERSION_61,
	NVS_VERSION_6,
	NVS_VERSION_5,
	NVS_VERSION_4,
	NVS_VERSION_2,
	NVS_VERSION_1
};
46 
hn_nvs_req_send(struct hn_data * hv,void * req,uint32_t reqlen)47 static int hn_nvs_req_send(struct hn_data *hv,
48 			   void *req, uint32_t reqlen)
49 {
50 	return rte_vmbus_chan_send(hn_primary_chan(hv),
51 				   VMBUS_CHANPKT_TYPE_INBAND,
52 				   req, reqlen, 0,
53 				   VMBUS_CHANPKT_FLAG_NONE, NULL);
54 }
55 
/*
 * Send one NVS control request on the primary channel and busy-poll
 * for the matching response.
 *
 * Caller must hold the primary ring lock (see hn_nvs_execute) since
 * only one control transaction may be in flight at a time.
 *
 * Returns 0 on success with the response copied into @resp, or a
 * negative errno on send/receive/validation failure.
 */
static int
__hn_nvs_execute(struct hn_data *hv,
	       void *req, uint32_t reqlen,
	       void *resp, uint32_t resplen,
	       uint32_t type)
{
	struct vmbus_channel *chan = hn_primary_chan(hv);
	char buffer[NVS_RESPSIZE_MAX];
	const struct hn_nvs_hdr *hdr;
	uint64_t xactid;
	uint32_t len;
	int ret;

	/* Send request to ring buffer; ask host for a completion (RC). */
	ret = rte_vmbus_chan_send(chan, VMBUS_CHANPKT_TYPE_INBAND,
				  req, reqlen, 0,
				  VMBUS_CHANPKT_FLAG_RC, NULL);

	if (ret) {
		PMD_DRV_LOG(ERR, "send request failed: %d", ret);
		return ret;
	}

	/*
	 * Poll for the response.  There is no interrupt context in DPDK,
	 * so spin with a short delay until the host posts a packet.
	 * NOTE(review): this loop has no upper bound — it relies on the
	 * host eventually responding.
	 */
 retry:
	len = sizeof(buffer);
	ret = rte_vmbus_chan_recv(chan, buffer, &len, &xactid);
	if (ret == -EAGAIN) {
		rte_delay_us(HN_CHAN_INTERVAL_US);
		goto retry;
	}

	if (ret < 0) {
		PMD_DRV_LOG(ERR, "recv response failed: %d", ret);
		return ret;
	}

	/* Must at least contain an NVS header to be classified. */
	if (len < sizeof(*hdr)) {
		PMD_DRV_LOG(ERR, "response missing NVS header");
		return -EINVAL;
	}

	hdr = (struct hn_nvs_hdr *)buffer;

	/*
	 * Silently drop received packets while waiting for response:
	 * ack any RNDIS data packet so its receive buffer is recycled,
	 * then keep polling for the control response.
	 */
	if (hdr->type == NVS_TYPE_RNDIS) {
		hn_nvs_ack_rxbuf(chan, xactid);
		goto retry;
	}

	/* The response type must match the request's expected type. */
	if (hdr->type != type) {
		PMD_DRV_LOG(ERR, "unexpected NVS resp %#x, expect %#x",
			    hdr->type, type);
		return -EINVAL;
	}

	/* Response must be large enough to fill the caller's buffer. */
	if (len < resplen) {
		PMD_DRV_LOG(ERR,
			    "invalid NVS resp len %u (expect %u)",
			    len, resplen);
		return -EINVAL;
	}

	memcpy(resp, buffer, resplen);

	/* All pass! */
	return 0;
}
123 
124 
125 /*
126  * Execute one control command and get the response.
127  * Only one command can be active on a channel at once
128  * Unlike BSD, DPDK does not have an interrupt context
129  * so the polling is required to wait for response.
130  */
131 static int
hn_nvs_execute(struct hn_data * hv,void * req,uint32_t reqlen,void * resp,uint32_t resplen,uint32_t type)132 hn_nvs_execute(struct hn_data *hv,
133 	       void *req, uint32_t reqlen,
134 	       void *resp, uint32_t resplen,
135 	       uint32_t type)
136 {
137 	struct hn_rx_queue *rxq = hv->primary;
138 	int ret;
139 
140 	rte_spinlock_lock(&rxq->ring_lock);
141 	ret = __hn_nvs_execute(hv, req, reqlen, resp, resplen, type);
142 	rte_spinlock_unlock(&rxq->ring_lock);
143 
144 	return ret;
145 }
146 
147 static int
hn_nvs_doinit(struct hn_data * hv,uint32_t nvs_ver)148 hn_nvs_doinit(struct hn_data *hv, uint32_t nvs_ver)
149 {
150 	struct hn_nvs_init init;
151 	struct hn_nvs_init_resp resp;
152 	uint32_t status;
153 	int error;
154 
155 	memset(&init, 0, sizeof(init));
156 	init.type = NVS_TYPE_INIT;
157 	init.ver_min = nvs_ver;
158 	init.ver_max = nvs_ver;
159 
160 	error = hn_nvs_execute(hv, &init, sizeof(init),
161 			       &resp, sizeof(resp),
162 			       NVS_TYPE_INIT_RESP);
163 	if (error)
164 		return error;
165 
166 	status = resp.status;
167 	if (status != NVS_STATUS_OK) {
168 		/* Not fatal, try other versions */
169 		PMD_INIT_LOG(DEBUG, "nvs init failed for ver 0x%x",
170 			     nvs_ver);
171 		return -EINVAL;
172 	}
173 
174 	return 0;
175 }
176 
177 static int
hn_nvs_conn_rxbuf(struct hn_data * hv)178 hn_nvs_conn_rxbuf(struct hn_data *hv)
179 {
180 	struct hn_nvs_rxbuf_conn conn;
181 	struct hn_nvs_rxbuf_connresp resp;
182 	uint32_t status;
183 	int error;
184 
185 	/* Kernel has already setup RXBUF on primary channel. */
186 
187 	/*
188 	 * Connect RXBUF to NVS.
189 	 */
190 	conn.type = NVS_TYPE_RXBUF_CONN;
191 	conn.gpadl = hv->rxbuf_res->phys_addr;
192 	conn.sig = NVS_RXBUF_SIG;
193 	PMD_DRV_LOG(DEBUG, "connect rxbuff va=%p gpad=%#" PRIx64,
194 		    hv->rxbuf_res->addr,
195 		    hv->rxbuf_res->phys_addr);
196 
197 	error = hn_nvs_execute(hv, &conn, sizeof(conn),
198 			       &resp, sizeof(resp),
199 			       NVS_TYPE_RXBUF_CONNRESP);
200 	if (error) {
201 		PMD_DRV_LOG(ERR,
202 			    "exec nvs rxbuf conn failed: %d",
203 			    error);
204 		return error;
205 	}
206 
207 	status = resp.status;
208 	if (status != NVS_STATUS_OK) {
209 		PMD_DRV_LOG(ERR,
210 			    "nvs rxbuf conn failed: %x", status);
211 		return -EIO;
212 	}
213 	if (resp.nsect != 1) {
214 		PMD_DRV_LOG(ERR,
215 			    "nvs rxbuf response num sections %u != 1",
216 			    resp.nsect);
217 		return -EIO;
218 	}
219 
220 	PMD_DRV_LOG(INFO,
221 		    "receive buffer size %u count %u",
222 		    resp.nvs_sect[0].slotsz,
223 		    resp.nvs_sect[0].slotcnt);
224 	hv->rxbuf_section_cnt = resp.nvs_sect[0].slotcnt;
225 
226 	/*
227 	 * Pimary queue's rxbuf_info is not allocated at creation time.
228 	 * Now we can allocate it after we figure out the slotcnt.
229 	 */
230 	hv->primary->rxbuf_info = rte_calloc("HN_RXBUF_INFO",
231 			hv->rxbuf_section_cnt,
232 			sizeof(*hv->primary->rxbuf_info),
233 			RTE_CACHE_LINE_SIZE);
234 	if (!hv->primary->rxbuf_info) {
235 		PMD_DRV_LOG(ERR,
236 			    "could not allocate rxbuf info");
237 		return -ENOMEM;
238 	}
239 
240 	return 0;
241 }
242 
243 static void
hn_nvs_disconn_rxbuf(struct hn_data * hv)244 hn_nvs_disconn_rxbuf(struct hn_data *hv)
245 {
246 	struct hn_nvs_rxbuf_disconn disconn;
247 	int error;
248 
249 	/*
250 	 * Disconnect RXBUF from NVS.
251 	 */
252 	memset(&disconn, 0, sizeof(disconn));
253 	disconn.type = NVS_TYPE_RXBUF_DISCONN;
254 	disconn.sig = NVS_RXBUF_SIG;
255 
256 	/* NOTE: No response. */
257 	error = hn_nvs_req_send(hv, &disconn, sizeof(disconn));
258 	if (error) {
259 		PMD_DRV_LOG(ERR,
260 			    "send nvs rxbuf disconn failed: %d",
261 			    error);
262 	}
263 
264 	/*
265 	 * Linger long enough for NVS to disconnect RXBUF.
266 	 */
267 	rte_delay_ms(200);
268 }
269 
270 static void
hn_nvs_disconn_chim(struct hn_data * hv)271 hn_nvs_disconn_chim(struct hn_data *hv)
272 {
273 	int error;
274 
275 	if (hv->chim_cnt != 0) {
276 		struct hn_nvs_chim_disconn disconn;
277 
278 		/* Disconnect chimney sending buffer from NVS. */
279 		memset(&disconn, 0, sizeof(disconn));
280 		disconn.type = NVS_TYPE_CHIM_DISCONN;
281 		disconn.sig = NVS_CHIM_SIG;
282 
283 		/* NOTE: No response. */
284 		error = hn_nvs_req_send(hv, &disconn, sizeof(disconn));
285 
286 		if (error) {
287 			PMD_DRV_LOG(ERR,
288 				    "send nvs chim disconn failed: %d", error);
289 		}
290 
291 		hv->chim_cnt = 0;
292 		/*
293 		 * Linger long enough for NVS to disconnect chimney
294 		 * sending buffer.
295 		 */
296 		rte_delay_ms(200);
297 	}
298 }
299 
300 static int
hn_nvs_conn_chim(struct hn_data * hv)301 hn_nvs_conn_chim(struct hn_data *hv)
302 {
303 	struct hn_nvs_chim_conn chim;
304 	struct hn_nvs_chim_connresp resp;
305 	uint32_t sectsz;
306 	unsigned long len = hv->chim_res->len;
307 	int error;
308 
309 	/* Connect chimney sending buffer to NVS */
310 	memset(&chim, 0, sizeof(chim));
311 	chim.type = NVS_TYPE_CHIM_CONN;
312 	chim.gpadl = hv->chim_res->phys_addr;
313 	chim.sig = NVS_CHIM_SIG;
314 	PMD_DRV_LOG(DEBUG, "connect send buf va=%p gpad=%#" PRIx64,
315 		    hv->chim_res->addr,
316 		    hv->chim_res->phys_addr);
317 
318 	error = hn_nvs_execute(hv, &chim, sizeof(chim),
319 			       &resp, sizeof(resp),
320 			       NVS_TYPE_CHIM_CONNRESP);
321 	if (error) {
322 		PMD_DRV_LOG(ERR, "exec nvs chim conn failed");
323 		return error;
324 	}
325 
326 	if (resp.status != NVS_STATUS_OK) {
327 		PMD_DRV_LOG(ERR, "nvs chim conn failed: %x",
328 			    resp.status);
329 		return -EIO;
330 	}
331 
332 	sectsz = resp.sectsz;
333 	if (sectsz == 0 || sectsz & (sizeof(uint32_t) - 1)) {
334 		/* Can't use chimney sending buffer; done! */
335 		PMD_DRV_LOG(NOTICE,
336 			    "invalid chimney sending buffer section size: %u",
337 			    sectsz);
338 		error = -EINVAL;
339 		goto cleanup;
340 	}
341 
342 	hv->chim_szmax = sectsz;
343 	hv->chim_cnt = len / sectsz;
344 
345 	PMD_DRV_LOG(INFO, "send buffer %lu section size:%u, count:%u",
346 		    len, hv->chim_szmax, hv->chim_cnt);
347 
348 	/* Done! */
349 	return 0;
350 
351 cleanup:
352 	hn_nvs_disconn_chim(hv);
353 	return error;
354 }
355 
356 /*
357  * Configure MTU and enable VLAN.
358  */
359 static int
hn_nvs_conf_ndis(struct hn_data * hv,unsigned int mtu)360 hn_nvs_conf_ndis(struct hn_data *hv, unsigned int mtu)
361 {
362 	struct hn_nvs_ndis_conf conf;
363 	int error;
364 
365 	memset(&conf, 0, sizeof(conf));
366 	conf.type = NVS_TYPE_NDIS_CONF;
367 	conf.mtu = mtu + RTE_ETHER_HDR_LEN;
368 	conf.caps = NVS_NDIS_CONF_VLAN;
369 
370 	/* enable SRIOV */
371 	if (hv->nvs_ver >= NVS_VERSION_5)
372 		conf.caps |= NVS_NDIS_CONF_SRIOV;
373 
374 	/* NOTE: No response. */
375 	error = hn_nvs_req_send(hv, &conf, sizeof(conf));
376 	if (error) {
377 		PMD_DRV_LOG(ERR,
378 			    "send nvs ndis conf failed: %d", error);
379 		return error;
380 	}
381 
382 	return 0;
383 }
384 
385 static int
hn_nvs_init_ndis(struct hn_data * hv)386 hn_nvs_init_ndis(struct hn_data *hv)
387 {
388 	struct hn_nvs_ndis_init ndis;
389 	int error;
390 
391 	memset(&ndis, 0, sizeof(ndis));
392 	ndis.type = NVS_TYPE_NDIS_INIT;
393 	ndis.ndis_major = NDIS_VERSION_MAJOR(hv->ndis_ver);
394 	ndis.ndis_minor = NDIS_VERSION_MINOR(hv->ndis_ver);
395 
396 	/* NOTE: No response. */
397 	error = hn_nvs_req_send(hv, &ndis, sizeof(ndis));
398 	if (error)
399 		PMD_DRV_LOG(ERR,
400 			    "send nvs ndis init failed: %d", error);
401 
402 	return error;
403 }
404 
405 static int
hn_nvs_init(struct hn_data * hv)406 hn_nvs_init(struct hn_data *hv)
407 {
408 	unsigned int i;
409 	int error;
410 
411 	/*
412 	 * Find the supported NVS version and set NDIS version accordingly.
413 	 */
414 	for (i = 0; i < RTE_DIM(hn_nvs_version); ++i) {
415 		error = hn_nvs_doinit(hv, hn_nvs_version[i]);
416 		if (error) {
417 			PMD_INIT_LOG(DEBUG, "version %#x error %d",
418 				     hn_nvs_version[i], error);
419 			continue;
420 		}
421 
422 		hv->nvs_ver = hn_nvs_version[i];
423 
424 		/* Set NDIS version according to NVS version. */
425 		hv->ndis_ver = NDIS_VERSION_6_30;
426 		if (hv->nvs_ver <= NVS_VERSION_4)
427 			hv->ndis_ver = NDIS_VERSION_6_1;
428 
429 		PMD_INIT_LOG(DEBUG,
430 			     "NVS version %#x, NDIS version %u.%u",
431 			     hv->nvs_ver, NDIS_VERSION_MAJOR(hv->ndis_ver),
432 			     NDIS_VERSION_MINOR(hv->ndis_ver));
433 		return 0;
434 	}
435 
436 	PMD_DRV_LOG(ERR,
437 		    "no NVS compatible version available");
438 	return -ENXIO;
439 }
440 
441 int
hn_nvs_attach(struct hn_data * hv,unsigned int mtu)442 hn_nvs_attach(struct hn_data *hv, unsigned int mtu)
443 {
444 	int error;
445 
446 	/*
447 	 * Initialize NVS.
448 	 */
449 	error = hn_nvs_init(hv);
450 	if (error)
451 		return error;
452 
453 	/** Configure NDIS before initializing it. */
454 	if (hv->nvs_ver >= NVS_VERSION_2) {
455 		error = hn_nvs_conf_ndis(hv, mtu);
456 		if (error)
457 			return error;
458 	}
459 
460 	/*
461 	 * Initialize NDIS.
462 	 */
463 	error = hn_nvs_init_ndis(hv);
464 	if (error)
465 		return error;
466 
467 	/*
468 	 * Connect RXBUF.
469 	 */
470 	error = hn_nvs_conn_rxbuf(hv);
471 	if (error)
472 		return error;
473 
474 	/*
475 	 * Connect chimney sending buffer.
476 	 */
477 	error = hn_nvs_conn_chim(hv);
478 	if (error) {
479 		hn_nvs_disconn_rxbuf(hv);
480 		return error;
481 	}
482 
483 	return 0;
484 }
485 
/*
 * Tear down the NVS layer: disconnect the receive and chimney send
 * buffers.  NVS itself has no explicit stop request.
 *
 * Note: the __rte_unused annotation previously on @hv was stale —
 * the parameter is clearly used by both disconnect calls below.
 */
void
hn_nvs_detach(struct hn_data *hv)
{
	PMD_INIT_FUNC_TRACE();

	/* NOTE: there are no requests to stop the NVS. */
	hn_nvs_disconn_rxbuf(hv);
	hn_nvs_disconn_chim(hv);
}
495 
496 /*
497  * Ack the consumed RXBUF associated w/ this channel packet,
498  * so that this RXBUF can be recycled by the hypervisor.
499  */
500 void
hn_nvs_ack_rxbuf(struct vmbus_channel * chan,uint64_t tid)501 hn_nvs_ack_rxbuf(struct vmbus_channel *chan, uint64_t tid)
502 {
503 	unsigned int retries = 0;
504 	struct hn_nvs_rndis_ack ack = {
505 		.type = NVS_TYPE_RNDIS_ACK,
506 		.status = NVS_STATUS_OK,
507 	};
508 	int error;
509 
510 	PMD_RX_LOG(DEBUG, "ack RX id %" PRIu64, tid);
511 
512  again:
513 	error = rte_vmbus_chan_send(chan, VMBUS_CHANPKT_TYPE_COMP,
514 				    &ack, sizeof(ack), tid,
515 				    VMBUS_CHANPKT_FLAG_NONE, NULL);
516 
517 	if (error == 0)
518 		return;
519 
520 	if (error == -EAGAIN) {
521 		/*
522 		 * NOTE:
523 		 * This should _not_ happen in real world, since the
524 		 * consumption of the TX bufring from the TX path is
525 		 * controlled.
526 		 */
527 		PMD_RX_LOG(NOTICE, "RXBUF ack retry");
528 		if (++retries < 10) {
529 			rte_delay_ms(1);
530 			goto again;
531 		}
532 	}
533 	/* RXBUF leaks! */
534 	PMD_DRV_LOG(ERR, "RXBUF ack failed");
535 }
536 
537 int
hn_nvs_alloc_subchans(struct hn_data * hv,uint32_t * nsubch)538 hn_nvs_alloc_subchans(struct hn_data *hv, uint32_t *nsubch)
539 {
540 	struct hn_nvs_subch_req req;
541 	struct hn_nvs_subch_resp resp;
542 	int error;
543 
544 	memset(&req, 0, sizeof(req));
545 	req.type = NVS_TYPE_SUBCH_REQ;
546 	req.op = NVS_SUBCH_OP_ALLOC;
547 	req.nsubch = *nsubch;
548 
549 	error = hn_nvs_execute(hv, &req, sizeof(req),
550 			       &resp, sizeof(resp),
551 			       NVS_TYPE_SUBCH_RESP);
552 	if (error)
553 		return error;
554 
555 	if (resp.status != NVS_STATUS_OK) {
556 		PMD_INIT_LOG(ERR,
557 			     "nvs subch alloc failed: %#x",
558 			     resp.status);
559 		return -EIO;
560 	}
561 
562 	if (resp.nsubch > *nsubch) {
563 		PMD_INIT_LOG(NOTICE,
564 			     "%u subchans are allocated, requested %u",
565 			     resp.nsubch, *nsubch);
566 	}
567 	*nsubch = resp.nsubch;
568 
569 	return 0;
570 }
571 
572 void
hn_nvs_set_datapath(struct hn_data * hv,uint32_t path)573 hn_nvs_set_datapath(struct hn_data *hv, uint32_t path)
574 {
575 	struct hn_nvs_datapath dp;
576 	int error;
577 
578 	PMD_DRV_LOG(DEBUG, "set datapath %s",
579 		    path ? "VF" : "Synthetic");
580 
581 	memset(&dp, 0, sizeof(dp));
582 	dp.type = NVS_TYPE_SET_DATAPATH;
583 	dp.active_path = path;
584 
585 	error = hn_nvs_req_send(hv, &dp, sizeof(dp));
586 	if (error) {
587 		PMD_DRV_LOG(ERR,
588 			    "send set datapath failed: %d",
589 			    error);
590 	}
591 }
592