xref: /linux-6.15/include/linux/hyperv.h (revision 6fa79bca)
1 /*
2  *
3  * Copyright (c) 2011, Microsoft Corporation.
4  *
5  * This program is free software; you can redistribute it and/or modify it
6  * under the terms and conditions of the GNU General Public License,
7  * version 2, as published by the Free Software Foundation.
8  *
9  * This program is distributed in the hope it will be useful, but WITHOUT
10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
12  * more details.
13  *
14  * You should have received a copy of the GNU General Public License along with
15  * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
16  * Place - Suite 330, Boston, MA 02111-1307 USA.
17  *
18  * Authors:
19  *   Haiyang Zhang <[email protected]>
20  *   Hank Janssen  <[email protected]>
21  *   K. Y. Srinivasan <[email protected]>
22  *
23  */
24 
25 #ifndef _HYPERV_H
26 #define _HYPERV_H
27 
28 #include <linux/types.h>
29 
30 /*
31  * An implementation of Hyper-V key-value pair (KVP) functionality for Linux.
32  *
33  *
34  * Copyright (C) 2010, Novell, Inc.
35  * Author : K. Y. Srinivasan <[email protected]>
36  *
37  */
38 
39 /*
40  * Maximum value size - used for both key names and value data, and includes
41  * any applicable NULL terminators.
42  *
43  * Note:  This limit is somewhat arbitrary, but falls easily within what is
44  * supported for all native guests (back to Win 2000) and what is reasonable
45  * for the IC KVP exchange functionality.  Note that Windows Me/98/95 are
46  * limited to 255 character key names.
47  *
48  * MSDN recommends not storing data values larger than 2048 bytes in the
49  * registry.
50  *
51  * Note:  This value is used in defining the KVP exchange message - this value
52  * cannot be modified without affecting the message size and compatibility.
53  */
54 
55 /*
56  * Maximum value size, in bytes, including any null terminators.
57  */
58 #define HV_KVP_EXCHANGE_MAX_VALUE_SIZE          (2048)
59 
60 
61 /*
62  * Maximum key size - the registry limits entry names to 256 characters
63  * (including the null terminator); keys are exchanged as UTF-16, hence 512 bytes.
64  */
65 
66 #define HV_KVP_EXCHANGE_MAX_KEY_SIZE            (512)
67 
68 /*
69  * In Linux, we implement the KVP functionality in two components:
70  * 1) The kernel component, which is packaged as part of the hv_utils driver,
71  * is responsible for communicating with the host and for implementing
72  * the host/guest protocol. 2) A user-level daemon that is responsible
73  * for data gathering.
74  *
75  * Host/Guest Protocol: The host iterates over an index and expects the guest
76  * to assign a key name to the index and also return the value corresponding to
77  * the key. The host will have at most one KVP transaction outstanding at any
78  * given point in time. The host side iteration stops when the guest returns
79  * an error. Microsoft has specified the following mapping of key names to
80  * host-specified index:
81  *
82  *	Index		Key Name
83  *	0		FullyQualifiedDomainName
84  *	1		IntegrationServicesVersion
85  *	2		NetworkAddressIPv4
86  *	3		NetworkAddressIPv6
87  *	4		OSBuildNumber
88  *	5		OSName
89  *	6		OSMajorVersion
90  *	7		OSMinorVersion
91  *	8		OSVersion
92  *	9		ProcessorArchitecture
93  *
94  * The Windows host expects the key name and key value to be encoded in UTF-16.
95  *
96  * Guest Kernel/KVP Daemon Protocol: As noted earlier, we implement all of the
97  * data gathering functionality in a user mode daemon. The user level daemon
98  * is also responsible for binding the key name to the index. The
99  * kernel and user-level daemon communicate using a connector channel.
100  *
101  * The user mode component first registers with the kernel component.
102  * Subsequently, the kernel component requests data for the specified keys.
103  * In response to this message the user mode component
104  * fills in the value corresponding to the specified key. We overload the
105  * sequence field in the cn_msg header to define our KVP message types.
106  *
107  *
108  * The kernel component simply acts as a conduit for communication between the
109  * Windows host and the user-level daemon. The kernel component passes up the
110  * index received from the Host to the user-level daemon. If the index is
111  * valid (supported), the corresponding key and its value (both strings)
112  * are returned. If the index is invalid (not supported), a NULL key string
113  * is returned. (See the illustrative daemon sketch after the structures below.)
114  */
115 
116 
117 /*
118  * Registry value types.
119  */
120 
121 #define REG_SZ 1
122 #define REG_U32 4
123 #define REG_U64 8
124 
125 /*
126  * As we look at expanding the KVP functionality to include
127  * IP injection functionality, we need to maintain binary
128  * compatibility with older daemons.
129  *
130  * The KVP opcodes are defined by the host and it was unfortunate
131  * that I chose to treat the registration operation as part of the
132  * KVP operations defined by the host.
133  * Here is the level of compatibility
134  * (between the user level daemon and the kernel KVP driver) that we
135  * will implement:
136  *
137  * An older daemon will always be supported on a newer driver.
138  * A given user level daemon will require a minimal version of the
139  * kernel driver.
140  * If we cannot handle the version differences, we will fail gracefully
141  * (this can happen when we have a user level daemon that is more
142  * advanced than the KVP driver).
143  *
144  * We will use the values exchanged in this handshake to determine whether
145  * the user level daemon and the kernel driver can work together. We begin by
146  * taking the registration opcode out of the KVP opcode namespace. We will,
147  * however, maintain compatibility with the existing user-level daemon code.
148  */
149 
150 /*
151  * Daemon code not supporting IP injection (legacy daemon).
152  */
153 
154 #define KVP_OP_REGISTER	4
155 
156 /*
157  * Daemon code supporting IP injection.
158  * The KVP opcode field is used to communicate the
159  * registration information; so define a namespace that
160  * will be distinct from the host defined KVP opcode.
161  */
162 
163 #define KVP_OP_REGISTER1 100
164 
165 enum hv_kvp_exchg_op {
166 	KVP_OP_GET = 0,
167 	KVP_OP_SET,
168 	KVP_OP_DELETE,
169 	KVP_OP_ENUMERATE,
170 	KVP_OP_GET_IP_INFO,
171 	KVP_OP_SET_IP_INFO,
172 	KVP_OP_COUNT /* Number of operations, must be last. */
173 };
174 
175 enum hv_kvp_exchg_pool {
176 	KVP_POOL_EXTERNAL = 0,
177 	KVP_POOL_GUEST,
178 	KVP_POOL_AUTO,
179 	KVP_POOL_AUTO_EXTERNAL,
180 	KVP_POOL_AUTO_INTERNAL,
181 	KVP_POOL_COUNT /* Number of pools, must be last. */
182 };
183 
184 /*
185  * Some Hyper-V status codes.
186  */
187 
188 #define HV_S_OK				0x00000000
189 #define HV_E_FAIL			0x80004005
190 #define HV_S_CONT			0x80070103
191 #define HV_ERROR_NOT_SUPPORTED		0x80070032
192 #define HV_ERROR_MACHINE_LOCKED		0x800704F7
193 #define HV_ERROR_DEVICE_NOT_CONNECTED	0x8007048F
194 #define HV_INVALIDARG			0x80070057
195 #define HV_GUID_NOTFOUND		0x80041002
196 
197 #define ADDR_FAMILY_NONE	0x00
198 #define ADDR_FAMILY_IPV4	0x01
199 #define ADDR_FAMILY_IPV6	0x02
200 
201 #define MAX_ADAPTER_ID_SIZE	128
202 #define MAX_IP_ADDR_SIZE	1024
203 #define MAX_GATEWAY_SIZE	512
204 
205 
206 struct hv_kvp_ipaddr_value {
207 	__u16	adapter_id[MAX_ADAPTER_ID_SIZE];
208 	__u8	addr_family;
209 	__u8	dhcp_enabled;
210 	__u16	ip_addr[MAX_IP_ADDR_SIZE];
211 	__u16	sub_net[MAX_IP_ADDR_SIZE];
212 	__u16	gate_way[MAX_GATEWAY_SIZE];
213 	__u16	dns_addr[MAX_IP_ADDR_SIZE];
214 } __attribute__((packed));
215 
216 
217 struct hv_kvp_hdr {
218 	__u8 operation;
219 	__u8 pool;
220 	__u16 pad;
221 } __attribute__((packed));
222 
223 struct hv_kvp_exchg_msg_value {
224 	__u32 value_type;
225 	__u32 key_size;
226 	__u32 value_size;
227 	__u8 key[HV_KVP_EXCHANGE_MAX_KEY_SIZE];
228 	union {
229 		__u8 value[HV_KVP_EXCHANGE_MAX_VALUE_SIZE];
230 		__u32 value_u32;
231 		__u64 value_u64;
232 	};
233 } __attribute__((packed));
234 
235 struct hv_kvp_msg_enumerate {
236 	__u32 index;
237 	struct hv_kvp_exchg_msg_value data;
238 } __attribute__((packed));
239 
240 struct hv_kvp_msg_get {
241 	struct hv_kvp_exchg_msg_value data;
242 };
243 
244 struct hv_kvp_msg_set {
245 	struct hv_kvp_exchg_msg_value data;
246 };
247 
248 struct hv_kvp_msg_delete {
249 	__u32 key_size;
250 	__u8 key[HV_KVP_EXCHANGE_MAX_KEY_SIZE];
251 };
252 
253 struct hv_kvp_register {
254 	__u8 version[HV_KVP_EXCHANGE_MAX_KEY_SIZE];
255 };
256 
257 struct hv_kvp_msg {
258 	union {
259 		struct hv_kvp_hdr	kvp_hdr;
260 		int error;
261 	};
262 	union {
263 		struct hv_kvp_msg_get		kvp_get;
264 		struct hv_kvp_msg_set		kvp_set;
265 		struct hv_kvp_msg_delete	kvp_delete;
266 		struct hv_kvp_msg_enumerate	kvp_enum_data;
267 		struct hv_kvp_ipaddr_value      kvp_ip_val;
268 		struct hv_kvp_register		kvp_register;
269 	} body;
270 } __attribute__((packed));
271 
272 struct hv_kvp_ip_msg {
273 	__u8 operation;
274 	__u8 pool;
275 	struct hv_kvp_ipaddr_value      kvp_ip_val;
276 } __attribute__((packed));
277 
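/*
 * Illustrative sketch (not part of this interface): how a user-level KVP
 * daemon might service the requests described above.  An IP-injection-capable
 * daemon first registers with KVP_OP_REGISTER1, then answers enumerate
 * requests by binding the host-supplied index to a key name and value.
 * All function and variable names below are hypothetical.
 */
#if 0
/* Hypothetical daemon-side handler for a KVP_OP_ENUMERATE request. */
static void kvp_fill_enum_response(struct hv_kvp_msg *msg)
{
	struct hv_kvp_exchg_msg_value *data = &msg->body.kvp_enum_data.data;

	switch (msg->body.kvp_enum_data.index) {
	case 5:
		/*
		 * Index 5 maps to "OSName" in the table above.  A real daemon
		 * encodes both key and value as UTF-16 and fills in
		 * key_size/value_size (sizes include the terminator and must
		 * stay within the HV_KVP_EXCHANGE_MAX_*_SIZE limits).
		 */
		data->value_type = REG_SZ;
		/* ... encode "OSName" into data->key and the value into data->value ... */
		break;
	default:
		/* Unsupported index: return an empty key so the host stops iterating. */
		data->key_size = 0;
		break;
	}
}
#endif
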
278 #ifdef __KERNEL__
279 #include <linux/scatterlist.h>
280 #include <linux/list.h>
281 #include <linux/uuid.h>
282 #include <linux/timer.h>
283 #include <linux/workqueue.h>
284 #include <linux/completion.h>
285 #include <linux/device.h>
286 #include <linux/mod_devicetable.h>
287 
288 
289 #define MAX_PAGE_BUFFER_COUNT				19
290 #define MAX_MULTIPAGE_BUFFER_COUNT			32 /* 128K */
291 
292 #pragma pack(push, 1)
293 
294 /* Single-page buffer */
295 struct hv_page_buffer {
296 	u32 len;
297 	u32 offset;
298 	u64 pfn;
299 };
300 
301 /* Multiple-page buffer */
302 struct hv_multipage_buffer {
303 	/* Length and Offset determine the # of pfns in the array */
304 	u32 len;
305 	u32 offset;
306 	u64 pfn_array[MAX_MULTIPAGE_BUFFER_COUNT];
307 };
308 
309 /* 0x18 includes the proprietary packet header */
310 #define MAX_PAGE_BUFFER_PACKET		(0x18 +			\
311 					(sizeof(struct hv_page_buffer) * \
312 					 MAX_PAGE_BUFFER_COUNT))
313 #define MAX_MULTIPAGE_BUFFER_PACKET	(0x18 +			\
314 					 sizeof(struct hv_multipage_buffer))
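
/*
 * With the 1-byte packing in effect here, sizeof(struct hv_page_buffer) is 16
 * and sizeof(struct hv_multipage_buffer) is 8 + 8 * 32 = 264, so
 * MAX_PAGE_BUFFER_PACKET is 0x18 + 16 * 19 = 328 bytes and
 * MAX_MULTIPAGE_BUFFER_PACKET is 0x18 + 264 = 288 bytes.
 */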
315 
316 
317 #pragma pack(pop)
318 
319 struct hv_ring_buffer {
320 	/* Offset in bytes from the start of ring data below */
321 	u32 write_index;
322 
323 	/* Offset in bytes from the start of ring data below */
324 	u32 read_index;
325 
326 	u32 interrupt_mask;
327 
328 	/* Pad it to PAGE_SIZE so that data starts on page boundary */
329 	u8	reserved[4084];
330 
331 	/* NOTE:
332 	 * The interrupt_mask field is meaningful only for channels; the vmbus
333 	 * connection reuses this same structure, and its data likewise starts
334 	 * at the "buffer" member below.
335 	 */
336 
337 	/*
338 	 * Ring data starts here + RingDataStartOffset
339 	 * !!! DO NOT place any fields below this !!!
340 	 */
341 	u8 buffer[0];
342 } __packed;
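
/*
 * Layout check: write_index (4) + read_index (4) + interrupt_mask (4) +
 * reserved[4084] add up to 4096 bytes, so on a system with 4K pages the
 * flexible "buffer" member starts exactly one page into the structure.
 */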
343 
344 struct hv_ring_buffer_info {
345 	struct hv_ring_buffer *ring_buffer;
346 	u32 ring_size;			/* Includes the shared header */
347 	spinlock_t ring_lock;
348 
349 	u32 ring_datasize;		/* < ring_size */
350 	u32 ring_data_startoffset;
351 };
352 
353 struct hv_ring_buffer_debug_info {
354 	u32 current_interrupt_mask;
355 	u32 current_read_index;
356 	u32 current_write_index;
357 	u32 bytes_avail_toread;
358 	u32 bytes_avail_towrite;
359 };
360 
361 
362 /*
363  *
364  * hv_get_ringbuffer_availbytes()
365  *
366  * Get the number of bytes available to read from and to write to
367  * the specified ring buffer.
368  */
369 static inline void
370 hv_get_ringbuffer_availbytes(struct hv_ring_buffer_info *rbi,
371 			  u32 *read, u32 *write)
372 {
373 	u32 read_loc, write_loc, dsize;
374 
375 	smp_read_barrier_depends();
376 
377 	/* Capture the read/write indices before they can change */
378 	read_loc = rbi->ring_buffer->read_index;
379 	write_loc = rbi->ring_buffer->write_index;
380 	dsize = rbi->ring_datasize;
381 
382 	*write = write_loc >= read_loc ? dsize - (write_loc - read_loc) :
383 		read_loc - write_loc;
384 	*read = dsize - *write;
385 }
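
/*
 * Worked example for the math above: with ring_datasize = 4096,
 * read_index = 100 and write_index = 300, the writer may add
 * 4096 - (300 - 100) = 3896 bytes and the reader has 4096 - 3896 = 200
 * bytes pending.
 */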
386 
387 
388 /*
389  * We use the same version numbering for all Hyper-V modules.
390  *
391  * Versioning is defined as follows:
392  *
393  *	Major Number	Changes in these scenarios:
394  *			1.	When a new version of Windows Hyper-V
395  *				is released.
396  *			2.	A major change has occurred in the
397  *				Linux ICs.
398  *			(For example, the first merge into the
399  *			kernel.)  Every time the Major Number
400  *			changes, the Revision number is reset to 0.
401  *	Minor Number	Changes when functionality that is not a
402  *			bug fix is added to the Linux ICs.
403  *
404  * 3.1 - Added the completed hv_utils driver: Shutdown/Heartbeat/Timesync
405  */
406 #define HV_DRV_VERSION           "3.1"
407 
408 
409 /*
410  * A revision number of vmbus that is used for ensuring both ends on a
411  * partition are using compatible versions.
412  */
413 #define VMBUS_REVISION_NUMBER		13
414 
415 /* The maximum size of the pipe payload is 16K. */
416 #define MAX_PIPE_DATA_PAYLOAD		(sizeof(u8) * 16384)
417 
418 /* Define PipeMode values. */
419 #define VMBUS_PIPE_TYPE_BYTE		0x00000000
420 #define VMBUS_PIPE_TYPE_MESSAGE		0x00000004
421 
422 /* The size of the user defined data buffer for non-pipe offers. */
423 #define MAX_USER_DEFINED_BYTES		120
424 
425 /* The size of the user defined data buffer for pipe offers. */
426 #define MAX_PIPE_USER_DEFINED_BYTES	116
427 
428 /*
429  * At the center of the Channel Management library is the Channel Offer. This
430  * struct contains the fundamental information about an offer.
431  */
432 struct vmbus_channel_offer {
433 	uuid_le if_type;
434 	uuid_le if_instance;
435 	u64 int_latency; /* in 100ns units */
436 	u32 if_revision;
437 	u32 server_ctx_size;	/* in bytes */
438 	u16 chn_flags;
439 	u16 mmio_megabytes;		/* in megabytes (1024 * 1024 bytes each) */
440 
441 	union {
442 		/* Non-pipes: The user has MAX_USER_DEFINED_BYTES bytes. */
443 		struct {
444 			unsigned char user_def[MAX_USER_DEFINED_BYTES];
445 		} std;
446 
447 		/*
448 		 * Pipes:
449 		 * The following structure is an integrated pipe protocol, which
450 		 * is implemented on top of standard user-defined data. Pipe
451 		 * clients have MAX_PIPE_USER_DEFINED_BYTES left for their own
452 		 * use.
453 		 */
454 		struct {
455 			u32  pipe_mode;
456 			unsigned char user_def[MAX_PIPE_USER_DEFINED_BYTES];
457 		} pipe;
458 	} u;
459 	u32 padding;
460 } __packed;
461 
462 /* Server Flags */
463 #define VMBUS_CHANNEL_ENUMERATE_DEVICE_INTERFACE	1
464 #define VMBUS_CHANNEL_SERVER_SUPPORTS_TRANSFER_PAGES	2
465 #define VMBUS_CHANNEL_SERVER_SUPPORTS_GPADLS		4
466 #define VMBUS_CHANNEL_NAMED_PIPE_MODE			0x10
467 #define VMBUS_CHANNEL_LOOPBACK_OFFER			0x100
468 #define VMBUS_CHANNEL_PARENT_OFFER			0x200
469 #define VMBUS_CHANNEL_REQUEST_MONITORED_NOTIFICATION	0x400
470 
471 struct vmpacket_descriptor {
472 	u16 type;
473 	u16 offset8;
474 	u16 len8;
475 	u16 flags;
476 	u64 trans_id;
477 } __packed;
478 
479 struct vmpacket_header {
480 	u32 prev_pkt_start_offset;
481 	struct vmpacket_descriptor descriptor;
482 } __packed;
483 
484 struct vmtransfer_page_range {
485 	u32 byte_count;
486 	u32 byte_offset;
487 } __packed;
488 
489 struct vmtransfer_page_packet_header {
490 	struct vmpacket_descriptor d;
491 	u16 xfer_pageset_id;
492 	u8  sender_owns_set;
493 	u8 reserved;
494 	u32 range_cnt;
495 	struct vmtransfer_page_range ranges[1];
496 } __packed;
497 
498 struct vmgpadl_packet_header {
499 	struct vmpacket_descriptor d;
500 	u32 gpadl;
501 	u32 reserved;
502 } __packed;
503 
504 struct vmadd_remove_transfer_page_set {
505 	struct vmpacket_descriptor d;
506 	u32 gpadl;
507 	u16 xfer_pageset_id;
508 	u16 reserved;
509 } __packed;
510 
511 /*
512  * This structure defines a range in guest physical space that can be made to
513  * look virtually contiguous.
514  */
515 struct gpa_range {
516 	u32 byte_count;
517 	u32 byte_offset;
518 	u64 pfn_array[0];
519 };
520 
521 /*
522  * This is the format for an Establish Gpadl packet, which contains a handle by
523  * which this GPADL will be known and a set of GPA ranges associated with it.
524  * This can be converted to an MDL by the guest OS.  If there are multiple GPA
525  * ranges, then the resulting MDL will be "chained," representing multiple VA
526  * ranges.
527  */
528 struct vmestablish_gpadl {
529 	struct vmpacket_descriptor d;
530 	u32 gpadl;
531 	u32 range_cnt;
532 	struct gpa_range range[1];
533 } __packed;
534 
535 /*
536  * This is the format for a Teardown Gpadl packet, which indicates that the
537  * GPADL handle in the Establish Gpadl packet will never be referenced again.
538  */
539 struct vmteardown_gpadl {
540 	struct vmpacket_descriptor d;
541 	u32 gpadl;
542 	u32 reserved;	/* for alignment to an 8-byte boundary */
543 } __packed;
544 
545 /*
546  * This is the format for a GPA-Direct packet, which contains a set of GPA
547  * ranges, in addition to commands and/or data.
548  */
549 struct vmdata_gpa_direct {
550 	struct vmpacket_descriptor d;
551 	u32 reserved;
552 	u32 range_cnt;
553 	struct gpa_range range[1];
554 } __packed;
555 
556 /* This is the format for an Additional Data packet. */
557 struct vmadditional_data {
558 	struct vmpacket_descriptor d;
559 	u64 total_bytes;
560 	u32 offset;
561 	u32 byte_cnt;
562 	unsigned char data[1];
563 } __packed;
564 
565 union vmpacket_largest_possible_header {
566 	struct vmpacket_descriptor simple_hdr;
567 	struct vmtransfer_page_packet_header xfer_page_hdr;
568 	struct vmgpadl_packet_header gpadl_hdr;
569 	struct vmadd_remove_transfer_page_set add_rm_xfer_page_hdr;
570 	struct vmestablish_gpadl establish_gpadl_hdr;
571 	struct vmteardown_gpadl teardown_gpadl_hdr;
572 	struct vmdata_gpa_direct data_gpa_direct_hdr;
573 };
574 
575 #define VMPACKET_DATA_START_ADDRESS(__packet)	\
576 	(void *)(((unsigned char *)__packet) +	\
577 	 ((struct vmpacket_descriptor *)__packet)->offset8 * 8)
578 
579 #define VMPACKET_DATA_LENGTH(__packet)		\
580 	((((struct vmpacket_descriptor *)__packet)->len8 -	\
581 	  ((struct vmpacket_descriptor *)__packet)->offset8) * 8)
582 
583 #define VMPACKET_TRANSFER_MODE(__packet)	\
584 	(((struct vmpacket_descriptor *)__packet)->type)
585 
586 enum vmbus_packet_type {
587 	VM_PKT_INVALID				= 0x0,
588 	VM_PKT_SYNCH				= 0x1,
589 	VM_PKT_ADD_XFER_PAGESET			= 0x2,
590 	VM_PKT_RM_XFER_PAGESET			= 0x3,
591 	VM_PKT_ESTABLISH_GPADL			= 0x4,
592 	VM_PKT_TEARDOWN_GPADL			= 0x5,
593 	VM_PKT_DATA_INBAND			= 0x6,
594 	VM_PKT_DATA_USING_XFER_PAGES		= 0x7,
595 	VM_PKT_DATA_USING_GPADL			= 0x8,
596 	VM_PKT_DATA_USING_GPA_DIRECT		= 0x9,
597 	VM_PKT_CANCEL_REQUEST			= 0xa,
598 	VM_PKT_COMP				= 0xb,
599 	VM_PKT_DATA_USING_ADDITIONAL_PKT	= 0xc,
600 	VM_PKT_ADDITIONAL_DATA			= 0xd
601 };
602 
603 #define VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED	1
604 
605 
606 /* Version 1 messages */
607 enum vmbus_channel_message_type {
608 	CHANNELMSG_INVALID			=  0,
609 	CHANNELMSG_OFFERCHANNEL		=  1,
610 	CHANNELMSG_RESCIND_CHANNELOFFER	=  2,
611 	CHANNELMSG_REQUESTOFFERS		=  3,
612 	CHANNELMSG_ALLOFFERS_DELIVERED	=  4,
613 	CHANNELMSG_OPENCHANNEL		=  5,
614 	CHANNELMSG_OPENCHANNEL_RESULT		=  6,
615 	CHANNELMSG_CLOSECHANNEL		=  7,
616 	CHANNELMSG_GPADL_HEADER		=  8,
617 	CHANNELMSG_GPADL_BODY			=  9,
618 	CHANNELMSG_GPADL_CREATED		= 10,
619 	CHANNELMSG_GPADL_TEARDOWN		= 11,
620 	CHANNELMSG_GPADL_TORNDOWN		= 12,
621 	CHANNELMSG_RELID_RELEASED		= 13,
622 	CHANNELMSG_INITIATE_CONTACT		= 14,
623 	CHANNELMSG_VERSION_RESPONSE		= 15,
624 	CHANNELMSG_UNLOAD			= 16,
625 #ifdef VMBUS_FEATURE_PARENT_OR_PEER_MEMORY_MAPPED_INTO_A_CHILD
626 	CHANNELMSG_VIEWRANGE_ADD		= 17,
627 	CHANNELMSG_VIEWRANGE_REMOVE		= 18,
628 #endif
629 	CHANNELMSG_COUNT
630 };
631 
632 struct vmbus_channel_message_header {
633 	enum vmbus_channel_message_type msgtype;
634 	u32 padding;
635 } __packed;
636 
637 /* Query VMBus Version parameters */
638 struct vmbus_channel_query_vmbus_version {
639 	struct vmbus_channel_message_header header;
640 	u32 version;
641 } __packed;
642 
643 /* VMBus Version Supported parameters */
644 struct vmbus_channel_version_supported {
645 	struct vmbus_channel_message_header header;
646 	u8 version_supported;
647 } __packed;
648 
649 /* Offer Channel parameters */
650 struct vmbus_channel_offer_channel {
651 	struct vmbus_channel_message_header header;
652 	struct vmbus_channel_offer offer;
653 	u32 child_relid;
654 	u8 monitorid;
655 	u8 monitor_allocated;
656 } __packed;
657 
658 /* Rescind Offer parameters */
659 struct vmbus_channel_rescind_offer {
660 	struct vmbus_channel_message_header header;
661 	u32 child_relid;
662 } __packed;
663 
664 /*
665  * Request Offer -- no parameters, SynIC message contains the partition ID
666  * Set Snoop -- no parameters, SynIC message contains the partition ID
667  * Clear Snoop -- no parameters, SynIC message contains the partition ID
668  * All Offers Delivered -- no parameters, SynIC message contains the partition
669  *		           ID
670  * Flush Client -- no parameters, SynIC message contains the partition ID
671  */
672 
673 /* Open Channel parameters */
674 struct vmbus_channel_open_channel {
675 	struct vmbus_channel_message_header header;
676 
677 	/* Identifies the specific VMBus channel that is being opened. */
678 	u32 child_relid;
679 
680 	/* ID making a particular open request at a channel offer unique. */
681 	u32 openid;
682 
683 	/* GPADL for the channel's ring buffer. */
684 	u32 ringbuffer_gpadlhandle;
685 
686 	/* GPADL for the channel's server context save area. */
687 	u32 server_contextarea_gpadlhandle;
688 
689 	/*
690 	* The upstream ring buffer begins at offset zero in the memory
691 	* described by RingBufferGpadlHandle. The downstream ring buffer
692 	* follows it at this offset (in pages).
693 	*/
694 	u32 downstream_ringbuffer_pageoffset;
695 
696 	/* User-specific data to be passed along to the server endpoint. */
697 	unsigned char userdata[MAX_USER_DEFINED_BYTES];
698 } __packed;
699 
700 /* Open Channel Result parameters */
701 struct vmbus_channel_open_result {
702 	struct vmbus_channel_message_header header;
703 	u32 child_relid;
704 	u32 openid;
705 	u32 status;
706 } __packed;
707 
708 /* Close Channel parameters */
709 struct vmbus_channel_close_channel {
710 	struct vmbus_channel_message_header header;
711 	u32 child_relid;
712 } __packed;
713 
714 /* Channel Message GPADL */
715 #define GPADL_TYPE_RING_BUFFER		1
716 #define GPADL_TYPE_SERVER_SAVE_AREA	2
717 #define GPADL_TYPE_TRANSACTION		8
718 
719 /*
720  * The number of PFNs in a GPADL message is defined by the number of
721  * pages that would be spanned by ByteCount and ByteOffset.  If the
722  * implied number of PFNs won't fit in this packet, there will be a
723  * follow-up packet that contains more.
724  */
725 struct vmbus_channel_gpadl_header {
726 	struct vmbus_channel_message_header header;
727 	u32 child_relid;
728 	u32 gpadl;
729 	u16 range_buflen;
730 	u16 rangecount;
731 	struct gpa_range range[0];
732 } __packed;
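
/*
 * For a given gpa_range the implied PFN count is the number of pages spanned
 * by byte_offset and byte_count, i.e. roughly
 * (byte_offset + byte_count + PAGE_SIZE - 1) / PAGE_SIZE.  When that many
 * PFNs do not fit in the header message, the remainder is carried in one or
 * more vmbus_channel_gpadl_body messages below.
 */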
733 
734 /* This is the followup packet that contains more PFNs. */
735 struct vmbus_channel_gpadl_body {
736 	struct vmbus_channel_message_header header;
737 	u32 msgnumber;
738 	u32 gpadl;
739 	u64 pfn[0];
740 } __packed;
741 
742 struct vmbus_channel_gpadl_created {
743 	struct vmbus_channel_message_header header;
744 	u32 child_relid;
745 	u32 gpadl;
746 	u32 creation_status;
747 } __packed;
748 
749 struct vmbus_channel_gpadl_teardown {
750 	struct vmbus_channel_message_header header;
751 	u32 child_relid;
752 	u32 gpadl;
753 } __packed;
754 
755 struct vmbus_channel_gpadl_torndown {
756 	struct vmbus_channel_message_header header;
757 	u32 gpadl;
758 } __packed;
759 
760 #ifdef VMBUS_FEATURE_PARENT_OR_PEER_MEMORY_MAPPED_INTO_A_CHILD
761 struct vmbus_channel_view_range_add {
762 	struct vmbus_channel_message_header header;
763 	PHYSICAL_ADDRESS viewrange_base;
764 	u64 viewrange_length;
765 	u32 child_relid;
766 } __packed;
767 
768 struct vmbus_channel_view_range_remove {
769 	struct vmbus_channel_message_header header;
770 	PHYSICAL_ADDRESS viewrange_base;
771 	u32 child_relid;
772 } __packed;
773 #endif
774 
775 struct vmbus_channel_relid_released {
776 	struct vmbus_channel_message_header header;
777 	u32 child_relid;
778 } __packed;
779 
780 struct vmbus_channel_initiate_contact {
781 	struct vmbus_channel_message_header header;
782 	u32 vmbus_version_requested;
783 	u32 padding2;
784 	u64 interrupt_page;
785 	u64 monitor_page1;
786 	u64 monitor_page2;
787 } __packed;
788 
789 struct vmbus_channel_version_response {
790 	struct vmbus_channel_message_header header;
791 	u8 version_supported;
792 } __packed;
793 
794 enum vmbus_channel_state {
795 	CHANNEL_OFFER_STATE,
796 	CHANNEL_OPENING_STATE,
797 	CHANNEL_OPEN_STATE,
798 };
799 
800 struct vmbus_channel_debug_info {
801 	u32 relid;
802 	enum vmbus_channel_state state;
803 	uuid_le interfacetype;
804 	uuid_le interface_instance;
805 	u32 monitorid;
806 	u32 servermonitor_pending;
807 	u32 servermonitor_latency;
808 	u32 servermonitor_connectionid;
809 	u32 clientmonitor_pending;
810 	u32 clientmonitor_latency;
811 	u32 clientmonitor_connectionid;
812 
813 	struct hv_ring_buffer_debug_info inbound;
814 	struct hv_ring_buffer_debug_info outbound;
815 };
816 
817 /*
818  * Represents each channel msg on the vmbus connection. This is a
819  * variable-size data structure whose actual size depends on the msg type.
820  */
821 struct vmbus_channel_msginfo {
822 	/* Bookkeeping stuff */
823 	struct list_head msglistentry;
824 
825 	/* So far, this is only used to handle gpadl body message */
826 	struct list_head submsglist;
827 
828 	/* Synchronize the request/response if needed */
829 	struct completion  waitevent;
830 	union {
831 		struct vmbus_channel_version_supported version_supported;
832 		struct vmbus_channel_open_result open_result;
833 		struct vmbus_channel_gpadl_torndown gpadl_torndown;
834 		struct vmbus_channel_gpadl_created gpadl_created;
835 		struct vmbus_channel_version_response version_response;
836 	} response;
837 
838 	u32 msgsize;
839 	/*
840 	 * The channel message that goes out on the "wire".
841 	 * It will contain at minimum the VMBUS_CHANNEL_MESSAGE_HEADER header
842 	 */
843 	unsigned char msg[0];
844 };
845 
846 struct vmbus_close_msg {
847 	struct vmbus_channel_msginfo info;
848 	struct vmbus_channel_close_channel msg;
849 };
850 
851 struct vmbus_channel {
852 	struct list_head listentry;
853 
854 	struct hv_device *device_obj;
855 
856 	struct work_struct work;
857 
858 	enum vmbus_channel_state state;
859 
860 	struct vmbus_channel_offer_channel offermsg;
861 	/*
862 	 * These are derived from the OfferMsg.MonitorId and are saved
863 	 * here for easy access.
864 	 */
865 	u8 monitor_grp;
866 	u8 monitor_bit;
867 
868 	u32 ringbuffer_gpadlhandle;
869 
870 	/* Allocated memory for ring buffer */
871 	void *ringbuffer_pages;
872 	u32 ringbuffer_pagecount;
873 	struct hv_ring_buffer_info outbound;	/* send to parent */
874 	struct hv_ring_buffer_info inbound;	/* receive from parent */
875 	spinlock_t inbound_lock;
876 	struct workqueue_struct *controlwq;
877 
878 	struct vmbus_close_msg close_msg;
879 
880 	/* Channel callbacks are invoked in this workqueue context */
881 	/* HANDLE dataWorkQueue; */
882 
883 	void (*onchannel_callback)(void *context);
884 	void *channel_callback_context;
885 };
886 
887 void vmbus_onmessage(void *context);
888 
889 int vmbus_request_offers(void);
890 
891 /* The format must be the same as struct vmdata_gpa_direct */
892 struct vmbus_channel_packet_page_buffer {
893 	u16 type;
894 	u16 dataoffset8;
895 	u16 length8;
896 	u16 flags;
897 	u64 transactionid;
898 	u32 reserved;
899 	u32 rangecount;
900 	struct hv_page_buffer range[MAX_PAGE_BUFFER_COUNT];
901 } __packed;
902 
903 /* The format must be the same as struct vmdata_gpa_direct */
904 struct vmbus_channel_packet_multipage_buffer {
905 	u16 type;
906 	u16 dataoffset8;
907 	u16 length8;
908 	u16 flags;
909 	u64 transactionid;
910 	u32 reserved;
911 	u32 rangecount;		/* Always 1 in this case */
912 	struct hv_multipage_buffer range;
913 } __packed;
914 
915 
916 extern int vmbus_open(struct vmbus_channel *channel,
917 			    u32 send_ringbuffersize,
918 			    u32 recv_ringbuffersize,
919 			    void *userdata,
920 			    u32 userdatalen,
921 			    void(*onchannel_callback)(void *context),
922 			    void *context);
923 
924 extern void vmbus_close(struct vmbus_channel *channel);
925 
926 extern int vmbus_sendpacket(struct vmbus_channel *channel,
927 				  const void *buffer,
928 				  u32 bufferlen,
929 				  u64 requestid,
930 				  enum vmbus_packet_type type,
931 				  u32 flags);
932 
933 extern int vmbus_sendpacket_pagebuffer(struct vmbus_channel *channel,
934 					    struct hv_page_buffer pagebuffers[],
935 					    u32 pagecount,
936 					    void *buffer,
937 					    u32 bufferlen,
938 					    u64 requestid);
939 
940 extern int vmbus_sendpacket_multipagebuffer(struct vmbus_channel *channel,
941 					struct hv_multipage_buffer *mpb,
942 					void *buffer,
943 					u32 bufferlen,
944 					u64 requestid);
945 
946 extern int vmbus_establish_gpadl(struct vmbus_channel *channel,
947 				      void *kbuffer,
948 				      u32 size,
949 				      u32 *gpadl_handle);
950 
951 extern int vmbus_teardown_gpadl(struct vmbus_channel *channel,
952 				     u32 gpadl_handle);
953 
954 extern int vmbus_recvpacket(struct vmbus_channel *channel,
955 				  void *buffer,
956 				  u32 bufferlen,
957 				  u32 *buffer_actual_len,
958 				  u64 *requestid);
959 
960 extern int vmbus_recvpacket_raw(struct vmbus_channel *channel,
961 				     void *buffer,
962 				     u32 bufferlen,
963 				     u32 *buffer_actual_len,
964 				     u64 *requestid);
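
/*
 * Illustrative sketch (hypothetical names, not part of this interface) of
 * typical use of the channel API above: open the channel with a receive
 * callback, drain packets in the callback, and send in-band packets with a
 * completion request.
 */
#if 0
static void example_onchannel_callback(void *context)
{
	struct vmbus_channel *channel = context;
	u8 buf[256];
	u32 recvlen;
	u64 requestid;

	/* Drain all pending packets from the inbound ring buffer. */
	while (vmbus_recvpacket(channel, buf, sizeof(buf),
				&recvlen, &requestid) == 0 && recvlen > 0) {
		/* process 'recvlen' bytes of payload in 'buf' */
	}
}

static int example_open_and_send(struct vmbus_channel *channel,
				 void *data, u32 len)
{
	int ret;

	/* Send/receive ring sizes are a sketch; drivers pick their own. */
	ret = vmbus_open(channel, 4 * PAGE_SIZE, 4 * PAGE_SIZE,
			 NULL, 0, example_onchannel_callback, channel);
	if (ret)
		return ret;

	return vmbus_sendpacket(channel, data, len, 1 /* requestid */,
				VM_PKT_DATA_INBAND,
				VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
}
#endif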
965 
966 
967 extern void vmbus_get_debug_info(struct vmbus_channel *channel,
968 				     struct vmbus_channel_debug_info *debug);
969 
970 extern void vmbus_ontimer(unsigned long data);
971 
972 struct hv_dev_port_info {
973 	u32 int_mask;
974 	u32 read_idx;
975 	u32 write_idx;
976 	u32 bytes_avail_toread;
977 	u32 bytes_avail_towrite;
978 };
979 
980 /* Base driver object */
981 struct hv_driver {
982 	const char *name;
983 
984 	/* the device type supported by this driver */
985 	uuid_le dev_type;
986 	const struct hv_vmbus_device_id *id_table;
987 
988 	struct device_driver driver;
989 
990 	int (*probe)(struct hv_device *, const struct hv_vmbus_device_id *);
991 	int (*remove)(struct hv_device *);
992 	void (*shutdown)(struct hv_device *);
993 
994 };
995 
996 /* Base device object */
997 struct hv_device {
998 	/* the device type id of this device */
999 	uuid_le dev_type;
1000 
1001 	/* the device instance id of this device */
1002 	uuid_le dev_instance;
1003 
1004 	struct device device;
1005 
1006 	struct vmbus_channel *channel;
1007 };
1008 
1009 
1010 static inline struct hv_device *device_to_hv_device(struct device *d)
1011 {
1012 	return container_of(d, struct hv_device, device);
1013 }
1014 
1015 static inline struct hv_driver *drv_to_hv_drv(struct device_driver *d)
1016 {
1017 	return container_of(d, struct hv_driver, driver);
1018 }
1019 
1020 static inline void hv_set_drvdata(struct hv_device *dev, void *data)
1021 {
1022 	dev_set_drvdata(&dev->device, data);
1023 }
1024 
1025 static inline void *hv_get_drvdata(struct hv_device *dev)
1026 {
1027 	return dev_get_drvdata(&dev->device);
1028 }
1029 
1030 /* Vmbus interface */
1031 #define vmbus_driver_register(driver)	\
1032 	__vmbus_driver_register(driver, THIS_MODULE, KBUILD_MODNAME)
1033 int __must_check __vmbus_driver_register(struct hv_driver *hv_driver,
1034 					 struct module *owner,
1035 					 const char *mod_name);
1036 void vmbus_driver_unregister(struct hv_driver *hv_driver);
1037 
1038 /**
1039  * VMBUS_DEVICE - macro used to describe a specific hyperv vmbus device
1040  *
1041  * This macro is used to create a struct hv_vmbus_device_id that matches a
1042  * specific device.
1043  */
1044 #define VMBUS_DEVICE(g0, g1, g2, g3, g4, g5, g6, g7,	\
1045 		     g8, g9, ga, gb, gc, gd, ge, gf)	\
1046 	.guid = { g0, g1, g2, g3, g4, g5, g6, g7,	\
1047 		  g8, g9, ga, gb, gc, gd, ge, gf },
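
/*
 * Illustrative sketch of registering a VMBus driver with the interfaces
 * above.  The GUID bytes and all names are placeholders.
 */
#if 0
static int example_probe(struct hv_device *dev,
			 const struct hv_vmbus_device_id *dev_id);
static int example_remove(struct hv_device *dev);

static const struct hv_vmbus_device_id example_id_table[] = {
	{ VMBUS_DEVICE(0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
		       0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f) },
	{ },	/* terminating entry */
};

static struct hv_driver example_drv = {
	.name = "hv_example",
	.id_table = example_id_table,
	.probe = example_probe,
	.remove = example_remove,
};

static int example_init(void)
{
	return vmbus_driver_register(&example_drv);
}

static void example_exit(void)
{
	vmbus_driver_unregister(&example_drv);
}
#endif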
1048 
1049 /*
1050  * Common header for Hyper-V ICs
1051  */
1052 
1053 #define ICMSGTYPE_NEGOTIATE		0
1054 #define ICMSGTYPE_HEARTBEAT		1
1055 #define ICMSGTYPE_KVPEXCHANGE		2
1056 #define ICMSGTYPE_SHUTDOWN		3
1057 #define ICMSGTYPE_TIMESYNC		4
1058 #define ICMSGTYPE_VSS			5
1059 
1060 #define ICMSGHDRFLAG_TRANSACTION	1
1061 #define ICMSGHDRFLAG_REQUEST		2
1062 #define ICMSGHDRFLAG_RESPONSE		4
1063 
1064 
1065 /*
1066  * While we want to handle util services as regular devices,
1067  * there is only one instance of each of these services; so
1068  * we statically allocate the service specific state.
1069  */
1070 
1071 struct hv_util_service {
1072 	u8 *recv_buffer;
1073 	void (*util_cb)(void *);
1074 	int (*util_init)(struct hv_util_service *);
1075 	void (*util_deinit)(void);
1076 };
1077 
1078 struct vmbuspipe_hdr {
1079 	u32 flags;
1080 	u32 msgsize;
1081 } __packed;
1082 
1083 struct ic_version {
1084 	u16 major;
1085 	u16 minor;
1086 } __packed;
1087 
1088 struct icmsg_hdr {
1089 	struct ic_version icverframe;
1090 	u16 icmsgtype;
1091 	struct ic_version icvermsg;
1092 	u16 icmsgsize;
1093 	u32 status;
1094 	u8 ictransaction_id;
1095 	u8 icflags;
1096 	u8 reserved[2];
1097 } __packed;
1098 
1099 struct icmsg_negotiate {
1100 	u16 icframe_vercnt;
1101 	u16 icmsg_vercnt;
1102 	u32 reserved;
1103 	struct ic_version icversion_data[1]; /* any size array */
1104 } __packed;
1105 
1106 struct shutdown_msg_data {
1107 	u32 reason_code;
1108 	u32 timeout_seconds;
1109 	u32 flags;
1110 	u8  display_message[2048];
1111 } __packed;
1112 
1113 struct heartbeat_msg_data {
1114 	u64 seq_num;
1115 	u32 reserved[8];
1116 } __packed;
1117 
1118 /* Time Sync IC defs */
1119 #define ICTIMESYNCFLAG_PROBE	0
1120 #define ICTIMESYNCFLAG_SYNC	1
1121 #define ICTIMESYNCFLAG_SAMPLE	2
1122 
1123 #ifdef __x86_64__
1124 #define WLTIMEDELTA	116444736000000000L	/* in 100ns unit */
1125 #else
1126 #define WLTIMEDELTA	116444736000000000LL
1127 #endif
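
/*
 * WLTIMEDELTA is the offset between the Windows epoch (1601-01-01) and the
 * Unix epoch (1970-01-01) expressed in 100ns units:
 * 11,644,473,600 seconds * 10,000,000 = 116,444,736,000,000,000.
 */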
1128 
1129 struct ictimesync_data {
1130 	u64 parenttime;
1131 	u64 childtime;
1132 	u64 roundtriptime;
1133 	u8 flags;
1134 } __packed;
1135 
1136 struct hyperv_service_callback {
1137 	u8 msg_type;
1138 	char *log_msg;
1139 	uuid_le data;
1140 	struct vmbus_channel *channel;
1141 	void (*callback) (void *context);
1142 };
1143 
1144 #define MAX_SRV_VER	0x7ffffff
1145 extern void vmbus_prep_negotiate_resp(struct icmsg_hdr *,
1146 					struct icmsg_negotiate *, u8 *, int,
1147 					int);
1148 
1149 int hv_kvp_init(struct hv_util_service *);
1150 void hv_kvp_deinit(void);
1151 void hv_kvp_onchannelcallback(void *);
1152 
1153 #endif /* __KERNEL__ */
1154 #endif /* _HYPERV_H */
1155