xref: /linux-6.15/include/linux/hyperv.h (revision 840ef8b7)
1 /*
2  *
3  * Copyright (c) 2011, Microsoft Corporation.
4  *
5  * This program is free software; you can redistribute it and/or modify it
6  * under the terms and conditions of the GNU General Public License,
7  * version 2, as published by the Free Software Foundation.
8  *
9  * This program is distributed in the hope it will be useful, but WITHOUT
10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
12  * more details.
13  *
14  * You should have received a copy of the GNU General Public License along with
15  * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
16  * Place - Suite 330, Boston, MA 02111-1307 USA.
17  *
18  * Authors:
19  *   Haiyang Zhang <[email protected]>
20  *   Hank Janssen  <[email protected]>
21  *   K. Y. Srinivasan <[email protected]>
22  *
23  */
24 
25 #ifndef _HYPERV_H
26 #define _HYPERV_H
27 
28 #include <linux/types.h>
29 
30 /*
31  * An implementation of HyperV key value pair (KVP) functionality for Linux.
32  *
33  *
34  * Copyright (C) 2010, Novell, Inc.
35  * Author : K. Y. Srinivasan <[email protected]>
36  *
37  */
38 
39 /*
40  * Maximum value size - used for both key names and value data, and includes
41  * any applicable NULL terminators.
42  *
43  * Note:  This limit is somewhat arbitrary, but falls easily within what is
44  * supported for all native guests (back to Win 2000) and what is reasonable
45  * for the IC KVP exchange functionality.  Note that Windows Me/98/95 are
46  * limited to 255 character key names.
47  *
48  * MSDN recommends not storing data values larger than 2048 bytes in the
49  * registry.
50  *
51  * Note:  This value is used in defining the KVP exchange message - this value
52  * cannot be modified without affecting the message size and compatibility.
53  */
54 
55 /*
56  * bytes, including any null terminators
57  */
58 #define HV_KVP_EXCHANGE_MAX_VALUE_SIZE          (2048)
59 
60 
61 /*
62  * Maximum key size - the registry limit for the length of an entry name
63  * is 256 characters, including the null terminator (512 bytes when encoded in UTF-16)
64  */
65 
66 #define HV_KVP_EXCHANGE_MAX_KEY_SIZE            (512)
67 
68 /*
69  * In Linux, we implement the KVP functionality in two components:
70  * 1) The kernel component, which is packaged as part of the hv_utils driver,
71  * is responsible for communicating with the host and for
72  * implementing the host/guest protocol. 2) A user level daemon that is
73  * responsible for data gathering.
74  *
75  * Host/Guest Protocol: The host iterates over an index and expects the guest
76  * to assign a key name to the index and also return the value corresponding to
77  * the key. The host will have at most one KVP transaction outstanding at any
78  * given point in time. The host side iteration stops when the guest returns
79  * an error. Microsoft has specified the following mapping of key names to
80  * host specified index:
81  *
82  *	Index		Key Name
83  *	0		FullyQualifiedDomainName
84  *	1		IntegrationServicesVersion
85  *	2		NetworkAddressIPv4
86  *	3		NetworkAddressIPv6
87  *	4		OSBuildNumber
88  *	5		OSName
89  *	6		OSMajorVersion
90  *	7		OSMinorVersion
91  *	8		OSVersion
92  *	9		ProcessorArchitecture
93  *
94  * The Windows host expects the Key Name and Key Value to be encoded in UTF-16.
95  *
96  * Guest Kernel/KVP Daemon Protocol: As noted earlier, we implement all of the
97  * data gathering functionality in a user mode daemon. The user level daemon
98  * is also responsible for binding the key name to the index. The
99  * kernel and user-level daemon communicate using a connector channel.
100  *
101  * The user mode component first registers with the kernel component.
102  * Subsequently, the kernel component requests data for the specified keys.
103  * In response to this message, the user mode component
104  * fills in the value corresponding to the specified key. We overload the
105  * sequence field in the cn_msg header to define our KVP message types.
106  *
107  *
108  * The kernel component simply acts as a conduit for communication between the
109  * Windows host and the user-level daemon. The kernel component passes up the
110  * index received from the Host to the user-level daemon. If the index is
111  * valid (supported), the corresponding key and its
112  * value (both are strings) are returned. If the index is invalid
113  * (not supported), a NULL key string is returned.
114  */
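/*
 * Illustrative sketch (not part of the original header): how a user-level
 * daemon might bind the host-specified index to a key name, following the
 * table above. kvp_index_to_key() is a hypothetical helper; returning NULL
 * for an unknown index is what makes the host stop iterating.
 *
 *	static const char *kvp_index_to_key(int index)
 *	{
 *		static const char * const keys[] = {
 *			"FullyQualifiedDomainName",
 *			"IntegrationServicesVersion",
 *			"NetworkAddressIPv4",
 *			"NetworkAddressIPv6",
 *			"OSBuildNumber",
 *			"OSName",
 *			"OSMajorVersion",
 *			"OSMinorVersion",
 *			"OSVersion",
 *			"ProcessorArchitecture",
 *		};
 *
 *		if (index < 0 || index >= (int)(sizeof(keys) / sizeof(keys[0])))
 *			return NULL;
 *		return keys[index];
 *	}
 */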
115 
116 
117 /*
118  * Registry value types.
119  */
120 
121 #define REG_SZ 1
122 #define REG_U32 4
123 #define REG_U64 8
124 
125 /*
126  * As we look at expanding the KVP functionality to include
127  * IP injection functionality, we need to maintain binary
128  * compatibility with older daemons.
129  *
130  * The KVP opcodes are defined by the host and it was unfortunate
131  * that I chose to treat the registration operation as part of the
132  * KVP operations defined by the host.
133  * Here is the level of compatibility
134  * (between the user level daemon and the kernel KVP driver) that we
135  * will implement:
136  *
137  * An older daemon will always be supported on a newer driver.
138  * A given user level daemon will require a minimal version of the
139  * kernel driver.
140  * If we cannot handle the version differences, we will fail gracefully
141  * (this can happen when we have a user level daemon that is more
142  * advanced than the KVP driver).
143  *
144  * We will use the values exchanged in this handshake to determine if we have
145  * a workable user level daemon and kernel driver pair. We begin by taking the
146  * registration opcode out of the KVP opcode namespace. We will, however,
147  * maintain compatibility with the existing user-level daemon code.
148  */
149 
150 /*
151  * Daemon code not supporting IP injection (legacy daemon).
152  */
153 
154 #define KVP_OP_REGISTER	4
155 
156 /*
157  * Daemon code supporting IP injection.
158  * The KVP opcode field is used to communicate the
159  * registration information; so define a namespace that
160  * will be distinct from the host defined KVP opcode.
161  */
162 
163 #define KVP_OP_REGISTER1 100
164 
165 enum hv_kvp_exchg_op {
166 	KVP_OP_GET = 0,
167 	KVP_OP_SET,
168 	KVP_OP_DELETE,
169 	KVP_OP_ENUMERATE,
170 	KVP_OP_GET_IP_INFO,
171 	KVP_OP_SET_IP_INFO,
172 	KVP_OP_COUNT /* Number of operations, must be last. */
173 };
174 
175 enum hv_kvp_exchg_pool {
176 	KVP_POOL_EXTERNAL = 0,
177 	KVP_POOL_GUEST,
178 	KVP_POOL_AUTO,
179 	KVP_POOL_AUTO_EXTERNAL,
180 	KVP_POOL_AUTO_INTERNAL,
181 	KVP_POOL_COUNT /* Number of pools, must be last. */
182 };
183 
184 /*
185  * Some Hyper-V status codes.
186  */
187 
188 #define HV_S_OK				0x00000000
189 #define HV_E_FAIL			0x80004005
190 #define HV_S_CONT			0x80070103
191 #define HV_ERROR_NOT_SUPPORTED		0x80070032
192 #define HV_ERROR_MACHINE_LOCKED		0x800704F7
193 #define HV_ERROR_DEVICE_NOT_CONNECTED	0x8007048F
194 #define HV_INVALIDARG			0x80070057
195 #define HV_GUID_NOTFOUND		0x80041002
196 
197 #define ADDR_FAMILY_NONE	0x00
198 #define ADDR_FAMILY_IPV4	0x01
199 #define ADDR_FAMILY_IPV6	0x02
200 
201 #define MAX_ADAPTER_ID_SIZE	128
202 #define MAX_IP_ADDR_SIZE	1024
203 #define MAX_GATEWAY_SIZE	512
204 
205 
206 struct hv_kvp_ipaddr_value {
207 	__u16	adapter_id[MAX_ADAPTER_ID_SIZE];
208 	__u8	addr_family;
209 	__u8	dhcp_enabled;
210 	__u16	ip_addr[MAX_IP_ADDR_SIZE];
211 	__u16	sub_net[MAX_IP_ADDR_SIZE];
212 	__u16	gate_way[MAX_GATEWAY_SIZE];
213 	__u16	dns_addr[MAX_IP_ADDR_SIZE];
214 } __attribute__((packed));
215 
216 
217 struct hv_kvp_hdr {
218 	__u8 operation;
219 	__u8 pool;
220 	__u16 pad;
221 } __attribute__((packed));
222 
223 struct hv_kvp_exchg_msg_value {
224 	__u32 value_type;
225 	__u32 key_size;
226 	__u32 value_size;
227 	__u8 key[HV_KVP_EXCHANGE_MAX_KEY_SIZE];
228 	union {
229 		__u8 value[HV_KVP_EXCHANGE_MAX_VALUE_SIZE];
230 		__u32 value_u32;
231 		__u64 value_u64;
232 	};
233 } __attribute__((packed));
234 
235 struct hv_kvp_msg_enumerate {
236 	__u32 index;
237 	struct hv_kvp_exchg_msg_value data;
238 } __attribute__((packed));
239 
240 struct hv_kvp_msg_get {
241 	struct hv_kvp_exchg_msg_value data;
242 };
243 
244 struct hv_kvp_msg_set {
245 	struct hv_kvp_exchg_msg_value data;
246 };
247 
248 struct hv_kvp_msg_delete {
249 	__u32 key_size;
250 	__u8 key[HV_KVP_EXCHANGE_MAX_KEY_SIZE];
251 };
252 
253 struct hv_kvp_register {
254 	__u8 version[HV_KVP_EXCHANGE_MAX_KEY_SIZE];
255 };
256 
257 struct hv_kvp_msg {
258 	union {
259 		struct hv_kvp_hdr	kvp_hdr;
260 		int error;
261 	};
262 	union {
263 		struct hv_kvp_msg_get		kvp_get;
264 		struct hv_kvp_msg_set		kvp_set;
265 		struct hv_kvp_msg_delete	kvp_delete;
266 		struct hv_kvp_msg_enumerate	kvp_enum_data;
267 		struct hv_kvp_ipaddr_value      kvp_ip_val;
268 		struct hv_kvp_register		kvp_register;
269 	} body;
270 } __attribute__((packed));
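/*
 * Illustrative sketch (not part of the original header): how the user-level
 * daemon might fill in a string value for an enumerate request before
 * handing the message back to the kernel over the connector channel. "msg",
 * "key" and "value" are hypothetical; key and value are nul-terminated
 * strings assumed to fit within HV_KVP_EXCHANGE_MAX_KEY_SIZE and
 * HV_KVP_EXCHANGE_MAX_VALUE_SIZE respectively.
 *
 *	struct hv_kvp_exchg_msg_value *val = &msg->body.kvp_enum_data.data;
 *
 *	val->value_type = REG_SZ;
 *	val->key_size = strlen(key) + 1;
 *	val->value_size = strlen(value) + 1;
 *	strcpy((char *)val->key, key);
 *	strcpy((char *)val->value, value);
 */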
271 
272 struct hv_kvp_ip_msg {
273 	__u8 operation;
274 	__u8 pool;
275 	struct hv_kvp_ipaddr_value      kvp_ip_val;
276 } __attribute__((packed));
277 
278 #ifdef __KERNEL__
279 #include <linux/scatterlist.h>
280 #include <linux/list.h>
281 #include <linux/uuid.h>
282 #include <linux/timer.h>
283 #include <linux/workqueue.h>
284 #include <linux/completion.h>
285 #include <linux/device.h>
286 #include <linux/mod_devicetable.h>
287 
288 
289 #define MAX_PAGE_BUFFER_COUNT				19
290 #define MAX_MULTIPAGE_BUFFER_COUNT			32 /* 128K */
291 
292 #pragma pack(push, 1)
293 
294 /* Single-page buffer */
295 struct hv_page_buffer {
296 	u32 len;
297 	u32 offset;
298 	u64 pfn;
299 };
300 
301 /* Multiple-page buffer */
302 struct hv_multipage_buffer {
303 	/* Length and Offset determine the number of PFNs in the array */
304 	u32 len;
305 	u32 offset;
306 	u64 pfn_array[MAX_MULTIPAGE_BUFFER_COUNT];
307 };
308 
309 /* 0x18 includes the proprietary packet header */
310 #define MAX_PAGE_BUFFER_PACKET		(0x18 +			\
311 					(sizeof(struct hv_page_buffer) * \
312 					 MAX_PAGE_BUFFER_COUNT))
313 #define MAX_MULTIPAGE_BUFFER_PACKET	(0x18 +			\
314 					 sizeof(struct hv_multipage_buffer))
315 
316 
317 #pragma pack(pop)
318 
319 struct hv_ring_buffer {
320 	/* Offset in bytes from the start of ring data below */
321 	u32 write_index;
322 
323 	/* Offset in bytes from the start of ring data below */
324 	u32 read_index;
325 
326 	u32 interrupt_mask;
327 
328 	/*
329 	 * Win8 uses some of the reserved bits to implement
330 	 * interrupt driven flow management. On the send side
331 	 * we can request that the receiver interrupt the sender
332 	 * when the ring transitions from being full to being able
333 	 * to handle a message of size "pending_send_sz".
334 	 *
335 	 * Add necessary state for this enhancement.
336 	 */
337 	u32 pending_send_sz;
338 
339 	u32 reserved1[12];
340 
341 	union {
342 		struct {
343 			u32 feat_pending_send_sz:1;
344 		};
345 		u32 value;
346 	} feature_bits;
347 
348 	/* Pad it to PAGE_SIZE so that data starts on a page boundary */
349 	u8	reserved2[4028];
350 
351 	/*
352 	 * Ring data starts here + RingDataStartOffset
353 	 * !!! DO NOT place any fields below this !!!
354 	 */
355 	u8 buffer[0];
356 } __packed;
357 
358 struct hv_ring_buffer_info {
359 	struct hv_ring_buffer *ring_buffer;
360 	u32 ring_size;			/* Include the shared header */
361 	spinlock_t ring_lock;
362 
363 	u32 ring_datasize;		/* < ring_size */
364 	u32 ring_data_startoffset;
365 };
366 
367 struct hv_ring_buffer_debug_info {
368 	u32 current_interrupt_mask;
369 	u32 current_read_index;
370 	u32 current_write_index;
371 	u32 bytes_avail_toread;
372 	u32 bytes_avail_towrite;
373 };
374 
375 
376 /*
377  *
378  * hv_get_ringbuffer_availbytes()
379  *
380  * Get the number of bytes available to read from and to write to
381  * the specified ring buffer.
382  */
383 static inline void
384 hv_get_ringbuffer_availbytes(struct hv_ring_buffer_info *rbi,
385 			  u32 *read, u32 *write)
386 {
387 	u32 read_loc, write_loc, dsize;
388 
389 	smp_read_barrier_depends();
390 
391 	/* Capture the read/write indices before they change */
392 	read_loc = rbi->ring_buffer->read_index;
393 	write_loc = rbi->ring_buffer->write_index;
394 	dsize = rbi->ring_datasize;
395 
396 	*write = write_loc >= read_loc ? dsize - (write_loc - read_loc) :
397 		read_loc - write_loc;
398 	*read = dsize - *write;
399 }
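/*
 * Illustrative sketch: using the helper above to fill in the availability
 * fields of a struct hv_ring_buffer_debug_info, where "rbi" and
 * "debug_info" are hypothetical pointers supplied by the caller.
 *
 *	hv_get_ringbuffer_availbytes(rbi,
 *				     &debug_info->bytes_avail_toread,
 *				     &debug_info->bytes_avail_towrite);
 */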
400 
401 
402 /*
403  * We use the same version numbering for all Hyper-V modules.
404  *
405  * Definition of versioning is as follows:
406  *
407  *	Major Number	Changes for these scenarios:
408  *			1.	When a new version of Windows Hyper-V
409  *				is released.
410  *			2.	A major change has occurred in the
411  *				Linux ICs (for example, the first merge
412  *				into the kernel).
413  *			Every time the Major Number changes, the
414  *			Revision number is reset to 0.
415  *	Minor Number	Changes when new functionality that is not
416  *			a bug fix is added to the Linux ICs.
417  *
418  * 3.1 - Added completed hv_utils driver. Shutdown/Heartbeat/Timesync
419  */
420 #define HV_DRV_VERSION           "3.1"
421 
422 /*
423  * The VMBUS version is a 32 bit entity broken up into
424  * two 16 bit quantities: major_number.minor_number.
425  *
426  * 0 . 13 (Windows Server 2008)
427  * 1 . 1  (Windows 7)
428  * 2 . 4  (Windows 8)
429  */
430 
431 #define VERSION_WS2008  ((0 << 16) | (13))
432 #define VERSION_WIN7    ((1 << 16) | (1))
433 #define VERSION_WIN8    ((2 << 16) | (4))
434 
435 #define VERSION_INVAL -1
436 
437 #define VERSION_CURRENT VERSION_WIN8
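/*
 * Illustrative sketch: unpacking a 32-bit VMBUS version into its major and
 * minor numbers.
 *
 *	u32 version = VERSION_CURRENT;
 *	u16 major = version >> 16;
 *	u16 minor = version & 0xffff;
 */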
438 
439 /* The maximum size of a pipe payload is 16K. */
440 #define MAX_PIPE_DATA_PAYLOAD		(sizeof(u8) * 16384)
441 
442 /* Define PipeMode values. */
443 #define VMBUS_PIPE_TYPE_BYTE		0x00000000
444 #define VMBUS_PIPE_TYPE_MESSAGE		0x00000004
445 
446 /* The size of the user defined data buffer for non-pipe offers. */
447 #define MAX_USER_DEFINED_BYTES		120
448 
449 /* The size of the user defined data buffer for pipe offers. */
450 #define MAX_PIPE_USER_DEFINED_BYTES	116
451 
452 /*
453  * At the center of the Channel Management library is the Channel Offer. This
454  * struct contains the fundamental information about an offer.
455  */
456 struct vmbus_channel_offer {
457 	uuid_le if_type;
458 	uuid_le if_instance;
459 
460 	/*
461 	 * These two fields are not currently used.
462 	 */
463 	u64 reserved1;
464 	u64 reserved2;
465 
466 	u16 chn_flags;
467 	u16 mmio_megabytes;		/* in megabytes (bytes * 1024 * 1024) */
468 
469 	union {
470 		/* Non-pipes: The user has MAX_USER_DEFINED_BYTES bytes. */
471 		struct {
472 			unsigned char user_def[MAX_USER_DEFINED_BYTES];
473 		} std;
474 
475 		/*
476 		 * Pipes:
477 		 * The following structure is an integrated pipe protocol, which
478 		 * is implemented on top of standard user-defined data. Pipe
479 		 * clients have MAX_PIPE_USER_DEFINED_BYTES left for their own
480 		 * use.
481 		 */
482 		struct {
483 			u32  pipe_mode;
484 			unsigned char user_def[MAX_PIPE_USER_DEFINED_BYTES];
485 		} pipe;
486 	} u;
487 	/*
488 	 * The sub_channel_index is defined in win8.
489 	 */
490 	u16 sub_channel_index;
491 	u16 reserved3;
492 } __packed;
493 
494 /* Server Flags */
495 #define VMBUS_CHANNEL_ENUMERATE_DEVICE_INTERFACE	1
496 #define VMBUS_CHANNEL_SERVER_SUPPORTS_TRANSFER_PAGES	2
497 #define VMBUS_CHANNEL_SERVER_SUPPORTS_GPADLS		4
498 #define VMBUS_CHANNEL_NAMED_PIPE_MODE			0x10
499 #define VMBUS_CHANNEL_LOOPBACK_OFFER			0x100
500 #define VMBUS_CHANNEL_PARENT_OFFER			0x200
501 #define VMBUS_CHANNEL_REQUEST_MONITORED_NOTIFICATION	0x400
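/*
 * Illustrative sketch (not part of the original header): selecting the
 * correct user-defined area of a struct vmbus_channel_offer, depending on
 * whether the offer is in named-pipe mode. "offer" is a hypothetical
 * pointer to a received offer.
 *
 *	const unsigned char *user_def;
 *
 *	if (offer->chn_flags & VMBUS_CHANNEL_NAMED_PIPE_MODE)
 *		user_def = offer->u.pipe.user_def;
 *	else
 *		user_def = offer->u.std.user_def;
 */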
502 
503 struct vmpacket_descriptor {
504 	u16 type;
505 	u16 offset8;
506 	u16 len8;
507 	u16 flags;
508 	u64 trans_id;
509 } __packed;
510 
511 struct vmpacket_header {
512 	u32 prev_pkt_start_offset;
513 	struct vmpacket_descriptor descriptor;
514 } __packed;
515 
516 struct vmtransfer_page_range {
517 	u32 byte_count;
518 	u32 byte_offset;
519 } __packed;
520 
521 struct vmtransfer_page_packet_header {
522 	struct vmpacket_descriptor d;
523 	u16 xfer_pageset_id;
524 	u8  sender_owns_set;
525 	u8 reserved;
526 	u32 range_cnt;
527 	struct vmtransfer_page_range ranges[1];
528 } __packed;
529 
530 struct vmgpadl_packet_header {
531 	struct vmpacket_descriptor d;
532 	u32 gpadl;
533 	u32 reserved;
534 } __packed;
535 
536 struct vmadd_remove_transfer_page_set {
537 	struct vmpacket_descriptor d;
538 	u32 gpadl;
539 	u16 xfer_pageset_id;
540 	u16 reserved;
541 } __packed;
542 
543 /*
544  * This structure defines a range in guest physical space that can be made to
545  * look virtually contiguous.
546  */
547 struct gpa_range {
548 	u32 byte_count;
549 	u32 byte_offset;
550 	u64 pfn_array[0];
551 };
552 
553 /*
554  * This is the format for an Establish Gpadl packet, which contains a handle by
555  * which this GPADL will be known and a set of GPA ranges associated with it.
556  * This can be converted to an MDL by the guest OS.  If there are multiple GPA
557  * ranges, then the resulting MDL will be "chained," representing multiple VA
558  * ranges.
559  */
560 struct vmestablish_gpadl {
561 	struct vmpacket_descriptor d;
562 	u32 gpadl;
563 	u32 range_cnt;
564 	struct gpa_range range[1];
565 } __packed;
566 
567 /*
568  * This is the format for a Teardown Gpadl packet, which indicates that the
569  * GPADL handle in the Establish Gpadl packet will never be referenced again.
570  */
571 struct vmteardown_gpadl {
572 	struct vmpacket_descriptor d;
573 	u32 gpadl;
574 	u32 reserved;	/* for alignment to an 8-byte boundary */
575 } __packed;
576 
577 /*
578  * This is the format for a GPA-Direct packet, which contains a set of GPA
579  * ranges, in addition to commands and/or data.
580  */
581 struct vmdata_gpa_direct {
582 	struct vmpacket_descriptor d;
583 	u32 reserved;
584 	u32 range_cnt;
585 	struct gpa_range range[1];
586 } __packed;
587 
588 /* This is the format for an Additional Data Packet. */
589 struct vmadditional_data {
590 	struct vmpacket_descriptor d;
591 	u64 total_bytes;
592 	u32 offset;
593 	u32 byte_cnt;
594 	unsigned char data[1];
595 } __packed;
596 
597 union vmpacket_largest_possible_header {
598 	struct vmpacket_descriptor simple_hdr;
599 	struct vmtransfer_page_packet_header xfer_page_hdr;
600 	struct vmgpadl_packet_header gpadl_hdr;
601 	struct vmadd_remove_transfer_page_set add_rm_xfer_page_hdr;
602 	struct vmestablish_gpadl establish_gpadl_hdr;
603 	struct vmteardown_gpadl teardown_gpadl_hdr;
604 	struct vmdata_gpa_direct data_gpa_direct_hdr;
605 };
606 
607 #define VMPACKET_DATA_START_ADDRESS(__packet)	\
608 	(void *)(((unsigned char *)__packet) +	\
609 	 ((struct vmpacket_descriptor *)__packet)->offset8 * 8)
610 
611 #define VMPACKET_DATA_LENGTH(__packet)		\
612 	((((struct vmpacket_descriptor *)__packet)->len8 -	\
613 	  ((struct vmpacket_descriptor *)__packet)->offset8) * 8)
614 
615 #define VMPACKET_TRANSFER_MODE(__packet)	\
616 	(((struct vmpacket_descriptor *)__packet)->type)
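/*
 * Illustrative sketch: locating the payload of a received packet from its
 * descriptor. offset8 and len8 are expressed in 8-byte units, so the
 * payload begins offset8 * 8 bytes into the packet. "desc" is assumed to
 * point at a struct vmpacket_descriptor read from the ring buffer.
 *
 *	void *data = VMPACKET_DATA_START_ADDRESS(desc);
 *	u32 datalen = VMPACKET_DATA_LENGTH(desc);
 */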
617 
618 enum vmbus_packet_type {
619 	VM_PKT_INVALID				= 0x0,
620 	VM_PKT_SYNCH				= 0x1,
621 	VM_PKT_ADD_XFER_PAGESET			= 0x2,
622 	VM_PKT_RM_XFER_PAGESET			= 0x3,
623 	VM_PKT_ESTABLISH_GPADL			= 0x4,
624 	VM_PKT_TEARDOWN_GPADL			= 0x5,
625 	VM_PKT_DATA_INBAND			= 0x6,
626 	VM_PKT_DATA_USING_XFER_PAGES		= 0x7,
627 	VM_PKT_DATA_USING_GPADL			= 0x8,
628 	VM_PKT_DATA_USING_GPA_DIRECT		= 0x9,
629 	VM_PKT_CANCEL_REQUEST			= 0xa,
630 	VM_PKT_COMP				= 0xb,
631 	VM_PKT_DATA_USING_ADDITIONAL_PKT	= 0xc,
632 	VM_PKT_ADDITIONAL_DATA			= 0xd
633 };
634 
635 #define VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED	1
636 
637 
638 /* Version 1 messages */
639 enum vmbus_channel_message_type {
640 	CHANNELMSG_INVALID			=  0,
641 	CHANNELMSG_OFFERCHANNEL		=  1,
642 	CHANNELMSG_RESCIND_CHANNELOFFER	=  2,
643 	CHANNELMSG_REQUESTOFFERS		=  3,
644 	CHANNELMSG_ALLOFFERS_DELIVERED	=  4,
645 	CHANNELMSG_OPENCHANNEL		=  5,
646 	CHANNELMSG_OPENCHANNEL_RESULT		=  6,
647 	CHANNELMSG_CLOSECHANNEL		=  7,
648 	CHANNELMSG_GPADL_HEADER		=  8,
649 	CHANNELMSG_GPADL_BODY			=  9,
650 	CHANNELMSG_GPADL_CREATED		= 10,
651 	CHANNELMSG_GPADL_TEARDOWN		= 11,
652 	CHANNELMSG_GPADL_TORNDOWN		= 12,
653 	CHANNELMSG_RELID_RELEASED		= 13,
654 	CHANNELMSG_INITIATE_CONTACT		= 14,
655 	CHANNELMSG_VERSION_RESPONSE		= 15,
656 	CHANNELMSG_UNLOAD			= 16,
657 #ifdef VMBUS_FEATURE_PARENT_OR_PEER_MEMORY_MAPPED_INTO_A_CHILD
658 	CHANNELMSG_VIEWRANGE_ADD		= 17,
659 	CHANNELMSG_VIEWRANGE_REMOVE		= 18,
660 #endif
661 	CHANNELMSG_COUNT
662 };
663 
664 struct vmbus_channel_message_header {
665 	enum vmbus_channel_message_type msgtype;
666 	u32 padding;
667 } __packed;
668 
669 /* Query VMBus Version parameters */
670 struct vmbus_channel_query_vmbus_version {
671 	struct vmbus_channel_message_header header;
672 	u32 version;
673 } __packed;
674 
675 /* VMBus Version Supported parameters */
676 struct vmbus_channel_version_supported {
677 	struct vmbus_channel_message_header header;
678 	u8 version_supported;
679 } __packed;
680 
681 /* Offer Channel parameters */
682 struct vmbus_channel_offer_channel {
683 	struct vmbus_channel_message_header header;
684 	struct vmbus_channel_offer offer;
685 	u32 child_relid;
686 	u8 monitorid;
687 	/*
688 	 * win7 and beyond split this field into a bit field.
689 	 */
690 	u8 monitor_allocated:1;
691 	u8 reserved:7;
692 	/*
693 	 * These are new fields added in win7 and later.
694 	 * Do not access these fields without checking the
695 	 * negotiated protocol.
696 	 *
697 	 * If "is_dedicated_interrupt" is set, we must not set the
698 	 * associated bit in the channel bitmap while sending the
699 	 * interrupt to the host.
700 	 *
701 	 * connection_id is to be used in signaling the host.
702 	 */
703 	u16 is_dedicated_interrupt:1;
704 	u16 reserved1:15;
705 	u32 connection_id;
706 } __packed;
707 
708 /* Rescind Offer parameters */
709 struct vmbus_channel_rescind_offer {
710 	struct vmbus_channel_message_header header;
711 	u32 child_relid;
712 } __packed;
713 
714 /*
715  * Request Offer -- no parameters, SynIC message contains the partition ID
716  * Set Snoop -- no parameters, SynIC message contains the partition ID
717  * Clear Snoop -- no parameters, SynIC message contains the partition ID
718  * All Offers Delivered -- no parameters, SynIC message contains the partition
719  *		           ID
720  * Flush Client -- no parameters, SynIC message contains the partition ID
721  */
722 
723 /* Open Channel parameters */
724 struct vmbus_channel_open_channel {
725 	struct vmbus_channel_message_header header;
726 
727 	/* Identifies the specific VMBus channel that is being opened. */
728 	u32 child_relid;
729 
730 	/* ID making a particular open request at a channel offer unique. */
731 	u32 openid;
732 
733 	/* GPADL for the channel's ring buffer. */
734 	u32 ringbuffer_gpadlhandle;
735 
736 	/*
737 	 * Starting with win8, this field will be used to specify
738 	 * the target virtual processor on which to deliver the interrupt for
739 	 * the host to guest communication.
740 	 * Prior to win8, incoming channel interrupts would only
741 	 * be delivered on cpu 0. Setting this value to 0 would
742 	 * preserve the earlier behavior.
743 	 */
744 	u32 target_vp;
745 
746 	/*
747 	 * The upstream ring buffer begins at offset zero in the memory
748 	 * described by RingBufferGpadlHandle. The downstream ring buffer
749 	 * follows it at this offset (in pages).
750 	 */
751 	u32 downstream_ringbuffer_pageoffset;
752 
753 	/* User-specific data to be passed along to the server endpoint. */
754 	unsigned char userdata[MAX_USER_DEFINED_BYTES];
755 } __packed;
756 
757 /* Open Channel Result parameters */
758 struct vmbus_channel_open_result {
759 	struct vmbus_channel_message_header header;
760 	u32 child_relid;
761 	u32 openid;
762 	u32 status;
763 } __packed;
764 
765 /* Close channel parameters */
766 struct vmbus_channel_close_channel {
767 	struct vmbus_channel_message_header header;
768 	u32 child_relid;
769 } __packed;
770 
771 /* Channel Message GPADL */
772 #define GPADL_TYPE_RING_BUFFER		1
773 #define GPADL_TYPE_SERVER_SAVE_AREA	2
774 #define GPADL_TYPE_TRANSACTION		8
775 
776 /*
777  * The number of PFNs in a GPADL message is defined by the number of
778  * pages that would be spanned by ByteCount and ByteOffset.  If the
779  * implied number of PFNs won't fit in this packet, there will be a
780  * follow-up packet that contains more.
781  */
782 struct vmbus_channel_gpadl_header {
783 	struct vmbus_channel_message_header header;
784 	u32 child_relid;
785 	u32 gpadl;
786 	u16 range_buflen;
787 	u16 rangecount;
788 	struct gpa_range range[0];
789 } __packed;
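/*
 * Illustrative sketch: the PFN count implied by a single gpa_range, i.e.
 * the number of pages spanned by byte_offset plus byte_count.
 *
 *	u32 pfn_count = (range->byte_offset + range->byte_count +
 *			 PAGE_SIZE - 1) >> PAGE_SHIFT;
 */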
790 
791 /* This is the follow-up packet that contains more PFNs. */
792 struct vmbus_channel_gpadl_body {
793 	struct vmbus_channel_message_header header;
794 	u32 msgnumber;
795 	u32 gpadl;
796 	u64 pfn[0];
797 } __packed;
798 
799 struct vmbus_channel_gpadl_created {
800 	struct vmbus_channel_message_header header;
801 	u32 child_relid;
802 	u32 gpadl;
803 	u32 creation_status;
804 } __packed;
805 
806 struct vmbus_channel_gpadl_teardown {
807 	struct vmbus_channel_message_header header;
808 	u32 child_relid;
809 	u32 gpadl;
810 } __packed;
811 
812 struct vmbus_channel_gpadl_torndown {
813 	struct vmbus_channel_message_header header;
814 	u32 gpadl;
815 } __packed;
816 
817 #ifdef VMBUS_FEATURE_PARENT_OR_PEER_MEMORY_MAPPED_INTO_A_CHILD
818 struct vmbus_channel_view_range_add {
819 	struct vmbus_channel_message_header header;
820 	PHYSICAL_ADDRESS viewrange_base;
821 	u64 viewrange_length;
822 	u32 child_relid;
823 } __packed;
824 
825 struct vmbus_channel_view_range_remove {
826 	struct vmbus_channel_message_header header;
827 	PHYSICAL_ADDRESS viewrange_base;
828 	u32 child_relid;
829 } __packed;
830 #endif
831 
832 struct vmbus_channel_relid_released {
833 	struct vmbus_channel_message_header header;
834 	u32 child_relid;
835 } __packed;
836 
837 struct vmbus_channel_initiate_contact {
838 	struct vmbus_channel_message_header header;
839 	u32 vmbus_version_requested;
840 	u32 padding2;
841 	u64 interrupt_page;
842 	u64 monitor_page1;
843 	u64 monitor_page2;
844 } __packed;
845 
846 struct vmbus_channel_version_response {
847 	struct vmbus_channel_message_header header;
848 	u8 version_supported;
849 } __packed;
850 
851 enum vmbus_channel_state {
852 	CHANNEL_OFFER_STATE,
853 	CHANNEL_OPENING_STATE,
854 	CHANNEL_OPEN_STATE,
855 };
856 
857 struct vmbus_channel_debug_info {
858 	u32 relid;
859 	enum vmbus_channel_state state;
860 	uuid_le interfacetype;
861 	uuid_le interface_instance;
862 	u32 monitorid;
863 	u32 servermonitor_pending;
864 	u32 servermonitor_latency;
865 	u32 servermonitor_connectionid;
866 	u32 clientmonitor_pending;
867 	u32 clientmonitor_latency;
868 	u32 clientmonitor_connectionid;
869 
870 	struct hv_ring_buffer_debug_info inbound;
871 	struct hv_ring_buffer_debug_info outbound;
872 };
873 
874 /*
875  * Represents each channel msg on the vmbus connection. This is a
876  * variable-size data structure depending on the msg type itself.
877  */
878 struct vmbus_channel_msginfo {
879 	/* Bookkeeping stuff */
880 	struct list_head msglistentry;
881 
882 	/* So far, this is only used to handle gpadl body message */
883 	struct list_head submsglist;
884 
885 	/* Synchronize the request/response if needed */
886 	struct completion  waitevent;
887 	union {
888 		struct vmbus_channel_version_supported version_supported;
889 		struct vmbus_channel_open_result open_result;
890 		struct vmbus_channel_gpadl_torndown gpadl_torndown;
891 		struct vmbus_channel_gpadl_created gpadl_created;
892 		struct vmbus_channel_version_response version_response;
893 	} response;
894 
895 	u32 msgsize;
896 	/*
897 	 * The channel message that goes out on the "wire".
898 	 * It will contain at minimum the VMBUS_CHANNEL_MESSAGE_HEADER header
899 	 */
900 	unsigned char msg[0];
901 };
902 
903 struct vmbus_close_msg {
904 	struct vmbus_channel_msginfo info;
905 	struct vmbus_channel_close_channel msg;
906 };
907 
908 /* Define connection identifier type. */
909 union hv_connection_id {
910 	u32 asu32;
911 	struct {
912 		u32 id:24;
913 		u32 reserved:8;
914 	} u;
915 };
916 
917 /* Definition of the hv_signal_event hypercall input structure. */
918 struct hv_input_signal_event {
919 	union hv_connection_id connectionid;
920 	u16 flag_number;
921 	u16 rsvdz;
922 };
923 
924 struct hv_input_signal_event_buffer {
925 	u64 align8;
926 	struct hv_input_signal_event event;
927 };
928 
929 struct vmbus_channel {
930 	struct list_head listentry;
931 
932 	struct hv_device *device_obj;
933 
934 	struct work_struct work;
935 
936 	enum vmbus_channel_state state;
937 
938 	struct vmbus_channel_offer_channel offermsg;
939 	/*
940 	 * These are based on the OfferMsg.MonitorId.
941 	 * Save it here for easy access.
942 	 */
943 	u8 monitor_grp;
944 	u8 monitor_bit;
945 
946 	u32 ringbuffer_gpadlhandle;
947 
948 	/* Allocated memory for ring buffer */
949 	void *ringbuffer_pages;
950 	u32 ringbuffer_pagecount;
951 	struct hv_ring_buffer_info outbound;	/* send to parent */
952 	struct hv_ring_buffer_info inbound;	/* receive from parent */
953 	spinlock_t inbound_lock;
954 	struct workqueue_struct *controlwq;
955 
956 	struct vmbus_close_msg close_msg;
957 
958 	/* Channel callbacks are invoked in this workqueue context */
959 	/* HANDLE dataWorkQueue; */
960 
961 	void (*onchannel_callback)(void *context);
962 	void *channel_callback_context;
963 
964 	/*
965 	 * A channel can be marked for efficient (batched)
966 	 * reading:
967 	 * If batched_reading is set to "true", we read until the
968 	 * channel is empty and hold off interrupts from the host
969 	 * during the entire read process.
970 	 * If batched_reading is set to "false", the client is not
971 	 * going to perform batched reading.
972 	 *
973 	 * By default we will enable batched reading; specific
974 	 * drivers that don't want this behavior can turn it off.
975 	 */
976 
977 	bool batched_reading;
978 
979 	bool is_dedicated_interrupt;
980 	struct hv_input_signal_event_buffer sig_buf;
981 	struct hv_input_signal_event *sig_event;
982 
983 	/*
984 	 * Starting with win8, this field will be used to specify
985 	 * the target virtual processor on which to deliver the interrupt for
986 	 * the host to guest communication.
987 	 * Prior to win8, incoming channel interrupts would only
988 	 * be delivered on cpu 0. Setting this value to 0 would
989 	 * preserve the earlier behavior.
990 	 */
991 	u32 target_vp;
992 };
993 
994 static inline void set_channel_read_state(struct vmbus_channel *c, bool state)
995 {
996 	c->batched_reading = state;
997 }
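/*
 * Illustrative sketch: a driver that does not want batched reading (see
 * the comment in struct vmbus_channel above) can turn it off from its
 * probe() callback, where "dev" is the hv_device passed to probe().
 *
 *	set_channel_read_state(dev->channel, false);
 */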
998 
999 void vmbus_onmessage(void *context);
1000 
1001 int vmbus_request_offers(void);
1002 
1003 /* The format must be the same as struct vmdata_gpa_direct */
1004 struct vmbus_channel_packet_page_buffer {
1005 	u16 type;
1006 	u16 dataoffset8;
1007 	u16 length8;
1008 	u16 flags;
1009 	u64 transactionid;
1010 	u32 reserved;
1011 	u32 rangecount;
1012 	struct hv_page_buffer range[MAX_PAGE_BUFFER_COUNT];
1013 } __packed;
1014 
1015 /* The format must be the same as struct vmdata_gpa_direct */
1016 struct vmbus_channel_packet_multipage_buffer {
1017 	u16 type;
1018 	u16 dataoffset8;
1019 	u16 length8;
1020 	u16 flags;
1021 	u64 transactionid;
1022 	u32 reserved;
1023 	u32 rangecount;		/* Always 1 in this case */
1024 	struct hv_multipage_buffer range;
1025 } __packed;
1026 
1027 
1028 extern int vmbus_open(struct vmbus_channel *channel,
1029 			    u32 send_ringbuffersize,
1030 			    u32 recv_ringbuffersize,
1031 			    void *userdata,
1032 			    u32 userdatalen,
1033 			    void(*onchannel_callback)(void *context),
1034 			    void *context);
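/*
 * Illustrative sketch (not part of the original header): opening a channel
 * with 4-page send and receive ring buffers from a driver's probe()
 * callback. The callback name my_onchannel_callback is hypothetical.
 *
 *	ret = vmbus_open(dev->channel, 4 * PAGE_SIZE, 4 * PAGE_SIZE,
 *			 NULL, 0, my_onchannel_callback, dev->channel);
 *	if (ret)
 *		return ret;
 */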
1035 
1036 extern void vmbus_close(struct vmbus_channel *channel);
1037 
1038 extern int vmbus_sendpacket(struct vmbus_channel *channel,
1039 				  const void *buffer,
1040 				  u32 bufferLen,
1041 				  u64 requestid,
1042 				  enum vmbus_packet_type type,
1043 				  u32 flags);
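/*
 * Illustrative sketch: sending an in-band packet and requesting a
 * completion (VM_PKT_COMP) from the host. The "request" structure and its
 * use as the request id are hypothetical.
 *
 *	ret = vmbus_sendpacket(channel, &request, sizeof(request),
 *			       (unsigned long)&request, VM_PKT_DATA_INBAND,
 *			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
 */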
1044 
1045 extern int vmbus_sendpacket_pagebuffer(struct vmbus_channel *channel,
1046 					    struct hv_page_buffer pagebuffers[],
1047 					    u32 pagecount,
1048 					    void *buffer,
1049 					    u32 bufferlen,
1050 					    u64 requestid);
1051 
1052 extern int vmbus_sendpacket_multipagebuffer(struct vmbus_channel *channel,
1053 					struct hv_multipage_buffer *mpb,
1054 					void *buffer,
1055 					u32 bufferlen,
1056 					u64 requestid);
1057 
1058 extern int vmbus_establish_gpadl(struct vmbus_channel *channel,
1059 				      void *kbuffer,
1060 				      u32 size,
1061 				      u32 *gpadl_handle);
1062 
1063 extern int vmbus_teardown_gpadl(struct vmbus_channel *channel,
1064 				     u32 gpadl_handle);
1065 
1066 extern int vmbus_recvpacket(struct vmbus_channel *channel,
1067 				  void *buffer,
1068 				  u32 bufferlen,
1069 				  u32 *buffer_actual_len,
1070 				  u64 *requestid);
1071 
1072 extern int vmbus_recvpacket_raw(struct vmbus_channel *channel,
1073 				     void *buffer,
1074 				     u32 bufferlen,
1075 				     u32 *buffer_actual_len,
1076 				     u64 *requestid);
1077 
1078 
1079 extern void vmbus_get_debug_info(struct vmbus_channel *channel,
1080 				     struct vmbus_channel_debug_info *debug);
1081 
1082 extern void vmbus_ontimer(unsigned long data);
1083 
1084 struct hv_dev_port_info {
1085 	u32 int_mask;
1086 	u32 read_idx;
1087 	u32 write_idx;
1088 	u32 bytes_avail_toread;
1089 	u32 bytes_avail_towrite;
1090 };
1091 
1092 /* Base driver object */
1093 struct hv_driver {
1094 	const char *name;
1095 
1096 	/* the device type supported by this driver */
1097 	uuid_le dev_type;
1098 	const struct hv_vmbus_device_id *id_table;
1099 
1100 	struct device_driver driver;
1101 
1102 	int (*probe)(struct hv_device *, const struct hv_vmbus_device_id *);
1103 	int (*remove)(struct hv_device *);
1104 	void (*shutdown)(struct hv_device *);
1105 
1106 };
1107 
1108 /* Base device object */
1109 struct hv_device {
1110 	/* the device type id of this device */
1111 	uuid_le dev_type;
1112 
1113 	/* the device instance id of this device */
1114 	uuid_le dev_instance;
1115 
1116 	struct device device;
1117 
1118 	struct vmbus_channel *channel;
1119 };
1120 
1121 
1122 static inline struct hv_device *device_to_hv_device(struct device *d)
1123 {
1124 	return container_of(d, struct hv_device, device);
1125 }
1126 
1127 static inline struct hv_driver *drv_to_hv_drv(struct device_driver *d)
1128 {
1129 	return container_of(d, struct hv_driver, driver);
1130 }
1131 
1132 static inline void hv_set_drvdata(struct hv_device *dev, void *data)
1133 {
1134 	dev_set_drvdata(&dev->device, data);
1135 }
1136 
1137 static inline void *hv_get_drvdata(struct hv_device *dev)
1138 {
1139 	return dev_get_drvdata(&dev->device);
1140 }
1141 
1142 /* Vmbus interface */
1143 #define vmbus_driver_register(driver)	\
1144 	__vmbus_driver_register(driver, THIS_MODULE, KBUILD_MODNAME)
1145 int __must_check __vmbus_driver_register(struct hv_driver *hv_driver,
1146 					 struct module *owner,
1147 					 const char *mod_name);
1148 void vmbus_driver_unregister(struct hv_driver *hv_driver);
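/*
 * Illustrative sketch (not part of the original header): a minimal VMBus
 * driver registration. The name, id table and callbacks are hypothetical.
 *
 *	static struct hv_driver my_drv = {
 *		.name = "my_drv",
 *		.id_table = my_id_table,
 *		.probe = my_probe,
 *		.remove = my_remove,
 *	};
 *
 *	static int __init my_drv_init(void)
 *	{
 *		return vmbus_driver_register(&my_drv);
 *	}
 *
 *	static void __exit my_drv_exit(void)
 *	{
 *		vmbus_driver_unregister(&my_drv);
 *	}
 */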
1149 
1150 /**
1151  * VMBUS_DEVICE - macro used to describe a specific hyperv vmbus device
1152  *
1153  * This macro is used to create a struct hv_vmbus_device_id that matches a
1154  * specific device.
1155  */
1156 #define VMBUS_DEVICE(g0, g1, g2, g3, g4, g5, g6, g7,	\
1157 		     g8, g9, ga, gb, gc, gd, ge, gf)	\
1158 	.guid = { g0, g1, g2, g3, g4, g5, g6, g7,	\
1159 		  g8, g9, ga, gb, gc, gd, ge, gf },
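/*
 * Illustrative sketch: building a device id table with VMBUS_DEVICE(). The
 * GUID bytes shown are those of the mouse device defined further below.
 *
 *	static const struct hv_vmbus_device_id id_table[] = {
 *		{ VMBUS_DEVICE(0x9e, 0xb6, 0xa8, 0xcf, 0x4a, 0x5b, 0xc0, 0x4c,
 *			       0xb9, 0x8b, 0x8b, 0xa1, 0xa1, 0xf3, 0xf9, 0x5a) },
 *		{ },
 *	};
 */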
1160 
1161 /*
1162  * GUID definitions of various offer types - services offered to the guest.
1163  */
1164 
1165 /*
1166  * Network GUID
1167  * {f8615163-df3e-46c5-913f-f2d2f965ed0e}
1168  */
1169 #define HV_NIC_GUID \
1170 	.guid = { \
1171 			0x63, 0x51, 0x61, 0xf8, 0x3e, 0xdf, 0xc5, 0x46, \
1172 			0x91, 0x3f, 0xf2, 0xd2, 0xf9, 0x65, 0xed, 0x0e \
1173 		}
1174 
1175 /*
1176  * IDE GUID
1177  * {32412632-86cb-44a2-9b5c-50d1417354f5}
1178  */
1179 #define HV_IDE_GUID \
1180 	.guid = { \
1181 			0x32, 0x26, 0x41, 0x32, 0xcb, 0x86, 0xa2, 0x44, \
1182 			0x9b, 0x5c, 0x50, 0xd1, 0x41, 0x73, 0x54, 0xf5 \
1183 		}
1184 
1185 /*
1186  * SCSI GUID
1187  * {ba6163d9-04a1-4d29-b605-72e2ffb1dc7f}
1188  */
1189 #define HV_SCSI_GUID \
1190 	.guid = { \
1191 			0xd9, 0x63, 0x61, 0xba, 0xa1, 0x04, 0x29, 0x4d, \
1192 			0xb6, 0x05, 0x72, 0xe2, 0xff, 0xb1, 0xdc, 0x7f \
1193 		}
1194 
1195 /*
1196  * Shutdown GUID
1197  * {0e0b6031-5213-4934-818b-38d90ced39db}
1198  */
1199 #define HV_SHUTDOWN_GUID \
1200 	.guid = { \
1201 			0x31, 0x60, 0x0b, 0x0e, 0x13, 0x52, 0x34, 0x49, \
1202 			0x81, 0x8b, 0x38, 0xd9, 0x0c, 0xed, 0x39, 0xdb \
1203 		}
1204 
1205 /*
1206  * Time Synch GUID
1207  * {9527E630-D0AE-497b-ADCE-E80AB0175CAF}
1208  */
1209 #define HV_TS_GUID \
1210 	.guid = { \
1211 			0x30, 0xe6, 0x27, 0x95, 0xae, 0xd0, 0x7b, 0x49, \
1212 			0xad, 0xce, 0xe8, 0x0a, 0xb0, 0x17, 0x5c, 0xaf \
1213 		}
1214 
1215 /*
1216  * Heartbeat GUID
1217  * {57164f39-9115-4e78-ab55-382f3bd5422d}
1218  */
1219 #define HV_HEART_BEAT_GUID \
1220 	.guid = { \
1221 			0x39, 0x4f, 0x16, 0x57, 0x15, 0x91, 0x78, 0x4e, \
1222 			0xab, 0x55, 0x38, 0x2f, 0x3b, 0xd5, 0x42, 0x2d \
1223 		}
1224 
1225 /*
1226  * KVP GUID
1227  * {a9a0f4e7-5a45-4d96-b827-8a841e8c03e6}
1228  */
1229 #define HV_KVP_GUID \
1230 	.guid = { \
1231 			0xe7, 0xf4, 0xa0, 0xa9, 0x45, 0x5a, 0x96, 0x4d, \
1232 			0xb8, 0x27, 0x8a, 0x84, 0x1e, 0x8c, 0x3,  0xe6 \
1233 		}
1234 
1235 /*
1236  * Dynamic memory GUID
1237  * {525074dc-8985-46e2-8057-a307dc18a502}
1238  */
1239 #define HV_DM_GUID \
1240 	.guid = { \
1241 			0xdc, 0x74, 0x50, 0X52, 0x85, 0x89, 0xe2, 0x46, \
1242 			0x80, 0x57, 0xa3, 0x07, 0xdc, 0x18, 0xa5, 0x02 \
1243 		}
1244 
1245 /*
1246  * Mouse GUID
1247  * {cfa8b69e-5b4a-4cc0-b98b-8ba1a1f3f95a}
1248  */
1249 #define HV_MOUSE_GUID \
1250 	.guid = { \
1251 			0x9e, 0xb6, 0xa8, 0xcf, 0x4a, 0x5b, 0xc0, 0x4c, \
1252 			0xb9, 0x8b, 0x8b, 0xa1, 0xa1, 0xf3, 0xf9, 0x5a \
1253 		}
1254 
1255 /*
1256  * Common header for Hyper-V ICs
1257  */
1258 
1259 #define ICMSGTYPE_NEGOTIATE		0
1260 #define ICMSGTYPE_HEARTBEAT		1
1261 #define ICMSGTYPE_KVPEXCHANGE		2
1262 #define ICMSGTYPE_SHUTDOWN		3
1263 #define ICMSGTYPE_TIMESYNC		4
1264 #define ICMSGTYPE_VSS			5
1265 
1266 #define ICMSGHDRFLAG_TRANSACTION	1
1267 #define ICMSGHDRFLAG_REQUEST		2
1268 #define ICMSGHDRFLAG_RESPONSE		4
1269 
1270 
1271 /*
1272  * While we want to handle util services as regular devices,
1273  * there is only one instance of each of these services, so
1274  * we statically allocate the service-specific state.
1275  */
1276 
1277 struct hv_util_service {
1278 	u8 *recv_buffer;
1279 	void (*util_cb)(void *);
1280 	int (*util_init)(struct hv_util_service *);
1281 	void (*util_deinit)(void);
1282 };
1283 
1284 struct vmbuspipe_hdr {
1285 	u32 flags;
1286 	u32 msgsize;
1287 } __packed;
1288 
1289 struct ic_version {
1290 	u16 major;
1291 	u16 minor;
1292 } __packed;
1293 
1294 struct icmsg_hdr {
1295 	struct ic_version icverframe;
1296 	u16 icmsgtype;
1297 	struct ic_version icvermsg;
1298 	u16 icmsgsize;
1299 	u32 status;
1300 	u8 ictransaction_id;
1301 	u8 icflags;
1302 	u8 reserved[2];
1303 } __packed;
1304 
1305 struct icmsg_negotiate {
1306 	u16 icframe_vercnt;
1307 	u16 icmsg_vercnt;
1308 	u32 reserved;
1309 	struct ic_version icversion_data[1]; /* any size array */
1310 } __packed;
1311 
1312 struct shutdown_msg_data {
1313 	u32 reason_code;
1314 	u32 timeout_seconds;
1315 	u32 flags;
1316 	u8  display_message[2048];
1317 } __packed;
1318 
1319 struct heartbeat_msg_data {
1320 	u64 seq_num;
1321 	u32 reserved[8];
1322 } __packed;
1323 
1324 /* Time Sync IC defs */
1325 #define ICTIMESYNCFLAG_PROBE	0
1326 #define ICTIMESYNCFLAG_SYNC	1
1327 #define ICTIMESYNCFLAG_SAMPLE	2
1328 
1329 #ifdef __x86_64__
1330 #define WLTIMEDELTA	116444736000000000L	/* in 100ns units */
1331 #else
1332 #define WLTIMEDELTA	116444736000000000LL
1333 #endif
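/*
 * Illustrative sketch: converting the parenttime field of an
 * ICTIMESYNCFLAG_SYNC sample (100 ns units since January 1, 1601, see
 * struct ictimesync_data below) into nanoseconds since the Unix epoch.
 *
 *	u64 host_ns = (parenttime - WLTIMEDELTA) * 100;
 */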
1334 
1335 struct ictimesync_data {
1336 	u64 parenttime;
1337 	u64 childtime;
1338 	u64 roundtriptime;
1339 	u8 flags;
1340 } __packed;
1341 
1342 struct hyperv_service_callback {
1343 	u8 msg_type;
1344 	char *log_msg;
1345 	uuid_le data;
1346 	struct vmbus_channel *channel;
1347 	void (*callback) (void *context);
1348 };
1349 
1350 #define MAX_SRV_VER	0x7ffffff
1351 extern void vmbus_prep_negotiate_resp(struct icmsg_hdr *,
1352 					struct icmsg_negotiate *, u8 *, int,
1353 					int);
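/*
 * Illustrative sketch (assumption: the two trailing int arguments are the
 * maximum framework and service versions the caller supports): answering
 * an ICMSGTYPE_NEGOTIATE request from a util service channel callback,
 * with icmsghdrp, negop and recv_buffer set up by the caller.
 *
 *	if (icmsghdrp->icmsgtype == ICMSGTYPE_NEGOTIATE)
 *		vmbus_prep_negotiate_resp(icmsghdrp, negop, recv_buffer,
 *					  MAX_SRV_VER, MAX_SRV_VER);
 */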
1354 
1355 int hv_kvp_init(struct hv_util_service *);
1356 void hv_kvp_deinit(void);
1357 void hv_kvp_onchannelcallback(void *);
1358 
1359 /*
1360  * Negotiated version with the Host.
1361  */
1362 
1363 extern __u32 vmbus_proto_version;
1364 
1365 #endif /* __KERNEL__ */
1366 #endif /* _HYPERV_H */
1367