/* SPDX-License-Identifier: GPL-2.0-only */
/*
 *
 * Copyright (c) 2011, Microsoft Corporation.
 *
 * Authors:
 *   Haiyang Zhang <[email protected]>
 *   Hank Janssen  <[email protected]>
 *   K. Y. Srinivasan <[email protected]>
 */

#ifndef _HYPERV_H
#define _HYPERV_H

#include <uapi/linux/hyperv.h>

#include <linux/types.h>
#include <linux/scatterlist.h>
#include <linux/list.h>
#include <linux/timer.h>
#include <linux/completion.h>
#include <linux/device.h>
#include <linux/mod_devicetable.h>
#include <linux/interrupt.h>
#include <linux/reciprocal_div.h>

#define MAX_PAGE_BUFFER_COUNT				32
#define MAX_MULTIPAGE_BUFFER_COUNT			32 /* 128K */

#pragma pack(push, 1)

/* Single-page buffer */
struct hv_page_buffer {
	u32 len;
	u32 offset;
	u64 pfn;
};

/* Multiple-page buffer */
struct hv_multipage_buffer {
	/* Length and offset determine the # of pfns in the array */
	u32 len;
	u32 offset;
	u64 pfn_array[MAX_MULTIPAGE_BUFFER_COUNT];
};

/*
 * Multiple-page buffer array; the pfn array is variable size:
 * The number of entries in the PFN array is determined by
 * "len" and "offset".
 */
struct hv_mpb_array {
	/* Length and offset determine the # of pfns in the array */
	u32 len;
	u32 offset;
	u64 pfn_array[];
};

/* 0x18 includes the proprietary packet header */
#define MAX_PAGE_BUFFER_PACKET		(0x18 +			\
					(sizeof(struct hv_page_buffer) * \
					 MAX_PAGE_BUFFER_COUNT))
#define MAX_MULTIPAGE_BUFFER_PACKET	(0x18 +			\
					 sizeof(struct hv_multipage_buffer))


#pragma pack(pop)

struct hv_ring_buffer {
	/* Offset in bytes from the start of ring data below */
	u32 write_index;

	/* Offset in bytes from the start of ring data below */
	u32 read_index;

	u32 interrupt_mask;

	/*
	 * WS2012/Win8 and later versions of Hyper-V implement interrupt
	 * driven flow management. The feature bit feat_pending_send_sz
	 * is set by the host on the host->guest ring buffer, and by the
	 * guest on the guest->host ring buffer.
	 *
	 * The meaning of the feature bit is a bit complex in that it has
	 * semantics that apply to both ring buffers.  If the guest sets
	 * the feature bit in the guest->host ring buffer, the guest is
	 * telling the host that:
	 * 1) It will set the pending_send_sz field in the guest->host ring
	 *    buffer when it is waiting for space to become available, and
	 * 2) It will read the pending_send_sz field in the host->guest
	 *    ring buffer and interrupt the host when it frees enough space.
	 *
	 * Similarly, if the host sets the feature bit in the host->guest
	 * ring buffer, the host is telling the guest that:
	 * 1) It will set the pending_send_sz field in the host->guest ring
	 *    buffer when it is waiting for space to become available, and
	 * 2) It will read the pending_send_sz field in the guest->host
	 *    ring buffer and interrupt the guest when it frees enough space.
	 *
	 * If either the guest or host does not set the feature bit that it
	 * owns, that guest or host must do polling if it encounters a full
	 * ring buffer, and not signal the other end with an interrupt.
	 */
	u32 pending_send_sz;
	u32 reserved1[12];
	union {
		struct {
			u32 feat_pending_send_sz:1;
		};
		u32 value;
	} feature_bits;

	/* Pad it to PAGE_SIZE so that data starts on a page boundary */
	u8	reserved2[4028];

	/*
	 * Ring data starts here + RingDataStartOffset
	 * !!! DO NOT place any fields below this !!!
	 */
	u8 buffer[0];
} __packed;
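
/*
 * Layout note (illustrative): the fixed header fields above total 68
 * bytes (five u32s, reserved1[12], and the feature_bits union), and
 * the 4028-byte pad brings the header to exactly 4096 bytes, so ring
 * data starts on a 4 KiB page boundary. A hypothetical compile-time
 * check, not part of this header:
 *
 *	_Static_assert(offsetof(struct hv_ring_buffer, buffer) == 4096,
 *		       "ring data must start on a page boundary");
 */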

struct hv_ring_buffer_info {
	struct hv_ring_buffer *ring_buffer;
	u32 ring_size;			/* Includes the shared header */
	struct reciprocal_value ring_size_div10_reciprocal;
	spinlock_t ring_lock;

	u32 ring_datasize;		/* < ring_size */
	u32 priv_read_index;
	/*
	 * The ring buffer mutex lock. This lock prevents the ring buffer from
	 * being freed while the ring buffer is being accessed.
	 */
	struct mutex ring_buffer_mutex;
};


static inline u32 hv_get_bytes_to_read(const struct hv_ring_buffer_info *rbi)
{
	u32 read_loc, write_loc, dsize, read;

	dsize = rbi->ring_datasize;
	read_loc = rbi->ring_buffer->read_index;
	write_loc = READ_ONCE(rbi->ring_buffer->write_index);

	read = write_loc >= read_loc ? (write_loc - read_loc) :
		(dsize - read_loc) + write_loc;

	return read;
}

static inline u32 hv_get_bytes_to_write(const struct hv_ring_buffer_info *rbi)
{
	u32 read_loc, write_loc, dsize, write;

	dsize = rbi->ring_datasize;
	read_loc = READ_ONCE(rbi->ring_buffer->read_index);
	write_loc = rbi->ring_buffer->write_index;

	write = write_loc >= read_loc ? dsize - (write_loc - read_loc) :
		read_loc - write_loc;
	return write;
}

static inline u32 hv_get_avail_to_write_percent(
		const struct hv_ring_buffer_info *rbi)
{
	u32 avail_write = hv_get_bytes_to_write(rbi);

	return reciprocal_divide(
			(avail_write << 3) + (avail_write << 1),
			rbi->ring_size_div10_reciprocal);
}
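
/*
 * Reading the math above: (x << 3) + (x << 1) is 8x + 2x = 10x, so the
 * reciprocal divide computes (avail_write * 10) / (ring_size / 10),
 * i.e. the free space as a percentage of the ring, without a runtime
 * division. A plain (but slower) roughly equivalent form would be:
 *
 *	return (u32)((u64)avail_write * 100 / rbi->ring_size);
 */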

/*
 * The VMBus version is a 32-bit entity broken up into
 * two 16-bit quantities: major_number.minor_number.
 *
 * 0 . 13 (Windows Server 2008)
 * 1 . 1  (Windows 7)
 * 2 . 4  (Windows 8)
 * 3 . 0  (Windows 8 R2)
 * 4 . 0  (Windows 10)
 * 5 . 0  (Newer Windows 10)
 */

#define VERSION_WS2008		((0 << 16) | (13))
#define VERSION_WIN7		((1 << 16) | (1))
#define VERSION_WIN8		((2 << 16) | (4))
#define VERSION_WIN8_1		((3 << 16) | (0))
#define VERSION_WIN10		((4 << 16) | (0))
#define VERSION_WIN10_V5	((5 << 16) | (0))

#define VERSION_INVAL -1

#define VERSION_CURRENT VERSION_WIN10_V5
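
/*
 * Example (illustrative only): splitting a negotiated version back
 * into its major/minor parts.
 *
 *	u16 major = vmbus_proto_version >> 16;	 // 5 for VERSION_WIN10_V5
 *	u16 minor = vmbus_proto_version & 0xffff; // 0 for VERSION_WIN10_V5
 */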

/* Make the maximum size of a pipe payload 16K */
#define MAX_PIPE_DATA_PAYLOAD		(sizeof(u8) * 16384)

/* Define PipeMode values. */
#define VMBUS_PIPE_TYPE_BYTE		0x00000000
#define VMBUS_PIPE_TYPE_MESSAGE		0x00000004

/* The size of the user defined data buffer for non-pipe offers. */
#define MAX_USER_DEFINED_BYTES		120

/* The size of the user defined data buffer for pipe offers. */
#define MAX_PIPE_USER_DEFINED_BYTES	116

/*
 * At the center of the Channel Management library is the Channel Offer. This
 * struct contains the fundamental information about an offer.
 */
struct vmbus_channel_offer {
	guid_t if_type;
	guid_t if_instance;

	/*
	 * These two fields are not currently used.
	 */
	u64 reserved1;
	u64 reserved2;

	u16 chn_flags;
	u16 mmio_megabytes;		/* in megabytes (1024 * 1024 bytes) */

	union {
		/* Non-pipes: The user has MAX_USER_DEFINED_BYTES bytes. */
		struct {
			unsigned char user_def[MAX_USER_DEFINED_BYTES];
		} std;

		/*
		 * Pipes:
		 * The following structure is an integrated pipe protocol, which
		 * is implemented on top of standard user-defined data. Pipe
		 * clients have MAX_PIPE_USER_DEFINED_BYTES left for their own
		 * use.
		 */
		struct {
			u32  pipe_mode;
			unsigned char user_def[MAX_PIPE_USER_DEFINED_BYTES];
		} pipe;
	} u;
	/*
	 * The sub_channel_index is defined in win8.
	 */
	u16 sub_channel_index;
	u16 reserved3;
} __packed;

/* Server Flags */
#define VMBUS_CHANNEL_ENUMERATE_DEVICE_INTERFACE	1
#define VMBUS_CHANNEL_SERVER_SUPPORTS_TRANSFER_PAGES	2
#define VMBUS_CHANNEL_SERVER_SUPPORTS_GPADLS		4
#define VMBUS_CHANNEL_NAMED_PIPE_MODE			0x10
#define VMBUS_CHANNEL_LOOPBACK_OFFER			0x100
#define VMBUS_CHANNEL_PARENT_OFFER			0x200
#define VMBUS_CHANNEL_REQUEST_MONITORED_NOTIFICATION	0x400
#define VMBUS_CHANNEL_TLNPI_PROVIDER_OFFER		0x2000

struct vmpacket_descriptor {
	u16 type;
	u16 offset8;
	u16 len8;
	u16 flags;
	u64 trans_id;
} __packed;

struct vmpacket_header {
	u32 prev_pkt_start_offset;
	struct vmpacket_descriptor descriptor;
} __packed;

struct vmtransfer_page_range {
	u32 byte_count;
	u32 byte_offset;
} __packed;

struct vmtransfer_page_packet_header {
	struct vmpacket_descriptor d;
	u16 xfer_pageset_id;
	u8  sender_owns_set;
	u8 reserved;
	u32 range_cnt;
	struct vmtransfer_page_range ranges[1];
} __packed;

struct vmgpadl_packet_header {
	struct vmpacket_descriptor d;
	u32 gpadl;
	u32 reserved;
} __packed;

struct vmadd_remove_transfer_page_set {
	struct vmpacket_descriptor d;
	u32 gpadl;
	u16 xfer_pageset_id;
	u16 reserved;
} __packed;

/*
 * This structure defines a range in guest physical space that can be made to
 * look virtually contiguous.
 */
struct gpa_range {
	u32 byte_count;
	u32 byte_offset;
	u64 pfn_array[0];
};

/*
 * This is the format for an Establish Gpadl packet, which contains a handle by
 * which this GPADL will be known and a set of GPA ranges associated with it.
 * This can be converted to an MDL by the guest OS.  If there are multiple GPA
 * ranges, then the resulting MDL will be "chained," representing multiple VA
 * ranges.
 */
struct vmestablish_gpadl {
	struct vmpacket_descriptor d;
	u32 gpadl;
	u32 range_cnt;
	struct gpa_range range[1];
} __packed;

/*
 * This is the format for a Teardown Gpadl packet, which indicates that the
 * GPADL handle in the Establish Gpadl packet will never be referenced again.
 */
struct vmteardown_gpadl {
	struct vmpacket_descriptor d;
	u32 gpadl;
	u32 reserved;	/* for alignment to an 8-byte boundary */
} __packed;

/*
 * This is the format for a GPA-Direct packet, which contains a set of GPA
 * ranges, in addition to commands and/or data.
 */
struct vmdata_gpa_direct {
	struct vmpacket_descriptor d;
	u32 reserved;
	u32 range_cnt;
	struct gpa_range range[1];
} __packed;

/* This is the format for an Additional Data packet. */
struct vmadditional_data {
	struct vmpacket_descriptor d;
	u64 total_bytes;
	u32 offset;
	u32 byte_cnt;
	unsigned char data[1];
} __packed;

union vmpacket_largest_possible_header {
	struct vmpacket_descriptor simple_hdr;
	struct vmtransfer_page_packet_header xfer_page_hdr;
	struct vmgpadl_packet_header gpadl_hdr;
	struct vmadd_remove_transfer_page_set add_rm_xfer_page_hdr;
	struct vmestablish_gpadl establish_gpadl_hdr;
	struct vmteardown_gpadl teardown_gpadl_hdr;
	struct vmdata_gpa_direct data_gpa_direct_hdr;
};

#define VMPACKET_DATA_START_ADDRESS(__packet)	\
	(void *)(((unsigned char *)__packet) +	\
	 ((struct vmpacket_descriptor *)__packet)->offset8 * 8)

#define VMPACKET_DATA_LENGTH(__packet)		\
	((((struct vmpacket_descriptor *)__packet)->len8 -	\
	  ((struct vmpacket_descriptor *)__packet)->offset8) * 8)

#define VMPACKET_TRANSFER_MODE(__packet)	\
	(((struct vmpacket_descriptor *)__packet)->type)
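
/*
 * Worked example (illustrative): offset8 and len8 count 8-byte units.
 * A descriptor with offset8 = 2 and len8 = 4 therefore has its payload
 * at byte offset 2 * 8 = 16 and a payload length of (4 - 2) * 8 = 16
 * bytes. The hv_pkt_data()/hv_pkt_datalen() helpers near the end of
 * this header compute the same values.
 */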

enum vmbus_packet_type {
	VM_PKT_INVALID				= 0x0,
	VM_PKT_SYNCH				= 0x1,
	VM_PKT_ADD_XFER_PAGESET			= 0x2,
	VM_PKT_RM_XFER_PAGESET			= 0x3,
	VM_PKT_ESTABLISH_GPADL			= 0x4,
	VM_PKT_TEARDOWN_GPADL			= 0x5,
	VM_PKT_DATA_INBAND			= 0x6,
	VM_PKT_DATA_USING_XFER_PAGES		= 0x7,
	VM_PKT_DATA_USING_GPADL			= 0x8,
	VM_PKT_DATA_USING_GPA_DIRECT		= 0x9,
	VM_PKT_CANCEL_REQUEST			= 0xa,
	VM_PKT_COMP				= 0xb,
	VM_PKT_DATA_USING_ADDITIONAL_PKT	= 0xc,
	VM_PKT_ADDITIONAL_DATA			= 0xd
};

#define VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED	1


/* Version 1 messages */
enum vmbus_channel_message_type {
	CHANNELMSG_INVALID			=  0,
	CHANNELMSG_OFFERCHANNEL			=  1,
	CHANNELMSG_RESCIND_CHANNELOFFER		=  2,
	CHANNELMSG_REQUESTOFFERS		=  3,
	CHANNELMSG_ALLOFFERS_DELIVERED		=  4,
	CHANNELMSG_OPENCHANNEL			=  5,
	CHANNELMSG_OPENCHANNEL_RESULT		=  6,
	CHANNELMSG_CLOSECHANNEL			=  7,
	CHANNELMSG_GPADL_HEADER			=  8,
	CHANNELMSG_GPADL_BODY			=  9,
	CHANNELMSG_GPADL_CREATED		= 10,
	CHANNELMSG_GPADL_TEARDOWN		= 11,
	CHANNELMSG_GPADL_TORNDOWN		= 12,
	CHANNELMSG_RELID_RELEASED		= 13,
	CHANNELMSG_INITIATE_CONTACT		= 14,
	CHANNELMSG_VERSION_RESPONSE		= 15,
	CHANNELMSG_UNLOAD			= 16,
	CHANNELMSG_UNLOAD_RESPONSE		= 17,
	CHANNELMSG_18				= 18,
	CHANNELMSG_19				= 19,
	CHANNELMSG_20				= 20,
	CHANNELMSG_TL_CONNECT_REQUEST		= 21,
	CHANNELMSG_COUNT
};

struct vmbus_channel_message_header {
	enum vmbus_channel_message_type msgtype;
	u32 padding;
} __packed;

/* Query VMBus Version parameters */
struct vmbus_channel_query_vmbus_version {
	struct vmbus_channel_message_header header;
	u32 version;
} __packed;

/* VMBus Version Supported parameters */
struct vmbus_channel_version_supported {
	struct vmbus_channel_message_header header;
	u8 version_supported;
} __packed;

/* Offer Channel parameters */
struct vmbus_channel_offer_channel {
	struct vmbus_channel_message_header header;
	struct vmbus_channel_offer offer;
	u32 child_relid;
	u8 monitorid;
	/*
	 * win7 and beyond split this field into a bit field.
	 */
	u8 monitor_allocated:1;
	u8 reserved:7;
	/*
	 * These are new fields added in win7 and later.
	 * Do not access these fields without checking the
	 * negotiated protocol.
	 *
	 * If "is_dedicated_interrupt" is set, we must not set the
	 * associated bit in the channel bitmap while sending the
	 * interrupt to the host.
	 *
	 * connection_id is to be used in signaling the host.
	 */
	u16 is_dedicated_interrupt:1;
	u16 reserved1:15;
	u32 connection_id;
} __packed;

/* Rescind Offer parameters */
struct vmbus_channel_rescind_offer {
	struct vmbus_channel_message_header header;
	u32 child_relid;
} __packed;

static inline u32
hv_ringbuffer_pending_size(const struct hv_ring_buffer_info *rbi)
{
	return rbi->ring_buffer->pending_send_sz;
}

/*
 * Request Offer -- no parameters, SynIC message contains the partition ID
 * Set Snoop -- no parameters, SynIC message contains the partition ID
 * Clear Snoop -- no parameters, SynIC message contains the partition ID
 * All Offers Delivered -- no parameters, SynIC message contains the partition
 *		           ID
 * Flush Client -- no parameters, SynIC message contains the partition ID
 */

/* Open Channel parameters */
struct vmbus_channel_open_channel {
	struct vmbus_channel_message_header header;

	/* Identifies the specific VMBus channel that is being opened. */
	u32 child_relid;

	/* ID making a particular open request at a channel offer unique. */
	u32 openid;

	/* GPADL for the channel's ring buffer. */
	u32 ringbuffer_gpadlhandle;

	/*
	 * Starting with win8, this field is used to specify the target
	 * virtual processor on which to deliver the interrupt for the
	 * host-to-guest communication.
	 * Prior to win8, incoming channel interrupts would only be
	 * delivered on cpu 0. Setting this value to 0 preserves the
	 * earlier behavior.
	 */
	u32 target_vp;

	/*
	 * The upstream ring buffer begins at offset zero in the memory
	 * described by RingBufferGpadlHandle. The downstream ring buffer
	 * follows it at this offset (in pages).
	 */
	u32 downstream_ringbuffer_pageoffset;

	/* User-specific data to be passed along to the server endpoint. */
	unsigned char userdata[MAX_USER_DEFINED_BYTES];
} __packed;

/* Open Channel Result parameters */
struct vmbus_channel_open_result {
	struct vmbus_channel_message_header header;
	u32 child_relid;
	u32 openid;
	u32 status;
} __packed;

/* Close Channel parameters */
struct vmbus_channel_close_channel {
	struct vmbus_channel_message_header header;
	u32 child_relid;
} __packed;

/* Channel Message GPADL */
#define GPADL_TYPE_RING_BUFFER		1
#define GPADL_TYPE_SERVER_SAVE_AREA	2
#define GPADL_TYPE_TRANSACTION		8

/*
 * The number of PFNs in a GPADL message is defined by the number of
 * pages that would be spanned by ByteCount and ByteOffset.  If the
 * implied number of PFNs won't fit in this packet, there will be a
 * follow-up packet that contains more.
 */
struct vmbus_channel_gpadl_header {
	struct vmbus_channel_message_header header;
	u32 child_relid;
	u32 gpadl;
	u16 range_buflen;
	u16 rangecount;
	struct gpa_range range[0];
} __packed;

/* This is the follow-up packet that contains more PFNs. */
struct vmbus_channel_gpadl_body {
	struct vmbus_channel_message_header header;
	u32 msgnumber;
	u32 gpadl;
	u64 pfn[0];
} __packed;

struct vmbus_channel_gpadl_created {
	struct vmbus_channel_message_header header;
	u32 child_relid;
	u32 gpadl;
	u32 creation_status;
} __packed;

struct vmbus_channel_gpadl_teardown {
	struct vmbus_channel_message_header header;
	u32 child_relid;
	u32 gpadl;
} __packed;

struct vmbus_channel_gpadl_torndown {
	struct vmbus_channel_message_header header;
	u32 gpadl;
} __packed;

struct vmbus_channel_relid_released {
	struct vmbus_channel_message_header header;
	u32 child_relid;
} __packed;

struct vmbus_channel_initiate_contact {
	struct vmbus_channel_message_header header;
	u32 vmbus_version_requested;
	u32 target_vcpu; /* The VCPU the host should respond to */
	union {
		u64 interrupt_page;
		struct {
			u8	msg_sint;
			u8	padding1[3];
			u32	padding2;
		};
	};
	u64 monitor_page1;
	u64 monitor_page2;
} __packed;

/* Hyper-V socket: guest's connect()-ing to host */
struct vmbus_channel_tl_connect_request {
	struct vmbus_channel_message_header header;
	guid_t guest_endpoint_id;
	guid_t host_service_id;
} __packed;

struct vmbus_channel_version_response {
	struct vmbus_channel_message_header header;
	u8 version_supported;

	u8 connection_state;
	u16 padding;

	/*
	 * On new hosts that support VMBus protocol 5.0, we must use
	 * VMBUS_MESSAGE_CONNECTION_ID_4 for the Initiate Contact Message,
	 * and for subsequent messages, we must use the Message Connection ID
	 * field in the host-returned Version Response Message.
	 *
	 * On old hosts, we should always use VMBUS_MESSAGE_CONNECTION_ID (1).
	 */
	u32 msg_conn_id;
} __packed;

enum vmbus_channel_state {
	CHANNEL_OFFER_STATE,
	CHANNEL_OPENING_STATE,
	CHANNEL_OPEN_STATE,
	CHANNEL_OPENED_STATE,
};

/*
 * Represents each channel msg on the vmbus connection. This is a
 * variable-size data structure depending on the msg type itself.
 */
struct vmbus_channel_msginfo {
	/* Bookkeeping stuff */
	struct list_head msglistentry;

	/* So far, this is only used to handle gpadl body messages */
	struct list_head submsglist;

	/* Synchronize the request/response if needed */
	struct completion  waitevent;
	struct vmbus_channel *waiting_channel;
	union {
		struct vmbus_channel_version_supported version_supported;
		struct vmbus_channel_open_result open_result;
		struct vmbus_channel_gpadl_torndown gpadl_torndown;
		struct vmbus_channel_gpadl_created gpadl_created;
		struct vmbus_channel_version_response version_response;
	} response;

	u32 msgsize;
	/*
	 * The channel message that goes out on the "wire".
	 * It will contain at minimum the VMBUS_CHANNEL_MESSAGE_HEADER header.
	 */
	unsigned char msg[0];
};

struct vmbus_close_msg {
	struct vmbus_channel_msginfo info;
	struct vmbus_channel_close_channel msg;
};

/* Define connection identifier type. */
union hv_connection_id {
	u32 asu32;
	struct {
		u32 id:24;
		u32 reserved:8;
	} u;
};

enum hv_numa_policy {
	HV_BALANCED = 0,
	HV_LOCALIZED,
};

enum vmbus_device_type {
	HV_IDE = 0,
	HV_SCSI,
	HV_FC,
	HV_NIC,
	HV_ND,
	HV_PCIE,
	HV_FB,
	HV_KBD,
	HV_MOUSE,
	HV_KVP,
	HV_TS,
	HV_HB,
	HV_SHUTDOWN,
	HV_FCOPY,
	HV_BACKUP,
	HV_DM,
	HV_UNKNOWN,
};

struct vmbus_device {
	u16  dev_type;
	guid_t guid;
	bool perf_device;
};

struct vmbus_channel {
	struct list_head listentry;

	struct hv_device *device_obj;

	enum vmbus_channel_state state;

	struct vmbus_channel_offer_channel offermsg;
	/*
	 * These are based on the OfferMsg.MonitorId.
	 * Saved here for easy access.
	 */
	u8 monitor_grp;
	u8 monitor_bit;

	bool rescind; /* got rescind msg */
	struct completion rescind_event;

	u32 ringbuffer_gpadlhandle;

	/* Allocated memory for ring buffer */
	struct page *ringbuffer_page;
	u32 ringbuffer_pagecount;
	u32 ringbuffer_send_offset;
	struct hv_ring_buffer_info outbound;	/* send to parent */
	struct hv_ring_buffer_info inbound;	/* receive from parent */

	struct vmbus_close_msg close_msg;

	/* Statistics */
	u64	interrupts;	/* Host to Guest interrupts */
	u64	sig_events;	/* Guest to Host events */

	/*
	 * Guest to host interrupts caused by the outbound ring buffer changing
	 * from empty to not empty.
	 */
	u64 intr_out_empty;

	/*
	 * Indicates that a full outbound ring buffer was encountered. The flag
	 * is set to true when a full outbound ring buffer is encountered and
	 * set to false when a write to the outbound ring buffer is completed.
	 */
	bool out_full_flag;

	/* Channel callback is invoked in softirq context */
	struct tasklet_struct callback_event;
	void (*onchannel_callback)(void *context);
	void *channel_callback_context;

	/*
	 * A channel can be marked for one of three modes of reading:
	 *   BATCHED - callback called from tasklet and should read
	 *            the channel until empty. Interrupts from the host
	 *            are masked while the read is in progress (default).
	 *   DIRECT - callback called from tasklet (softirq).
	 *   ISR - callback called in interrupt context and must
	 *         invoke its own deferred processing.
	 *         Host interrupts are disabled and must be re-enabled
	 *         when the ring is empty.
	 */
	enum hv_callback_mode {
		HV_CALL_BATCHED,
		HV_CALL_DIRECT,
		HV_CALL_ISR
	} callback_mode;

	bool is_dedicated_interrupt;
	u64 sig_event;

	/*
	 * Starting with win8, this field is used to specify the target
	 * virtual processor on which to deliver the interrupt for the
	 * host-to-guest communication.
	 * Prior to win8, incoming channel interrupts would only be
	 * delivered on cpu 0. Setting this value to 0 preserves the
	 * earlier behavior.
	 */
	u32 target_vp;
	/* The corresponding CPUID in the guest */
	u32 target_cpu;
	/*
	 * State to manage the CPU affiliation of channels.
	 */
	struct cpumask alloced_cpus_in_node;
	int numa_node;
	/*
	 * Support for sub-channels. For high performance devices,
	 * it will be useful to have multiple sub-channels to support
	 * a scalable communication infrastructure with the host.
	 * The support for sub-channels is implemented as an extension
	 * of the current infrastructure.
	 * The initial offer is considered the primary channel and this
	 * offer message will indicate if the host supports sub-channels.
	 * The guest is free to ask for sub-channels to be offered and can
	 * open these sub-channels as a normal "primary" channel. However,
	 * all sub-channels will have the same type and instance guids as the
	 * primary channel. Requests sent on a given channel will result in a
	 * response on the same channel.
	 */

	/*
	 * Sub-channel creation callback. This callback will be called in
	 * process context when a sub-channel offer is received from the host.
	 * The guest can open the sub-channel in the context of this callback.
	 */
	void (*sc_creation_callback)(struct vmbus_channel *new_sc);

	/*
	 * Channel rescind callback. Some channels (the hvsock ones) need to
	 * register a callback which is invoked in vmbus_onoffer_rescind().
	 */
	void (*chn_rescind_callback)(struct vmbus_channel *channel);

	/*
	 * The spinlock to protect the structure. It is being used to protect
	 * test-and-set access to various attributes of the structure as well
	 * as all sc_list operations.
	 */
	spinlock_t lock;
	/*
	 * All sub-channels of a primary channel are linked here.
	 */
	struct list_head sc_list;
	/*
	 * The primary channel this sub-channel belongs to.
	 * This will be NULL for the primary channel.
	 */
	struct vmbus_channel *primary_channel;
	/*
	 * Support per-channel state for use by vmbus drivers.
	 */
	void *per_channel_state;
	/*
	 * To support per-cpu lookup mapping of relid to channel,
	 * link up channels based on their CPU affinity.
	 */
	struct list_head percpu_list;

	/*
	 * Defer freeing channel until after all cpus have
	 * gone through a grace period.
	 */
	struct rcu_head rcu;

	/*
	 * For sysfs per-channel properties.
	 */
	struct kobject			kobj;

	/*
	 * For performance critical channels (storage, networking
	 * etc.), Hyper-V has a mechanism to enhance the throughput
	 * at the expense of latency:
	 * When the host is to be signaled, we just set a bit in a shared page
	 * and this bit will be inspected by the hypervisor within a certain
	 * window and if the bit is set, the host will be signaled. The window
	 * of time is the monitor latency - currently around 100 usecs. This
	 * mechanism improves throughput by:
	 *
	 * A) Making the host more efficient - each time it wakes up,
	 *    it can potentially process a larger batch of packets. The
	 *    monitor latency allows a batch to build up.
	 * B) By deferring the hypercall to signal, we will also minimize
	 *    the interrupts.
	 *
	 * Clearly, these optimizations improve throughput at the expense of
	 * latency. Furthermore, since the channel is shared for both
	 * control and data messages, control messages currently suffer
	 * unnecessary latency, adversely impacting performance and boot
	 * time. To fix this issue, permit tagging the channel as being
	 * in "low latency" mode. In this mode, we will bypass the monitor
	 * mechanism.
	 */
	bool low_latency;

	/*
	 * NUMA distribution policy:
	 * We support two policies:
	 * 1) Balanced: Here all performance critical channels are
	 *    distributed evenly amongst all the NUMA nodes.
	 *    This policy will be the default policy.
	 * 2) Localized: All channels of a given instance of a
	 *    performance critical service will be assigned CPUs
	 *    within a selected NUMA node.
	 */
	enum hv_numa_policy affinity_policy;

	bool probe_done;

	/*
	 * We must offload the handling of the primary/sub channels
	 * from the single-threaded vmbus_connection.work_queue to
	 * two different workqueues, otherwise we can block
	 * vmbus_connection.work_queue and hang: see vmbus_process_offer().
	 */
	struct work_struct add_channel_work;

	/*
	 * Guest to host interrupts caused by the inbound ring buffer changing
	 * from full to not full while a packet is waiting.
	 */
	u64 intr_in_full;

	/*
	 * The total number of write operations that encountered a full
	 * outbound ring buffer.
	 */
	u64 out_full_total;

	/*
	 * The number of write operations that were the first to encounter a
	 * full outbound ring buffer.
	 */
	u64 out_full_first;
};

static inline bool is_hvsock_channel(const struct vmbus_channel *c)
{
	return !!(c->offermsg.offer.chn_flags &
		  VMBUS_CHANNEL_TLNPI_PROVIDER_OFFER);
}

static inline void set_channel_affinity_state(struct vmbus_channel *c,
					      enum hv_numa_policy policy)
{
	c->affinity_policy = policy;
}

static inline void set_channel_read_mode(struct vmbus_channel *c,
					enum hv_callback_mode mode)
{
	c->callback_mode = mode;
}

static inline void set_per_channel_state(struct vmbus_channel *c, void *s)
{
	c->per_channel_state = s;
}

static inline void *get_per_channel_state(struct vmbus_channel *c)
{
	return c->per_channel_state;
}
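
/*
 * Usage sketch (hypothetical driver, illustrative only): stash private
 * per-channel state at open time and retrieve it in the channel
 * callback. "struct my_chan_state" is an assumed driver type.
 *
 *	struct my_chan_state *s = kzalloc(sizeof(*s), GFP_KERNEL);
 *	set_per_channel_state(channel, s);
 *	...
 *	struct my_chan_state *s = get_per_channel_state(channel);
 */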

static inline void set_channel_pending_send_size(struct vmbus_channel *c,
						 u32 size)
{
	unsigned long flags;

	if (size) {
		spin_lock_irqsave(&c->outbound.ring_lock, flags);
		++c->out_full_total;

		if (!c->out_full_flag) {
			++c->out_full_first;
			c->out_full_flag = true;
		}
		spin_unlock_irqrestore(&c->outbound.ring_lock, flags);
	} else {
		c->out_full_flag = false;
	}

	c->outbound.ring_buffer->pending_send_sz = size;
}
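
/*
 * Usage sketch (illustrative, assumes the host set feat_pending_send_sz
 * on the outbound ring): a writer that finds the ring too full can ask
 * for a host interrupt once "needed" bytes are free, instead of polling:
 *
 *	if (hv_get_bytes_to_write(&channel->outbound) < needed) {
 *		set_channel_pending_send_size(channel, needed);
 *		return -EAGAIN;		// retry when the host signals
 *	}
 */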

static inline void set_low_latency_mode(struct vmbus_channel *c)
{
	c->low_latency = true;
}

static inline void clear_low_latency_mode(struct vmbus_channel *c)
{
	c->low_latency = false;
}

void vmbus_onmessage(void *context);

int vmbus_request_offers(void);

/*
 * APIs for managing sub-channels.
 */

void vmbus_set_sc_create_callback(struct vmbus_channel *primary_channel,
			void (*sc_cr_cb)(struct vmbus_channel *new_sc));

void vmbus_set_chn_rescind_callback(struct vmbus_channel *channel,
		void (*chn_rescind_cb)(struct vmbus_channel *));

/*
 * Check if sub-channels have already been offered. This API will be useful
 * when the driver is unloaded after establishing sub-channels. In this case,
 * when the driver is re-loaded, the driver would have to check if the
 * sub-channels have already been established before attempting to request
 * the creation of sub-channels.
 * This function returns TRUE to indicate that sub-channels have already been
 * created.
 * This function should be invoked after setting the callback function for
 * sub-channel creation.
 */
bool vmbus_are_subchannels_present(struct vmbus_channel *primary);

/* The format must be the same as struct vmdata_gpa_direct */
struct vmbus_channel_packet_page_buffer {
	u16 type;
	u16 dataoffset8;
	u16 length8;
	u16 flags;
	u64 transactionid;
	u32 reserved;
	u32 rangecount;
	struct hv_page_buffer range[MAX_PAGE_BUFFER_COUNT];
} __packed;

/* The format must be the same as struct vmdata_gpa_direct */
struct vmbus_channel_packet_multipage_buffer {
	u16 type;
	u16 dataoffset8;
	u16 length8;
	u16 flags;
	u64 transactionid;
	u32 reserved;
	u32 rangecount;		/* Always 1 in this case */
	struct hv_multipage_buffer range;
} __packed;

/* The format must be the same as struct vmdata_gpa_direct */
struct vmbus_packet_mpb_array {
	u16 type;
	u16 dataoffset8;
	u16 length8;
	u16 flags;
	u64 transactionid;
	u32 reserved;
	u32 rangecount;		/* Always 1 in this case */
	struct hv_mpb_array range;
} __packed;

int vmbus_alloc_ring(struct vmbus_channel *channel,
		     u32 send_size, u32 recv_size);
void vmbus_free_ring(struct vmbus_channel *channel);

int vmbus_connect_ring(struct vmbus_channel *channel,
		       void (*onchannel_callback)(void *context),
		       void *context);
int vmbus_disconnect_ring(struct vmbus_channel *channel);

extern int vmbus_open(struct vmbus_channel *channel,
			    u32 send_ringbuffersize,
			    u32 recv_ringbuffersize,
			    void *userdata,
			    u32 userdatalen,
			    void (*onchannel_callback)(void *context),
			    void *context);
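
/*
 * Example (hypothetical driver, illustrative only): opening a channel
 * with 16 KiB send/receive rings (4 pages each on 4 KiB pages) and a
 * channel callback. "my_onchannel_cb" and "dev" are assumed names.
 *
 *	ret = vmbus_open(dev->channel, 4 * PAGE_SIZE, 4 * PAGE_SIZE,
 *			 NULL, 0, my_onchannel_cb, dev);
 *	if (ret)
 *		return ret;
 */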

extern void vmbus_close(struct vmbus_channel *channel);

extern int vmbus_sendpacket(struct vmbus_channel *channel,
				  void *buffer,
				  u32 bufferLen,
				  u64 requestid,
				  enum vmbus_packet_type type,
				  u32 flags);

extern int vmbus_sendpacket_pagebuffer(struct vmbus_channel *channel,
					    struct hv_page_buffer pagebuffers[],
					    u32 pagecount,
					    void *buffer,
					    u32 bufferlen,
					    u64 requestid);

extern int vmbus_sendpacket_mpb_desc(struct vmbus_channel *channel,
				     struct vmbus_packet_mpb_array *mpb,
				     u32 desc_size,
				     void *buffer,
				     u32 bufferlen,
				     u64 requestid);

extern int vmbus_establish_gpadl(struct vmbus_channel *channel,
				      void *kbuffer,
				      u32 size,
				      u32 *gpadl_handle);

extern int vmbus_teardown_gpadl(struct vmbus_channel *channel,
				     u32 gpadl_handle);

void vmbus_reset_channel_cb(struct vmbus_channel *channel);

extern int vmbus_recvpacket(struct vmbus_channel *channel,
				  void *buffer,
				  u32 bufferlen,
				  u32 *buffer_actual_len,
				  u64 *requestid);

extern int vmbus_recvpacket_raw(struct vmbus_channel *channel,
				     void *buffer,
				     u32 bufferlen,
				     u32 *buffer_actual_len,
				     u64 *requestid);


extern void vmbus_ontimer(unsigned long data);

/* Base driver object */
struct hv_driver {
	const char *name;

	/*
	 * An hvsock offer, which has a VMBUS_CHANNEL_TLNPI_PROVIDER_OFFER
	 * channel flag, actually doesn't mean a synthetic device because the
	 * offer's if_type/if_instance can change for every new hvsock
	 * connection.
	 *
	 * However, to facilitate the notification of new-offer/rescind-offer
	 * from the vmbus driver to the hvsock driver, we can handle an hvsock
	 * offer as a special vmbus device, and hence we need the below flag
	 * to indicate if the driver is the hvsock driver or not: we need to
	 * specially treat the hvsock offer & driver in vmbus_match().
	 */
	bool hvsock;

	/* the device type supported by this driver */
	guid_t dev_type;
	const struct hv_vmbus_device_id *id_table;

	struct device_driver driver;

	/* dynamic device GUID's */
	struct  {
		spinlock_t lock;
		struct list_head list;
	} dynids;

	int (*probe)(struct hv_device *, const struct hv_vmbus_device_id *);
	int (*remove)(struct hv_device *);
	void (*shutdown)(struct hv_device *);

};

/* Base device object */
struct hv_device {
	/* the device type id of this device */
	guid_t dev_type;

	/* the device instance id of this device */
	guid_t dev_instance;
	u16 vendor_id;
	u16 device_id;

	struct device device;
	char *driver_override; /* Driver name to force a match */

	struct vmbus_channel *channel;
	struct kset	     *channels_kset;
};


static inline struct hv_device *device_to_hv_device(struct device *d)
{
	return container_of(d, struct hv_device, device);
}

static inline struct hv_driver *drv_to_hv_drv(struct device_driver *d)
{
	return container_of(d, struct hv_driver, driver);
}

static inline void hv_set_drvdata(struct hv_device *dev, void *data)
{
	dev_set_drvdata(&dev->device, data);
}

static inline void *hv_get_drvdata(struct hv_device *dev)
{
	return dev_get_drvdata(&dev->device);
}

struct hv_ring_buffer_debug_info {
	u32 current_interrupt_mask;
	u32 current_read_index;
	u32 current_write_index;
	u32 bytes_avail_toread;
	u32 bytes_avail_towrite;
};


int hv_ringbuffer_get_debuginfo(struct hv_ring_buffer_info *ring_info,
				struct hv_ring_buffer_debug_info *debug_info);

/* Vmbus interface */
#define vmbus_driver_register(driver)	\
	__vmbus_driver_register(driver, THIS_MODULE, KBUILD_MODNAME)
int __must_check __vmbus_driver_register(struct hv_driver *hv_driver,
					 struct module *owner,
					 const char *mod_name);
void vmbus_driver_unregister(struct hv_driver *hv_driver);
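
/*
 * Example (hypothetical driver, illustrative only): declaring an
 * hv_driver and registering it with the VMBus core. The GUID macros
 * below expand to ".guid = GUID_INIT(...)" designated initializers.
 *
 *	static const struct hv_vmbus_device_id my_id_table[] = {
 *		{ HV_NIC_GUID, },
 *		{ },
 *	};
 *
 *	static struct hv_driver my_drv = {
 *		.name		= "my_drv",
 *		.id_table	= my_id_table,
 *		.probe		= my_probe,
 *		.remove		= my_remove,
 *	};
 *
 *	ret = vmbus_driver_register(&my_drv);
 */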
1211 
1212 void vmbus_hvsock_device_unregister(struct vmbus_channel *channel);
1213 
1214 int vmbus_allocate_mmio(struct resource **new, struct hv_device *device_obj,
1215 			resource_size_t min, resource_size_t max,
1216 			resource_size_t size, resource_size_t align,
1217 			bool fb_overlap_ok);
1218 void vmbus_free_mmio(resource_size_t start, resource_size_t size);
1219 
1220 /*
1221  * GUID definitions of various offer types - services offered to the guest.
1222  */
1223 
1224 /*
1225  * Network GUID
1226  * {f8615163-df3e-46c5-913f-f2d2f965ed0e}
1227  */
1228 #define HV_NIC_GUID \
1229 	.guid = GUID_INIT(0xf8615163, 0xdf3e, 0x46c5, 0x91, 0x3f, \
1230 			  0xf2, 0xd2, 0xf9, 0x65, 0xed, 0x0e)
1231 
1232 /*
1233  * IDE GUID
1234  * {32412632-86cb-44a2-9b5c-50d1417354f5}
1235  */
1236 #define HV_IDE_GUID \
1237 	.guid = GUID_INIT(0x32412632, 0x86cb, 0x44a2, 0x9b, 0x5c, \
1238 			  0x50, 0xd1, 0x41, 0x73, 0x54, 0xf5)
1239 
1240 /*
1241  * SCSI GUID
1242  * {ba6163d9-04a1-4d29-b605-72e2ffb1dc7f}
1243  */
1244 #define HV_SCSI_GUID \
1245 	.guid = GUID_INIT(0xba6163d9, 0x04a1, 0x4d29, 0xb6, 0x05, \
1246 			  0x72, 0xe2, 0xff, 0xb1, 0xdc, 0x7f)
1247 
1248 /*
1249  * Shutdown GUID
1250  * {0e0b6031-5213-4934-818b-38d90ced39db}
1251  */
1252 #define HV_SHUTDOWN_GUID \
1253 	.guid = GUID_INIT(0x0e0b6031, 0x5213, 0x4934, 0x81, 0x8b, \
1254 			  0x38, 0xd9, 0x0c, 0xed, 0x39, 0xdb)
1255 
1256 /*
1257  * Time Synch GUID
1258  * {9527E630-D0AE-497b-ADCE-E80AB0175CAF}
1259  */
1260 #define HV_TS_GUID \
1261 	.guid = GUID_INIT(0x9527e630, 0xd0ae, 0x497b, 0xad, 0xce, \
1262 			  0xe8, 0x0a, 0xb0, 0x17, 0x5c, 0xaf)
1263 
1264 /*
1265  * Heartbeat GUID
1266  * {57164f39-9115-4e78-ab55-382f3bd5422d}
1267  */
1268 #define HV_HEART_BEAT_GUID \
1269 	.guid = GUID_INIT(0x57164f39, 0x9115, 0x4e78, 0xab, 0x55, \
1270 			  0x38, 0x2f, 0x3b, 0xd5, 0x42, 0x2d)
1271 
1272 /*
1273  * KVP GUID
1274  * {a9a0f4e7-5a45-4d96-b827-8a841e8c03e6}
1275  */
1276 #define HV_KVP_GUID \
1277 	.guid = GUID_INIT(0xa9a0f4e7, 0x5a45, 0x4d96, 0xb8, 0x27, \
1278 			  0x8a, 0x84, 0x1e, 0x8c, 0x03, 0xe6)
1279 
1280 /*
1281  * Dynamic memory GUID
1282  * {525074dc-8985-46e2-8057-a307dc18a502}
1283  */
1284 #define HV_DM_GUID \
1285 	.guid = GUID_INIT(0x525074dc, 0x8985, 0x46e2, 0x80, 0x57, \
1286 			  0xa3, 0x07, 0xdc, 0x18, 0xa5, 0x02)
1287 
1288 /*
1289  * Mouse GUID
1290  * {cfa8b69e-5b4a-4cc0-b98b-8ba1a1f3f95a}
1291  */
1292 #define HV_MOUSE_GUID \
1293 	.guid = GUID_INIT(0xcfa8b69e, 0x5b4a, 0x4cc0, 0xb9, 0x8b, \
1294 			  0x8b, 0xa1, 0xa1, 0xf3, 0xf9, 0x5a)
1295 
1296 /*
1297  * Keyboard GUID
1298  * {f912ad6d-2b17-48ea-bd65-f927a61c7684}
1299  */
1300 #define HV_KBD_GUID \
1301 	.guid = GUID_INIT(0xf912ad6d, 0x2b17, 0x48ea, 0xbd, 0x65, \
1302 			  0xf9, 0x27, 0xa6, 0x1c, 0x76, 0x84)
1303 
1304 /*
1305  * VSS (Backup/Restore) GUID
1306  */
1307 #define HV_VSS_GUID \
1308 	.guid = GUID_INIT(0x35fa2e29, 0xea23, 0x4236, 0x96, 0xae, \
1309 			  0x3a, 0x6e, 0xba, 0xcb, 0xa4, 0x40)
1310 /*
1311  * Synthetic Video GUID
1312  * {DA0A7802-E377-4aac-8E77-0558EB1073F8}
1313  */
1314 #define HV_SYNTHVID_GUID \
1315 	.guid = GUID_INIT(0xda0a7802, 0xe377, 0x4aac, 0x8e, 0x77, \
1316 			  0x05, 0x58, 0xeb, 0x10, 0x73, 0xf8)
1317 
1318 /*
1319  * Synthetic FC GUID
1320  * {2f9bcc4a-0069-4af3-b76b-6fd0be528cda}
1321  */
1322 #define HV_SYNTHFC_GUID \
1323 	.guid = GUID_INIT(0x2f9bcc4a, 0x0069, 0x4af3, 0xb7, 0x6b, \
1324 			  0x6f, 0xd0, 0xbe, 0x52, 0x8c, 0xda)
1325 
1326 /*
1327  * Guest File Copy Service
1328  * {34D14BE3-DEE4-41c8-9AE7-6B174977C192}
1329  */
1330 
1331 #define HV_FCOPY_GUID \
1332 	.guid = GUID_INIT(0x34d14be3, 0xdee4, 0x41c8, 0x9a, 0xe7, \
1333 			  0x6b, 0x17, 0x49, 0x77, 0xc1, 0x92)
1334 
1335 /*
1336  * NetworkDirect. This is the guest RDMA service.
1337  * {8c2eaf3d-32a7-4b09-ab99-bd1f1c86b501}
1338  */
1339 #define HV_ND_GUID \
1340 	.guid = GUID_INIT(0x8c2eaf3d, 0x32a7, 0x4b09, 0xab, 0x99, \
1341 			  0xbd, 0x1f, 0x1c, 0x86, 0xb5, 0x01)
1342 
1343 /*
1344  * PCI Express Pass Through
1345  * {44C4F61D-4444-4400-9D52-802E27EDE19F}
1346  */
1347 
1348 #define HV_PCIE_GUID \
1349 	.guid = GUID_INIT(0x44c4f61d, 0x4444, 0x4400, 0x9d, 0x52, \
1350 			  0x80, 0x2e, 0x27, 0xed, 0xe1, 0x9f)
1351 
1352 /*
1353  * Linux doesn't support the 3 devices: the first two are for
1354  * Automatic Virtual Machine Activation, and the third is for
1355  * Remote Desktop Virtualization.
1356  * {f8e65716-3cb3-4a06-9a60-1889c5cccab5}
1357  * {3375baf4-9e15-4b30-b765-67acb10d607b}
1358  * {276aacf4-ac15-426c-98dd-7521ad3f01fe}
1359  */
1360 
1361 #define HV_AVMA1_GUID \
1362 	.guid = GUID_INIT(0xf8e65716, 0x3cb3, 0x4a06, 0x9a, 0x60, \
1363 			  0x18, 0x89, 0xc5, 0xcc, 0xca, 0xb5)
1364 
1365 #define HV_AVMA2_GUID \
1366 	.guid = GUID_INIT(0x3375baf4, 0x9e15, 0x4b30, 0xb7, 0x65, \
1367 			  0x67, 0xac, 0xb1, 0x0d, 0x60, 0x7b)
1368 
1369 #define HV_RDV_GUID \
1370 	.guid = GUID_INIT(0x276aacf4, 0xac15, 0x426c, 0x98, 0xdd, \
1371 			  0x75, 0x21, 0xad, 0x3f, 0x01, 0xfe)
1372 
1373 /*
1374  * Common header for Hyper-V ICs
1375  */
1376 
1377 #define ICMSGTYPE_NEGOTIATE		0
1378 #define ICMSGTYPE_HEARTBEAT		1
1379 #define ICMSGTYPE_KVPEXCHANGE		2
1380 #define ICMSGTYPE_SHUTDOWN		3
1381 #define ICMSGTYPE_TIMESYNC		4
1382 #define ICMSGTYPE_VSS			5
1383 
1384 #define ICMSGHDRFLAG_TRANSACTION	1
1385 #define ICMSGHDRFLAG_REQUEST		2
1386 #define ICMSGHDRFLAG_RESPONSE		4
1387 
1388 
1389 /*
1390  * While we want to handle util services as regular devices,
1391  * there is only one instance of each of these services; so
1392  * we statically allocate the service specific state.
1393  */
1394 
1395 struct hv_util_service {
1396 	u8 *recv_buffer;
1397 	void *channel;
1398 	void (*util_cb)(void *);
1399 	int (*util_init)(struct hv_util_service *);
1400 	void (*util_deinit)(void);
1401 };
1402 
1403 struct vmbuspipe_hdr {
1404 	u32 flags;
1405 	u32 msgsize;
1406 } __packed;
1407 
1408 struct ic_version {
1409 	u16 major;
1410 	u16 minor;
1411 } __packed;
1412 
1413 struct icmsg_hdr {
1414 	struct ic_version icverframe;
1415 	u16 icmsgtype;
1416 	struct ic_version icvermsg;
1417 	u16 icmsgsize;
1418 	u32 status;
1419 	u8 ictransaction_id;
1420 	u8 icflags;
1421 	u8 reserved[2];
1422 } __packed;
1423 
1424 struct icmsg_negotiate {
1425 	u16 icframe_vercnt;
1426 	u16 icmsg_vercnt;
1427 	u32 reserved;
1428 	struct ic_version icversion_data[1]; /* any size array */
1429 } __packed;
1430 
1431 struct shutdown_msg_data {
1432 	u32 reason_code;
1433 	u32 timeout_seconds;
1434 	u32 flags;
1435 	u8  display_message[2048];
1436 } __packed;
1437 
1438 struct heartbeat_msg_data {
1439 	u64 seq_num;
1440 	u32 reserved[8];
1441 } __packed;
1442 
1443 /* Time Sync IC defs */
1444 #define ICTIMESYNCFLAG_PROBE	0
1445 #define ICTIMESYNCFLAG_SYNC	1
1446 #define ICTIMESYNCFLAG_SAMPLE	2
1447 
1448 #ifdef __x86_64__
1449 #define WLTIMEDELTA	116444736000000000L	/* in 100ns unit */
1450 #else
1451 #define WLTIMEDELTA	116444736000000000LL
1452 #endif
1453 
1454 struct ictimesync_data {
1455 	u64 parenttime;
1456 	u64 childtime;
1457 	u64 roundtriptime;
1458 	u8 flags;
1459 } __packed;
1460 
1461 struct ictimesync_ref_data {
1462 	u64 parenttime;
1463 	u64 vmreferencetime;
1464 	u8 flags;
1465 	char leapflags;
1466 	char stratum;
1467 	u8 reserved[3];
1468 } __packed;
1469 
1470 struct hyperv_service_callback {
1471 	u8 msg_type;
1472 	char *log_msg;
1473 	guid_t data;
1474 	struct vmbus_channel *channel;
1475 	void (*callback)(void *context);
1476 };
1477 
1478 #define MAX_SRV_VER	0x7ffffff
1479 extern bool vmbus_prep_negotiate_resp(struct icmsg_hdr *icmsghdrp, u8 *buf,
1480 				const int *fw_version, int fw_vercnt,
1481 				const int *srv_version, int srv_vercnt,
1482 				int *nego_fw_version, int *nego_srv_version);
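
/*
 * Usage sketch (illustrative only): when an ICMSGTYPE_NEGOTIATE message
 * arrives on a util channel, pass the supported framework and service
 * version lists and let the helper pick the highest match. The arrays
 * "fw_versions"/"srv_versions" and their counts are assumed names for
 * the caller's supported-version tables.
 *
 *	if (icmsghdrp->icmsgtype == ICMSGTYPE_NEGOTIATE)
 *		vmbus_prep_negotiate_resp(icmsghdrp, buf,
 *					  fw_versions, FW_VER_COUNT,
 *					  srv_versions, SRV_VER_COUNT,
 *					  NULL, &srv_version);
 */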

void hv_process_channel_removal(struct vmbus_channel *channel);

void vmbus_setevent(struct vmbus_channel *channel);
/*
 * Negotiated version with the Host.
 */

extern __u32 vmbus_proto_version;

int vmbus_send_tl_connect_request(const guid_t *shv_guest_servie_id,
				  const guid_t *shv_host_servie_id);
void vmbus_set_event(struct vmbus_channel *channel);

/* Get the start of the ring buffer. */
static inline void *
hv_get_ring_buffer(const struct hv_ring_buffer_info *ring_info)
{
	return ring_info->ring_buffer->buffer;
}

/*
 * Mask off host interrupt callback notifications
 */
static inline void hv_begin_read(struct hv_ring_buffer_info *rbi)
{
	rbi->ring_buffer->interrupt_mask = 1;

	/* make sure mask update is not reordered */
	virt_mb();
}

/*
 * Re-enable host callback and return number of outstanding bytes
 */
static inline u32 hv_end_read(struct hv_ring_buffer_info *rbi)
{

	rbi->ring_buffer->interrupt_mask = 0;

	/* make sure mask update is not reordered */
	virt_mb();

	/*
	 * Now check to see if the ring buffer is still empty.
	 * If it is not, we raced and we need to process new
	 * incoming messages.
	 */
	return hv_get_bytes_to_read(rbi);
}
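
/*
 * Typical mask/unmask pattern (illustrative only): mask host interrupts,
 * drain the ring, unmask, then re-check for packets that arrived in the
 * window between draining and unmasking.
 *
 *	hv_begin_read(rbi);
 *	for (;;) {
 *		... read and process all available packets ...
 *		if (hv_end_read(rbi) == 0)
 *			break;
 *		hv_begin_read(rbi);
 *	}
 */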

/*
 * An API to support in-place processing of incoming VMBUS packets.
 */

/* Get data payload associated with descriptor */
static inline void *hv_pkt_data(const struct vmpacket_descriptor *desc)
{
	return (void *)((unsigned long)desc + (desc->offset8 << 3));
}

/* Get data size associated with descriptor */
static inline u32 hv_pkt_datalen(const struct vmpacket_descriptor *desc)
{
	return (desc->len8 << 3) - (desc->offset8 << 3);
}


struct vmpacket_descriptor *
hv_pkt_iter_first(struct vmbus_channel *channel);

struct vmpacket_descriptor *
__hv_pkt_iter_next(struct vmbus_channel *channel,
		   const struct vmpacket_descriptor *pkt);

void hv_pkt_iter_close(struct vmbus_channel *channel);

/*
 * Get the next packet descriptor from the iterator.
 * If at the end of the list, return NULL and update the host.
 */
static inline struct vmpacket_descriptor *
hv_pkt_iter_next(struct vmbus_channel *channel,
		 const struct vmpacket_descriptor *pkt)
{
	struct vmpacket_descriptor *nxt;

	nxt = __hv_pkt_iter_next(channel, pkt);
	if (!nxt)
		hv_pkt_iter_close(channel);

	return nxt;
}

#define foreach_vmbus_pkt(pkt, channel) \
	for (pkt = hv_pkt_iter_first(channel); pkt; \
	    pkt = hv_pkt_iter_next(channel, pkt))
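
/*
 * Example (hypothetical channel callback, illustrative only): iterate
 * over all pending packets in place using the macro above.
 *
 *	static void my_onchannel_cb(void *ctx)
 *	{
 *		struct vmbus_channel *chan = ctx;
 *		struct vmpacket_descriptor *pkt;
 *
 *		foreach_vmbus_pkt(pkt, chan) {
 *			void *data = hv_pkt_data(pkt);
 *			u32 len = hv_pkt_datalen(pkt);
 *
 *			... handle one packet of 'len' bytes at 'data' ...
 *		}
 *	}
 */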

#endif /* _HYPERV_H */