xref: /linux-6.15/include/linux/hyperv.h (revision 45a442fe)
13b20eb23SThomas Gleixner /* SPDX-License-Identifier: GPL-2.0-only */
246a97191SGreg Kroah-Hartman /*
346a97191SGreg Kroah-Hartman  *
446a97191SGreg Kroah-Hartman  * Copyright (c) 2011, Microsoft Corporation.
546a97191SGreg Kroah-Hartman  *
646a97191SGreg Kroah-Hartman  * Authors:
746a97191SGreg Kroah-Hartman  *   Haiyang Zhang <[email protected]>
846a97191SGreg Kroah-Hartman  *   Hank Janssen  <[email protected]>
946a97191SGreg Kroah-Hartman  *   K. Y. Srinivasan <[email protected]>
1046a97191SGreg Kroah-Hartman  */
1146a97191SGreg Kroah-Hartman 
1246a97191SGreg Kroah-Hartman #ifndef _HYPERV_H
1346a97191SGreg Kroah-Hartman #define _HYPERV_H
1446a97191SGreg Kroah-Hartman 
155267cf02SBjarke Istrup Pedersen #include <uapi/linux/hyperv.h>
165267cf02SBjarke Istrup Pedersen 
17bca6b91dSBoqun Feng #include <linux/mm.h>
182939437cSK. Y. Srinivasan #include <linux/types.h>
1946a97191SGreg Kroah-Hartman #include <linux/scatterlist.h>
2046a97191SGreg Kroah-Hartman #include <linux/list.h>
2146a97191SGreg Kroah-Hartman #include <linux/timer.h>
2246a97191SGreg Kroah-Hartman #include <linux/completion.h>
2346a97191SGreg Kroah-Hartman #include <linux/device.h>
2446a97191SGreg Kroah-Hartman #include <linux/mod_devicetable.h>
25631e63a9SStephen Hemminger #include <linux/interrupt.h>
2663273cb4SLong Li #include <linux/reciprocal_div.h>
27ef5a3c92SNuno Das Neves #include <hyperv/hvhdk.h>
2846a97191SGreg Kroah-Hartman 
/* Max number of single-page buffers (struct hv_page_buffer) per packet */
#define MAX_PAGE_BUFFER_COUNT				32
/* Max pfn entries in struct hv_multipage_buffer: 32 * 4K pages = 128K */
#define MAX_MULTIPAGE_BUFFER_COUNT			32 /* 128K */
3146a97191SGreg Kroah-Hartman 
3246a97191SGreg Kroah-Hartman #pragma pack(push, 1)
3346a97191SGreg Kroah-Hartman 
34c1135c7fSBoqun Feng /*
 * Types for GPADL, deciding how the GPADL header is created.
36c1135c7fSBoqun Feng  *
37c1135c7fSBoqun Feng  * It doesn't make much difference between BUFFER and RING if PAGE_SIZE is the
38c1135c7fSBoqun Feng  * same as HV_HYP_PAGE_SIZE.
39c1135c7fSBoqun Feng  *
40c1135c7fSBoqun Feng  * If PAGE_SIZE is bigger than HV_HYP_PAGE_SIZE, the headers of ring buffers
41c1135c7fSBoqun Feng  * will be of PAGE_SIZE, however, only the first HV_HYP_PAGE will be put
42c1135c7fSBoqun Feng  * into gpadl, therefore the number for HV_HYP_PAGE and the indexes of each
43c1135c7fSBoqun Feng  * HV_HYP_PAGE will be different between different types of GPADL, for example
44c1135c7fSBoqun Feng  * if PAGE_SIZE is 64K:
45c1135c7fSBoqun Feng  *
46c1135c7fSBoqun Feng  * BUFFER:
47c1135c7fSBoqun Feng  *
48c1135c7fSBoqun Feng  * gva:    |--       64k      --|--       64k      --| ... |
49c1135c7fSBoqun Feng  * gpa:    | 4k | 4k | ... | 4k | 4k | 4k | ... | 4k |
50c1135c7fSBoqun Feng  * index:  0    1    2     15   16   17   18 .. 31   32 ...
51c1135c7fSBoqun Feng  *         |    |    ...   |    |    |   ...    |   ...
52c1135c7fSBoqun Feng  *         v    V          V    V    V          V
53c1135c7fSBoqun Feng  * gpadl:  | 4k | 4k | ... | 4k | 4k | 4k | ... | 4k | ... |
54c1135c7fSBoqun Feng  * index:  0    1    2 ... 15   16   17   18 .. 31   32 ...
55c1135c7fSBoqun Feng  *
56c1135c7fSBoqun Feng  * RING:
57c1135c7fSBoqun Feng  *
58c1135c7fSBoqun Feng  *         | header  |           data           | header  |     data      |
59c1135c7fSBoqun Feng  * gva:    |-- 64k --|--       64k      --| ... |-- 64k --|-- 64k --| ... |
60c1135c7fSBoqun Feng  * gpa:    | 4k | .. | 4k | 4k | ... | 4k | ... | 4k | .. | 4k | .. | ... |
61c1135c7fSBoqun Feng  * index:  0    1    16   17   18    31   ...   n   n+1  n+16 ...         2n
62c1135c7fSBoqun Feng  *         |         /    /          /          |         /               /
63c1135c7fSBoqun Feng  *         |        /    /          /           |        /               /
64c1135c7fSBoqun Feng  *         |       /    /   ...    /    ...     |       /      ...      /
65c1135c7fSBoqun Feng  *         |      /    /          /             |      /               /
66c1135c7fSBoqun Feng  *         |     /    /          /              |     /               /
67c1135c7fSBoqun Feng  *         V    V    V          V               V    V               v
68c1135c7fSBoqun Feng  * gpadl:  | 4k | 4k |   ...    |    ...        | 4k | 4k |  ...     |
69c1135c7fSBoqun Feng  * index:  0    1    2   ...    16   ...       n-15 n-14 n-13  ...  2n-30
70c1135c7fSBoqun Feng  */
/* GPADL creation type; see the layout diagrams in the comment above. */
enum hv_gpadl_type {
	HV_GPADL_BUFFER,	/* flat buffer: every HV_HYP_PAGE goes into the gpadl */
	HV_GPADL_RING		/* ring buffer: only the first HV_HYP_PAGE of each header page is mapped */
};
75c1135c7fSBoqun Feng 
/* Single-page buffer */
struct hv_page_buffer {
	u32 len;	/* length of the data, in bytes */
	u32 offset;	/* byte offset of the data within the page */
	u64 pfn;	/* page frame number of the page holding the data */
};
8246a97191SGreg Kroah-Hartman 
/* Multiple-page buffer; fixed-size variant of struct hv_mpb_array below */
struct hv_multipage_buffer {
	/* Length and Offset determines the # of pfns in the array */
	u32 len;	/* total data length, in bytes */
	u32 offset;	/* byte offset of the data within the first page */
	u64 pfn_array[MAX_MULTIPAGE_BUFFER_COUNT];
};
9046a97191SGreg Kroah-Hartman 
/*
 * Multiple-page buffer array; the pfn array is variable size:
 * The number of entries in the PFN array is determined by
 * "len" and "offset".
 *
 * Allocate sizeof(struct hv_mpb_array) + n * sizeof(u64) for n pfns.
 */
struct hv_mpb_array {
	/* Length and Offset determines the # of pfns in the array */
	u32 len;
	u32 offset;
	u64 pfn_array[];	/* C99 flexible array member */
};
102d61031eeSK. Y. Srinivasan 
/*
 * 0x18 includes the proprietary packet header. (0x18 == 24 bytes; larger
 * than the bare 16-byte struct vmpacket_descriptor alone.)
 */
#define MAX_PAGE_BUFFER_PACKET		(0x18 +			\
					(sizeof(struct hv_page_buffer) * \
					 MAX_PAGE_BUFFER_COUNT))
#define MAX_MULTIPAGE_BUFFER_PACKET	(0x18 +			\
					 sizeof(struct hv_multipage_buffer))
10946a97191SGreg Kroah-Hartman 
11046a97191SGreg Kroah-Hartman 
11146a97191SGreg Kroah-Hartman #pragma pack(pop)
11246a97191SGreg Kroah-Hartman 
/*
 * Shared ring buffer control page + data. This layout is shared with the
 * hypervisor/host and must not be changed (__packed, page-sized header).
 */
struct hv_ring_buffer {
	/* Offset in bytes from the start of ring data below */
	u32 write_index;

	/* Offset in bytes from the start of ring data below */
	u32 read_index;

	u32 interrupt_mask;

	/*
	 * WS2012/Win8 and later versions of Hyper-V implement interrupt
	 * driven flow management. The feature bit feat_pending_send_sz
	 * is set by the host on the host->guest ring buffer, and by the
	 * guest on the guest->host ring buffer.
	 *
	 * The meaning of the feature bit is a bit complex in that it has
	 * semantics that apply to both ring buffers.  If the guest sets
	 * the feature bit in the guest->host ring buffer, the guest is
	 * telling the host that:
	 * 1) It will set the pending_send_sz field in the guest->host ring
	 *    buffer when it is waiting for space to become available, and
	 * 2) It will read the pending_send_sz field in the host->guest
	 *    ring buffer and interrupt the host when it frees enough space
	 *
	 * Similarly, if the host sets the feature bit in the host->guest
	 * ring buffer, the host is telling the guest that:
	 * 1) It will set the pending_send_sz field in the host->guest ring
	 *    buffer when it is waiting for space to become available, and
	 * 2) It will read the pending_send_sz field in the guest->host
	 *    ring buffer and interrupt the guest when it frees enough space
	 *
	 * If either the guest or host does not set the feature bit that it
	 * owns, that guest or host must do polling if it encounters a full
	 * ring buffer, and not signal the other end with an interrupt.
	 */
	u32 pending_send_sz;
	u32 reserved1[12];
	union {
		struct {
			u32 feat_pending_send_sz:1;
		};
		u32 value;
	} feature_bits;

	/*
	 * Pad it to PAGE_SIZE so that data starts on page boundary.
	 * 68 == size of the fields above: write_index (4) + read_index (4)
	 * + interrupt_mask (4) + pending_send_sz (4) + reserved1 (48)
	 * + feature_bits (4).
	 */
	u8	reserved2[PAGE_SIZE - 68];

	/*
	 * Ring data starts here + RingDataStartOffset
	 * !!! DO NOT place any fields below this !!!
	 */
	u8 buffer[];
} __packed;
16646a97191SGreg Kroah-Hartman 
167b8209544SMichael Kelley 
/*
 * If the requested ring buffer size is at least 8 times the size of the
 * header, steal space from the ring buffer for the header. Otherwise, add
 * space for the header so that it doesn't take too much of the ring buffer
 * space.
 *
 * The factor of 8 is somewhat arbitrary. The goal is to prevent adding a
 * relatively small header (4 Kbytes on x86) to a large-ish power-of-2 ring
 * buffer size (such as 128 Kbytes) and so end up making a nearly twice as
 * large allocation that will be almost half wasted. As a contrasting example,
 * on ARM64 with 64 Kbyte page size, we don't want to take 64 Kbytes for the
 * header from a 128 Kbyte allocation, leaving only 64 Kbytes for the ring.
 * In this latter case, we must add 64 Kbytes for the header and not worry
 * about what's wasted.
 */
#define VMBUS_HEADER_ADJ(payload_sz) \
	((payload_sz) >=  8 * sizeof(struct hv_ring_buffer) ? \
	0 : sizeof(struct hv_ring_buffer))

/* Calculate the proper size of a ringbuffer, it must be page-aligned */
#define VMBUS_RING_SIZE(payload_sz) PAGE_ALIGN(VMBUS_HEADER_ADJ(payload_sz) + \
					       (payload_sz))
190c1135c7fSBoqun Feng 
/* Guest-side bookkeeping for one direction of a channel's ring buffer */
struct hv_ring_buffer_info {
	struct hv_ring_buffer *ring_buffer;
	u32 ring_size;			/* Include the shared header */
	/* Precomputed reciprocal used by hv_get_avail_to_write_percent() */
	struct reciprocal_value ring_size_div10_reciprocal;
	spinlock_t ring_lock;

	u32 ring_datasize;		/* < ring_size */
	u32 priv_read_index;
	/*
	 * The ring buffer mutex lock. This lock prevents the ring buffer from
	 * being freed while the ring buffer is being accessed.
	 */
	struct mutex ring_buffer_mutex;

	/* Buffer that holds a copy of an incoming host packet */
	void *pkt_buffer;
	u32 pkt_buffer_size;
};
20946a97191SGreg Kroah-Hartman 
21033be96e4SHaiyang Zhang 
/*
 * Return the number of bytes currently available to read from the ring
 * buffer, i.e. the distance from read_index forward to write_index,
 * accounting for wrap-around at ring_datasize.
 *
 * write_index can be updated concurrently by the other endpoint, hence
 * the READ_ONCE(); read_index is owned by the reading side.
 * NOTE(review): no memory barrier here — callers presumably provide any
 * ordering they need; verify against the callers.
 */
static inline u32 hv_get_bytes_to_read(const struct hv_ring_buffer_info *rbi)
{
	u32 read_loc, write_loc, dsize, read;

	dsize = rbi->ring_datasize;
	read_loc = rbi->ring_buffer->read_index;
	write_loc = READ_ONCE(rbi->ring_buffer->write_index);

	/* Wrapped case: data runs read_loc..dsize, then 0..write_loc */
	read = write_loc >= read_loc ? (write_loc - read_loc) :
		(dsize - read_loc) + write_loc;

	return read;
}
224a6341f00SK. Y. Srinivasan 
/*
 * Return the number of bytes of free space currently available for
 * writing into the ring buffer (the gap from write_index forward to
 * read_index, accounting for wrap-around).
 *
 * read_index can be updated concurrently by the other endpoint, hence
 * the READ_ONCE(); write_index is owned by the writing side.
 */
static inline u32 hv_get_bytes_to_write(const struct hv_ring_buffer_info *rbi)
{
	u32 read_loc, write_loc, dsize, write;

	dsize = rbi->ring_datasize;
	read_loc = READ_ONCE(rbi->ring_buffer->read_index);
	write_loc = rbi->ring_buffer->write_index;

	/* Free space is the ring minus the unread span (or the direct gap) */
	write = write_loc >= read_loc ? dsize - (write_loc - read_loc) :
		read_loc - write_loc;
	return write;
}
237a6341f00SK. Y. Srinivasan 
hv_get_avail_to_write_percent(const struct hv_ring_buffer_info * rbi)23863273cb4SLong Li static inline u32 hv_get_avail_to_write_percent(
23963273cb4SLong Li 		const struct hv_ring_buffer_info *rbi)
24063273cb4SLong Li {
24163273cb4SLong Li 	u32 avail_write = hv_get_bytes_to_write(rbi);
24263273cb4SLong Li 
24363273cb4SLong Li 	return reciprocal_divide(
24463273cb4SLong Li 			(avail_write  << 3) + (avail_write << 1),
24563273cb4SLong Li 			rbi->ring_size_div10_reciprocal);
24663273cb4SLong Li }
24763273cb4SLong Li 
/*
 * VMBUS version is 32 bit entity broken up into
 * two 16 bit quantities: major_number. minor_number.
 *
 * 0 . 13 (Windows Server 2008)
 * 1 . 1  (Windows 7, WS2008 R2)
 * 2 . 4  (Windows 8, WS2012)
 * 3 . 0  (Windows 8.1, WS2012 R2)
 * 4 . 0  (Windows 10)
 * 4 . 1  (Windows 10 RS3)
 * 5 . 0  (Newer Windows 10)
 * 5 . 1  (Windows 10 RS4)
 * 5 . 2  (Windows Server 2019, RS5)
 * 5 . 3  (Windows Server 2022)
 *
 * The WS2008 and WIN7 versions are listed here for
 * completeness but are no longer supported in the
 * Linux kernel.
 */

/* Each VERSION_* encodes (major << 16) | minor. */
#define VERSION_WS2008  ((0 << 16) | (13))
#define VERSION_WIN7    ((1 << 16) | (1))
#define VERSION_WIN8    ((2 << 16) | (4))
#define VERSION_WIN8_1    ((3 << 16) | (0))
#define VERSION_WIN10 ((4 << 16) | (0))
#define VERSION_WIN10_V4_1 ((4 << 16) | (1))
#define VERSION_WIN10_V5 ((5 << 16) | (0))
#define VERSION_WIN10_V5_1 ((5 << 16) | (1))
#define VERSION_WIN10_V5_2 ((5 << 16) | (2))
#define VERSION_WIN10_V5_3 ((5 << 16) | (3))
278eafa7072SK. Y. Srinivasan 
/* Make maximum size of pipe payload of 16K */
#define MAX_PIPE_DATA_PAYLOAD		(sizeof(u8) * 16384)

/* Define PipeMode values. */
#define VMBUS_PIPE_TYPE_BYTE		0x00000000
#define VMBUS_PIPE_TYPE_MESSAGE		0x00000004

/* The size of the user defined data buffer for non-pipe offers. */
#define MAX_USER_DEFINED_BYTES		120

/*
 * The size of the user defined data buffer for pipe offers.
 * 4 bytes smaller than MAX_USER_DEFINED_BYTES because the pipe variant
 * of the union in struct vmbus_channel_offer carries a u32 pipe_mode.
 */
#define MAX_PIPE_USER_DEFINED_BYTES	116
29146a97191SGreg Kroah-Hartman 
/*
 * At the center of the Channel Management library is the Channel Offer. This
 * struct contains the fundamental information about an offer.
 */
struct vmbus_channel_offer {
	guid_t if_type;		/* interface type GUID */
	guid_t if_instance;	/* interface instance GUID */

	/*
	 * These two fields are not currently used.
	 */
	u64 reserved1;
	u64 reserved2;

	u16 chn_flags;		/* VMBUS_CHANNEL_* flags, defined below */
	u16 mmio_megabytes;		/* in bytes * 1024 * 1024 */

	union {
		/* Non-pipes: The user has MAX_USER_DEFINED_BYTES bytes. */
		struct {
			unsigned char user_def[MAX_USER_DEFINED_BYTES];
		} std;

		/*
		 * Pipes:
		 * The following structure is an integrated pipe protocol, which
		 * is implemented on top of standard user-defined data. Pipe
		 * clients have MAX_PIPE_USER_DEFINED_BYTES left for their own
		 * use.
		 */
		struct {
			u32  pipe_mode;	/* VMBUS_PIPE_TYPE_BYTE or _MESSAGE */
			unsigned char user_def[MAX_PIPE_USER_DEFINED_BYTES];
		} pipe;
	} u;
	/*
	 * The sub_channel_index is defined in Win8: a value of zero means a
	 * primary channel and a value of non-zero means a sub-channel.
	 *
	 * Before Win8, the field is reserved, meaning it's always zero.
	 */
	u16 sub_channel_index;
	u16 reserved3;
} __packed;
33646a97191SGreg Kroah-Hartman 
/*
 * Server Flags.
 * NOTE(review): presumably the bit values carried in
 * struct vmbus_channel_offer.chn_flags — verify against callers.
 */
#define VMBUS_CHANNEL_ENUMERATE_DEVICE_INTERFACE	1
#define VMBUS_CHANNEL_SERVER_SUPPORTS_TRANSFER_PAGES	2
#define VMBUS_CHANNEL_SERVER_SUPPORTS_GPADLS		4
#define VMBUS_CHANNEL_NAMED_PIPE_MODE			0x10
#define VMBUS_CHANNEL_LOOPBACK_OFFER			0x100
#define VMBUS_CHANNEL_PARENT_OFFER			0x200
#define VMBUS_CHANNEL_REQUEST_MONITORED_NOTIFICATION	0x400
#define VMBUS_CHANNEL_TLNPI_PROVIDER_OFFER		0x2000
34646a97191SGreg Kroah-Hartman 
/*
 * Common descriptor at the start of every VMBus packet.
 * offset8 and len8 are in units of 8 bytes — see the VMPACKET_*
 * accessor macros below, which multiply them by 8.
 */
struct vmpacket_descriptor {
	u16 type;	/* enum vmbus_packet_type */
	u16 offset8;	/* offset from packet start to payload, in 8-byte units */
	u16 len8;	/* total packet length, in 8-byte units */
	u16 flags;	/* e.g. VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED */
	u64 trans_id;	/* transaction id chosen by the sender */
} __packed;
35446a97191SGreg Kroah-Hartman 
/* Packet descriptor plus a back-link to the previous packet in the ring */
struct vmpacket_header {
	u32 prev_pkt_start_offset;	/* offset of the previous packet's start */
	struct vmpacket_descriptor descriptor;
} __packed;
35946a97191SGreg Kroah-Hartman 
/* One byte range within a transfer-page set */
struct vmtransfer_page_range {
	u32 byte_count;		/* length of the range, in bytes */
	u32 byte_offset;	/* offset of the range, in bytes */
} __packed;
36446a97191SGreg Kroah-Hartman 
/* Header for a VM_PKT_DATA_USING_XFER_PAGES packet */
struct vmtransfer_page_packet_header {
	struct vmpacket_descriptor d;
	u16 xfer_pageset_id;	/* identifies the transfer page set used */
	u8  sender_owns_set;
	u8 reserved;
	u32 range_cnt;		/* number of entries in ranges[] */
	struct vmtransfer_page_range ranges[];
} __packed;
37346a97191SGreg Kroah-Hartman 
/*
 * This structure defines a range in guest physical space that can be made to
 * look virtually contiguous.
 *
 * NOTE(review): unlike the neighboring structures this one is not marked
 * __packed; its natural layout (two u32 followed by a u64 array) has no
 * padding anyway, so the wire layout is the same.
 */
struct gpa_range {
	u32 byte_count;		/* length of the range, in bytes */
	u32 byte_offset;	/* offset into the first page, in bytes */
	u64 pfn_array[];	/* page frame numbers backing the range */
};
38346a97191SGreg Kroah-Hartman 
/*
 * This is the format for a GPA-Direct packet, which contains a set of GPA
 * ranges, in addition to commands and/or data.
 *
 * NOTE(review): range[1] is an old-style variable-length trailing array.
 * A C99 flexible array member (range[]) would be cleaner, but changing it
 * alters sizeof(struct vmdata_gpa_direct) and therefore any size arithmetic
 * done by callers, so it is deliberately left as-is.
 */
struct vmdata_gpa_direct {
	struct vmpacket_descriptor d;
	u32 reserved;
	u32 range_cnt;		/* number of gpa_range entries that follow */
	struct gpa_range range[1];
} __packed;
39446a97191SGreg Kroah-Hartman 
/*
 * Accessors for a raw packet, which begins with a struct
 * vmpacket_descriptor.  offset8/len8 are in units of 8 bytes.
 *
 * Fixed: the previous definitions cast __packet to a struct *value*
 * ("(struct vmpacket_descriptor)__packet)->...") and, in
 * VMPACKET_TRANSFER_MODE, to the nonexistent "struct IMPACT", so they
 * could not compile if ever expanded.  Cast to a descriptor pointer
 * instead, with __packet parenthesized against operator-precedence
 * surprises.
 */
#define VMPACKET_DATA_START_ADDRESS(__packet)	\
	(void *)(((unsigned char *)(__packet)) +	\
	 ((struct vmpacket_descriptor *)(__packet))->offset8 * 8)

#define VMPACKET_DATA_LENGTH(__packet)		\
	((((struct vmpacket_descriptor *)(__packet))->len8 -	\
	  ((struct vmpacket_descriptor *)(__packet))->offset8) * 8)

#define VMPACKET_TRANSFER_MODE(__packet)	\
	(((struct vmpacket_descriptor *)(__packet))->type)
40546a97191SGreg Kroah-Hartman 
/* VMBus packet types, carried in vmpacket_descriptor.type */
enum vmbus_packet_type {
	VM_PKT_INVALID				= 0x0,
	VM_PKT_SYNCH				= 0x1,
	VM_PKT_ADD_XFER_PAGESET			= 0x2,
	VM_PKT_RM_XFER_PAGESET			= 0x3,
	VM_PKT_ESTABLISH_GPADL			= 0x4,
	VM_PKT_TEARDOWN_GPADL			= 0x5,
	VM_PKT_DATA_INBAND			= 0x6,
	VM_PKT_DATA_USING_XFER_PAGES		= 0x7,
	VM_PKT_DATA_USING_GPADL			= 0x8,
	VM_PKT_DATA_USING_GPA_DIRECT		= 0x9,
	VM_PKT_CANCEL_REQUEST			= 0xa,
	VM_PKT_COMP				= 0xb,
	VM_PKT_DATA_USING_ADDITIONAL_PKT	= 0xc,
	VM_PKT_ADDITIONAL_DATA			= 0xd
};

/* vmpacket_descriptor.flags bit: sender asks for a completion response */
#define VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED	1
42446a97191SGreg Kroah-Hartman 
42546a97191SGreg Kroah-Hartman 
/*
 * Version 1 messages.
 * The explicit numeric values are part of the host protocol ABI and
 * must not be renumbered.  CHANNELMSG_18..20 are placeholders for
 * message numbers not used by Linux.
 */
enum vmbus_channel_message_type {
	CHANNELMSG_INVALID			=  0,
	CHANNELMSG_OFFERCHANNEL		=  1,
	CHANNELMSG_RESCIND_CHANNELOFFER	=  2,
	CHANNELMSG_REQUESTOFFERS		=  3,
	CHANNELMSG_ALLOFFERS_DELIVERED	=  4,
	CHANNELMSG_OPENCHANNEL		=  5,
	CHANNELMSG_OPENCHANNEL_RESULT		=  6,
	CHANNELMSG_CLOSECHANNEL		=  7,
	CHANNELMSG_GPADL_HEADER		=  8,
	CHANNELMSG_GPADL_BODY			=  9,
	CHANNELMSG_GPADL_CREATED		= 10,
	CHANNELMSG_GPADL_TEARDOWN		= 11,
	CHANNELMSG_GPADL_TORNDOWN		= 12,
	CHANNELMSG_RELID_RELEASED		= 13,
	CHANNELMSG_INITIATE_CONTACT		= 14,
	CHANNELMSG_VERSION_RESPONSE		= 15,
	CHANNELMSG_UNLOAD			= 16,
	CHANNELMSG_UNLOAD_RESPONSE		= 17,
	CHANNELMSG_18				= 18,
	CHANNELMSG_19				= 19,
	CHANNELMSG_20				= 20,
	CHANNELMSG_TL_CONNECT_REQUEST		= 21,
	CHANNELMSG_MODIFYCHANNEL		= 22,
	CHANNELMSG_TL_CONNECT_RESULT		= 23,
	CHANNELMSG_MODIFYCHANNEL_RESPONSE	= 24,
	CHANNELMSG_COUNT
};

/* Hyper-V supports about 2048 channels, and the RELIDs start with 1. */
#define INVALID_RELID	U32_MAX
458d8bd2d44SDexuan Cui 
/* Common header at the start of every channel management message */
struct vmbus_channel_message_header {
	enum vmbus_channel_message_type msgtype;
	u32 padding;
} __packed;
46346a97191SGreg Kroah-Hartman 
/* Query VMBus Version parameters */
struct vmbus_channel_query_vmbus_version {
	struct vmbus_channel_message_header header;
	u32 version;	/* requested VERSION_* value */
} __packed;
46946a97191SGreg Kroah-Hartman 
/* VMBus Version Supported parameters */
struct vmbus_channel_version_supported {
	struct vmbus_channel_message_header header;
	u8 version_supported;	/* non-zero if the queried version is supported */
} __packed;
47546a97191SGreg Kroah-Hartman 
/* Offer Channel parameters (CHANNELMSG_OFFERCHANNEL) */
struct vmbus_channel_offer_channel {
	struct vmbus_channel_message_header header;
	struct vmbus_channel_offer offer;
	u32 child_relid;	/* relative channel id assigned by the host */
	u8 monitorid;
	/*
	 * win7 and beyond splits this field into a bit field.
	 */
	u8 monitor_allocated:1;
	u8 reserved:7;
	/*
	 * These are new fields added in win7 and later.
	 * Do not access these fields without checking the
	 * negotiated protocol.
	 *
	 * If "is_dedicated_interrupt" is set, we must not set the
	 * associated bit in the channel bitmap while sending the
	 * interrupt to the host.
	 *
	 * connection_id is to be used in signaling the host.
	 */
	u16 is_dedicated_interrupt:1;
	u16 reserved1:15;
	u32 connection_id;
} __packed;
50246a97191SGreg Kroah-Hartman 
/* Rescind Offer parameters (CHANNELMSG_RESCIND_CHANNELOFFER) */
struct vmbus_channel_rescind_offer {
	struct vmbus_channel_message_header header;
	u32 child_relid;	/* relid of the channel being rescinded */
} __packed;
50846a97191SGreg Kroah-Hartman 
50946a97191SGreg Kroah-Hartman /*
51046a97191SGreg Kroah-Hartman  * Request Offer -- no parameters, SynIC message contains the partition ID
51146a97191SGreg Kroah-Hartman  * Set Snoop -- no parameters, SynIC message contains the partition ID
51246a97191SGreg Kroah-Hartman  * Clear Snoop -- no parameters, SynIC message contains the partition ID
51346a97191SGreg Kroah-Hartman  * All Offers Delivered -- no parameters, SynIC message contains the partition
51446a97191SGreg Kroah-Hartman  *		           ID
51546a97191SGreg Kroah-Hartman  * Flush Client -- no parameters, SynIC message contains the partition ID
51646a97191SGreg Kroah-Hartman  */
51746a97191SGreg Kroah-Hartman 
/* Open Channel parameters (CHANNELMSG_OPENCHANNEL) */
struct vmbus_channel_open_channel {
	struct vmbus_channel_message_header header;

	/* Identifies the specific VMBus channel that is being opened. */
	u32 child_relid;

	/* ID making a particular open request at a channel offer unique. */
	u32 openid;

	/* GPADL for the channel's ring buffer. */
	u32 ringbuffer_gpadlhandle;

	/*
	 * Starting with win8, this field will be used to specify
	 * the target virtual processor on which to deliver the interrupt for
	 * the host to guest communication.
	 * Prior to win8, incoming channel interrupts would only
	 * be delivered on cpu 0. Setting this value to 0 would
	 * preserve the earlier behavior.
	 */
	u32 target_vp;

	/*
	 * The upstream ring buffer begins at offset zero in the memory
	 * described by RingBufferGpadlHandle. The downstream ring buffer
	 * follows it at this offset (in pages).
	 */
	u32 downstream_ringbuffer_pageoffset;

	/* User-specific data to be passed along to the server endpoint. */
	unsigned char userdata[MAX_USER_DEFINED_BYTES];
} __packed;
55146a97191SGreg Kroah-Hartman 
55246a97191SGreg Kroah-Hartman /* Open Channel Result parameters */
55346a97191SGreg Kroah-Hartman struct vmbus_channel_open_result {
55446a97191SGreg Kroah-Hartman 	struct vmbus_channel_message_header header;
	/* Relid of the channel the open request was issued for */
55546a97191SGreg Kroah-Hartman 	u32 child_relid;
	/* Echoes the openid from the matching vmbus_channel_open_channel */
55646a97191SGreg Kroah-Hartman 	u32 openid;
	/* Host-reported completion status of the open request */
55746a97191SGreg Kroah-Hartman 	u32 status;
55846a97191SGreg Kroah-Hartman } __packed;
55946a97191SGreg Kroah-Hartman 
560870ced05SAndrea Parri (Microsoft) /* Modify Channel Result parameters */
561870ced05SAndrea Parri (Microsoft) struct vmbus_channel_modifychannel_response {
562870ced05SAndrea Parri (Microsoft) 	struct vmbus_channel_message_header header;
	/* Relid of the channel the modify request targeted */
563870ced05SAndrea Parri (Microsoft) 	u32 child_relid;
	/* Completion status; cf. struct vmbus_channel_modifychannel */
564870ced05SAndrea Parri (Microsoft) 	u32 status;
565870ced05SAndrea Parri (Microsoft) } __packed;
566870ced05SAndrea Parri (Microsoft) 
56746a97191SGreg Kroah-Hartman /* Close channel parameters; */
56846a97191SGreg Kroah-Hartman struct vmbus_channel_close_channel {
56946a97191SGreg Kroah-Hartman 	struct vmbus_channel_message_header header;
	/* Relid of the channel being closed */
57046a97191SGreg Kroah-Hartman 	u32 child_relid;
57146a97191SGreg Kroah-Hartman } __packed;
57246a97191SGreg Kroah-Hartman 
57346a97191SGreg Kroah-Hartman /* Channel Message GPADL */
57446a97191SGreg Kroah-Hartman #define GPADL_TYPE_RING_BUFFER		1
57546a97191SGreg Kroah-Hartman #define GPADL_TYPE_SERVER_SAVE_AREA	2
57646a97191SGreg Kroah-Hartman #define GPADL_TYPE_TRANSACTION		8
57746a97191SGreg Kroah-Hartman 
57846a97191SGreg Kroah-Hartman /*
57946a97191SGreg Kroah-Hartman  * The number of PFNs in a GPADL message is defined by the number of
58046a97191SGreg Kroah-Hartman  * pages that would be spanned by ByteCount and ByteOffset.  If the
58146a97191SGreg Kroah-Hartman  * implied number of PFNs won't fit in this packet, there will be a
58246a97191SGreg Kroah-Hartman  * follow-up packet that contains more.
58346a97191SGreg Kroah-Hartman  */
58446a97191SGreg Kroah-Hartman struct vmbus_channel_gpadl_header {
58546a97191SGreg Kroah-Hartman 	struct vmbus_channel_message_header header;
	/* Relid of the channel this GPADL is established for */
58646a97191SGreg Kroah-Hartman 	u32 child_relid;
	/* Handle identifying this GPADL in later created/teardown messages */
58746a97191SGreg Kroah-Hartman 	u32 gpadl;
	/* Size of the range[] payload -- presumably in bytes; confirm against the sender */
58846a97191SGreg Kroah-Hartman 	u16 range_buflen;
	/* Number of gpa_range entries in range[] */
58946a97191SGreg Kroah-Hartman 	u16 rangecount;
590db5871e8SGustavo A. R. Silva 	struct gpa_range range[];
59146a97191SGreg Kroah-Hartman } __packed;
59246a97191SGreg Kroah-Hartman 
59346a97191SGreg Kroah-Hartman /* This is the followup packet that contains more PFNs. */
59446a97191SGreg Kroah-Hartman struct vmbus_channel_gpadl_body {
59546a97191SGreg Kroah-Hartman 	struct vmbus_channel_message_header header;
	/* Sequence number of this follow-up packet -- presumably increments per body; confirm */
59646a97191SGreg Kroah-Hartman 	u32 msgnumber;
	/* Handle of the GPADL being continued (matches the header message) */
59746a97191SGreg Kroah-Hartman 	u32 gpadl;
	/* Continuation of the PFN list started in vmbus_channel_gpadl_header */
598db5871e8SGustavo A. R. Silva 	u64 pfn[];
59946a97191SGreg Kroah-Hartman } __packed;
60046a97191SGreg Kroah-Hartman 
/* Host response confirming (or failing) creation of a GPADL. */
60146a97191SGreg Kroah-Hartman struct vmbus_channel_gpadl_created {
60246a97191SGreg Kroah-Hartman 	struct vmbus_channel_message_header header;
60346a97191SGreg Kroah-Hartman 	u32 child_relid;
60446a97191SGreg Kroah-Hartman 	u32 gpadl;
	/* Host-reported status of the GPADL creation */
60546a97191SGreg Kroah-Hartman 	u32 creation_status;
60646a97191SGreg Kroah-Hartman } __packed;
60746a97191SGreg Kroah-Hartman 
/* Guest request to tear down a previously established GPADL. */
60846a97191SGreg Kroah-Hartman struct vmbus_channel_gpadl_teardown {
60946a97191SGreg Kroah-Hartman 	struct vmbus_channel_message_header header;
61046a97191SGreg Kroah-Hartman 	u32 child_relid;
	/* Handle of the GPADL to tear down */
61146a97191SGreg Kroah-Hartman 	u32 gpadl;
61246a97191SGreg Kroah-Hartman } __packed;
61346a97191SGreg Kroah-Hartman 
/* Host response confirming a GPADL teardown completed. */
61446a97191SGreg Kroah-Hartman struct vmbus_channel_gpadl_torndown {
61546a97191SGreg Kroah-Hartman 	struct vmbus_channel_message_header header;
	/* Handle of the GPADL that was torn down */
61646a97191SGreg Kroah-Hartman 	u32 gpadl;
61746a97191SGreg Kroah-Hartman } __packed;
61846a97191SGreg Kroah-Hartman 
/* Notifies the host that the guest has released the given channel relid. */
61946a97191SGreg Kroah-Hartman struct vmbus_channel_relid_released {
62046a97191SGreg Kroah-Hartman 	struct vmbus_channel_message_header header;
62146a97191SGreg Kroah-Hartman 	u32 child_relid;
62246a97191SGreg Kroah-Hartman } __packed;
62346a97191SGreg Kroah-Hartman 
/* First message of VMBus version negotiation with the host. */
62446a97191SGreg Kroah-Hartman struct vmbus_channel_initiate_contact {
62546a97191SGreg Kroah-Hartman 	struct vmbus_channel_message_header header;
	/* VMBus protocol version the guest is proposing */
62646a97191SGreg Kroah-Hartman 	u32 vmbus_version_requested;
627e28bab48SK. Y. Srinivasan 	u32 target_vcpu; /* The VCPU the host should respond to */
	/*
	 * NOTE(review): older protocol versions appear to pass the GPA of an
	 * interrupt page here, while newer ones pass the SynIC SINT/VTL to
	 * target -- confirm the exact version split against vmbus_connect().
	 */
628ae20b254SDexuan Cui 	union {
62946a97191SGreg Kroah-Hartman 		u64 interrupt_page;
630ae20b254SDexuan Cui 		struct {
631ae20b254SDexuan Cui 			u8	msg_sint;
6328387ce06STianyu Lan 			u8	msg_vtl;
6338387ce06STianyu Lan 			u8	reserved[6];
634ae20b254SDexuan Cui 		};
635ae20b254SDexuan Cui 	};
	/* Monitor pages -- presumably GPAs of the two shared monitor pages; confirm */
63646a97191SGreg Kroah-Hartman 	u64 monitor_page1;
63746a97191SGreg Kroah-Hartman 	u64 monitor_page2;
63846a97191SGreg Kroah-Hartman } __packed;
63946a97191SGreg Kroah-Hartman 
6405c23a1a5SDexuan Cui /* Hyper-V socket: guest's connect()-ing to host */
6415c23a1a5SDexuan Cui struct vmbus_channel_tl_connect_request {
6425c23a1a5SDexuan Cui 	struct vmbus_channel_message_header header;
	/* GUID identifying the guest-side endpoint of the connection */
643593db803SAndy Shevchenko 	guid_t guest_endpoint_id;
	/* GUID of the host service being connected to */
644593db803SAndy Shevchenko 	guid_t host_service_id;
6455c23a1a5SDexuan Cui } __packed;
6465c23a1a5SDexuan Cui 
64875278105SAndrea Parri (Microsoft) /* Modify Channel parameters, cf. vmbus_send_modifychannel() */
64975278105SAndrea Parri (Microsoft) struct vmbus_channel_modifychannel {
65075278105SAndrea Parri (Microsoft) 	struct vmbus_channel_message_header header;
	/* Relid of the channel being modified */
65175278105SAndrea Parri (Microsoft) 	u32 child_relid;
	/* New target virtual processor for the channel's host-to-guest interrupt */
65275278105SAndrea Parri (Microsoft) 	u32 target_vp;
65375278105SAndrea Parri (Microsoft) } __packed;
65375278105SAndrea Parri (Microsoft) 
/* Host reply to vmbus_channel_initiate_contact. */
65446a97191SGreg Kroah-Hartman struct vmbus_channel_version_response {
65546a97191SGreg Kroah-Hartman 	struct vmbus_channel_message_header header;
	/* Non-zero when the host accepted the requested version -- presumably boolean; confirm */
6561508d811SK. Y. Srinivasan 	u8 version_supported;
657ae20b254SDexuan Cui 
	/* NOTE(review): host connection state -- semantics not visible here; confirm */
658ae20b254SDexuan Cui 	u8 connection_state;
659ae20b254SDexuan Cui 	u16 padding;
660ae20b254SDexuan Cui 
661ae20b254SDexuan Cui 	/*
662ae20b254SDexuan Cui 	 * On new hosts that support VMBus protocol 5.0, we must use
663ae20b254SDexuan Cui 	 * VMBUS_MESSAGE_CONNECTION_ID_4 for the Initiate Contact Message,
664ae20b254SDexuan Cui 	 * and for subsequent messages, we must use the Message Connection ID
665ae20b254SDexuan Cui 	 * field in the host-returned Version Response Message.
666ae20b254SDexuan Cui 	 *
667ae20b254SDexuan Cui 	 * On old hosts, we should always use VMBUS_MESSAGE_CONNECTION_ID (1).
668ae20b254SDexuan Cui 	 */
669ae20b254SDexuan Cui 	u32 msg_conn_id;
67046a97191SGreg Kroah-Hartman } __packed;
67146a97191SGreg Kroah-Hartman 
/* Guest-side lifecycle state of a channel; stored in vmbus_channel::state. */
67246a97191SGreg Kroah-Hartman enum vmbus_channel_state {
67346a97191SGreg Kroah-Hartman 	CHANNEL_OFFER_STATE,
67446a97191SGreg Kroah-Hartman 	CHANNEL_OPENING_STATE,
67546a97191SGreg Kroah-Hartman 	CHANNEL_OPEN_STATE,
676e68d2971SK. Y. Srinivasan 	CHANNEL_OPENED_STATE,
67746a97191SGreg Kroah-Hartman };
67846a97191SGreg Kroah-Hartman 
67946a97191SGreg Kroah-Hartman /*
67946a97191SGreg Kroah-Hartman  * Represents each channel msg on the vmbus connection. This is a
68146a97191SGreg Kroah-Hartman  * variable-size data structure depending on the msg type itself
68246a97191SGreg Kroah-Hartman  */
68346a97191SGreg Kroah-Hartman struct vmbus_channel_msginfo {
68446a97191SGreg Kroah-Hartman 	/* Bookkeeping stuff */
68546a97191SGreg Kroah-Hartman 	struct list_head msglistentry;
68646a97191SGreg Kroah-Hartman 
68746a97191SGreg Kroah-Hartman 	/* So far, this is only used to handle gpadl body message */
68846a97191SGreg Kroah-Hartman 	struct list_head submsglist;
68946a97191SGreg Kroah-Hartman 
69046a97191SGreg Kroah-Hartman 	/* Synchronize the request/response if needed */
69146a97191SGreg Kroah-Hartman 	struct completion  waitevent;
	/* Channel (if any) blocked on waitevent -- presumably used to abort the wait on rescind; confirm */
692ccb61f8aSK. Y. Srinivasan 	struct vmbus_channel *waiting_channel;
	/* Host reply, valid once waitevent completes */
69346a97191SGreg Kroah-Hartman 	union {
69446a97191SGreg Kroah-Hartman 		struct vmbus_channel_version_supported version_supported;
69546a97191SGreg Kroah-Hartman 		struct vmbus_channel_open_result open_result;
69646a97191SGreg Kroah-Hartman 		struct vmbus_channel_gpadl_torndown gpadl_torndown;
69746a97191SGreg Kroah-Hartman 		struct vmbus_channel_gpadl_created gpadl_created;
69846a97191SGreg Kroah-Hartman 		struct vmbus_channel_version_response version_response;
699870ced05SAndrea Parri (Microsoft) 		struct vmbus_channel_modifychannel_response modify_response;
70046a97191SGreg Kroah-Hartman 	} response;
70146a97191SGreg Kroah-Hartman 
	/* Size in bytes of msg[] below */
70246a97191SGreg Kroah-Hartman 	u32 msgsize;
70346a97191SGreg Kroah-Hartman 	/*
70446a97191SGreg Kroah-Hartman 	 * The channel message that goes out on the "wire".
70546a97191SGreg Kroah-Hartman 	 * It will contain at minimum the VMBUS_CHANNEL_MESSAGE_HEADER header
70646a97191SGreg Kroah-Hartman 	 */
707db5871e8SGustavo A. R. Silva 	unsigned char msg[];
70846a97191SGreg Kroah-Hartman };
70946a97191SGreg Kroah-Hartman 
/*
 * Bookkeeping (msginfo) plus the on-the-wire close message for a channel;
 * embedded in struct vmbus_channel::close_msg.
 */
71046a97191SGreg Kroah-Hartman struct vmbus_close_msg {
71146a97191SGreg Kroah-Hartman 	struct vmbus_channel_msginfo info;
71246a97191SGreg Kroah-Hartman 	struct vmbus_channel_close_channel msg;
71346a97191SGreg Kroah-Hartman };
71446a97191SGreg Kroah-Hartman 
/*
 * Device classes recognized by the vmbus driver; stored as a u16 in
 * struct vmbus_device::dev_type and struct vmbus_channel::device_id.
 * HV_UNKNOWN is presumably the catch-all for unrecognized devices.
 */
7157047f17dSK. Y. Srinivasan enum vmbus_device_type {
7167047f17dSK. Y. Srinivasan 	HV_IDE = 0,
7177047f17dSK. Y. Srinivasan 	HV_SCSI,
7187047f17dSK. Y. Srinivasan 	HV_FC,
7197047f17dSK. Y. Srinivasan 	HV_NIC,
7207047f17dSK. Y. Srinivasan 	HV_ND,
7217047f17dSK. Y. Srinivasan 	HV_PCIE,
7227047f17dSK. Y. Srinivasan 	HV_FB,
7237047f17dSK. Y. Srinivasan 	HV_KBD,
7247047f17dSK. Y. Srinivasan 	HV_MOUSE,
7257047f17dSK. Y. Srinivasan 	HV_KVP,
7267047f17dSK. Y. Srinivasan 	HV_TS,
7277047f17dSK. Y. Srinivasan 	HV_HB,
7287047f17dSK. Y. Srinivasan 	HV_SHUTDOWN,
7297047f17dSK. Y. Srinivasan 	HV_FCOPY,
7307047f17dSK. Y. Srinivasan 	HV_BACKUP,
7317047f17dSK. Y. Srinivasan 	HV_DM,
732f45be72cSHaiyang Zhang 	HV_UNKNOWN,
7337047f17dSK. Y. Srinivasan };
7347047f17dSK. Y. Srinivasan 
735e8b7db38SAndres Beltran /*
736e8b7db38SAndres Beltran  * Provides request ids for VMBus. Encapsulates guest memory
737e8b7db38SAndres Beltran  * addresses and stores the next available slot in req_arr
738e8b7db38SAndres Beltran  * to generate new ids in constant time.
739e8b7db38SAndres Beltran  */
740e8b7db38SAndres Beltran struct vmbus_requestor {
	/* Guest memory addresses -- presumably indexed by request id; cf. vmbus_next_request_id() */
741e8b7db38SAndres Beltran 	u64 *req_arr;
742e8b7db38SAndres Beltran 	unsigned long *req_bitmap; /* is a given slot available? */
	/* Number of slots in req_arr/req_bitmap */
743e8b7db38SAndres Beltran 	u32 size;
	/* Next candidate id, enabling the constant-time allocation noted above */
744e8b7db38SAndres Beltran 	u64 next_request_id;
745e8b7db38SAndres Beltran 	spinlock_t req_lock; /* provides atomicity */
746e8b7db38SAndres Beltran };
747e8b7db38SAndres Beltran 
748e8b7db38SAndres Beltran #define VMBUS_NO_RQSTOR U64_MAX
749e8b7db38SAndres Beltran #define VMBUS_RQST_ERROR (U64_MAX - 1)
7500aadb6a7SAndrea Parri (Microsoft) #define VMBUS_RQST_ADDR_ANY U64_MAX
751bf5fd8caSAndrea Parri (Microsoft) /* NetVSC-specific */
7524d18fcc9SAndres Beltran #define VMBUS_RQST_ID_NO_RESPONSE (U64_MAX - 2)
753bf5fd8caSAndrea Parri (Microsoft) /* StorVSC-specific */
754bf5fd8caSAndrea Parri (Microsoft) #define VMBUS_RQST_INIT (U64_MAX - 2)
755bf5fd8caSAndrea Parri (Microsoft) #define VMBUS_RQST_RESET (U64_MAX - 3)
756e8b7db38SAndres Beltran 
/* Static per-device-class attributes used by the vmbus driver. */
7577047f17dSK. Y. Srinivasan struct vmbus_device {
758e8c4bd6cSSaurabh Sengar 	/* preferred ring buffer size in KB, 0 means no preferred size for this device */
759e8c4bd6cSSaurabh Sengar 	size_t pref_ring_size;
	/* Device class, cf. enum vmbus_device_type */
7607047f17dSK. Y. Srinivasan 	u16  dev_type;
	/* GUID identifying the device class -- presumably the offer's interface type; confirm */
761593db803SAndy Shevchenko 	guid_t guid;
	/* Marks performance-critical devices -- NOTE(review): confirm how callers use this */
7627047f17dSK. Y. Srinivasan 	bool perf_device;
	/* Whether the device is permitted in isolated VMs */
76321a4e356SAndrea Parri (Microsoft) 	bool allowed_in_isolated;
7647047f17dSK. Y. Srinivasan };
7657047f17dSK. Y. Srinivasan 
766adae1e93SAndres Beltran #define VMBUS_DEFAULT_MAX_PKT_SIZE 4096
767adae1e93SAndres Beltran 
/* Guest-side record of an established GPADL and the memory it describes. */
768d4dccf35STianyu Lan struct vmbus_gpadl {
	/* Handle identifying the GPADL to the host */
769d4dccf35STianyu Lan 	u32 gpadl_handle;
	/* Size of the described memory -- presumably in bytes; confirm */
770d4dccf35STianyu Lan 	u32 size;
	/* Guest virtual address of the described memory */
771d4dccf35STianyu Lan 	void *buffer;
	/* NOTE(review): appears to track whether buffer was decrypted (host-visible) in isolated VMs -- confirm */
772211f514eSRick Edgecombe 	bool decrypted;
773d4dccf35STianyu Lan };
774d4dccf35STianyu Lan 
77546a97191SGreg Kroah-Hartman struct vmbus_channel {
77646a97191SGreg Kroah-Hartman 	struct list_head listentry;
77746a97191SGreg Kroah-Hartman 
77846a97191SGreg Kroah-Hartman 	struct hv_device *device_obj;
77946a97191SGreg Kroah-Hartman 
78046a97191SGreg Kroah-Hartman 	enum vmbus_channel_state state;
78146a97191SGreg Kroah-Hartman 
78246a97191SGreg Kroah-Hartman 	struct vmbus_channel_offer_channel offermsg;
78346a97191SGreg Kroah-Hartman 	/*
78446a97191SGreg Kroah-Hartman 	 * These are based on the OfferMsg.MonitorId.
78546a97191SGreg Kroah-Hartman 	 * Save it here for easy access.
78646a97191SGreg Kroah-Hartman 	 */
78746a97191SGreg Kroah-Hartman 	u8 monitor_grp;
78846a97191SGreg Kroah-Hartman 	u8 monitor_bit;
78946a97191SGreg Kroah-Hartman 
790c3582a2cSHaiyang Zhang 	bool rescind; /* got rescind msg */
791e4d221b4SAndrea Parri (Microsoft) 	bool rescind_ref; /* got rescind msg, got channel reference */
7927fa32e5eSK. Y. Srinivasan 	struct completion rescind_event;
793c3582a2cSHaiyang Zhang 
794d4dccf35STianyu Lan 	struct vmbus_gpadl ringbuffer_gpadlhandle;
79546a97191SGreg Kroah-Hartman 
79646a97191SGreg Kroah-Hartman 	/* Allocated memory for ring buffer */
79752a42c2aSStephen Hemminger 	struct page *ringbuffer_page;
79846a97191SGreg Kroah-Hartman 	u32 ringbuffer_pagecount;
799ae6935edSStephen Hemminger 	u32 ringbuffer_send_offset;
80046a97191SGreg Kroah-Hartman 	struct hv_ring_buffer_info outbound;	/* send to parent */
80146a97191SGreg Kroah-Hartman 	struct hv_ring_buffer_info inbound;	/* receive from parent */
80246a97191SGreg Kroah-Hartman 
80346a97191SGreg Kroah-Hartman 	struct vmbus_close_msg close_msg;
80446a97191SGreg Kroah-Hartman 
8056981fbf3SStephen Hemminger 	/* Statistics */
8066981fbf3SStephen Hemminger 	u64	interrupts;	/* Host to Guest interrupts */
8076981fbf3SStephen Hemminger 	u64	sig_events;	/* Guest to Host events */
8086981fbf3SStephen Hemminger 
809396ae57eSKimberly Brown 	/*
810396ae57eSKimberly Brown 	 * Guest to host interrupts caused by the outbound ring buffer changing
811396ae57eSKimberly Brown 	 * from empty to not empty.
812396ae57eSKimberly Brown 	 */
813396ae57eSKimberly Brown 	u64 intr_out_empty;
814396ae57eSKimberly Brown 
815396ae57eSKimberly Brown 	/*
816396ae57eSKimberly Brown 	 * Indicates that a full outbound ring buffer was encountered. The flag
817396ae57eSKimberly Brown 	 * is set to true when a full outbound ring buffer is encountered and
818396ae57eSKimberly Brown 	 * set to false when a write to the outbound ring buffer is completed.
819396ae57eSKimberly Brown 	 */
820396ae57eSKimberly Brown 	bool out_full_flag;
821396ae57eSKimberly Brown 
82251c6ce2aSStephen Hemminger 	/* Channel callback's invoked in softirq context */
823631e63a9SStephen Hemminger 	struct tasklet_struct callback_event;
82446a97191SGreg Kroah-Hartman 	void (*onchannel_callback)(void *context);
82546a97191SGreg Kroah-Hartman 	void *channel_callback_context;
826132368bdSK. Y. Srinivasan 
8277769e18cSAndrea Parri (Microsoft) 	void (*change_target_cpu_callback)(struct vmbus_channel *channel,
8287769e18cSAndrea Parri (Microsoft) 			u32 old, u32 new);
8297769e18cSAndrea Parri (Microsoft) 
830132368bdSK. Y. Srinivasan 	/*
8319403b66eSAndrea Parri (Microsoft) 	 * Synchronize channel scheduling and channel removal; see the inline
8329403b66eSAndrea Parri (Microsoft) 	 * comments in vmbus_chan_sched() and vmbus_reset_channel_cb().
8339403b66eSAndrea Parri (Microsoft) 	 */
8349403b66eSAndrea Parri (Microsoft) 	spinlock_t sched_lock;
8359403b66eSAndrea Parri (Microsoft) 
8369403b66eSAndrea Parri (Microsoft) 	/*
837b71e3282SStephen Hemminger 	 * A channel can be marked for one of three modes of reading:
838b71e3282SStephen Hemminger 	 *   BATCHED - callback called from tasklet and should read
839b71e3282SStephen Hemminger 	 *            channel until empty. Interrupts from the host
840b71e3282SStephen Hemminger 	 *            are masked while read is in process (default).
841b71e3282SStephen Hemminger 	 *   DIRECT - callback called from tasklet (softirq).
842b71e3282SStephen Hemminger 	 *   ISR - callback called in interrupt context and must
843b71e3282SStephen Hemminger 	 *         invoke its own deferred processing.
844b71e3282SStephen Hemminger 	 *         Host interrupts are disabled and must be re-enabled
845b71e3282SStephen Hemminger 	 *         when ring is empty.
846132368bdSK. Y. Srinivasan 	 */
847b71e3282SStephen Hemminger 	enum hv_callback_mode {
848b71e3282SStephen Hemminger 		HV_CALL_BATCHED,
849b71e3282SStephen Hemminger 		HV_CALL_DIRECT,
850b71e3282SStephen Hemminger 		HV_CALL_ISR
851b71e3282SStephen Hemminger 	} callback_mode;
852b3bf60c7SK. Y. Srinivasan 
853b3bf60c7SK. Y. Srinivasan 	bool is_dedicated_interrupt;
85405784171SVitaly Kuznetsov 	u64 sig_event;
855abbf3b2aSK. Y. Srinivasan 
856abbf3b2aSK. Y. Srinivasan 	/*
8575bf74682SAndrea Parri (Microsoft) 	 * Starting with win8, this field will be used to specify the
8585bf74682SAndrea Parri (Microsoft) 	 * target CPU on which to deliver the interrupt for the host
8595bf74682SAndrea Parri (Microsoft) 	 * to guest communication.
8605bf74682SAndrea Parri (Microsoft) 	 *
8615bf74682SAndrea Parri (Microsoft) 	 * Prior to win8, incoming channel interrupts would only be
8625bf74682SAndrea Parri (Microsoft) 	 * delivered on CPU 0. Setting this value to 0 would preserve
8635bf74682SAndrea Parri (Microsoft) 	 * the earlier behavior.
864abbf3b2aSK. Y. Srinivasan 	 */
865d3ba720dSK. Y. Srinivasan 	u32 target_cpu;
8661f656ff3SK. Y. Srinivasan 	/*
867e68d2971SK. Y. Srinivasan 	 * Support for sub-channels. For high performance devices,
868e68d2971SK. Y. Srinivasan 	 * it will be useful to have multiple sub-channels to support
869e68d2971SK. Y. Srinivasan 	 * a scalable communication infrastructure with the host.
8706bbdc3dbSBhaskar Chowdhury 	 * The support for sub-channels is implemented as an extension
871e68d2971SK. Y. Srinivasan 	 * to the current infrastructure.
872e68d2971SK. Y. Srinivasan 	 * The initial offer is considered the primary channel and this
873e68d2971SK. Y. Srinivasan 	 * offer message will indicate if the host supports sub-channels.
8746bbdc3dbSBhaskar Chowdhury 	 * The guest is free to ask for sub-channels to be offered and can
875e68d2971SK. Y. Srinivasan 	 * open these sub-channels as a normal "primary" channel. However,
876e68d2971SK. Y. Srinivasan 	 * all sub-channels will have the same type and instance guids as the
877e68d2971SK. Y. Srinivasan 	 * primary channel. Requests sent on a given channel will result in a
878e68d2971SK. Y. Srinivasan 	 * response on the same channel.
879e68d2971SK. Y. Srinivasan 	 */
880e68d2971SK. Y. Srinivasan 
881e68d2971SK. Y. Srinivasan 	/*
882e68d2971SK. Y. Srinivasan 	 * Sub-channel creation callback. This callback will be called in
883e68d2971SK. Y. Srinivasan 	 * process context when a sub-channel offer is received from the host.
884e68d2971SK. Y. Srinivasan 	 * The guest can open the sub-channel in the context of this callback.
885e68d2971SK. Y. Srinivasan 	 */
886e68d2971SK. Y. Srinivasan 	void (*sc_creation_callback)(struct vmbus_channel *new_sc);
887e68d2971SK. Y. Srinivasan 
88867fae053SVitaly Kuznetsov 	/*
889499e8401SDexuan Cui 	 * Channel rescind callback. Some channels (the hvsock ones), need to
890499e8401SDexuan Cui 	 * register a callback which is invoked in vmbus_onoffer_rescind().
891499e8401SDexuan Cui 	 */
892499e8401SDexuan Cui 	void (*chn_rescind_callback)(struct vmbus_channel *channel);
893499e8401SDexuan Cui 
894499e8401SDexuan Cui 	/*
895e68d2971SK. Y. Srinivasan 	 * All Sub-channels of a primary channel are linked here.
896e68d2971SK. Y. Srinivasan 	 */
897e68d2971SK. Y. Srinivasan 	struct list_head sc_list;
898e68d2971SK. Y. Srinivasan 	/*
899e68d2971SK. Y. Srinivasan 	 * The primary channel this sub-channel belongs to.
900e68d2971SK. Y. Srinivasan 	 * This will be NULL for the primary channel.
901e68d2971SK. Y. Srinivasan 	 */
902e68d2971SK. Y. Srinivasan 	struct vmbus_channel *primary_channel;
9038a7206a8SK. Y. Srinivasan 	/*
9048a7206a8SK. Y. Srinivasan 	 * Support per-channel state for use by vmbus drivers.
9058a7206a8SK. Y. Srinivasan 	 */
9068a7206a8SK. Y. Srinivasan 	void *per_channel_state;
9078200f208SStephen Hemminger 
9088200f208SStephen Hemminger 	/*
9098200f208SStephen Hemminger 	 * Defer freeing channel until after all cpu's have
9108200f208SStephen Hemminger 	 * gone through grace period.
9118200f208SStephen Hemminger 	 */
9128200f208SStephen Hemminger 	struct rcu_head rcu;
9138200f208SStephen Hemminger 
9148599846dSK. Y. Srinivasan 	/*
915c2e5df61SStephen Hemminger 	 * For sysfs per-channel properties.
916c2e5df61SStephen Hemminger 	 */
917c2e5df61SStephen Hemminger 	struct kobject			kobj;
918c2e5df61SStephen Hemminger 
919c2e5df61SStephen Hemminger 	/*
9203724287cSK. Y. Srinivasan 	 * For performance critical channels (storage, networking
9213724287cSK. Y. Srinivasan 	 * etc,), Hyper-V has a mechanism to enhance the throughput
9223724287cSK. Y. Srinivasan 	 * at the expense of latency:
9233724287cSK. Y. Srinivasan 	 * When the host is to be signaled, we just set a bit in a shared page
9243724287cSK. Y. Srinivasan 	 * and this bit will be inspected by the hypervisor within a certain
9253724287cSK. Y. Srinivasan 	 * window and if the bit is set, the host will be signaled. The window
9263724287cSK. Y. Srinivasan 	 * of time is the monitor latency - currently around 100 usecs. This
9273724287cSK. Y. Srinivasan 	 * mechanism improves throughput by:
9283724287cSK. Y. Srinivasan 	 *
9293724287cSK. Y. Srinivasan 	 * A) Making the host more efficient - each time it wakes up,
930fce444faSOlaf Hering 	 *    potentially it will process a larger number of packets. The
9313724287cSK. Y. Srinivasan 	 *    monitor latency allows a batch to build up.
9323724287cSK. Y. Srinivasan 	 * B) By deferring the hypercall to signal, we will also minimize
9333724287cSK. Y. Srinivasan 	 *    the interrupts.
9343724287cSK. Y. Srinivasan 	 *
9353724287cSK. Y. Srinivasan 	 * Clearly, these optimizations improve throughput at the expense of
9363724287cSK. Y. Srinivasan 	 * latency. Furthermore, since the channel is shared for both
9373724287cSK. Y. Srinivasan 	 * control and data messages, control messages currently suffer
9386bbdc3dbSBhaskar Chowdhury 	 * unnecessary latency adversely impacting performance and boot
9393724287cSK. Y. Srinivasan 	 * time. To fix this issue, permit tagging the channel as being
9403724287cSK. Y. Srinivasan 	 * in "low latency" mode. In this mode, we will bypass the monitor
9413724287cSK. Y. Srinivasan 	 * mechanism.
9423724287cSK. Y. Srinivasan 	 */
9433724287cSK. Y. Srinivasan 	bool low_latency;
944fe760e4dSK. Y. Srinivasan 
9456f3d791fSK. Y. Srinivasan 	bool probe_done;
9466f3d791fSK. Y. Srinivasan 
94737c2578cSDexuan Cui 	/*
948afaa33daSAndrea Parri (Microsoft) 	 * Cache the device ID here for easy access; this is useful, in
949afaa33daSAndrea Parri (Microsoft) 	 * particular, in situations where the channel's device_obj has
950afaa33daSAndrea Parri (Microsoft) 	 * not been allocated/initialized yet.
951afaa33daSAndrea Parri (Microsoft) 	 */
952afaa33daSAndrea Parri (Microsoft) 	u16 device_id;
953afaa33daSAndrea Parri (Microsoft) 
954afaa33daSAndrea Parri (Microsoft) 	/*
95537c2578cSDexuan Cui 	 * We must offload the handling of the primary/sub channels
95637c2578cSDexuan Cui 	 * from the single-threaded vmbus_connection.work_queue to
95737c2578cSDexuan Cui 	 * two different workqueues, otherwise we can block
95837c2578cSDexuan Cui 	 * vmbus_connection.work_queue and hang: see vmbus_process_offer().
95937c2578cSDexuan Cui 	 */
96037c2578cSDexuan Cui 	struct work_struct add_channel_work;
961396ae57eSKimberly Brown 
962396ae57eSKimberly Brown 	/*
963396ae57eSKimberly Brown 	 * Guest to host interrupts caused by the inbound ring buffer changing
964396ae57eSKimberly Brown 	 * from full to not full while a packet is waiting.
965396ae57eSKimberly Brown 	 */
966396ae57eSKimberly Brown 	u64 intr_in_full;
967396ae57eSKimberly Brown 
968396ae57eSKimberly Brown 	/*
969396ae57eSKimberly Brown 	 * The total number of write operations that encountered a full
970396ae57eSKimberly Brown 	 * outbound ring buffer.
971396ae57eSKimberly Brown 	 */
972396ae57eSKimberly Brown 	u64 out_full_total;
973396ae57eSKimberly Brown 
974396ae57eSKimberly Brown 	/*
975396ae57eSKimberly Brown 	 * The number of write operations that were the first to encounter a
976396ae57eSKimberly Brown 	 * full outbound ring buffer.
977396ae57eSKimberly Brown 	 */
978396ae57eSKimberly Brown 	u64 out_full_first;
979af9ca6f9SBranden Bonaby 
980af9ca6f9SBranden Bonaby 	/* enabling/disabling fuzz testing on the channel (default is false)*/
981af9ca6f9SBranden Bonaby 	bool fuzz_testing_state;
982af9ca6f9SBranden Bonaby 
983af9ca6f9SBranden Bonaby 	/*
984af9ca6f9SBranden Bonaby 	 * Interrupt delay will delay the guest from emptying the ring buffer
985af9ca6f9SBranden Bonaby 	 * for a specific amount of time. The delay is in microseconds and will
986af9ca6f9SBranden Bonaby 	 * be between 1 to a maximum of 1000, its default is 0 (no delay).
987af9ca6f9SBranden Bonaby 	 * The  Message delay will delay guest reading on a per message basis
988af9ca6f9SBranden Bonaby 	 * in microseconds between 1 to 1000 with the default being 0
989af9ca6f9SBranden Bonaby 	 * (no delay).
990af9ca6f9SBranden Bonaby 	 */
991af9ca6f9SBranden Bonaby 	u32 fuzz_testing_interrupt_delay;
992af9ca6f9SBranden Bonaby 	u32 fuzz_testing_message_delay;
993af9ca6f9SBranden Bonaby 
994bf5fd8caSAndrea Parri (Microsoft) 	/* callback to generate a request ID from a request address */
995bf5fd8caSAndrea Parri (Microsoft) 	u64 (*next_request_id_callback)(struct vmbus_channel *channel, u64 rqst_addr);
996bf5fd8caSAndrea Parri (Microsoft) 	/* callback to retrieve a request address from a request ID */
997bf5fd8caSAndrea Parri (Microsoft) 	u64 (*request_addr_callback)(struct vmbus_channel *channel, u64 rqst_id);
998bf5fd8caSAndrea Parri (Microsoft) 
999e8b7db38SAndres Beltran 	/* request/transaction ids for VMBus */
1000e8b7db38SAndres Beltran 	struct vmbus_requestor requestor;
1001e8b7db38SAndres Beltran 	u32 rqstor_size;
1002adae1e93SAndres Beltran 
1003adae1e93SAndres Beltran 	/* The max size of a packet on this channel */
1004adae1e93SAndres Beltran 	u32 max_pkt_size;
100546a97191SGreg Kroah-Hartman 
100646a97191SGreg Kroah-Hartman 	/* function to mmap ring buffer memory to the channel's sysfs ring attribute */
1007b91eaf72SAndrea Parri (Microsoft) 	int (*mmap_ring_buffer)(struct vmbus_channel *channel, struct vm_area_struct *vma);
1008b91eaf72SAndrea Parri (Microsoft) 
1009b91eaf72SAndrea Parri (Microsoft) 	/* boolean to control visibility of sysfs for ring buffer */
1010b91eaf72SAndrea Parri (Microsoft) 	bool ring_sysfs_visible;
1011b91eaf72SAndrea Parri (Microsoft) };
1012b91eaf72SAndrea Parri (Microsoft) 
/*
 * Acquire channel->requestor.req_lock, saving IRQ state in flags.
 * Pair with unlock_requestor().
 */
1013b91eaf72SAndrea Parri (Microsoft) #define lock_requestor(channel, flags)					\
1014b91eaf72SAndrea Parri (Microsoft) do {									\
1015b91eaf72SAndrea Parri (Microsoft) 	struct vmbus_requestor *rqstor = &(channel)->requestor;		\
1016b91eaf72SAndrea Parri (Microsoft) 									\
1017b91eaf72SAndrea Parri (Microsoft) 	spin_lock_irqsave(&rqstor->req_lock, flags);			\
1018b91eaf72SAndrea Parri (Microsoft) } while (0)
1019b91eaf72SAndrea Parri (Microsoft) 
unlock_requestor(struct vmbus_channel * channel,unsigned long flags)1020b91eaf72SAndrea Parri (Microsoft) static __always_inline void unlock_requestor(struct vmbus_channel *channel,
1021b91eaf72SAndrea Parri (Microsoft) 					     unsigned long flags)
1022bf5fd8caSAndrea Parri (Microsoft) {
10230aadb6a7SAndrea Parri (Microsoft) 	struct vmbus_requestor *rqstor = &channel->requestor;
10240aadb6a7SAndrea Parri (Microsoft) 
10250aadb6a7SAndrea Parri (Microsoft) 	spin_unlock_irqrestore(&rqstor->req_lock, flags);
10260aadb6a7SAndrea Parri (Microsoft) }
1027bf5fd8caSAndrea Parri (Microsoft) 
1028e8b7db38SAndres Beltran u64 vmbus_next_request_id(struct vmbus_channel *channel, u64 rqst_addr);
1029da795eb2SAndrea Parri (Microsoft) u64 __vmbus_request_addr_match(struct vmbus_channel *channel, u64 trans_id,
1030da795eb2SAndrea Parri (Microsoft) 			       u64 rqst_addr);
1031da795eb2SAndrea Parri (Microsoft) u64 vmbus_request_addr_match(struct vmbus_channel *channel, u64 trans_id,
1032da795eb2SAndrea Parri (Microsoft) 			     u64 rqst_addr);
1033da795eb2SAndrea Parri (Microsoft) u64 vmbus_request_addr(struct vmbus_channel *channel, u64 trans_id);
1034e8d6ca02SDexuan Cui 
is_hvsock_offer(const struct vmbus_channel_offer_channel * o)1035e8d6ca02SDexuan Cui static inline bool is_hvsock_offer(const struct vmbus_channel_offer_channel *o)
1036da795eb2SAndrea Parri (Microsoft) {
1037e8d6ca02SDexuan Cui 	return !!(o->offer.chn_flags & VMBUS_CHANNEL_TLNPI_PROVIDER_OFFER);
1038e8d6ca02SDexuan Cui }
1039ed56ef67SDexuan Cui 
/* True when the channel's offer came from the hvsock (TLNPI) provider. */
is_hvsock_channel(const struct vmbus_channel * c)1040ed56ef67SDexuan Cui static inline bool is_hvsock_channel(const struct vmbus_channel *c)
1041ed56ef67SDexuan Cui {
1042ed56ef67SDexuan Cui 	return is_hvsock_offer(&c->offermsg);
1043ed56ef67SDexuan Cui }
1044b71e3282SStephen Hemminger 
is_sub_channel(const struct vmbus_channel * c)1045b71e3282SStephen Hemminger static inline bool is_sub_channel(const struct vmbus_channel *c)
1046132368bdSK. Y. Srinivasan {
1047b71e3282SStephen Hemminger 	return c->offermsg.offer.sub_channel_index != 0;
1048132368bdSK. Y. Srinivasan }
1049132368bdSK. Y. Srinivasan 
/* Select how the channel's callback is invoked; see enum hv_callback_mode. */
set_channel_read_mode(struct vmbus_channel * c,enum hv_callback_mode mode)10508a7206a8SK. Y. Srinivasan static inline void set_channel_read_mode(struct vmbus_channel *c,
10518a7206a8SK. Y. Srinivasan 					enum hv_callback_mode mode)
10528a7206a8SK. Y. Srinivasan {
10538a7206a8SK. Y. Srinivasan 	c->callback_mode = mode;
10548a7206a8SK. Y. Srinivasan }
10558a7206a8SK. Y. Srinivasan 
/* Stash driver-private state on the channel (see vmbus_channel::per_channel_state). */
set_per_channel_state(struct vmbus_channel * c,void * s)10568a7206a8SK. Y. Srinivasan static inline void set_per_channel_state(struct vmbus_channel *c, void *s)
10578a7206a8SK. Y. Srinivasan {
10588a7206a8SK. Y. Srinivasan 	c->per_channel_state = s;
10598a7206a8SK. Y. Srinivasan }
10603c75354dSDexuan Cui 
/* Retrieve the driver-private state stored by set_per_channel_state(). */
get_per_channel_state(struct vmbus_channel * c)10613c75354dSDexuan Cui static inline void *get_per_channel_state(struct vmbus_channel *c)
10623c75354dSDexuan Cui {
1063396ae57eSKimberly Brown 	return c->per_channel_state;
1064396ae57eSKimberly Brown }
1065396ae57eSKimberly Brown 
/*
 * Program the outbound ring's pending_send_sz and maintain the channel's
 * full-ring statistics (out_full_total/out_full_first/out_full_flag -- see
 * their descriptions in struct vmbus_channel).  A non-zero size counts as an
 * encounter with a full outbound ring; the counters are updated under the
 * outbound ring lock.  Size zero clears the "currently full" flag.
 */
set_channel_pending_send_size(struct vmbus_channel * c,u32 size)1066396ae57eSKimberly Brown static inline void set_channel_pending_send_size(struct vmbus_channel *c,
1067396ae57eSKimberly Brown 						 u32 size)
1068396ae57eSKimberly Brown {
1069396ae57eSKimberly Brown 	unsigned long flags;
1070396ae57eSKimberly Brown 
1071396ae57eSKimberly Brown 	if (size) {
1072396ae57eSKimberly Brown 		spin_lock_irqsave(&c->outbound.ring_lock, flags);
1073396ae57eSKimberly Brown 		++c->out_full_total;
1074396ae57eSKimberly Brown 
		/* Count only the first write to hit a full ring until it drains */
1075396ae57eSKimberly Brown 		if (!c->out_full_flag) {
1076396ae57eSKimberly Brown 			++c->out_full_first;
1077396ae57eSKimberly Brown 			c->out_full_flag = true;
10783c75354dSDexuan Cui 		}
10793c75354dSDexuan Cui 		spin_unlock_irqrestore(&c->outbound.ring_lock, flags);
10803c75354dSDexuan Cui 	} else {
10815cc41500SVitaly Kuznetsov 		c->out_full_flag = false;
108246a97191SGreg Kroah-Hartman 	}
108346a97191SGreg Kroah-Hartman 
108446a97191SGreg Kroah-Hartman 	c->outbound.ring_buffer->pending_send_sz = size;
1085e68d2971SK. Y. Srinivasan }
1086e68d2971SK. Y. Srinivasan 
/* Dispatch an incoming VMBus channel message to its handler. */
void vmbus_onmessage(struct vmbus_channel_message_header *hdr);

/* Ask the host to send its channel offers. */
int vmbus_request_offers(void);

/*
 * APIs for managing sub-channels.
 */

/* Callback invoked when the host offers a new sub-channel of @primary_channel. */
void vmbus_set_sc_create_callback(struct vmbus_channel *primary_channel,
			void (*sc_cr_cb)(struct vmbus_channel *new_sc));

/* Callback invoked when the host rescinds @channel. */
void vmbus_set_chn_rescind_callback(struct vmbus_channel *channel,
		void (*chn_rescind_cb)(struct vmbus_channel *));
/* The format must be the same as struct vmdata_gpa_direct */
struct vmbus_channel_packet_page_buffer {
	u16 type;
	u16 dataoffset8;	/* offset in 8-byte units (cf. hv_pkt_data()) */
	u16 length8;		/* length in 8-byte units */
	u16 flags;
	u64 transactionid;
	u32 reserved;
	u32 rangecount;		/* number of valid entries in range[] */
	struct hv_page_buffer range[MAX_PAGE_BUFFER_COUNT];
} __packed;
111246a97191SGreg Kroah-Hartman 
/* The format must be the same as struct vmdata_gpa_direct */
struct vmbus_channel_packet_multipage_buffer {
	u16 type;
	u16 dataoffset8;	/* offset in 8-byte units */
	u16 length8;		/* length in 8-byte units */
	u16 flags;
	u64 transactionid;
	u32 reserved;
	u32 rangecount;		/* Always 1 in this case */
	struct hv_multipage_buffer range;
} __packed;
1124d61031eeSK. Y. Srinivasan 
/* The format must be the same as struct vmdata_gpa_direct */
struct vmbus_packet_mpb_array {
	u16 type;
	u16 dataoffset8;	/* offset in 8-byte units */
	u16 length8;		/* length in 8-byte units */
	u16 flags;
	u64 transactionid;
	u32 reserved;
	u32 rangecount;         /* Always 1 in this case */
	struct hv_mpb_array range;
} __packed;
1136ae6935edSStephen Hemminger 
/* Allocate / free a channel's send and receive ring buffers. */
int vmbus_alloc_ring(struct vmbus_channel *channel,
		     u32 send_size, u32 recv_size);
void vmbus_free_ring(struct vmbus_channel *channel);

/* Connect / disconnect previously allocated rings to the host. */
int vmbus_connect_ring(struct vmbus_channel *channel,
		       void (*onchannel_callback)(void *context),
		       void *context);
int vmbus_disconnect_ring(struct vmbus_channel *channel);

/*
 * Open a channel: allocate rings of the given sizes, send @userdata in the
 * open request, and register the receive callback.
 */
extern int vmbus_open(struct vmbus_channel *channel,
			    u32 send_ringbuffersize,
			    u32 recv_ringbuffersize,
			    void *userdata,
			    u32 userdatalen,
			    void (*onchannel_callback)(void *context),
			    void *context);

extern void vmbus_close(struct vmbus_channel *channel);

/* Like vmbus_sendpacket() but also returns the assigned transaction id. */
extern int vmbus_sendpacket_getid(struct vmbus_channel *channel,
				  void *buffer,
				  u32 bufferLen,
				  u64 requestid,
				  u64 *trans_id,
				  enum vmbus_packet_type type,
				  u32 flags);
/* Send an inline data packet on the channel. */
extern int vmbus_sendpacket(struct vmbus_channel *channel,
				  void *buffer,
				  u32 bufferLen,
				  u64 requestid,
				  enum vmbus_packet_type type,
				  u32 flags);

/* Send a GPA-direct packet whose multi-page buffer descriptor is caller-built. */
extern int vmbus_sendpacket_mpb_desc(struct vmbus_channel *channel,
				     struct vmbus_packet_mpb_array *mpb,
				     u32 desc_size,
				     void *buffer,
				     u32 bufferlen,
				     u64 requestid);

/* Establish / tear down a GPADL (guest physical address descriptor list). */
extern int vmbus_establish_gpadl(struct vmbus_channel *channel,
				      void *kbuffer,
				      u32 size,
				      struct vmbus_gpadl *gpadl);

extern int vmbus_teardown_gpadl(struct vmbus_channel *channel,
				     struct vmbus_gpadl *gpadl);

void vmbus_reset_channel_cb(struct vmbus_channel *channel);

/* Receive one packet's payload; *buffer_actual_len reports the real size. */
extern int vmbus_recvpacket(struct vmbus_channel *channel,
				  void *buffer,
				  u32 bufferlen,
				  u32 *buffer_actual_len,
				  u64 *requestid);

/* As vmbus_recvpacket() but the packet header is returned as well. */
extern int vmbus_recvpacket_raw(struct vmbus_channel *channel,
				     void *buffer,
				     u32 bufferlen,
				     u32 *buffer_actual_len,
				     u64 *requestid);
11988981da32SDexuan Cui 
/* Base driver object */
struct hv_driver {
	const char *name;

	/*
	 * A hvsock offer, which has a VMBUS_CHANNEL_TLNPI_PROVIDER_OFFER
	 * channel flag, actually doesn't mean a synthetic device because the
	 * offer's if_type/if_instance can change for every new hvsock
	 * connection.
	 *
	 * However, to facilitate the notification of new-offer/rescind-offer
	 * from vmbus driver to hvsock driver, we can handle hvsock offer as
	 * a special vmbus device, and hence we need the below flag to
	 * indicate if the driver is the hvsock driver or not: we need to
	 * specially treat the hvosck offer & driver in vmbus_match().
	 */
	bool hvsock;

	/* the device type supported by this driver */
	guid_t dev_type;
	const struct hv_vmbus_device_id *id_table;

	struct device_driver driver;

	/* dynamic device GUID's */
	struct  {
		spinlock_t lock;	/* protects the list below */
		struct list_head list;
	} dynids;

	/* driver-core entry points, called with the matched hv_device */
	int (*probe)(struct hv_device *, const struct hv_vmbus_device_id *);
	void (*remove)(struct hv_device *dev);
	void (*shutdown)(struct hv_device *);

	int (*suspend)(struct hv_device *);
	int (*resume)(struct hv_device *);

};
123746a97191SGreg Kroah-Hartman 
/* Base device object */
struct hv_device {
	/* the device type id of this device */
	guid_t dev_type;

	/* the device instance id of this device */
	guid_t dev_instance;
	u16 vendor_id;
	u16 device_id;

	struct device device;	/* embedded driver-core device */
	/*
	 * Driver name to force a match.  Do not set directly, because core
	 * frees it.  Use driver_set_override() to set or clear it.
	 */
	const char *driver_override;

	struct vmbus_channel *channel;	/* primary channel of this device */
	struct kset	     *channels_kset;
	struct device_dma_parameters dma_parms;
	u64 dma_mask;

	/* place holder to keep track of the dir for hv device in debugfs */
	struct dentry *debug_dir;

};
126446a97191SGreg Kroah-Hartman 
126546a97191SGreg Kroah-Hartman 
/* Map an embedded struct device / device_driver back to its Hyper-V wrapper. */
#define device_to_hv_device(d)	container_of_const(d, struct hv_device, device)
#define drv_to_hv_drv(d)	container_of_const(d, struct hv_driver, driver)
126846a97191SGreg Kroah-Hartman 
hv_set_drvdata(struct hv_device * dev,void * data)126946a97191SGreg Kroah-Hartman static inline void hv_set_drvdata(struct hv_device *dev, void *data)
127046a97191SGreg Kroah-Hartman {
127146a97191SGreg Kroah-Hartman 	dev_set_drvdata(&dev->device, data);
127246a97191SGreg Kroah-Hartman }
12734827ee1dSStephen Hemminger 
hv_get_drvdata(struct hv_device * dev)12744827ee1dSStephen Hemminger static inline void *hv_get_drvdata(struct hv_device *dev)
12754827ee1dSStephen Hemminger {
12764827ee1dSStephen Hemminger 	return dev_get_drvdata(&dev->device);
12774827ee1dSStephen Hemminger }
12784827ee1dSStephen Hemminger 
/* Snapshot of a ring buffer's state, for debugfs/sysfs reporting. */
struct hv_ring_buffer_debug_info {
	u32 current_interrupt_mask;
	u32 current_read_index;
	u32 current_write_index;
	u32 bytes_avail_toread;
	u32 bytes_avail_towrite;
};


/* Fill @debug_info from @ring_info; returns 0 or a negative errno. */
int hv_ringbuffer_get_debuginfo(struct hv_ring_buffer_info *ring_info,
				struct hv_ring_buffer_debug_info *debug_info);

bool hv_ringbuffer_spinlock_busy(struct vmbus_channel *channel);
129246a97191SGreg Kroah-Hartman 
/* Vmbus interface */
/* Register a VMBus driver on behalf of the calling module. */
#define vmbus_driver_register(driver)	\
	__vmbus_driver_register(driver, THIS_MODULE, KBUILD_MODNAME)
int __must_check __vmbus_driver_register(struct hv_driver *hv_driver,
					 struct module *owner,
					 const char *mod_name);
void vmbus_driver_unregister(struct hv_driver *hv_driver);

void vmbus_hvsock_device_unregister(struct vmbus_channel *channel);

/* Claim a chunk of MMIO space for @device_obj within [min, max]. */
int vmbus_allocate_mmio(struct resource **new, struct hv_device *device_obj,
			resource_size_t min, resource_size_t max,
			resource_size_t size, resource_size_t align,
			bool fb_overlap_ok);
void vmbus_free_mmio(resource_size_t start, resource_size_t size);
13087fb96565SK. Y. Srinivasan 
/*
 * GUID definitions of various offer types - services offered to the guest.
 * Each macro expands to a .guid designated initializer for use in
 * hv_vmbus_device_id tables; the byte values are fixed by the host protocol.
 */

/*
 * Network GUID
 * {f8615163-df3e-46c5-913f-f2d2f965ed0e}
 */
#define HV_NIC_GUID \
	.guid = GUID_INIT(0xf8615163, 0xdf3e, 0x46c5, 0x91, 0x3f, \
			  0xf2, 0xd2, 0xf9, 0x65, 0xed, 0x0e)

/*
 * IDE GUID
 * {32412632-86cb-44a2-9b5c-50d1417354f5}
 */
#define HV_IDE_GUID \
	.guid = GUID_INIT(0x32412632, 0x86cb, 0x44a2, 0x9b, 0x5c, \
			  0x50, 0xd1, 0x41, 0x73, 0x54, 0xf5)

/*
 * SCSI GUID
 * {ba6163d9-04a1-4d29-b605-72e2ffb1dc7f}
 */
#define HV_SCSI_GUID \
	.guid = GUID_INIT(0xba6163d9, 0x04a1, 0x4d29, 0xb6, 0x05, \
			  0x72, 0xe2, 0xff, 0xb1, 0xdc, 0x7f)

/*
 * Shutdown GUID
 * {0e0b6031-5213-4934-818b-38d90ced39db}
 */
#define HV_SHUTDOWN_GUID \
	.guid = GUID_INIT(0x0e0b6031, 0x5213, 0x4934, 0x81, 0x8b, \
			  0x38, 0xd9, 0x0c, 0xed, 0x39, 0xdb)

/*
 * Time Synch GUID
 * {9527E630-D0AE-497b-ADCE-E80AB0175CAF}
 */
#define HV_TS_GUID \
	.guid = GUID_INIT(0x9527e630, 0xd0ae, 0x497b, 0xad, 0xce, \
			  0xe8, 0x0a, 0xb0, 0x17, 0x5c, 0xaf)

/*
 * Heartbeat GUID
 * {57164f39-9115-4e78-ab55-382f3bd5422d}
 */
#define HV_HEART_BEAT_GUID \
	.guid = GUID_INIT(0x57164f39, 0x9115, 0x4e78, 0xab, 0x55, \
			  0x38, 0x2f, 0x3b, 0xd5, 0x42, 0x2d)

/*
 * KVP GUID
 * {a9a0f4e7-5a45-4d96-b827-8a841e8c03e6}
 */
#define HV_KVP_GUID \
	.guid = GUID_INIT(0xa9a0f4e7, 0x5a45, 0x4d96, 0xb8, 0x27, \
			  0x8a, 0x84, 0x1e, 0x8c, 0x03, 0xe6)

/*
 * Dynamic memory GUID
 * {525074dc-8985-46e2-8057-a307dc18a502}
 */
#define HV_DM_GUID \
	.guid = GUID_INIT(0x525074dc, 0x8985, 0x46e2, 0x80, 0x57, \
			  0xa3, 0x07, 0xdc, 0x18, 0xa5, 0x02)

/*
 * Mouse GUID
 * {cfa8b69e-5b4a-4cc0-b98b-8ba1a1f3f95a}
 */
#define HV_MOUSE_GUID \
	.guid = GUID_INIT(0xcfa8b69e, 0x5b4a, 0x4cc0, 0xb9, 0x8b, \
			  0x8b, 0xa1, 0xa1, 0xf3, 0xf9, 0x5a)

/*
 * Keyboard GUID
 * {f912ad6d-2b17-48ea-bd65-f927a61c7684}
 */
#define HV_KBD_GUID \
	.guid = GUID_INIT(0xf912ad6d, 0x2b17, 0x48ea, 0xbd, 0x65, \
			  0xf9, 0x27, 0xa6, 0x1c, 0x76, 0x84)

/*
 * VSS (Backup/Restore) GUID
 */
#define HV_VSS_GUID \
	.guid = GUID_INIT(0x35fa2e29, 0xea23, 0x4236, 0x96, 0xae, \
			  0x3a, 0x6e, 0xba, 0xcb, 0xa4, 0x40)
/*
 * Synthetic Video GUID
 * {DA0A7802-E377-4aac-8E77-0558EB1073F8}
 */
#define HV_SYNTHVID_GUID \
	.guid = GUID_INIT(0xda0a7802, 0xe377, 0x4aac, 0x8e, 0x77, \
			  0x05, 0x58, 0xeb, 0x10, 0x73, 0xf8)

/*
 * Synthetic FC GUID
 * {2f9bcc4a-0069-4af3-b76b-6fd0be528cda}
 */
#define HV_SYNTHFC_GUID \
	.guid = GUID_INIT(0x2f9bcc4a, 0x0069, 0x4af3, 0xb7, 0x6b, \
			  0x6f, 0xd0, 0xbe, 0x52, 0x8c, 0xda)

/*
 * Guest File Copy Service
 * {34D14BE3-DEE4-41c8-9AE7-6B174977C192}
 */

#define HV_FCOPY_GUID \
	.guid = GUID_INIT(0x34d14be3, 0xdee4, 0x41c8, 0x9a, 0xe7, \
			  0x6b, 0x17, 0x49, 0x77, 0xc1, 0x92)

/*
 * NetworkDirect. This is the guest RDMA service.
 * {8c2eaf3d-32a7-4b09-ab99-bd1f1c86b501}
 */
#define HV_ND_GUID \
	.guid = GUID_INIT(0x8c2eaf3d, 0x32a7, 0x4b09, 0xab, 0x99, \
			  0xbd, 0x1f, 0x1c, 0x86, 0xb5, 0x01)

/*
 * PCI Express Pass Through
 * {44C4F61D-4444-4400-9D52-802E27EDE19F}
 */

#define HV_PCIE_GUID \
	.guid = GUID_INIT(0x44c4f61d, 0x4444, 0x4400, 0x9d, 0x52, \
			  0x80, 0x2e, 0x27, 0xed, 0xe1, 0x9f)

/*
 * Linux doesn't support these 4 devices: the first two are for
 * Automatic Virtual Machine Activation, the third is for
 * Remote Desktop Virtualization, and the fourth is Initial
 * Machine Configuration (IMC) used only by Windows guests.
 * {f8e65716-3cb3-4a06-9a60-1889c5cccab5}
 * {3375baf4-9e15-4b30-b765-67acb10d607b}
 * {276aacf4-ac15-426c-98dd-7521ad3f01fe}
 * {c376c1c3-d276-48d2-90a9-c04748072c60}
 */

#define HV_AVMA1_GUID \
	.guid = GUID_INIT(0xf8e65716, 0x3cb3, 0x4a06, 0x9a, 0x60, \
			  0x18, 0x89, 0xc5, 0xcc, 0xca, 0xb5)

#define HV_AVMA2_GUID \
	.guid = GUID_INIT(0x3375baf4, 0x9e15, 0x4b30, 0xb7, 0x65, \
			  0x67, 0xac, 0xb1, 0x0d, 0x60, 0x7b)

#define HV_RDV_GUID \
	.guid = GUID_INIT(0x276aacf4, 0xac15, 0x426c, 0x98, 0xdd, \
			  0x75, 0x21, 0xad, 0x3f, 0x01, 0xfe)

#define HV_IMC_GUID \
	.guid = GUID_INIT(0xc376c1c3, 0xd276, 0x48d2, 0x90, 0xa9, \
			  0xc0, 0x47, 0x48, 0x07, 0x2c, 0x60)
146746a97191SGreg Kroah-Hartman 
/*
 * Common header for Hyper-V ICs
 */

/* Integration-component message types (icmsg_hdr.icmsgtype). */
#define ICMSGTYPE_NEGOTIATE		0
#define ICMSGTYPE_HEARTBEAT		1
#define ICMSGTYPE_KVPEXCHANGE		2
#define ICMSGTYPE_SHUTDOWN		3
#define ICMSGTYPE_TIMESYNC		4
#define ICMSGTYPE_VSS			5
#define ICMSGTYPE_FCOPY			7

/* icmsg_hdr.icflags bits. */
#define ICMSGHDRFLAG_TRANSACTION	1
#define ICMSGHDRFLAG_REQUEST		2
#define ICMSGHDRFLAG_RESPONSE		4
148346a97191SGreg Kroah-Hartman 
148446a97191SGreg Kroah-Hartman 
/*
 * While we want to handle util services as regular devices,
 * there is only one instance of each of these services; so
 * we statically allocate the service specific state.
 */

struct hv_util_service {
	u8 *recv_buffer;	/* per-service receive buffer */
	void *channel;
	void (*util_cb)(void *);			/* message callback */
	int (*util_init)(struct hv_util_service *);	/* one-time init */
	int (*util_init_transport)(void);
	void (*util_deinit)(void);
	int (*util_pre_suspend)(void);	/* hibernation hooks */
	int (*util_pre_resume)(void);
};
150146a97191SGreg Kroah-Hartman 
/* Pipe-mode framing header preceding each IC message. */
struct vmbuspipe_hdr {
	u32 flags;
	u32 msgsize;
} __packed;

/* major.minor version pair used in IC version negotiation. */
struct ic_version {
	u16 major;
	u16 minor;
} __packed;
151146a97191SGreg Kroah-Hartman 
/* Header common to all integration-component (IC) messages. */
struct icmsg_hdr {
	struct ic_version icverframe;	/* framework version */
	u16 icmsgtype;			/* ICMSGTYPE_* */
	struct ic_version icvermsg;	/* message version */
	u16 icmsgsize;
	u32 status;			/* result returned to the host */
	u8 ictransaction_id;
	u8 icflags;			/* ICMSGHDRFLAG_* */
	u8 reserved[2];
} __packed;
152206caa778SAndres Beltran 
#define IC_VERSION_NEGOTIATION_MAX_VER_COUNT 100
/* Combined size of the pipe header plus the IC message header. */
#define ICMSG_HDR (sizeof(struct vmbuspipe_hdr) + sizeof(struct icmsg_hdr))
/* Total negotiate-packet size for the given framework/message version counts. */
#define ICMSG_NEGOTIATE_PKT_SIZE(icframe_vercnt, icmsg_vercnt) \
	(ICMSG_HDR + sizeof(struct icmsg_negotiate) + \
	 (((icframe_vercnt) + (icmsg_vercnt)) * sizeof(struct ic_version)))

/* Body of an ICMSGTYPE_NEGOTIATE message. */
struct icmsg_negotiate {
	u16 icframe_vercnt;
	u16 icmsg_vercnt;
	u32 reserved;
	struct ic_version icversion_data[]; /* any size array */
} __packed;
153546a97191SGreg Kroah-Hartman 
/* Body of an ICMSGTYPE_SHUTDOWN message. */
struct shutdown_msg_data {
	u32 reason_code;
	u32 timeout_seconds;
	u32 flags;
	u8  display_message[2048];
} __packed;

/* Body of an ICMSGTYPE_HEARTBEAT message. */
struct heartbeat_msg_data {
	u64 seq_num;
	u32 reserved[8];
} __packed;
154746a97191SGreg Kroah-Hartman 
/* Time Sync IC defs */
#define ICTIMESYNCFLAG_PROBE	0
#define ICTIMESYNCFLAG_SYNC	1
#define ICTIMESYNCFLAG_SAMPLE	2

/*
 * Offset between the Windows epoch (1601-01-01) and the Unix epoch
 * (1970-01-01), expressed in 100 ns units.
 */
#ifdef __x86_64__
#define WLTIMEDELTA	116444736000000000L	/* in 100ns unit */
#else
#define WLTIMEDELTA	116444736000000000LL
#endif

/* Legacy (pre-reference-counter) timesync sample. */
struct ictimesync_data {
	u64 parenttime;
	u64 childtime;
	u64 roundtriptime;
	u8 flags;	/* ICTIMESYNCFLAG_* */
} __packed;

/* Timesync sample carrying the VM reference time. */
struct ictimesync_ref_data {
	u64 parenttime;
	u64 vmreferencetime;
	u8 flags;	/* ICTIMESYNCFLAG_* */
	char leapflags;
	char stratum;
	u8 reserved[3];
} __packed;
157446a97191SGreg Kroah-Hartman 
/* Associates a util-service GUID with its channel and message callback. */
struct hyperv_service_callback {
	u8 msg_type;
	char *log_msg;
	guid_t data;
	struct vmbus_channel *channel;
	void (*callback)(void *context);
};

/* One DMA mapping: bus address plus mapped length. */
struct hv_dma_range {
	dma_addr_t dma;
	u32 mapping_size;
};
158746a97191SGreg Kroah-Hartman 
1588800b9329SStephen Hemminger #define MAX_SRV_VER	0x7ffffff
158996dd86faSK. Y. Srinivasan extern bool vmbus_prep_negotiate_resp(struct icmsg_hdr *icmsghdrp, u8 *buf, u32 buflen,
15901f6ee4e7SK. Y. Srinivasan 				const int *fw_version, int fw_vercnt,
159137f7278bSK. Y. Srinivasan 				const int *srv_version, int srv_vercnt,
159237f7278bSK. Y. Srinivasan 				int *nego_fw_version, int *nego_srv_version);
159337f7278bSK. Y. Srinivasan 
159437f7278bSK. Y. Srinivasan void hv_process_channel_removal(struct vmbus_channel *channel);
159537f7278bSK. Y. Srinivasan 
159637f7278bSK. Y. Srinivasan void vmbus_setevent(struct vmbus_channel *channel);
1597593db803SAndy Shevchenko /*
1598593db803SAndy Shevchenko  * Negotiated version with the Host.
1599870ced05SAndrea Parri (Microsoft)  */
16005cc47247SK. Y. Srinivasan 
1601*5e4304ffSHamza Mahfooz extern __u32 vmbus_proto_version;
1602687f32e6SK. Y. Srinivasan 
1603687f32e6SK. Y. Srinivasan int vmbus_send_tl_connect_request(const guid_t *shv_guest_servie_id,
1604687f32e6SK. Y. Srinivasan 				  const guid_t *shv_host_servie_id);
1605e4165a0fSStephen Hemminger int vmbus_send_modifychannel(struct vmbus_channel *channel, u32 target_vp);
1606687f32e6SK. Y. Srinivasan void vmbus_set_event(struct vmbus_channel *channel);
1607e4165a0fSStephen Hemminger int vmbus_channel_set_cpu(struct vmbus_channel *channel, u32 target_cpu);
1608687f32e6SK. Y. Srinivasan 
1609687f32e6SK. Y. Srinivasan /* Get the start of the ring buffer. */
1610687f32e6SK. Y. Srinivasan static inline void *
hv_get_ring_buffer(const struct hv_ring_buffer_info * ring_info)16116e47dd3eSStephen Hemminger hv_get_ring_buffer(const struct hv_ring_buffer_info *ring_info)
16126e47dd3eSStephen Hemminger {
16136e47dd3eSStephen Hemminger 	return ring_info->ring_buffer->buffer;
16146e47dd3eSStephen Hemminger }
16156e47dd3eSStephen Hemminger 
/*
 * Mask off host interrupt callback notifications while the guest
 * drains the ring; pair with hv_end_read().
 */
static inline void hv_begin_read(struct hv_ring_buffer_info *rbi)
{
	rbi->ring_buffer->interrupt_mask = 1;

	/* make sure mask update is not reordered */
	virt_mb();
}
16266e47dd3eSStephen Hemminger 
/*
 * Re-enable host callback and return number of outstanding bytes.
 * The unmask -> barrier -> re-check sequence closes the race where
 * the host wrote new data while interrupts were still masked.
 */
static inline u32 hv_end_read(struct hv_ring_buffer_info *rbi)
{

	rbi->ring_buffer->interrupt_mask = 0;

	/* make sure mask update is not reordered */
	virt_mb();

	/*
	 * Now check to see if the ring buffer is still empty.
	 * If it is not, we raced and we need to process new
	 * incoming messages.
	 */
	return hv_get_bytes_to_read(rbi);
}
1645f3dd3f47Sstephen hemminger 
1646f3dd3f47Sstephen hemminger /*
1647f3dd3f47Sstephen hemminger  * An API to support in-place processing of incoming VMBUS packets.
1648f3dd3f47Sstephen hemminger  */
1649f3dd3f47Sstephen hemminger 
1650f3dd3f47Sstephen hemminger /* Get data payload associated with descriptor */
hv_pkt_data(const struct vmpacket_descriptor * desc)1651f3dd3f47Sstephen hemminger static inline void *hv_pkt_data(const struct vmpacket_descriptor *desc)
1652f3dd3f47Sstephen hemminger {
1653f3dd3f47Sstephen hemminger 	return (void *)((unsigned long)desc + (desc->offset8 << 3));
1654f3dd3f47Sstephen hemminger }
1655f3dd3f47Sstephen hemminger 
1656dbde6d0cSAndrea Parri (Microsoft) /* Get data size associated with descriptor */
hv_pkt_datalen(const struct vmpacket_descriptor * desc)1657dbde6d0cSAndrea Parri (Microsoft) static inline u32 hv_pkt_datalen(const struct vmpacket_descriptor *desc)
1658dbde6d0cSAndrea Parri (Microsoft) {
1659dbde6d0cSAndrea Parri (Microsoft) 	return (desc->len8 << 3) - (desc->offset8 << 3);
1660dbde6d0cSAndrea Parri (Microsoft) }
1661f3dd3f47Sstephen hemminger 
1662f3dd3f47Sstephen hemminger /* Get packet length associated with descriptor */
hv_pkt_len(const struct vmpacket_descriptor * desc)1663f3dd3f47Sstephen hemminger static inline u32 hv_pkt_len(const struct vmpacket_descriptor *desc)
1664f3dd3f47Sstephen hemminger {
1665f3dd3f47Sstephen hemminger 	return desc->len8 << 3;
1666f3dd3f47Sstephen hemminger }
16671c9de08fSAndrea Parri (Microsoft) 
/* Return the first available packet in @channel's ring, or NULL if empty. */
struct vmpacket_descriptor *
hv_pkt_iter_first(struct vmbus_channel *channel);

/*
 * Advance past @pkt to the next available packet; returns NULL when no
 * further packets are available (callers normally use hv_pkt_iter_next()).
 */
struct vmpacket_descriptor *
__hv_pkt_iter_next(struct vmbus_channel *channel,
		   const struct vmpacket_descriptor *pkt);

/* Finish an iteration; called automatically by hv_pkt_iter_next() at end. */
void hv_pkt_iter_close(struct vmbus_channel *channel);
1676adae1e93SAndres Beltran 
/*
 * Step the in-place packet iterator: fetch the packet after @pkt, and if
 * the ring is exhausted, close out the iteration on @channel.
 */
static inline struct vmpacket_descriptor *
hv_pkt_iter_next(struct vmbus_channel *channel,
		 const struct vmpacket_descriptor *pkt)
{
	struct vmpacket_descriptor *next = __hv_pkt_iter_next(channel, pkt);

	if (!next)
		hv_pkt_iter_close(channel);

	return next;
}
1689348dd93eSHaiyang Zhang 
/*
 * Iterate @pkt over every available packet in @channel's ring buffer.
 * The iteration is closed automatically (via hv_pkt_iter_next() ->
 * hv_pkt_iter_close()) when no more packets remain.
 */
#define foreach_vmbus_pkt(pkt, channel) \
	for (pkt = hv_pkt_iter_first(channel); pkt; \
	    pkt = hv_pkt_iter_next(channel, pkt))
1693e5d2f910SDexuan Cui 
1694e5d2f910SDexuan Cui /*
1695e5d2f910SDexuan Cui  * Interface for passing data between SR-IOV PF and VF drivers. The VF driver
1696348dd93eSHaiyang Zhang  * sends requests to read and write blocks. Each block must be 128 bytes or
1697348dd93eSHaiyang Zhang  * smaller. Optionally, the VF driver can register a callback function which
1698e5d2f910SDexuan Cui  * will be invoked when the host says that one or more of the first 64 block
1699348dd93eSHaiyang Zhang  * IDs is "invalid" which means that the VF driver should reread them.
1700e5d2f910SDexuan Cui  */
/* Upper bound on a single config block transfer, in bytes */
#define HV_CONFIG_BLOCK_SIZE_MAX 128

/* Read config block @block_id into @buf; actual size via *bytes_returned. */
int hyperv_read_cfg_blk(struct pci_dev *dev, void *buf, unsigned int buf_len,
			unsigned int block_id, unsigned int *bytes_returned);
/* Write @len bytes (<= HV_CONFIG_BLOCK_SIZE_MAX) from @buf to @block_id. */
int hyperv_write_cfg_blk(struct pci_dev *dev, void *buf, unsigned int len,
			 unsigned int block_id);
/*
 * Register @block_invalidate to be invoked (with @context) when the host
 * invalidates blocks; @block_mask carries one bit per block ID (first 64).
 */
int hyperv_reg_block_invalidate(struct pci_dev *dev, void *context,
				void (*block_invalidate)(void *context,
							 u64 block_mask));
1710348dd93eSHaiyang Zhang 
/*
 * Backend operations for the hyperv_*_cfg_blk()/hyperv_reg_block_invalidate()
 * API above; hvpci_block_ops is defined elsewhere (presumably filled in by
 * the Hyper-V PCI driver -- the definition is not in this header).
 */
struct hyperv_pci_block_ops {
	/* Backs hyperv_read_cfg_blk() */
	int (*read_block)(struct pci_dev *dev, void *buf, unsigned int buf_len,
			  unsigned int block_id, unsigned int *bytes_returned);
	/* Backs hyperv_write_cfg_blk() */
	int (*write_block)(struct pci_dev *dev, void *buf, unsigned int len,
			   unsigned int block_id);
	/* Backs hyperv_reg_block_invalidate() */
	int (*reg_blk_invalidate)(struct pci_dev *dev, void *context,
				  void (*block_invalidate)(void *context,
							   u64 block_mask));
};

extern struct hyperv_pci_block_ops hvpci_block_ops;
1722bca6b91dSBoqun Feng 
virt_to_hvpfn(void * addr)1723bca6b91dSBoqun Feng static inline unsigned long virt_to_hvpfn(void *addr)
1724bca6b91dSBoqun Feng {
1725bca6b91dSBoqun Feng 	phys_addr_t paddr;
1726bca6b91dSBoqun Feng 
1727bca6b91dSBoqun Feng 	if (is_vmalloc_addr(addr))
1728bca6b91dSBoqun Feng 		paddr = page_to_phys(vmalloc_to_page(addr)) +
1729bca6b91dSBoqun Feng 				     offset_in_page(addr);
1730106dee08SBoqun Feng 	else
1731106dee08SBoqun Feng 		paddr = __pa(addr);
1732106dee08SBoqun Feng 
17333d9c3dccSMichael Kelley 	return  paddr >> HV_HYP_PAGE_SHIFT;
1734106dee08SBoqun Feng }
1735106dee08SBoqun Feng 
/* Number of Hyper-V pages that fit in one guest PAGE_SIZE page */
#define NR_HV_HYP_PAGES_IN_PAGE	(PAGE_SIZE / HV_HYP_PAGE_SIZE)
/* Byte offset of @ptr within its Hyper-V page */
#define offset_in_hvpage(ptr)	((unsigned long)(ptr) & ~HV_HYP_PAGE_MASK)
/* Convert a byte quantity to a Hyper-V PFN, rounding up / down */
#define HVPFN_UP(x)	(((x) + HV_HYP_PAGE_SIZE-1) >> HV_HYP_PAGE_SHIFT)
#define HVPFN_DOWN(x)	((x) >> HV_HYP_PAGE_SHIFT)
/* First Hyper-V PFN covered by @page */
#define page_to_hvpfn(page)	(page_to_pfn(page) * NR_HV_HYP_PAGES_IN_PAGE)
1741 
1742 #endif /* _HYPERV_H */
1743