/*
   md_p.h : physical layout of Linux RAID devices
          Copyright (C) 1996-98 Ingo Molnar, Gadi Oxman

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   You should have received a copy of the GNU General Public License
   (for example /usr/src/linux/COPYING); if not, write to the Free
   Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/

#ifndef _MD_P_H
#define _MD_P_H

#include <linux/types.h>
#include <asm/byteorder.h>

/*
 * RAID superblock.
 *
 * The RAID superblock maintains some statistics on each RAID configuration.
 * Each real device in the RAID set contains it near the end of the device.
 * Some of the ideas are copied from the ext2fs implementation.
 *
 * We currently use 4096 bytes as follows:
 *
 *	word offset	function
 *
 *	   0  -    31	Constant generic RAID device information.
 *	  32  -    63	Generic state information.
 *	  64  -   127	Personality specific information.
 *	 128  -   991	27 32-word descriptors of the disks in the raid set.
 *	 992  -  1023	Disk specific descriptor.
 */

/*
 * If x is the real device size in bytes, we return an apparent size of:
 *
 *	y = (x & ~(MD_RESERVED_BYTES - 1)) - MD_RESERVED_BYTES
 *
 * and place the 4kB superblock at offset y.
 */
#define MD_RESERVED_BYTES		(64 * 1024)
#define MD_RESERVED_SECTORS		(MD_RESERVED_BYTES / 512)

#define MD_NEW_SIZE_SECTORS(x)		((x & ~(MD_RESERVED_SECTORS - 1)) - MD_RESERVED_SECTORS)
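
/*
 * Worked example (hypothetical 1000000-sector component device):
 * MD_RESERVED_SECTORS is 128, so
 * MD_NEW_SIZE_SECTORS(1000000) = (1000000 & ~127) - 128 = 999808,
 * i.e. the data area is rounded down to a 64kB boundary and the
 * superblock is written at sector 999808.
 */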

#define MD_SB_BYTES			4096
#define MD_SB_WORDS			(MD_SB_BYTES / 4)
#define MD_SB_SECTORS			(MD_SB_BYTES / 512)

/*
 * The following are counted in 32-bit words
 */
#define	MD_SB_GENERIC_OFFSET		0
#define MD_SB_PERSONALITY_OFFSET	64
#define MD_SB_DISKS_OFFSET		128
#define MD_SB_DESCRIPTOR_OFFSET		992

#define MD_SB_GENERIC_CONSTANT_WORDS	32
#define MD_SB_GENERIC_STATE_WORDS	32
#define MD_SB_GENERIC_WORDS		(MD_SB_GENERIC_CONSTANT_WORDS + MD_SB_GENERIC_STATE_WORDS)
#define MD_SB_PERSONALITY_WORDS		64
#define MD_SB_DESCRIPTOR_WORDS		32
#define MD_SB_DISKS			27
#define MD_SB_DISKS_WORDS		(MD_SB_DISKS*MD_SB_DESCRIPTOR_WORDS)
#define MD_SB_RESERVED_WORDS		(1024 - MD_SB_GENERIC_WORDS - MD_SB_PERSONALITY_WORDS - MD_SB_DISKS_WORDS - MD_SB_DESCRIPTOR_WORDS)
#define MD_SB_EQUAL_WORDS		(MD_SB_GENERIC_WORDS + MD_SB_PERSONALITY_WORDS + MD_SB_DISKS_WORDS)
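
/*
 * Sanity check of the word budget above:
 * 64 (generic) + 64 (personality) + 27 * 32 (disks) + 32 (descriptor)
 * = 1024 words = 4096 bytes, so MD_SB_RESERVED_WORDS evaluates to 0.
 */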

/*
 * Device "operational" state bits
 */
#define MD_DISK_FAULTY		0 /* disk is faulty / operational */
#define MD_DISK_ACTIVE		1 /* disk is running or spare disk */
#define MD_DISK_SYNC		2 /* disk is in sync with the raid set */
#define MD_DISK_REMOVED		3 /* disk has been removed from the raid set */
#define MD_DISK_CLUSTER_ADD     4 /* Initiate a disk add across the cluster
				   * For clustered environments only.
				   */
#define MD_DISK_CANDIDATE	5 /* disk is added as spare (local) until confirmed
				   * For clustered environments only.
				   */
#define MD_DISK_FAILFAST	10 /* Send REQ_FAILFAST if there are multiple
				    * devices available - and don't try to
				    * correct read errors.
				    */

#define	MD_DISK_WRITEMOSTLY	9 /* disk is "write-mostly" in a RAID1 config.
				   * Read requests will only be sent here in
				   * dire need.
				   */
#define MD_DISK_JOURNAL		18 /* disk is used as the write journal in RAID-5/6 */

#define MD_DISK_ROLE_SPARE	0xffff
#define MD_DISK_ROLE_FAULTY	0xfffe
#define MD_DISK_ROLE_JOURNAL	0xfffd
#define MD_DISK_ROLE_MAX	0xff00 /* max value of regular disk role */

typedef struct mdp_device_descriptor_s {
	__u32 number;		/* 0 Device number in the entire set	      */
	__u32 major;		/* 1 Device major number		      */
	__u32 minor;		/* 2 Device minor number		      */
	__u32 raid_disk;	/* 3 The role of the device in the raid set   */
	__u32 state;		/* 4 Operational state			      */
	__u32 reserved[MD_SB_DESCRIPTOR_WORDS - 5];
} mdp_disk_t;
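
/*
 * The 'state' word is a bitmask of the MD_DISK_* bit numbers above.
 * A minimal sketch of inspecting a descriptor "d" (hypothetical
 * variable, not part of this header):
 *
 *	int faulty  = (d.state >> MD_DISK_FAULTY) & 1;
 *	int in_sync = (d.state >> MD_DISK_SYNC) & 1;
 */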

#define MD_SB_MAGIC		0xa92b4efc

/*
 * Superblock state bits
 */
#define MD_SB_CLEAN		0
#define MD_SB_ERRORS		1

#define	MD_SB_CLUSTERED		5 /* MD is clustered */
#define	MD_SB_BITMAP_PRESENT	8 /* bitmap may be present nearby */

/*
 * Notes:
 * - if an array is being reshaped (restriped) in order to change the
 *   number of active devices in the array, 'raid_disks' will be
 *   the larger of the old and new numbers.  'delta_disks' will
 *   be the "new - old".  So if +ve, raid_disks is the new value, and
 *   "raid_disks-delta_disks" is the old.  If -ve, raid_disks is the
 *   old value and "raid_disks+delta_disks" is the new (smaller) value.
 */
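
/*
 * Example: growing an array from 4 to 6 active devices gives
 * raid_disks == 6 and delta_disks == +2, so the old geometry is
 * raid_disks - delta_disks == 4.  Shrinking from 6 to 4 gives
 * raid_disks == 6 and delta_disks == -2.
 */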


typedef struct mdp_superblock_s {
	/*
	 * Constant generic information
	 */
	__u32 md_magic;		/*  0 MD identifier			      */
	__u32 major_version;	/*  1 major version to which the set conforms */
	__u32 minor_version;	/*  2 minor version ...		      */
	__u32 patch_version;	/*  3 patchlevel version ...		      */
	__u32 gvalid_words;	/*  4 Number of used words in this section    */
	__u32 set_uuid0;	/*  5 Raid set identifier		      */
	__u32 ctime;		/*  6 Creation time			      */
	__u32 level;		/*  7 Raid personality			      */
	__u32 size;		/*  8 Apparent size of each individual disk   */
	__u32 nr_disks;		/*  9 total disks in the raid set	      */
	__u32 raid_disks;	/* 10 disks in a fully functional raid set    */
	__u32 md_minor;		/* 11 preferred MD minor device number	      */
	__u32 not_persistent;	/* 12 does it have a persistent superblock    */
	__u32 set_uuid1;	/* 13 Raid set identifier #2		      */
	__u32 set_uuid2;	/* 14 Raid set identifier #3		      */
	__u32 set_uuid3;	/* 15 Raid set identifier #4		      */
	__u32 gstate_creserved[MD_SB_GENERIC_CONSTANT_WORDS - 16];

	/*
	 * Generic state information
	 */
	__u32 utime;		/*  0 Superblock update time		      */
	__u32 state;		/*  1 State bits (clean, ...)		      */
	__u32 active_disks;	/*  2 Number of currently active disks	      */
	__u32 working_disks;	/*  3 Number of working disks		      */
	__u32 failed_disks;	/*  4 Number of failed disks		      */
	__u32 spare_disks;	/*  5 Number of spare disks		      */
	__u32 sb_csum;		/*  6 checksum of the whole superblock        */
#if defined(__BYTE_ORDER) ? __BYTE_ORDER == __BIG_ENDIAN : defined(__BIG_ENDIAN)
	__u32 events_hi;	/*  7 high-order of superblock update count   */
	__u32 events_lo;	/*  8 low-order of superblock update count    */
	__u32 cp_events_hi;	/*  9 high-order of checkpoint update count   */
	__u32 cp_events_lo;	/* 10 low-order of checkpoint update count    */
#elif defined(__BYTE_ORDER) ? __BYTE_ORDER == __LITTLE_ENDIAN : defined(__LITTLE_ENDIAN)
	__u32 events_lo;	/*  7 low-order of superblock update count    */
	__u32 events_hi;	/*  8 high-order of superblock update count   */
	__u32 cp_events_lo;	/*  9 low-order of checkpoint update count    */
	__u32 cp_events_hi;	/* 10 high-order of checkpoint update count   */
#else
#error unspecified endianness
#endif
	__u32 recovery_cp;	/* 11 recovery checkpoint sector count	      */
	/* These are only valid for minor_version > 90 */
	__u64 reshape_position;	/* 12,13 next address in array-space for reshape */
	__u32 new_level;	/* 14 new level we are reshaping to	      */
	__u32 delta_disks;	/* 15 change in number of raid_disks	      */
	__u32 new_layout;	/* 16 new layout			      */
	__u32 new_chunk;	/* 17 new chunk size (bytes)		      */
	__u32 gstate_sreserved[MD_SB_GENERIC_STATE_WORDS - 18];

	/*
	 * Personality information
	 */
	__u32 layout;		/*  0 the array's physical layout	      */
	__u32 chunk_size;	/*  1 chunk size in bytes		      */
	__u32 root_pv;		/*  2 LV root PV */
	__u32 root_block;	/*  3 LV root block */
	__u32 pstate_reserved[MD_SB_PERSONALITY_WORDS - 4];

	/*
	 * Disks information
	 */
	mdp_disk_t disks[MD_SB_DISKS];

	/*
	 * Reserved
	 */
	__u32 reserved[MD_SB_RESERVED_WORDS];

	/*
	 * Active descriptor
	 */
	mdp_disk_t this_disk;

} mdp_super_t;

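/* Combine the split 32-bit halves into the 64-bit event counter. */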
static inline __u64 md_event(mdp_super_t *sb) {
	__u64 ev = sb->events_hi;
	return (ev << 32) | sb->events_lo;
}

#define MD_SUPERBLOCK_1_TIME_SEC_MASK ((1ULL<<40) - 1)
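
/*
 * The mask above splits the version-1 timestamps (ctime/utime in
 * struct mdp_superblock_1 below): the low 40 bits hold seconds and
 * the top 24 bits hold microseconds.  A sketch, assuming "t" holds
 * the CPU-endian 64-bit value:
 *
 *	__u64 secs = t & MD_SUPERBLOCK_1_TIME_SEC_MASK;
 *	__u32 usec = t >> 40;
 */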

/*
 * The version-1 superblock:
 * All numeric fields are little-endian.
 *
 * total size: 256 bytes plus 2 per device.
 *  1K allows 384 devices.
 */
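/*
 * The 384-device figure follows from the layout below: 1024 - 256 =
 * 768 bytes of dev_roles space at 2 bytes per entry gives 384 entries.
 */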
struct mdp_superblock_1 {
	/* constant array information - 128 bytes */
	__le32	magic;		/* MD_SB_MAGIC: 0xa92b4efc - little endian */
	__le32	major_version;	/* 1 */
	__le32	feature_map;	/* bit 0 set if 'bitmap_offset' is meaningful */
	__le32	pad0;		/* always set to 0 when writing */

	__u8	set_uuid[16];	/* user-space generated. */
	char	set_name[32];	/* set and interpreted by user-space */

	__le64	ctime;		/* lo 40 bits are seconds, top 24 are microseconds or 0 */
	__le32	level;		/* -4 (multipath), -1 (linear), 0,1,4,5 */
	__le32	layout;		/* only for raid5 and raid10 currently */
	__le64	size;		/* used size of component devices, in 512byte sectors */

	__le32	chunksize;	/* in 512byte sectors */
	__le32	raid_disks;
	union {
		__le32	bitmap_offset;	/* sectors after start of superblock that bitmap starts
					 * NOTE: signed, so bitmap can be before superblock
					 * only meaningful if feature_map[0] is set.
					 */

		/* only meaningful when feature_map[MD_FEATURE_PPL] is set */
		struct {
			__le16 offset; /* sectors from start of superblock that ppl starts (signed) */
			__le16 size; /* ppl size in sectors */
		} ppl;
	};

	/* These are only valid with feature bit '4' */
	__le32	new_level;	/* new level we are reshaping to		*/
	__le64	reshape_position;	/* next address in array-space for reshape */
	__le32	delta_disks;	/* change in number of raid_disks		*/
	__le32	new_layout;	/* new layout					*/
	__le32	new_chunk;	/* new chunk size (512byte sectors)		*/
	__le32  new_offset;	/* signed number to add to data_offset in new
				 * layout.  0 == no-change.  This can be
				 * different on each device in the array.
				 */

	/* constant this-device information - 64 bytes */
	__le64	data_offset;	/* sector start of data, often 0 */
	__le64	data_size;	/* sectors in this device that can be used for data */
	__le64	super_offset;	/* sector start of this superblock */
	union {
		__le64	recovery_offset;/* sectors before this offset (from data_offset) have been recovered */
		__le64	journal_tail;/* journal tail of journal device (from data_offset) */
	};
	__le32	dev_number;	/* permanent identifier of this device - not role in raid */
	__le32	cnt_corrected_read; /* number of read errors that were corrected by re-writing */
	__u8	device_uuid[16]; /* user-space settable, ignored by kernel */
	__u8	devflags;	/* per-device flags.  Only two defined... */
#define	WriteMostly1	1	/* mask for writemostly flag in above */
#define	FailFast1	2	/* Should avoid retries and fixups and just fail */
	/* Bad block log.  If there are any bad blocks the feature flag is set.
	 * If offset and size are non-zero, that space is reserved and available
	 */
	__u8	bblog_shift;	/* shift from sectors to block size */
	__le16	bblog_size;	/* number of sectors reserved for list */
	__le32	bblog_offset;	/* sector offset from superblock to bblog,
				 * signed - not unsigned */
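
	/* Given the fields above, the bad block list (when the bad-block
	 * feature bit is set) occupies bblog_size sectors starting at
	 * super_offset + (signed) bblog_offset.
	 */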

	/* array state information - 64 bytes */
	__le64	utime;		/* 40 bits second, 24 bits microseconds */
	__le64	events;		/* incremented when superblock updated */
	__le64	resync_offset;	/* data before this offset (from data_offset) known to be in sync */
	__le32	sb_csum;	/* checksum up to devs[max_dev] */
	__le32	max_dev;	/* size of devs[] array to consider */
	__u8	pad3[64-32];	/* set to 0 when writing */

	/* device state information. Indexed by dev_number.
	 * 2 bytes per device
	 * Note there are no per-device state flags. State information is rolled
	 * into the 'roles' value.  If a device is spare or faulty, then it doesn't
	 * have a meaningful role.
	 */
	__le16	dev_roles[0];	/* role in array, or 0xffff for a spare, or 0xfffe for faulty */
};
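
/*
 * A minimal sketch of reading one role entry, assuming "sb" points at
 * an in-memory copy of the superblock and "i" is a dev_number below
 * max_dev (both hypothetical variables, not part of this header):
 *
 *	__u16 role = __le16_to_cpu(sb->dev_roles[i]);
 *	if (role == MD_DISK_ROLE_SPARE)		... spare, no data slot
 *	else if (role == MD_DISK_ROLE_FAULTY)	... failed device
 *	else if (role == MD_DISK_ROLE_JOURNAL)	... journal device
 *	else					... active in slot 'role'
 */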

/* feature_map bits */
#define MD_FEATURE_BITMAP_OFFSET	1
#define	MD_FEATURE_RECOVERY_OFFSET	2 /* recovery_offset is present and
					   * must be honoured
					   */
#define	MD_FEATURE_RESHAPE_ACTIVE	4
#define	MD_FEATURE_BAD_BLOCKS		8 /* badblock list is not empty */
#define	MD_FEATURE_REPLACEMENT		16 /* This device is replacing an
					    * active device with same 'role'.
					    * 'recovery_offset' is also set.
					    */
#define	MD_FEATURE_RESHAPE_BACKWARDS	32 /* Reshape doesn't change number
					    * of devices, but is going
					    * backwards anyway.
					    */
#define	MD_FEATURE_NEW_OFFSET		64 /* new_offset must be honoured */
#define	MD_FEATURE_RECOVERY_BITMAP	128 /* recovery that is happening
					     * is guided by bitmap.
					     */
#define MD_FEATURE_CLUSTERED		256 /* clustered MD */
#define	MD_FEATURE_JOURNAL		512 /* support write cache */
#define	MD_FEATURE_PPL			1024 /* support PPL */
#define	MD_FEATURE_ALL			(MD_FEATURE_BITMAP_OFFSET	\
					|MD_FEATURE_RECOVERY_OFFSET	\
					|MD_FEATURE_RESHAPE_ACTIVE	\
					|MD_FEATURE_BAD_BLOCKS		\
					|MD_FEATURE_REPLACEMENT		\
					|MD_FEATURE_RESHAPE_BACKWARDS	\
					|MD_FEATURE_NEW_OFFSET		\
					|MD_FEATURE_RECOVERY_BITMAP	\
					|MD_FEATURE_CLUSTERED		\
					|MD_FEATURE_JOURNAL		\
					|MD_FEATURE_PPL			\
					)
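
/*
 * MD_FEATURE_ALL collects every feature bit known to this header; a
 * hedged sketch for refusing superblocks that use anything newer,
 * assuming "sb" is an in-memory copy:
 *
 *	if (__le32_to_cpu(sb->feature_map) & ~MD_FEATURE_ALL)
 *		... unknown feature present, do not assemble ...
 */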

struct r5l_payload_header {
	__le16 type;
	__le16 flags;
} __attribute__ ((__packed__));

enum r5l_payload_type {
	R5LOG_PAYLOAD_DATA = 0,
	R5LOG_PAYLOAD_PARITY = 1,
	R5LOG_PAYLOAD_FLUSH = 2,
};

struct r5l_payload_data_parity {
	struct r5l_payload_header header;
	__le32 size;		/* sector. data/parity size. each 4k
				 * has a checksum */
	__le64 location;	/* sector. For data, it's raid sector. For
				 * parity, it's stripe sector */
	__le32 checksum[];
} __attribute__ ((__packed__));
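
/*
 * A hedged reading of the comments above: with "size" counted in
 * 512-byte sectors and one checksum per 4kB of data/parity, the
 * checksum[] array is expected to hold size / 8 entries.
 */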

enum r5l_payload_data_parity_flag {
	R5LOG_PAYLOAD_FLAG_DISCARD = 1, /* payload is discard */
	/*
	 * RESHAPED/RESHAPING is only set when there is reshape activity. Note,
	 * both data/parity of a stripe should have the same flag set
	 *
	 * RESHAPED: reshape is running, and this stripe finished reshape
	 * RESHAPING: reshape is running, and this stripe isn't reshaped
	 */
	R5LOG_PAYLOAD_FLAG_RESHAPED = 2,
	R5LOG_PAYLOAD_FLAG_RESHAPING = 3,
};

struct r5l_payload_flush {
	struct r5l_payload_header header;
	__le32 size; /* flush_stripes size, bytes */
	__le64 flush_stripes[];
} __attribute__ ((__packed__));

enum r5l_payload_flush_flag {
	R5LOG_PAYLOAD_FLAG_FLUSH_STRIPE = 1, /* data represents whole stripe */
};

struct r5l_meta_block {
	__le32 magic;
	__le32 checksum;
	__u8 version;
	__u8 __zero_pading_1;
	__le16 __zero_pading_2;
	__le32 meta_size; /* whole size of the block */

	__le64 seq;
	__le64 position; /* sector, start from rdev->data_offset, current position */
	struct r5l_payload_header payloads[];
} __attribute__ ((__packed__));

#define R5LOG_VERSION 0x1
#define R5LOG_MAGIC 0x6433c509
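
/*
 * A minimal sketch of validating a meta block read from the log,
 * assuming "mb" points at an in-memory copy:
 *
 *	if (__le32_to_cpu(mb->magic) != R5LOG_MAGIC ||
 *	    mb->version != R5LOG_VERSION)
 *		... not a valid r5l meta block ...
 */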

struct ppl_header_entry {
	__le64 data_sector;	/* raid sector of the new data */
	__le32 pp_size;		/* length of partial parity */
	__le32 data_size;	/* length of data */
	__le32 parity_disk;	/* member disk containing parity */
	__le32 checksum;	/* checksum of partial parity data for this
				 * entry (~crc32c) */
} __attribute__ ((__packed__));

#define PPL_HEADER_SIZE 4096
#define PPL_HDR_RESERVED 512
#define PPL_HDR_ENTRY_SPACE \
	(PPL_HEADER_SIZE - PPL_HDR_RESERVED - 4 * sizeof(__le32) - sizeof(__le64))
#define PPL_HDR_MAX_ENTRIES \
	(PPL_HDR_ENTRY_SPACE / sizeof(struct ppl_header_entry))
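
/*
 * Worked out: struct ppl_header_entry is 24 bytes packed, so
 * PPL_HDR_ENTRY_SPACE is 4096 - 512 - 16 - 8 = 3560 bytes,
 * PPL_HDR_MAX_ENTRIES is 148, and struct ppl_header occupies
 * 512 + 24 + 148 * 24 = 4088 bytes, within PPL_HEADER_SIZE.
 */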

struct ppl_header {
	__u8 reserved[PPL_HDR_RESERVED];/* reserved space, fill with 0xff */
	__le32 signature;		/* signature (family number of volume) */
	__le32 padding;			/* zero pad */
	__le64 generation;		/* generation number of the header */
	__le32 entries_count;		/* number of entries in entry array */
	__le32 checksum;		/* checksum of the header (~crc32c) */
	struct ppl_header_entry entries[PPL_HDR_MAX_ENTRIES];
} __attribute__ ((__packed__));

#endif
429