/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __CEPH_DECODE_H
#define __CEPH_DECODE_H

#include <linux/err.h>
#include <linux/bug.h>
#include <linux/slab.h>
#include <linux/time.h>
#include <asm/unaligned.h>

#include <linux/ceph/types.h>

/*
 * in all cases,
 *   void **p     pointer to position pointer
 *   void *end    pointer to end of buffer (last byte + 1)
 */

static inline u64 ceph_decode_64(void **p)
{
	u64 v = get_unaligned_le64(*p);
	*p += sizeof(u64);
	return v;
}
static inline u32 ceph_decode_32(void **p)
{
	u32 v = get_unaligned_le32(*p);
	*p += sizeof(u32);
	return v;
}
static inline u16 ceph_decode_16(void **p)
{
	u16 v = get_unaligned_le16(*p);
	*p += sizeof(u16);
	return v;
}
static inline u8 ceph_decode_8(void **p)
{
	u8 v = *(u8 *)*p;
	(*p)++;
	return v;
}
static inline void ceph_decode_copy(void **p, void *pv, size_t n)
{
	memcpy(pv, *p, n);
	*p += n;
}

/*
 * bounds check input.
 */
static inline bool ceph_has_room(void **p, void *end, size_t n)
{
	return end >= *p && n <= end - *p;
}

#define ceph_decode_need(p, end, n, bad)			\
	do {							\
		if (!likely(ceph_has_room(p, end, n)))		\
			goto bad;				\
	} while (0)
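
/*
 * Example (a minimal sketch; the field names are hypothetical): when the
 * size of the next chunk is known up front, callers reserve the room once
 * with ceph_decode_need() and then use the unsafe decoders, which advance
 * *p as they go.
 *
 *	void *p = buf, *end = buf + buf_len;
 *	u32 epoch, flags;
 *	u64 max_size;
 *
 *	ceph_decode_need(&p, end, 2 * sizeof(u32) + sizeof(u64), bad);
 *	epoch = ceph_decode_32(&p);
 *	flags = ceph_decode_32(&p);
 *	max_size = ceph_decode_64(&p);
 *	return 0;
 * bad:
 *	return -ERANGE;
 */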

#define ceph_decode_64_safe(p, end, v, bad)			\
	do {							\
		ceph_decode_need(p, end, sizeof(u64), bad);	\
		v = ceph_decode_64(p);				\
	} while (0)
#define ceph_decode_32_safe(p, end, v, bad)			\
	do {							\
		ceph_decode_need(p, end, sizeof(u32), bad);	\
		v = ceph_decode_32(p);				\
	} while (0)
#define ceph_decode_16_safe(p, end, v, bad)			\
	do {							\
		ceph_decode_need(p, end, sizeof(u16), bad);	\
		v = ceph_decode_16(p);				\
	} while (0)
#define ceph_decode_8_safe(p, end, v, bad)			\
	do {							\
		ceph_decode_need(p, end, sizeof(u8), bad);	\
		v = ceph_decode_8(p);				\
	} while (0)

#define ceph_decode_copy_safe(p, end, pv, n, bad)		\
	do {							\
		ceph_decode_need(p, end, n, bad);		\
		ceph_decode_copy(p, pv, n);			\
	} while (0)
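
/*
 * Example (a minimal sketch; the fields decoded here are hypothetical):
 * when fields are bounds-checked one at a time, the _safe variants jump
 * to a caller-supplied label on truncated input.
 *
 *	u8 version;
 *	u32 count;
 *	struct ceph_fsid fsid;
 *
 *	ceph_decode_8_safe(p, end, version, bad);
 *	ceph_decode_32_safe(p, end, count, bad);
 *	ceph_decode_copy_safe(p, end, &fsid, sizeof(fsid), bad);
 *	return 0;
 * bad:
 *	return -EINVAL;
 */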

/*
 * Allocate a buffer big enough to hold the wire-encoded string, and
 * decode the string into it.  The resulting string will always be
 * terminated with '\0'.  If successful, *p will be advanced
 * past the decoded data.  Also, if lenp is not a null pointer, the
 * length (not including the terminating '\0') will be recorded in
 * *lenp.  Note that a zero-length string is a valid return value.
 *
 * Returns a pointer to the newly-allocated string buffer, or a
 * pointer-coded errno if an error occurs.  Neither *p nor *lenp
 * will have been updated if an error is returned.
 *
 * There are two possible failures:
 *   - converting the string would require accessing memory at or
 *     beyond the "end" pointer provided (-ERANGE)
 *   - memory could not be allocated for the result (-ENOMEM)
 */
static inline char *ceph_extract_encoded_string(void **p, void *end,
						size_t *lenp, gfp_t gfp)
{
	u32 len;
	void *sp = *p;
	char *buf;

	ceph_decode_32_safe(&sp, end, len, bad);
	if (!ceph_has_room(&sp, end, len))
		goto bad;

	buf = kmalloc(len + 1, gfp);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	if (len)
		memcpy(buf, sp, len);
	buf[len] = '\0';

	*p = (char *) *p + sizeof (u32) + len;
	if (lenp)
		*lenp = (size_t) len;

	return buf;

bad:
	return ERR_PTR(-ERANGE);
}
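
/*
 * Example (a minimal sketch): the return value is either a valid,
 * NUL-terminated, kmalloc'ed string or an ERR_PTR()-encoded errno, so
 * callers test it with IS_ERR() and eventually kfree() it.
 *
 *	size_t name_len;
 *	char *name;
 *
 *	name = ceph_extract_encoded_string(p, end, &name_len, GFP_NOFS);
 *	if (IS_ERR(name))
 *		return PTR_ERR(name);
 *	...
 *	kfree(name);
 */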

/*
 * skip helpers
 */
#define ceph_decode_skip_n(p, end, n, bad)			\
	do {							\
		ceph_decode_need(p, end, n, bad);		\
		*p += n;					\
	} while (0)

#define ceph_decode_skip_64(p, end, bad)			\
ceph_decode_skip_n(p, end, sizeof(u64), bad)

#define ceph_decode_skip_32(p, end, bad)			\
ceph_decode_skip_n(p, end, sizeof(u32), bad)

#define ceph_decode_skip_16(p, end, bad)			\
ceph_decode_skip_n(p, end, sizeof(u16), bad)

#define ceph_decode_skip_8(p, end, bad)				\
ceph_decode_skip_n(p, end, sizeof(u8), bad)

#define ceph_decode_skip_string(p, end, bad)			\
	do {							\
		u32 len;					\
								\
		ceph_decode_32_safe(p, end, len, bad);		\
		ceph_decode_skip_n(p, end, len, bad);		\
	} while (0)

#define ceph_decode_skip_set(p, end, type, bad)			\
	do {							\
		u32 len;					\
								\
		ceph_decode_32_safe(p, end, len, bad);		\
		while (len--)					\
			ceph_decode_skip_##type(p, end, bad);	\
	} while (0)

#define ceph_decode_skip_map(p, end, ktype, vtype, bad)		\
	do {							\
		u32 len;					\
								\
		ceph_decode_32_safe(p, end, len, bad);		\
		while (len--) {					\
			ceph_decode_skip_##ktype(p, end, bad);	\
			ceph_decode_skip_##vtype(p, end, bad);	\
		}						\
	} while (0)

#define ceph_decode_skip_map_of_map(p, end, ktype1, ktype2, vtype2, bad) \
	do {							\
		u32 len;					\
								\
		ceph_decode_32_safe(p, end, len, bad);		\
		while (len--) {					\
			ceph_decode_skip_##ktype1(p, end, bad);	\
			ceph_decode_skip_map(p, end, ktype2, vtype2, bad); \
		}						\
	} while (0)
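
/*
 * Example (a minimal sketch; the skipped fields are hypothetical): the
 * skip helpers are convenient for wire fields the kernel does not care
 * about, e.g. a map<string,string> of metadata followed by an unused
 * u32:
 *
 *	ceph_decode_skip_map(p, end, string, string, bad);
 *	ceph_decode_skip_32(p, end, bad);
 */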

/*
 * struct ceph_timespec <-> struct timespec64
 */
static inline void ceph_decode_timespec64(struct timespec64 *ts,
					  const struct ceph_timespec *tv)
{
	/*
	 * This will still overflow in year 2106.  If necessary, we could
	 * extend the protocol to steal two more bits from tv_nsec and add
	 * three more 136-year epochs after that, the way ext4 does.
	 */
	ts->tv_sec = (time64_t)le32_to_cpu(tv->tv_sec);
	ts->tv_nsec = (long)le32_to_cpu(tv->tv_nsec);
}
static inline void ceph_encode_timespec64(struct ceph_timespec *tv,
					  const struct timespec64 *ts)
{
	tv->tv_sec = cpu_to_le32((u32)ts->tv_sec);
	tv->tv_nsec = cpu_to_le32((u32)ts->tv_nsec);
}
static inline void ceph_decode_timespec(struct timespec *ts,
					const struct ceph_timespec *tv)
{
	ts->tv_sec = (__kernel_time_t)le32_to_cpu(tv->tv_sec);
	ts->tv_nsec = (long)le32_to_cpu(tv->tv_nsec);
}
static inline void ceph_encode_timespec(struct ceph_timespec *tv,
					const struct timespec *ts)
{
	tv->tv_sec = cpu_to_le32((u32)ts->tv_sec);
	tv->tv_nsec = cpu_to_le32((u32)ts->tv_nsec);
}
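
/*
 * Example (a minimal sketch): on-wire timestamps are a packed
 * struct ceph_timespec, so a decoder typically copies the raw struct out
 * of the buffer and then converts it:
 *
 *	struct ceph_timespec ceph_ts;
 *	struct timespec64 mtime;
 *
 *	ceph_decode_copy_safe(p, end, &ceph_ts, sizeof(ceph_ts), bad);
 *	ceph_decode_timespec64(&mtime, &ceph_ts);
 */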

/*
 * sockaddr_storage <-> ceph_sockaddr
 */
static inline void ceph_encode_addr(struct ceph_entity_addr *a)
{
	__be16 ss_family = htons(a->in_addr.ss_family);
	a->in_addr.ss_family = *(__u16 *)&ss_family;
}
static inline void ceph_decode_addr(struct ceph_entity_addr *a)
{
	__be16 ss_family = *(__be16 *)&a->in_addr.ss_family;
	a->in_addr.ss_family = ntohs(ss_family);
	WARN_ON(a->in_addr.ss_family == 512);	/* 512 == AF_INET with bytes swapped */
}
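
/*
 * Example (a minimal sketch): entity addresses usually arrive embedded in
 * larger encoded structures, so they are copied out verbatim and then
 * fixed up in place:
 *
 *	struct ceph_entity_addr addr;
 *
 *	ceph_decode_copy_safe(p, end, &addr, sizeof(addr), bad);
 *	ceph_decode_addr(&addr);
 */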

/*
 * encoders
 */
static inline void ceph_encode_64(void **p, u64 v)
{
	put_unaligned_le64(v, (__le64 *)*p);
	*p += sizeof(u64);
}
static inline void ceph_encode_32(void **p, u32 v)
{
	put_unaligned_le32(v, (__le32 *)*p);
	*p += sizeof(u32);
}
static inline void ceph_encode_16(void **p, u16 v)
{
	put_unaligned_le16(v, (__le16 *)*p);
	*p += sizeof(u16);
}
static inline void ceph_encode_8(void **p, u8 v)
{
	*(u8 *)*p = v;
	(*p)++;
}
static inline void ceph_encode_copy(void **p, const void *s, int len)
{
	memcpy(*p, s, len);
	*p += len;
}
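
/*
 * Example (a minimal sketch; the fields are hypothetical): like the
 * decoders, the unsafe encoders assume the caller has already sized the
 * buffer; they write little-endian values and advance *p.
 *
 *	void *p = front, *end = front + front_len;
 *
 *	ceph_encode_8(&p, 1);
 *	ceph_encode_32(&p, flags);
 *	ceph_encode_64(&p, snap_id);
 */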

/*
 * filepath, string encoders
 */
static inline void ceph_encode_filepath(void **p, void *end,
					u64 ino, const char *path)
{
	u32 len = path ? strlen(path) : 0;
	BUG_ON(*p + 1 + sizeof(ino) + sizeof(len) + len > end);
	ceph_encode_8(p, 1);	/* filepath encoding version */
	ceph_encode_64(p, ino);
	ceph_encode_32(p, len);
	if (len)
		memcpy(*p, path, len);
	*p += len;
}

static inline void ceph_encode_string(void **p, void *end,
				      const char *s, u32 len)
{
	BUG_ON(*p + sizeof(len) + len > end);
	ceph_encode_32(p, len);
	if (len)
		memcpy(*p, s, len);
	*p += len;
}
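
/*
 * Example (a minimal sketch; pool_name is hypothetical): a string goes
 * out as a u32 length followed by the unterminated bytes, so encoding a
 * C string is simply:
 *
 *	ceph_encode_string(p, end, pool_name, strlen(pool_name));
 */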

/*
 * version and length starting block encoders/decoders
 */

/* current code version (u8) + compat code version (u8) + len of struct (u32) */
#define CEPH_ENCODING_START_BLK_LEN 6

/**
 * ceph_start_encoding - start encoding block
 * @struct_v: current (code) version of the encoding
 * @struct_compat: oldest code version that can decode it
 * @struct_len: length of struct encoding
 */
static inline void ceph_start_encoding(void **p, u8 struct_v, u8 struct_compat,
				       u32 struct_len)
{
	ceph_encode_8(p, struct_v);
	ceph_encode_8(p, struct_compat);
	ceph_encode_32(p, struct_len);
}
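
/*
 * Example (a minimal sketch; the structure is hypothetical): the caller
 * computes the length of the payload that follows the start block and
 * emits it up front, e.g. for a v2 encoding whose body is a u64 and a
 * u32:
 *
 *	u32 struct_len = sizeof(u64) + sizeof(u32);
 *
 *	ceph_start_encoding(p, 2, 1, struct_len);
 *	ceph_encode_64(p, id);
 *	ceph_encode_32(p, flags);
 */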

/**
 * ceph_start_decoding - start decoding block
 * @v: current version of the encoding that the code supports
 * @name: name of the struct (free-form)
 * @struct_v: out param for the encoding version
 * @struct_len: out param for the length of struct encoding
 *
 * Validates the length of struct encoding, so unsafe ceph_decode_*
 * variants can be used for decoding.
 */
static inline int ceph_start_decoding(void **p, void *end, u8 v,
				      const char *name, u8 *struct_v,
				      u32 *struct_len)
{
	u8 struct_compat;

	ceph_decode_need(p, end, CEPH_ENCODING_START_BLK_LEN, bad);
	*struct_v = ceph_decode_8(p);
	struct_compat = ceph_decode_8(p);
	if (v < struct_compat) {
		pr_warn("got struct_v %d struct_compat %d > %d of %s\n",
			*struct_v, struct_compat, v, name);
		return -EINVAL;
	}

	*struct_len = ceph_decode_32(p);
	ceph_decode_need(p, end, *struct_len, bad);
	return 0;

bad:
	return -ERANGE;
}
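
/*
 * Example (a minimal sketch; "foo_info" and its fields are hypothetical):
 * since ceph_start_decoding() has already verified that struct_len bytes
 * are available, the body can be decoded with the unsafe helpers, and
 * anything appended by newer encoders can be skipped by jumping past
 * struct_len.
 *
 *	void *start;
 *	u8 struct_v;
 *	u32 struct_len, flags = 0;
 *	u64 id;
 *	int ret;
 *
 *	ret = ceph_start_decoding(p, end, 2, "foo_info",
 *				  &struct_v, &struct_len);
 *	if (ret)
 *		return ret;
 *
 *	start = *p;
 *	id = ceph_decode_64(p);
 *	if (struct_v >= 2)
 *		flags = ceph_decode_32(p);
 *	*p = start + struct_len;	// skip any fields we do not understand
 */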

#define ceph_encode_need(p, end, n, bad)			\
	do {							\
		if (!likely(ceph_has_room(p, end, n)))		\
			goto bad;				\
	} while (0)

#define ceph_encode_64_safe(p, end, v, bad)			\
	do {							\
		ceph_encode_need(p, end, sizeof(u64), bad);	\
		ceph_encode_64(p, v);				\
	} while (0)
#define ceph_encode_32_safe(p, end, v, bad)			\
	do {							\
		ceph_encode_need(p, end, sizeof(u32), bad);	\
		ceph_encode_32(p, v);				\
	} while (0)
#define ceph_encode_16_safe(p, end, v, bad)			\
	do {							\
		ceph_encode_need(p, end, sizeof(u16), bad);	\
		ceph_encode_16(p, v);				\
	} while (0)
#define ceph_encode_8_safe(p, end, v, bad)			\
	do {							\
		ceph_encode_need(p, end, sizeof(u8), bad);	\
		ceph_encode_8(p, v);				\
	} while (0)

#define ceph_encode_copy_safe(p, end, pv, n, bad)		\
	do {							\
		ceph_encode_need(p, end, n, bad);		\
		ceph_encode_copy(p, pv, n);			\
	} while (0)
#define ceph_encode_string_safe(p, end, s, n, bad)		\
	do {							\
		ceph_encode_need(p, end, n, bad);		\
		ceph_encode_string(p, end, s, n);		\
	} while (0)
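
/*
 * Example (a minimal sketch; the values are hypothetical): the encode
 * _safe variants mirror the decode ones and jump to the supplied label
 * when the destination buffer would overflow.
 *
 *	ceph_encode_32_safe(p, end, num_items, bad);
 *	ceph_encode_copy_safe(p, end, &fsid, sizeof(fsid), bad);
 *	return 0;
 * bad:
 *	return -ERANGE;
 */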


#endif