1 /* SPDX-License-Identifier: BSD-3-Clause 2 * Copyright(c) 2010-2014 Intel Corporation 3 */ 4 5 #ifndef MALLOC_ELEM_H_ 6 #define MALLOC_ELEM_H_ 7 8 #include <stdbool.h> 9 10 #define MIN_DATA_SIZE (RTE_CACHE_LINE_SIZE) 11 12 /* dummy definition of struct so we can use pointers to it in malloc_elem struct */ 13 struct malloc_heap; 14 15 enum elem_state { 16 ELEM_FREE = 0, 17 ELEM_BUSY, 18 ELEM_PAD /* element is a padding-only header */ 19 }; 20 21 struct malloc_elem { 22 struct malloc_heap *heap; 23 struct malloc_elem *volatile prev; 24 /**< points to prev elem in memseg */ 25 struct malloc_elem *volatile next; 26 /**< points to next elem in memseg */ 27 LIST_ENTRY(malloc_elem) free_list; 28 /**< list of free elements in heap */ 29 struct rte_memseg_list *msl; 30 /** Element state, @c dirty and @c pad validity depends on it. */ 31 /* An extra bit is needed to represent enum elem_state as signed int. */ 32 enum elem_state state : 3; 33 /** If state == ELEM_FREE: the memory is not filled with zeroes. */ 34 uint32_t dirty : 1; 35 /** Reserved for future use. 
*/ 36 uint32_t reserved : 28; 37 uint32_t pad; 38 size_t size; 39 struct malloc_elem *orig_elem; 40 size_t orig_size; 41 #ifdef RTE_MALLOC_DEBUG 42 uint64_t header_cookie; /* Cookie marking start of data */ 43 /* trailer cookie at start + size */ 44 #endif 45 #ifdef RTE_MALLOC_ASAN 46 size_t user_size; 47 uint64_t asan_cookie[2]; /* must be next to header_cookie */ 48 #endif 49 } __rte_cache_aligned; 50 51 static const unsigned int MALLOC_ELEM_HEADER_LEN = sizeof(struct malloc_elem); 52 53 #ifndef RTE_MALLOC_DEBUG 54 #ifdef RTE_MALLOC_ASAN 55 static const unsigned int MALLOC_ELEM_TRAILER_LEN = RTE_CACHE_LINE_SIZE; 56 #else 57 static const unsigned int MALLOC_ELEM_TRAILER_LEN; 58 #endif 59 60 /* dummy function - just check if pointer is non-null */ 61 static inline int 62 malloc_elem_cookies_ok(const struct malloc_elem *elem){ return elem != NULL; } 63 64 /* dummy function - no header if malloc_debug is not enabled */ 65 static inline void 66 set_header(struct malloc_elem *elem __rte_unused){ } 67 68 /* dummy function - no trailer if malloc_debug is not enabled */ 69 static inline void 70 set_trailer(struct malloc_elem *elem __rte_unused){ } 71 72 73 #else 74 static const unsigned int MALLOC_ELEM_TRAILER_LEN = RTE_CACHE_LINE_SIZE; 75 76 #define MALLOC_HEADER_COOKIE 0xbadbadbadadd2e55ULL /**< Header cookie. 
 */
#define MALLOC_TRAILER_COOKIE 0xadd2e55badbadbadULL /**< Trailer cookie.*/

/* define macros to make referencing the header and trailer cookies easier */
#define MALLOC_ELEM_TRAILER(elem) (*((uint64_t*)RTE_PTR_ADD(elem, \
		elem->size - MALLOC_ELEM_TRAILER_LEN)))
#define MALLOC_ELEM_HEADER(elem) (elem->header_cookie)

/* stamp the header cookie into the element header */
static inline void
set_header(struct malloc_elem *elem)
{
	if (elem != NULL)
		MALLOC_ELEM_HEADER(elem) = MALLOC_HEADER_COOKIE;
}

/* stamp the trailer cookie at the end of the element (elem + size - trailer) */
static inline void
set_trailer(struct malloc_elem *elem)
{
	if (elem != NULL)
		MALLOC_ELEM_TRAILER(elem) = MALLOC_TRAILER_COOKIE;
}

/* check that the header and trailer cookies are set correctly */
static inline int
malloc_elem_cookies_ok(const struct malloc_elem *elem)
{
	return elem != NULL &&
			MALLOC_ELEM_HEADER(elem) == MALLOC_HEADER_COOKIE &&
			MALLOC_ELEM_TRAILER(elem) == MALLOC_TRAILER_COOKIE;
}

#endif /* !RTE_MALLOC_DEBUG */

/* fixed per-element bookkeeping cost: header plus (possibly empty) trailer */
#define MALLOC_ELEM_OVERHEAD (MALLOC_ELEM_HEADER_LEN + MALLOC_ELEM_TRAILER_LEN)

#ifdef RTE_MALLOC_ASAN

/*
 * ASAN_SHADOW_OFFSET should match to the corresponding
 * value defined in gcc/libsanitizer/asan/asan_mapping.h
 */
#ifdef RTE_ARCH_X86_64
#define ASAN_SHADOW_OFFSET    0x00007fff8000
#elif defined(RTE_ARCH_ARM64)
#define ASAN_SHADOW_OFFSET    0x001000000000
#elif defined(RTE_ARCH_PPC_64)
#define ASAN_SHADOW_OFFSET    0x020000000000
#endif

/* one shadow byte describes one 8-byte grain of application memory */
#define ASAN_SHADOW_GRAIN_SIZE	8
#define ASAN_MEM_FREE_FLAG	0xfd	/* shadow marker: freed memory */
#define ASAN_MEM_REDZONE_FLAG	0xfa	/* shadow marker: redzone */
#define ASAN_SHADOW_SCALE    3

/* shadow address of an application address: (addr >> scale) + offset */
#define ASAN_MEM_SHIFT(mem) ((void *)((uintptr_t)(mem) >> ASAN_SHADOW_SCALE))
#define ASAN_MEM_TO_SHADOW(mem) \
	RTE_PTR_ADD(ASAN_MEM_SHIFT(mem), ASAN_SHADOW_OFFSET)

#if defined(__clang__)
#define __rte_no_asan __attribute__((no_sanitize("address", "hwaddress")))
#else
#define __rte_no_asan __attribute__((no_sanitize_address))
#endif

/* this attribute applies to asan_set_shadow() defined just below */
__rte_no_asan
141 static inline void 142 asan_set_shadow(void *addr, char val) 143 { 144 *(char *)addr = val; 145 } 146 147 static inline void 148 asan_set_zone(void *ptr, size_t len, uint32_t val) 149 { 150 size_t offset, i; 151 void *shadow; 152 size_t zone_len = len / ASAN_SHADOW_GRAIN_SIZE; 153 if (len % ASAN_SHADOW_GRAIN_SIZE != 0) 154 zone_len += 1; 155 156 for (i = 0; i < zone_len; i++) { 157 offset = i * ASAN_SHADOW_GRAIN_SIZE; 158 shadow = ASAN_MEM_TO_SHADOW((uintptr_t)ptr + offset); 159 asan_set_shadow(shadow, val); 160 } 161 } 162 163 /* 164 * When the memory is released, the release mark is 165 * set in the corresponding range of the shadow area. 166 */ 167 static inline void 168 asan_set_freezone(void *ptr, size_t size) 169 { 170 asan_set_zone(ptr, size, ASAN_MEM_FREE_FLAG); 171 } 172 173 /* 174 * When the memory is allocated, memory state must set as accessible. 175 */ 176 static inline void 177 asan_clear_alloczone(struct malloc_elem *elem) 178 { 179 asan_set_zone((void *)elem, elem->size, 0x0); 180 } 181 182 static inline void 183 asan_clear_split_alloczone(struct malloc_elem *elem) 184 { 185 void *ptr = RTE_PTR_SUB(elem, MALLOC_ELEM_TRAILER_LEN); 186 asan_set_zone(ptr, MALLOC_ELEM_OVERHEAD, 0x0); 187 } 188 189 /* 190 * When the memory is allocated, the memory boundary is 191 * marked in the corresponding range of the shadow area. 192 * Requirement: redzone >= 16, is a power of two. 
 */
static inline void
asan_set_redzone(struct malloc_elem *elem, size_t user_size)
{
	uintptr_t head_redzone;
	uintptr_t tail_redzone;
	void *front_shadow;
	void *tail_shadow;
	uint32_t val;

	if (elem != NULL) {
		/* step forward to the ELEM_PAD header sitting directly in
		 * front of the data area (no-op when elem->pad == 0) */
		if (elem->state != ELEM_PAD)
			elem = RTE_PTR_ADD(elem, elem->pad);

		/* remember caller-requested size for old_malloc_size() */
		elem->user_size = user_size;

		/* Set mark before the start of the allocated memory:
		 * the two grains immediately preceding the data area */
		head_redzone = (uintptr_t)RTE_PTR_ADD(elem,
			MALLOC_ELEM_HEADER_LEN - ASAN_SHADOW_GRAIN_SIZE);
		front_shadow = ASAN_MEM_TO_SHADOW(head_redzone);
		asan_set_shadow(front_shadow, ASAN_MEM_REDZONE_FLAG);
		front_shadow = ASAN_MEM_TO_SHADOW(head_redzone
			- ASAN_SHADOW_GRAIN_SIZE);
		asan_set_shadow(front_shadow, ASAN_MEM_REDZONE_FLAG);

		/* Set mark after the end of the allocated memory.  If the
		 * allocation ends mid-grain, the shadow byte holds the count
		 * of addressable bytes in that grain (ASan convention);
		 * otherwise the whole grain is marked as redzone. */
		tail_redzone = (uintptr_t)RTE_PTR_ADD(elem,
			MALLOC_ELEM_HEADER_LEN
			+ elem->user_size);
		tail_shadow = ASAN_MEM_TO_SHADOW(tail_redzone);
		val = (tail_redzone % ASAN_SHADOW_GRAIN_SIZE);
		val = (val == 0) ? ASAN_MEM_REDZONE_FLAG : val;
		asan_set_shadow(tail_shadow, val);
		tail_shadow = ASAN_MEM_TO_SHADOW(tail_redzone
			+ ASAN_SHADOW_GRAIN_SIZE);
		asan_set_shadow(tail_shadow, ASAN_MEM_REDZONE_FLAG);
	}
}

/*
 * When the memory is released, the mark of the memory boundary
 * in the corresponding range of the shadow area is cleared.
 * Requirement: redzone >= 16, is a power of two.
 */
static inline void
asan_clear_redzone(struct malloc_elem *elem)
{
	uintptr_t head_redzone;
	uintptr_t tail_redzone;
	void *head_shadow;
	void *tail_shadow;

	if (elem != NULL) {
		/* step forward to the header directly in front of the data
		 * (mirrors the adjustment done in asan_set_redzone()) */
		elem = RTE_PTR_ADD(elem, elem->pad);

		/* Clear mark before the start of the allocated memory */
		head_redzone = (uintptr_t)RTE_PTR_ADD(elem,
			MALLOC_ELEM_HEADER_LEN - ASAN_SHADOW_GRAIN_SIZE);
		head_shadow = ASAN_MEM_TO_SHADOW(head_redzone);
		asan_set_shadow(head_shadow, 0x00);
		head_shadow = ASAN_MEM_TO_SHADOW(head_redzone
			- ASAN_SHADOW_GRAIN_SIZE);
		asan_set_shadow(head_shadow, 0x00);

		/* Clear mark after the end of the allocated memory */
		tail_redzone = (uintptr_t)RTE_PTR_ADD(elem,
			MALLOC_ELEM_HEADER_LEN + elem->user_size);
		tail_shadow = ASAN_MEM_TO_SHADOW(tail_redzone);
		asan_set_shadow(tail_shadow, 0x00);
		tail_shadow = ASAN_MEM_TO_SHADOW(tail_redzone
			+ ASAN_SHADOW_GRAIN_SIZE);
		asan_set_shadow(tail_shadow, 0x00);
	}
}

/* caller-requested size of a live allocation (stored by asan_set_redzone) */
static inline size_t
old_malloc_size(struct malloc_elem *elem)
{
	if (elem->state != ELEM_PAD)
		elem = RTE_PTR_ADD(elem, elem->pad);

	return elem->user_size;
}

#else /* !RTE_MALLOC_ASAN */

#define __rte_no_asan

/* no-op stubs so callers need no #ifdefs when ASan support is disabled */
static inline void
asan_set_freezone(void *ptr __rte_unused, size_t size __rte_unused) { }

static inline void
asan_clear_alloczone(struct malloc_elem *elem __rte_unused) { }

static inline void
asan_clear_split_alloczone(struct malloc_elem *elem __rte_unused) { }

static inline void
asan_set_redzone(struct malloc_elem *elem __rte_unused,
		size_t user_size __rte_unused) { }

static inline void
asan_clear_redzone(struct malloc_elem *elem __rte_unused) { }

/* usable size of an allocation: total minus padding and header/trailer */
static inline size_t
old_malloc_size(struct malloc_elem *elem)
{
	return elem->size - elem->pad - MALLOC_ELEM_OVERHEAD;
}
#endif /* !RTE_MALLOC_ASAN */

/*
 * Given
 * a pointer to the start of a memory block returned by malloc, get
 * the actual malloc_elem header for that block.
 * Returns NULL for a NULL pointer, or (in debug builds) when the
 * header/trailer cookies are corrupted.
 */
static inline struct malloc_elem *
malloc_elem_from_data(const void *data)
{
	if (data == NULL)
		return NULL;

	struct malloc_elem *elem = RTE_PTR_SUB(data, MALLOC_ELEM_HEADER_LEN);
	if (!malloc_elem_cookies_ok(elem))
		return NULL;
	/* a pad header points back to the real element elem->pad bytes before */
	return elem->state != ELEM_PAD ? elem: RTE_PTR_SUB(elem, elem->pad);
}

/*
 * initialise a malloc_elem header
 */
void
malloc_elem_init(struct malloc_elem *elem,
		struct malloc_heap *heap,
		struct rte_memseg_list *msl,
		size_t size,
		struct malloc_elem *orig_elem,
		size_t orig_size,
		bool dirty);

/* link an initialised element into its heap's element list */
void
malloc_elem_insert(struct malloc_elem *elem);

/*
 * return true if the current malloc_elem can hold a block of data
 * of the requested size and with the requested alignment
 */
int
malloc_elem_can_hold(struct malloc_elem *elem, size_t size,
		unsigned int align, size_t bound, bool contig);

/*
 * reserve a block of data in an existing malloc_elem. If the malloc_elem
 * is much larger than the data block requested, we split the element in two.
 */
struct malloc_elem *
malloc_elem_alloc(struct malloc_elem *elem, size_t size,
		unsigned int align, size_t bound, bool contig);

/*
 * free a malloc_elem block by adding it to the free list. If the
 * blocks either immediately before or immediately after newly freed block
 * are also free, the blocks are merged together.
 */
struct malloc_elem *
malloc_elem_free(struct malloc_elem *elem);

/* merge an element with adjacent free neighbours; presumably returns the
 * resulting (possibly merged) element -- see malloc_elem.c */
struct malloc_elem *
malloc_elem_join_adjacent_free(struct malloc_elem *elem);

/*
 * attempt to resize a malloc_elem by expanding into any free space
 * immediately after it in memory.
 */
int
malloc_elem_resize(struct malloc_elem *elem, size_t size);

/* NOTE(review): presumably removes [start, start + len) of an element from
 * heap management (e.g. on memory unmapping) -- confirm in malloc_elem.c */
void
malloc_elem_hide_region(struct malloc_elem *elem, void *start, size_t len);

/*
 * Remove element from its heap's free list.
 */
void
malloc_elem_free_list_remove(struct malloc_elem *elem);

/*
 * dump contents of malloc elem to a file.
 */
void
malloc_elem_dump(const struct malloc_elem *elem, FILE *f);

/*
 * Given an element size, compute its freelist index.
 */
size_t
malloc_elem_free_list_index(size_t size);

/*
 * Add element to its heap's free list.
 */
void
malloc_elem_free_list_insert(struct malloc_elem *elem);

/*
 * Find biggest IOVA-contiguous zone within an element with specified alignment.
 */
size_t
malloc_elem_find_max_iova_contig(struct malloc_elem *elem, size_t align);

#endif /* MALLOC_ELEM_H_ */