/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_KASAN_H
#define _LINUX_KASAN_H

#include <linux/bug.h>
#include <linux/static_key.h>
#include <linux/types.h>

struct kmem_cache;
struct page;
struct vm_struct;
struct task_struct;

#ifdef CONFIG_KASAN

#include <linux/linkage.h>
#include <asm/kasan.h>

/*
 * The kunit_kasan_expectation struct is used by KUnit tests
 * for KASAN expected failures.
 */
struct kunit_kasan_expectation {
	bool report_found;
};

#endif /* CONFIG_KASAN */

#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)

#include <linux/pgtable.h>

/* Software KASAN implementations use shadow memory. */

#ifdef CONFIG_KASAN_SW_TAGS
/* This matches KASAN_TAG_INVALID. */
#define KASAN_SHADOW_INIT 0xFE
#else
#define KASAN_SHADOW_INIT 0
#endif

#ifndef PTE_HWTABLE_PTRS
#define PTE_HWTABLE_PTRS 0
#endif

extern unsigned char kasan_early_shadow_page[PAGE_SIZE];
extern pte_t kasan_early_shadow_pte[MAX_PTRS_PER_PTE + PTE_HWTABLE_PTRS];
extern pmd_t kasan_early_shadow_pmd[MAX_PTRS_PER_PMD];
extern pud_t kasan_early_shadow_pud[MAX_PTRS_PER_PUD];
extern p4d_t kasan_early_shadow_p4d[MAX_PTRS_PER_P4D];

int kasan_populate_early_shadow(const void *shadow_start,
				const void *shadow_end);

static inline void *kasan_mem_to_shadow(const void *addr)
{
	return (void *)((unsigned long)addr >> KASAN_SHADOW_SCALE_SHIFT)
		+ KASAN_SHADOW_OFFSET;
}
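
/*
 * Each shadow byte describes one granule of kernel memory: 8 bytes with
 * generic KASAN (KASAN_SHADOW_SCALE_SHIFT == 3) and 16 bytes with software
 * tag-based KASAN (KASAN_SHADOW_SCALE_SHIFT == 4). For example, in the
 * generic mode the shadow byte for an address p lives at
 * (p >> 3) + KASAN_SHADOW_OFFSET, which is exactly what
 * kasan_mem_to_shadow() above computes.
 */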

int kasan_add_zero_shadow(void *start, unsigned long size);
void kasan_remove_zero_shadow(void *start, unsigned long size);

/* Enable reporting bugs after kasan_disable_current() */
extern void kasan_enable_current(void);

/* Disable reporting bugs for current task */
extern void kasan_disable_current(void);

#else /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */

static inline int kasan_add_zero_shadow(void *start, unsigned long size)
{
	return 0;
}
static inline void kasan_remove_zero_shadow(void *start,
					unsigned long size)
{}

static inline void kasan_enable_current(void) {}
static inline void kasan_disable_current(void) {}

#endif /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */

#ifdef CONFIG_KASAN_HW_TAGS

DECLARE_STATIC_KEY_FALSE(kasan_flag_enabled);

static __always_inline bool kasan_enabled(void)
{
	return static_branch_likely(&kasan_flag_enabled);
}

static inline bool kasan_has_integrated_init(void)
{
	return kasan_enabled();
}

void kasan_alloc_pages(struct page *page, unsigned int order, gfp_t flags);
void kasan_free_pages(struct page *page, unsigned int order);

#else /* CONFIG_KASAN_HW_TAGS */

static inline bool kasan_enabled(void)
{
	return IS_ENABLED(CONFIG_KASAN);
}

static inline bool kasan_has_integrated_init(void)
{
	return false;
}

static __always_inline void kasan_alloc_pages(struct page *page,
					      unsigned int order, gfp_t flags)
{
	/* Only available for integrated init. */
	BUILD_BUG();
}

static __always_inline void kasan_free_pages(struct page *page,
					     unsigned int order)
{
	/* Only available for integrated init. */
	BUILD_BUG();
}

#endif /* CONFIG_KASAN_HW_TAGS */
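
/*
 * The kasan_* wrappers below follow a single pattern: each one checks
 * kasan_enabled() and only then calls the out-of-line __kasan_*
 * implementation. With CONFIG_KASAN_HW_TAGS, kasan_enabled() is a
 * static-branch test, so every hook collapses to a no-op when KASAN was
 * not enabled during boot; for the software modes it is the compile-time
 * constant IS_ENABLED(CONFIG_KASAN).
 */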

#ifdef CONFIG_KASAN

struct kasan_cache {
	int alloc_meta_offset;
	int free_meta_offset;
	bool is_kmalloc;
};

slab_flags_t __kasan_never_merge(void);
static __always_inline slab_flags_t kasan_never_merge(void)
{
	if (kasan_enabled())
		return __kasan_never_merge();
	return 0;
}

void __kasan_unpoison_range(const void *addr, size_t size);
static __always_inline void kasan_unpoison_range(const void *addr, size_t size)
{
	if (kasan_enabled())
		__kasan_unpoison_range(addr, size);
}

void __kasan_poison_pages(struct page *page, unsigned int order, bool init);
static __always_inline void kasan_poison_pages(struct page *page,
						unsigned int order, bool init)
{
	if (kasan_enabled())
		__kasan_poison_pages(page, order, init);
}

void __kasan_unpoison_pages(struct page *page, unsigned int order, bool init);
static __always_inline void kasan_unpoison_pages(struct page *page,
						 unsigned int order, bool init)
{
	if (kasan_enabled())
		__kasan_unpoison_pages(page, order, init);
}

void __kasan_cache_create(struct kmem_cache *cache, unsigned int *size,
			  slab_flags_t *flags);
static __always_inline void kasan_cache_create(struct kmem_cache *cache,
					       unsigned int *size,
					       slab_flags_t *flags)
{
	if (kasan_enabled())
		__kasan_cache_create(cache, size, flags);
}

void __kasan_cache_create_kmalloc(struct kmem_cache *cache);
static __always_inline void kasan_cache_create_kmalloc(struct kmem_cache *cache)
{
	if (kasan_enabled())
		__kasan_cache_create_kmalloc(cache);
}

size_t __kasan_metadata_size(struct kmem_cache *cache);
static __always_inline size_t kasan_metadata_size(struct kmem_cache *cache)
{
	if (kasan_enabled())
		return __kasan_metadata_size(cache);
	return 0;
}

void __kasan_poison_slab(struct page *page);
static __always_inline void kasan_poison_slab(struct page *page)
{
	if (kasan_enabled())
		__kasan_poison_slab(page);
}

void __kasan_unpoison_object_data(struct kmem_cache *cache, void *object);
static __always_inline void kasan_unpoison_object_data(struct kmem_cache *cache,
							void *object)
{
	if (kasan_enabled())
		__kasan_unpoison_object_data(cache, object);
}

void __kasan_poison_object_data(struct kmem_cache *cache, void *object);
static __always_inline void kasan_poison_object_data(struct kmem_cache *cache,
							void *object)
{
	if (kasan_enabled())
		__kasan_poison_object_data(cache, object);
}

void * __must_check __kasan_init_slab_obj(struct kmem_cache *cache,
					  const void *object);
static __always_inline void * __must_check kasan_init_slab_obj(
				struct kmem_cache *cache, const void *object)
{
	if (kasan_enabled())
		return __kasan_init_slab_obj(cache, object);
	return (void *)object;
}

bool __kasan_slab_free(struct kmem_cache *s, void *object,
			unsigned long ip, bool init);
static __always_inline bool kasan_slab_free(struct kmem_cache *s,
						void *object, bool init)
{
	if (kasan_enabled())
		return __kasan_slab_free(s, object, _RET_IP_, init);
	return false;
}

void __kasan_kfree_large(void *ptr, unsigned long ip);
static __always_inline void kasan_kfree_large(void *ptr)
{
	if (kasan_enabled())
		__kasan_kfree_large(ptr, _RET_IP_);
}

void __kasan_slab_free_mempool(void *ptr, unsigned long ip);
static __always_inline void kasan_slab_free_mempool(void *ptr)
{
	if (kasan_enabled())
		__kasan_slab_free_mempool(ptr, _RET_IP_);
}

void * __must_check __kasan_slab_alloc(struct kmem_cache *s,
				       void *object, gfp_t flags, bool init);
static __always_inline void * __must_check kasan_slab_alloc(
		struct kmem_cache *s, void *object, gfp_t flags, bool init)
{
	if (kasan_enabled())
		return __kasan_slab_alloc(s, object, flags, init);
	return object;
}

void * __must_check __kasan_kmalloc(struct kmem_cache *s, const void *object,
				    size_t size, gfp_t flags);
static __always_inline void * __must_check kasan_kmalloc(struct kmem_cache *s,
				const void *object, size_t size, gfp_t flags)
{
	if (kasan_enabled())
		return __kasan_kmalloc(s, object, size, flags);
	return (void *)object;
}

void * __must_check __kasan_kmalloc_large(const void *ptr,
					  size_t size, gfp_t flags);
static __always_inline void * __must_check kasan_kmalloc_large(const void *ptr,
						      size_t size, gfp_t flags)
{
	if (kasan_enabled())
		return __kasan_kmalloc_large(ptr, size, flags);
	return (void *)ptr;
}

void * __must_check __kasan_krealloc(const void *object,
				     size_t new_size, gfp_t flags);
static __always_inline void * __must_check kasan_krealloc(const void *object,
						 size_t new_size, gfp_t flags)
{
	if (kasan_enabled())
		return __kasan_krealloc(object, new_size, flags);
	return (void *)object;
}

/*
 * Unlike kasan_check_read/write(), kasan_check_byte() is performed even for
 * the hardware tag-based mode that doesn't rely on compiler instrumentation.
 */
bool __kasan_check_byte(const void *addr, unsigned long ip);
static __always_inline bool kasan_check_byte(const void *addr)
{
	if (kasan_enabled())
		return __kasan_check_byte(addr, _RET_IP_);
	return true;
}
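
/*
 * Example usage (a sketch, not a requirement of this API): a caller that
 * must read an object which might already have been freed, such as
 * ksize(), can bail out when the first byte is not accessible:
 *
 *	if (unlikely(!kasan_check_byte(objp)))
 *		return 0;
 */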

bool kasan_save_enable_multi_shot(void);
void kasan_restore_multi_shot(bool enabled);

#else /* CONFIG_KASAN */

static inline slab_flags_t kasan_never_merge(void)
{
	return 0;
}
static inline void kasan_unpoison_range(const void *address, size_t size) {}
static inline void kasan_poison_pages(struct page *page, unsigned int order,
				      bool init) {}
static inline void kasan_unpoison_pages(struct page *page, unsigned int order,
					bool init) {}
static inline void kasan_cache_create(struct kmem_cache *cache,
				      unsigned int *size,
				      slab_flags_t *flags) {}
static inline void kasan_cache_create_kmalloc(struct kmem_cache *cache) {}
static inline size_t kasan_metadata_size(struct kmem_cache *cache) { return 0; }
static inline void kasan_poison_slab(struct page *page) {}
static inline void kasan_unpoison_object_data(struct kmem_cache *cache,
					      void *object) {}
static inline void kasan_poison_object_data(struct kmem_cache *cache,
					    void *object) {}
static inline void *kasan_init_slab_obj(struct kmem_cache *cache,
					const void *object)
{
	return (void *)object;
}
static inline bool kasan_slab_free(struct kmem_cache *s, void *object, bool init)
{
	return false;
}
static inline void kasan_kfree_large(void *ptr) {}
static inline void kasan_slab_free_mempool(void *ptr) {}
static inline void *kasan_slab_alloc(struct kmem_cache *s, void *object,
				     gfp_t flags, bool init)
{
	return object;
}
static inline void *kasan_kmalloc(struct kmem_cache *s, const void *object,
				  size_t size, gfp_t flags)
{
	return (void *)object;
}
static inline void *kasan_kmalloc_large(const void *ptr, size_t size, gfp_t flags)
{
	return (void *)ptr;
}
static inline void *kasan_krealloc(const void *object, size_t new_size,
				   gfp_t flags)
{
	return (void *)object;
}
static inline bool kasan_check_byte(const void *address)
{
	return true;
}

#endif /* CONFIG_KASAN */

#if defined(CONFIG_KASAN) && defined(CONFIG_KASAN_STACK)
void kasan_unpoison_task_stack(struct task_struct *task);
#else
static inline void kasan_unpoison_task_stack(struct task_struct *task) {}
#endif

#ifdef CONFIG_KASAN_GENERIC

void kasan_cache_shrink(struct kmem_cache *cache);
void kasan_cache_shutdown(struct kmem_cache *cache);
void kasan_record_aux_stack(void *ptr);

#else /* CONFIG_KASAN_GENERIC */

static inline void kasan_cache_shrink(struct kmem_cache *cache) {}
static inline void kasan_cache_shutdown(struct kmem_cache *cache) {}
static inline void kasan_record_aux_stack(void *ptr) {}

#endif /* CONFIG_KASAN_GENERIC */

#if defined(CONFIG_KASAN_SW_TAGS) || defined(CONFIG_KASAN_HW_TAGS)

static inline void *kasan_reset_tag(const void *addr)
{
	return (void *)arch_kasan_reset_tag(addr);
}

/**
 * kasan_report - print a report about a bad memory access detected by KASAN
 * @addr: address of the bad access
 * @size: size of the bad access
 * @is_write: whether the bad access is a write or a read
 * @ip: instruction pointer for the accessibility check or the bad access itself
 */
bool kasan_report(unsigned long addr, size_t size,
		bool is_write, unsigned long ip);

#else /* CONFIG_KASAN_SW_TAGS || CONFIG_KASAN_HW_TAGS */

static inline void *kasan_reset_tag(const void *addr)
{
	return (void *)addr;
}

#endif /* CONFIG_KASAN_SW_TAGS || CONFIG_KASAN_HW_TAGS */

#ifdef CONFIG_KASAN_HW_TAGS

void kasan_report_async(void);

#endif /* CONFIG_KASAN_HW_TAGS */

#ifdef CONFIG_KASAN_SW_TAGS
void __init kasan_init_sw_tags(void);
#else
static inline void kasan_init_sw_tags(void) { }
#endif

#ifdef CONFIG_KASAN_HW_TAGS
void kasan_init_hw_tags_cpu(void);
void __init kasan_init_hw_tags(void);
#else
static inline void kasan_init_hw_tags_cpu(void) { }
static inline void kasan_init_hw_tags(void) { }
#endif

#ifdef CONFIG_KASAN_VMALLOC
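
/*
 * Rough contract of the hooks below: kasan_populate_vmalloc() backs a new
 * vmalloc region with real shadow memory, and kasan_release_vmalloc()
 * frees that shadow again once the region is gone. The extra
 * free_region_start/free_region_end pair bounds how much shadow may
 * actually be freed, since a single shadow page can cover several vmalloc
 * areas, some of which may still be live.
 */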

int kasan_populate_vmalloc(unsigned long addr, unsigned long size);
void kasan_poison_vmalloc(const void *start, unsigned long size);
void kasan_unpoison_vmalloc(const void *start, unsigned long size);
void kasan_release_vmalloc(unsigned long start, unsigned long end,
			   unsigned long free_region_start,
			   unsigned long free_region_end);

#else /* CONFIG_KASAN_VMALLOC */

static inline int kasan_populate_vmalloc(unsigned long start,
					unsigned long size)
{
	return 0;
}

static inline void kasan_poison_vmalloc(const void *start, unsigned long size)
{ }
static inline void kasan_unpoison_vmalloc(const void *start, unsigned long size)
{ }
static inline void kasan_release_vmalloc(unsigned long start,
					 unsigned long end,
					 unsigned long free_region_start,
					 unsigned long free_region_end) {}

#endif /* CONFIG_KASAN_VMALLOC */

#if (defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)) && \
	!defined(CONFIG_KASAN_VMALLOC)

/*
 * These functions provide a special case to support backing module
 * allocations with real shadow memory. With KASAN vmalloc, the special
 * case is unnecessary, as the work is handled in the generic case.
 */
int kasan_module_alloc(void *addr, size_t size);
void kasan_free_shadow(const struct vm_struct *vm);

#else /* (CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS) && !CONFIG_KASAN_VMALLOC */

static inline int kasan_module_alloc(void *addr, size_t size) { return 0; }
static inline void kasan_free_shadow(const struct vm_struct *vm) {}

#endif /* (CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS) && !CONFIG_KASAN_VMALLOC */

#ifdef CONFIG_KASAN_INLINE
void kasan_non_canonical_hook(unsigned long addr);
#else /* CONFIG_KASAN_INLINE */
static inline void kasan_non_canonical_hook(unsigned long addr) { }
#endif /* CONFIG_KASAN_INLINE */

#endif /* _LINUX_KASAN_H */