/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_KASAN_H
#define _LINUX_KASAN_H

#include <linux/bug.h>
#include <linux/kasan-enabled.h>
#include <linux/kasan-tags.h>
#include <linux/kernel.h>
#include <linux/static_key.h>
#include <linux/types.h>

struct kmem_cache;
struct page;
struct slab;
struct vm_struct;
struct task_struct;

#ifdef CONFIG_KASAN

#include <linux/linkage.h>
#include <asm/kasan.h>

#endif

typedef unsigned int __bitwise kasan_vmalloc_flags_t;

#define KASAN_VMALLOC_NONE		((__force kasan_vmalloc_flags_t)0x00u)
#define KASAN_VMALLOC_INIT		((__force kasan_vmalloc_flags_t)0x01u)
#define KASAN_VMALLOC_VM_ALLOC		((__force kasan_vmalloc_flags_t)0x02u)
#define KASAN_VMALLOC_PROT_NORMAL	((__force kasan_vmalloc_flags_t)0x04u)

#define KASAN_VMALLOC_PAGE_RANGE 0x1 /* Apply existing page range */
#define KASAN_VMALLOC_TLB_FLUSH  0x2 /* TLB flush */

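/*
 * Illustrative sketch (not part of this header): a vmalloc-style caller
 * composes kasan_vmalloc_flags_t values as a bitmask and passes them to
 * kasan_unpoison_vmalloc(); "want_init" and "addr" are hypothetical:
 *
 *	kasan_vmalloc_flags_t kasan_flags = KASAN_VMALLOC_NONE;
 *
 *	kasan_flags |= KASAN_VMALLOC_VM_ALLOC;
 *	if (want_init)
 *		kasan_flags |= KASAN_VMALLOC_INIT;
 *	addr = kasan_unpoison_vmalloc(addr, size, kasan_flags);
 */
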
#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)

#include <linux/pgtable.h>

/* Software KASAN implementations use shadow memory. */

#ifdef CONFIG_KASAN_SW_TAGS
/* This matches KASAN_TAG_INVALID. */
#define KASAN_SHADOW_INIT 0xFE
#else
#define KASAN_SHADOW_INIT 0
#endif

#ifndef PTE_HWTABLE_PTRS
#define PTE_HWTABLE_PTRS 0
#endif

extern unsigned char kasan_early_shadow_page[PAGE_SIZE];
extern pte_t kasan_early_shadow_pte[MAX_PTRS_PER_PTE + PTE_HWTABLE_PTRS];
extern pmd_t kasan_early_shadow_pmd[MAX_PTRS_PER_PMD];
extern pud_t kasan_early_shadow_pud[MAX_PTRS_PER_PUD];
extern p4d_t kasan_early_shadow_p4d[MAX_PTRS_PER_P4D];

int kasan_populate_early_shadow(const void *shadow_start,
				const void *shadow_end);

#ifndef kasan_mem_to_shadow
static inline void *kasan_mem_to_shadow(const void *addr)
{
	return (void *)((unsigned long)addr >> KASAN_SHADOW_SCALE_SHIFT)
		+ KASAN_SHADOW_OFFSET;
}
#endif

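/*
 * Illustrative sketch: with KASAN_SHADOW_SCALE_SHIFT == 3 (the Generic
 * mode), every 8 bytes of kernel memory map to one shadow byte, so
 * adjacent addresses can share a shadow byte; "p" is hypothetical:
 *
 *	u8 *shadow = kasan_mem_to_shadow(p);
 *	// kasan_mem_to_shadow(p + 7) == shadow
 *	// kasan_mem_to_shadow(p + 8) == shadow + 1
 */
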
int kasan_add_zero_shadow(void *start, unsigned long size);
void kasan_remove_zero_shadow(void *start, unsigned long size);

/* Enable reporting bugs after kasan_disable_current() */
extern void kasan_enable_current(void);

/* Disable reporting bugs for current task */
extern void kasan_disable_current(void);

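/*
 * Illustrative sketch: these calls must pair up around code whose
 * accesses should not be reported, e.g. a deliberately unchecked
 * diagnostic read; "ptr" and "val" are hypothetical:
 *
 *	kasan_disable_current();
 *	val = READ_ONCE(*ptr);		// not reported even if poisoned
 *	kasan_enable_current();
 */
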
#else /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */

static inline int kasan_add_zero_shadow(void *start, unsigned long size)
{
	return 0;
}
static inline void kasan_remove_zero_shadow(void *start,
					unsigned long size)
{}

static inline void kasan_enable_current(void) {}
static inline void kasan_disable_current(void) {}

#endif /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */

#ifdef CONFIG_KASAN_HW_TAGS

#else /* CONFIG_KASAN_HW_TAGS */

#endif /* CONFIG_KASAN_HW_TAGS */

static inline bool kasan_has_integrated_init(void)
{
	return kasan_hw_tags_enabled();
}

#ifdef CONFIG_KASAN
void __kasan_unpoison_range(const void *addr, size_t size);
static __always_inline void kasan_unpoison_range(const void *addr, size_t size)
{
	if (kasan_enabled())
		__kasan_unpoison_range(addr, size);
}

void __kasan_poison_pages(struct page *page, unsigned int order, bool init);
static __always_inline void kasan_poison_pages(struct page *page,
						unsigned int order, bool init)
{
	if (kasan_enabled())
		__kasan_poison_pages(page, order, init);
}

bool __kasan_unpoison_pages(struct page *page, unsigned int order, bool init);
static __always_inline bool kasan_unpoison_pages(struct page *page,
						 unsigned int order, bool init)
{
	if (kasan_enabled())
		return __kasan_unpoison_pages(page, order, init);
	return false;
}

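/*
 * Illustrative sketch of how the page allocator pairs these hooks
 * (simplified from mm/page_alloc.c; the surrounding logic is elided):
 *
 *	// allocation path: a true return value means KASAN already
 *	// initialized the pages, so the caller can skip its own memzero
 *	if (!kasan_unpoison_pages(page, order, init) && init)
 *		kernel_init_pages(page, 1 << order);
 *
 *	// free path: repoison so later stray accesses are caught
 *	kasan_poison_pages(page, order, init);
 */
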
void __kasan_poison_slab(struct slab *slab);
static __always_inline void kasan_poison_slab(struct slab *slab)
{
	if (kasan_enabled())
		__kasan_poison_slab(slab);
}

void __kasan_unpoison_new_object(struct kmem_cache *cache, void *object);
/**
 * kasan_unpoison_new_object - Temporarily unpoison a new slab object.
 * @cache: Cache the object belongs to.
 * @object: Pointer to the object.
 *
 * This function is intended for the slab allocator's internal use. It
 * temporarily unpoisons an object from a newly allocated slab without doing
 * anything else. The object must later be repoisoned by
 * kasan_poison_new_object().
 */
static __always_inline void kasan_unpoison_new_object(struct kmem_cache *cache,
							void *object)
{
	if (kasan_enabled())
		__kasan_unpoison_new_object(cache, object);
}

void __kasan_poison_new_object(struct kmem_cache *cache, void *object);
/**
 * kasan_poison_new_object - Repoison a new slab object.
 * @cache: Cache the object belongs to.
 * @object: Pointer to the object.
 *
 * This function is intended for the slab allocator's internal use. It
 * repoisons an object that was previously unpoisoned by
 * kasan_unpoison_new_object() without doing anything else.
 */
static __always_inline void kasan_poison_new_object(struct kmem_cache *cache,
							void *object)
{
	if (kasan_enabled())
		__kasan_poison_new_object(cache, object);
}

void * __must_check __kasan_init_slab_obj(struct kmem_cache *cache,
					  const void *object);
static __always_inline void * __must_check kasan_init_slab_obj(
				struct kmem_cache *cache, const void *object)
{
	if (kasan_enabled())
		return __kasan_init_slab_obj(cache, object);
	return (void *)object;
}

bool __kasan_slab_pre_free(struct kmem_cache *s, void *object,
			unsigned long ip);
/**
 * kasan_slab_pre_free - Check whether freeing a slab object is safe.
 * @object: Object to be freed.
 *
 * This function checks whether freeing the given object is safe. It may
 * check for double-free and invalid-free bugs and report them.
 *
 * This function is intended only for use by the slab allocator.
 *
 * Return: true if freeing the object is unsafe; false otherwise.
 */
static __always_inline bool kasan_slab_pre_free(struct kmem_cache *s,
						void *object)
{
	if (kasan_enabled())
		return __kasan_slab_pre_free(s, object, _RET_IP_);
	return false;
}

bool __kasan_slab_free(struct kmem_cache *s, void *object, bool init,
		       bool still_accessible);
/**
 * kasan_slab_free - Poison, initialize, and quarantine a slab object.
 * @object: Object to be freed.
 * @init: Whether to initialize the object.
 * @still_accessible: Whether the object contents are still accessible.
 *
 * This function informs KASAN that a slab object has been freed and is not
 * supposed to be accessed anymore, except when @still_accessible is set
 * (indicating that the object is in a SLAB_TYPESAFE_BY_RCU cache and an RCU
 * grace period might not have passed yet).
 *
 * For KASAN modes that have integrated memory initialization
 * (kasan_has_integrated_init() == true), this function also initializes
 * the object's memory. For other modes, the @init argument is ignored.
 *
 * This function might also take ownership of the object to quarantine it.
 * When this happens, KASAN will defer freeing the object to a later
 * stage and handle it internally until then. The return value indicates
 * whether KASAN took ownership of the object.
 *
 * This function is intended only for use by the slab allocator.
 *
 * Return: true if KASAN took ownership of the object; false otherwise.
 */
static __always_inline bool kasan_slab_free(struct kmem_cache *s,
						void *object, bool init,
						bool still_accessible)
{
	if (kasan_enabled())
		return __kasan_slab_free(s, object, init, still_accessible);
	return false;
}

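/*
 * Illustrative sketch of the slab free path pairing kasan_slab_pre_free()
 * with kasan_slab_free() (simplified; see mm/slub.c for the real logic):
 *
 *	if (kasan_slab_pre_free(s, object))
 *		return;		// double-free or invalid-free was reported
 *	if (kasan_slab_free(s, object, init, false))
 *		return;		// KASAN quarantined the object; free deferred
 *	// ...proceed with the actual free...
 */
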
void __kasan_kfree_large(void *ptr, unsigned long ip);
static __always_inline void kasan_kfree_large(void *ptr)
{
	if (kasan_enabled())
		__kasan_kfree_large(ptr, _RET_IP_);
}

void * __must_check __kasan_slab_alloc(struct kmem_cache *s,
				       void *object, gfp_t flags, bool init);
static __always_inline void * __must_check kasan_slab_alloc(
		struct kmem_cache *s, void *object, gfp_t flags, bool init)
{
	if (kasan_enabled())
		return __kasan_slab_alloc(s, object, flags, init);
	return object;
}

void * __must_check __kasan_kmalloc(struct kmem_cache *s, const void *object,
				    size_t size, gfp_t flags);
static __always_inline void * __must_check kasan_kmalloc(struct kmem_cache *s,
				const void *object, size_t size, gfp_t flags)
{
	if (kasan_enabled())
		return __kasan_kmalloc(s, object, size, flags);
	return (void *)object;
}

void * __must_check __kasan_kmalloc_large(const void *ptr,
					  size_t size, gfp_t flags);
static __always_inline void * __must_check kasan_kmalloc_large(const void *ptr,
						      size_t size, gfp_t flags)
{
	if (kasan_enabled())
		return __kasan_kmalloc_large(ptr, size, flags);
	return (void *)ptr;
}

void * __must_check __kasan_krealloc(const void *object,
				     size_t new_size, gfp_t flags);
static __always_inline void * __must_check kasan_krealloc(const void *object,
						 size_t new_size, gfp_t flags)
{
	if (kasan_enabled())
		return __kasan_krealloc(object, new_size, flags);
	return (void *)object;
}

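/*
 * Illustrative sketch of the allocation side (simplified): the slab
 * allocator unpoisons the object via kasan_slab_alloc(), and the kmalloc
 * path additionally calls kasan_kmalloc() so the redzone past the
 * requested size stays poisoned; "orig_size" is hypothetical:
 *
 *	object = kasan_slab_alloc(s, object, flags, init);
 *	object = kasan_kmalloc(s, object, orig_size, flags);
 */
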
bool __kasan_mempool_poison_pages(struct page *page, unsigned int order,
				  unsigned long ip);
/**
 * kasan_mempool_poison_pages - Check and poison a mempool page allocation.
 * @page: Pointer to the page allocation.
 * @order: Order of the allocation.
 *
 * This function is intended for kernel subsystems that cache page allocations
 * to reuse them instead of freeing them back to page_alloc (e.g. mempool).
 *
 * This function is similar to kasan_mempool_poison_object() but operates on
 * page allocations.
 *
 * Before the poisoned allocation can be reused, it must be unpoisoned via
 * kasan_mempool_unpoison_pages().
 *
 * Return: true if the allocation can be safely reused; false otherwise.
 */
static __always_inline bool kasan_mempool_poison_pages(struct page *page,
						       unsigned int order)
{
	if (kasan_enabled())
		return __kasan_mempool_poison_pages(page, order, _RET_IP_);
	return true;
}

void __kasan_mempool_unpoison_pages(struct page *page, unsigned int order,
				    unsigned long ip);
/**
 * kasan_mempool_unpoison_pages - Unpoison a mempool page allocation.
 * @page: Pointer to the page allocation.
 * @order: Order of the allocation.
 *
 * This function is intended for kernel subsystems that cache page allocations
 * to reuse them instead of freeing them back to page_alloc (e.g. mempool).
 *
 * This function unpoisons a page allocation that was previously poisoned by
 * kasan_mempool_poison_pages() without zeroing the allocation's memory. For
 * the tag-based modes, this function assigns a new tag to the allocation.
 */
static __always_inline void kasan_mempool_unpoison_pages(struct page *page,
							 unsigned int order)
{
	if (kasan_enabled())
		__kasan_mempool_unpoison_pages(page, order, _RET_IP_);
}

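/*
 * Illustrative sketch of a page-backed pool pairing the two hooks above;
 * the pool and its helpers are hypothetical:
 *
 *	if (kasan_mempool_poison_pages(page, pool->order))
 *		stash_page(pool, page);		// safe to cache for reuse
 *	...
 *	page = fetch_page(pool);
 *	kasan_mempool_unpoison_pages(page, pool->order);
 */
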
bool __kasan_mempool_poison_object(void *ptr, unsigned long ip);
/**
 * kasan_mempool_poison_object - Check and poison a mempool slab allocation.
 * @ptr: Pointer to the slab allocation.
 *
 * This function is intended for kernel subsystems that cache slab allocations
 * to reuse them instead of freeing them back to the slab allocator (e.g.
 * mempool).
 *
 * This function poisons a slab allocation and saves a free stack trace for it
 * without initializing the allocation's memory and without putting it into the
 * quarantine (for the Generic mode).
 *
 * This function also performs checks to detect double-free and invalid-free
 * bugs and reports them. The caller can use the return value of this function
 * to find out if the allocation is buggy.
 *
 * Before the poisoned allocation can be reused, it must be unpoisoned via
 * kasan_mempool_unpoison_object().
 *
 * This function operates on all slab allocations including large kmalloc
 * allocations (the ones returned by kmalloc_large() or by kmalloc() with the
 * size > KMALLOC_MAX_CACHE_SIZE).
 *
 * Return: true if the allocation can be safely reused; false otherwise.
 */
static __always_inline bool kasan_mempool_poison_object(void *ptr)
{
	if (kasan_enabled())
		return __kasan_mempool_poison_object(ptr, _RET_IP_);
	return true;
}

void __kasan_mempool_unpoison_object(void *ptr, size_t size, unsigned long ip);
/**
 * kasan_mempool_unpoison_object - Unpoison a mempool slab allocation.
 * @ptr: Pointer to the slab allocation.
 * @size: Size to be unpoisoned.
 *
 * This function is intended for kernel subsystems that cache slab allocations
 * to reuse them instead of freeing them back to the slab allocator (e.g.
 * mempool).
 *
 * This function unpoisons a slab allocation that was previously poisoned via
 * kasan_mempool_poison_object() and saves an alloc stack trace for it without
 * initializing the allocation's memory. For the tag-based modes, this function
 * does not assign a new tag to the allocation and instead restores the
 * original tags based on the pointer value.
 *
 * This function operates on all slab allocations including large kmalloc
 * allocations (the ones returned by kmalloc_large() or by kmalloc() with the
 * size > KMALLOC_MAX_CACHE_SIZE).
 */
static __always_inline void kasan_mempool_unpoison_object(void *ptr,
							  size_t size)
{
	if (kasan_enabled())
		__kasan_mempool_unpoison_object(ptr, size, _RET_IP_);
}

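/*
 * Illustrative sketch of an element cache pairing the two hooks above,
 * modeled on mm/mempool.c; the pool and its helpers are hypothetical:
 *
 *	if (kasan_mempool_poison_object(element))
 *		add_element(pool, element);	// cache instead of freeing
 *	...
 *	element = remove_element(pool);
 *	kasan_mempool_unpoison_object(element, pool->element_size);
 */
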
/*
 * Unlike kasan_check_read/write(), kasan_check_byte() is performed even for
 * the hardware tag-based mode that doesn't rely on compiler instrumentation.
 */
bool __kasan_check_byte(const void *addr, unsigned long ip);
static __always_inline bool kasan_check_byte(const void *addr)
{
	if (kasan_enabled())
		return __kasan_check_byte(addr, _RET_IP_);
	return true;
}

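/*
 * Illustrative sketch, modeled loosely on ksize(): validate that an
 * address is accessible before operating on the object behind it:
 *
 *	if (!kasan_check_byte(objp))
 *		return 0;	// invalid object; a report was printed
 *	// ...safe to query the object here...
 */
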
#else /* CONFIG_KASAN */

static inline void kasan_unpoison_range(const void *address, size_t size) {}
static inline void kasan_poison_pages(struct page *page, unsigned int order,
				      bool init) {}
static inline bool kasan_unpoison_pages(struct page *page, unsigned int order,
					bool init)
{
	return false;
}
static inline void kasan_poison_slab(struct slab *slab) {}
static inline void kasan_unpoison_new_object(struct kmem_cache *cache,
					void *object) {}
static inline void kasan_poison_new_object(struct kmem_cache *cache,
					void *object) {}
static inline void *kasan_init_slab_obj(struct kmem_cache *cache,
				const void *object)
{
	return (void *)object;
}

static inline bool kasan_slab_pre_free(struct kmem_cache *s, void *object)
{
	return false;
}

static inline bool kasan_slab_free(struct kmem_cache *s, void *object,
				   bool init, bool still_accessible)
{
	return false;
}
static inline void kasan_kfree_large(void *ptr) {}
static inline void *kasan_slab_alloc(struct kmem_cache *s, void *object,
				   gfp_t flags, bool init)
{
	return object;
}
static inline void *kasan_kmalloc(struct kmem_cache *s, const void *object,
				size_t size, gfp_t flags)
{
	return (void *)object;
}
static inline void *kasan_kmalloc_large(const void *ptr, size_t size, gfp_t flags)
{
	return (void *)ptr;
}
static inline void *kasan_krealloc(const void *object, size_t new_size,
				 gfp_t flags)
{
	return (void *)object;
}
static inline bool kasan_mempool_poison_pages(struct page *page, unsigned int order)
{
	return true;
}
static inline void kasan_mempool_unpoison_pages(struct page *page, unsigned int order) {}
static inline bool kasan_mempool_poison_object(void *ptr)
{
	return true;
}
static inline void kasan_mempool_unpoison_object(void *ptr, size_t size) {}

static inline bool kasan_check_byte(const void *address)
{
	return true;
}

#endif /* CONFIG_KASAN */

#if defined(CONFIG_KASAN) && defined(CONFIG_KASAN_STACK)
void kasan_unpoison_task_stack(struct task_struct *task);
asmlinkage void kasan_unpoison_task_stack_below(const void *watermark);
#else
static inline void kasan_unpoison_task_stack(struct task_struct *task) {}
static inline void kasan_unpoison_task_stack_below(const void *watermark) {}
#endif

#ifdef CONFIG_KASAN_GENERIC

struct kasan_cache {
	int alloc_meta_offset;
	int free_meta_offset;
};

size_t kasan_metadata_size(struct kmem_cache *cache, bool in_object);
void kasan_cache_create(struct kmem_cache *cache, unsigned int *size,
			slab_flags_t *flags);

void kasan_cache_shrink(struct kmem_cache *cache);
void kasan_cache_shutdown(struct kmem_cache *cache);
void kasan_record_aux_stack(void *ptr);

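/*
 * Illustrative sketch: subsystems that defer work record an auxiliary
 * stack trace so reports can show where the deferral was queued, as
 * call_rcu() does for RCU callbacks (simplified):
 *
 *	void call_rcu(struct rcu_head *head, rcu_callback_t func)
 *	{
 *		...
 *		kasan_record_aux_stack(head);
 *		...
 *	}
 */
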
#else /* CONFIG_KASAN_GENERIC */

/* Tag-based KASAN modes do not use per-object metadata. */
static inline size_t kasan_metadata_size(struct kmem_cache *cache,
						bool in_object)
{
	return 0;
}
/* And no cache-related metadata initialization is required. */
static inline void kasan_cache_create(struct kmem_cache *cache,
				      unsigned int *size,
				      slab_flags_t *flags) {}

static inline void kasan_cache_shrink(struct kmem_cache *cache) {}
static inline void kasan_cache_shutdown(struct kmem_cache *cache) {}
static inline void kasan_record_aux_stack(void *ptr) {}

#endif /* CONFIG_KASAN_GENERIC */

#if defined(CONFIG_KASAN_SW_TAGS) || defined(CONFIG_KASAN_HW_TAGS)

static inline void *kasan_reset_tag(const void *addr)
{
	return (void *)arch_kasan_reset_tag(addr);
}

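/*
 * Illustrative sketch: the tag-based modes keep a tag in the top byte of
 * the pointer, so code comparing raw addresses strips it first; "a" and
 * "b" are hypothetical:
 *
 *	if (kasan_reset_tag(a) == kasan_reset_tag(b))
 *		...	// same underlying address, tags may differ
 */
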
/**
 * kasan_report - print a report about a bad memory access detected by KASAN
 * @addr: address of the bad access
 * @size: size of the bad access
 * @is_write: whether the bad access is a write or a read
 * @ip: instruction pointer for the accessibility check or the bad access itself
 */
bool kasan_report(const void *addr, size_t size,
		bool is_write, unsigned long ip);

#else /* CONFIG_KASAN_SW_TAGS || CONFIG_KASAN_HW_TAGS */

static inline void *kasan_reset_tag(const void *addr)
{
	return (void *)addr;
}

#endif /* CONFIG_KASAN_SW_TAGS || CONFIG_KASAN_HW_TAGS */

#ifdef CONFIG_KASAN_HW_TAGS

void kasan_report_async(void);

#endif /* CONFIG_KASAN_HW_TAGS */

#ifdef CONFIG_KASAN_SW_TAGS
void __init kasan_init_sw_tags(void);
#else
static inline void kasan_init_sw_tags(void) { }
#endif

#ifdef CONFIG_KASAN_HW_TAGS
void kasan_init_hw_tags_cpu(void);
void __init kasan_init_hw_tags(void);
#else
static inline void kasan_init_hw_tags_cpu(void) { }
static inline void kasan_init_hw_tags(void) { }
#endif

#ifdef CONFIG_KASAN_VMALLOC

#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)

void kasan_populate_early_vm_area_shadow(void *start, unsigned long size);
int kasan_populate_vmalloc(unsigned long addr, unsigned long size);
void kasan_release_vmalloc(unsigned long start, unsigned long end,
			   unsigned long free_region_start,
			   unsigned long free_region_end,
			   unsigned long flags);

#else /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */

static inline void kasan_populate_early_vm_area_shadow(void *start,
						       unsigned long size)
{ }
static inline int kasan_populate_vmalloc(unsigned long start,
					unsigned long size)
{
	return 0;
}
static inline void kasan_release_vmalloc(unsigned long start,
					 unsigned long end,
					 unsigned long free_region_start,
					 unsigned long free_region_end,
					 unsigned long flags) { }

#endif /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */

void *__kasan_unpoison_vmalloc(const void *start, unsigned long size,
			       kasan_vmalloc_flags_t flags);
static __always_inline void *kasan_unpoison_vmalloc(const void *start,
						unsigned long size,
						kasan_vmalloc_flags_t flags)
{
	if (kasan_enabled())
		return __kasan_unpoison_vmalloc(start, size, flags);
	return (void *)start;
}

void __kasan_poison_vmalloc(const void *start, unsigned long size);
static __always_inline void kasan_poison_vmalloc(const void *start,
						 unsigned long size)
{
	if (kasan_enabled())
		__kasan_poison_vmalloc(start, size);
}

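/*
 * Illustrative sketch of the vmalloc pairing (simplified; see
 * mm/vmalloc.c for the real call sites): an area is unpoisoned once it
 * is backed by pages and poisoned again when it is freed:
 *
 *	area->addr = kasan_unpoison_vmalloc(area->addr, size,
 *					    KASAN_VMALLOC_VM_ALLOC |
 *					    KASAN_VMALLOC_PROT_NORMAL);
 *	...
 *	kasan_poison_vmalloc(area->addr, get_vm_area_size(area));
 */
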
#else /* CONFIG_KASAN_VMALLOC */

static inline void kasan_populate_early_vm_area_shadow(void *start,
						       unsigned long size) { }
static inline int kasan_populate_vmalloc(unsigned long start,
					unsigned long size)
{
	return 0;
}
static inline void kasan_release_vmalloc(unsigned long start,
					 unsigned long end,
					 unsigned long free_region_start,
					 unsigned long free_region_end,
					 unsigned long flags) { }

static inline void *kasan_unpoison_vmalloc(const void *start,
					   unsigned long size,
					   kasan_vmalloc_flags_t flags)
{
	return (void *)start;
}
static inline void kasan_poison_vmalloc(const void *start, unsigned long size)
{ }

#endif /* CONFIG_KASAN_VMALLOC */

#if (defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)) && \
		!defined(CONFIG_KASAN_VMALLOC)

/*
 * These functions allocate and free shadow memory for kernel modules.
 * They are only required when KASAN_VMALLOC is not supported, as otherwise
 * shadow memory is allocated by the generic vmalloc handlers.
 */
int kasan_alloc_module_shadow(void *addr, size_t size, gfp_t gfp_mask);
void kasan_free_module_shadow(const struct vm_struct *vm);

#else /* (CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS) && !CONFIG_KASAN_VMALLOC */

static inline int kasan_alloc_module_shadow(void *addr, size_t size, gfp_t gfp_mask) { return 0; }
static inline void kasan_free_module_shadow(const struct vm_struct *vm) {}

#endif /* (CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS) && !CONFIG_KASAN_VMALLOC */

#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
void kasan_non_canonical_hook(unsigned long addr);
#else /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */
static inline void kasan_non_canonical_hook(unsigned long addr) { }
#endif /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */

#endif /* LINUX_KASAN_H */