xref: /linux-6.15/include/linux/kmsan.h (revision e6553e2f)
1b073d7f8SAlexander Potapenko /* SPDX-License-Identifier: GPL-2.0 */
2b073d7f8SAlexander Potapenko /*
3b073d7f8SAlexander Potapenko  * KMSAN API for subsystems.
4b073d7f8SAlexander Potapenko  *
5b073d7f8SAlexander Potapenko  * Copyright (C) 2017-2022 Google LLC
6b073d7f8SAlexander Potapenko  * Author: Alexander Potapenko <[email protected]>
7b073d7f8SAlexander Potapenko  *
8b073d7f8SAlexander Potapenko  */
9b073d7f8SAlexander Potapenko #ifndef _LINUX_KMSAN_H
10b073d7f8SAlexander Potapenko #define _LINUX_KMSAN_H
11b073d7f8SAlexander Potapenko 
127ade4f10SAlexander Potapenko #include <linux/dma-direction.h>
13b073d7f8SAlexander Potapenko #include <linux/gfp.h>
14b073d7f8SAlexander Potapenko #include <linux/kmsan-checks.h>
15b073d7f8SAlexander Potapenko #include <linux/types.h>
16b073d7f8SAlexander Potapenko 
17b073d7f8SAlexander Potapenko struct page;
1868ef169aSAlexander Potapenko struct kmem_cache;
1950b5e49cSAlexander Potapenko struct task_struct;
207ade4f10SAlexander Potapenko struct scatterlist;
21553a8018SAlexander Potapenko struct urb;
22b073d7f8SAlexander Potapenko 
23b073d7f8SAlexander Potapenko #ifdef CONFIG_KMSAN
24b073d7f8SAlexander Potapenko 
25b073d7f8SAlexander Potapenko /**
2650b5e49cSAlexander Potapenko  * kmsan_task_create() - Initialize KMSAN state for the task.
2750b5e49cSAlexander Potapenko  * @task: task to initialize.
2850b5e49cSAlexander Potapenko  */
2950b5e49cSAlexander Potapenko void kmsan_task_create(struct task_struct *task);
3050b5e49cSAlexander Potapenko 
3150b5e49cSAlexander Potapenko /**
3250b5e49cSAlexander Potapenko  * kmsan_task_exit() - Notify KMSAN that a task has exited.
3350b5e49cSAlexander Potapenko  * @task: task about to finish.
3450b5e49cSAlexander Potapenko  */
3550b5e49cSAlexander Potapenko void kmsan_task_exit(struct task_struct *task);
3650b5e49cSAlexander Potapenko 
3750b5e49cSAlexander Potapenko /**
383c206509SAlexander Potapenko  * kmsan_init_shadow() - Initialize KMSAN shadow at boot time.
393c206509SAlexander Potapenko  *
403c206509SAlexander Potapenko  * Allocate and initialize KMSAN metadata for early allocations.
413c206509SAlexander Potapenko  */
423c206509SAlexander Potapenko void __init kmsan_init_shadow(void);
433c206509SAlexander Potapenko 
443c206509SAlexander Potapenko /**
453c206509SAlexander Potapenko  * kmsan_init_runtime() - Initialize KMSAN state and enable KMSAN.
463c206509SAlexander Potapenko  */
473c206509SAlexander Potapenko void __init kmsan_init_runtime(void);
483c206509SAlexander Potapenko 
493c206509SAlexander Potapenko /**
503c206509SAlexander Potapenko  * kmsan_memblock_free_pages() - handle freeing of memblock pages.
513c206509SAlexander Potapenko  * @page:	struct page to free.
523c206509SAlexander Potapenko  * @order:	order of @page.
533c206509SAlexander Potapenko  *
543c206509SAlexander Potapenko  * Freed pages are either returned to buddy allocator or held back to be used
553c206509SAlexander Potapenko  * as metadata pages.
563c206509SAlexander Potapenko  */
57bb1508c2SAlexander Potapenko bool __init __must_check kmsan_memblock_free_pages(struct page *page,
58bb1508c2SAlexander Potapenko 						   unsigned int order);
593c206509SAlexander Potapenko 
603c206509SAlexander Potapenko /**
61b073d7f8SAlexander Potapenko  * kmsan_alloc_page() - Notify KMSAN about an alloc_pages() call.
62b073d7f8SAlexander Potapenko  * @page:  struct page pointer returned by alloc_pages().
63b073d7f8SAlexander Potapenko  * @order: order of allocated struct page.
64b073d7f8SAlexander Potapenko  * @flags: GFP flags used by alloc_pages()
65b073d7f8SAlexander Potapenko  *
66b073d7f8SAlexander Potapenko  * KMSAN marks 1<<@order pages starting at @page as uninitialized, unless
67b073d7f8SAlexander Potapenko  * @flags contain __GFP_ZERO.
68b073d7f8SAlexander Potapenko  */
69b073d7f8SAlexander Potapenko void kmsan_alloc_page(struct page *page, unsigned int order, gfp_t flags);
70b073d7f8SAlexander Potapenko 
71b073d7f8SAlexander Potapenko /**
72b073d7f8SAlexander Potapenko  * kmsan_free_page() - Notify KMSAN about a free_pages() call.
73b073d7f8SAlexander Potapenko  * @page:  struct page pointer passed to free_pages().
74b073d7f8SAlexander Potapenko  * @order: order of deallocated struct page.
75b073d7f8SAlexander Potapenko  *
76b073d7f8SAlexander Potapenko  * KMSAN marks freed memory as uninitialized.
77b073d7f8SAlexander Potapenko  */
78b073d7f8SAlexander Potapenko void kmsan_free_page(struct page *page, unsigned int order);
79b073d7f8SAlexander Potapenko 
80b073d7f8SAlexander Potapenko /**
81b073d7f8SAlexander Potapenko  * kmsan_copy_page_meta() - Copy KMSAN metadata between two pages.
82b073d7f8SAlexander Potapenko  * @dst: destination page.
83b073d7f8SAlexander Potapenko  * @src: source page.
84b073d7f8SAlexander Potapenko  *
85b073d7f8SAlexander Potapenko  * KMSAN copies the contents of metadata pages for @src into the metadata pages
86b073d7f8SAlexander Potapenko  * for @dst. If @dst has no associated metadata pages, nothing happens.
87b073d7f8SAlexander Potapenko  * If @src has no associated metadata pages, @dst metadata pages are unpoisoned.
88b073d7f8SAlexander Potapenko  */
89b073d7f8SAlexander Potapenko void kmsan_copy_page_meta(struct page *dst, struct page *src);
90b073d7f8SAlexander Potapenko 
91b073d7f8SAlexander Potapenko /**
9268ef169aSAlexander Potapenko  * kmsan_slab_alloc() - Notify KMSAN about a slab allocation.
9368ef169aSAlexander Potapenko  * @s:      slab cache the object belongs to.
9468ef169aSAlexander Potapenko  * @object: object pointer.
9568ef169aSAlexander Potapenko  * @flags:  GFP flags passed to the allocator.
9668ef169aSAlexander Potapenko  *
9768ef169aSAlexander Potapenko  * Depending on cache flags and GFP flags, KMSAN sets up the metadata of the
9868ef169aSAlexander Potapenko  * newly created object, marking it as initialized or uninitialized.
9968ef169aSAlexander Potapenko  */
10068ef169aSAlexander Potapenko void kmsan_slab_alloc(struct kmem_cache *s, void *object, gfp_t flags);
10168ef169aSAlexander Potapenko 
10268ef169aSAlexander Potapenko /**
10368ef169aSAlexander Potapenko  * kmsan_slab_free() - Notify KMSAN about a slab deallocation.
10468ef169aSAlexander Potapenko  * @s:      slab cache the object belongs to.
10568ef169aSAlexander Potapenko  * @object: object pointer.
10668ef169aSAlexander Potapenko  *
10768ef169aSAlexander Potapenko  * KMSAN marks the freed object as uninitialized.
10868ef169aSAlexander Potapenko  */
10968ef169aSAlexander Potapenko void kmsan_slab_free(struct kmem_cache *s, void *object);
11068ef169aSAlexander Potapenko 
11168ef169aSAlexander Potapenko /**
11268ef169aSAlexander Potapenko  * kmsan_kmalloc_large() - Notify KMSAN about a large slab allocation.
11368ef169aSAlexander Potapenko  * @ptr:   object pointer.
11468ef169aSAlexander Potapenko  * @size:  object size.
11568ef169aSAlexander Potapenko  * @flags: GFP flags passed to the allocator.
11668ef169aSAlexander Potapenko  *
11768ef169aSAlexander Potapenko  * Similar to kmsan_slab_alloc(), but for large allocations.
11868ef169aSAlexander Potapenko  */
11968ef169aSAlexander Potapenko void kmsan_kmalloc_large(const void *ptr, size_t size, gfp_t flags);
12068ef169aSAlexander Potapenko 
12168ef169aSAlexander Potapenko /**
12268ef169aSAlexander Potapenko  * kmsan_kfree_large() - Notify KMSAN about a large slab deallocation.
12368ef169aSAlexander Potapenko  * @ptr: object pointer.
12468ef169aSAlexander Potapenko  *
12568ef169aSAlexander Potapenko  * Similar to kmsan_slab_free(), but for large allocations.
12668ef169aSAlexander Potapenko  */
12768ef169aSAlexander Potapenko void kmsan_kfree_large(const void *ptr);
12868ef169aSAlexander Potapenko 
12968ef169aSAlexander Potapenko /**
 * kmsan_vmap_pages_range_noflush() - Notify KMSAN about a vmap.
131b073d7f8SAlexander Potapenko  * @start:	start of vmapped range.
132b073d7f8SAlexander Potapenko  * @end:	end of vmapped range.
133b073d7f8SAlexander Potapenko  * @prot:	page protection flags used for vmap.
134b073d7f8SAlexander Potapenko  * @pages:	array of pages.
135b073d7f8SAlexander Potapenko  * @page_shift:	page_shift passed to vmap_range_noflush().
136b073d7f8SAlexander Potapenko  *
137b073d7f8SAlexander Potapenko  * KMSAN maps shadow and origin pages of @pages into contiguous ranges in
13847ebd031SAlexander Potapenko  * vmalloc metadata address range. Returns 0 on success, callers must check
13947ebd031SAlexander Potapenko  * for non-zero return value.
140b073d7f8SAlexander Potapenko  */
141bb1508c2SAlexander Potapenko int __must_check kmsan_vmap_pages_range_noflush(unsigned long start,
142bb1508c2SAlexander Potapenko 						unsigned long end,
143bb1508c2SAlexander Potapenko 						pgprot_t prot,
144bb1508c2SAlexander Potapenko 						struct page **pages,
145b073d7f8SAlexander Potapenko 						unsigned int page_shift);
146b073d7f8SAlexander Potapenko 
147b073d7f8SAlexander Potapenko /**
 * kmsan_vunmap_range_noflush() - Notify KMSAN about a vunmap.
149b073d7f8SAlexander Potapenko  * @start: start of vunmapped range.
150b073d7f8SAlexander Potapenko  * @end:   end of vunmapped range.
151b073d7f8SAlexander Potapenko  *
152b073d7f8SAlexander Potapenko  * KMSAN unmaps the contiguous metadata ranges created by
 * kmsan_vmap_pages_range_noflush().
154b073d7f8SAlexander Potapenko  */
155b073d7f8SAlexander Potapenko void kmsan_vunmap_range_noflush(unsigned long start, unsigned long end);
156b073d7f8SAlexander Potapenko 
157b073d7f8SAlexander Potapenko /**
158b073d7f8SAlexander Potapenko  * kmsan_ioremap_page_range() - Notify KMSAN about a ioremap_page_range() call.
159b073d7f8SAlexander Potapenko  * @addr:	range start.
160b073d7f8SAlexander Potapenko  * @end:	range end.
161b073d7f8SAlexander Potapenko  * @phys_addr:	physical range start.
162b073d7f8SAlexander Potapenko  * @prot:	page protection flags used for ioremap_page_range().
163b073d7f8SAlexander Potapenko  * @page_shift:	page_shift argument passed to vmap_range_noflush().
164b073d7f8SAlexander Potapenko  *
165b073d7f8SAlexander Potapenko  * KMSAN creates new metadata pages for the physical pages mapped into the
166fdea03e1SAlexander Potapenko  * virtual memory. Returns 0 on success, callers must check for non-zero return
167fdea03e1SAlexander Potapenko  * value.
168b073d7f8SAlexander Potapenko  */
169bb1508c2SAlexander Potapenko int __must_check kmsan_ioremap_page_range(unsigned long addr, unsigned long end,
170b073d7f8SAlexander Potapenko 					  phys_addr_t phys_addr, pgprot_t prot,
171b073d7f8SAlexander Potapenko 					  unsigned int page_shift);
172b073d7f8SAlexander Potapenko 
173b073d7f8SAlexander Potapenko /**
174b073d7f8SAlexander Potapenko  * kmsan_iounmap_page_range() - Notify KMSAN about a iounmap_page_range() call.
175b073d7f8SAlexander Potapenko  * @start: range start.
176b073d7f8SAlexander Potapenko  * @end:   range end.
177b073d7f8SAlexander Potapenko  *
178b073d7f8SAlexander Potapenko  * KMSAN unmaps the metadata pages for the given range and, unlike for
179b073d7f8SAlexander Potapenko  * vunmap_page_range(), also deallocates them.
180b073d7f8SAlexander Potapenko  */
181b073d7f8SAlexander Potapenko void kmsan_iounmap_page_range(unsigned long start, unsigned long end);
182b073d7f8SAlexander Potapenko 
1837ade4f10SAlexander Potapenko /**
1847ade4f10SAlexander Potapenko  * kmsan_handle_dma() - Handle a DMA data transfer.
1857ade4f10SAlexander Potapenko  * @page:   first page of the buffer.
1867ade4f10SAlexander Potapenko  * @offset: offset of the buffer within the first page.
1877ade4f10SAlexander Potapenko  * @size:   buffer size.
1887ade4f10SAlexander Potapenko  * @dir:    one of possible dma_data_direction values.
1897ade4f10SAlexander Potapenko  *
 * Depending on @dir, KMSAN:
1917ade4f10SAlexander Potapenko  * * checks the buffer, if it is copied to device;
1927ade4f10SAlexander Potapenko  * * initializes the buffer, if it is copied from device;
1937ade4f10SAlexander Potapenko  * * does both, if this is a DMA_BIDIRECTIONAL transfer.
1947ade4f10SAlexander Potapenko  */
1957ade4f10SAlexander Potapenko void kmsan_handle_dma(struct page *page, size_t offset, size_t size,
1967ade4f10SAlexander Potapenko 		      enum dma_data_direction dir);
1977ade4f10SAlexander Potapenko 
1987ade4f10SAlexander Potapenko /**
1997ade4f10SAlexander Potapenko  * kmsan_handle_dma_sg() - Handle a DMA transfer using scatterlist.
2007ade4f10SAlexander Potapenko  * @sg:    scatterlist holding DMA buffers.
2017ade4f10SAlexander Potapenko  * @nents: number of scatterlist entries.
2027ade4f10SAlexander Potapenko  * @dir:   one of possible dma_data_direction values.
2037ade4f10SAlexander Potapenko  *
 * Depending on @dir, KMSAN:
2057ade4f10SAlexander Potapenko  * * checks the buffers in the scatterlist, if they are copied to device;
2067ade4f10SAlexander Potapenko  * * initializes the buffers, if they are copied from device;
2077ade4f10SAlexander Potapenko  * * does both, if this is a DMA_BIDIRECTIONAL transfer.
2087ade4f10SAlexander Potapenko  */
2097ade4f10SAlexander Potapenko void kmsan_handle_dma_sg(struct scatterlist *sg, int nents,
2107ade4f10SAlexander Potapenko 			 enum dma_data_direction dir);
2117ade4f10SAlexander Potapenko 
212553a8018SAlexander Potapenko /**
213553a8018SAlexander Potapenko  * kmsan_handle_urb() - Handle a USB data transfer.
214553a8018SAlexander Potapenko  * @urb:    struct urb pointer.
215553a8018SAlexander Potapenko  * @is_out: data transfer direction (true means output to hardware).
216553a8018SAlexander Potapenko  *
217553a8018SAlexander Potapenko  * If @is_out is true, KMSAN checks the transfer buffer of @urb. Otherwise,
218553a8018SAlexander Potapenko  * KMSAN initializes the transfer buffer.
219553a8018SAlexander Potapenko  */
220553a8018SAlexander Potapenko void kmsan_handle_urb(const struct urb *urb, bool is_out);
221553a8018SAlexander Potapenko 
2226cae637fSAlexander Potapenko /**
2236cae637fSAlexander Potapenko  * kmsan_unpoison_entry_regs() - Handle pt_regs in low-level entry code.
2246cae637fSAlexander Potapenko  * @regs:	struct pt_regs pointer received from assembly code.
2256cae637fSAlexander Potapenko  *
2266cae637fSAlexander Potapenko  * KMSAN unpoisons the contents of the passed pt_regs, preventing potential
2276cae637fSAlexander Potapenko  * false positive reports. Unlike kmsan_unpoison_memory(),
2286cae637fSAlexander Potapenko  * kmsan_unpoison_entry_regs() can be called from the regions where
2296cae637fSAlexander Potapenko  * kmsan_in_runtime() returns true, which is the case in early entry code.
2306cae637fSAlexander Potapenko  */
2316cae637fSAlexander Potapenko void kmsan_unpoison_entry_regs(const struct pt_regs *regs);
2326cae637fSAlexander Potapenko 
2336b1709d4SIlya Leoshkevich /**
2346b1709d4SIlya Leoshkevich  * kmsan_get_metadata() - Return a pointer to KMSAN shadow or origins.
2356b1709d4SIlya Leoshkevich  * @addr:      kernel address.
2366b1709d4SIlya Leoshkevich  * @is_origin: whether to return origins or shadow.
2376b1709d4SIlya Leoshkevich  *
2386b1709d4SIlya Leoshkevich  * Return NULL if metadata cannot be found.
2396b1709d4SIlya Leoshkevich  */
2406b1709d4SIlya Leoshkevich void *kmsan_get_metadata(void *addr, bool is_origin);
2416b1709d4SIlya Leoshkevich 
242ec3e837dSIlya Leoshkevich /**
 * kmsan_enable_current() - Enable KMSAN for the current task.
 *
 * Each call to kmsan_enable_current() must be preceded by a
 * kmsan_disable_current() call. These call pairs may be nested.
247ec3e837dSIlya Leoshkevich  */
248ec3e837dSIlya Leoshkevich void kmsan_enable_current(void);
249ec3e837dSIlya Leoshkevich 
250ec3e837dSIlya Leoshkevich /**
 * kmsan_disable_current() - Disable KMSAN for the current task.
 *
 * Each call to kmsan_disable_current() must be followed by a
 * kmsan_enable_current() call. These call pairs may be nested.
255ec3e837dSIlya Leoshkevich  */
256ec3e837dSIlya Leoshkevich void kmsan_disable_current(void);
257ec3e837dSIlya Leoshkevich 
2581fdb3c70SIlya Leoshkevich /**
 * memset_no_sanitize_memory() - Fill memory without KMSAN instrumentation.
2601fdb3c70SIlya Leoshkevich  * @s: address of kernel memory to fill.
2611fdb3c70SIlya Leoshkevich  * @c: constant byte to fill the memory with.
2621fdb3c70SIlya Leoshkevich  * @n: number of bytes to fill.
2631fdb3c70SIlya Leoshkevich  *
2641fdb3c70SIlya Leoshkevich  * This is like memset(), but without KMSAN instrumentation.
2651fdb3c70SIlya Leoshkevich  */
static inline void *memset_no_sanitize_memory(void *s, int c, size_t n)
{
	/* Bypass the instrumented memset() so the shadow of @s is untouched. */
	void *ret = __memset(s, c, n);

	return ret;
}
2701fdb3c70SIlya Leoshkevich 
271*e6553e2fSIlya Leoshkevich extern bool kmsan_enabled;
272*e6553e2fSIlya Leoshkevich extern int panic_on_kmsan;
273*e6553e2fSIlya Leoshkevich 
/*
 * KMSAN performs a lot of consistency checks that are currently enabled by
 * default. BUG_ON is normally discouraged in the kernel, unless used for
 * debugging, but KMSAN itself is a debugging tool, so it makes little sense to
 * recover if something goes wrong.
 *
 * KMSAN_WARN_ON(cond) evaluates to the value of @cond, like WARN_ON(). When
 * the condition holds, KMSAN is switched off for the rest of the boot
 * (kmsan_enabled is cleared) and, if panic_on_kmsan is set, the kernel BUG()s
 * instead of trying to recover.
 */
#define KMSAN_WARN_ON(cond)                                           \
	({                                                            \
		const bool __cond = WARN_ON(cond);                    \
		if (unlikely(__cond)) {                               \
			WRITE_ONCE(kmsan_enabled, false);             \
			if (panic_on_kmsan) {                         \
				/* Can't call panic() here because */ \
				/* of uaccess checks. */              \
				BUG();                                \
			}                                             \
		}                                                     \
		__cond;                                               \
	})
293*e6553e2fSIlya Leoshkevich 
294b073d7f8SAlexander Potapenko #else
295b073d7f8SAlexander Potapenko 
/* No-op: KMSAN is compiled out (CONFIG_KMSAN=n). */
static inline void kmsan_init_shadow(void)
{
}
2993c206509SAlexander Potapenko 
/* No-op: KMSAN is compiled out (CONFIG_KMSAN=n). */
static inline void kmsan_init_runtime(void)
{
}
3033c206509SAlexander Potapenko 
/* KMSAN disabled: never hold pages back for metadata, always free them. */
static inline bool __must_check kmsan_memblock_free_pages(struct page *page,
							  unsigned int order)
{
	return true;
}
3093c206509SAlexander Potapenko 
/* No-op: KMSAN is compiled out (CONFIG_KMSAN=n). */
static inline void kmsan_task_create(struct task_struct *task)
{
}
31350b5e49cSAlexander Potapenko 
/* No-op: KMSAN is compiled out (CONFIG_KMSAN=n). */
static inline void kmsan_task_exit(struct task_struct *task)
{
}
31750b5e49cSAlexander Potapenko 
/* No-op: KMSAN is compiled out (CONFIG_KMSAN=n). */
static inline void kmsan_alloc_page(struct page *page, unsigned int order,
				    gfp_t flags)
{
}
322b073d7f8SAlexander Potapenko 
/* No-op: KMSAN is compiled out (CONFIG_KMSAN=n). */
static inline void kmsan_free_page(struct page *page, unsigned int order)
{
}
326b073d7f8SAlexander Potapenko 
/* No-op: KMSAN is compiled out (CONFIG_KMSAN=n). */
static inline void kmsan_copy_page_meta(struct page *dst, struct page *src)
{
}
330b073d7f8SAlexander Potapenko 
/* No-op: KMSAN is compiled out (CONFIG_KMSAN=n). */
static inline void kmsan_slab_alloc(struct kmem_cache *s, void *object,
				    gfp_t flags)
{
}
33568ef169aSAlexander Potapenko 
/* No-op: KMSAN is compiled out (CONFIG_KMSAN=n). */
static inline void kmsan_slab_free(struct kmem_cache *s, void *object)
{
}
33968ef169aSAlexander Potapenko 
/* No-op: KMSAN is compiled out (CONFIG_KMSAN=n). */
static inline void kmsan_kmalloc_large(const void *ptr, size_t size,
				       gfp_t flags)
{
}
34468ef169aSAlexander Potapenko 
/* No-op: KMSAN is compiled out (CONFIG_KMSAN=n). */
static inline void kmsan_kfree_large(const void *ptr)
{
}
34868ef169aSAlexander Potapenko 
/* KMSAN disabled: no metadata to map, report success. */
static inline int __must_check kmsan_vmap_pages_range_noflush(
	unsigned long start, unsigned long end, pgprot_t prot,
	struct page **pages, unsigned int page_shift)
{
	return 0;
}
355b073d7f8SAlexander Potapenko 
/* No-op: KMSAN is compiled out (CONFIG_KMSAN=n). */
static inline void kmsan_vunmap_range_noflush(unsigned long start,
					      unsigned long end)
{
}
360b073d7f8SAlexander Potapenko 
/* KMSAN disabled: no metadata pages to create, report success. */
static inline int __must_check kmsan_ioremap_page_range(unsigned long start,
							unsigned long end,
							phys_addr_t phys_addr,
							pgprot_t prot,
							unsigned int page_shift)
{
	return 0;
}
369b073d7f8SAlexander Potapenko 
/* No-op: KMSAN is compiled out (CONFIG_KMSAN=n). */
static inline void kmsan_iounmap_page_range(unsigned long start,
					    unsigned long end)
{
}
374b073d7f8SAlexander Potapenko 
/* No-op: KMSAN is compiled out (CONFIG_KMSAN=n). */
static inline void kmsan_handle_dma(struct page *page, size_t offset,
				    size_t size, enum dma_data_direction dir)
{
}
3797ade4f10SAlexander Potapenko 
/* No-op: KMSAN is compiled out (CONFIG_KMSAN=n). */
static inline void kmsan_handle_dma_sg(struct scatterlist *sg, int nents,
				       enum dma_data_direction dir)
{
}
3847ade4f10SAlexander Potapenko 
/* No-op: KMSAN is compiled out (CONFIG_KMSAN=n). */
static inline void kmsan_handle_urb(const struct urb *urb, bool is_out)
{
}
388553a8018SAlexander Potapenko 
/* No-op: KMSAN is compiled out (CONFIG_KMSAN=n). */
static inline void kmsan_unpoison_entry_regs(const struct pt_regs *regs)
{
}
3926cae637fSAlexander Potapenko 
/* No-op: KMSAN is compiled out (CONFIG_KMSAN=n). */
static inline void kmsan_enable_current(void)
{
}
396ec3e837dSIlya Leoshkevich 
/* No-op: KMSAN is compiled out (CONFIG_KMSAN=n). */
static inline void kmsan_disable_current(void)
{
}
400ec3e837dSIlya Leoshkevich 
static inline void *memset_no_sanitize_memory(void *s, int c, size_t n)
{
	/* KMSAN is compiled out, so a plain memset() is uninstrumented. */
	void *ret = memset(s, c, n);

	return ret;
}
4051fdb3c70SIlya Leoshkevich 
406*e6553e2fSIlya Leoshkevich #define KMSAN_WARN_ON WARN_ON
407*e6553e2fSIlya Leoshkevich 
408b073d7f8SAlexander Potapenko #endif
409b073d7f8SAlexander Potapenko 
410b073d7f8SAlexander Potapenko #endif /* _LINUX_KMSAN_H */
411