/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_GENERIC_CACHEFLUSH_H
#define _ASM_GENERIC_CACHEFLUSH_H

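/*
 * A value of 0 records that this generic header leaves flush_dcache_page()
 * as a no-op; an architecture that really implements it supplies its own
 * cacheflush.h and defines this to 1.
 */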
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0

/*
 * The cache does not need to be flushed when TLB entries change if
 * the cache is physically indexed and tagged, i.e. mapped to physical
 * rather than virtual memory, so the fallbacks below are all no-ops.
 */
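
/*
 * Every helper below is wrapped in #ifndef so that an architecture can
 * override individual functions before including this header. A sketch of
 * the convention (the override shown is illustrative):
 *
 *	extern void flush_icache_range(unsigned long start, unsigned long end);
 *	#define flush_icache_range flush_icache_range
 *	#include <asm-generic/cacheflush.h>
 */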
#ifndef flush_cache_all
static inline void flush_cache_all(void)
{
}
#endif

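/*
 * flush_cache_mm() is called before an entire address space is torn down;
 * with a physically mapped cache there is nothing to do.
 */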
#ifndef flush_cache_mm
static inline void flush_cache_mm(struct mm_struct *mm)
{
}
#endif

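/*
 * flush_cache_dup_mm() runs at fork() time, before the parent's page
 * tables are copied, to avoid aliasing problems on virtually indexed
 * caches; again a no-op here.
 */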
#ifndef flush_cache_dup_mm
static inline void flush_cache_dup_mm(struct mm_struct *mm)
{
}
#endif

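/*
 * flush_cache_range() and flush_cache_page() flush user-space virtual
 * addresses before the underlying mapping is changed or removed.
 */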
#ifndef flush_cache_range
static inline void flush_cache_range(struct vm_area_struct *vma,
				     unsigned long start,
				     unsigned long end)
{
}
#endif

#ifndef flush_cache_page
static inline void flush_cache_page(struct vm_area_struct *vma,
				    unsigned long vmaddr,
				    unsigned long pfn)
{
}
#endif

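/*
 * flush_dcache_page() keeps the kernel's view of a page cache page
 * coherent with user-space mappings on aliasing D-caches; a no-op here,
 * matching ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 0 above.
 */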
#ifndef flush_dcache_page
static inline void flush_dcache_page(struct page *page)
{
}
#endif

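/*
 * The mmap lock hooks let an architecture's flush_dcache_page() serialise
 * against changes to a file's mapping tree; no locking is needed when the
 * flush itself is a no-op.
 */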
#ifndef flush_dcache_mmap_lock
static inline void flush_dcache_mmap_lock(struct address_space *mapping)
{
}
#endif

#ifndef flush_dcache_mmap_unlock
static inline void flush_dcache_mmap_unlock(struct address_space *mapping)
{
}
#endif

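/*
 * flush_icache_range() makes newly written instructions visible to
 * instruction fetch, e.g. after code patching or loading module text.
 * A typical caller (sketch):
 *
 *	memcpy(dst, insns, len);
 *	flush_icache_range((unsigned long)dst, (unsigned long)dst + len);
 *
 * With coherent I- and D-caches nothing needs to be done.
 */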
#ifndef flush_icache_range
static inline void flush_icache_range(unsigned long start, unsigned long end)
{
}
#endif

#ifndef flush_icache_page
static inline void flush_icache_page(struct vm_area_struct *vma,
				     struct page *page)
{
}
#endif

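/*
 * flush_icache_user_range() is the user-page variant, used by
 * copy_to_user_page() below after writing into another process's text.
 */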
#ifndef flush_icache_user_range
static inline void flush_icache_user_range(struct vm_area_struct *vma,
					   struct page *page,
					   unsigned long addr, int len)
{
}
#endif

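/*
 * flush_cache_vmap()/flush_cache_vunmap() bracket the creation and
 * teardown of vmalloc/ioremap mappings; no-ops for non-aliasing caches.
 */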
#ifndef flush_cache_vmap
static inline void flush_cache_vmap(unsigned long start, unsigned long end)
{
}
#endif

#ifndef flush_cache_vunmap
static inline void flush_cache_vunmap(unsigned long start, unsigned long end)
{
}
#endif

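/*
 * copy_to_user_page() is used by ptrace and friends (via
 * access_process_vm()) to write into another process's page through a
 * kernel mapping; the icache flush after the memcpy() covers the case
 * where the target page holds executable code. The call shape, as used
 * in mm/memory.c (addr is the user address, maddr the kernel mapping
 * of the same page):
 *
 *	copy_to_user_page(vma, page, addr, maddr + offset, buf, bytes);
 */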
#ifndef copy_to_user_page
#define copy_to_user_page(vma, page, vaddr, dst, src, len)	\
	do { \
		memcpy(dst, src, len); \
		flush_icache_user_range(vma, page, vaddr, len); \
	} while (0)
#endif

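/*
 * Reading from another process's page needs no cache maintenance in the
 * generic version, so copy_from_user_page() is a plain memcpy().
 */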
#ifndef copy_from_user_page
#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
	memcpy(dst, src, len)
#endif

#endif /* _ASM_GENERIC_CACHEFLUSH_H */