/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_KSM_H
#define __LINUX_KSM_H
/*
 * Memory merging support.
 *
 * This code enables dynamic sharing of identical pages found in different
 * memory areas, even if they are not shared by fork().
 */
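
/*
 * Userspace opts a region into KSM with madvise(2).  A minimal sketch of
 * such a caller (illustrative only, not part of this header):
 *
 *	char *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *	if (madvise(buf, len, MADV_MERGEABLE))
 *		perror("madvise");
 *
 * madvise() fails with EINVAL here if the kernel was built without
 * CONFIG_KSM; otherwise ksmd scans the advised area and merges pages with
 * identical content.
 */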

#include <linux/bitops.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/rmap.h>
#include <linux/sched.h>

#ifdef CONFIG_KSM
int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
		unsigned long end, int advice, unsigned long *vm_flags);

void ksm_add_vma(struct vm_area_struct *vma);
int ksm_enable_merge_any(struct mm_struct *mm);
int ksm_disable_merge_any(struct mm_struct *mm);
int ksm_disable(struct mm_struct *mm);

int __ksm_enter(struct mm_struct *mm);
void __ksm_exit(struct mm_struct *mm);
/*
 * To identify zeropages that were mapped by KSM, we reuse the dirty bit
 * in the PTE. If the PTE is dirty, the zeropage was mapped by KSM when
 * deduplicating memory.
 */
#define is_ksm_zero_pte(pte)	(is_zero_pfn(pte_pfn(pte)) && pte_dirty(pte))

extern atomic_long_t ksm_zero_pages;

static inline void ksm_map_zero_page(struct mm_struct *mm)
{
	atomic_long_inc(&ksm_zero_pages);
	atomic_long_inc(&mm->ksm_zero_pages);
}

static inline void ksm_might_unmap_zero_page(struct mm_struct *mm, pte_t pte)
{
	if (is_ksm_zero_pte(pte)) {
		atomic_long_dec(&ksm_zero_pages);
		atomic_long_dec(&mm->ksm_zero_pages);
	}
}
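
/*
 * Page-table teardown paths pass each cleared PTE to
 * ksm_might_unmap_zero_page() so the counters above stay balanced.  A rough
 * sketch of such a caller (illustrative only; the real zap path in
 * mm/memory.c is more involved):
 *
 *	pte_t ptent = ptep_get_and_clear(mm, addr, pte);
 *
 *	if (pte_present(ptent))
 *		ksm_might_unmap_zero_page(mm, ptent);
 */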

static inline long mm_ksm_zero_pages(struct mm_struct *mm)
{
	return atomic_long_read(&mm->ksm_zero_pages);
}

static inline void ksm_fork(struct mm_struct *mm, struct mm_struct *oldmm)
{
	/* Adding mm to ksm is best effort on fork. */
	if (test_bit(MMF_VM_MERGEABLE, &oldmm->flags))
		__ksm_enter(mm);
}

static inline int ksm_execve(struct mm_struct *mm)
{
	if (test_bit(MMF_VM_MERGE_ANY, &mm->flags))
		return __ksm_enter(mm);

	return 0;
}

static inline void ksm_exit(struct mm_struct *mm)
{
	if (test_bit(MMF_VM_MERGEABLE, &mm->flags))
		__ksm_exit(mm);
}

/*
 * When do_swap_page() first faults in from swap what used to be a KSM page,
 * no problem, it will be assigned to this vma's anon_vma; but thereafter,
 * it might be faulted into a different anon_vma (or perhaps to a different
 * offset in the same anon_vma).  do_swap_page() cannot do all the locking
 * needed to reconstitute a cross-anon_vma KSM page: for now it has to make
 * a copy, and leave remerging the pages to a later pass of ksmd.
 *
 * We'd like to make this conditional on vma->vm_flags & VM_MERGEABLE,
 * but what if the vma was unmerged while the page was swapped out?
 */
struct folio *ksm_might_need_to_copy(struct folio *folio,
			struct vm_area_struct *vma, unsigned long addr);
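
/*
 * A swap-in fault substitutes the returned folio for the one it looked up.
 * A simplified sketch of a do_swap_page()-style caller (illustrative only;
 * error handling trimmed):
 *
 *	folio = ksm_might_need_to_copy(folio, vma, addr);
 *	if (unlikely(!folio))
 *		return VM_FAULT_OOM;
 *	if (unlikely(folio == ERR_PTR(-EHWPOISON)))
 *		return VM_FAULT_HWPOISON;
 *
 * NULL means a needed copy could not be allocated; otherwise the result is
 * either the original folio or a freshly allocated private copy that can
 * safely be mapped into this vma.
 */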

void rmap_walk_ksm(struct folio *folio, struct rmap_walk_control *rwc);
void folio_migrate_ksm(struct folio *newfolio, struct folio *folio);
void collect_procs_ksm(const struct folio *folio, const struct page *page,
		struct list_head *to_kill, int force_early);
long ksm_process_profit(struct mm_struct *);
bool ksm_process_mergeable(struct mm_struct *mm);

#else  /* !CONFIG_KSM */

static inline void ksm_add_vma(struct vm_area_struct *vma)
{
}

static inline int ksm_disable(struct mm_struct *mm)
{
	return 0;
}

static inline void ksm_fork(struct mm_struct *mm, struct mm_struct *oldmm)
{
}

static inline int ksm_execve(struct mm_struct *mm)
{
	return 0;
}

static inline void ksm_exit(struct mm_struct *mm)
{
}

static inline void ksm_might_unmap_zero_page(struct mm_struct *mm, pte_t pte)
{
}

static inline void collect_procs_ksm(const struct folio *folio,
		const struct page *page, struct list_head *to_kill,
		int force_early)
{
}

#ifdef CONFIG_MMU
static inline int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
		unsigned long end, int advice, unsigned long *vm_flags)
{
	return 0;
}

static inline struct folio *ksm_might_need_to_copy(struct folio *folio,
		struct vm_area_struct *vma, unsigned long addr)
{
	return folio;
}

static inline void rmap_walk_ksm(struct folio *folio,
		struct rmap_walk_control *rwc)
{
}

static inline void folio_migrate_ksm(struct folio *newfolio, struct folio *old)
{
}
#endif /* CONFIG_MMU */
#endif /* !CONFIG_KSM */

#endif /* __LINUX_KSM_H */