#ifndef _LINUX_MIGRATE_H
#define _LINUX_MIGRATE_H

#include <linux/mm.h>
#include <linux/mempolicy.h>
#include <linux/migrate_mode.h>

typedef struct page *new_page_t(struct page *, unsigned long private, int **);

/*
 * Return values from address_space_operations.migratepage():
 * - negative errno on page migration failure;
 * - zero on page migration success.
 *
 * Balloon page migration introduces a special case where a distinct
 * return code is used to flag a successful page migration to unmap_and_move().
 * This is necessary because page migration can race against the balloon
 * deflation procedure, and in that case we could introduce a nasty page leak
 * if a successfully migrated balloon page gets released concurrently with
 * migration's unmap_and_move() wrap-up steps.
 * (See the illustrative sketch below the MIGRATEPAGE_* definitions.)
 */
#define MIGRATEPAGE_SUCCESS		0
#define MIGRATEPAGE_BALLOON_SUCCESS	1 /* special ret code for the balloon
					   * page successful migration case.
					   */
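/*
 * Illustrative sketch only (not part of this header): a minimal
 * address_space_operations.migratepage() implementation showing how the
 * return codes above are produced.  It roughly mirrors what the generic
 * migrate_page() helper declared below does; example_migratepage is a
 * hypothetical name.
 *
 *	static int example_migratepage(struct address_space *mapping,
 *				       struct page *newpage, struct page *page,
 *				       enum migrate_mode mode)
 *	{
 *		int rc;
 *
 *		rc = migrate_page_move_mapping(mapping, newpage, page,
 *					       NULL, mode, 0);
 *		if (rc != MIGRATEPAGE_SUCCESS)
 *			return rc;
 *
 *		migrate_page_copy(newpage, page);
 *		return MIGRATEPAGE_SUCCESS;
 *	}
 *
 * A negative rc from migrate_page_move_mapping() is propagated as the
 * migration failure errno; MIGRATEPAGE_SUCCESS (0) reports success.
 */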
enum migrate_reason {
	MR_COMPACTION,
	MR_MEMORY_FAILURE,
	MR_MEMORY_HOTPLUG,
	MR_SYSCALL,		/* also applies to cpusets */
	MR_MEMPOLICY_MBIND,
	MR_NUMA_MISPLACED,
	MR_CMA
};

#ifdef CONFIG_MIGRATION

extern void putback_lru_pages(struct list_head *l);
extern void putback_movable_pages(struct list_head *l);
extern int migrate_page(struct address_space *,
			struct page *, struct page *, enum migrate_mode);
extern int migrate_pages(struct list_head *l, new_page_t x,
		unsigned long private, enum migrate_mode mode, int reason);

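/*
 * Illustrative sketch only (hypothetical caller, not part of this header):
 * a new_page_t callback that allocates the destination page on a target
 * node, plus the typical pattern for driving migrate_pages() over a list
 * of isolated pages.  alloc_dst_page, pagelist and target_nid are
 * placeholder names.
 *
 *	static struct page *alloc_dst_page(struct page *page,
 *					   unsigned long private, int **result)
 *	{
 *		return alloc_pages_node((int)private,
 *					GFP_HIGHUSER_MOVABLE, 0);
 *	}
 *
 *	err = migrate_pages(&pagelist, alloc_dst_page, target_nid,
 *			    MIGRATE_SYNC, MR_SYSCALL);
 *	if (err)
 *		putback_movable_pages(&pagelist);
 */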
extern int fail_migrate_page(struct address_space *,
			struct page *, struct page *);

extern int migrate_prep(void);
extern int migrate_prep_local(void);
extern int migrate_vmas(struct mm_struct *mm,
		const nodemask_t *from, const nodemask_t *to,
		unsigned long flags);
extern void migrate_page_copy(struct page *newpage, struct page *page);
extern int migrate_huge_page_move_mapping(struct address_space *mapping,
				  struct page *newpage, struct page *page);
extern int migrate_page_move_mapping(struct address_space *mapping,
		struct page *newpage, struct page *page,
		struct buffer_head *head, enum migrate_mode mode,
		int extra_count);
#else

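/* CONFIG_MIGRATION=n: no-op and -ENOSYS stubs so callers build unchanged. */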
static inline void putback_lru_pages(struct list_head *l) {}
static inline void putback_movable_pages(struct list_head *l) {}
static inline int migrate_pages(struct list_head *l, new_page_t x,
		unsigned long private, enum migrate_mode mode, int reason)
	{ return -ENOSYS; }

static inline int migrate_prep(void) { return -ENOSYS; }
static inline int migrate_prep_local(void) { return -ENOSYS; }

static inline int migrate_vmas(struct mm_struct *mm,
		const nodemask_t *from, const nodemask_t *to,
		unsigned long flags)
{
	return -ENOSYS;
}

static inline void migrate_page_copy(struct page *newpage,
				     struct page *page) {}

static inline int migrate_huge_page_move_mapping(struct address_space *mapping,
				  struct page *newpage, struct page *page)
{
	return -ENOSYS;
}

/* Possible settings for the migrate_page() method in address_space_operations */
#define migrate_page NULL
#define fail_migrate_page NULL

#endif /* CONFIG_MIGRATION */

#ifdef CONFIG_NUMA_BALANCING
extern bool pmd_trans_migrating(pmd_t pmd);
extern void wait_migrate_huge_page(struct anon_vma *anon_vma, pmd_t *pmd);
extern int migrate_misplaced_page(struct page *page,
				  struct vm_area_struct *vma, int node);
extern bool migrate_ratelimited(int node);
#else
static inline bool pmd_trans_migrating(pmd_t pmd)
{
	return false;
}
static inline void wait_migrate_huge_page(struct anon_vma *anon_vma, pmd_t *pmd)
{
}
static inline int migrate_misplaced_page(struct page *page,
					 struct vm_area_struct *vma, int node)
{
	return -EAGAIN; /* can't migrate now */
}
static inline bool migrate_ratelimited(int node)
{
	return false;
}
#endif /* CONFIG_NUMA_BALANCING */

#if defined(CONFIG_NUMA_BALANCING) && defined(CONFIG_TRANSPARENT_HUGEPAGE)
extern int migrate_misplaced_transhuge_page(struct mm_struct *mm,
			struct vm_area_struct *vma,
			pmd_t *pmd, pmd_t entry,
			unsigned long address,
			struct page *page, int node);
#else
static inline int migrate_misplaced_transhuge_page(struct mm_struct *mm,
			struct vm_area_struct *vma,
			pmd_t *pmd, pmd_t entry,
			unsigned long address,
			struct page *page, int node)
{
	return -EAGAIN;
}
#endif /* CONFIG_NUMA_BALANCING && CONFIG_TRANSPARENT_HUGEPAGE */

#endif /* _LINUX_MIGRATE_H */