#ifndef _LINUX_MIGRATE_H
#define _LINUX_MIGRATE_H

#include <linux/mm.h>
#include <linux/mempolicy.h>
#include <linux/migrate_mode.h>

typedef struct page *new_page_t(struct page *, unsigned long private, int **);
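
/*
 * Illustrative only, not part of this header: a new_page_t callback
 * allocates the destination page for each page being migrated, with
 * @private forwarded verbatim from the migrate_pages() caller.  A
 * minimal sketch with a hypothetical name, assuming @private carries
 * a target node id (the int ** argument, available for reporting
 * per-page status, is simply ignored here):
 *
 *	static struct page *alloc_target_page(struct page *page,
 *					      unsigned long private,
 *					      int **result)
 *	{
 *		return alloc_pages_node((int)private,
 *					GFP_HIGHUSER_MOVABLE, 0);
 *	}
 */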

/*
 * Return values from address_space_operations.migratepage():
 * - negative errno on page migration failure;
 * - zero on page migration success;
 *
 * Balloon page migration introduces a special case where a 'distinct'
 * return code is used to flag a successful page migration to unmap_and_move().
 * This approach is necessary because page migration can race against the
 * balloon deflation procedure, and in such a case we could introduce a nasty
 * page leak if a successfully migrated balloon page gets released concurrently
 * with migration's unmap_and_move() wrap-up steps.
 */
#define MIGRATEPAGE_SUCCESS		0
#define MIGRATEPAGE_BALLOON_SUCCESS	1 /* special ret code for balloon page
					   * successful migration case.
					   */
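
/*
 * A hedged sketch of how a caller such as unmap_and_move() might tell
 * the two success codes apart (illustrative only, not the actual
 * kernel code):
 *
 *	rc = mapping->a_ops->migratepage(mapping, newpage, page, mode);
 *	if (rc == MIGRATEPAGE_BALLOON_SUCCESS) {
 *		the balloon code already released the isolated page,
 *		so the normal putback path must be skipped before the
 *		result is treated as a plain success:
 *		rc = MIGRATEPAGE_SUCCESS;
 *	}
 */
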
enum migrate_reason {
	MR_COMPACTION,
	MR_MEMORY_FAILURE,
	MR_MEMORY_HOTPLUG,
	MR_SYSCALL,		/* also applies to cpusets */
	MR_MEMPOLICY_MBIND,
	MR_NUMA_MISPLACED,
	MR_CMA
};

#ifdef CONFIG_MIGRATION

extern void putback_lru_pages(struct list_head *l);
extern void putback_movable_pages(struct list_head *l);
extern int migrate_page(struct address_space *,
			struct page *, struct page *, enum migrate_mode);
extern int migrate_pages(struct list_head *l, new_page_t x,
		unsigned long private, enum migrate_mode mode, int reason);
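
/*
 * Typical migrate_pages() usage, as a sketch assuming the hypothetical
 * alloc_target_page() callback from above and a caller-chosen target
 * node in @nid.  Pages that could not be migrated remain on the list
 * and must be put back by the caller:
 *
 *	LIST_HEAD(pagelist);
 *	int err;
 *
 *	... isolate the pages to be moved onto &pagelist ...
 *
 *	err = migrate_pages(&pagelist, alloc_target_page, nid,
 *			    MIGRATE_SYNC, MR_SYSCALL);
 *	if (err)
 *		putback_movable_pages(&pagelist);
 */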

extern int fail_migrate_page(struct address_space *,
			struct page *, struct page *);

extern int migrate_prep(void);
extern int migrate_prep_local(void);
extern int migrate_vmas(struct mm_struct *mm,
		const nodemask_t *from, const nodemask_t *to,
		unsigned long flags);
extern void migrate_page_copy(struct page *newpage, struct page *page);
extern int migrate_huge_page_move_mapping(struct address_space *mapping,
				  struct page *newpage, struct page *page);
extern int migrate_page_move_mapping(struct address_space *mapping,
		struct page *newpage, struct page *page,
		struct buffer_head *head, enum migrate_mode mode);
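
/*
 * For mappings without buffers or other private state, a
 * ->migratepage() implementation reduces to moving the mapping slot
 * and copying the page contents.  A minimal sketch under a
 * hypothetical name (roughly what the generic migrate_page() helper
 * does):
 *
 *	static int my_migratepage(struct address_space *mapping,
 *				  struct page *newpage, struct page *page,
 *				  enum migrate_mode mode)
 *	{
 *		int rc;
 *
 *		rc = migrate_page_move_mapping(mapping, newpage, page,
 *					       NULL, mode);
 *		if (rc != MIGRATEPAGE_SUCCESS)
 *			return rc;
 *
 *		migrate_page_copy(newpage, page);
 *		return MIGRATEPAGE_SUCCESS;
 *	}
 */
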
#else

static inline void putback_lru_pages(struct list_head *l) {}
static inline void putback_movable_pages(struct list_head *l) {}
static inline int migrate_pages(struct list_head *l, new_page_t x,
		unsigned long private, enum migrate_mode mode, int reason)
	{ return -ENOSYS; }

static inline int migrate_prep(void) { return -ENOSYS; }
static inline int migrate_prep_local(void) { return -ENOSYS; }

static inline int migrate_vmas(struct mm_struct *mm,
		const nodemask_t *from, const nodemask_t *to,
		unsigned long flags)
{
	return -ENOSYS;
}

static inline void migrate_page_copy(struct page *newpage,
				     struct page *page) {}

static inline int migrate_huge_page_move_mapping(struct address_space *mapping,
				  struct page *newpage, struct page *page)
{
	return -ENOSYS;
}

/* Possible settings for the migrate_page() method in address_space_operations */
#define migrate_page NULL
#define fail_migrate_page NULL
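
/*
 * Because migrate_page and fail_migrate_page compile to NULL when
 * CONFIG_MIGRATION is off, an address_space_operations initializer
 * such as the (hypothetical) one below builds correctly in both
 * configurations:
 *
 *	static const struct address_space_operations my_aops = {
 *		.migratepage	= migrate_page,
 *	};
 */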

#endif /* CONFIG_MIGRATION */

#ifdef CONFIG_NUMA_BALANCING
extern bool pmd_trans_migrating(pmd_t pmd);
extern void wait_migrate_huge_page(struct anon_vma *anon_vma, pmd_t *pmd);
extern int migrate_misplaced_page(struct page *page,
				  struct vm_area_struct *vma, int node);
extern bool migrate_ratelimited(int node);
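
/*
 * Sketch of the NUMA hinting-fault pattern, modeled loosely on the
 * do_numa_page() caller: target_nid is the node the NUMA policy says
 * the page should live on (-1 if the page should stay put), and a
 * non-zero return from migrate_misplaced_page() means the page was
 * isolated and queued for migration:
 *
 *	if (target_nid != -1 &&
 *	    migrate_misplaced_page(page, vma, target_nid))
 *		page_nid = target_nid;
 */
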
#else
static inline bool pmd_trans_migrating(pmd_t pmd)
{
	return false;
}
static inline void wait_migrate_huge_page(struct anon_vma *anon_vma, pmd_t *pmd)
{
}
static inline int migrate_misplaced_page(struct page *page,
					 struct vm_area_struct *vma, int node)
{
	return -EAGAIN; /* can't migrate now */
}
static inline bool migrate_ratelimited(int node)
{
	return false;
}
#endif /* CONFIG_NUMA_BALANCING */

#if defined(CONFIG_NUMA_BALANCING) && defined(CONFIG_TRANSPARENT_HUGEPAGE)
extern int migrate_misplaced_transhuge_page(struct mm_struct *mm,
			struct vm_area_struct *vma,
			pmd_t *pmd, pmd_t entry,
			unsigned long address,
			struct page *page, int node);
#else
static inline int migrate_misplaced_transhuge_page(struct mm_struct *mm,
			struct vm_area_struct *vma,
			pmd_t *pmd, pmd_t entry,
			unsigned long address,
			struct page *page, int node)
{
	return -EAGAIN;
}
#endif /* CONFIG_NUMA_BALANCING && CONFIG_TRANSPARENT_HUGEPAGE */

#endif /* _LINUX_MIGRATE_H */