xref: /linux-6.15/include/linux/migrate.h (revision 68f2736a)
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_MIGRATE_H
#define _LINUX_MIGRATE_H

#include <linux/mm.h>
#include <linux/mempolicy.h>
#include <linux/migrate_mode.h>
#include <linux/hugetlb.h>

typedef struct page *new_page_t(struct page *page, unsigned long private);
typedef void free_page_t(struct page *page, unsigned long private);

struct migration_target_control;

/*
 * Return values from address_space_operations.migratepage():
 * - negative errno on page migration failure;
 * - zero on page migration success;
 */
#define MIGRATEPAGE_SUCCESS		0

/**
 * struct movable_operations - Driver page migration
 * @isolate_page:
 * The VM calls this function to prepare the page to be moved.  The page
 * is locked and the driver should not unlock it.  The driver should
 * return ``true`` if the page is movable and ``false`` if it is not
 * currently movable.  After this function returns, the VM uses the
 * page->lru field, so the driver must preserve any information which
 * is usually stored here.
 *
 * @migrate_page:
 * After isolation, the VM calls this function with the isolated
 * @src page.  The driver should copy the contents of the
 * @src page to the @dst page and set up the fields of @dst page.
 * Both pages are locked.
 * If page migration is successful, the driver should call
 * __ClearPageMovable(@src) and return MIGRATEPAGE_SUCCESS.
 * If the driver cannot migrate the page at the moment, it can return
 * -EAGAIN.  The VM interprets this as a temporary migration failure and
 * will retry it later.  Any other error value is a permanent migration
 * failure and migration will not be retried.
 * The driver shouldn't touch the @src->lru field while in the
 * migrate_page() function.  It may write to @dst->lru.
 *
 * @putback_page:
 * If migration fails on the isolated page, the VM informs the driver
 * that the page is no longer a candidate for migration by calling
 * this function.  The driver should put the isolated page back into
 * its own data structure.
 */
struct movable_operations {
	bool (*isolate_page)(struct page *, isolate_mode_t);
	int (*migrate_page)(struct page *dst, struct page *src,
			enum migrate_mode);
	void (*putback_page)(struct page *);
};
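
/*
 * A minimal sketch of how a driver might wire up these callbacks; the
 * names my_isolate/my_migrate/my_putback/my_movable_ops are hypothetical
 * (see mm/balloon_compaction.c for a real user):
 *
 *	static bool my_isolate(struct page *page, isolate_mode_t mode)
 *	{
 *		// Detach the locked page from the driver's private list.
 *		return true;
 *	}
 *
 *	static int my_migrate(struct page *dst, struct page *src,
 *			enum migrate_mode mode)
 *	{
 *		copy_highpage(dst, src);
 *		__ClearPageMovable(src);
 *		return MIGRATEPAGE_SUCCESS;
 *	}
 *
 *	static void my_putback(struct page *page)
 *	{
 *		// Give the page back to the driver's private list.
 *	}
 *
 *	static const struct movable_operations my_movable_ops = {
 *		.isolate_page	= my_isolate,
 *		.migrate_page	= my_migrate,
 *		.putback_page	= my_putback,
 *	};
 */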

/* Defined in mm/debug.c: */
extern const char *migrate_reason_names[MR_TYPES];

#ifdef CONFIG_MIGRATION

extern void putback_movable_pages(struct list_head *l);
extern int migrate_page(struct address_space *mapping,
			struct page *newpage, struct page *page,
			enum migrate_mode mode);
extern int migrate_pages(struct list_head *l, new_page_t new, free_page_t free,
		unsigned long private, enum migrate_mode mode, int reason,
		unsigned int *ret_succeeded);
extern struct page *alloc_migration_target(struct page *page, unsigned long private);
extern int isolate_movable_page(struct page *page, isolate_mode_t mode);

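/*
 * A minimal sketch of driving migrate_pages() with the allocation helper
 * above; "pagelist" and "nid" are hypothetical, the target-control fields
 * come from mm/internal.h, and the shape follows callers such as
 * do_migrate_range() in mm/memory_hotplug.c:
 *
 *	LIST_HEAD(pagelist);
 *	struct migration_target_control mtc = {
 *		.nid		= nid,
 *		.gfp_mask	= GFP_USER | __GFP_MOVABLE | __GFP_RETRY_MAYFAIL,
 *	};
 *
 *	// ... isolate candidate pages onto &pagelist ...
 *
 *	if (migrate_pages(&pagelist, alloc_migration_target, NULL,
 *			  (unsigned long)&mtc, MIGRATE_SYNC,
 *			  MR_MEMORY_HOTPLUG, NULL))
 *		putback_movable_pages(&pagelist);
 */
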
extern void migrate_page_states(struct page *newpage, struct page *page);
extern void migrate_page_copy(struct page *newpage, struct page *page);
extern int migrate_huge_page_move_mapping(struct address_space *mapping,
				  struct page *newpage, struct page *page);
extern int migrate_page_move_mapping(struct address_space *mapping,
		struct page *newpage, struct page *page, int extra_count);
void migration_entry_wait_on_locked(swp_entry_t entry, pte_t *ptep,
				spinlock_t *ptl);
void folio_migrate_flags(struct folio *newfolio, struct folio *folio);
void folio_migrate_copy(struct folio *newfolio, struct folio *folio);
int folio_migrate_mapping(struct address_space *mapping,
		struct folio *newfolio, struct folio *folio, int extra_count);

#else

static inline void putback_movable_pages(struct list_head *l) {}
static inline int migrate_pages(struct list_head *l, new_page_t new,
		free_page_t free, unsigned long private, enum migrate_mode mode,
		int reason, unsigned int *ret_succeeded)
	{ return -ENOSYS; }
static inline struct page *alloc_migration_target(struct page *page,
		unsigned long private)
	{ return NULL; }
static inline int isolate_movable_page(struct page *page, isolate_mode_t mode)
	{ return -EBUSY; }

static inline void migrate_page_states(struct page *newpage, struct page *page)
{
}

static inline void migrate_page_copy(struct page *newpage,
				     struct page *page) {}

static inline int migrate_huge_page_move_mapping(struct address_space *mapping,
				  struct page *newpage, struct page *page)
{
	return -ENOSYS;
}

#endif /* CONFIG_MIGRATION */

#if defined(CONFIG_MIGRATION) && defined(CONFIG_NUMA)
extern void set_migration_target_nodes(void);
extern void migrate_on_reclaim_init(void);
extern bool numa_demotion_enabled;
extern int next_demotion_node(int node);
#else
static inline void set_migration_target_nodes(void) {}
static inline void migrate_on_reclaim_init(void) {}
static inline int next_demotion_node(int node)
{
	return NUMA_NO_NODE;
}
#define numa_demotion_enabled  false
#endif

#ifdef CONFIG_COMPACTION
bool PageMovable(struct page *page);
void __SetPageMovable(struct page *page, const struct movable_operations *ops);
void __ClearPageMovable(struct page *page);
#else
static inline bool PageMovable(struct page *page) { return false; }
static inline void __SetPageMovable(struct page *page,
		const struct movable_operations *ops)
{
}
static inline void __ClearPageMovable(struct page *page)
{
}
#endif

static inline bool folio_test_movable(struct folio *folio)
{
	return PageMovable(&folio->page);
}

static inline
const struct movable_operations *page_movable_ops(struct page *page)
{
	VM_BUG_ON(!__PageMovable(page));

	return (const struct movable_operations *)
		((unsigned long)page->mapping - PAGE_MAPPING_MOVABLE);
}
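
/*
 * A minimal sketch of the round trip behind page_movable_ops(): a driver
 * registers its ops on a locked page with __SetPageMovable(), which stores
 * the ops pointer in page->mapping with PAGE_MAPPING_MOVABLE set, and the
 * VM later recovers it as above (my_movable_ops is hypothetical):
 *
 *	lock_page(page);
 *	__SetPageMovable(page, &my_movable_ops);
 *	unlock_page(page);
 *
 *	// later, on an isolated page:
 *	page_movable_ops(page)->migrate_page(newpage, page, MIGRATE_SYNC);
 */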

#ifdef CONFIG_NUMA_BALANCING
extern int migrate_misplaced_page(struct page *page,
				  struct vm_area_struct *vma, int node);
#else
static inline int migrate_misplaced_page(struct page *page,
					 struct vm_area_struct *vma, int node)
{
	return -EAGAIN; /* can't migrate now */
}
#endif /* CONFIG_NUMA_BALANCING */

#ifdef CONFIG_MIGRATION

/*
 * Watch out for PAE architectures, where an unsigned long might not have
 * enough bits to store both the physical address and these flags. So far
 * we have enough room for all our flags.
 */
#define MIGRATE_PFN_VALID	(1UL << 0)
#define MIGRATE_PFN_MIGRATE	(1UL << 1)
#define MIGRATE_PFN_WRITE	(1UL << 3)
#define MIGRATE_PFN_SHIFT	6

static inline struct page *migrate_pfn_to_page(unsigned long mpfn)
{
	if (!(mpfn & MIGRATE_PFN_VALID))
		return NULL;
	return pfn_to_page(mpfn >> MIGRATE_PFN_SHIFT);
}

static inline unsigned long migrate_pfn(unsigned long pfn)
{
	return (pfn << MIGRATE_PFN_SHIFT) | MIGRATE_PFN_VALID;
}
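
/*
 * A minimal sketch of the mpfn encoding used in the src/dst arrays below:
 * the pfn lives in the upper bits, the MIGRATE_PFN_* flags in the lower
 * ones (migrate, dpage and the index i are hypothetical):
 *
 *	// device driver filling a dst entry with a writable device page:
 *	migrate->dst[i] = migrate_pfn(page_to_pfn(dpage)) | MIGRATE_PFN_WRITE;
 *
 *	// decoding a src entry produced by migrate_vma_setup():
 *	struct page *page = migrate_pfn_to_page(migrate->src[i]);
 *	if (!page || !(migrate->src[i] & MIGRATE_PFN_MIGRATE))
 *		continue;	// nothing to migrate for this address
 */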

enum migrate_vma_direction {
	MIGRATE_VMA_SELECT_SYSTEM = 1 << 0,
	MIGRATE_VMA_SELECT_DEVICE_PRIVATE = 1 << 1,
};

struct migrate_vma {
	struct vm_area_struct	*vma;
	/*
	 * Both the src and dst arrays must be big enough for
	 * (end - start) >> PAGE_SHIFT entries.
	 *
	 * The caller must not modify the src array after migrate_vma_setup()
	 * returns, and must not modify the dst array after migrate_vma_pages()
	 * returns.
	 */
	unsigned long		*dst;
	unsigned long		*src;
	unsigned long		cpages;
	unsigned long		npages;
	unsigned long		start;
	unsigned long		end;

	/*
	 * Set to the owner value also stored in page->pgmap->owner for
	 * migrating out of device private memory. The flags also need to
	 * be set to MIGRATE_VMA_SELECT_DEVICE_PRIVATE.
	 * The caller should always set this field when using mmu notifier
	 * callbacks to avoid device MMU invalidations for device private
	 * pages that are not being migrated.
	 */
	void			*pgmap_owner;
	unsigned long		flags;
};

int migrate_vma_setup(struct migrate_vma *args);
void migrate_vma_pages(struct migrate_vma *migrate);
void migrate_vma_finalize(struct migrate_vma *migrate);
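
/*
 * A minimal sketch of the migrate_vma flow a device driver follows; the
 * values shown (vma, src_pfns, dst_pfns, start, end, drvdata) are
 * hypothetical, and lib/test_hmm.c is a complete user:
 *
 *	struct migrate_vma args = {
 *		.vma		= vma,
 *		.src		= src_pfns,
 *		.dst		= dst_pfns,
 *		.start		= start,
 *		.end		= end,
 *		.pgmap_owner	= drvdata,
 *		.flags		= MIGRATE_VMA_SELECT_SYSTEM,
 *	};
 *
 *	if (migrate_vma_setup(&args))
 *		goto err;
 *	// allocate device pages, fill args.dst[], copy data src -> dst
 *	migrate_vma_pages(&args);
 *	// MIGRATE_PFN_MIGRATE in args.src[] reports per-page success
 *	migrate_vma_finalize(&args);
 */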
#endif /* CONFIG_MIGRATION */

#endif /* _LINUX_MIGRATE_H */