/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_MIGRATE_H
#define _LINUX_MIGRATE_H

#include <linux/mm.h>
#include <linux/mempolicy.h>
#include <linux/migrate_mode.h>
#include <linux/hugetlb.h>

typedef struct folio *new_folio_t(struct folio *folio, unsigned long private);
typedef void free_folio_t(struct folio *folio, unsigned long private);

struct migration_target_control;
/*
 * Return values from address_space_operations.migrate_folio():
 * - negative errno on page migration failure;
 * - zero on page migration success.
 */
#define MIGRATEPAGE_SUCCESS		0
#define MIGRATEPAGE_UNMAP		1

/**
 * struct movable_operations - Driver page migration
 * @isolate_page:
 * The VM calls this function to prepare the page to be moved.  The page
 * is locked and the driver should not unlock it.  The driver should
 * return ``true`` if the page is movable and ``false`` if it is not
 * currently movable.  After this function returns, the VM uses the
 * page->lru field, so the driver must preserve any information which
 * is usually stored here.
 *
 * @migrate_page:
 * After isolation, the VM calls this function with the isolated
 * @src page.  The driver should copy the contents of the
 * @src page to the @dst page and set up the fields of @dst page.
 * Both pages are locked.
 * If page migration is successful, the driver should call
 * __ClearPageMovable(@src) and return MIGRATEPAGE_SUCCESS.
 * If the driver cannot migrate the page at the moment, it can return
 * -EAGAIN.  The VM interprets this as a temporary migration failure and
 * will retry it later.  Any other error value is a permanent migration
 * failure and migration will not be retried.
 * The driver shouldn't touch the @src->lru field while in the
 * migrate_page() function.  It may write to @dst->lru.
 *
 * @putback_page:
 * If migration fails on the isolated page, the VM informs the driver
 * that the page is no longer a candidate for migration by calling
 * this function.  The driver should put the isolated page back into
 * its own data structure.
 */
struct movable_operations {
	bool (*isolate_page)(struct page *, isolate_mode_t);
	int (*migrate_page)(struct page *dst, struct page *src,
			enum migrate_mode);
	void (*putback_page)(struct page *);
};
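
/*
 * Illustrative sketch (not part of this header): a driver wiring up
 * movable_operations might look roughly like this.  All demo_* names
 * (demo_lock, demo_list, demo_page_to_item(), demo_retarget()) are
 * hypothetical driver-side helpers.
 *
 *	static bool demo_isolate_page(struct page *page, isolate_mode_t mode)
 *	{
 *		struct demo_item *item = demo_page_to_item(page);
 *
 *		// Detach the page from the driver's own list; from here on
 *		// the VM owns page->lru.  The page is already locked.
 *		spin_lock(&demo_lock);
 *		list_del_init(&item->link);
 *		spin_unlock(&demo_lock);
 *		return true;
 *	}
 *
 *	static int demo_migrate_page(struct page *dst, struct page *src,
 *			enum migrate_mode mode)
 *	{
 *		copy_highpage(dst, src);	// both pages are locked
 *		demo_retarget(demo_page_to_item(src), dst);
 *		__ClearPageMovable(src);
 *		return MIGRATEPAGE_SUCCESS;
 *	}
 *
 *	static void demo_putback_page(struct page *page)
 *	{
 *		struct demo_item *item = demo_page_to_item(page);
 *
 *		// Migration failed: take the page back onto our own list.
 *		spin_lock(&demo_lock);
 *		list_add(&item->link, &demo_list);
 *		spin_unlock(&demo_lock);
 *	}
 *
 *	static const struct movable_operations demo_movable_ops = {
 *		.isolate_page	= demo_isolate_page,
 *		.migrate_page	= demo_migrate_page,
 *		.putback_page	= demo_putback_page,
 *	};
 */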

/* Defined in mm/debug.c: */
extern const char *migrate_reason_names[MR_TYPES];

#ifdef CONFIG_MIGRATION

void putback_movable_pages(struct list_head *l);
int migrate_folio(struct address_space *mapping, struct folio *dst,
		struct folio *src, enum migrate_mode mode);
int migrate_pages(struct list_head *l, new_folio_t new, free_folio_t free,
		  unsigned long private, enum migrate_mode mode, int reason,
		  unsigned int *ret_succeeded);
struct folio *alloc_migration_target(struct folio *src, unsigned long private);
bool isolate_movable_page(struct page *page, isolate_mode_t mode);

int migrate_huge_page_move_mapping(struct address_space *mapping,
		struct folio *dst, struct folio *src);
void migration_entry_wait_on_locked(swp_entry_t entry, spinlock_t *ptl)
		__releases(ptl);
void folio_migrate_flags(struct folio *newfolio, struct folio *folio);
int folio_migrate_mapping(struct address_space *mapping,
		struct folio *newfolio, struct folio *folio, int extra_count);

#else

static inline void putback_movable_pages(struct list_head *l) {}
static inline int migrate_pages(struct list_head *l, new_folio_t new,
		free_folio_t free, unsigned long private,
		enum migrate_mode mode, int reason, unsigned int *ret_succeeded)
	{ return -ENOSYS; }
static inline struct folio *alloc_migration_target(struct folio *src,
		unsigned long private)
	{ return NULL; }
static inline bool isolate_movable_page(struct page *page, isolate_mode_t mode)
	{ return false; }

static inline int migrate_huge_page_move_mapping(struct address_space *mapping,
				  struct folio *dst, struct folio *src)
{
	return -ENOSYS;
}

#endif /* CONFIG_MIGRATION */
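
/*
 * Illustrative sketch: a typical migrate_pages() call site moves a list of
 * already-isolated folios, allocating targets on a chosen node via
 * alloc_migration_target().  target_nid and folio_list are placeholders,
 * and struct migration_target_control is defined in mm/internal.h, so the
 * field names below are an assumption about that internal definition.
 *
 *	struct migration_target_control mtc = {
 *		.nid = target_nid,
 *		.gfp_mask = GFP_USER | __GFP_MOVABLE,
 *	};
 *	unsigned int nr_succeeded;
 *	int err;
 *
 *	// folio_list holds folios isolated e.g. via folio_isolate_lru().
 *	err = migrate_pages(&folio_list, alloc_migration_target, NULL,
 *			    (unsigned long)&mtc, MIGRATE_SYNC, MR_SYSCALL,
 *			    &nr_succeeded);
 *	if (err)
 *		putback_movable_pages(&folio_list);	// return the leftovers
 */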

#ifdef CONFIG_COMPACTION
bool PageMovable(struct page *page);
void __SetPageMovable(struct page *page, const struct movable_operations *ops);
void __ClearPageMovable(struct page *page);
#else
static inline bool PageMovable(struct page *page) { return false; }
static inline void __SetPageMovable(struct page *page,
		const struct movable_operations *ops)
{
}
static inline void __ClearPageMovable(struct page *page)
{
}
#endif

static inline bool folio_test_movable(struct folio *folio)
{
	return PageMovable(&folio->page);
}

static inline
const struct movable_operations *folio_movable_ops(struct folio *folio)
{
	VM_BUG_ON(!__folio_test_movable(folio));

	return (const struct movable_operations *)
		((unsigned long)folio->mapping - PAGE_MAPPING_MOVABLE);
}

static inline
const struct movable_operations *page_movable_ops(struct page *page)
{
	VM_BUG_ON(!__PageMovable(page));

	return (const struct movable_operations *)
		((unsigned long)page->mapping - PAGE_MAPPING_MOVABLE);
}
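
/*
 * The decoding above mirrors what __SetPageMovable() stores: the ops
 * pointer is tagged into page->mapping with the low PAGE_MAPPING_MOVABLE
 * bit, roughly:
 *
 *	page->mapping = (void *)((unsigned long)ops | PAGE_MAPPING_MOVABLE);
 *
 * so subtracting PAGE_MAPPING_MOVABLE again recovers the
 * const struct movable_operations pointer.
 */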

#ifdef CONFIG_NUMA_BALANCING
int migrate_misplaced_folio_prepare(struct folio *folio,
		struct vm_area_struct *vma, int node);
int migrate_misplaced_folio(struct folio *folio, struct vm_area_struct *vma,
			   int node);
#else
static inline int migrate_misplaced_folio_prepare(struct folio *folio,
		struct vm_area_struct *vma, int node)
{
	return -EAGAIN; /* can't migrate now */
}
static inline int migrate_misplaced_folio(struct folio *folio,
					 struct vm_area_struct *vma, int node)
{
	return -EAGAIN; /* can't migrate now */
}
#endif /* CONFIG_NUMA_BALANCING */
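
/*
 * Illustrative sketch: a NUMA hinting fault handler uses these two calls
 * as a pair, with fault_folio, vma and target_nid standing in for the
 * caller's state.
 *
 *	// Step 1: check and isolate; a nonzero return (-EAGAIN) means
 *	// "leave the folio where it is for now".
 *	if (migrate_misplaced_folio_prepare(fault_folio, vma, target_nid))
 *		return;
 *
 *	// Step 2: actually move the isolated folio to the target node.
 *	migrate_misplaced_folio(fault_folio, vma, target_nid);
 */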

#ifdef CONFIG_MIGRATION

/*
 * Watch out for 32-bit PAE architectures, where an unsigned long might not
 * have enough bits to store both the physical address and the flags below.
 * So far we have enough room for all our flags.
 */
#define MIGRATE_PFN_VALID	(1UL << 0)
#define MIGRATE_PFN_MIGRATE	(1UL << 1)
#define MIGRATE_PFN_WRITE	(1UL << 3)
#define MIGRATE_PFN_SHIFT	6

static inline struct page *migrate_pfn_to_page(unsigned long mpfn)
{
	if (!(mpfn & MIGRATE_PFN_VALID))
		return NULL;
	return pfn_to_page(mpfn >> MIGRATE_PFN_SHIFT);
}

static inline unsigned long migrate_pfn(unsigned long pfn)
{
	return (pfn << MIGRATE_PFN_SHIFT) | MIGRATE_PFN_VALID;
}
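
/*
 * Example: a caller filling a migration array entry encodes a page's PFN
 * plus flag bits, and decodes the page back out later:
 *
 *	unsigned long mpfn = migrate_pfn(page_to_pfn(page)) |
 *			     MIGRATE_PFN_MIGRATE;
 *	struct page *same = migrate_pfn_to_page(mpfn);	// back to the page;
 *							// NULL without
 *							// MIGRATE_PFN_VALID
 */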

enum migrate_vma_direction {
	MIGRATE_VMA_SELECT_SYSTEM = 1 << 0,
	MIGRATE_VMA_SELECT_DEVICE_PRIVATE = 1 << 1,
	MIGRATE_VMA_SELECT_DEVICE_COHERENT = 1 << 2,
};

struct migrate_vma {
	struct vm_area_struct	*vma;
	/*
	 * Both the src and dst arrays must be big enough for
	 * (end - start) >> PAGE_SHIFT entries.
	 *
	 * The caller must not modify the src array after migrate_vma_setup()
	 * returns, and must not modify the dst array after
	 * migrate_vma_pages() returns.
	 */
	unsigned long		*dst;
	unsigned long		*src;
	unsigned long		cpages;
	unsigned long		npages;
	unsigned long		start;
	unsigned long		end;

	/*
	 * When migrating out of device private memory, set this to the
	 * owner value that is also stored in page->pgmap->owner, and set
	 * MIGRATE_VMA_SELECT_DEVICE_PRIVATE in flags.
	 * The caller should always set this field when using mmu notifier
	 * callbacks, to avoid device MMU invalidations for device private
	 * pages that are not being migrated.
	 */
	void			*pgmap_owner;
	unsigned long		flags;

	/*
	 * Set to vmf->page if this is being called to migrate a page as part of
	 * a migrate_to_ram() callback.
	 */
	struct page		*fault_page;
};

int migrate_vma_setup(struct migrate_vma *args);
void migrate_vma_pages(struct migrate_vma *migrate);
void migrate_vma_finalize(struct migrate_vma *migrate);
int migrate_device_range(unsigned long *src_pfns, unsigned long start,
			unsigned long npages);
void migrate_device_pages(unsigned long *src_pfns, unsigned long *dst_pfns,
			unsigned long npages);
void migrate_device_finalize(unsigned long *src_pfns,
			unsigned long *dst_pfns, unsigned long npages);
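
/*
 * Illustrative sketch: the usual three-phase flow for migrating system
 * memory into device memory.  The drm_dev owner cookie and the
 * demo_alloc_device_page()/demo_copy_to_device() helpers are hypothetical;
 * a real driver must also set up and lock its destination pages as its
 * pgmap implementation requires.
 *
 *	struct migrate_vma args = {
 *		.vma		= vma,
 *		.start		= start,
 *		.end		= end,
 *		.src		= src_pfns,
 *		.dst		= dst_pfns,	// both sized for npages entries
 *		.flags		= MIGRATE_VMA_SELECT_SYSTEM,
 *		.pgmap_owner	= drm_dev,
 *	};
 *	unsigned long i;
 *
 *	if (migrate_vma_setup(&args))
 *		return -EINVAL;
 *
 *	for (i = 0; i < args.npages; i++) {
 *		struct page *dpage;
 *
 *		if (!(args.src[i] & MIGRATE_PFN_MIGRATE))
 *			continue;	// this entry cannot be migrated
 *		dpage = demo_alloc_device_page();
 *		demo_copy_to_device(dpage, migrate_pfn_to_page(args.src[i]));
 *		args.dst[i] = migrate_pfn(page_to_pfn(dpage));
 *	}
 *
 *	migrate_vma_pages(&args);	// switch the page tables over
 *	migrate_vma_finalize(&args);	// remove migration PTEs, unlock
 */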

#endif /* CONFIG_MIGRATION */

#endif /* _LINUX_MIGRATE_H */