xref: /linux-6.15/include/linux/migrate.h (revision dbcfe5ec)
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_MIGRATE_H
#define _LINUX_MIGRATE_H

#include <linux/mm.h>
#include <linux/mempolicy.h>
#include <linux/migrate_mode.h>
#include <linux/hugetlb.h>

typedef struct page *new_page_t(struct page *page, unsigned long private);
typedef void free_page_t(struct page *page, unsigned long private);

struct migration_target_control;

/*
 * Return values from address_space_operations.migrate_folio():
 * - negative errno on page migration failure;
 * - zero on page migration success;
 */
#define MIGRATEPAGE_SUCCESS		0

/**
 * struct movable_operations - Driver page migration
 * @isolate_page:
 * The VM calls this function to prepare the page to be moved.  The page
 * is locked and the driver should not unlock it.  The driver should
 * return ``true`` if the page is movable and ``false`` if it is not
 * currently movable.  After this function returns, the VM uses the
 * page->lru field, so the driver must preserve any information which
 * is usually stored here.
 *
 * @migrate_page:
 * After isolation, the VM calls this function with the isolated
 * @src page.  The driver should copy the contents of the
 * @src page to the @dst page and set up the fields of @dst page.
 * Both pages are locked.
 * If page migration is successful, the driver should call
 * __ClearPageMovable(@src) and return MIGRATEPAGE_SUCCESS.
 * If the driver cannot migrate the page at the moment, it can return
 * -EAGAIN.  The VM interprets this as a temporary migration failure and
 * will retry it later.  Any other error value is a permanent migration
 * failure and migration will not be retried.
 * The driver shouldn't touch the @src->lru field while in the
 * migrate_page() function.  It may write to @dst->lru.
 *
 * @putback_page:
 * If migration fails on the isolated page, the VM informs the driver
 * that the page is no longer a candidate for migration by calling
 * this function.  The driver should put the isolated page back into
 * its own data structure.
 */
struct movable_operations {
	bool (*isolate_page)(struct page *, isolate_mode_t);
	int (*migrate_page)(struct page *dst, struct page *src,
			enum migrate_mode);
	void (*putback_page)(struct page *);
};
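
/*
 * For example, a hypothetical driver that keeps its movable pages on a
 * private list protected by a spinlock might wire up the callbacks roughly
 * as below; my_dev_lock, my_dev_pages and the my_*() functions are purely
 * illustrative names, not kernel API:
 *
 *	static bool my_isolate_page(struct page *page, isolate_mode_t mode)
 *	{
 *		spin_lock(&my_dev_lock);
 *		// Hand the page over to the VM: it owns page->lru from now
 *		// until migration succeeds or the page is put back.
 *		list_del_init(&page->lru);
 *		spin_unlock(&my_dev_lock);
 *		return true;
 *	}
 *
 *	static int my_migrate_page(struct page *dst, struct page *src,
 *				   enum migrate_mode mode)
 *	{
 *		// Both pages are locked by the VM at this point.
 *		copy_highpage(dst, src);
 *		// ... transfer any driver-private state from @src to @dst ...
 *		__ClearPageMovable(src);
 *		return MIGRATEPAGE_SUCCESS;
 *	}
 *
 *	static void my_putback_page(struct page *page)
 *	{
 *		// Migration failed; reclaim ownership of the page.
 *		spin_lock(&my_dev_lock);
 *		list_add(&page->lru, &my_dev_pages);
 *		spin_unlock(&my_dev_lock);
 *	}
 *
 *	static const struct movable_operations my_movable_ops = {
 *		.isolate_page	= my_isolate_page,
 *		.migrate_page	= my_migrate_page,
 *		.putback_page	= my_putback_page,
 *	};
 */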

/* Defined in mm/debug.c: */
extern const char *migrate_reason_names[MR_TYPES];

#ifdef CONFIG_MIGRATION

extern void putback_movable_pages(struct list_head *l);
int migrate_folio(struct address_space *mapping, struct folio *dst,
		struct folio *src, enum migrate_mode mode);
extern int migrate_pages(struct list_head *l, new_page_t new, free_page_t free,
		unsigned long private, enum migrate_mode mode, int reason,
		unsigned int *ret_succeeded);
extern struct page *alloc_migration_target(struct page *page, unsigned long private);
extern int isolate_movable_page(struct page *page, isolate_mode_t mode);
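
/*
 * A typical migrate_pages() caller isolates the pages it wants to move onto
 * a private list (e.g. with isolate_movable_page() for driver pages) and
 * supplies an allocation/free callback pair.  A minimal sketch, assuming
 * target_nid holds the destination node id and using MR_MEMORY_HOTPLUG
 * purely as an example reason:
 *
 *	static struct page *new_page_on_node(struct page *page,
 *					     unsigned long private)
 *	{
 *		// A real callback would normally match the source page's
 *		// size and zone; this sketch only handles order-0 pages.
 *		return alloc_pages_node((int)private, GFP_HIGHUSER_MOVABLE, 0);
 *	}
 *
 *	static void free_unused_page(struct page *page, unsigned long private)
 *	{
 *		__free_page(page);
 *	}
 *
 *	LIST_HEAD(pagelist);
 *	unsigned int nr_succeeded;
 *	int ret;
 *
 *	// ... isolate the pages to be moved onto &pagelist ...
 *
 *	ret = migrate_pages(&pagelist, new_page_on_node, free_unused_page,
 *			    target_nid, MIGRATE_SYNC, MR_MEMORY_HOTPLUG,
 *			    &nr_succeeded);
 *	if (ret)
 *		// Some pages could not be migrated; give them back.
 *		putback_movable_pages(&pagelist);
 */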

int migrate_huge_page_move_mapping(struct address_space *mapping,
		struct folio *dst, struct folio *src);
void migration_entry_wait_on_locked(swp_entry_t entry, pte_t *ptep,
				spinlock_t *ptl);
void folio_migrate_flags(struct folio *newfolio, struct folio *folio);
void folio_migrate_copy(struct folio *newfolio, struct folio *folio);
int folio_migrate_mapping(struct address_space *mapping,
		struct folio *newfolio, struct folio *folio, int extra_count);

#else

static inline void putback_movable_pages(struct list_head *l) {}
static inline int migrate_pages(struct list_head *l, new_page_t new,
		free_page_t free, unsigned long private, enum migrate_mode mode,
		int reason, unsigned int *ret_succeeded)
	{ return -ENOSYS; }
static inline struct page *alloc_migration_target(struct page *page,
		unsigned long private)
	{ return NULL; }
static inline int isolate_movable_page(struct page *page, isolate_mode_t mode)
	{ return -EBUSY; }

static inline int migrate_huge_page_move_mapping(struct address_space *mapping,
				  struct folio *dst, struct folio *src)
{
	return -ENOSYS;
}

#endif /* CONFIG_MIGRATION */

#if defined(CONFIG_MIGRATION) && defined(CONFIG_NUMA)
extern void set_migration_target_nodes(void);
extern void migrate_on_reclaim_init(void);
extern bool numa_demotion_enabled;
extern int next_demotion_node(int node);
#else
static inline void set_migration_target_nodes(void) {}
static inline void migrate_on_reclaim_init(void) {}
static inline int next_demotion_node(int node)
{
	return NUMA_NO_NODE;
}
#define numa_demotion_enabled	false
#endif

#ifdef CONFIG_COMPACTION
bool PageMovable(struct page *page);
void __SetPageMovable(struct page *page, const struct movable_operations *ops);
void __ClearPageMovable(struct page *page);
#else
static inline bool PageMovable(struct page *page) { return false; }
static inline void __SetPageMovable(struct page *page,
		const struct movable_operations *ops)
{
}
static inline void __ClearPageMovable(struct page *page)
{
}
#endif

static inline bool folio_test_movable(struct folio *folio)
{
	return PageMovable(&folio->page);
}

static inline
const struct movable_operations *page_movable_ops(struct page *page)
{
	VM_BUG_ON(!__PageMovable(page));

	return (const struct movable_operations *)
		((unsigned long)page->mapping - PAGE_MAPPING_MOVABLE);
}
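
/*
 * A driver typically publishes its movable_operations when it sets up the
 * page, e.g. (with my_movable_ops being the driver's ops table):
 *
 *	__SetPageMovable(page, &my_movable_ops);
 *
 * This stores the ops pointer in page->mapping with PAGE_MAPPING_MOVABLE set
 * in the low bits; page_movable_ops() above masks that bit back off so the
 * VM can call into the driver during compaction and migration.
 */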

#ifdef CONFIG_NUMA_BALANCING
extern int migrate_misplaced_page(struct page *page,
				  struct vm_area_struct *vma, int node);
#else
static inline int migrate_misplaced_page(struct page *page,
					 struct vm_area_struct *vma, int node)
{
	return -EAGAIN; /* can't migrate now */
}
#endif /* CONFIG_NUMA_BALANCING */

#ifdef CONFIG_MIGRATION

/*
 * Watch out for PAE architectures, where an unsigned long might not have
 * enough bits to store both the physical address and the flags. So far we
 * have enough room for all our flags.
 */
#define MIGRATE_PFN_VALID	(1UL << 0)
#define MIGRATE_PFN_MIGRATE	(1UL << 1)
#define MIGRATE_PFN_WRITE	(1UL << 3)
#define MIGRATE_PFN_SHIFT	6

static inline struct page *migrate_pfn_to_page(unsigned long mpfn)
{
	if (!(mpfn & MIGRATE_PFN_VALID))
		return NULL;
	return pfn_to_page(mpfn >> MIGRATE_PFN_SHIFT);
}

static inline unsigned long migrate_pfn(unsigned long pfn)
{
	return (pfn << MIGRATE_PFN_SHIFT) | MIGRATE_PFN_VALID;
}
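
/*
 * migrate_pfn() packs a pfn into the src/dst arrays of struct migrate_vma
 * (below) by shifting it up by MIGRATE_PFN_SHIFT and setting
 * MIGRATE_PFN_VALID, leaving the low bits free for the other flags;
 * migrate_pfn_to_page() reverses the encoding.  A rough sketch of how a
 * driver fills one dst slot after migrate_vma_setup(), where dpage is a
 * freshly allocated destination page and i an index into the arrays:
 *
 *	if (src[i] & MIGRATE_PFN_MIGRATE) {
 *		// spage may be NULL for a pte_none() hole that can still
 *		// be populated with a new page.
 *		struct page *spage = migrate_pfn_to_page(src[i]);
 *
 *		// ... copy data from spage (if any) into dpage ...
 *		dst[i] = migrate_pfn(page_to_pfn(dpage));
 *		if (src[i] & MIGRATE_PFN_WRITE)
 *			dst[i] |= MIGRATE_PFN_WRITE;
 *	} else {
 *		// The core refused this entry; leave the dst slot empty.
 *		dst[i] = 0;
 *	}
 */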

enum migrate_vma_direction {
	MIGRATE_VMA_SELECT_SYSTEM = 1 << 0,
	MIGRATE_VMA_SELECT_DEVICE_PRIVATE = 1 << 1,
	MIGRATE_VMA_SELECT_DEVICE_COHERENT = 1 << 2,
};

struct migrate_vma {
	struct vm_area_struct	*vma;
	/*
	 * Both the src and dst arrays must be big enough for
	 * (end - start) >> PAGE_SHIFT entries.
	 *
	 * The caller must not modify the src array after migrate_vma_setup()
	 * returns, and must not modify the dst array after
	 * migrate_vma_pages() returns.
	 */
	unsigned long		*dst;
	unsigned long		*src;
	unsigned long		cpages;
	unsigned long		npages;
	unsigned long		start;
	unsigned long		end;

	/*
	 * Set to the owner value also stored in page->pgmap->owner for
	 * migrating out of device private memory. The flags also need to
	 * be set to MIGRATE_VMA_SELECT_DEVICE_PRIVATE.
	 * The caller should always set this field when using mmu notifier
	 * callbacks to avoid device MMU invalidations for device private
	 * pages that are not being migrated.
	 */
	void			*pgmap_owner;
	unsigned long		flags;
};

int migrate_vma_setup(struct migrate_vma *args);
void migrate_vma_pages(struct migrate_vma *migrate);
void migrate_vma_finalize(struct migrate_vma *migrate);
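
/*
 * The usual calling sequence for a driver migrating a range of a VMA is:
 * fill in a struct migrate_vma, let migrate_vma_setup() collect and isolate
 * the source pages, copy the data for every entry that still has
 * MIGRATE_PFN_MIGRATE set, then call migrate_vma_pages() and
 * migrate_vma_finalize().  A condensed sketch, where vma/start/end,
 * src_pfns/dst_pfns and my_pgmap_owner are assumed to be set up by the
 * caller:
 *
 *	struct migrate_vma args = {
 *		.vma		= vma,
 *		.start		= start,
 *		.end		= end,
 *		.src		= src_pfns,
 *		.dst		= dst_pfns,
 *		.pgmap_owner	= my_pgmap_owner,
 *		.flags		= MIGRATE_VMA_SELECT_DEVICE_PRIVATE,
 *	};
 *	unsigned long i;
 *	int ret;
 *
 *	ret = migrate_vma_setup(&args);
 *	if (ret)
 *		return ret;
 *
 *	for (i = 0; i < args.npages; i++) {
 *		if (!(args.src[i] & MIGRATE_PFN_MIGRATE))
 *			continue;
 *		// ... allocate a destination page, copy the data and
 *		// fill args.dst[i] as shown above ...
 *	}
 *
 *	migrate_vma_pages(&args);	// install the destination pages
 *	migrate_vma_finalize(&args);	// clean up; entries that did not
 *					// migrate keep their original pages
 */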
#endif /* CONFIG_MIGRATION */

#endif /* _LINUX_MIGRATE_H */