/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_MIGRATE_H
#define _LINUX_MIGRATE_H

#include <linux/mm.h>
#include <linux/mempolicy.h>
#include <linux/migrate_mode.h>
#include <linux/hugetlb.h>

typedef struct page *new_page_t(struct page *page, unsigned long private);
typedef void free_page_t(struct page *page, unsigned long private);

struct migration_target_control;

/*
 * Return values from address_space_operations.migratepage():
 *   - negative errno on page migration failure;
 *   - zero on page migration success;
 */
#define MIGRATEPAGE_SUCCESS		0

enum migrate_reason {
	MR_COMPACTION,
	MR_MEMORY_FAILURE,
	MR_MEMORY_HOTPLUG,
	MR_SYSCALL,		/* also applies to cpusets */
	MR_MEMPOLICY_MBIND,
	MR_NUMA_MISPLACED,
	MR_CONTIG_RANGE,
	MR_LONGTERM_PIN,
	MR_TYPES
};

/* In mm/debug.c; also keep in sync with include/trace/events/migrate.h */
extern const char *migrate_reason_names[MR_TYPES];

#ifdef CONFIG_MIGRATION

extern void putback_movable_pages(struct list_head *l);
extern int migrate_page(struct address_space *mapping,
			struct page *newpage, struct page *page,
			enum migrate_mode mode);
extern int migrate_pages(struct list_head *l, new_page_t new, free_page_t free,
		unsigned long private, enum migrate_mode mode, int reason);
extern struct page *alloc_migration_target(struct page *page, unsigned long private);
extern int isolate_movable_page(struct page *page, isolate_mode_t mode);

extern void migrate_page_states(struct page *newpage, struct page *page);
extern void migrate_page_copy(struct page *newpage, struct page *page);
extern int migrate_huge_page_move_mapping(struct address_space *mapping,
		struct page *newpage, struct page *page);
extern int migrate_page_move_mapping(struct address_space *mapping,
		struct page *newpage, struct page *page, int extra_count);
extern void copy_huge_page(struct page *dst, struct page *src);
#else

static inline void putback_movable_pages(struct list_head *l) {}
static inline int migrate_pages(struct list_head *l, new_page_t new,
		free_page_t free, unsigned long private, enum migrate_mode mode,
		int reason)
	{ return -ENOSYS; }
static inline struct page *alloc_migration_target(struct page *page,
		unsigned long private)
	{ return NULL; }
static inline int isolate_movable_page(struct page *page, isolate_mode_t mode)
	{ return -EBUSY; }

static inline void migrate_page_states(struct page *newpage, struct page *page)
{
}

static inline void migrate_page_copy(struct page *newpage,
				     struct page *page) {}

static inline int migrate_huge_page_move_mapping(struct address_space *mapping,
		struct page *newpage, struct page *page)
{
	return -ENOSYS;
}

static inline void copy_huge_page(struct page *dst, struct page *src)
{
}
#endif /* CONFIG_MIGRATION */

#ifdef CONFIG_COMPACTION
extern int PageMovable(struct page *page);
extern void __SetPageMovable(struct page *page, struct address_space *mapping);
extern void __ClearPageMovable(struct page *page);
#else
static inline int PageMovable(struct page *page) { return 0; }
static inline void __SetPageMovable(struct page *page,
				    struct address_space *mapping)
{
}
static inline void __ClearPageMovable(struct page *page)
{
}
#endif

#ifdef CONFIG_NUMA_BALANCING
extern int migrate_misplaced_page(struct page *page,
				  struct vm_area_struct *vma, int node);
#else
static inline int migrate_misplaced_page(struct page *page,
					 struct vm_area_struct *vma, int node)
{
	return -EAGAIN; /* can't migrate now */
}
#endif /* CONFIG_NUMA_BALANCING */
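
/*
 * Illustrative sketch only (not built as part of this header): a typical
 * migrate_pages() caller isolates pages onto a private list and passes
 * alloc_migration_target() as the allocation callback, putting back any
 * pages that could not be migrated. Note that the definition of
 * struct migration_target_control lives in mm/internal.h, so this pattern
 * is limited to mm/ code; "target_nid" is a hypothetical destination node.
 *
 *	LIST_HEAD(pagelist);
 *	struct migration_target_control mtc = {
 *		.nid = target_nid,
 *		.gfp_mask = GFP_USER | __GFP_MOVABLE,
 *	};
 *
 *	(fill pagelist, e.g. via isolate_lru_page()/isolate_movable_page())
 *
 *	if (migrate_pages(&pagelist, alloc_migration_target, NULL,
 *			  (unsigned long)&mtc, MIGRATE_SYNC, MR_SYSCALL))
 *		putback_movable_pages(&pagelist);
 *
 * migrate_pages() returns the number of pages that could not be migrated
 * (or a negative errno), so any nonzero result leaves pages on the list
 * that must be put back.
 */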

#ifdef CONFIG_MIGRATION

/*
 * Watch out for PAE architectures: there an unsigned long might not have
 * enough bits to store both the physical address and the flags below. So
 * far we have enough room for all our flags.
 */
#define MIGRATE_PFN_VALID	(1UL << 0)
#define MIGRATE_PFN_MIGRATE	(1UL << 1)
#define MIGRATE_PFN_LOCKED	(1UL << 2)
#define MIGRATE_PFN_WRITE	(1UL << 3)
#define MIGRATE_PFN_SHIFT	6

static inline struct page *migrate_pfn_to_page(unsigned long mpfn)
{
	if (!(mpfn & MIGRATE_PFN_VALID))
		return NULL;
	return pfn_to_page(mpfn >> MIGRATE_PFN_SHIFT);
}

static inline unsigned long migrate_pfn(unsigned long pfn)
{
	return (pfn << MIGRATE_PFN_SHIFT) | MIGRATE_PFN_VALID;
}

enum migrate_vma_direction {
	MIGRATE_VMA_SELECT_SYSTEM = 1 << 0,
	MIGRATE_VMA_SELECT_DEVICE_PRIVATE = 1 << 1,
};

struct migrate_vma {
	struct vm_area_struct	*vma;
	/*
	 * Both src and dst array must be big enough for
	 * (end - start) >> PAGE_SHIFT entries.
	 *
	 * The src array must not be modified by the caller after
	 * migrate_vma_setup(), and the dst array must not be modified by
	 * the caller after migrate_vma_pages() returns.
	 */
	unsigned long		*dst;
	unsigned long		*src;
	unsigned long		cpages;
	unsigned long		npages;
	unsigned long		start;
	unsigned long		end;

	/*
	 * Set to the owner value also stored in page->pgmap->owner for
	 * migrating out of device private memory. The flags also need to
	 * be set to MIGRATE_VMA_SELECT_DEVICE_PRIVATE.
	 * The caller should always set this field when using mmu notifier
	 * callbacks to avoid device MMU invalidations for device private
	 * pages that are not being migrated.
	 */
	void			*pgmap_owner;
	unsigned long		flags;
};

int migrate_vma_setup(struct migrate_vma *args);
void migrate_vma_pages(struct migrate_vma *migrate);
void migrate_vma_finalize(struct migrate_vma *migrate);

#endif /* CONFIG_MIGRATION */

#endif /* _LINUX_MIGRATE_H */
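
/*
 * Illustrative sketch of the migrate_vma flow above (driver-side, not built
 * as part of this header). Error handling is omitted; driver_alloc_page()
 * and driver_copy_page() are hypothetical device-specific helpers, and the
 * src_pfns/dst_pfns arrays and drvdata owner are assumed to be provided by
 * the caller:
 *
 *	struct migrate_vma args = {
 *		.vma		= vma,
 *		.start		= start,
 *		.end		= end,
 *		.src		= src_pfns,
 *		.dst		= dst_pfns,
 *		.pgmap_owner	= drvdata,
 *		.flags		= MIGRATE_VMA_SELECT_SYSTEM,
 *	};
 *	unsigned long i;
 *
 *	if (migrate_vma_setup(&args))
 *		return -EINVAL;
 *
 *	for (i = 0; i < args.npages; i++) {
 *		struct page *dpage;
 *
 *		if (!(args.src[i] & MIGRATE_PFN_MIGRATE))
 *			continue;
 *		dpage = driver_alloc_page();
 *		lock_page(dpage);
 *		driver_copy_page(dpage, migrate_pfn_to_page(args.src[i]));
 *		args.dst[i] = migrate_pfn(page_to_pfn(dpage)) |
 *			      MIGRATE_PFN_LOCKED;
 *	}
 *
 *	migrate_vma_pages(&args);
 *	migrate_vma_finalize(&args);
 *
 * Destination entries are filled between migrate_vma_setup() and
 * migrate_vma_pages(); migrate_vma_finalize() then unlocks and releases
 * the pages on both sides.
 */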