#ifndef _LINUX_MIGRATE_H
#define _LINUX_MIGRATE_H

#include <linux/mm.h>
#include <linux/mempolicy.h>
#include <linux/migrate_mode.h>
#include <linux/hugetlb.h>

typedef struct page *new_page_t(struct page *page, unsigned long private,
				int **reason);
typedef void free_page_t(struct page *page, unsigned long private);

/*
 * Return values from address_space_operations.migratepage():
 * - negative errno on page migration failure;
 * - zero on page migration success;
 */
#define MIGRATEPAGE_SUCCESS		0

enum migrate_reason {
	MR_COMPACTION,
	MR_MEMORY_FAILURE,
	MR_MEMORY_HOTPLUG,
	MR_SYSCALL,		/* also applies to cpusets */
	MR_MEMPOLICY_MBIND,
	MR_NUMA_MISPLACED,
	MR_CMA,
	MR_TYPES
};

/* In mm/debug.c; also keep in sync with include/trace/events/migrate.h */
extern char *migrate_reason_names[MR_TYPES];

static inline struct page *new_page_nodemask(struct page *page,
				int preferred_nid, nodemask_t *nodemask)
{
	gfp_t gfp_mask = GFP_USER | __GFP_MOVABLE | __GFP_RETRY_MAYFAIL;

	if (PageHuge(page))
		return alloc_huge_page_nodemask(page_hstate(compound_head(page)),
				preferred_nid, nodemask);

	if (PageHighMem(page) || (zone_idx(page_zone(page)) == ZONE_MOVABLE))
		gfp_mask |= __GFP_HIGHMEM;

	return __alloc_pages_nodemask(gfp_mask, 0, preferred_nid, nodemask);
}

#ifdef CONFIG_MIGRATION

extern void putback_movable_pages(struct list_head *l);
extern int migrate_page(struct address_space *mapping,
			struct page *newpage, struct page *page,
			enum migrate_mode mode);
extern int migrate_pages(struct list_head *l, new_page_t new, free_page_t free,
		unsigned long private, enum migrate_mode mode, int reason);
extern int isolate_movable_page(struct page *page, isolate_mode_t mode);
extern void putback_movable_page(struct page *page);

extern int migrate_prep(void);
extern int migrate_prep_local(void);
extern void migrate_page_copy(struct page *newpage, struct page *page);
extern int migrate_huge_page_move_mapping(struct address_space *mapping,
				  struct page *newpage, struct page *page);
extern int migrate_page_move_mapping(struct address_space *mapping,
		struct page *newpage, struct page *page,
		struct buffer_head *head, enum migrate_mode mode,
		int extra_count);
#else

static inline void putback_movable_pages(struct list_head *l) {}
static inline int migrate_pages(struct list_head *l, new_page_t new,
		free_page_t free, unsigned long private, enum migrate_mode mode,
		int reason)
	{ return -ENOSYS; }
static inline int isolate_movable_page(struct page *page, isolate_mode_t mode)
	{ return -EBUSY; }

static inline int migrate_prep(void) { return -ENOSYS; }
static inline int migrate_prep_local(void) { return -ENOSYS; }

static inline void migrate_page_copy(struct page *newpage,
				     struct page *page) {}

static inline int migrate_huge_page_move_mapping(struct address_space *mapping,
				  struct page *newpage, struct page *page)
{
	return -ENOSYS;
}

#endif /* CONFIG_MIGRATION */

#ifdef CONFIG_COMPACTION
extern int PageMovable(struct page *page);
extern void __SetPageMovable(struct page *page, struct address_space *mapping);
extern void __ClearPageMovable(struct page *page);
#else
static inline int PageMovable(struct page *page) { return 0; }
static inline void __SetPageMovable(struct page *page,
				struct address_space *mapping)
{
}
static inline void __ClearPageMovable(struct page *page)
{
}
#endif

#ifdef CONFIG_NUMA_BALANCING
extern bool pmd_trans_migrating(pmd_t pmd);
extern int migrate_misplaced_page(struct page *page,
				  struct vm_area_struct *vma, int node);
#else
static inline bool pmd_trans_migrating(pmd_t pmd)
{
	return false;
}
static inline int migrate_misplaced_page(struct page *page,
					 struct vm_area_struct *vma, int node)
{
	return -EAGAIN; /* can't migrate now */
}
#endif /* CONFIG_NUMA_BALANCING */

#if defined(CONFIG_NUMA_BALANCING) && defined(CONFIG_TRANSPARENT_HUGEPAGE)
extern int migrate_misplaced_transhuge_page(struct mm_struct *mm,
			struct vm_area_struct *vma,
			pmd_t *pmd, pmd_t entry,
			unsigned long address,
			struct page *page, int node);
#else
static inline int migrate_misplaced_transhuge_page(struct mm_struct *mm,
			struct vm_area_struct *vma,
			pmd_t *pmd, pmd_t entry,
			unsigned long address,
			struct page *page, int node)
{
	return -EAGAIN;
}
#endif /* CONFIG_NUMA_BALANCING && CONFIG_TRANSPARENT_HUGEPAGE */

#endif /* _LINUX_MIGRATE_H */
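/*
 * Minimal usage sketch (a hypothetical caller, not declared by this header):
 * users of the API above typically isolate candidate pages onto a private
 * list, hand migrate_pages() an allocation callback matching new_page_t,
 * and put back whatever could not be migrated.  new_node_page() and
 * target_nid below are illustrative names only; new_page_nodemask() is the
 * helper this header provides for backing such a callback.
 *
 *	static struct page *new_node_page(struct page *page,
 *					  unsigned long node, int **x)
 *	{
 *		return new_page_nodemask(page, node,
 *					 &node_states[N_MEMORY]);
 *	}
 *
 *	LIST_HEAD(pagelist);
 *	int err;
 *
 *	... isolate candidate pages onto &pagelist ...
 *
 *	err = migrate_pages(&pagelist, new_node_page, NULL, target_nid,
 *			    MIGRATE_SYNC, MR_SYSCALL);
 *	if (err)
 *		putback_movable_pages(&pagelist);
 */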