#ifndef _LINUX_COMPACTION_H
#define _LINUX_COMPACTION_H

/* Return values for compact_zone() and try_to_compact_pages() */
/* compaction didn't start as it was not possible or direct reclaim was more suitable */
#define COMPACT_SKIPPED		0
/* compaction should continue to another pageblock */
#define COMPACT_CONTINUE	1
/* direct compaction partially compacted a zone and there are suitable pages */
#define COMPACT_PARTIAL		2
/* The full zone was compacted */
#define COMPACT_COMPLETE	3

/* Who initiated compaction; passed as the compact_mode argument to compact_zone_order() */
#define COMPACT_MODE_DIRECT_RECLAIM	0
#define COMPACT_MODE_KSWAPD		1

#ifdef CONFIG_COMPACTION
extern int sysctl_compact_memory;
extern int sysctl_compaction_handler(struct ctl_table *table, int write,
			void __user *buffer, size_t *length, loff_t *ppos);
extern int sysctl_extfrag_threshold;
extern int sysctl_extfrag_handler(struct ctl_table *table, int write,
			void __user *buffer, size_t *length, loff_t *ppos);

extern int fragmentation_index(struct zone *zone, unsigned int order);
extern unsigned long try_to_compact_pages(struct zonelist *zonelist,
			int order, gfp_t gfp_mask, nodemask_t *mask,
			bool sync);
extern unsigned long compaction_suitable(struct zone *zone, int order);
extern unsigned long compact_zone_order(struct zone *zone, int order,
					gfp_t gfp_mask, bool sync,
					int compact_mode);

/* Do not skip compaction more than 64 times */
#define COMPACT_MAX_DEFER_SHIFT 6

/*
 * Compaction is deferred when compaction fails to result in a page
 * allocation success. 1 << compact_defer_shift compactions are skipped up
 * to a limit of 1 << COMPACT_MAX_DEFER_SHIFT
 */
static inline void defer_compaction(struct zone *zone)
{
	zone->compact_considered = 0;
	zone->compact_defer_shift++;

	if (zone->compact_defer_shift > COMPACT_MAX_DEFER_SHIFT)
		zone->compact_defer_shift = COMPACT_MAX_DEFER_SHIFT;
}

/* Returns true if compaction should be skipped this time */
static inline bool compaction_deferred(struct zone *zone)
{
	unsigned long defer_limit = 1UL << zone->compact_defer_shift;

	/* Avoid possible overflow */
	if (++zone->compact_considered > defer_limit)
		zone->compact_considered = defer_limit;

	return zone->compact_considered < defer_limit;
}

#else
static inline unsigned long try_to_compact_pages(struct zonelist *zonelist,
			int order, gfp_t gfp_mask, nodemask_t *nodemask,
			bool sync)
{
	return COMPACT_CONTINUE;
}

static inline unsigned long compaction_suitable(struct zone *zone, int order)
{
	return COMPACT_SKIPPED;
}

static inline unsigned long compact_zone_order(struct zone *zone, int order,
					gfp_t gfp_mask, bool sync,
					int compact_mode)
{
	return COMPACT_CONTINUE;
}

static inline void defer_compaction(struct zone *zone)
{
}

static inline bool compaction_deferred(struct zone *zone)
{
	return true;
}

#endif /* CONFIG_COMPACTION */

#if defined(CONFIG_COMPACTION) && defined(CONFIG_SYSFS) && defined(CONFIG_NUMA)
extern int compaction_register_node(struct node *node);
extern void compaction_unregister_node(struct node *node);

#else

static inline int compaction_register_node(struct node *node)
{
	return 0;
}

static inline void compaction_unregister_node(struct node *node)
{
}
#endif /* CONFIG_COMPACTION && CONFIG_SYSFS && CONFIG_NUMA */

#endif /* _LINUX_COMPACTION_H */
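
#if 0	/* Illustrative sketch only; not part of this header */
/*
 * A minimal usage sketch: one plausible way a direct-reclaim path might
 * combine the deferral helpers above with compact_zone_order(). The
 * function name example_try_compact() and the retry policy shown here
 * are hypothetical assumptions, included only to clarify how the pieces
 * fit together, not a copy of the page allocator's actual logic.
 */
static unsigned long example_try_compact(struct zone *zone, int order,
					 gfp_t gfp_mask, bool sync)
{
	unsigned long status;

	/* Back off if recent compaction attempts have not been helping */
	if (compaction_deferred(zone))
		return COMPACT_SKIPPED;

	status = compact_zone_order(zone, order, gfp_mask, sync,
				    COMPACT_MODE_DIRECT_RECLAIM);

	/*
	 * If compaction did not free suitable pages, defer future attempts;
	 * each call to defer_compaction() doubles the number of skipped
	 * retries, capped at 1 << COMPACT_MAX_DEFER_SHIFT.
	 */
	if (status != COMPACT_PARTIAL)
		defer_compaction(zone);

	return status;
}
#endif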