#ifndef _LINUX_COMPACTION_H
#define _LINUX_COMPACTION_H

/* Return values for compact_zone() and try_to_compact_pages() */
/* compaction didn't start as it was not possible or direct reclaim was more suitable */
#define COMPACT_SKIPPED		0
/* compaction should continue to another pageblock */
#define COMPACT_CONTINUE	1
/* direct compaction partially compacted a zone and there are suitable pages */
#define COMPACT_PARTIAL		2
/* The full zone was compacted */
#define COMPACT_COMPLETE	3

#ifdef CONFIG_COMPACTION
extern int sysctl_compact_memory;
extern int sysctl_compaction_handler(struct ctl_table *table, int write,
			void __user *buffer, size_t *length, loff_t *ppos);
extern int sysctl_extfrag_threshold;
extern int sysctl_extfrag_handler(struct ctl_table *table, int write,
			void __user *buffer, size_t *length, loff_t *ppos);

extern int fragmentation_index(struct zone *zone, unsigned int order);
extern unsigned long try_to_compact_pages(struct zonelist *zonelist,
			int order, gfp_t gfp_mask, nodemask_t *mask,
			bool sync, bool *contended);
extern void compact_pgdat(pg_data_t *pgdat, int order);
extern void reset_isolation_suitable(pg_data_t *pgdat);
extern unsigned long compaction_suitable(struct zone *zone, int order);

/* Do not skip compaction more than 64 times */
#define COMPACT_MAX_DEFER_SHIFT 6

/*
 * Compaction is deferred when compaction fails to result in a page
 * allocation success. 1 << compact_defer_shift compactions are skipped up
 * to a limit of 1 << COMPACT_MAX_DEFER_SHIFT.
 */
static inline void defer_compaction(struct zone *zone, int order)
{
	zone->compact_considered = 0;
	zone->compact_defer_shift++;

	if (order < zone->compact_order_failed)
		zone->compact_order_failed = order;

	if (zone->compact_defer_shift > COMPACT_MAX_DEFER_SHIFT)
		zone->compact_defer_shift = COMPACT_MAX_DEFER_SHIFT;
}

/* Returns true if compaction should be skipped this time */
static inline bool compaction_deferred(struct zone *zone, int order)
{
	unsigned long defer_limit = 1UL << zone->compact_defer_shift;

	if (order < zone->compact_order_failed)
		return false;

	/* Avoid possible overflow */
	if (++zone->compact_considered > defer_limit)
		zone->compact_considered = defer_limit;

	return zone->compact_considered < defer_limit;
}

/* Returns true if restarting compaction after many failures */
static inline bool compaction_restarting(struct zone *zone, int order)
{
	if (order < zone->compact_order_failed)
		return false;

	return zone->compact_defer_shift == COMPACT_MAX_DEFER_SHIFT &&
		zone->compact_considered >= 1UL << zone->compact_defer_shift;
}

#else
static inline unsigned long try_to_compact_pages(struct zonelist *zonelist,
			int order, gfp_t gfp_mask, nodemask_t *nodemask,
			bool sync, bool *contended)
{
	return COMPACT_CONTINUE;
}

static inline void compact_pgdat(pg_data_t *pgdat, int order)
{
}

static inline void reset_isolation_suitable(pg_data_t *pgdat)
{
}

static inline unsigned long compaction_suitable(struct zone *zone, int order)
{
	return COMPACT_SKIPPED;
}

static inline void defer_compaction(struct zone *zone, int order)
{
}

static inline bool compaction_deferred(struct zone *zone, int order)
{
	return true;
}

#endif /* CONFIG_COMPACTION */

#if defined(CONFIG_COMPACTION) && defined(CONFIG_SYSFS) && defined(CONFIG_NUMA)
extern int compaction_register_node(struct node *node);
extern void compaction_unregister_node(struct node *node);

#else

static inline int compaction_register_node(struct node *node)
{
	return 0;
}

static inline void compaction_unregister_node(struct node *node)
{
}
#endif /* CONFIG_COMPACTION && CONFIG_SYSFS && CONFIG_NUMA */

#endif /* _LINUX_COMPACTION_H */
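
/*
 * Illustrative sketch (not part of this header): how a direct-compaction
 * caller might use the deferral helpers declared above. The helper name
 * try_allocation() and the local variables are hypothetical; the real
 * caller lives in mm/page_alloc.c.
 *
 *	if (compaction_deferred(zone, order))
 *		return NULL;	// recent compaction failed; skip this attempt
 *
 *	rc = try_to_compact_pages(zonelist, order, gfp_mask, nodemask,
 *				  sync, &contended);
 *	page = try_allocation(order);	// hypothetical retry of the allocation
 *	if (page) {
 *		// Success: reset the deferral state (compact_considered,
 *		// compact_defer_shift) so future attempts are not throttled.
 *		zone->compact_considered = 0;
 *		zone->compact_defer_shift = 0;
 *	} else if (sync) {
 *		// Sync compaction ran but the allocation still failed:
 *		// back off exponentially before compacting this zone again.
 *		defer_compaction(zone, order);
 *	}
 */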