/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_ZSWAP_H
#define _LINUX_ZSWAP_H

#include <linux/types.h>
#include <linux/mm_types.h>

struct lruvec;

/*
 * Count of pages currently stored in zswap; defined unconditionally so
 * readers do not need CONFIG_ZSWAP #ifdefs.
 */
extern atomic_t zswap_stored_pages;

#ifdef CONFIG_ZSWAP

/* Per-lruvec zswap state, embedded in struct lruvec by the memcg/LRU code. */
struct zswap_lruvec_state {
	/*
	 * Number of swapped in pages from disk, i.e. not found in the zswap pool.
	 *
	 * This is consumed and subtracted from the lru size in
	 * zswap_shrinker_count() to penalize past overshrinking that led to disk
	 * swapins. The idea is that had we considered this many more pages in the
	 * LRU active/protected and not written them back, we would not have had
	 * to swap them in.
	 */
	atomic_long_t nr_disk_swapins;
};

/*
 * zswap API as seen by the swap core (implemented in mm/zswap.c when
 * CONFIG_ZSWAP=y). zswap_store()/zswap_load() return true on success;
 * zswap_swapon()/zswap_swapoff() notify zswap of swap device (de)activation,
 * keyed by swap @type.
 */
unsigned long zswap_total_pages(void);
bool zswap_store(struct folio *folio);
bool zswap_load(struct folio *folio);
void zswap_invalidate(swp_entry_t swp);
int zswap_swapon(int type, unsigned long nr_pages);
void zswap_swapoff(int type);
void zswap_memcg_offline_cleanup(struct mem_cgroup *memcg);
void zswap_lruvec_state_init(struct lruvec *lruvec);
/* Called on a disk swapin; feeds nr_disk_swapins above (see struct comment). */
void zswap_folio_swapin(struct folio *folio);
bool zswap_is_enabled(void);
bool zswap_never_enabled(void);
#else

/* Empty placeholder so struct lruvec keeps the same member list either way. */
struct zswap_lruvec_state {};

/*
 * No-op stubs for CONFIG_ZSWAP=n: stores/loads always report failure so the
 * swap core falls through to the backing device, and callers need no #ifdefs.
 */
static inline bool zswap_store(struct folio *folio)
{
	return false;
}

static inline bool zswap_load(struct folio *folio)
{
	return false;
}

static inline void zswap_invalidate(swp_entry_t swp) {}
static inline int zswap_swapon(int type, unsigned long nr_pages)
{
	return 0;
}
static inline void zswap_swapoff(int type) {}
static inline void zswap_memcg_offline_cleanup(struct mem_cgroup *memcg) {}
static inline void zswap_lruvec_state_init(struct lruvec *lruvec) {}
static inline void zswap_folio_swapin(struct folio *folio) {}

static inline bool zswap_is_enabled(void)
{
	return false;
}

static inline bool zswap_never_enabled(void)
{
	return true;
}

#endif

#endif /* _LINUX_ZSWAP_H */