/*
 * include/linux/backing-dev.h
 *
 * low-level device information and state which is propagated up through
 * to high-level code.
 */

#ifndef _LINUX_BACKING_DEV_H
#define _LINUX_BACKING_DEV_H

#include <asm/atomic.h>

struct page;

/*
 * Bits in backing_dev_info.state
 */
enum bdi_state {
	BDI_pdflush,		/* A pdflush thread is working this device */
	BDI_write_congested,	/* The write queue is getting full */
	BDI_read_congested,	/* The read queue is getting full */
	BDI_unused,		/* Available bits start here */
};

typedef int (congested_fn)(void *, int);

struct backing_dev_info {
	unsigned long ra_pages;		/* max readahead in PAGE_CACHE_SIZE units */
	unsigned long state;		/* Always use atomic bitops on this */
	unsigned int capabilities;	/* Device capabilities */
	congested_fn *congested_fn;	/* Function pointer if device is md/dm */
	void *congested_data;		/* Pointer to aux data for congested func */
	void (*unplug_io_fn)(struct backing_dev_info *, struct page *);
	void *unplug_io_data;
};

/*
 * Flags in backing_dev_info::capabilities
 * - The first two flags control whether dirty pages will contribute to the
 *   VM's accounting and whether writepages() should be called for dirty pages
 *   (something that would not, for example, be appropriate for ramfs)
 * - The remaining flags let !MMU mmap() choose between mapping a device
 *   directly and copying immediately for MAP_PRIVATE, which matters most
 *   for ROM filesystems
 */
#define BDI_CAP_NO_ACCT_DIRTY	0x00000001	/* Dirty pages shouldn't contribute to accounting */
#define BDI_CAP_NO_WRITEBACK	0x00000002	/* Don't write pages back */
#define BDI_CAP_MAP_COPY	0x00000004	/* Copy can be mapped (MAP_PRIVATE) */
#define BDI_CAP_MAP_DIRECT	0x00000008	/* Can be mapped directly (MAP_SHARED) */
#define BDI_CAP_READ_MAP	0x00000010	/* Can be mapped for reading */
#define BDI_CAP_WRITE_MAP	0x00000020	/* Can be mapped for writing */
#define BDI_CAP_EXEC_MAP	0x00000040	/* Can be mapped for execution */
#define BDI_CAP_VMFLAGS \
	(BDI_CAP_READ_MAP | BDI_CAP_WRITE_MAP | BDI_CAP_EXEC_MAP)

#if defined(VM_MAYREAD) && \
	(BDI_CAP_READ_MAP != VM_MAYREAD || \
	 BDI_CAP_WRITE_MAP != VM_MAYWRITE || \
	 BDI_CAP_EXEC_MAP != VM_MAYEXEC)
#error please change backing_dev_info::capabilities flags
#endif

extern struct backing_dev_info default_backing_dev_info;
void default_unplug_io_fn(struct backing_dev_info *bdi, struct page *page);

int writeback_acquire(struct backing_dev_info *bdi);
int writeback_in_progress(struct backing_dev_info *bdi);
void writeback_release(struct backing_dev_info *bdi);

static inline int bdi_congested(struct backing_dev_info *bdi, int bdi_bits)
{
	if (bdi->congested_fn)
		return bdi->congested_fn(bdi->congested_data, bdi_bits);
	return (bdi->state & bdi_bits);
}

static inline int bdi_read_congested(struct backing_dev_info *bdi)
{
	return bdi_congested(bdi, 1 << BDI_read_congested);
}

static inline int bdi_write_congested(struct backing_dev_info *bdi)
{
	return bdi_congested(bdi, 1 << BDI_write_congested);
}

static inline int bdi_rw_congested(struct backing_dev_info *bdi)
{
	return bdi_congested(bdi, (1 << BDI_read_congested) |
				  (1 << BDI_write_congested));
}

void clear_bdi_congested(struct backing_dev_info *bdi, int rw);
void set_bdi_congested(struct backing_dev_info *bdi, int rw);
long congestion_wait(int rw, long timeout);
long congestion_wait_interruptible(int rw, long timeout);
void congestion_end(int rw);
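/*
 * Illustrative only: a typical caller checks congestion before queueing
 * more writeback and backs off while the device catches up.  The helper
 * below is a hypothetical sketch, not an API declared by this header; it
 * assumes a context that may sleep (congestion_wait() blocks) and takes
 * WRITE and HZ from the usual kernel headers.
 *
 *	static void example_throttle_writes(struct backing_dev_info *bdi)
 *	{
 *		while (bdi_write_congested(bdi))
 *			congestion_wait(WRITE, HZ / 10);
 *	}
 *
 * Because bdi_congested() routes the check through congested_fn when one
 * is set, the same loop also works for stacked md/dm devices.
 */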
#define bdi_cap_writeback_dirty(bdi) \
	(!((bdi)->capabilities & BDI_CAP_NO_WRITEBACK))

#define bdi_cap_account_dirty(bdi) \
	(!((bdi)->capabilities & BDI_CAP_NO_ACCT_DIRTY))

#define mapping_cap_writeback_dirty(mapping) \
	bdi_cap_writeback_dirty((mapping)->backing_dev_info)

#define mapping_cap_account_dirty(mapping) \
	bdi_cap_account_dirty((mapping)->backing_dev_info)

#endif	/* _LINUX_BACKING_DEV_H */
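/*
 * Illustrative only: a RAM-backed filesystem that wants its dirty pages
 * neither accounted by the VM nor written back could declare a
 * backing_dev_info like the sketch below (ramfs does essentially this);
 * the name example_bdi is made up for illustration.
 *
 *	static struct backing_dev_info example_bdi = {
 *		.ra_pages	= 0,
 *		.capabilities	= BDI_CAP_NO_ACCT_DIRTY | BDI_CAP_NO_WRITEBACK,
 *		.unplug_io_fn	= default_unplug_io_fn,
 *	};
 *
 * With these capabilities, bdi_cap_writeback_dirty() and
 * bdi_cap_account_dirty() both evaluate to false, so core writeback and
 * dirty accounting skip every mapping backed by this device.
 */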