1 /*
2  *  linux/fs/buffer.c
3  *
4  *  Copyright (C) 1991, 1992, 2002  Linus Torvalds
5  */
6 
7 /*
8  * Start bdflush() with kernel_thread not syscall - Paul Gortmaker, 12/95
9  *
10  * Removed a lot of unnecessary code and simplified things now that
11  * the buffer cache isn't our primary cache - Andrew Tridgell 12/96
12  *
13  * Speed up hash, lru, and free list operations.  Use gfp() for allocating
14  * hash table, use SLAB cache for buffer heads. SMP threading.  -DaveM
15  *
16  * Added 32k buffer block sizes - these are required for older ARM systems. - RMK
17  *
18  * async buffer flushing, 1999 Andrea Arcangeli <[email protected]>
19  */
20 
21 #include <linux/kernel.h>
22 #include <linux/syscalls.h>
23 #include <linux/fs.h>
24 #include <linux/mm.h>
25 #include <linux/percpu.h>
26 #include <linux/slab.h>
27 #include <linux/capability.h>
28 #include <linux/blkdev.h>
29 #include <linux/file.h>
30 #include <linux/quotaops.h>
31 #include <linux/highmem.h>
32 #include <linux/module.h>
33 #include <linux/writeback.h>
34 #include <linux/hash.h>
35 #include <linux/suspend.h>
36 #include <linux/buffer_head.h>
37 #include <linux/task_io_accounting_ops.h>
38 #include <linux/bio.h>
39 #include <linux/notifier.h>
40 #include <linux/cpu.h>
41 #include <linux/bitops.h>
42 #include <linux/mpage.h>
43 #include <linux/bit_spinlock.h>
44 
45 static int fsync_buffers_list(spinlock_t *lock, struct list_head *list);
46 
47 #define BH_ENTRY(list) list_entry((list), struct buffer_head, b_assoc_buffers)
48 
49 inline void
50 init_buffer(struct buffer_head *bh, bh_end_io_t *handler, void *private)
51 {
52 	bh->b_end_io = handler;
53 	bh->b_private = private;
54 }
55 
56 static int sync_buffer(void *word)
57 {
58 	struct block_device *bd;
59 	struct buffer_head *bh
60 		= container_of(word, struct buffer_head, b_state);
61 
62 	smp_mb();
63 	bd = bh->b_bdev;
64 	if (bd)
65 		blk_run_address_space(bd->bd_inode->i_mapping);
66 	io_schedule();
67 	return 0;
68 }
69 
70 void __lock_buffer(struct buffer_head *bh)
71 {
72 	wait_on_bit_lock(&bh->b_state, BH_Lock, sync_buffer,
73 							TASK_UNINTERRUPTIBLE);
74 }
75 EXPORT_SYMBOL(__lock_buffer);
76 
77 void unlock_buffer(struct buffer_head *bh)
78 {
79 	smp_mb__before_clear_bit();
80 	clear_buffer_locked(bh);
81 	smp_mb__after_clear_bit();
82 	wake_up_bit(&bh->b_state, BH_Lock);
83 }
84 
85 /*
86  * Block until a buffer comes unlocked.  This doesn't stop it
87  * from becoming locked again - you have to lock it yourself
88  * if you want to preserve its state.
89  */
90 void __wait_on_buffer(struct buffer_head * bh)
91 {
92 	wait_on_bit(&bh->b_state, BH_Lock, sync_buffer, TASK_UNINTERRUPTIBLE);
93 }
94 
95 static void
96 __clear_page_buffers(struct page *page)
97 {
98 	ClearPagePrivate(page);
99 	set_page_private(page, 0);
100 	page_cache_release(page);
101 }
102 
103 static void buffer_io_error(struct buffer_head *bh)
104 {
105 	char b[BDEVNAME_SIZE];
106 
107 	printk(KERN_ERR "Buffer I/O error on device %s, logical block %Lu\n",
108 			bdevname(bh->b_bdev, b),
109 			(unsigned long long)bh->b_blocknr);
110 }
111 
112 /*
113  * End-of-IO handler helper function which does not touch the bh after
114  * unlocking it.
115  * Note: unlock_buffer() sort-of does touch the bh after unlocking it, but
116  * a race there is benign: unlock_buffer() only uses the bh's address for
117  * hashing after unlocking the buffer, so it doesn't actually touch the bh
118  * itself.
119  */
120 static void __end_buffer_read_notouch(struct buffer_head *bh, int uptodate)
121 {
122 	if (uptodate) {
123 		set_buffer_uptodate(bh);
124 	} else {
125 		/* This happens, due to failed READA attempts. */
126 		clear_buffer_uptodate(bh);
127 	}
128 	unlock_buffer(bh);
129 }
130 
131 /*
132  * Default synchronous end-of-IO handler..  Just mark it up-to-date and
133  * unlock the buffer. This is what ll_rw_block uses too.
134  */
135 void end_buffer_read_sync(struct buffer_head *bh, int uptodate)
136 {
137 	__end_buffer_read_notouch(bh, uptodate);
138 	put_bh(bh);
139 }
140 
141 void end_buffer_write_sync(struct buffer_head *bh, int uptodate)
142 {
143 	char b[BDEVNAME_SIZE];
144 
145 	if (uptodate) {
146 		set_buffer_uptodate(bh);
147 	} else {
148 		if (!buffer_eopnotsupp(bh) && printk_ratelimit()) {
149 			buffer_io_error(bh);
150 			printk(KERN_WARNING "lost page write due to "
151 					"I/O error on %s\n",
152 				       bdevname(bh->b_bdev, b));
153 		}
154 		set_buffer_write_io_error(bh);
155 		clear_buffer_uptodate(bh);
156 	}
157 	unlock_buffer(bh);
158 	put_bh(bh);
159 }
160 
161 /*
162  * Write out and wait upon all the dirty data associated with a block
163  * device via its mapping.  Does not take the superblock lock.
164  */
165 int sync_blockdev(struct block_device *bdev)
166 {
167 	int ret = 0;
168 
169 	if (bdev)
170 		ret = filemap_write_and_wait(bdev->bd_inode->i_mapping);
171 	return ret;
172 }
173 EXPORT_SYMBOL(sync_blockdev);
174 
175 /*
176  * Write out and wait upon all dirty data associated with this
177  * device.   Filesystem data as well as the underlying block
178  * device.  Takes the superblock lock.
179  */
180 int fsync_bdev(struct block_device *bdev)
181 {
182 	struct super_block *sb = get_super(bdev);
183 	if (sb) {
184 		int res = fsync_super(sb);
185 		drop_super(sb);
186 		return res;
187 	}
188 	return sync_blockdev(bdev);
189 }
190 
191 /**
192  * freeze_bdev  --  lock a filesystem and force it into a consistent state
193  * @bdev:	blockdevice to lock
194  *
195  * This takes the block device bd_mount_sem to make sure no new mounts
196  * happen on bdev until thaw_bdev() is called.
197  * If a superblock is found on this device, we take the s_umount semaphore
198  * on it to make sure nobody unmounts until the snapshot creation is done.
199  */
200 struct super_block *freeze_bdev(struct block_device *bdev)
201 {
202 	struct super_block *sb;
203 
204 	down(&bdev->bd_mount_sem);
205 	sb = get_super(bdev);
206 	if (sb && !(sb->s_flags & MS_RDONLY)) {
207 		sb->s_frozen = SB_FREEZE_WRITE;
208 		smp_wmb();
209 
210 		__fsync_super(sb);
211 
212 		sb->s_frozen = SB_FREEZE_TRANS;
213 		smp_wmb();
214 
215 		sync_blockdev(sb->s_bdev);
216 
217 		if (sb->s_op->write_super_lockfs)
218 			sb->s_op->write_super_lockfs(sb);
219 	}
220 
221 	sync_blockdev(bdev);
222 	return sb;	/* thaw_bdev releases s->s_umount and bd_mount_sem */
223 }
224 EXPORT_SYMBOL(freeze_bdev);
225 
226 /**
227  * thaw_bdev  -- unlock filesystem
228  * @bdev:	blockdevice to unlock
229  * @sb:		associated superblock
230  *
231  * Unlocks the filesystem and marks it writeable again after freeze_bdev().
232  */
233 void thaw_bdev(struct block_device *bdev, struct super_block *sb)
234 {
235 	if (sb) {
236 		BUG_ON(sb->s_bdev != bdev);
237 
238 		if (sb->s_op->unlockfs)
239 			sb->s_op->unlockfs(sb);
240 		sb->s_frozen = SB_UNFROZEN;
241 		smp_wmb();
242 		wake_up(&sb->s_wait_unfrozen);
243 		drop_super(sb);
244 	}
245 
246 	up(&bdev->bd_mount_sem);
247 }
248 EXPORT_SYMBOL(thaw_bdev);
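
/*
 * Example: a snapshot-style caller would pair the two helpers above roughly
 * like this (a sketch only - "bdev" and the snapshot step stand in for
 * whatever the driver actually does):
 *
 *	struct super_block *sb;
 *
 *	sb = freeze_bdev(bdev);
 *	... create the snapshot while new writes are held off ...
 *	thaw_bdev(bdev, sb);
 *
 * freeze_bdev() returns NULL when no superblock is mounted on the device;
 * thaw_bdev() copes with a NULL @sb, so the pairing above still works.
 */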
249 
250 /*
251  * Various filesystems appear to want __find_get_block to be non-blocking.
252  * But it's the page lock which protects the buffers.  To get around this,
253  * we get exclusion from try_to_free_buffers with the blockdev mapping's
254  * private_lock.
255  *
256  * Hack idea: for the blockdev mapping, i_bufferlist_lock contention
257  * may be quite high.  This code could TryLock the page, and if that
258  * succeeds, there is no need to take private_lock. (But if
259  * private_lock is contended then so is mapping->tree_lock).
260  */
261 static struct buffer_head *
262 __find_get_block_slow(struct block_device *bdev, sector_t block)
263 {
264 	struct inode *bd_inode = bdev->bd_inode;
265 	struct address_space *bd_mapping = bd_inode->i_mapping;
266 	struct buffer_head *ret = NULL;
267 	pgoff_t index;
268 	struct buffer_head *bh;
269 	struct buffer_head *head;
270 	struct page *page;
271 	int all_mapped = 1;
272 
273 	index = block >> (PAGE_CACHE_SHIFT - bd_inode->i_blkbits);
274 	page = find_get_page(bd_mapping, index);
275 	if (!page)
276 		goto out;
277 
278 	spin_lock(&bd_mapping->private_lock);
279 	if (!page_has_buffers(page))
280 		goto out_unlock;
281 	head = page_buffers(page);
282 	bh = head;
283 	do {
284 		if (bh->b_blocknr == block) {
285 			ret = bh;
286 			get_bh(bh);
287 			goto out_unlock;
288 		}
289 		if (!buffer_mapped(bh))
290 			all_mapped = 0;
291 		bh = bh->b_this_page;
292 	} while (bh != head);
293 
294 	/* we might be here because some of the buffers on this page are
295 	 * not mapped.  This is due to various races between
296 	 * file io on the block device and getblk.  It gets dealt with
297 	 * elsewhere, don't buffer_error if we had some unmapped buffers
298 	 */
299 	if (all_mapped) {
300 		printk("__find_get_block_slow() failed. "
301 			"block=%llu, b_blocknr=%llu\n",
302 			(unsigned long long)block,
303 			(unsigned long long)bh->b_blocknr);
304 		printk("b_state=0x%08lx, b_size=%zu\n",
305 			bh->b_state, bh->b_size);
306 		printk("device blocksize: %d\n", 1 << bd_inode->i_blkbits);
307 	}
308 out_unlock:
309 	spin_unlock(&bd_mapping->private_lock);
310 	page_cache_release(page);
311 out:
312 	return ret;
313 }
314 
315 /* If invalidate_buffers() will trash dirty buffers, it means some kind
316    of fs corruption is going on. Trashing dirty data always implies losing
317    information that was supposed to be just stored on the physical layer
318    by the user.
319 
320    Thus invalidate_buffers in general usage is not allowed to trash
321    dirty buffers. For example ioctl(BLKFLSBUF) expects dirty data to
322    be preserved.  These buffers are simply skipped.
323 
324    We also skip buffers which are still in use.  For example this can
325    happen if a userspace program is reading the block device.
326 
327    NOTE: In the case where the user removed a removable-media-disk even if
328    there's still dirty data not synced on disk (due to a bug in the device driver
329    or to an error by the user), by not destroying the dirty buffers we could
330    generate corruption also on the next media inserted, thus a parameter is
331    necessary to handle this case in the safest way possible (trying
332    to not corrupt also the new disk inserted with the data belonging to
333    the old now corrupted disk). Also for the ramdisk the natural thing
334    to do in order to release the ramdisk memory is to destroy dirty buffers.
335 
336    These are two special cases. Normal usage implies that the device driver
337    issues a sync on the device (without waiting for I/O completion) and
338    then an invalidate_buffers call that doesn't trash dirty buffers.
339 
340    For handling cache coherency with the blkdev pagecache the 'update' case
341    has been introduced. It is needed to re-read from disk any pinned
342    buffer. NOTE: re-reading from disk is destructive so we can do it only
343    when we assume nobody is changing the buffercache under our I/O and when
344    we think the disk contains more recent information than the buffercache.
345    The update == 1 pass marks the buffers we need to update, the update == 2
346    pass does the actual I/O. */
347 void invalidate_bdev(struct block_device *bdev)
348 {
349 	struct address_space *mapping = bdev->bd_inode->i_mapping;
350 
351 	if (mapping->nrpages == 0)
352 		return;
353 
354 	invalidate_bh_lrus();
355 	invalidate_mapping_pages(mapping, 0, -1);
356 }
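
/*
 * Example: a driver that wants stale cached blocks dropped (and later
 * re-read) after a media change might do, roughly:
 *
 *	sync_blockdev(bdev);
 *	invalidate_bdev(bdev);
 *
 * This is only a sketch - see the long comment above for the rules on
 * dirty and pinned buffers.
 */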
357 
358 /*
359  * Kick pdflush then try to free up some ZONE_NORMAL memory.
360  */
361 static void free_more_memory(void)
362 {
363 	struct zone **zones;
364 	pg_data_t *pgdat;
365 
366 	wakeup_pdflush(1024);
367 	yield();
368 
369 	for_each_online_pgdat(pgdat) {
370 		zones = pgdat->node_zonelists[gfp_zone(GFP_NOFS)].zones;
371 		if (*zones)
372 			try_to_free_pages(zones, 0, GFP_NOFS);
373 	}
374 }
375 
376 /*
377  * I/O completion handler for block_read_full_page() - pages
378  * which come unlocked at the end of I/O.
379  */
380 static void end_buffer_async_read(struct buffer_head *bh, int uptodate)
381 {
382 	unsigned long flags;
383 	struct buffer_head *first;
384 	struct buffer_head *tmp;
385 	struct page *page;
386 	int page_uptodate = 1;
387 
388 	BUG_ON(!buffer_async_read(bh));
389 
390 	page = bh->b_page;
391 	if (uptodate) {
392 		set_buffer_uptodate(bh);
393 	} else {
394 		clear_buffer_uptodate(bh);
395 		if (printk_ratelimit())
396 			buffer_io_error(bh);
397 		SetPageError(page);
398 	}
399 
400 	/*
401 	 * Be _very_ careful from here on. Bad things can happen if
402 	 * two buffer heads end IO at almost the same time and both
403 	 * decide that the page is now completely done.
404 	 */
405 	first = page_buffers(page);
406 	local_irq_save(flags);
407 	bit_spin_lock(BH_Uptodate_Lock, &first->b_state);
408 	clear_buffer_async_read(bh);
409 	unlock_buffer(bh);
410 	tmp = bh;
411 	do {
412 		if (!buffer_uptodate(tmp))
413 			page_uptodate = 0;
414 		if (buffer_async_read(tmp)) {
415 			BUG_ON(!buffer_locked(tmp));
416 			goto still_busy;
417 		}
418 		tmp = tmp->b_this_page;
419 	} while (tmp != bh);
420 	bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
421 	local_irq_restore(flags);
422 
423 	/*
424 	 * If none of the buffers had errors and they are all
425 	 * uptodate then we can set the page uptodate.
426 	 */
427 	if (page_uptodate && !PageError(page))
428 		SetPageUptodate(page);
429 	unlock_page(page);
430 	return;
431 
432 still_busy:
433 	bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
434 	local_irq_restore(flags);
435 	return;
436 }
437 
438 /*
439  * Completion handler for block_write_full_page() - pages which are unlocked
440  * during I/O, and which have PageWriteback cleared upon I/O completion.
441  */
442 static void end_buffer_async_write(struct buffer_head *bh, int uptodate)
443 {
444 	char b[BDEVNAME_SIZE];
445 	unsigned long flags;
446 	struct buffer_head *first;
447 	struct buffer_head *tmp;
448 	struct page *page;
449 
450 	BUG_ON(!buffer_async_write(bh));
451 
452 	page = bh->b_page;
453 	if (uptodate) {
454 		set_buffer_uptodate(bh);
455 	} else {
456 		if (printk_ratelimit()) {
457 			buffer_io_error(bh);
458 			printk(KERN_WARNING "lost page write due to "
459 					"I/O error on %s\n",
460 			       bdevname(bh->b_bdev, b));
461 		}
462 		set_bit(AS_EIO, &page->mapping->flags);
463 		set_buffer_write_io_error(bh);
464 		clear_buffer_uptodate(bh);
465 		SetPageError(page);
466 	}
467 
468 	first = page_buffers(page);
469 	local_irq_save(flags);
470 	bit_spin_lock(BH_Uptodate_Lock, &first->b_state);
471 
472 	clear_buffer_async_write(bh);
473 	unlock_buffer(bh);
474 	tmp = bh->b_this_page;
475 	while (tmp != bh) {
476 		if (buffer_async_write(tmp)) {
477 			BUG_ON(!buffer_locked(tmp));
478 			goto still_busy;
479 		}
480 		tmp = tmp->b_this_page;
481 	}
482 	bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
483 	local_irq_restore(flags);
484 	end_page_writeback(page);
485 	return;
486 
487 still_busy:
488 	bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
489 	local_irq_restore(flags);
490 	return;
491 }
492 
493 /*
494  * If a page's buffers are under an async read (end_buffer_async_read
495  * completion) then there is a possibility that another thread of
496  * control could lock one of the buffers after it has completed
497  * but while some of the other buffers have not completed.  This
498  * locked buffer would confuse end_buffer_async_read() into not unlocking
499  * the page.  So the absence of BH_Async_Read tells end_buffer_async_read()
500  * that this buffer is not under async I/O.
501  *
502  * The page comes unlocked when it has no locked buffer_async buffers
503  * left.
504  *
505  * PageLocked prevents anyone from starting new async I/O reads against any of
506  * the buffers.
507  *
508  * PageWriteback is used to prevent simultaneous writeout of the same
509  * page.
510  *
511  * PageLocked prevents anyone from starting writeback of a page which is
512  * under read I/O (PageWriteback is only ever set against a locked page).
513  */
514 static void mark_buffer_async_read(struct buffer_head *bh)
515 {
516 	bh->b_end_io = end_buffer_async_read;
517 	set_buffer_async_read(bh);
518 }
519 
520 void mark_buffer_async_write(struct buffer_head *bh)
521 {
522 	bh->b_end_io = end_buffer_async_write;
523 	set_buffer_async_write(bh);
524 }
525 EXPORT_SYMBOL(mark_buffer_async_write);
526 
527 
528 /*
529  * fs/buffer.c contains helper functions for buffer-backed address space's
530  * fsync functions.  A common requirement for buffer-based filesystems is
531  * that certain data from the backing blockdev needs to be written out for
532  * a successful fsync().  For example, ext2 indirect blocks need to be
533  * written back and waited upon before fsync() returns.
534  *
535  * The functions mark_buffer_inode_dirty(), fsync_inode_buffers(),
536  * inode_has_buffers() and invalidate_inode_buffers() are provided for the
537  * management of a list of dependent buffers at ->i_mapping->private_list.
538  *
539  * Locking is a little subtle: try_to_free_buffers() will remove buffers
540  * from their controlling inode's queue when they are being freed.  But
541  * try_to_free_buffers() will be operating against the *blockdev* mapping
542  * at the time, not against the S_ISREG file which depends on those buffers.
543  * So the locking for private_list is via the private_lock in the address_space
544  * which backs the buffers.  Which is different from the address_space
545  * against which the buffers are listed.  So for a particular address_space,
546  * mapping->private_lock does *not* protect mapping->private_list!  In fact,
547  * mapping->private_list will always be protected by the backing blockdev's
548  * ->private_lock.
549  *
550  * Which introduces a requirement: all buffers on an address_space's
551  * ->private_list must be from the same address_space: the blockdev's.
552  *
553  * address_spaces which do not place buffers at ->private_list via these
554  * utility functions are free to use private_lock and private_list for
555  * whatever they want.  The only requirement is that list_empty(private_list)
556  * be true at clear_inode() time.
557  *
558  * FIXME: clear_inode should not call invalidate_inode_buffers().  The
559  * filesystems should do that.  invalidate_inode_buffers() should just go
560  * BUG_ON(!list_empty).
561  *
562  * FIXME: mark_buffer_dirty_inode() is a data-plane operation.  It should
563  * take an address_space, not an inode.  And it should be called
564  * mark_buffer_dirty_fsync() to clearly define why those buffers are being
565  * queued up.
566  *
567  * FIXME: mark_buffer_dirty_inode() doesn't need to add the buffer to the
568  * list if it is already on a list.  Because if the buffer is on a list,
569  * it *must* already be on the right one.  If not, the filesystem is being
570  * silly.  This will save a ton of locking.  But first we have to ensure
571  * that buffers are taken *off* the old inode's list when they are freed
572  * (presumably in truncate).  That requires careful auditing of all
573  * filesystems (do it inside bforget()).  It could also be done by bringing
574  * b_inode back.
575  */
576 
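/*
 * Example: this is roughly how a filesystem such as ext2 uses the helpers
 * described above (a sketch; the variable names are the caller's own).
 * When a dependent metadata buffer "bh" is modified on behalf of a regular
 * file "inode":
 *
 *	mark_buffer_dirty_inode(bh, inode);
 *
 * and later, from the filesystem's ->fsync():
 *
 *	err = sync_mapping_buffers(inode->i_mapping);
 *
 * which writes out and waits upon everything queued on the inode's
 * ->private_list.
 */
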
577 /*
578  * The buffer's backing address_space's private_lock must be held
579  */
580 static inline void __remove_assoc_queue(struct buffer_head *bh)
581 {
582 	list_del_init(&bh->b_assoc_buffers);
583 	WARN_ON(!bh->b_assoc_map);
584 	if (buffer_write_io_error(bh))
585 		set_bit(AS_EIO, &bh->b_assoc_map->flags);
586 	bh->b_assoc_map = NULL;
587 }
588 
589 int inode_has_buffers(struct inode *inode)
590 {
591 	return !list_empty(&inode->i_data.private_list);
592 }
593 
594 /*
595  * osync is designed to support O_SYNC io.  It waits synchronously for
596  * all already-submitted IO to complete, but does not queue any new
597  * writes to the disk.
598  *
599  * To do O_SYNC writes, just queue the buffer writes with ll_rw_block as
600  * you dirty the buffers, and then use osync_inode_buffers to wait for
601  * completion.  Any other dirty buffers which are not yet queued for
602  * write will not be flushed to disk by the osync.
603  */
604 static int osync_buffers_list(spinlock_t *lock, struct list_head *list)
605 {
606 	struct buffer_head *bh;
607 	struct list_head *p;
608 	int err = 0;
609 
610 	spin_lock(lock);
611 repeat:
612 	list_for_each_prev(p, list) {
613 		bh = BH_ENTRY(p);
614 		if (buffer_locked(bh)) {
615 			get_bh(bh);
616 			spin_unlock(lock);
617 			wait_on_buffer(bh);
618 			if (!buffer_uptodate(bh))
619 				err = -EIO;
620 			brelse(bh);
621 			spin_lock(lock);
622 			goto repeat;
623 		}
624 	}
625 	spin_unlock(lock);
626 	return err;
627 }
628 
629 /**
630  * sync_mapping_buffers - write out & wait upon a mapping's "associated" buffers
631  * @mapping: the mapping which wants those buffers written
632  *
633  * Starts I/O against the buffers at mapping->private_list, and waits upon
634  * that I/O.
635  *
636  * Basically, this is a convenience function for fsync().
637  * @mapping is a file or directory which needs those buffers to be written for
638  * a successful fsync().
639  */
640 int sync_mapping_buffers(struct address_space *mapping)
641 {
642 	struct address_space *buffer_mapping = mapping->assoc_mapping;
643 
644 	if (buffer_mapping == NULL || list_empty(&mapping->private_list))
645 		return 0;
646 
647 	return fsync_buffers_list(&buffer_mapping->private_lock,
648 					&mapping->private_list);
649 }
650 EXPORT_SYMBOL(sync_mapping_buffers);
651 
652 /*
653  * Called when we've recently written block `bblock', and it is known that
654  * `bblock' was for a buffer_boundary() buffer.  This means that the block at
655  * `bblock + 1' is probably a dirty indirect block.  Hunt it down and, if it's
656  * dirty, schedule it for IO.  So that indirects merge nicely with their data.
657  */
658 void write_boundary_block(struct block_device *bdev,
659 			sector_t bblock, unsigned blocksize)
660 {
661 	struct buffer_head *bh = __find_get_block(bdev, bblock + 1, blocksize);
662 	if (bh) {
663 		if (buffer_dirty(bh))
664 			ll_rw_block(WRITE, 1, &bh);
665 		put_bh(bh);
666 	}
667 }
668 
669 void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode)
670 {
671 	struct address_space *mapping = inode->i_mapping;
672 	struct address_space *buffer_mapping = bh->b_page->mapping;
673 
674 	mark_buffer_dirty(bh);
675 	if (!mapping->assoc_mapping) {
676 		mapping->assoc_mapping = buffer_mapping;
677 	} else {
678 		BUG_ON(mapping->assoc_mapping != buffer_mapping);
679 	}
680 	if (!bh->b_assoc_map) {
681 		spin_lock(&buffer_mapping->private_lock);
682 		list_move_tail(&bh->b_assoc_buffers,
683 				&mapping->private_list);
684 		bh->b_assoc_map = mapping;
685 		spin_unlock(&buffer_mapping->private_lock);
686 	}
687 }
688 EXPORT_SYMBOL(mark_buffer_dirty_inode);
689 
690 /*
691  * Mark the page dirty, and set it dirty in the radix tree, and mark the inode
692  * dirty.
693  *
694  * If warn is true, then emit a warning if the page is not uptodate and has
695  * not been truncated.
696  */
697 static int __set_page_dirty(struct page *page,
698 		struct address_space *mapping, int warn)
699 {
700 	if (unlikely(!mapping))
701 		return !TestSetPageDirty(page);
702 
703 	if (TestSetPageDirty(page))
704 		return 0;
705 
706 	write_lock_irq(&mapping->tree_lock);
707 	if (page->mapping) {	/* Race with truncate? */
708 		WARN_ON_ONCE(warn && !PageUptodate(page));
709 
710 		if (mapping_cap_account_dirty(mapping)) {
711 			__inc_zone_page_state(page, NR_FILE_DIRTY);
712 			__inc_bdi_stat(mapping->backing_dev_info,
713 					BDI_RECLAIMABLE);
714 			task_io_account_write(PAGE_CACHE_SIZE);
715 		}
716 		radix_tree_tag_set(&mapping->page_tree,
717 				page_index(page), PAGECACHE_TAG_DIRTY);
718 	}
719 	write_unlock_irq(&mapping->tree_lock);
720 	__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
721 
722 	return 1;
723 }
724 
725 /*
726  * Add a page to the dirty page list.
727  *
728  * It is a sad fact of life that this function is called from several places
729  * deeply under spinlocking.  It may not sleep.
730  *
731  * If the page has buffers, the uptodate buffers are set dirty, to preserve
732  * dirty-state coherency between the page and the buffers.  If the page does
733  * not have buffers then when they are later attached they will all be set
734  * dirty.
735  *
736  * The buffers are dirtied before the page is dirtied.  There's a small race
737  * window in which a writepage caller may see the page cleanness but not the
738  * buffer dirtiness.  That's fine.  If this code were to set the page dirty
739  * before the buffers, a concurrent writepage caller could clear the page dirty
740  * bit, see a bunch of clean buffers and we'd end up with dirty buffers/clean
741  * page on the dirty page list.
742  *
743  * We use private_lock to lock against try_to_free_buffers while using the
744  * page's buffer list.  Also use this to protect against clean buffers being
745  * added to the page after it was set dirty.
746  *
747  * FIXME: may need to call ->reservepage here as well.  That's rather up to the
748  * address_space though.
749  */
750 int __set_page_dirty_buffers(struct page *page)
751 {
752 	struct address_space *mapping = page_mapping(page);
753 
754 	if (unlikely(!mapping))
755 		return !TestSetPageDirty(page);
756 
757 	spin_lock(&mapping->private_lock);
758 	if (page_has_buffers(page)) {
759 		struct buffer_head *head = page_buffers(page);
760 		struct buffer_head *bh = head;
761 
762 		do {
763 			set_buffer_dirty(bh);
764 			bh = bh->b_this_page;
765 		} while (bh != head);
766 	}
767 	spin_unlock(&mapping->private_lock);
768 
769 	return __set_page_dirty(page, mapping, 1);
770 }
771 EXPORT_SYMBOL(__set_page_dirty_buffers);
772 
773 /*
774  * Write out and wait upon a list of buffers.
775  *
776  * We have conflicting pressures: we want to make sure that all
777  * initially dirty buffers get waited on, but that any subsequently
778  * dirtied buffers don't.  After all, we don't want fsync to last
779  * forever if somebody is actively writing to the file.
780  *
781  * Do this in two main stages: first we copy dirty buffers to a
782  * temporary inode list, queueing the writes as we go.  Then we clean
783  * up, waiting for those writes to complete.
784  *
785  * During this second stage, any subsequent updates to the file may end
786  * up refiling the buffer on the original inode's dirty list again, so
787  * there is a chance we will end up with a buffer queued for write but
788  * not yet completed on that list.  So, as a final cleanup we go through
789  * the osync code to catch these locked, dirty buffers without requeuing
790  * any newly dirty buffers for write.
791  */
792 static int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
793 {
794 	struct buffer_head *bh;
795 	struct list_head tmp;
796 	struct address_space *mapping;
797 	int err = 0, err2;
798 
799 	INIT_LIST_HEAD(&tmp);
800 
801 	spin_lock(lock);
802 	while (!list_empty(list)) {
803 		bh = BH_ENTRY(list->next);
804 		mapping = bh->b_assoc_map;
805 		__remove_assoc_queue(bh);
806 		/* Avoid race with mark_buffer_dirty_inode() which does
807 		 * a lockless check and we rely on seeing the dirty bit */
808 		smp_mb();
809 		if (buffer_dirty(bh) || buffer_locked(bh)) {
810 			list_add(&bh->b_assoc_buffers, &tmp);
811 			bh->b_assoc_map = mapping;
812 			if (buffer_dirty(bh)) {
813 				get_bh(bh);
814 				spin_unlock(lock);
815 				/*
816 				 * Ensure any pending I/O completes so that
817 				 * ll_rw_block() actually writes the current
818 				 * contents - it is a noop if I/O is still in
819 				 * flight on potentially older contents.
820 				 */
821 				ll_rw_block(SWRITE, 1, &bh);
822 				brelse(bh);
823 				spin_lock(lock);
824 			}
825 		}
826 	}
827 
828 	while (!list_empty(&tmp)) {
829 		bh = BH_ENTRY(tmp.prev);
830 		get_bh(bh);
831 		mapping = bh->b_assoc_map;
832 		__remove_assoc_queue(bh);
833 		/* Avoid race with mark_buffer_dirty_inode() which does
834 		 * a lockless check and we rely on seeing the dirty bit */
835 		smp_mb();
836 		if (buffer_dirty(bh)) {
837 			list_add(&bh->b_assoc_buffers,
838 				 &mapping->private_list);
839 			bh->b_assoc_map = mapping;
840 		}
841 		spin_unlock(lock);
842 		wait_on_buffer(bh);
843 		if (!buffer_uptodate(bh))
844 			err = -EIO;
845 		brelse(bh);
846 		spin_lock(lock);
847 	}
848 
849 	spin_unlock(lock);
850 	err2 = osync_buffers_list(lock, list);
851 	if (err)
852 		return err;
853 	else
854 		return err2;
855 }
856 
857 /*
858  * Invalidate any and all dirty buffers on a given inode.  We are
859  * probably unmounting the fs, but that doesn't mean we have already
860  * done a sync().  Just drop the buffers from the inode list.
861  *
862  * NOTE: we take the inode's blockdev's mapping's private_lock.  Which
863  * assumes that all the buffers are against the blockdev.  Not true
864  * for reiserfs.
865  */
866 void invalidate_inode_buffers(struct inode *inode)
867 {
868 	if (inode_has_buffers(inode)) {
869 		struct address_space *mapping = &inode->i_data;
870 		struct list_head *list = &mapping->private_list;
871 		struct address_space *buffer_mapping = mapping->assoc_mapping;
872 
873 		spin_lock(&buffer_mapping->private_lock);
874 		while (!list_empty(list))
875 			__remove_assoc_queue(BH_ENTRY(list->next));
876 		spin_unlock(&buffer_mapping->private_lock);
877 	}
878 }
879 
880 /*
881  * Remove any clean buffers from the inode's buffer list.  This is called
882  * when we're trying to free the inode itself.  Those buffers can pin it.
883  *
884  * Returns true if all buffers were removed.
885  */
886 int remove_inode_buffers(struct inode *inode)
887 {
888 	int ret = 1;
889 
890 	if (inode_has_buffers(inode)) {
891 		struct address_space *mapping = &inode->i_data;
892 		struct list_head *list = &mapping->private_list;
893 		struct address_space *buffer_mapping = mapping->assoc_mapping;
894 
895 		spin_lock(&buffer_mapping->private_lock);
896 		while (!list_empty(list)) {
897 			struct buffer_head *bh = BH_ENTRY(list->next);
898 			if (buffer_dirty(bh)) {
899 				ret = 0;
900 				break;
901 			}
902 			__remove_assoc_queue(bh);
903 		}
904 		spin_unlock(&buffer_mapping->private_lock);
905 	}
906 	return ret;
907 }
908 
909 /*
910  * Create the appropriate buffers when given a page for a data area and
911  * the size of each buffer.  Use the bh->b_this_page linked list to
912  * follow the buffers created.  Return NULL if unable to create more
913  * buffers.
914  *
915  * The retry flag is used to differentiate async IO (paging, swapping)
916  * which may not fail from ordinary buffer allocations.
917  */
918 struct buffer_head *alloc_page_buffers(struct page *page, unsigned long size,
919 		int retry)
920 {
921 	struct buffer_head *bh, *head;
922 	long offset;
923 
924 try_again:
925 	head = NULL;
926 	offset = PAGE_SIZE;
927 	while ((offset -= size) >= 0) {
928 		bh = alloc_buffer_head(GFP_NOFS);
929 		if (!bh)
930 			goto no_grow;
931 
932 		bh->b_bdev = NULL;
933 		bh->b_this_page = head;
934 		bh->b_blocknr = -1;
935 		head = bh;
936 
937 		bh->b_state = 0;
938 		atomic_set(&bh->b_count, 0);
939 		bh->b_private = NULL;
940 		bh->b_size = size;
941 
942 		/* Link the buffer to its page */
943 		set_bh_page(bh, page, offset);
944 
945 		init_buffer(bh, NULL, NULL);
946 	}
947 	return head;
948 /*
949  * In case anything failed, we just free everything we got.
950  */
951 no_grow:
952 	if (head) {
953 		do {
954 			bh = head;
955 			head = head->b_this_page;
956 			free_buffer_head(bh);
957 		} while (head);
958 	}
959 
960 	/*
961 	 * Return failure for non-async IO requests.  Async IO requests
962 	 * are not allowed to fail, so we have to wait until buffer heads
963 	 * become available.  But we don't want tasks sleeping with
964 	 * partially complete buffers, so all were released above.
965 	 */
966 	if (!retry)
967 		return NULL;
968 
969 	/* We're _really_ low on memory. Now we just
970 	 * wait for old buffer heads to become free due to
971 	 * finishing IO.  Since this is an async request and
972 	 * the reserve list is empty, we're sure there are
973 	 * async buffer heads in use.
974 	 */
975 	free_more_memory();
976 	goto try_again;
977 }
978 EXPORT_SYMBOL_GPL(alloc_page_buffers);
979 
980 static inline void
981 link_dev_buffers(struct page *page, struct buffer_head *head)
982 {
983 	struct buffer_head *bh, *tail;
984 
985 	bh = head;
986 	do {
987 		tail = bh;
988 		bh = bh->b_this_page;
989 	} while (bh);
990 	tail->b_this_page = head;
991 	attach_page_buffers(page, head);
992 }
993 
994 /*
995  * Initialise the state of a blockdev page's buffers.
996  */
997 static void
998 init_page_buffers(struct page *page, struct block_device *bdev,
999 			sector_t block, int size)
1000 {
1001 	struct buffer_head *head = page_buffers(page);
1002 	struct buffer_head *bh = head;
1003 	int uptodate = PageUptodate(page);
1004 
1005 	do {
1006 		if (!buffer_mapped(bh)) {
1007 			init_buffer(bh, NULL, NULL);
1008 			bh->b_bdev = bdev;
1009 			bh->b_blocknr = block;
1010 			if (uptodate)
1011 				set_buffer_uptodate(bh);
1012 			set_buffer_mapped(bh);
1013 		}
1014 		block++;
1015 		bh = bh->b_this_page;
1016 	} while (bh != head);
1017 }
1018 
1019 /*
1020  * Create the page-cache page that contains the requested block.
1021  *
1022  * This is used purely for blockdev mappings.
1023  */
1024 static struct page *
1025 grow_dev_page(struct block_device *bdev, sector_t block,
1026 		pgoff_t index, int size)
1027 {
1028 	struct inode *inode = bdev->bd_inode;
1029 	struct page *page;
1030 	struct buffer_head *bh;
1031 
1032 	page = find_or_create_page(inode->i_mapping, index,
1033 		(mapping_gfp_mask(inode->i_mapping) & ~__GFP_FS)|__GFP_MOVABLE);
1034 	if (!page)
1035 		return NULL;
1036 
1037 	BUG_ON(!PageLocked(page));
1038 
1039 	if (page_has_buffers(page)) {
1040 		bh = page_buffers(page);
1041 		if (bh->b_size == size) {
1042 			init_page_buffers(page, bdev, block, size);
1043 			return page;
1044 		}
1045 		if (!try_to_free_buffers(page))
1046 			goto failed;
1047 	}
1048 
1049 	/*
1050 	 * Allocate some buffers for this page
1051 	 */
1052 	bh = alloc_page_buffers(page, size, 0);
1053 	if (!bh)
1054 		goto failed;
1055 
1056 	/*
1057 	 * Link the page to the buffers and initialise them.  Take the
1058 	 * lock to be atomic wrt __find_get_block(), which does not
1059 	 * run under the page lock.
1060 	 */
1061 	spin_lock(&inode->i_mapping->private_lock);
1062 	link_dev_buffers(page, bh);
1063 	init_page_buffers(page, bdev, block, size);
1064 	spin_unlock(&inode->i_mapping->private_lock);
1065 	return page;
1066 
1067 failed:
1068 	BUG();
1069 	unlock_page(page);
1070 	page_cache_release(page);
1071 	return NULL;
1072 }
1073 
1074 /*
1075  * Create buffers for the specified block device block's page.  If
1076  * that page was dirty, the buffers are set dirty also.
1077  */
1078 static int
1079 grow_buffers(struct block_device *bdev, sector_t block, int size)
1080 {
1081 	struct page *page;
1082 	pgoff_t index;
1083 	int sizebits;
1084 
1085 	sizebits = -1;
1086 	do {
1087 		sizebits++;
1088 	} while ((size << sizebits) < PAGE_SIZE);
1089 
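	/*
	 * For the usual power-of-two block sizes, sizebits is now
	 * log2(PAGE_SIZE / size): e.g. with 4K pages and 1K blocks,
	 * sizebits == 2, so block 11 lands in page index 11 >> 2 == 2,
	 * whose first block is 2 << 2 == 8.
	 */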
1090 	index = block >> sizebits;
1091 
1092 	/*
1093 	 * Check for a block which wants to lie outside our maximum possible
1094 	 * pagecache index.  (this comparison is done using sector_t types).
1095 	 */
1096 	if (unlikely(index != block >> sizebits)) {
1097 		char b[BDEVNAME_SIZE];
1098 
1099 		printk(KERN_ERR "%s: requested out-of-range block %llu for "
1100 			"device %s\n",
1101 			__FUNCTION__, (unsigned long long)block,
1102 			bdevname(bdev, b));
1103 		return -EIO;
1104 	}
1105 	block = index << sizebits;
1106 	/* Create a page with the proper size buffers.. */
1107 	page = grow_dev_page(bdev, block, index, size);
1108 	if (!page)
1109 		return 0;
1110 	unlock_page(page);
1111 	page_cache_release(page);
1112 	return 1;
1113 }
1114 
1115 static struct buffer_head *
1116 __getblk_slow(struct block_device *bdev, sector_t block, int size)
1117 {
1118 	/* Size must be a multiple of the hard sector size */
1119 	if (unlikely(size & (bdev_hardsect_size(bdev)-1) ||
1120 			(size < 512 || size > PAGE_SIZE))) {
1121 		printk(KERN_ERR "getblk(): invalid block size %d requested\n",
1122 					size);
1123 		printk(KERN_ERR "hardsect size: %d\n",
1124 					bdev_hardsect_size(bdev));
1125 
1126 		dump_stack();
1127 		return NULL;
1128 	}
1129 
1130 	for (;;) {
1131 		struct buffer_head * bh;
1132 		int ret;
1133 
1134 		bh = __find_get_block(bdev, block, size);
1135 		if (bh)
1136 			return bh;
1137 
1138 		ret = grow_buffers(bdev, block, size);
1139 		if (ret < 0)
1140 			return NULL;
1141 		if (ret == 0)
1142 			free_more_memory();
1143 	}
1144 }
1145 
1146 /*
1147  * The relationship between dirty buffers and dirty pages:
1148  *
1149  * Whenever a page has any dirty buffers, the page's dirty bit is set, and
1150  * the page is tagged dirty in its radix tree.
1151  *
1152  * At all times, the dirtiness of the buffers represents the dirtiness of
1153  * subsections of the page.  If the page has buffers, the page dirty bit is
1154  * merely a hint about the true dirty state.
1155  *
1156  * When a page is set dirty in its entirety, all its buffers are marked dirty
1157  * (if the page has buffers).
1158  *
1159  * When a buffer is marked dirty, its page is dirtied, but the page's other
1160  * buffers are not.
1161  *
1162  * Also.  When blockdev buffers are explicitly read with bread(), they
1163  * individually become uptodate.  But their backing page remains not
1164  * uptodate - even if all of its buffers are uptodate.  A subsequent
1165  * block_read_full_page() against that page will discover all the uptodate
1166  * buffers, will set the page uptodate and will perform no I/O.
1167  */
1168 
1169 /**
1170  * mark_buffer_dirty - mark a buffer_head as needing writeout
1171  * @bh: the buffer_head to mark dirty
1172  *
1173  * mark_buffer_dirty() will set the dirty bit against the buffer, then set its
1174  * backing page dirty, then tag the page as dirty in its address_space's radix
1175  * tree and then attach the address_space's inode to its superblock's dirty
1176  * inode list.
1177  *
1178  * mark_buffer_dirty() is atomic.  It takes bh->b_page->mapping->private_lock,
1179  * mapping->tree_lock and the global inode_lock.
1180  */
1181 void mark_buffer_dirty(struct buffer_head *bh)
1182 {
1183 	WARN_ON_ONCE(!buffer_uptodate(bh));
1184 
1185 	/*
1186 	 * Very *carefully* optimize the it-is-already-dirty case.
1187 	 *
1188 	 * Don't let the final "is it dirty" escape to before we
1189 	 * perhaps modified the buffer.
1190 	 */
1191 	if (buffer_dirty(bh)) {
1192 		smp_mb();
1193 		if (buffer_dirty(bh))
1194 			return;
1195 	}
1196 
1197 	if (!test_set_buffer_dirty(bh))
1198 		__set_page_dirty(bh->b_page, page_mapping(bh->b_page), 0);
1199 }
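
/*
 * Example: the usual pattern for updating metadata through the buffer cache
 * is roughly the following (a sketch - "sb" and "block" are whatever the
 * caller has at hand, and error handling is omitted):
 *
 *	struct buffer_head *bh = sb_bread(sb, block);
 *
 *	... modify bh->b_data ...
 *	mark_buffer_dirty(bh);
 *	brelse(bh);
 *
 * mark_buffer_dirty() starts no I/O itself; the buffer is written back
 * later by the normal writeback paths (or by an explicit sync).
 */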
1200 
1201 /*
1202  * Decrement a buffer_head's reference count.  If all buffers against a page
1203  * have zero reference count, are clean and unlocked, and if the page is clean
1204  * and unlocked then try_to_free_buffers() may strip the buffers from the page
1205  * in preparation for freeing it (sometimes, rarely, buffers are removed from
1206  * a page but it ends up not being freed, and buffers may later be reattached).
1207  */
1208 void __brelse(struct buffer_head * buf)
1209 {
1210 	if (atomic_read(&buf->b_count)) {
1211 		put_bh(buf);
1212 		return;
1213 	}
1214 	printk(KERN_ERR "VFS: brelse: Trying to free free buffer\n");
1215 	WARN_ON(1);
1216 }
1217 
1218 /*
1219  * bforget() is like brelse(), except it discards any
1220  * potentially dirty data.
1221  */
1222 void __bforget(struct buffer_head *bh)
1223 {
1224 	clear_buffer_dirty(bh);
1225 	if (bh->b_assoc_map) {
1226 		struct address_space *buffer_mapping = bh->b_page->mapping;
1227 
1228 		spin_lock(&buffer_mapping->private_lock);
1229 		list_del_init(&bh->b_assoc_buffers);
1230 		bh->b_assoc_map = NULL;
1231 		spin_unlock(&buffer_mapping->private_lock);
1232 	}
1233 	__brelse(bh);
1234 }
1235 
1236 static struct buffer_head *__bread_slow(struct buffer_head *bh)
1237 {
1238 	lock_buffer(bh);
1239 	if (buffer_uptodate(bh)) {
1240 		unlock_buffer(bh);
1241 		return bh;
1242 	} else {
1243 		get_bh(bh);
1244 		bh->b_end_io = end_buffer_read_sync;
1245 		submit_bh(READ, bh);
1246 		wait_on_buffer(bh);
1247 		if (buffer_uptodate(bh))
1248 			return bh;
1249 	}
1250 	brelse(bh);
1251 	return NULL;
1252 }
1253 
1254 /*
1255  * Per-cpu buffer LRU implementation.  To reduce the cost of __find_get_block().
1256  * The bhs[] array is sorted - newest buffer is at bhs[0].  Buffers have their
1257  * refcount elevated by one when they're in an LRU.  A buffer can only appear
1258  * once in a particular CPU's LRU.  A single buffer can be present in multiple
1259  * CPU's LRUs at the same time.
1260  *
1261  * This is a transparent caching front-end to sb_bread(), sb_getblk() and
1262  * sb_find_get_block().
1263  *
1264  * The LRUs themselves only need locking against invalidate_bh_lrus.  We use
1265  * a local interrupt disable for that.
1266  */
1267 
1268 #define BH_LRU_SIZE	8
1269 
1270 struct bh_lru {
1271 	struct buffer_head *bhs[BH_LRU_SIZE];
1272 };
1273 
1274 static DEFINE_PER_CPU(struct bh_lru, bh_lrus) = {{ NULL }};
1275 
1276 #ifdef CONFIG_SMP
1277 #define bh_lru_lock()	local_irq_disable()
1278 #define bh_lru_unlock()	local_irq_enable()
1279 #else
1280 #define bh_lru_lock()	preempt_disable()
1281 #define bh_lru_unlock()	preempt_enable()
1282 #endif
1283 
1284 static inline void check_irqs_on(void)
1285 {
1286 #ifdef irqs_disabled
1287 	BUG_ON(irqs_disabled());
1288 #endif
1289 }
1290 
1291 /*
1292  * The LRU management algorithm is dopey-but-simple.  Sorry.
1293  */
1294 static void bh_lru_install(struct buffer_head *bh)
1295 {
1296 	struct buffer_head *evictee = NULL;
1297 	struct bh_lru *lru;
1298 
1299 	check_irqs_on();
1300 	bh_lru_lock();
1301 	lru = &__get_cpu_var(bh_lrus);
1302 	if (lru->bhs[0] != bh) {
1303 		struct buffer_head *bhs[BH_LRU_SIZE];
1304 		int in;
1305 		int out = 0;
1306 
1307 		get_bh(bh);
1308 		bhs[out++] = bh;
1309 		for (in = 0; in < BH_LRU_SIZE; in++) {
1310 			struct buffer_head *bh2 = lru->bhs[in];
1311 
1312 			if (bh2 == bh) {
1313 				__brelse(bh2);
1314 			} else {
1315 				if (out >= BH_LRU_SIZE) {
1316 					BUG_ON(evictee != NULL);
1317 					evictee = bh2;
1318 				} else {
1319 					bhs[out++] = bh2;
1320 				}
1321 			}
1322 		}
1323 		while (out < BH_LRU_SIZE)
1324 			bhs[out++] = NULL;
1325 		memcpy(lru->bhs, bhs, sizeof(bhs));
1326 	}
1327 	bh_lru_unlock();
1328 
1329 	if (evictee)
1330 		__brelse(evictee);
1331 }
1332 
1333 /*
1334  * Look up the bh in this cpu's LRU.  If it's there, move it to the head.
1335  */
1336 static struct buffer_head *
1337 lookup_bh_lru(struct block_device *bdev, sector_t block, unsigned size)
1338 {
1339 	struct buffer_head *ret = NULL;
1340 	struct bh_lru *lru;
1341 	unsigned int i;
1342 
1343 	check_irqs_on();
1344 	bh_lru_lock();
1345 	lru = &__get_cpu_var(bh_lrus);
1346 	for (i = 0; i < BH_LRU_SIZE; i++) {
1347 		struct buffer_head *bh = lru->bhs[i];
1348 
1349 		if (bh && bh->b_bdev == bdev &&
1350 				bh->b_blocknr == block && bh->b_size == size) {
1351 			if (i) {
1352 				while (i) {
1353 					lru->bhs[i] = lru->bhs[i - 1];
1354 					i--;
1355 				}
1356 				lru->bhs[0] = bh;
1357 			}
1358 			get_bh(bh);
1359 			ret = bh;
1360 			break;
1361 		}
1362 	}
1363 	bh_lru_unlock();
1364 	return ret;
1365 }
1366 
1367 /*
1368  * Perform a pagecache lookup for the matching buffer.  If it's there, refresh
1369  * it in the LRU and mark it as accessed.  If it is not present then return
1370  * NULL.
1371  */
1372 struct buffer_head *
1373 __find_get_block(struct block_device *bdev, sector_t block, unsigned size)
1374 {
1375 	struct buffer_head *bh = lookup_bh_lru(bdev, block, size);
1376 
1377 	if (bh == NULL) {
1378 		bh = __find_get_block_slow(bdev, block);
1379 		if (bh)
1380 			bh_lru_install(bh);
1381 	}
1382 	if (bh)
1383 		touch_buffer(bh);
1384 	return bh;
1385 }
1386 EXPORT_SYMBOL(__find_get_block);
1387 
1388 /*
1389  * __getblk will locate (and, if necessary, create) the buffer_head
1390  * which corresponds to the passed block_device, block and size. The
1391  * returned buffer has its reference count incremented.
1392  *
1393  * __getblk() cannot fail - it just keeps trying.  If you pass it an
1394  * illegal block number, __getblk() will happily return a buffer_head
1395  * which represents the non-existent block.  Very weird.
1396  *
1397  * __getblk() will lock up the machine if grow_dev_page's try_to_free_buffers()
1398  * attempt is failing.  FIXME, perhaps?
1399  */
1400 struct buffer_head *
1401 __getblk(struct block_device *bdev, sector_t block, unsigned size)
1402 {
1403 	struct buffer_head *bh = __find_get_block(bdev, block, size);
1404 
1405 	might_sleep();
1406 	if (bh == NULL)
1407 		bh = __getblk_slow(bdev, block, size);
1408 	return bh;
1409 }
1410 EXPORT_SYMBOL(__getblk);
1411 
1412 /*
1413  * Do async read-ahead on a buffer..
1414  */
1415 void __breadahead(struct block_device *bdev, sector_t block, unsigned size)
1416 {
1417 	struct buffer_head *bh = __getblk(bdev, block, size);
1418 	if (likely(bh)) {
1419 		ll_rw_block(READA, 1, &bh);
1420 		brelse(bh);
1421 	}
1422 }
1423 EXPORT_SYMBOL(__breadahead);
1424 
1425 /**
1426  *  __bread() - reads a specified block and returns the bh
1427  *  @bdev: the block_device to read from
1428  *  @block: number of block
1429  *  @size: size (in bytes) to read
1430  *
1431  *  Reads a specified block, and returns buffer head that contains it.
1432  *  It returns NULL if the block was unreadable.
1433  */
1434 struct buffer_head *
1435 __bread(struct block_device *bdev, sector_t block, unsigned size)
1436 {
1437 	struct buffer_head *bh = __getblk(bdev, block, size);
1438 
1439 	if (likely(bh) && !buffer_uptodate(bh))
1440 		bh = __bread_slow(bh);
1441 	return bh;
1442 }
1443 EXPORT_SYMBOL(__bread);
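
/*
 * Example: synchronously reading a single block (a sketch - "bdev", "block"
 * and the 512-byte size are illustrative):
 *
 *	struct buffer_head *bh = __bread(bdev, block, 512);
 *
 *	if (!bh)
 *		return -EIO;
 *	memcpy(buf, bh->b_data, 512);
 *	brelse(bh);
 *
 * Filesystems normally use the sb_bread() wrapper, which supplies the
 * superblock's block device and block size.
 */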
1444 
1445 /*
1446  * invalidate_bh_lrus() is called rarely - but not only at unmount.
1447  * This doesn't race because it runs in each cpu either in irq
1448  * or with preempt disabled.
1449  */
1450 static void invalidate_bh_lru(void *arg)
1451 {
1452 	struct bh_lru *b = &get_cpu_var(bh_lrus);
1453 	int i;
1454 
1455 	for (i = 0; i < BH_LRU_SIZE; i++) {
1456 		brelse(b->bhs[i]);
1457 		b->bhs[i] = NULL;
1458 	}
1459 	put_cpu_var(bh_lrus);
1460 }
1461 
1462 void invalidate_bh_lrus(void)
1463 {
1464 	on_each_cpu(invalidate_bh_lru, NULL, 1, 1);
1465 }
1466 EXPORT_SYMBOL_GPL(invalidate_bh_lrus);
1467 
1468 void set_bh_page(struct buffer_head *bh,
1469 		struct page *page, unsigned long offset)
1470 {
1471 	bh->b_page = page;
1472 	BUG_ON(offset >= PAGE_SIZE);
1473 	if (PageHighMem(page))
1474 		/*
1475 		 * This catches illegal uses and preserves the offset:
1476 		 */
1477 		bh->b_data = (char *)(0 + offset);
1478 	else
1479 		bh->b_data = page_address(page) + offset;
1480 }
1481 EXPORT_SYMBOL(set_bh_page);
1482 
1483 /*
1484  * Called when truncating a buffer on a page completely.
1485  */
1486 static void discard_buffer(struct buffer_head * bh)
1487 {
1488 	lock_buffer(bh);
1489 	clear_buffer_dirty(bh);
1490 	bh->b_bdev = NULL;
1491 	clear_buffer_mapped(bh);
1492 	clear_buffer_req(bh);
1493 	clear_buffer_new(bh);
1494 	clear_buffer_delay(bh);
1495 	clear_buffer_unwritten(bh);
1496 	unlock_buffer(bh);
1497 }
1498 
1499 /**
1500  * block_invalidatepage - invalidate part or all of a buffer-backed page
1501  *
1502  * @page: the page which is affected
1503  * @offset: the index of the truncation point
1504  *
1505  * block_invalidatepage() is called when all or part of the page has become
1506  * invalidated by a truncate operation.
1507  *
1508  * block_invalidatepage() does not have to release all buffers, but it must
1509  * ensure that no dirty buffer is left outside @offset and that no I/O
1510  * is underway against any of the blocks which are outside the truncation
1511  * point.  Because the caller is about to free (and possibly reuse) those
1512  * blocks on-disk.
1513  */
1514 void block_invalidatepage(struct page *page, unsigned long offset)
1515 {
1516 	struct buffer_head *head, *bh, *next;
1517 	unsigned int curr_off = 0;
1518 
1519 	BUG_ON(!PageLocked(page));
1520 	if (!page_has_buffers(page))
1521 		goto out;
1522 
1523 	head = page_buffers(page);
1524 	bh = head;
1525 	do {
1526 		unsigned int next_off = curr_off + bh->b_size;
1527 		next = bh->b_this_page;
1528 
1529 		/*
1530 		 * is this block fully invalidated?
1531 		 */
1532 		if (offset <= curr_off)
1533 			discard_buffer(bh);
1534 		curr_off = next_off;
1535 		bh = next;
1536 	} while (bh != head);
1537 
1538 	/*
1539 	 * We release buffers only if the entire page is being invalidated.
1540 	 * The get_block cached value has been unconditionally invalidated,
1541 	 * so real IO is not possible anymore.
1542 	 */
1543 	if (offset == 0)
1544 		try_to_release_page(page, 0);
1545 out:
1546 	return;
1547 }
1548 EXPORT_SYMBOL(block_invalidatepage);
1549 
1550 /*
1551  * We attach and possibly dirty the buffers atomically wrt
1552  * __set_page_dirty_buffers() via private_lock.  try_to_free_buffers
1553  * is already excluded via the page lock.
1554  */
1555 void create_empty_buffers(struct page *page,
1556 			unsigned long blocksize, unsigned long b_state)
1557 {
1558 	struct buffer_head *bh, *head, *tail;
1559 
1560 	head = alloc_page_buffers(page, blocksize, 1);
1561 	bh = head;
1562 	do {
1563 		bh->b_state |= b_state;
1564 		tail = bh;
1565 		bh = bh->b_this_page;
1566 	} while (bh);
1567 	tail->b_this_page = head;
1568 
1569 	spin_lock(&page->mapping->private_lock);
1570 	if (PageUptodate(page) || PageDirty(page)) {
1571 		bh = head;
1572 		do {
1573 			if (PageDirty(page))
1574 				set_buffer_dirty(bh);
1575 			if (PageUptodate(page))
1576 				set_buffer_uptodate(bh);
1577 			bh = bh->b_this_page;
1578 		} while (bh != head);
1579 	}
1580 	attach_page_buffers(page, head);
1581 	spin_unlock(&page->mapping->private_lock);
1582 }
1583 EXPORT_SYMBOL(create_empty_buffers);
1584 
1585 /*
1586  * We are taking a block for data and we don't want any output from any
1587  * buffer-cache aliases starting from the return of that function and
1588  * lasting until the moment when something explicitly marks the buffer
1589  * dirty (hopefully that will not happen until we free that block ;-)
1590  * We don't even need to mark it not-uptodate - nobody can expect
1591  * anything from a newly allocated buffer anyway. We used to use
1592  * unmap_buffer() for such invalidation, but that was wrong. We definitely
1593  * don't want to mark the alias unmapped, for example - it would confuse
1594  * anyone who might pick it up with bread() afterwards...
1595  *
1596  * Also..  Note that bforget() doesn't lock the buffer.  So there can
1597  * be writeout I/O going on against recently-freed buffers.  We don't
1598  * wait on that I/O in bforget() - it's more efficient to wait on the I/O
1599  * only if we really need to.  That happens here.
1600  */
1601 void unmap_underlying_metadata(struct block_device *bdev, sector_t block)
1602 {
1603 	struct buffer_head *old_bh;
1604 
1605 	might_sleep();
1606 
1607 	old_bh = __find_get_block_slow(bdev, block);
1608 	if (old_bh) {
1609 		clear_buffer_dirty(old_bh);
1610 		wait_on_buffer(old_bh);
1611 		clear_buffer_req(old_bh);
1612 		__brelse(old_bh);
1613 	}
1614 }
1615 EXPORT_SYMBOL(unmap_underlying_metadata);
1616 
1617 /*
1618  * NOTE! All mapped/uptodate combinations are valid:
1619  *
1620  *	Mapped	Uptodate	Meaning
1621  *
1622  *	No	No		"unknown" - must do get_block()
1623  *	No	Yes		"hole" - zero-filled
1624  *	Yes	No		"allocated" - allocated on disk, not read in
1625  *	Yes	Yes		"valid" - allocated and up-to-date in memory.
1626  *
1627  * "Dirty" is valid only with the last case (mapped+uptodate).
1628  */
1629 
1630 /*
1631  * While block_write_full_page is writing back the dirty buffers under
1632  * the page lock, whoever dirtied the buffers may decide to clean them
1633  * again at any time.  We handle that by only looking at the buffer
1634  * state inside lock_buffer().
1635  *
1636  * If block_write_full_page() is called for regular writeback
1637  * (wbc->sync_mode == WB_SYNC_NONE) then it will redirty a page which has a
1638  * locked buffer.   This only can happen if someone has written the buffer
1639  * directly, with submit_bh().  At the address_space level PageWriteback
1640  * prevents this contention from occurring.
1641  */
1642 static int __block_write_full_page(struct inode *inode, struct page *page,
1643 			get_block_t *get_block, struct writeback_control *wbc)
1644 {
1645 	int err;
1646 	sector_t block;
1647 	sector_t last_block;
1648 	struct buffer_head *bh, *head;
1649 	const unsigned blocksize = 1 << inode->i_blkbits;
1650 	int nr_underway = 0;
1651 
1652 	BUG_ON(!PageLocked(page));
1653 
1654 	last_block = (i_size_read(inode) - 1) >> inode->i_blkbits;
1655 
1656 	if (!page_has_buffers(page)) {
1657 		create_empty_buffers(page, blocksize,
1658 					(1 << BH_Dirty)|(1 << BH_Uptodate));
1659 	}
1660 
1661 	/*
1662 	 * Be very careful.  We have no exclusion from __set_page_dirty_buffers
1663 	 * here, and the (potentially unmapped) buffers may become dirty at
1664 	 * any time.  If a buffer becomes dirty here after we've inspected it
1665 	 * then we just miss that fact, and the page stays dirty.
1666 	 *
1667 	 * Buffers outside i_size may be dirtied by __set_page_dirty_buffers;
1668 	 * handle that here by just cleaning them.
1669 	 */
1670 
1671 	block = (sector_t)page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
1672 	head = page_buffers(page);
1673 	bh = head;
1674 
1675 	/*
1676 	 * Get all the dirty buffers mapped to disk addresses and
1677 	 * handle any aliases from the underlying blockdev's mapping.
1678 	 */
1679 	do {
1680 		if (block > last_block) {
1681 			/*
1682 			 * mapped buffers outside i_size will occur, because
1683 			 * this page can be outside i_size when there is a
1684 			 * truncate in progress.
1685 			 */
1686 			/*
1687 			 * The buffer was zeroed by block_write_full_page()
1688 			 */
1689 			clear_buffer_dirty(bh);
1690 			set_buffer_uptodate(bh);
1691 		} else if (!buffer_mapped(bh) && buffer_dirty(bh)) {
1692 			WARN_ON(bh->b_size != blocksize);
1693 			err = get_block(inode, block, bh, 1);
1694 			if (err)
1695 				goto recover;
1696 			if (buffer_new(bh)) {
1697 				/* blockdev mappings never come here */
1698 				clear_buffer_new(bh);
1699 				unmap_underlying_metadata(bh->b_bdev,
1700 							bh->b_blocknr);
1701 			}
1702 		}
1703 		bh = bh->b_this_page;
1704 		block++;
1705 	} while (bh != head);
1706 
1707 	do {
1708 		if (!buffer_mapped(bh))
1709 			continue;
1710 		/*
1711 		 * If it's a fully non-blocking write attempt and we cannot
1712 		 * lock the buffer then redirty the page.  Note that this can
1713 		 * potentially cause a busy-wait loop from pdflush and kswapd
1714 		 * activity, but those code paths have their own higher-level
1715 		 * throttling.
1716 		 */
1717 		if (wbc->sync_mode != WB_SYNC_NONE || !wbc->nonblocking) {
1718 			lock_buffer(bh);
1719 		} else if (test_set_buffer_locked(bh)) {
1720 			redirty_page_for_writepage(wbc, page);
1721 			continue;
1722 		}
1723 		if (test_clear_buffer_dirty(bh)) {
1724 			mark_buffer_async_write(bh);
1725 		} else {
1726 			unlock_buffer(bh);
1727 		}
1728 	} while ((bh = bh->b_this_page) != head);
1729 
1730 	/*
1731 	 * The page and its buffers are protected by PageWriteback(), so we can
1732 	 * drop the bh refcounts early.
1733 	 */
1734 	BUG_ON(PageWriteback(page));
1735 	set_page_writeback(page);
1736 
1737 	do {
1738 		struct buffer_head *next = bh->b_this_page;
1739 		if (buffer_async_write(bh)) {
1740 			submit_bh(WRITE, bh);
1741 			nr_underway++;
1742 		}
1743 		bh = next;
1744 	} while (bh != head);
1745 	unlock_page(page);
1746 
1747 	err = 0;
1748 done:
1749 	if (nr_underway == 0) {
1750 		/*
1751 		 * The page was marked dirty, but the buffers were
1752 		 * clean.  Someone wrote them back by hand with
1753 		 * ll_rw_block/submit_bh.  A rare case.
1754 		 */
1755 		end_page_writeback(page);
1756 
1757 		/*
1758 		 * The page and buffer_heads can be released at any time from
1759 		 * here on.
1760 		 */
1761 	}
1762 	return err;
1763 
1764 recover:
1765 	/*
1766 	 * ENOSPC, or some other error.  We may already have added some
1767 	 * blocks to the file, so we need to write these out to avoid
1768 	 * exposing stale data.
1769 	 * The page is currently locked and not marked for writeback
1770 	 */
1771 	bh = head;
1772 	/* Recovery: lock and submit the mapped buffers */
1773 	do {
1774 		if (buffer_mapped(bh) && buffer_dirty(bh)) {
1775 			lock_buffer(bh);
1776 			mark_buffer_async_write(bh);
1777 		} else {
1778 			/*
1779 			 * The buffer may have been set dirty during
1780 			 * attachment to a dirty page.
1781 			 */
1782 			clear_buffer_dirty(bh);
1783 		}
1784 	} while ((bh = bh->b_this_page) != head);
1785 	SetPageError(page);
1786 	BUG_ON(PageWriteback(page));
1787 	mapping_set_error(page->mapping, err);
1788 	set_page_writeback(page);
1789 	do {
1790 		struct buffer_head *next = bh->b_this_page;
1791 		if (buffer_async_write(bh)) {
1792 			clear_buffer_dirty(bh);
1793 			submit_bh(WRITE, bh);
1794 			nr_underway++;
1795 		}
1796 		bh = next;
1797 	} while (bh != head);
1798 	unlock_page(page);
1799 	goto done;
1800 }
1801 
1802 /*
1803  * If a page has any new buffers, zero them out here, and mark them uptodate
1804  * and dirty so they'll be written out (in order to prevent uninitialised
1805  * block data from leaking). And clear the new bit.
1806  */
1807 void page_zero_new_buffers(struct page *page, unsigned from, unsigned to)
1808 {
1809 	unsigned int block_start, block_end;
1810 	struct buffer_head *head, *bh;
1811 
1812 	BUG_ON(!PageLocked(page));
1813 	if (!page_has_buffers(page))
1814 		return;
1815 
1816 	bh = head = page_buffers(page);
1817 	block_start = 0;
1818 	do {
1819 		block_end = block_start + bh->b_size;
1820 
1821 		if (buffer_new(bh)) {
1822 			if (block_end > from && block_start < to) {
1823 				if (!PageUptodate(page)) {
1824 					unsigned start, size;
1825 
1826 					start = max(from, block_start);
1827 					size = min(to, block_end) - start;
1828 
1829 					zero_user(page, start, size);
1830 					set_buffer_uptodate(bh);
1831 				}
1832 
1833 				clear_buffer_new(bh);
1834 				mark_buffer_dirty(bh);
1835 			}
1836 		}
1837 
1838 		block_start = block_end;
1839 		bh = bh->b_this_page;
1840 	} while (bh != head);
1841 }
1842 EXPORT_SYMBOL(page_zero_new_buffers);
1843 
1844 static int __block_prepare_write(struct inode *inode, struct page *page,
1845 		unsigned from, unsigned to, get_block_t *get_block)
1846 {
1847 	unsigned block_start, block_end;
1848 	sector_t block;
1849 	int err = 0;
1850 	unsigned blocksize, bbits;
1851 	struct buffer_head *bh, *head, *wait[2], **wait_bh=wait;
1852 
1853 	BUG_ON(!PageLocked(page));
1854 	BUG_ON(from > PAGE_CACHE_SIZE);
1855 	BUG_ON(to > PAGE_CACHE_SIZE);
1856 	BUG_ON(from > to);
1857 
1858 	blocksize = 1 << inode->i_blkbits;
1859 	if (!page_has_buffers(page))
1860 		create_empty_buffers(page, blocksize, 0);
1861 	head = page_buffers(page);
1862 
1863 	bbits = inode->i_blkbits;
1864 	block = (sector_t)page->index << (PAGE_CACHE_SHIFT - bbits);
1865 
1866 	for(bh = head, block_start = 0; bh != head || !block_start;
1867 	    block++, block_start=block_end, bh = bh->b_this_page) {
1868 		block_end = block_start + blocksize;
1869 		if (block_end <= from || block_start >= to) {
1870 			if (PageUptodate(page)) {
1871 				if (!buffer_uptodate(bh))
1872 					set_buffer_uptodate(bh);
1873 			}
1874 			continue;
1875 		}
1876 		if (buffer_new(bh))
1877 			clear_buffer_new(bh);
1878 		if (!buffer_mapped(bh)) {
1879 			WARN_ON(bh->b_size != blocksize);
1880 			err = get_block(inode, block, bh, 1);
1881 			if (err)
1882 				break;
1883 			if (buffer_new(bh)) {
1884 				unmap_underlying_metadata(bh->b_bdev,
1885 							bh->b_blocknr);
1886 				if (PageUptodate(page)) {
1887 					clear_buffer_new(bh);
1888 					set_buffer_uptodate(bh);
1889 					mark_buffer_dirty(bh);
1890 					continue;
1891 				}
1892 				if (block_end > to || block_start < from)
1893 					zero_user_segments(page,
1894 						to, block_end,
1895 						block_start, from);
1896 				continue;
1897 			}
1898 		}
1899 		if (PageUptodate(page)) {
1900 			if (!buffer_uptodate(bh))
1901 				set_buffer_uptodate(bh);
1902 			continue;
1903 		}
1904 		if (!buffer_uptodate(bh) && !buffer_delay(bh) &&
1905 		    !buffer_unwritten(bh) &&
1906 		     (block_start < from || block_end > to)) {
1907 			ll_rw_block(READ, 1, &bh);
1908 			*wait_bh++=bh;
1909 		}
1910 	}
1911 	/*
1912 	 * If we issued read requests - let them complete.
1913 	 */
1914 	while(wait_bh > wait) {
1915 		wait_on_buffer(*--wait_bh);
1916 		if (!buffer_uptodate(*wait_bh))
1917 			err = -EIO;
1918 	}
1919 	if (unlikely(err))
1920 		page_zero_new_buffers(page, from, to);
1921 	return err;
1922 }
1923 
1924 static int __block_commit_write(struct inode *inode, struct page *page,
1925 		unsigned from, unsigned to)
1926 {
1927 	unsigned block_start, block_end;
1928 	int partial = 0;
1929 	unsigned blocksize;
1930 	struct buffer_head *bh, *head;
1931 
1932 	blocksize = 1 << inode->i_blkbits;
1933 
1934 	for(bh = head = page_buffers(page), block_start = 0;
1935 	    bh != head || !block_start;
1936 	    block_start=block_end, bh = bh->b_this_page) {
1937 		block_end = block_start + blocksize;
1938 		if (block_end <= from || block_start >= to) {
1939 			if (!buffer_uptodate(bh))
1940 				partial = 1;
1941 		} else {
1942 			set_buffer_uptodate(bh);
1943 			mark_buffer_dirty(bh);
1944 		}
1945 		clear_buffer_new(bh);
1946 	}
1947 
1948 	/*
1949 	 * If this is a partial write which happened to make all buffers
1950 	 * uptodate then we can optimize away a bogus readpage() for
1951 	 * the next read(). Here we 'discover' whether the page went
1952 	 * uptodate as a result of this (potentially partial) write.
1953 	 */
1954 	if (!partial)
1955 		SetPageUptodate(page);
1956 	return 0;
1957 }
1958 
1959 /*
1960  * block_write_begin takes care of the basic task of block allocation and
1961  * bringing partial write blocks uptodate first.
1962  *
1963  * If *pagep is not NULL, then block_write_begin uses the locked page
1964  * at *pagep rather than allocating its own. In this case, the page will
1965  * not be unlocked or deallocated on failure.
1966  */
1967 int block_write_begin(struct file *file, struct address_space *mapping,
1968 			loff_t pos, unsigned len, unsigned flags,
1969 			struct page **pagep, void **fsdata,
1970 			get_block_t *get_block)
1971 {
1972 	struct inode *inode = mapping->host;
1973 	int status = 0;
1974 	struct page *page;
1975 	pgoff_t index;
1976 	unsigned start, end;
1977 	int ownpage = 0;
1978 
1979 	index = pos >> PAGE_CACHE_SHIFT;
1980 	start = pos & (PAGE_CACHE_SIZE - 1);
1981 	end = start + len;
1982 
1983 	page = *pagep;
1984 	if (page == NULL) {
1985 		ownpage = 1;
1986 		page = __grab_cache_page(mapping, index);
1987 		if (!page) {
1988 			status = -ENOMEM;
1989 			goto out;
1990 		}
1991 		*pagep = page;
1992 	} else
1993 		BUG_ON(!PageLocked(page));
1994 
1995 	status = __block_prepare_write(inode, page, start, end, get_block);
1996 	if (unlikely(status)) {
1997 		ClearPageUptodate(page);
1998 
1999 		if (ownpage) {
2000 			unlock_page(page);
2001 			page_cache_release(page);
2002 			*pagep = NULL;
2003 
2004 			/*
2005 			 * prepare_write() may have instantiated a few blocks
2006 			 * outside i_size.  Trim these off again. Don't need
2007 			 * i_size_read because we hold i_mutex.
2008 			 */
2009 			if (pos + len > inode->i_size)
2010 				vmtruncate(inode, inode->i_size);
2011 		}
2012 		goto out;
2013 	}
2014 
2015 out:
2016 	return status;
2017 }
2018 EXPORT_SYMBOL(block_write_begin);
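
/*
 * Example (illustrative sketch, not part of this file): a filesystem
 * typically wraps block_write_begin() from its ->write_begin address_space
 * operation, passing its own get_block callback.  "example_get_block" is a
 * hypothetical helper, not defined here.
 */
static int example_write_begin(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned flags,
			struct page **pagep, void **fsdata)
{
	*pagep = NULL;
	return block_write_begin(file, mapping, pos, len, flags,
				 pagep, fsdata, example_get_block);
}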
2019 
2020 int block_write_end(struct file *file, struct address_space *mapping,
2021 			loff_t pos, unsigned len, unsigned copied,
2022 			struct page *page, void *fsdata)
2023 {
2024 	struct inode *inode = mapping->host;
2025 	unsigned start;
2026 
2027 	start = pos & (PAGE_CACHE_SIZE - 1);
2028 
2029 	if (unlikely(copied < len)) {
2030 		/*
2031 		 * The buffers that were written will now be uptodate, so we
2032 		 * don't have to worry about a readpage reading them and
2033 		 * overwriting a partial write. However if we have encountered
2034 		 * a short write and only partially written into a buffer, it
2035 		 * will not be marked uptodate, so a readpage might come in and
2036 		 * destroy our partial write.
2037 		 *
2038 		 * Do the simplest thing, and just treat any short write to a
2039 		 * non uptodate page as a zero-length write, and force the
2040 		 * caller to redo the whole thing.
2041 		 */
2042 		if (!PageUptodate(page))
2043 			copied = 0;
2044 
2045 		page_zero_new_buffers(page, start+copied, start+len);
2046 	}
2047 	flush_dcache_page(page);
2048 
2049 	/* This could be a short (even 0-length) commit */
2050 	__block_commit_write(inode, page, start, start+copied);
2051 
2052 	return copied;
2053 }
2054 EXPORT_SYMBOL(block_write_end);
2055 
2056 int generic_write_end(struct file *file, struct address_space *mapping,
2057 			loff_t pos, unsigned len, unsigned copied,
2058 			struct page *page, void *fsdata)
2059 {
2060 	struct inode *inode = mapping->host;
2061 
2062 	copied = block_write_end(file, mapping, pos, len, copied, page, fsdata);
2063 
2064 	/*
2065 	 * No need to use i_size_read() here, the i_size
2066 	 * cannot change under us because we hold i_mutex.
2067 	 *
2068 	 * But it's important to update i_size while still holding page lock:
2069 	 * page writeout could otherwise come in and zero beyond i_size.
2070 	 */
2071 	if (pos+copied > inode->i_size) {
2072 		i_size_write(inode, pos+copied);
2073 		mark_inode_dirty(inode);
2074 	}
2075 
2076 	unlock_page(page);
2077 	page_cache_release(page);
2078 
2079 	return copied;
2080 }
2081 EXPORT_SYMBOL(generic_write_end);
2082 
2083 /*
2084  * Generic "read page" function for block devices that have the normal
2085  * get_block functionality. This is most of the block device filesystems.
2086  * Reads the page asynchronously --- the unlock_buffer() and
2087  * set/clear_buffer_uptodate() functions propagate buffer state into the
2088  * page struct once IO has completed.
2089  */
2090 int block_read_full_page(struct page *page, get_block_t *get_block)
2091 {
2092 	struct inode *inode = page->mapping->host;
2093 	sector_t iblock, lblock;
2094 	struct buffer_head *bh, *head, *arr[MAX_BUF_PER_PAGE];
2095 	unsigned int blocksize;
2096 	int nr, i;
2097 	int fully_mapped = 1;
2098 
2099 	BUG_ON(!PageLocked(page));
2100 	blocksize = 1 << inode->i_blkbits;
2101 	if (!page_has_buffers(page))
2102 		create_empty_buffers(page, blocksize, 0);
2103 	head = page_buffers(page);
2104 
2105 	iblock = (sector_t)page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
2106 	lblock = (i_size_read(inode)+blocksize-1) >> inode->i_blkbits;
2107 	bh = head;
2108 	nr = 0;
2109 	i = 0;
2110 
2111 	do {
2112 		if (buffer_uptodate(bh))
2113 			continue;
2114 
2115 		if (!buffer_mapped(bh)) {
2116 			int err = 0;
2117 
2118 			fully_mapped = 0;
2119 			if (iblock < lblock) {
2120 				WARN_ON(bh->b_size != blocksize);
2121 				err = get_block(inode, iblock, bh, 0);
2122 				if (err)
2123 					SetPageError(page);
2124 			}
2125 			if (!buffer_mapped(bh)) {
2126 				zero_user(page, i * blocksize, blocksize);
2127 				if (!err)
2128 					set_buffer_uptodate(bh);
2129 				continue;
2130 			}
2131 			/*
2132 			 * get_block() might have updated the buffer
2133 			 * synchronously
2134 			 */
2135 			if (buffer_uptodate(bh))
2136 				continue;
2137 		}
2138 		arr[nr++] = bh;
2139 	} while (i++, iblock++, (bh = bh->b_this_page) != head);
2140 
2141 	if (fully_mapped)
2142 		SetPageMappedToDisk(page);
2143 
2144 	if (!nr) {
2145 		/*
2146 		 * All buffers are uptodate - we can set the page uptodate
2147 		 * as well. But not if get_block() returned an error.
2148 		 */
2149 		if (!PageError(page))
2150 			SetPageUptodate(page);
2151 		unlock_page(page);
2152 		return 0;
2153 	}
2154 
2155 	/* Stage two: lock the buffers */
2156 	for (i = 0; i < nr; i++) {
2157 		bh = arr[i];
2158 		lock_buffer(bh);
2159 		mark_buffer_async_read(bh);
2160 	}
2161 
2162 	/*
2163 	 * Stage 3: start the IO.  Check for uptodateness
2164 	 * inside the buffer lock in case another process reading
2165 	 * the underlying blockdev brought it uptodate (the sct fix).
2166 	 */
2167 	for (i = 0; i < nr; i++) {
2168 		bh = arr[i];
2169 		if (buffer_uptodate(bh))
2170 			end_buffer_async_read(bh, 1);
2171 		else
2172 			submit_bh(READ, bh);
2173 	}
2174 	return 0;
2175 }
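
/*
 * Example (illustrative sketch, not part of this file): block_read_full_page()
 * is normally used directly as a filesystem's ->readpage address_space
 * operation.  "example_get_block" is a hypothetical get_block callback.
 */
static int example_readpage(struct file *file, struct page *page)
{
	return block_read_full_page(page, example_get_block);
}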
2176 
2177 /* utility function for filesystems that need to do work on expanding
2178  * truncates.  Uses filesystem pagecache writes to allow the filesystem to
2179  * deal with the hole.
2180  */
2181 int generic_cont_expand_simple(struct inode *inode, loff_t size)
2182 {
2183 	struct address_space *mapping = inode->i_mapping;
2184 	struct page *page;
2185 	void *fsdata;
2186 	unsigned long limit;
2187 	int err;
2188 
2189 	err = -EFBIG;
2190 	limit = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
2191 	if (limit != RLIM_INFINITY && size > (loff_t)limit) {
2192 		send_sig(SIGXFSZ, current, 0);
2193 		goto out;
2194 	}
2195 	if (size > inode->i_sb->s_maxbytes)
2196 		goto out;
2197 
2198 	err = pagecache_write_begin(NULL, mapping, size, 0,
2199 				AOP_FLAG_UNINTERRUPTIBLE|AOP_FLAG_CONT_EXPAND,
2200 				&page, &fsdata);
2201 	if (err)
2202 		goto out;
2203 
2204 	err = pagecache_write_end(NULL, mapping, size, 0, 0, page, fsdata);
2205 	BUG_ON(err > 0);
2206 
2207 out:
2208 	return err;
2209 }
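
/*
 * Example (illustrative sketch, not part of this file): an expanding truncate
 * in a filesystem's ->setattr path could be built on
 * generic_cont_expand_simple() roughly like this.
 */
static int example_expand(struct inode *inode, loff_t new_size)
{
	if (new_size <= i_size_read(inode))
		return 0;
	return generic_cont_expand_simple(inode, new_size);
}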
2210 
2211 int cont_expand_zero(struct file *file, struct address_space *mapping,
2212 			loff_t pos, loff_t *bytes)
2213 {
2214 	struct inode *inode = mapping->host;
2215 	unsigned blocksize = 1 << inode->i_blkbits;
2216 	struct page *page;
2217 	void *fsdata;
2218 	pgoff_t index, curidx;
2219 	loff_t curpos;
2220 	unsigned zerofrom, offset, len;
2221 	int err = 0;
2222 
2223 	index = pos >> PAGE_CACHE_SHIFT;
2224 	offset = pos & ~PAGE_CACHE_MASK;
2225 
2226 	while (index > (curidx = (curpos = *bytes)>>PAGE_CACHE_SHIFT)) {
2227 		zerofrom = curpos & ~PAGE_CACHE_MASK;
2228 		if (zerofrom & (blocksize-1)) {
2229 			*bytes |= (blocksize-1);
2230 			(*bytes)++;
2231 		}
2232 		len = PAGE_CACHE_SIZE - zerofrom;
2233 
2234 		err = pagecache_write_begin(file, mapping, curpos, len,
2235 						AOP_FLAG_UNINTERRUPTIBLE,
2236 						&page, &fsdata);
2237 		if (err)
2238 			goto out;
2239 		zero_user(page, zerofrom, len);
2240 		err = pagecache_write_end(file, mapping, curpos, len, len,
2241 						page, fsdata);
2242 		if (err < 0)
2243 			goto out;
2244 		BUG_ON(err != len);
2245 		err = 0;
2246 	}
2247 
2248 	/* page covers the boundary, find the boundary offset */
2249 	if (index == curidx) {
2250 		zerofrom = curpos & ~PAGE_CACHE_MASK;
2251 		/* if we are going to expand the file, the last block will be filled */
2252 		if (offset <= zerofrom) {
2253 			goto out;
2254 		}
2255 		if (zerofrom & (blocksize-1)) {
2256 			*bytes |= (blocksize-1);
2257 			(*bytes)++;
2258 		}
2259 		len = offset - zerofrom;
2260 
2261 		err = pagecache_write_begin(file, mapping, curpos, len,
2262 						AOP_FLAG_UNINTERRUPTIBLE,
2263 						&page, &fsdata);
2264 		if (err)
2265 			goto out;
2266 		zero_user(page, zerofrom, len);
2267 		err = pagecache_write_end(file, mapping, curpos, len, len,
2268 						page, fsdata);
2269 		if (err < 0)
2270 			goto out;
2271 		BUG_ON(err != len);
2272 		err = 0;
2273 	}
2274 out:
2275 	return err;
2276 }
2277 
2278 /*
2279  * For moronic filesystems that do not allow holes in files.
2280  * We may have to extend the file.
2281  */
2282 int cont_write_begin(struct file *file, struct address_space *mapping,
2283 			loff_t pos, unsigned len, unsigned flags,
2284 			struct page **pagep, void **fsdata,
2285 			get_block_t *get_block, loff_t *bytes)
2286 {
2287 	struct inode *inode = mapping->host;
2288 	unsigned blocksize = 1 << inode->i_blkbits;
2289 	unsigned zerofrom;
2290 	int err;
2291 
2292 	err = cont_expand_zero(file, mapping, pos, bytes);
2293 	if (err)
2294 		goto out;
2295 
2296 	zerofrom = *bytes & ~PAGE_CACHE_MASK;
2297 	if (pos+len > *bytes && zerofrom & (blocksize-1)) {
2298 		*bytes |= (blocksize-1);
2299 		(*bytes)++;
2300 	}
2301 
2302 	*pagep = NULL;
2303 	err = block_write_begin(file, mapping, pos, len,
2304 				flags, pagep, fsdata, get_block);
2305 out:
2306 	return err;
2307 }
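
/*
 * Example (illustrative sketch, not part of this file): a filesystem that
 * cannot represent holes calls cont_write_begin() from its ->write_begin,
 * passing a pointer to its "zeroed out so far" watermark.
 * "example_get_block" and "example_zeroed_up_to" are hypothetical names;
 * in practice the watermark is a per-inode field.
 */
static loff_t example_zeroed_up_to;

static int example_cont_write_begin(struct file *file,
			struct address_space *mapping,
			loff_t pos, unsigned len, unsigned flags,
			struct page **pagep, void **fsdata)
{
	*pagep = NULL;
	return cont_write_begin(file, mapping, pos, len, flags, pagep, fsdata,
				example_get_block, &example_zeroed_up_to);
}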
2308 
2309 int block_prepare_write(struct page *page, unsigned from, unsigned to,
2310 			get_block_t *get_block)
2311 {
2312 	struct inode *inode = page->mapping->host;
2313 	int err = __block_prepare_write(inode, page, from, to, get_block);
2314 	if (err)
2315 		ClearPageUptodate(page);
2316 	return err;
2317 }
2318 
2319 int block_commit_write(struct page *page, unsigned from, unsigned to)
2320 {
2321 	struct inode *inode = page->mapping->host;
2322 	__block_commit_write(inode,page,from,to);
2323 	return 0;
2324 }
2325 
2326 int generic_commit_write(struct file *file, struct page *page,
2327 		unsigned from, unsigned to)
2328 {
2329 	struct inode *inode = page->mapping->host;
2330 	loff_t pos = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;
2331 	__block_commit_write(inode,page,from,to);
2332 	/*
2333 	 * No need to use i_size_read() here, the i_size
2334 	 * cannot change under us because we hold i_mutex.
2335 	 */
2336 	if (pos > inode->i_size) {
2337 		i_size_write(inode, pos);
2338 		mark_inode_dirty(inode);
2339 	}
2340 	return 0;
2341 }
2342 
2343 /*
2344  * block_page_mkwrite() is not allowed to change the file size as it gets
2345  * called from a page fault handler when a page is first dirtied. Hence we must
2346  * be careful to check for EOF conditions here. We set the page up correctly
2347  * for a written page which means we get ENOSPC checking when writing into
2348  * holes and correct delalloc and unwritten extent mapping on filesystems that
2349  * support these features.
2350  *
2351  * We are not allowed to take the i_mutex here so we have to play games to
2352  * protect against truncate races as the page could now be beyond EOF.  Because
2353  * vmtruncate() writes the inode size before removing pages, once we have the
2354  * page lock we can determine safely if the page is beyond EOF. If it is not
2355  * beyond EOF, then the page is guaranteed safe against truncation until we
2356  * unlock the page.
2357  */
2358 int
2359 block_page_mkwrite(struct vm_area_struct *vma, struct page *page,
2360 		   get_block_t get_block)
2361 {
2362 	struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
2363 	unsigned long end;
2364 	loff_t size;
2365 	int ret = -EINVAL;
2366 
2367 	lock_page(page);
2368 	size = i_size_read(inode);
2369 	if ((page->mapping != inode->i_mapping) ||
2370 	    (page_offset(page) > size)) {
2371 		/* page got truncated out from underneath us */
2372 		goto out_unlock;
2373 	}
2374 
2375 	/* page is wholly or partially inside EOF */
2376 	if (((page->index + 1) << PAGE_CACHE_SHIFT) > size)
2377 		end = size & ~PAGE_CACHE_MASK;
2378 	else
2379 		end = PAGE_CACHE_SIZE;
2380 
2381 	ret = block_prepare_write(page, 0, end, get_block);
2382 	if (!ret)
2383 		ret = block_commit_write(page, 0, end);
2384 
2385 out_unlock:
2386 	unlock_page(page);
2387 	return ret;
2388 }
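
/*
 * Example (illustrative sketch, not part of this file): a filesystem exposes
 * block_page_mkwrite() through the ->page_mkwrite hook of its
 * vm_operations_struct.  "example_get_block" is a hypothetical callback.
 */
static int example_page_mkwrite(struct vm_area_struct *vma, struct page *page)
{
	return block_page_mkwrite(vma, page, example_get_block);
}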
2389 
2390 /*
2391  * nobh_write_begin()'s prereads are special: the buffer_heads are freed
2392  * immediately, while under the page lock.  So it needs a special end_io
2393  * handler which does not touch the bh after unlocking it.
2394  */
2395 static void end_buffer_read_nobh(struct buffer_head *bh, int uptodate)
2396 {
2397 	__end_buffer_read_notouch(bh, uptodate);
2398 }
2399 
2400 /*
2401  * Attach the singly-linked list of buffers created by nobh_write_begin, to
2402  * the page (converting it to circular linked list and taking care of page
2403  * dirty races).
2404  */
2405 static void attach_nobh_buffers(struct page *page, struct buffer_head *head)
2406 {
2407 	struct buffer_head *bh;
2408 
2409 	BUG_ON(!PageLocked(page));
2410 
2411 	spin_lock(&page->mapping->private_lock);
2412 	bh = head;
2413 	do {
2414 		if (PageDirty(page))
2415 			set_buffer_dirty(bh);
2416 		if (!bh->b_this_page)
2417 			bh->b_this_page = head;
2418 		bh = bh->b_this_page;
2419 	} while (bh != head);
2420 	attach_page_buffers(page, head);
2421 	spin_unlock(&page->mapping->private_lock);
2422 }
2423 
2424 /*
2425  * On entry, the page is not uptodate at all.
2426  * On exit, the page is fully uptodate in the areas outside (from,to).
2427  */
2428 int nobh_write_begin(struct file *file, struct address_space *mapping,
2429 			loff_t pos, unsigned len, unsigned flags,
2430 			struct page **pagep, void **fsdata,
2431 			get_block_t *get_block)
2432 {
2433 	struct inode *inode = mapping->host;
2434 	const unsigned blkbits = inode->i_blkbits;
2435 	const unsigned blocksize = 1 << blkbits;
2436 	struct buffer_head *head, *bh;
2437 	struct page *page;
2438 	pgoff_t index;
2439 	unsigned from, to;
2440 	unsigned block_in_page;
2441 	unsigned block_start, block_end;
2442 	sector_t block_in_file;
2443 	int nr_reads = 0;
2444 	int ret = 0;
2445 	int is_mapped_to_disk = 1;
2446 
2447 	index = pos >> PAGE_CACHE_SHIFT;
2448 	from = pos & (PAGE_CACHE_SIZE - 1);
2449 	to = from + len;
2450 
2451 	page = __grab_cache_page(mapping, index);
2452 	if (!page)
2453 		return -ENOMEM;
2454 	*pagep = page;
2455 	*fsdata = NULL;
2456 
2457 	if (page_has_buffers(page)) {
2458 		unlock_page(page);
2459 		page_cache_release(page);
2460 		*pagep = NULL;
2461 		return block_write_begin(file, mapping, pos, len, flags, pagep,
2462 					fsdata, get_block);
2463 	}
2464 
2465 	if (PageMappedToDisk(page))
2466 		return 0;
2467 
2468 	/*
2469 	 * Allocate buffers so that we can keep track of state, and potentially
2470 	 * attach them to the page if an error occurs. In the common case of
2471 	 * no error, they will just be freed again without ever being attached
2472 	 * to the page (which is all OK, because we're under the page lock).
2473 	 *
2474 	 * Be careful: the buffer linked list is a NULL terminated one, rather
2475 	 * than the circular one we're used to.
2476 	 */
2477 	head = alloc_page_buffers(page, blocksize, 0);
2478 	if (!head) {
2479 		ret = -ENOMEM;
2480 		goto out_release;
2481 	}
2482 
2483 	block_in_file = (sector_t)page->index << (PAGE_CACHE_SHIFT - blkbits);
2484 
2485 	/*
2486 	 * We loop across all blocks in the page, whether or not they are
2487 	 * part of the affected region.  This is so we can discover if the
2488 	 * page is fully mapped-to-disk.
2489 	 */
2490 	for (block_start = 0, block_in_page = 0, bh = head;
2491 		  block_start < PAGE_CACHE_SIZE;
2492 		  block_in_page++, block_start += blocksize, bh = bh->b_this_page) {
2493 		int create;
2494 
2495 		block_end = block_start + blocksize;
2496 		bh->b_state = 0;
2497 		create = 1;
2498 		if (block_start >= to)
2499 			create = 0;
2500 		ret = get_block(inode, block_in_file + block_in_page,
2501 					bh, create);
2502 		if (ret)
2503 			goto failed;
2504 		if (!buffer_mapped(bh))
2505 			is_mapped_to_disk = 0;
2506 		if (buffer_new(bh))
2507 			unmap_underlying_metadata(bh->b_bdev, bh->b_blocknr);
2508 		if (PageUptodate(page)) {
2509 			set_buffer_uptodate(bh);
2510 			continue;
2511 		}
2512 		if (buffer_new(bh) || !buffer_mapped(bh)) {
2513 			zero_user_segments(page, block_start, from,
2514 							to, block_end);
2515 			continue;
2516 		}
2517 		if (buffer_uptodate(bh))
2518 			continue;	/* reiserfs does this */
2519 		if (block_start < from || block_end > to) {
2520 			lock_buffer(bh);
2521 			bh->b_end_io = end_buffer_read_nobh;
2522 			submit_bh(READ, bh);
2523 			nr_reads++;
2524 		}
2525 	}
2526 
2527 	if (nr_reads) {
2528 		/*
2529 		 * The page is locked, so these buffers are protected from
2530 		 * any VM or truncate activity.  Hence we don't need to care
2531 		 * for the buffer_head refcounts.
2532 		 */
2533 		for (bh = head; bh; bh = bh->b_this_page) {
2534 			wait_on_buffer(bh);
2535 			if (!buffer_uptodate(bh))
2536 				ret = -EIO;
2537 		}
2538 		if (ret)
2539 			goto failed;
2540 	}
2541 
2542 	if (is_mapped_to_disk)
2543 		SetPageMappedToDisk(page);
2544 
2545 	*fsdata = head; /* to be released by nobh_write_end */
2546 
2547 	return 0;
2548 
2549 failed:
2550 	BUG_ON(!ret);
2551 	/*
2552 	 * Error recovery is a bit difficult. We need to zero out blocks that
2553 	 * were newly allocated, and dirty them to ensure they get written out.
2554 	 * Buffers need to be attached to the page at this point, otherwise
2555 	 * the handling of potential IO errors during writeout would be hard
2556 	 * (could try doing synchronous writeout, but what if that fails too?)
2557 	 */
2558 	attach_nobh_buffers(page, head);
2559 	page_zero_new_buffers(page, from, to);
2560 
2561 out_release:
2562 	unlock_page(page);
2563 	page_cache_release(page);
2564 	*pagep = NULL;
2565 
2566 	if (pos + len > inode->i_size)
2567 		vmtruncate(inode, inode->i_size);
2568 
2569 	return ret;
2570 }
2571 EXPORT_SYMBOL(nobh_write_begin);
2572 
2573 int nobh_write_end(struct file *file, struct address_space *mapping,
2574 			loff_t pos, unsigned len, unsigned copied,
2575 			struct page *page, void *fsdata)
2576 {
2577 	struct inode *inode = page->mapping->host;
2578 	struct buffer_head *head = fsdata;
2579 	struct buffer_head *bh;
2580 	BUG_ON(fsdata != NULL && page_has_buffers(page));
2581 
2582 	if (unlikely(copied < len) && !page_has_buffers(page))
2583 		attach_nobh_buffers(page, head);
2584 	if (page_has_buffers(page))
2585 		return generic_write_end(file, mapping, pos, len,
2586 					copied, page, fsdata);
2587 
2588 	SetPageUptodate(page);
2589 	set_page_dirty(page);
2590 	if (pos+copied > inode->i_size) {
2591 		i_size_write(inode, pos+copied);
2592 		mark_inode_dirty(inode);
2593 	}
2594 
2595 	unlock_page(page);
2596 	page_cache_release(page);
2597 
2598 	while (head) {
2599 		bh = head;
2600 		head = head->b_this_page;
2601 		free_buffer_head(bh);
2602 	}
2603 
2604 	return copied;
2605 }
2606 EXPORT_SYMBOL(nobh_write_end);
2607 
2608 /*
2609  * nobh_writepage() - based on block_write_full_page() except
2610  * that it tries to operate without attaching bufferheads to
2611  * the page.
2612  */
2613 int nobh_writepage(struct page *page, get_block_t *get_block,
2614 			struct writeback_control *wbc)
2615 {
2616 	struct inode * const inode = page->mapping->host;
2617 	loff_t i_size = i_size_read(inode);
2618 	const pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
2619 	unsigned offset;
2620 	int ret;
2621 
2622 	/* Is the page fully inside i_size? */
2623 	if (page->index < end_index)
2624 		goto out;
2625 
2626 	/* Is the page fully outside i_size? (truncate in progress) */
2627 	offset = i_size & (PAGE_CACHE_SIZE-1);
2628 	if (page->index >= end_index+1 || !offset) {
2629 		/*
2630 		 * The page may have dirty, unmapped buffers.  For example,
2631 		 * they may have been added in ext3_writepage().  Make them
2632 		 * freeable here, so the page does not leak.
2633 		 */
2634 #if 0
2635 		/* Not really sure about this  - do we need this ? */
2636 		if (page->mapping->a_ops->invalidatepage)
2637 			page->mapping->a_ops->invalidatepage(page, offset);
2638 #endif
2639 		unlock_page(page);
2640 		return 0; /* don't care */
2641 	}
2642 
2643 	/*
2644 	 * The page straddles i_size.  It must be zeroed out on each and every
2645 	 * writepage invocation because it may be mmapped.  "A file is mapped
2646 	 * in multiples of the page size.  For a file that is not a multiple of
2647 	 * the page size, the remaining memory is zeroed when mapped, and
2648 	 * writes to that region are not written out to the file."
2649 	 */
2650 	zero_user_segment(page, offset, PAGE_CACHE_SIZE);
2651 out:
2652 	ret = mpage_writepage(page, get_block, wbc);
2653 	if (ret == -EAGAIN)
2654 		ret = __block_write_full_page(inode, page, get_block, wbc);
2655 	return ret;
2656 }
2657 EXPORT_SYMBOL(nobh_writepage);
2658 
2659 int nobh_truncate_page(struct address_space *mapping,
2660 			loff_t from, get_block_t *get_block)
2661 {
2662 	pgoff_t index = from >> PAGE_CACHE_SHIFT;
2663 	unsigned offset = from & (PAGE_CACHE_SIZE-1);
2664 	unsigned blocksize;
2665 	sector_t iblock;
2666 	unsigned length, pos;
2667 	struct inode *inode = mapping->host;
2668 	struct page *page;
2669 	struct buffer_head map_bh;
2670 	int err;
2671 
2672 	blocksize = 1 << inode->i_blkbits;
2673 	length = offset & (blocksize - 1);
2674 
2675 	/* Block boundary? Nothing to do */
2676 	if (!length)
2677 		return 0;
2678 
2679 	length = blocksize - length;
2680 	iblock = (sector_t)index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
2681 
2682 	page = grab_cache_page(mapping, index);
2683 	err = -ENOMEM;
2684 	if (!page)
2685 		goto out;
2686 
2687 	if (page_has_buffers(page)) {
2688 has_buffers:
2689 		unlock_page(page);
2690 		page_cache_release(page);
2691 		return block_truncate_page(mapping, from, get_block);
2692 	}
2693 
2694 	/* Find the buffer that contains "offset" */
2695 	pos = blocksize;
2696 	while (offset >= pos) {
2697 		iblock++;
2698 		pos += blocksize;
2699 	}
2700 
2701 	err = get_block(inode, iblock, &map_bh, 0);
2702 	if (err)
2703 		goto unlock;
2704 	/* unmapped? It's a hole - nothing to do */
2705 	if (!buffer_mapped(&map_bh))
2706 		goto unlock;
2707 
2708 	/* Ok, it's mapped. Make sure it's up-to-date */
2709 	if (!PageUptodate(page)) {
2710 		err = mapping->a_ops->readpage(NULL, page);
2711 		if (err) {
2712 			page_cache_release(page);
2713 			goto out;
2714 		}
2715 		lock_page(page);
2716 		if (!PageUptodate(page)) {
2717 			err = -EIO;
2718 			goto unlock;
2719 		}
2720 		if (page_has_buffers(page))
2721 			goto has_buffers;
2722 	}
2723 	zero_user(page, offset, length);
2724 	set_page_dirty(page);
2725 	err = 0;
2726 
2727 unlock:
2728 	unlock_page(page);
2729 	page_cache_release(page);
2730 out:
2731 	return err;
2732 }
2733 EXPORT_SYMBOL(nobh_truncate_page);
2734 
2735 int block_truncate_page(struct address_space *mapping,
2736 			loff_t from, get_block_t *get_block)
2737 {
2738 	pgoff_t index = from >> PAGE_CACHE_SHIFT;
2739 	unsigned offset = from & (PAGE_CACHE_SIZE-1);
2740 	unsigned blocksize;
2741 	sector_t iblock;
2742 	unsigned length, pos;
2743 	struct inode *inode = mapping->host;
2744 	struct page *page;
2745 	struct buffer_head *bh;
2746 	int err;
2747 
2748 	blocksize = 1 << inode->i_blkbits;
2749 	length = offset & (blocksize - 1);
2750 
2751 	/* Block boundary? Nothing to do */
2752 	if (!length)
2753 		return 0;
2754 
2755 	length = blocksize - length;
2756 	iblock = (sector_t)index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
2757 
2758 	page = grab_cache_page(mapping, index);
2759 	err = -ENOMEM;
2760 	if (!page)
2761 		goto out;
2762 
2763 	if (!page_has_buffers(page))
2764 		create_empty_buffers(page, blocksize, 0);
2765 
2766 	/* Find the buffer that contains "offset" */
2767 	bh = page_buffers(page);
2768 	pos = blocksize;
2769 	while (offset >= pos) {
2770 		bh = bh->b_this_page;
2771 		iblock++;
2772 		pos += blocksize;
2773 	}
2774 
2775 	err = 0;
2776 	if (!buffer_mapped(bh)) {
2777 		WARN_ON(bh->b_size != blocksize);
2778 		err = get_block(inode, iblock, bh, 0);
2779 		if (err)
2780 			goto unlock;
2781 		/* unmapped? It's a hole - nothing to do */
2782 		if (!buffer_mapped(bh))
2783 			goto unlock;
2784 	}
2785 
2786 	/* Ok, it's mapped. Make sure it's up-to-date */
2787 	if (PageUptodate(page))
2788 		set_buffer_uptodate(bh);
2789 
2790 	if (!buffer_uptodate(bh) && !buffer_delay(bh) && !buffer_unwritten(bh)) {
2791 		err = -EIO;
2792 		ll_rw_block(READ, 1, &bh);
2793 		wait_on_buffer(bh);
2794 		/* Uhhuh. Read error. Complain and punt. */
2795 		if (!buffer_uptodate(bh))
2796 			goto unlock;
2797 	}
2798 
2799 	zero_user(page, offset, length);
2800 	mark_buffer_dirty(bh);
2801 	err = 0;
2802 
2803 unlock:
2804 	unlock_page(page);
2805 	page_cache_release(page);
2806 out:
2807 	return err;
2808 }
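
/*
 * Example (illustrative sketch, not part of this file): truncate paths use
 * block_truncate_page() to zero the tail of a partial final block.
 * "example_get_block" is a hypothetical get_block callback.
 */
static int example_zero_last_block(struct inode *inode)
{
	return block_truncate_page(inode->i_mapping, inode->i_size,
				   example_get_block);
}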
2809 
2810 /*
2811  * The generic ->writepage function for buffer-backed address_spaces
2812  */
2813 int block_write_full_page(struct page *page, get_block_t *get_block,
2814 			struct writeback_control *wbc)
2815 {
2816 	struct inode * const inode = page->mapping->host;
2817 	loff_t i_size = i_size_read(inode);
2818 	const pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
2819 	unsigned offset;
2820 
2821 	/* Is the page fully inside i_size? */
2822 	if (page->index < end_index)
2823 		return __block_write_full_page(inode, page, get_block, wbc);
2824 
2825 	/* Is the page fully outside i_size? (truncate in progress) */
2826 	offset = i_size & (PAGE_CACHE_SIZE-1);
2827 	if (page->index >= end_index+1 || !offset) {
2828 		/*
2829 		 * The page may have dirty, unmapped buffers.  For example,
2830 		 * they may have been added in ext3_writepage().  Make them
2831 		 * freeable here, so the page does not leak.
2832 		 */
2833 		do_invalidatepage(page, 0);
2834 		unlock_page(page);
2835 		return 0; /* don't care */
2836 	}
2837 
2838 	/*
2839 	 * The page straddles i_size.  It must be zeroed out on each and every
2840 	 * writepage invokation because it may be mmapped.  "A file is mapped
2841 	 * writepage invocation because it may be mmapped.  "A file is mapped
2842 	 * the  page size, the remaining memory is zeroed when mapped, and
2843 	 * the page size, the remaining memory is zeroed when mapped, and
2844 	 */
2845 	zero_user_segment(page, offset, PAGE_CACHE_SIZE);
2846 	return __block_write_full_page(inode, page, get_block, wbc);
2847 }
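
/*
 * Example (illustrative sketch, not part of this file): block_write_full_page()
 * is typically wired up as a filesystem's ->writepage address_space operation.
 * "example_get_block" is a hypothetical get_block callback.
 */
static int example_writepage(struct page *page, struct writeback_control *wbc)
{
	return block_write_full_page(page, example_get_block, wbc);
}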
2848 
2849 sector_t generic_block_bmap(struct address_space *mapping, sector_t block,
2850 			    get_block_t *get_block)
2851 {
2852 	struct buffer_head tmp;
2853 	struct inode *inode = mapping->host;
2854 	tmp.b_state = 0;
2855 	tmp.b_blocknr = 0;
2856 	tmp.b_size = 1 << inode->i_blkbits;
2857 	get_block(inode, block, &tmp, 0);
2858 	return tmp.b_blocknr;
2859 }
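
/*
 * Example (illustrative sketch, not part of this file): a filesystem's ->bmap
 * address_space operation is commonly a thin wrapper around
 * generic_block_bmap().  "example_get_block" is a hypothetical callback.
 */
static sector_t example_bmap(struct address_space *mapping, sector_t block)
{
	return generic_block_bmap(mapping, block, example_get_block);
}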
2860 
2861 static void end_bio_bh_io_sync(struct bio *bio, int err)
2862 {
2863 	struct buffer_head *bh = bio->bi_private;
2864 
2865 	if (err == -EOPNOTSUPP) {
2866 		set_bit(BIO_EOPNOTSUPP, &bio->bi_flags);
2867 		set_bit(BH_Eopnotsupp, &bh->b_state);
2868 	}
2869 
2870 	bh->b_end_io(bh, test_bit(BIO_UPTODATE, &bio->bi_flags));
2871 	bio_put(bio);
2872 }
2873 
2874 int submit_bh(int rw, struct buffer_head * bh)
2875 {
2876 	struct bio *bio;
2877 	int ret = 0;
2878 
2879 	BUG_ON(!buffer_locked(bh));
2880 	BUG_ON(!buffer_mapped(bh));
2881 	BUG_ON(!bh->b_end_io);
2882 
2883 	if (buffer_ordered(bh) && (rw == WRITE))
2884 		rw = WRITE_BARRIER;
2885 
2886 	/*
2887 	 * Only clear out a write error when rewriting, should this
2888 	 * include WRITE_SYNC as well?
2889 	 */
2890 	if (test_set_buffer_req(bh) && (rw == WRITE || rw == WRITE_BARRIER))
2891 		clear_buffer_write_io_error(bh);
2892 
2893 	/*
2894 	 * from here on down, it's all bio -- do the initial mapping,
2895 	 * submit_bio -> generic_make_request may further map this bio around
2896 	 */
2897 	bio = bio_alloc(GFP_NOIO, 1);
2898 
2899 	bio->bi_sector = bh->b_blocknr * (bh->b_size >> 9);
2900 	bio->bi_bdev = bh->b_bdev;
2901 	bio->bi_io_vec[0].bv_page = bh->b_page;
2902 	bio->bi_io_vec[0].bv_len = bh->b_size;
2903 	bio->bi_io_vec[0].bv_offset = bh_offset(bh);
2904 
2905 	bio->bi_vcnt = 1;
2906 	bio->bi_idx = 0;
2907 	bio->bi_size = bh->b_size;
2908 
2909 	bio->bi_end_io = end_bio_bh_io_sync;
2910 	bio->bi_private = bh;
2911 
2912 	bio_get(bio);
2913 	submit_bio(rw, bio);
2914 
2915 	if (bio_flagged(bio, BIO_EOPNOTSUPP))
2916 		ret = -EOPNOTSUPP;
2917 
2918 	bio_put(bio);
2919 	return ret;
2920 }
2921 
2922 /**
2923  * ll_rw_block: low-level access to block devices (DEPRECATED)
2924  * @rw: whether to %READ or %WRITE or %SWRITE or maybe %READA (readahead)
2925  * @nr: number of &struct buffer_heads in the array
2926  * @bhs: array of pointers to &struct buffer_head
2927  *
2928  * ll_rw_block() takes an array of pointers to &struct buffer_head, and
2929  * requests an I/O operation on them, either a %READ or a %WRITE.  The third
2930  * option, %SWRITE, is like %WRITE except that it makes sure the *current* data
2931  * in the buffers is sent to disk. The fourth %READA option is described in the documentation
2932  * for generic_make_request() which ll_rw_block() calls.
2933  *
2934  * This function drops any buffer that it cannot get a lock on (with the
2935  * BH_Lock state bit) unless SWRITE is required, any buffer that appears to be
2936  * clean when doing a write request, and any buffer that appears to be
2937  * up-to-date when doing read request.  Further it marks as clean buffers that
2938  * are processed for writing (the buffer cache won't assume that they are
2939  * actually clean until the buffer gets unlocked).
2940  *
2941  * ll_rw_block sets b_end_io to simple completion handler that marks
2942  * the buffer up-to-date (if approriate), unlocks the buffer and wakes
2943  * any waiters.
2944  *
2945  * All of the buffers must be for the same device, and must also be a
2946  * multiple of the current approved size for the device.
2947  */
2948 void ll_rw_block(int rw, int nr, struct buffer_head *bhs[])
2949 {
2950 	int i;
2951 
2952 	for (i = 0; i < nr; i++) {
2953 		struct buffer_head *bh = bhs[i];
2954 
2955 		if (rw == SWRITE)
2956 			lock_buffer(bh);
2957 		else if (test_set_buffer_locked(bh))
2958 			continue;
2959 
2960 		if (rw == WRITE || rw == SWRITE) {
2961 			if (test_clear_buffer_dirty(bh)) {
2962 				bh->b_end_io = end_buffer_write_sync;
2963 				get_bh(bh);
2964 				submit_bh(WRITE, bh);
2965 				continue;
2966 			}
2967 		} else {
2968 			if (!buffer_uptodate(bh)) {
2969 				bh->b_end_io = end_buffer_read_sync;
2970 				get_bh(bh);
2971 				submit_bh(rw, bh);
2972 				continue;
2973 			}
2974 		}
2975 		unlock_buffer(bh);
2976 	}
2977 }
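
/*
 * Example (illustrative sketch, not part of this file): a readahead-style use
 * of ll_rw_block() - submit a batch of mapped buffers, then wait only on the
 * buffer the caller actually needs.
 */
static int example_read_buffers(struct buffer_head *bhs[], int nr)
{
	ll_rw_block(READ, nr, bhs);
	wait_on_buffer(bhs[0]);
	return buffer_uptodate(bhs[0]) ? 0 : -EIO;
}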
2978 
2979 /*
2980  * For a data-integrity writeout, we need to wait upon any in-progress I/O
2981  * and then start new I/O and then wait upon it.  The caller must have a ref on
2982  * the buffer_head.
2983  */
2984 int sync_dirty_buffer(struct buffer_head *bh)
2985 {
2986 	int ret = 0;
2987 
2988 	WARN_ON(atomic_read(&bh->b_count) < 1);
2989 	lock_buffer(bh);
2990 	if (test_clear_buffer_dirty(bh)) {
2991 		get_bh(bh);
2992 		bh->b_end_io = end_buffer_write_sync;
2993 		ret = submit_bh(WRITE, bh);
2994 		wait_on_buffer(bh);
2995 		if (buffer_eopnotsupp(bh)) {
2996 			clear_buffer_eopnotsupp(bh);
2997 			ret = -EOPNOTSUPP;
2998 		}
2999 		if (!ret && !buffer_uptodate(bh))
3000 			ret = -EIO;
3001 	} else {
3002 		unlock_buffer(bh);
3003 	}
3004 	return ret;
3005 }
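
/*
 * Example (illustrative sketch, not part of this file): the usual
 * modify-then-flush pattern built on sync_dirty_buffer().  The caller is
 * assumed to hold a reference on bh and to have it mapped.
 */
static int example_write_block(struct buffer_head *bh, const void *data)
{
	lock_buffer(bh);
	memcpy(bh->b_data, data, bh->b_size);
	set_buffer_uptodate(bh);
	unlock_buffer(bh);
	mark_buffer_dirty(bh);
	return sync_dirty_buffer(bh);	/* 0 on success, -EIO on write error */
}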
3006 
3007 /*
3008  * try_to_free_buffers() checks if all the buffers on this particular page
3009  * are unused, and releases them if so.
3010  *
3011  * Exclusion against try_to_free_buffers may be obtained by either
3012  * locking the page or by holding its mapping's private_lock.
3013  *
3014  * If the page is dirty but all the buffers are clean then we need to
3015  * be sure to mark the page clean as well.  This is because the page
3016  * may be against a block device, and a later reattachment of buffers
3017  * to a dirty page will set *all* buffers dirty, which would corrupt
3018  * filesystem data on the same device.
3019  *
3020  * The same applies to regular filesystem pages: if all the buffers are
3021  * clean then we set the page clean and proceed.  To do that, we require
3022  * total exclusion from __set_page_dirty_buffers().  That is obtained with
3023  * private_lock.
3024  *
3025  * try_to_free_buffers() is non-blocking.
3026  */
3027 static inline int buffer_busy(struct buffer_head *bh)
3028 {
3029 	return atomic_read(&bh->b_count) |
3030 		(bh->b_state & ((1 << BH_Dirty) | (1 << BH_Lock)));
3031 }
3032 
3033 static int
3034 drop_buffers(struct page *page, struct buffer_head **buffers_to_free)
3035 {
3036 	struct buffer_head *head = page_buffers(page);
3037 	struct buffer_head *bh;
3038 
3039 	bh = head;
3040 	do {
3041 		if (buffer_write_io_error(bh) && page->mapping)
3042 			set_bit(AS_EIO, &page->mapping->flags);
3043 		if (buffer_busy(bh))
3044 			goto failed;
3045 		bh = bh->b_this_page;
3046 	} while (bh != head);
3047 
3048 	do {
3049 		struct buffer_head *next = bh->b_this_page;
3050 
3051 		if (bh->b_assoc_map)
3052 			__remove_assoc_queue(bh);
3053 		bh = next;
3054 	} while (bh != head);
3055 	*buffers_to_free = head;
3056 	__clear_page_buffers(page);
3057 	return 1;
3058 failed:
3059 	return 0;
3060 }
3061 
3062 int try_to_free_buffers(struct page *page)
3063 {
3064 	struct address_space * const mapping = page->mapping;
3065 	struct buffer_head *buffers_to_free = NULL;
3066 	int ret = 0;
3067 
3068 	BUG_ON(!PageLocked(page));
3069 	if (PageWriteback(page))
3070 		return 0;
3071 
3072 	if (mapping == NULL) {		/* can this still happen? */
3073 		ret = drop_buffers(page, &buffers_to_free);
3074 		goto out;
3075 	}
3076 
3077 	spin_lock(&mapping->private_lock);
3078 	ret = drop_buffers(page, &buffers_to_free);
3079 
3080 	/*
3081 	 * If the filesystem writes its buffers by hand (eg ext3)
3082 	 * then we can have clean buffers against a dirty page.  We
3083 	 * clean the page here; otherwise the VM will never notice
3084 	 * that the filesystem did any IO at all.
3085 	 *
3086 	 * Also, during truncate, discard_buffer will have marked all
3087 	 * the page's buffers clean.  We discover that here and clean
3088 	 * the page also.
3089 	 *
3090 	 * private_lock must be held over this entire operation in order
3091 	 * to synchronise against __set_page_dirty_buffers and prevent the
3092 	 * dirty bit from being lost.
3093 	 */
3094 	if (ret)
3095 		cancel_dirty_page(page, PAGE_CACHE_SIZE);
3096 	spin_unlock(&mapping->private_lock);
3097 out:
3098 	if (buffers_to_free) {
3099 		struct buffer_head *bh = buffers_to_free;
3100 
3101 		do {
3102 			struct buffer_head *next = bh->b_this_page;
3103 			free_buffer_head(bh);
3104 			bh = next;
3105 		} while (bh != buffers_to_free);
3106 	}
3107 	return ret;
3108 }
3109 EXPORT_SYMBOL(try_to_free_buffers);
3110 
3111 void block_sync_page(struct page *page)
3112 {
3113 	struct address_space *mapping;
3114 
3115 	smp_mb();
3116 	mapping = page_mapping(page);
3117 	if (mapping)
3118 		blk_run_backing_dev(mapping->backing_dev_info, page);
3119 }
3120 
3121 /*
3122  * There are no bdflush tunables left.  But distributions are
3123  * still running obsolete flush daemons, so we terminate them here.
3124  *
3125  * Use of bdflush() is deprecated and will be removed in a future kernel.
3126  * The `pdflush' kernel threads fully replace bdflush daemons and this call.
3127  */
3128 asmlinkage long sys_bdflush(int func, long data)
3129 {
3130 	static int msg_count;
3131 
3132 	if (!capable(CAP_SYS_ADMIN))
3133 		return -EPERM;
3134 
3135 	if (msg_count < 5) {
3136 		msg_count++;
3137 		printk(KERN_INFO
3138 			"warning: process `%s' used the obsolete bdflush"
3139 			" system call\n", current->comm);
3140 		printk(KERN_INFO "Fix your initscripts?\n");
3141 	}
3142 
3143 	if (func == 1)
3144 		do_exit(0);
3145 	return 0;
3146 }
3147 
3148 /*
3149  * Buffer-head allocation
3150  */
3151 static struct kmem_cache *bh_cachep;
3152 
3153 /*
3154  * Once the number of bh's in the machine exceeds this level, we start
3155  * stripping them in writeback.
3156  */
3157 static int max_buffer_heads;
3158 
3159 int buffer_heads_over_limit;
3160 
3161 struct bh_accounting {
3162 	int nr;			/* Number of live bh's */
3163 	int ratelimit;		/* Limit cacheline bouncing */
3164 };
3165 
3166 static DEFINE_PER_CPU(struct bh_accounting, bh_accounting) = {0, 0};
3167 
3168 static void recalc_bh_state(void)
3169 {
3170 	int i;
3171 	int tot = 0;
3172 
3173 	if (__get_cpu_var(bh_accounting).ratelimit++ < 4096)
3174 		return;
3175 	__get_cpu_var(bh_accounting).ratelimit = 0;
3176 	for_each_online_cpu(i)
3177 		tot += per_cpu(bh_accounting, i).nr;
3178 	buffer_heads_over_limit = (tot > max_buffer_heads);
3179 }
3180 
3181 struct buffer_head *alloc_buffer_head(gfp_t gfp_flags)
3182 {
3183 	struct buffer_head *ret = kmem_cache_alloc(bh_cachep,
3184 				set_migrateflags(gfp_flags, __GFP_RECLAIMABLE));
3185 	if (ret) {
3186 		INIT_LIST_HEAD(&ret->b_assoc_buffers);
3187 		get_cpu_var(bh_accounting).nr++;
3188 		recalc_bh_state();
3189 		put_cpu_var(bh_accounting);
3190 	}
3191 	return ret;
3192 }
3193 EXPORT_SYMBOL(alloc_buffer_head);
3194 
3195 void free_buffer_head(struct buffer_head *bh)
3196 {
3197 	BUG_ON(!list_empty(&bh->b_assoc_buffers));
3198 	kmem_cache_free(bh_cachep, bh);
3199 	get_cpu_var(bh_accounting).nr--;
3200 	recalc_bh_state();
3201 	put_cpu_var(bh_accounting);
3202 }
3203 EXPORT_SYMBOL(free_buffer_head);
3204 
3205 static void buffer_exit_cpu(int cpu)
3206 {
3207 	int i;
3208 	struct bh_lru *b = &per_cpu(bh_lrus, cpu);
3209 
3210 	for (i = 0; i < BH_LRU_SIZE; i++) {
3211 		brelse(b->bhs[i]);
3212 		b->bhs[i] = NULL;
3213 	}
3214 	get_cpu_var(bh_accounting).nr += per_cpu(bh_accounting, cpu).nr;
3215 	per_cpu(bh_accounting, cpu).nr = 0;
3216 	put_cpu_var(bh_accounting);
3217 }
3218 
3219 static int buffer_cpu_notify(struct notifier_block *self,
3220 			      unsigned long action, void *hcpu)
3221 {
3222 	if (action == CPU_DEAD || action == CPU_DEAD_FROZEN)
3223 		buffer_exit_cpu((unsigned long)hcpu);
3224 	return NOTIFY_OK;
3225 }
3226 
3227 /**
3228  * bh_uptodate_or_lock - Test whether the buffer is uptodate
3229  * @bh: struct buffer_head
3230  *
3231  * Returns true if the buffer is up-to-date; otherwise returns false
3232  * with the buffer locked.
3233  */
3234 int bh_uptodate_or_lock(struct buffer_head *bh)
3235 {
3236 	if (!buffer_uptodate(bh)) {
3237 		lock_buffer(bh);
3238 		if (!buffer_uptodate(bh))
3239 			return 0;
3240 		unlock_buffer(bh);
3241 	}
3242 	return 1;
3243 }
3244 EXPORT_SYMBOL(bh_uptodate_or_lock);
3245 
3246 /**
3247  * bh_submit_read - Submit a locked buffer for reading
3248  * @bh: struct buffer_head
3249  *
3250  * Returns zero on success and -EIO on error.
3251  */
3252 int bh_submit_read(struct buffer_head *bh)
3253 {
3254 	BUG_ON(!buffer_locked(bh));
3255 
3256 	if (buffer_uptodate(bh)) {
3257 		unlock_buffer(bh);
3258 		return 0;
3259 	}
3260 
3261 	get_bh(bh);
3262 	bh->b_end_io = end_buffer_read_sync;
3263 	submit_bh(READ, bh);
3264 	wait_on_buffer(bh);
3265 	if (buffer_uptodate(bh))
3266 		return 0;
3267 	return -EIO;
3268 }
3269 EXPORT_SYMBOL(bh_submit_read);
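
/*
 * Example (illustrative sketch, not part of this file): bh_uptodate_or_lock()
 * and bh_submit_read() pair up to read a buffer only when necessary.
 */
static int example_read_if_needed(struct buffer_head *bh)
{
	if (bh_uptodate_or_lock(bh))
		return 0;		/* already uptodate, buffer not locked */
	return bh_submit_read(bh);	/* buffer is locked: submit and wait */
}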
3270 
3271 static void
3272 init_buffer_head(struct kmem_cache *cachep, void *data)
3273 {
3274 	struct buffer_head *bh = data;
3275 
3276 	memset(bh, 0, sizeof(*bh));
3277 	INIT_LIST_HEAD(&bh->b_assoc_buffers);
3278 }
3279 
3280 void __init buffer_init(void)
3281 {
3282 	int nrpages;
3283 
3284 	bh_cachep = kmem_cache_create("buffer_head",
3285 			sizeof(struct buffer_head), 0,
3286 				(SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|
3287 				SLAB_MEM_SPREAD),
3288 				init_buffer_head);
3289 
3290 	/*
3291 	 * Limit the bh occupancy to 10% of ZONE_NORMAL
3292 	 */
3293 	nrpages = (nr_free_buffer_pages() * 10) / 100;
3294 	max_buffer_heads = nrpages * (PAGE_SIZE / sizeof(struct buffer_head));
3295 	hotcpu_notifier(buffer_cpu_notify, 0);
3296 }
3297 
3298 EXPORT_SYMBOL(__bforget);
3299 EXPORT_SYMBOL(__brelse);
3300 EXPORT_SYMBOL(__wait_on_buffer);
3301 EXPORT_SYMBOL(block_commit_write);
3302 EXPORT_SYMBOL(block_prepare_write);
3303 EXPORT_SYMBOL(block_page_mkwrite);
3304 EXPORT_SYMBOL(block_read_full_page);
3305 EXPORT_SYMBOL(block_sync_page);
3306 EXPORT_SYMBOL(block_truncate_page);
3307 EXPORT_SYMBOL(block_write_full_page);
3308 EXPORT_SYMBOL(cont_write_begin);
3309 EXPORT_SYMBOL(end_buffer_read_sync);
3310 EXPORT_SYMBOL(end_buffer_write_sync);
3311 EXPORT_SYMBOL(file_fsync);
3312 EXPORT_SYMBOL(fsync_bdev);
3313 EXPORT_SYMBOL(generic_block_bmap);
3314 EXPORT_SYMBOL(generic_commit_write);
3315 EXPORT_SYMBOL(generic_cont_expand_simple);
3316 EXPORT_SYMBOL(init_buffer);
3317 EXPORT_SYMBOL(invalidate_bdev);
3318 EXPORT_SYMBOL(ll_rw_block);
3319 EXPORT_SYMBOL(mark_buffer_dirty);
3320 EXPORT_SYMBOL(submit_bh);
3321 EXPORT_SYMBOL(sync_dirty_buffer);
3322 EXPORT_SYMBOL(unlock_buffer);
3323