xref: /linux-6.15/fs/buffer.c (revision 7ec7fb39)
1 /*
2  *  linux/fs/buffer.c
3  *
4  *  Copyright (C) 1991, 1992, 2002  Linus Torvalds
5  */
6 
7 /*
8  * Start bdflush() with kernel_thread not syscall - Paul Gortmaker, 12/95
9  *
10  * Removed a lot of unnecessary code and simplified things now that
11  * the buffer cache isn't our primary cache - Andrew Tridgell 12/96
12  *
13  * Speed up hash, lru, and free list operations.  Use gfp() for allocating
14  * hash table, use SLAB cache for buffer heads. SMP threading.  -DaveM
15  *
16  * Added 32k buffer block sizes - these are required on older ARM systems. - RMK
17  *
18  * async buffer flushing, 1999 Andrea Arcangeli <[email protected]>
19  */
20 
21 #include <linux/kernel.h>
22 #include <linux/syscalls.h>
23 #include <linux/fs.h>
24 #include <linux/mm.h>
25 #include <linux/percpu.h>
26 #include <linux/slab.h>
27 #include <linux/capability.h>
28 #include <linux/blkdev.h>
29 #include <linux/file.h>
30 #include <linux/quotaops.h>
31 #include <linux/highmem.h>
32 #include <linux/module.h>
33 #include <linux/writeback.h>
34 #include <linux/hash.h>
35 #include <linux/suspend.h>
36 #include <linux/buffer_head.h>
37 #include <linux/task_io_accounting_ops.h>
38 #include <linux/bio.h>
39 #include <linux/notifier.h>
40 #include <linux/cpu.h>
41 #include <linux/bitops.h>
42 #include <linux/mpage.h>
43 #include <linux/bit_spinlock.h>
44 
45 static int fsync_buffers_list(spinlock_t *lock, struct list_head *list);
46 
47 #define BH_ENTRY(list) list_entry((list), struct buffer_head, b_assoc_buffers)
48 
49 inline void
50 init_buffer(struct buffer_head *bh, bh_end_io_t *handler, void *private)
51 {
52 	bh->b_end_io = handler;
53 	bh->b_private = private;
54 }
55 
56 static int sync_buffer(void *word)
57 {
58 	struct block_device *bd;
59 	struct buffer_head *bh
60 		= container_of(word, struct buffer_head, b_state);
61 
62 	smp_mb();
63 	bd = bh->b_bdev;
64 	if (bd)
65 		blk_run_address_space(bd->bd_inode->i_mapping);
66 	io_schedule();
67 	return 0;
68 }
69 
70 void __lock_buffer(struct buffer_head *bh)
71 {
72 	wait_on_bit_lock(&bh->b_state, BH_Lock, sync_buffer,
73 							TASK_UNINTERRUPTIBLE);
74 }
75 EXPORT_SYMBOL(__lock_buffer);
76 
77 void unlock_buffer(struct buffer_head *bh)
78 {
79 	clear_bit_unlock(BH_Lock, &bh->b_state);
80 	smp_mb__after_clear_bit();
81 	wake_up_bit(&bh->b_state, BH_Lock);
82 }
83 
84 /*
85  * Block until a buffer comes unlocked.  This doesn't stop it
86  * from becoming locked again - you have to lock it yourself
87  * if you want to preserve its state.
88  */
89 void __wait_on_buffer(struct buffer_head * bh)
90 {
91 	wait_on_bit(&bh->b_state, BH_Lock, sync_buffer, TASK_UNINTERRUPTIBLE);
92 }
93 
94 static void
95 __clear_page_buffers(struct page *page)
96 {
97 	ClearPagePrivate(page);
98 	set_page_private(page, 0);
99 	page_cache_release(page);
100 }
101 
102 
103 static int quiet_error(struct buffer_head *bh)
104 {
105 	if (!test_bit(BH_Quiet, &bh->b_state) && printk_ratelimit())
106 		return 0;
107 	return 1;
108 }
109 
110 
111 static void buffer_io_error(struct buffer_head *bh)
112 {
113 	char b[BDEVNAME_SIZE];
114 	printk(KERN_ERR "Buffer I/O error on device %s, logical block %Lu\n",
115 			bdevname(bh->b_bdev, b),
116 			(unsigned long long)bh->b_blocknr);
117 }
118 
119 /*
120  * End-of-IO handler helper function which does not touch the bh after
121  * unlocking it.
122  * Note: unlock_buffer() sort-of does touch the bh after unlocking it, but
123  * a race there is benign: unlock_buffer() only uses the bh's address for
124  * hashing after unlocking the buffer, so it doesn't actually touch the bh
125  * itself.
126  */
127 static void __end_buffer_read_notouch(struct buffer_head *bh, int uptodate)
128 {
129 	if (uptodate) {
130 		set_buffer_uptodate(bh);
131 	} else {
132 		/* This happens, due to failed READA attempts. */
133 		clear_buffer_uptodate(bh);
134 	}
135 	unlock_buffer(bh);
136 }
137 
138 /*
139  * Default synchronous end-of-IO handler..  Just mark it up-to-date and
140  * unlock the buffer. This is what ll_rw_block uses too.
141  */
142 void end_buffer_read_sync(struct buffer_head *bh, int uptodate)
143 {
144 	__end_buffer_read_notouch(bh, uptodate);
145 	put_bh(bh);
146 }
147 
148 void end_buffer_write_sync(struct buffer_head *bh, int uptodate)
149 {
150 	char b[BDEVNAME_SIZE];
151 
152 	if (uptodate) {
153 		set_buffer_uptodate(bh);
154 	} else {
155 		if (!buffer_eopnotsupp(bh) && !quiet_error(bh)) {
156 			buffer_io_error(bh);
157 			printk(KERN_WARNING "lost page write due to "
158 					"I/O error on %s\n",
159 				       bdevname(bh->b_bdev, b));
160 		}
161 		set_buffer_write_io_error(bh);
162 		clear_buffer_uptodate(bh);
163 	}
164 	unlock_buffer(bh);
165 	put_bh(bh);
166 }
167 
168 /*
169  * Write out and wait upon all the dirty data associated with a block
170  * device via its mapping.  Does not take the superblock lock.
171  */
172 int sync_blockdev(struct block_device *bdev)
173 {
174 	int ret = 0;
175 
176 	if (bdev)
177 		ret = filemap_write_and_wait(bdev->bd_inode->i_mapping);
178 	return ret;
179 }
180 EXPORT_SYMBOL(sync_blockdev);
181 
182 /*
183  * Write out and wait upon all dirty data associated with this
184  * device.   Filesystem data as well as the underlying block
185  * device.  Takes the superblock lock.
186  */
187 int fsync_bdev(struct block_device *bdev)
188 {
189 	struct super_block *sb = get_super(bdev);
190 	if (sb) {
191 		int res = fsync_super(sb);
192 		drop_super(sb);
193 		return res;
194 	}
195 	return sync_blockdev(bdev);
196 }
197 
198 /**
199  * freeze_bdev  --  lock a filesystem and force it into a consistent state
200  * @bdev:	blockdevice to lock
201  *
202  * This takes the block device bd_mount_sem to make sure no new mounts
203  * happen on bdev until thaw_bdev() is called.
204  * If a superblock is found on this device, we take the s_umount semaphore
205  * on it to make sure nobody unmounts until the snapshot creation is done.
206  */
207 struct super_block *freeze_bdev(struct block_device *bdev)
208 {
209 	struct super_block *sb;
210 
211 	down(&bdev->bd_mount_sem);
212 	sb = get_super(bdev);
213 	if (sb && !(sb->s_flags & MS_RDONLY)) {
214 		sb->s_frozen = SB_FREEZE_WRITE;
215 		smp_wmb();
216 
217 		__fsync_super(sb);
218 
219 		sb->s_frozen = SB_FREEZE_TRANS;
220 		smp_wmb();
221 
222 		sync_blockdev(sb->s_bdev);
223 
224 		if (sb->s_op->write_super_lockfs)
225 			sb->s_op->write_super_lockfs(sb);
226 	}
227 
228 	sync_blockdev(bdev);
229 	return sb;	/* thaw_bdev releases s->s_umount and bd_mount_sem */
230 }
231 EXPORT_SYMBOL(freeze_bdev);
232 
233 /**
234  * thaw_bdev  -- unlock filesystem
235  * @bdev:	blockdevice to unlock
236  * @sb:		associated superblock
237  *
238  * Unlocks the filesystem and marks it writeable again after freeze_bdev().
239  */
240 void thaw_bdev(struct block_device *bdev, struct super_block *sb)
241 {
242 	if (sb) {
243 		BUG_ON(sb->s_bdev != bdev);
244 
245 		if (sb->s_op->unlockfs)
246 			sb->s_op->unlockfs(sb);
247 		sb->s_frozen = SB_UNFROZEN;
248 		smp_wmb();
249 		wake_up(&sb->s_wait_unfrozen);
250 		drop_super(sb);
251 	}
252 
253 	up(&bdev->bd_mount_sem);
254 }
255 EXPORT_SYMBOL(thaw_bdev);
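
/*
 * Typical usage (a sketch, not lifted from any in-tree caller): a snapshot
 * implementation brackets its work with the pair above.  Here 'bdev' is the
 * device being snapshotted and take_snapshot() is a hypothetical helper that
 * does the actual copy:
 *
 *	struct super_block *sb;
 *	int err;
 *
 *	sb = freeze_bdev(bdev);
 *	err = take_snapshot(bdev);
 *	thaw_bdev(bdev, sb);
 */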
256 
257 /*
258  * Various filesystems appear to want __find_get_block to be non-blocking.
259  * But it's the page lock which protects the buffers.  To get around this,
260  * we get exclusion from try_to_free_buffers with the blockdev mapping's
261  * private_lock.
262  *
263  * Hack idea: for the blockdev mapping, i_bufferlist_lock contention
264  * may be quite high.  This code could TryLock the page, and if that
265  * succeeds, there is no need to take private_lock. (But if
266  * private_lock is contended then so is mapping->tree_lock).
267  */
268 static struct buffer_head *
269 __find_get_block_slow(struct block_device *bdev, sector_t block)
270 {
271 	struct inode *bd_inode = bdev->bd_inode;
272 	struct address_space *bd_mapping = bd_inode->i_mapping;
273 	struct buffer_head *ret = NULL;
274 	pgoff_t index;
275 	struct buffer_head *bh;
276 	struct buffer_head *head;
277 	struct page *page;
278 	int all_mapped = 1;
279 
280 	index = block >> (PAGE_CACHE_SHIFT - bd_inode->i_blkbits);
281 	page = find_get_page(bd_mapping, index);
282 	if (!page)
283 		goto out;
284 
285 	spin_lock(&bd_mapping->private_lock);
286 	if (!page_has_buffers(page))
287 		goto out_unlock;
288 	head = page_buffers(page);
289 	bh = head;
290 	do {
291 		if (bh->b_blocknr == block) {
292 			ret = bh;
293 			get_bh(bh);
294 			goto out_unlock;
295 		}
296 		if (!buffer_mapped(bh))
297 			all_mapped = 0;
298 		bh = bh->b_this_page;
299 	} while (bh != head);
300 
301 	/* we might be here because some of the buffers on this page are
302 	 * not mapped.  This is due to various races between
303 	 * file io on the block device and getblk.  It gets dealt with
304 	 * elsewhere, don't buffer_error if we had some unmapped buffers
305 	 */
306 	if (all_mapped) {
307 		printk("__find_get_block_slow() failed. "
308 			"block=%llu, b_blocknr=%llu\n",
309 			(unsigned long long)block,
310 			(unsigned long long)bh->b_blocknr);
311 		printk("b_state=0x%08lx, b_size=%zu\n",
312 			bh->b_state, bh->b_size);
313 		printk("device blocksize: %d\n", 1 << bd_inode->i_blkbits);
314 	}
315 out_unlock:
316 	spin_unlock(&bd_mapping->private_lock);
317 	page_cache_release(page);
318 out:
319 	return ret;
320 }
321 
322 /* If invalidate_buffers() will trash dirty buffers, it means some kind
323    of fs corruption is going on. Trashing dirty data always implies losing
324    information that was supposed to be just stored on the physical layer
325    by the user.
326 
327    Thus invalidate_buffers in general usage is not allowed to trash
328    dirty buffers. For example ioctl(BLKFLSBUF) expects dirty data to
329    be preserved.  These buffers are simply skipped.
330 
331    We also skip buffers which are still in use.  For example this can
332    happen if a userspace program is reading the block device.
333 
334    NOTE: if the user removes a removable-media disk while there is still
335    dirty data that has not been synced to disk (due to a bug in the device
336    driver or to an error by the user), then by not destroying the dirty
337    buffers we could also corrupt the next media inserted.  A parameter is
338    therefore needed to handle this case as safely as possible (trying not
339    to corrupt the newly inserted disk with data belonging to the old, now
340    corrupted one).  Also, for a ramdisk the natural way to release its
341    memory is to destroy the dirty buffers.
342 
343    These are two special cases. Normal usage implies that the device driver
344    issues a sync on the device (without waiting for I/O completion) and
345    then calls invalidate_buffers(), which doesn't trash dirty buffers.
346 
347    For handling cache coherency with the blkdev pagecache the 'update' case
348    has been introduced. It is needed to re-read from disk any pinned
349    buffer. NOTE: re-reading from disk is destructive so we can do it only
350    when we assume nobody is changing the buffercache under our I/O and when
351    we think the disk contains more recent information than the buffercache.
352    The update == 1 pass marks the buffers we need to update, the update == 2
353    pass does the actual I/O. */
354 void invalidate_bdev(struct block_device *bdev)
355 {
356 	struct address_space *mapping = bdev->bd_inode->i_mapping;
357 
358 	if (mapping->nrpages == 0)
359 		return;
360 
361 	invalidate_bh_lrus();
362 	invalidate_mapping_pages(mapping, 0, -1);
363 }
364 
365 /*
366  * Kick pdflush then try to free up some ZONE_NORMAL memory.
367  */
368 static void free_more_memory(void)
369 {
370 	struct zone *zone;
371 	int nid;
372 
373 	wakeup_pdflush(1024);
374 	yield();
375 
376 	for_each_online_node(nid) {
377 		(void)first_zones_zonelist(node_zonelist(nid, GFP_NOFS),
378 						gfp_zone(GFP_NOFS), NULL,
379 						&zone);
380 		if (zone)
381 			try_to_free_pages(node_zonelist(nid, GFP_NOFS), 0,
382 						GFP_NOFS);
383 	}
384 }
385 
386 /*
387  * I/O completion handler for block_read_full_page() - pages
388  * which come unlocked at the end of I/O.
389  */
390 static void end_buffer_async_read(struct buffer_head *bh, int uptodate)
391 {
392 	unsigned long flags;
393 	struct buffer_head *first;
394 	struct buffer_head *tmp;
395 	struct page *page;
396 	int page_uptodate = 1;
397 
398 	BUG_ON(!buffer_async_read(bh));
399 
400 	page = bh->b_page;
401 	if (uptodate) {
402 		set_buffer_uptodate(bh);
403 	} else {
404 		clear_buffer_uptodate(bh);
405 		if (!quiet_error(bh))
406 			buffer_io_error(bh);
407 		SetPageError(page);
408 	}
409 
410 	/*
411 	 * Be _very_ careful from here on. Bad things can happen if
412 	 * two buffer heads end IO at almost the same time and both
413 	 * decide that the page is now completely done.
414 	 */
415 	first = page_buffers(page);
416 	local_irq_save(flags);
417 	bit_spin_lock(BH_Uptodate_Lock, &first->b_state);
418 	clear_buffer_async_read(bh);
419 	unlock_buffer(bh);
420 	tmp = bh;
421 	do {
422 		if (!buffer_uptodate(tmp))
423 			page_uptodate = 0;
424 		if (buffer_async_read(tmp)) {
425 			BUG_ON(!buffer_locked(tmp));
426 			goto still_busy;
427 		}
428 		tmp = tmp->b_this_page;
429 	} while (tmp != bh);
430 	bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
431 	local_irq_restore(flags);
432 
433 	/*
434 	 * If none of the buffers had errors and they are all
435 	 * uptodate then we can set the page uptodate.
436 	 */
437 	if (page_uptodate && !PageError(page))
438 		SetPageUptodate(page);
439 	unlock_page(page);
440 	return;
441 
442 still_busy:
443 	bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
444 	local_irq_restore(flags);
445 	return;
446 }
447 
448 /*
449  * Completion handler for block_write_full_page() - pages which are unlocked
450  * during I/O, and which have PageWriteback cleared upon I/O completion.
451  */
452 static void end_buffer_async_write(struct buffer_head *bh, int uptodate)
453 {
454 	char b[BDEVNAME_SIZE];
455 	unsigned long flags;
456 	struct buffer_head *first;
457 	struct buffer_head *tmp;
458 	struct page *page;
459 
460 	BUG_ON(!buffer_async_write(bh));
461 
462 	page = bh->b_page;
463 	if (uptodate) {
464 		set_buffer_uptodate(bh);
465 	} else {
466 		if (!quiet_error(bh)) {
467 			buffer_io_error(bh);
468 			printk(KERN_WARNING "lost page write due to "
469 					"I/O error on %s\n",
470 			       bdevname(bh->b_bdev, b));
471 		}
472 		set_bit(AS_EIO, &page->mapping->flags);
473 		set_buffer_write_io_error(bh);
474 		clear_buffer_uptodate(bh);
475 		SetPageError(page);
476 	}
477 
478 	first = page_buffers(page);
479 	local_irq_save(flags);
480 	bit_spin_lock(BH_Uptodate_Lock, &first->b_state);
481 
482 	clear_buffer_async_write(bh);
483 	unlock_buffer(bh);
484 	tmp = bh->b_this_page;
485 	while (tmp != bh) {
486 		if (buffer_async_write(tmp)) {
487 			BUG_ON(!buffer_locked(tmp));
488 			goto still_busy;
489 		}
490 		tmp = tmp->b_this_page;
491 	}
492 	bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
493 	local_irq_restore(flags);
494 	end_page_writeback(page);
495 	return;
496 
497 still_busy:
498 	bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
499 	local_irq_restore(flags);
500 	return;
501 }
502 
503 /*
504  * If a page's buffers are under async read-in (end_buffer_async_read
505  * completion) then there is a possibility that another thread of
506  * control could lock one of the buffers after it has completed
507  * but while some of the other buffers have not completed.  This
508  * locked buffer would confuse end_buffer_async_read() into not unlocking
509  * the page.  So the absence of BH_Async_Read tells end_buffer_async_read()
510  * that this buffer is not under async I/O.
511  *
512  * The page comes unlocked when it has no locked buffer_async buffers
513  * left.
514  *
515  * PageLocked prevents anyone from starting new async I/O against any
516  * of the buffers.
517  *
518  * PageWriteback is used to prevent simultaneous writeout of the same
519  * page.
520  *
521  * PageLocked prevents anyone from starting writeback of a page which is
522  * under read I/O (PageWriteback is only ever set against a locked page).
523  */
524 static void mark_buffer_async_read(struct buffer_head *bh)
525 {
526 	bh->b_end_io = end_buffer_async_read;
527 	set_buffer_async_read(bh);
528 }
529 
530 void mark_buffer_async_write(struct buffer_head *bh)
531 {
532 	bh->b_end_io = end_buffer_async_write;
533 	set_buffer_async_write(bh);
534 }
535 EXPORT_SYMBOL(mark_buffer_async_write);
536 
537 
538 /*
539  * fs/buffer.c contains helper functions for buffer-backed address space's
540  * fsync functions.  A common requirement for buffer-based filesystems is
541  * that certain data from the backing blockdev needs to be written out for
542  * a successful fsync().  For example, ext2 indirect blocks need to be
543  * written back and waited upon before fsync() returns.
544  *
545  * The functions mark_buffer_inode_dirty(), fsync_inode_buffers(),
546  * inode_has_buffers() and invalidate_inode_buffers() are provided for the
547  * management of a list of dependent buffers at ->i_mapping->private_list.
548  *
549  * Locking is a little subtle: try_to_free_buffers() will remove buffers
550  * from their controlling inode's queue when they are being freed.  But
551  * try_to_free_buffers() will be operating against the *blockdev* mapping
552  * at the time, not against the S_ISREG file which depends on those buffers.
553  * So the locking for private_list is via the private_lock in the address_space
554  * which backs the buffers.  Which is different from the address_space
555  * against which the buffers are listed.  So for a particular address_space,
556  * mapping->private_lock does *not* protect mapping->private_list!  In fact,
557  * mapping->private_list will always be protected by the backing blockdev's
558  * ->private_lock.
559  *
560  * Which introduces a requirement: all buffers on an address_space's
561  * ->private_list must be from the same address_space: the blockdev's.
562  *
563  * address_spaces which do not place buffers at ->private_list via these
564  * utility functions are free to use private_lock and private_list for
565  * whatever they want.  The only requirement is that list_empty(private_list)
566  * be true at clear_inode() time.
567  *
568  * FIXME: clear_inode should not call invalidate_inode_buffers().  The
569  * filesystems should do that.  invalidate_inode_buffers() should just go
570  * BUG_ON(!list_empty).
571  *
572  * FIXME: mark_buffer_dirty_inode() is a data-plane operation.  It should
573  * take an address_space, not an inode.  And it should be called
574  * mark_buffer_dirty_fsync() to clearly define why those buffers are being
575  * queued up.
576  *
577  * FIXME: mark_buffer_dirty_inode() doesn't need to add the buffer to the
578  * list if it is already on a list.  Because if the buffer is on a list,
579  * it *must* already be on the right one.  If not, the filesystem is being
580  * silly.  This will save a ton of locking.  But first we have to ensure
581  * that buffers are taken *off* the old inode's list when they are freed
582  * (presumably in truncate).  That requires careful auditing of all
583  * filesystems (do it inside bforget()).  It could also be done by bringing
584  * b_inode back.
585  */
586 
587 /*
588  * The buffer's backing address_space's private_lock must be held
589  */
590 static void __remove_assoc_queue(struct buffer_head *bh)
591 {
592 	list_del_init(&bh->b_assoc_buffers);
593 	WARN_ON(!bh->b_assoc_map);
594 	if (buffer_write_io_error(bh))
595 		set_bit(AS_EIO, &bh->b_assoc_map->flags);
596 	bh->b_assoc_map = NULL;
597 }
598 
599 int inode_has_buffers(struct inode *inode)
600 {
601 	return !list_empty(&inode->i_data.private_list);
602 }
603 
604 /*
605  * osync is designed to support O_SYNC io.  It waits synchronously for
606  * all already-submitted IO to complete, but does not queue any new
607  * writes to the disk.
608  *
609  * To do O_SYNC writes, just queue the buffer writes with ll_rw_block as
610  * you dirty the buffers, and then use osync_inode_buffers to wait for
611  * completion.  Any other dirty buffers which are not yet queued for
612  * write will not be flushed to disk by the osync.
613  */
614 static int osync_buffers_list(spinlock_t *lock, struct list_head *list)
615 {
616 	struct buffer_head *bh;
617 	struct list_head *p;
618 	int err = 0;
619 
620 	spin_lock(lock);
621 repeat:
622 	list_for_each_prev(p, list) {
623 		bh = BH_ENTRY(p);
624 		if (buffer_locked(bh)) {
625 			get_bh(bh);
626 			spin_unlock(lock);
627 			wait_on_buffer(bh);
628 			if (!buffer_uptodate(bh))
629 				err = -EIO;
630 			brelse(bh);
631 			spin_lock(lock);
632 			goto repeat;
633 		}
634 	}
635 	spin_unlock(lock);
636 	return err;
637 }
638 
639 /**
640  * sync_mapping_buffers - write out & wait upon a mapping's "associated" buffers
641  * @mapping: the mapping which wants those buffers written
642  *
643  * Starts I/O against the buffers at mapping->private_list, and waits upon
644  * that I/O.
645  *
646  * Basically, this is a convenience function for fsync().
647  * @mapping is a file or directory which needs those buffers to be written for
648  * a successful fsync().
649  */
650 int sync_mapping_buffers(struct address_space *mapping)
651 {
652 	struct address_space *buffer_mapping = mapping->assoc_mapping;
653 
654 	if (buffer_mapping == NULL || list_empty(&mapping->private_list))
655 		return 0;
656 
657 	return fsync_buffers_list(&buffer_mapping->private_lock,
658 					&mapping->private_list);
659 }
660 EXPORT_SYMBOL(sync_mapping_buffers);
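
/*
 * Sketch of the usual caller, assuming an ext2-like filesystem's ->fsync()
 * ('inode' is the inode being synced; writing the inode itself is elided):
 *
 *	static int my_fsync(struct file *file, struct dentry *dentry, int datasync)
 *	{
 *		struct inode *inode = dentry->d_inode;
 *		int err;
 *
 *		err = sync_mapping_buffers(inode->i_mapping);
 *		if (!(inode->i_state & I_DIRTY))
 *			return err;
 *		...then write the inode itself via the fs's own sync helper...
 *		return err;
 *	}
 */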
661 
662 /*
663  * Called when we've recently written block `bblock', and it is known that
664  * `bblock' was for a buffer_boundary() buffer.  This means that the block at
665  * `bblock + 1' is probably a dirty indirect block.  Hunt it down and, if it's
666  * dirty, schedule it for IO.  So that indirects merge nicely with their data.
667  */
668 void write_boundary_block(struct block_device *bdev,
669 			sector_t bblock, unsigned blocksize)
670 {
671 	struct buffer_head *bh = __find_get_block(bdev, bblock + 1, blocksize);
672 	if (bh) {
673 		if (buffer_dirty(bh))
674 			ll_rw_block(WRITE, 1, &bh);
675 		put_bh(bh);
676 	}
677 }
678 
679 void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode)
680 {
681 	struct address_space *mapping = inode->i_mapping;
682 	struct address_space *buffer_mapping = bh->b_page->mapping;
683 
684 	mark_buffer_dirty(bh);
685 	if (!mapping->assoc_mapping) {
686 		mapping->assoc_mapping = buffer_mapping;
687 	} else {
688 		BUG_ON(mapping->assoc_mapping != buffer_mapping);
689 	}
690 	if (!bh->b_assoc_map) {
691 		spin_lock(&buffer_mapping->private_lock);
692 		list_move_tail(&bh->b_assoc_buffers,
693 				&mapping->private_list);
694 		bh->b_assoc_map = mapping;
695 		spin_unlock(&buffer_mapping->private_lock);
696 	}
697 }
698 EXPORT_SYMBOL(mark_buffer_dirty_inode);
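
/*
 * Sketch: a filesystem ties a metadata block (say, an indirect block at
 * block number 'indirect', a name assumed here) to a file so that
 * sync_mapping_buffers() will write it at fsync() time:
 *
 *	struct buffer_head *bh = sb_bread(inode->i_sb, indirect);
 *
 *	if (!bh)
 *		return -EIO;
 *	...update the indirect entries in bh->b_data...
 *	mark_buffer_dirty_inode(bh, inode);
 *	brelse(bh);
 */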
699 
700 /*
701  * Mark the page dirty, and set it dirty in the radix tree, and mark the inode
702  * dirty.
703  *
704  * If warn is true, then emit a warning if the page is not uptodate and has
705  * not been truncated.
706  */
707 static int __set_page_dirty(struct page *page,
708 		struct address_space *mapping, int warn)
709 {
710 	if (unlikely(!mapping))
711 		return !TestSetPageDirty(page);
712 
713 	if (TestSetPageDirty(page))
714 		return 0;
715 
716 	spin_lock_irq(&mapping->tree_lock);
717 	if (page->mapping) {	/* Race with truncate? */
718 		WARN_ON_ONCE(warn && !PageUptodate(page));
719 
720 		if (mapping_cap_account_dirty(mapping)) {
721 			__inc_zone_page_state(page, NR_FILE_DIRTY);
722 			__inc_bdi_stat(mapping->backing_dev_info,
723 					BDI_RECLAIMABLE);
724 			task_io_account_write(PAGE_CACHE_SIZE);
725 		}
726 		radix_tree_tag_set(&mapping->page_tree,
727 				page_index(page), PAGECACHE_TAG_DIRTY);
728 	}
729 	spin_unlock_irq(&mapping->tree_lock);
730 	__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
731 
732 	return 1;
733 }
734 
735 /*
736  * Add a page to the dirty page list.
737  *
738  * It is a sad fact of life that this function is called from several places
739  * deeply under spinlocking.  It may not sleep.
740  *
741  * If the page has buffers, the uptodate buffers are set dirty, to preserve
742  * dirty-state coherency between the page and the buffers.  If the page does
743  * not have buffers then when they are later attached they will all be set
744  * dirty.
745  *
746  * The buffers are dirtied before the page is dirtied.  There's a small race
747  * window in which a writepage caller may see the page cleanness but not the
748  * buffer dirtiness.  That's fine.  If this code were to set the page dirty
749  * before the buffers, a concurrent writepage caller could clear the page dirty
750  * bit, see a bunch of clean buffers and we'd end up with dirty buffers/clean
751  * page on the dirty page list.
752  *
753  * We use private_lock to lock against try_to_free_buffers while using the
754  * page's buffer list.  Also use this to protect against clean buffers being
755  * added to the page after it was set dirty.
756  *
757  * FIXME: may need to call ->reservepage here as well.  That's rather up to the
758  * address_space though.
759  */
760 int __set_page_dirty_buffers(struct page *page)
761 {
762 	struct address_space *mapping = page_mapping(page);
763 
764 	if (unlikely(!mapping))
765 		return !TestSetPageDirty(page);
766 
767 	spin_lock(&mapping->private_lock);
768 	if (page_has_buffers(page)) {
769 		struct buffer_head *head = page_buffers(page);
770 		struct buffer_head *bh = head;
771 
772 		do {
773 			set_buffer_dirty(bh);
774 			bh = bh->b_this_page;
775 		} while (bh != head);
776 	}
777 	spin_unlock(&mapping->private_lock);
778 
779 	return __set_page_dirty(page, mapping, 1);
780 }
781 EXPORT_SYMBOL(__set_page_dirty_buffers);
782 
783 /*
784  * Write out and wait upon a list of buffers.
785  *
786  * We have conflicting pressures: we want to make sure that all
787  * initially dirty buffers get waited on, but that any subsequently
788  * dirtied buffers don't.  After all, we don't want fsync to last
789  * forever if somebody is actively writing to the file.
790  *
791  * Do this in two main stages: first we copy dirty buffers to a
792  * temporary inode list, queueing the writes as we go.  Then we clean
793  * up, waiting for those writes to complete.
794  *
795  * During this second stage, any subsequent updates to the file may end
796  * up refiling the buffer on the original inode's dirty list again, so
797  * there is a chance we will end up with a buffer queued for write but
798  * not yet completed on that list.  So, as a final cleanup we go through
799  * the osync code to catch these locked, dirty buffers without requeuing
800  * any newly dirty buffers for write.
801  */
802 static int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
803 {
804 	struct buffer_head *bh;
805 	struct list_head tmp;
806 	struct address_space *mapping;
807 	int err = 0, err2;
808 
809 	INIT_LIST_HEAD(&tmp);
810 
811 	spin_lock(lock);
812 	while (!list_empty(list)) {
813 		bh = BH_ENTRY(list->next);
814 		mapping = bh->b_assoc_map;
815 		__remove_assoc_queue(bh);
816 		/* Avoid race with mark_buffer_dirty_inode() which does
817 		 * a lockless check and we rely on seeing the dirty bit */
818 		smp_mb();
819 		if (buffer_dirty(bh) || buffer_locked(bh)) {
820 			list_add(&bh->b_assoc_buffers, &tmp);
821 			bh->b_assoc_map = mapping;
822 			if (buffer_dirty(bh)) {
823 				get_bh(bh);
824 				spin_unlock(lock);
825 				/*
826 				 * Ensure any pending I/O completes so that
827 				 * ll_rw_block() actually writes the current
828 				 * contents - it is a noop if I/O is still in
829 				 * flight on potentially older contents.
830 				 */
831 				ll_rw_block(SWRITE_SYNC, 1, &bh);
832 				brelse(bh);
833 				spin_lock(lock);
834 			}
835 		}
836 	}
837 
838 	while (!list_empty(&tmp)) {
839 		bh = BH_ENTRY(tmp.prev);
840 		get_bh(bh);
841 		mapping = bh->b_assoc_map;
842 		__remove_assoc_queue(bh);
843 		/* Avoid race with mark_buffer_dirty_inode() which does
844 		 * a lockless check and we rely on seeing the dirty bit */
845 		smp_mb();
846 		if (buffer_dirty(bh)) {
847 			list_add(&bh->b_assoc_buffers,
848 				 &mapping->private_list);
849 			bh->b_assoc_map = mapping;
850 		}
851 		spin_unlock(lock);
852 		wait_on_buffer(bh);
853 		if (!buffer_uptodate(bh))
854 			err = -EIO;
855 		brelse(bh);
856 		spin_lock(lock);
857 	}
858 
859 	spin_unlock(lock);
860 	err2 = osync_buffers_list(lock, list);
861 	if (err)
862 		return err;
863 	else
864 		return err2;
865 }
866 
867 /*
868  * Invalidate any and all dirty buffers on a given inode.  We are
869  * probably unmounting the fs, but that doesn't mean we have already
870  * done a sync().  Just drop the buffers from the inode list.
871  *
872  * NOTE: we take the inode's blockdev's mapping's private_lock.  Which
873  * assumes that all the buffers are against the blockdev.  Not true
874  * for reiserfs.
875  */
876 void invalidate_inode_buffers(struct inode *inode)
877 {
878 	if (inode_has_buffers(inode)) {
879 		struct address_space *mapping = &inode->i_data;
880 		struct list_head *list = &mapping->private_list;
881 		struct address_space *buffer_mapping = mapping->assoc_mapping;
882 
883 		spin_lock(&buffer_mapping->private_lock);
884 		while (!list_empty(list))
885 			__remove_assoc_queue(BH_ENTRY(list->next));
886 		spin_unlock(&buffer_mapping->private_lock);
887 	}
888 }
889 EXPORT_SYMBOL(invalidate_inode_buffers);
890 
891 /*
892  * Remove any clean buffers from the inode's buffer list.  This is called
893  * when we're trying to free the inode itself.  Those buffers can pin it.
894  *
895  * Returns true if all buffers were removed.
896  */
897 int remove_inode_buffers(struct inode *inode)
898 {
899 	int ret = 1;
900 
901 	if (inode_has_buffers(inode)) {
902 		struct address_space *mapping = &inode->i_data;
903 		struct list_head *list = &mapping->private_list;
904 		struct address_space *buffer_mapping = mapping->assoc_mapping;
905 
906 		spin_lock(&buffer_mapping->private_lock);
907 		while (!list_empty(list)) {
908 			struct buffer_head *bh = BH_ENTRY(list->next);
909 			if (buffer_dirty(bh)) {
910 				ret = 0;
911 				break;
912 			}
913 			__remove_assoc_queue(bh);
914 		}
915 		spin_unlock(&buffer_mapping->private_lock);
916 	}
917 	return ret;
918 }
919 
920 /*
921  * Create the appropriate buffers when given a page for data area and
922  * the size of each buffer.. Use the bh->b_this_page linked list to
923  * follow the buffers created.  Return NULL if unable to create more
924  * buffers.
925  *
926  * The retry flag is used to differentiate async IO (paging, swapping)
927  * which may not fail from ordinary buffer allocations.
928  */
929 struct buffer_head *alloc_page_buffers(struct page *page, unsigned long size,
930 		int retry)
931 {
932 	struct buffer_head *bh, *head;
933 	long offset;
934 
935 try_again:
936 	head = NULL;
937 	offset = PAGE_SIZE;
938 	while ((offset -= size) >= 0) {
939 		bh = alloc_buffer_head(GFP_NOFS);
940 		if (!bh)
941 			goto no_grow;
942 
943 		bh->b_bdev = NULL;
944 		bh->b_this_page = head;
945 		bh->b_blocknr = -1;
946 		head = bh;
947 
948 		bh->b_state = 0;
949 		atomic_set(&bh->b_count, 0);
950 		bh->b_private = NULL;
951 		bh->b_size = size;
952 
953 		/* Link the buffer to its page */
954 		set_bh_page(bh, page, offset);
955 
956 		init_buffer(bh, NULL, NULL);
957 	}
958 	return head;
959 /*
960  * In case anything failed, we just free everything we got.
961  */
962 no_grow:
963 	if (head) {
964 		do {
965 			bh = head;
966 			head = head->b_this_page;
967 			free_buffer_head(bh);
968 		} while (head);
969 	}
970 
971 	/*
972 	 * Return failure for non-async IO requests.  Async IO requests
973 	 * are not allowed to fail, so we have to wait until buffer heads
974 	 * become available.  But we don't want tasks sleeping with
975 	 * partially complete buffers, so all were released above.
976 	 */
977 	if (!retry)
978 		return NULL;
979 
980 	/* We're _really_ low on memory. Now we just
981 	 * wait for old buffer heads to become free due to
982 	 * finishing IO.  Since this is an async request and
983 	 * the reserve list is empty, we're sure there are
984 	 * async buffer heads in use.
985 	 */
986 	free_more_memory();
987 	goto try_again;
988 }
989 EXPORT_SYMBOL_GPL(alloc_page_buffers);
990 
991 static inline void
992 link_dev_buffers(struct page *page, struct buffer_head *head)
993 {
994 	struct buffer_head *bh, *tail;
995 
996 	bh = head;
997 	do {
998 		tail = bh;
999 		bh = bh->b_this_page;
1000 	} while (bh);
1001 	tail->b_this_page = head;
1002 	attach_page_buffers(page, head);
1003 }
1004 
1005 /*
1006  * Initialise the state of a blockdev page's buffers.
1007  */
1008 static void
1009 init_page_buffers(struct page *page, struct block_device *bdev,
1010 			sector_t block, int size)
1011 {
1012 	struct buffer_head *head = page_buffers(page);
1013 	struct buffer_head *bh = head;
1014 	int uptodate = PageUptodate(page);
1015 
1016 	do {
1017 		if (!buffer_mapped(bh)) {
1018 			init_buffer(bh, NULL, NULL);
1019 			bh->b_bdev = bdev;
1020 			bh->b_blocknr = block;
1021 			if (uptodate)
1022 				set_buffer_uptodate(bh);
1023 			set_buffer_mapped(bh);
1024 		}
1025 		block++;
1026 		bh = bh->b_this_page;
1027 	} while (bh != head);
1028 }
1029 
1030 /*
1031  * Create the page-cache page that contains the requested block.
1032  *
1033  * This is used purely for blockdev mappings.
1034  */
1035 static struct page *
1036 grow_dev_page(struct block_device *bdev, sector_t block,
1037 		pgoff_t index, int size)
1038 {
1039 	struct inode *inode = bdev->bd_inode;
1040 	struct page *page;
1041 	struct buffer_head *bh;
1042 
1043 	page = find_or_create_page(inode->i_mapping, index,
1044 		(mapping_gfp_mask(inode->i_mapping) & ~__GFP_FS)|__GFP_MOVABLE);
1045 	if (!page)
1046 		return NULL;
1047 
1048 	BUG_ON(!PageLocked(page));
1049 
1050 	if (page_has_buffers(page)) {
1051 		bh = page_buffers(page);
1052 		if (bh->b_size == size) {
1053 			init_page_buffers(page, bdev, block, size);
1054 			return page;
1055 		}
1056 		if (!try_to_free_buffers(page))
1057 			goto failed;
1058 	}
1059 
1060 	/*
1061 	 * Allocate some buffers for this page
1062 	 */
1063 	bh = alloc_page_buffers(page, size, 0);
1064 	if (!bh)
1065 		goto failed;
1066 
1067 	/*
1068 	 * Link the page to the buffers and initialise them.  Take the
1069 	 * lock to be atomic wrt __find_get_block(), which does not
1070 	 * run under the page lock.
1071 	 */
1072 	spin_lock(&inode->i_mapping->private_lock);
1073 	link_dev_buffers(page, bh);
1074 	init_page_buffers(page, bdev, block, size);
1075 	spin_unlock(&inode->i_mapping->private_lock);
1076 	return page;
1077 
1078 failed:
1079 	BUG();
1080 	unlock_page(page);
1081 	page_cache_release(page);
1082 	return NULL;
1083 }
1084 
1085 /*
1086  * Create buffers for the specified block device block's page.  If
1087  * that page was dirty, the buffers are set dirty also.
1088  */
1089 static int
1090 grow_buffers(struct block_device *bdev, sector_t block, int size)
1091 {
1092 	struct page *page;
1093 	pgoff_t index;
1094 	int sizebits;
1095 
1096 	sizebits = -1;
1097 	do {
1098 		sizebits++;
1099 	} while ((size << sizebits) < PAGE_SIZE);
1100 
1101 	index = block >> sizebits;
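
	/*
	 * For example, with 1k blocks on a machine with 4k pages, sizebits
	 * is 2: block 35 lives in page index 8 (35 >> 2), and that page
	 * covers blocks 32..35 (8 << 2 onwards).
	 */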
1102 
1103 	/*
1104 	 * Check for a block which wants to lie outside our maximum possible
1105 	 * pagecache index.  (this comparison is done using sector_t types).
1106 	 */
1107 	if (unlikely(index != block >> sizebits)) {
1108 		char b[BDEVNAME_SIZE];
1109 
1110 		printk(KERN_ERR "%s: requested out-of-range block %llu for "
1111 			"device %s\n",
1112 			__func__, (unsigned long long)block,
1113 			bdevname(bdev, b));
1114 		return -EIO;
1115 	}
1116 	block = index << sizebits;
1117 	/* Create a page with the proper size buffers.. */
1118 	page = grow_dev_page(bdev, block, index, size);
1119 	if (!page)
1120 		return 0;
1121 	unlock_page(page);
1122 	page_cache_release(page);
1123 	return 1;
1124 }
1125 
1126 static struct buffer_head *
1127 __getblk_slow(struct block_device *bdev, sector_t block, int size)
1128 {
1129 	/* Size must be multiple of hard sectorsize */
1130 	if (unlikely(size & (bdev_hardsect_size(bdev)-1) ||
1131 			(size < 512 || size > PAGE_SIZE))) {
1132 		printk(KERN_ERR "getblk(): invalid block size %d requested\n",
1133 					size);
1134 		printk(KERN_ERR "hardsect size: %d\n",
1135 					bdev_hardsect_size(bdev));
1136 
1137 		dump_stack();
1138 		return NULL;
1139 	}
1140 
1141 	for (;;) {
1142 		struct buffer_head * bh;
1143 		int ret;
1144 
1145 		bh = __find_get_block(bdev, block, size);
1146 		if (bh)
1147 			return bh;
1148 
1149 		ret = grow_buffers(bdev, block, size);
1150 		if (ret < 0)
1151 			return NULL;
1152 		if (ret == 0)
1153 			free_more_memory();
1154 	}
1155 }
1156 
1157 /*
1158  * The relationship between dirty buffers and dirty pages:
1159  *
1160  * Whenever a page has any dirty buffers, the page's dirty bit is set, and
1161  * the page is tagged dirty in its radix tree.
1162  *
1163  * At all times, the dirtiness of the buffers represents the dirtiness of
1164  * subsections of the page.  If the page has buffers, the page dirty bit is
1165  * merely a hint about the true dirty state.
1166  *
1167  * When a page is set dirty in its entirety, all its buffers are marked dirty
1168  * (if the page has buffers).
1169  *
1170  * When a buffer is marked dirty, its page is dirtied, but the page's other
1171  * buffers are not.
1172  *
1173  * Also.  When blockdev buffers are explicitly read with bread(), they
1174  * individually become uptodate.  But their backing page remains not
1175  * uptodate - even if all of its buffers are uptodate.  A subsequent
1176  * block_read_full_page() against that page will discover all the uptodate
1177  * buffers, will set the page uptodate and will perform no I/O.
1178  */
1179 
1180 /**
1181  * mark_buffer_dirty - mark a buffer_head as needing writeout
1182  * @bh: the buffer_head to mark dirty
1183  *
1184  * mark_buffer_dirty() will set the dirty bit against the buffer, then set its
1185  * backing page dirty, then tag the page as dirty in its address_space's radix
1186  * tree and then attach the address_space's inode to its superblock's dirty
1187  * inode list.
1188  *
1189  * mark_buffer_dirty() is atomic.  It takes bh->b_page->mapping->private_lock,
1190  * mapping->tree_lock and the global inode_lock.
1191  */
1192 void mark_buffer_dirty(struct buffer_head *bh)
1193 {
1194 	WARN_ON_ONCE(!buffer_uptodate(bh));
1195 
1196 	/*
1197 	 * Very *carefully* optimize the it-is-already-dirty case.
1198 	 *
1199 	 * Don't let the final "is it dirty" escape to before we
1200 	 * perhaps modified the buffer.
1201 	 */
1202 	if (buffer_dirty(bh)) {
1203 		smp_mb();
1204 		if (buffer_dirty(bh))
1205 			return;
1206 	}
1207 
1208 	if (!test_set_buffer_dirty(bh))
1209 		__set_page_dirty(bh->b_page, page_mapping(bh->b_page), 0);
1210 }
1211 
1212 /*
1213  * Decrement a buffer_head's reference count.  If all buffers against a page
1214  * have zero reference count, are clean and unlocked, and if the page is clean
1215  * and unlocked then try_to_free_buffers() may strip the buffers from the page
1216  * in preparation for freeing it (sometimes, rarely, buffers are removed from
1217  * a page but it ends up not being freed, and buffers may later be reattached).
1218  */
1219 void __brelse(struct buffer_head * buf)
1220 {
1221 	if (atomic_read(&buf->b_count)) {
1222 		put_bh(buf);
1223 		return;
1224 	}
1225 	WARN(1, KERN_ERR "VFS: brelse: Trying to free free buffer\n");
1226 }
1227 
1228 /*
1229  * bforget() is like brelse(), except it discards any
1230  * potentially dirty data.
1231  */
1232 void __bforget(struct buffer_head *bh)
1233 {
1234 	clear_buffer_dirty(bh);
1235 	if (bh->b_assoc_map) {
1236 		struct address_space *buffer_mapping = bh->b_page->mapping;
1237 
1238 		spin_lock(&buffer_mapping->private_lock);
1239 		list_del_init(&bh->b_assoc_buffers);
1240 		bh->b_assoc_map = NULL;
1241 		spin_unlock(&buffer_mapping->private_lock);
1242 	}
1243 	__brelse(bh);
1244 }
1245 
1246 static struct buffer_head *__bread_slow(struct buffer_head *bh)
1247 {
1248 	lock_buffer(bh);
1249 	if (buffer_uptodate(bh)) {
1250 		unlock_buffer(bh);
1251 		return bh;
1252 	} else {
1253 		get_bh(bh);
1254 		bh->b_end_io = end_buffer_read_sync;
1255 		submit_bh(READ, bh);
1256 		wait_on_buffer(bh);
1257 		if (buffer_uptodate(bh))
1258 			return bh;
1259 	}
1260 	brelse(bh);
1261 	return NULL;
1262 }
1263 
1264 /*
1265  * Per-cpu buffer LRU implementation, used to reduce the cost of __find_get_block().
1266  * The bhs[] array is sorted - newest buffer is at bhs[0].  Buffers have their
1267  * refcount elevated by one when they're in an LRU.  A buffer can only appear
1268  * once in a particular CPU's LRU.  A single buffer can be present in multiple
1269  * CPU's LRUs at the same time.
1270  *
1271  * This is a transparent caching front-end to sb_bread(), sb_getblk() and
1272  * sb_find_get_block().
1273  *
1274  * The LRUs themselves only need locking against invalidate_bh_lrus.  We use
1275  * a local interrupt disable for that.
1276  */
1277 
1278 #define BH_LRU_SIZE	8
1279 
1280 struct bh_lru {
1281 	struct buffer_head *bhs[BH_LRU_SIZE];
1282 };
1283 
1284 static DEFINE_PER_CPU(struct bh_lru, bh_lrus) = {{ NULL }};
1285 
1286 #ifdef CONFIG_SMP
1287 #define bh_lru_lock()	local_irq_disable()
1288 #define bh_lru_unlock()	local_irq_enable()
1289 #else
1290 #define bh_lru_lock()	preempt_disable()
1291 #define bh_lru_unlock()	preempt_enable()
1292 #endif
1293 
1294 static inline void check_irqs_on(void)
1295 {
1296 #ifdef irqs_disabled
1297 	BUG_ON(irqs_disabled());
1298 #endif
1299 }
1300 
1301 /*
1302  * The LRU management algorithm is dopey-but-simple.  Sorry.
1303  */
1304 static void bh_lru_install(struct buffer_head *bh)
1305 {
1306 	struct buffer_head *evictee = NULL;
1307 	struct bh_lru *lru;
1308 
1309 	check_irqs_on();
1310 	bh_lru_lock();
1311 	lru = &__get_cpu_var(bh_lrus);
1312 	if (lru->bhs[0] != bh) {
1313 		struct buffer_head *bhs[BH_LRU_SIZE];
1314 		int in;
1315 		int out = 0;
1316 
1317 		get_bh(bh);
1318 		bhs[out++] = bh;
1319 		for (in = 0; in < BH_LRU_SIZE; in++) {
1320 			struct buffer_head *bh2 = lru->bhs[in];
1321 
1322 			if (bh2 == bh) {
1323 				__brelse(bh2);
1324 			} else {
1325 				if (out >= BH_LRU_SIZE) {
1326 					BUG_ON(evictee != NULL);
1327 					evictee = bh2;
1328 				} else {
1329 					bhs[out++] = bh2;
1330 				}
1331 			}
1332 		}
1333 		while (out < BH_LRU_SIZE)
1334 			bhs[out++] = NULL;
1335 		memcpy(lru->bhs, bhs, sizeof(bhs));
1336 	}
1337 	bh_lru_unlock();
1338 
1339 	if (evictee)
1340 		__brelse(evictee);
1341 }
1342 
1343 /*
1344  * Look up the bh in this cpu's LRU.  If it's there, move it to the head.
1345  */
1346 static struct buffer_head *
1347 lookup_bh_lru(struct block_device *bdev, sector_t block, unsigned size)
1348 {
1349 	struct buffer_head *ret = NULL;
1350 	struct bh_lru *lru;
1351 	unsigned int i;
1352 
1353 	check_irqs_on();
1354 	bh_lru_lock();
1355 	lru = &__get_cpu_var(bh_lrus);
1356 	for (i = 0; i < BH_LRU_SIZE; i++) {
1357 		struct buffer_head *bh = lru->bhs[i];
1358 
1359 		if (bh && bh->b_bdev == bdev &&
1360 				bh->b_blocknr == block && bh->b_size == size) {
1361 			if (i) {
1362 				while (i) {
1363 					lru->bhs[i] = lru->bhs[i - 1];
1364 					i--;
1365 				}
1366 				lru->bhs[0] = bh;
1367 			}
1368 			get_bh(bh);
1369 			ret = bh;
1370 			break;
1371 		}
1372 	}
1373 	bh_lru_unlock();
1374 	return ret;
1375 }
1376 
1377 /*
1378  * Perform a pagecache lookup for the matching buffer.  If it's there, refresh
1379  * it in the LRU and mark it as accessed.  If it is not present then return
1380  * NULL
1381  */
1382 struct buffer_head *
1383 __find_get_block(struct block_device *bdev, sector_t block, unsigned size)
1384 {
1385 	struct buffer_head *bh = lookup_bh_lru(bdev, block, size);
1386 
1387 	if (bh == NULL) {
1388 		bh = __find_get_block_slow(bdev, block);
1389 		if (bh)
1390 			bh_lru_install(bh);
1391 	}
1392 	if (bh)
1393 		touch_buffer(bh);
1394 	return bh;
1395 }
1396 EXPORT_SYMBOL(__find_get_block);
1397 
1398 /*
1399  * __getblk will locate (and, if necessary, create) the buffer_head
1400  * which corresponds to the passed block_device, block and size. The
1401  * returned buffer has its reference count incremented.
1402  *
1403  * __getblk() cannot fail - it just keeps trying.  If you pass it an
1404  * illegal block number, __getblk() will happily return a buffer_head
1405  * which represents the non-existent block.  Very weird.
1406  *
1407  * __getblk() will lock up the machine if grow_dev_page's try_to_free_buffers()
1408  * attempt is failing.  FIXME, perhaps?
1409  */
1410 struct buffer_head *
1411 __getblk(struct block_device *bdev, sector_t block, unsigned size)
1412 {
1413 	struct buffer_head *bh = __find_get_block(bdev, block, size);
1414 
1415 	might_sleep();
1416 	if (bh == NULL)
1417 		bh = __getblk_slow(bdev, block, size);
1418 	return bh;
1419 }
1420 EXPORT_SYMBOL(__getblk);
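
/*
 * Sketch: __getblk() is the natural call when the caller will overwrite the
 * whole block and so has no need to read it first ('bdev', 'block_nr' and
 * 'blocksize' are assumed names):
 *
 *	struct buffer_head *bh = __getblk(bdev, block_nr, blocksize);
 *
 *	lock_buffer(bh);
 *	memset(bh->b_data, 0, bh->b_size);
 *	set_buffer_uptodate(bh);
 *	unlock_buffer(bh);
 *	mark_buffer_dirty(bh);
 *	brelse(bh);
 */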
1421 
1422 /*
1423  * Do async read-ahead on a buffer..
1424  */
1425 void __breadahead(struct block_device *bdev, sector_t block, unsigned size)
1426 {
1427 	struct buffer_head *bh = __getblk(bdev, block, size);
1428 	if (likely(bh)) {
1429 		ll_rw_block(READA, 1, &bh);
1430 		brelse(bh);
1431 	}
1432 }
1433 EXPORT_SYMBOL(__breadahead);
1434 
1435 /**
1436  *  __bread() - reads a specified block and returns the bh
1437  *  @bdev: the block_device to read from
1438  *  @block: number of block
1439  *  @size: size (in bytes) to read
1440  *
1441  *  Reads a specified block, and returns buffer head that contains it.
1442  *  It returns NULL if the block was unreadable.
1443  */
1444 struct buffer_head *
1445 __bread(struct block_device *bdev, sector_t block, unsigned size)
1446 {
1447 	struct buffer_head *bh = __getblk(bdev, block, size);
1448 
1449 	if (likely(bh) && !buffer_uptodate(bh))
1450 		bh = __bread_slow(bh);
1451 	return bh;
1452 }
1453 EXPORT_SYMBOL(__bread);
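
/*
 * Sketch of a read-modify-write of a metadata block via __bread() ('bdev',
 * 'block_nr' and 'blocksize' are assumed; filesystems normally go through
 * the sb_bread() wrapper instead):
 *
 *	struct buffer_head *bh = __bread(bdev, block_nr, blocksize);
 *
 *	if (!bh)
 *		return -EIO;
 *	...modify bh->b_data...
 *	mark_buffer_dirty(bh);
 *	brelse(bh);
 */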
1454 
1455 /*
1456  * invalidate_bh_lrus() is called rarely - but not only at unmount.
1457  * This doesn't race because it runs in each cpu either in irq
1458  * or with preempt disabled.
1459  */
1460 static void invalidate_bh_lru(void *arg)
1461 {
1462 	struct bh_lru *b = &get_cpu_var(bh_lrus);
1463 	int i;
1464 
1465 	for (i = 0; i < BH_LRU_SIZE; i++) {
1466 		brelse(b->bhs[i]);
1467 		b->bhs[i] = NULL;
1468 	}
1469 	put_cpu_var(bh_lrus);
1470 }
1471 
1472 void invalidate_bh_lrus(void)
1473 {
1474 	on_each_cpu(invalidate_bh_lru, NULL, 1);
1475 }
1476 EXPORT_SYMBOL_GPL(invalidate_bh_lrus);
1477 
1478 void set_bh_page(struct buffer_head *bh,
1479 		struct page *page, unsigned long offset)
1480 {
1481 	bh->b_page = page;
1482 	BUG_ON(offset >= PAGE_SIZE);
1483 	if (PageHighMem(page))
1484 		/*
1485 		 * This catches illegal uses and preserves the offset:
1486 		 */
1487 		bh->b_data = (char *)(0 + offset);
1488 	else
1489 		bh->b_data = page_address(page) + offset;
1490 }
1491 EXPORT_SYMBOL(set_bh_page);
1492 
1493 /*
1494  * Called when truncating a buffer on a page completely.
1495  */
1496 static void discard_buffer(struct buffer_head * bh)
1497 {
1498 	lock_buffer(bh);
1499 	clear_buffer_dirty(bh);
1500 	bh->b_bdev = NULL;
1501 	clear_buffer_mapped(bh);
1502 	clear_buffer_req(bh);
1503 	clear_buffer_new(bh);
1504 	clear_buffer_delay(bh);
1505 	clear_buffer_unwritten(bh);
1506 	unlock_buffer(bh);
1507 }
1508 
1509 /**
1510  * block_invalidatepage - invalidate part or all of a buffer-backed page
1511  *
1512  * @page: the page which is affected
1513  * @offset: the index of the truncation point
1514  *
1515  * block_invalidatepage() is called when all or part of the page has become
1516  * invalidated by a truncate operation.
1517  *
1518  * block_invalidatepage() does not have to release all buffers, but it must
1519  * ensure that no dirty buffer is left outside @offset and that no I/O
1520  * is underway against any of the blocks which are outside the truncation
1521  * point.  Because the caller is about to free (and possibly reuse) those
1522  * blocks on-disk.
1523  */
1524 void block_invalidatepage(struct page *page, unsigned long offset)
1525 {
1526 	struct buffer_head *head, *bh, *next;
1527 	unsigned int curr_off = 0;
1528 
1529 	BUG_ON(!PageLocked(page));
1530 	if (!page_has_buffers(page))
1531 		goto out;
1532 
1533 	head = page_buffers(page);
1534 	bh = head;
1535 	do {
1536 		unsigned int next_off = curr_off + bh->b_size;
1537 		next = bh->b_this_page;
1538 
1539 		/*
1540 		 * is this block fully invalidated?
1541 		 */
1542 		if (offset <= curr_off)
1543 			discard_buffer(bh);
1544 		curr_off = next_off;
1545 		bh = next;
1546 	} while (bh != head);
1547 
1548 	/*
1549 	 * We release buffers only if the entire page is being invalidated.
1550 	 * The get_block cached value has been unconditionally invalidated,
1551 	 * so real IO is not possible anymore.
1552 	 */
1553 	if (offset == 0)
1554 		try_to_release_page(page, 0);
1555 out:
1556 	return;
1557 }
1558 EXPORT_SYMBOL(block_invalidatepage);
1559 
1560 /*
1561  * We attach and possibly dirty the buffers atomically wrt
1562  * __set_page_dirty_buffers() via private_lock.  try_to_free_buffers
1563  * is already excluded via the page lock.
1564  */
1565 void create_empty_buffers(struct page *page,
1566 			unsigned long blocksize, unsigned long b_state)
1567 {
1568 	struct buffer_head *bh, *head, *tail;
1569 
1570 	head = alloc_page_buffers(page, blocksize, 1);
1571 	bh = head;
1572 	do {
1573 		bh->b_state |= b_state;
1574 		tail = bh;
1575 		bh = bh->b_this_page;
1576 	} while (bh);
1577 	tail->b_this_page = head;
1578 
1579 	spin_lock(&page->mapping->private_lock);
1580 	if (PageUptodate(page) || PageDirty(page)) {
1581 		bh = head;
1582 		do {
1583 			if (PageDirty(page))
1584 				set_buffer_dirty(bh);
1585 			if (PageUptodate(page))
1586 				set_buffer_uptodate(bh);
1587 			bh = bh->b_this_page;
1588 		} while (bh != head);
1589 	}
1590 	attach_page_buffers(page, head);
1591 	spin_unlock(&page->mapping->private_lock);
1592 }
1593 EXPORT_SYMBOL(create_empty_buffers);
1594 
1595 /*
1596  * We are taking a block for data and we don't want any output from any
1597  * buffer-cache aliases starting from the return of this function and
1598  * until the moment when something explicitly marks the buffer
1599  * dirty (hopefully that will not happen until we free that block ;-)
1600  * We don't even need to mark it not-uptodate - nobody can expect
1601  * anything from a newly allocated buffer anyway. We used to use
1602  * unmap_buffer() for such invalidation, but that was wrong. We definitely
1603  * don't want to mark the alias unmapped, for example - it would confuse
1604  * anyone who might pick it with bread() afterwards...
1605  *
1606  * Also..  Note that bforget() doesn't lock the buffer.  So there can
1607  * be writeout I/O going on against recently-freed buffers.  We don't
1608  * wait on that I/O in bforget() - it's more efficient to wait on the I/O
1609  * only if we really need to.  That happens here.
1610  */
1611 void unmap_underlying_metadata(struct block_device *bdev, sector_t block)
1612 {
1613 	struct buffer_head *old_bh;
1614 
1615 	might_sleep();
1616 
1617 	old_bh = __find_get_block_slow(bdev, block);
1618 	if (old_bh) {
1619 		clear_buffer_dirty(old_bh);
1620 		wait_on_buffer(old_bh);
1621 		clear_buffer_req(old_bh);
1622 		__brelse(old_bh);
1623 	}
1624 }
1625 EXPORT_SYMBOL(unmap_underlying_metadata);
1626 
1627 /*
1628  * NOTE! All mapped/uptodate combinations are valid:
1629  *
1630  *	Mapped	Uptodate	Meaning
1631  *
1632  *	No	No		"unknown" - must do get_block()
1633  *	No	Yes		"hole" - zero-filled
1634  *	Yes	No		"allocated" - allocated on disk, not read in
1635  *	Yes	Yes		"valid" - allocated and up-to-date in memory.
1636  *
1637  * "Dirty" is valid only with the last case (mapped+uptodate).
1638  */
1639 
1640 /*
1641  * While block_write_full_page is writing back the dirty buffers under
1642  * the page lock, whoever dirtied the buffers may decide to clean them
1643  * again at any time.  We handle that by only looking at the buffer
1644  * state inside lock_buffer().
1645  *
1646  * If block_write_full_page() is called for regular writeback
1647  * (wbc->sync_mode == WB_SYNC_NONE) then it will redirty a page which has a
1648  * locked buffer.   This only can happen if someone has written the buffer
1649  * directly, with submit_bh().  At the address_space level PageWriteback
1650  * prevents this contention from occurring.
1651  */
1652 static int __block_write_full_page(struct inode *inode, struct page *page,
1653 			get_block_t *get_block, struct writeback_control *wbc)
1654 {
1655 	int err;
1656 	sector_t block;
1657 	sector_t last_block;
1658 	struct buffer_head *bh, *head;
1659 	const unsigned blocksize = 1 << inode->i_blkbits;
1660 	int nr_underway = 0;
1661 
1662 	BUG_ON(!PageLocked(page));
1663 
1664 	last_block = (i_size_read(inode) - 1) >> inode->i_blkbits;
1665 
1666 	if (!page_has_buffers(page)) {
1667 		create_empty_buffers(page, blocksize,
1668 					(1 << BH_Dirty)|(1 << BH_Uptodate));
1669 	}
1670 
1671 	/*
1672 	 * Be very careful.  We have no exclusion from __set_page_dirty_buffers
1673 	 * here, and the (potentially unmapped) buffers may become dirty at
1674 	 * any time.  If a buffer becomes dirty here after we've inspected it
1675 	 * then we just miss that fact, and the page stays dirty.
1676 	 *
1677 	 * Buffers outside i_size may be dirtied by __set_page_dirty_buffers;
1678 	 * handle that here by just cleaning them.
1679 	 */
1680 
1681 	block = (sector_t)page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
1682 	head = page_buffers(page);
1683 	bh = head;
1684 
1685 	/*
1686 	 * Get all the dirty buffers mapped to disk addresses and
1687 	 * handle any aliases from the underlying blockdev's mapping.
1688 	 */
1689 	do {
1690 		if (block > last_block) {
1691 			/*
1692 			 * mapped buffers outside i_size will occur, because
1693 			 * this page can be outside i_size when there is a
1694 			 * truncate in progress.
1695 			 */
1696 			/*
1697 			 * The buffer was zeroed by block_write_full_page()
1698 			 */
1699 			clear_buffer_dirty(bh);
1700 			set_buffer_uptodate(bh);
1701 		} else if ((!buffer_mapped(bh) || buffer_delay(bh)) &&
1702 			   buffer_dirty(bh)) {
1703 			WARN_ON(bh->b_size != blocksize);
1704 			err = get_block(inode, block, bh, 1);
1705 			if (err)
1706 				goto recover;
1707 			clear_buffer_delay(bh);
1708 			if (buffer_new(bh)) {
1709 				/* blockdev mappings never come here */
1710 				clear_buffer_new(bh);
1711 				unmap_underlying_metadata(bh->b_bdev,
1712 							bh->b_blocknr);
1713 			}
1714 		}
1715 		bh = bh->b_this_page;
1716 		block++;
1717 	} while (bh != head);
1718 
1719 	do {
1720 		if (!buffer_mapped(bh))
1721 			continue;
1722 		/*
1723 		 * If it's a fully non-blocking write attempt and we cannot
1724 		 * lock the buffer then redirty the page.  Note that this can
1725 		 * potentially cause a busy-wait loop from pdflush and kswapd
1726 		 * activity, but those code paths have their own higher-level
1727 		 * throttling.
1728 		 */
1729 		if (wbc->sync_mode != WB_SYNC_NONE || !wbc->nonblocking) {
1730 			lock_buffer(bh);
1731 		} else if (!trylock_buffer(bh)) {
1732 			redirty_page_for_writepage(wbc, page);
1733 			continue;
1734 		}
1735 		if (test_clear_buffer_dirty(bh)) {
1736 			mark_buffer_async_write(bh);
1737 		} else {
1738 			unlock_buffer(bh);
1739 		}
1740 	} while ((bh = bh->b_this_page) != head);
1741 
1742 	/*
1743 	 * The page and its buffers are protected by PageWriteback(), so we can
1744 	 * drop the bh refcounts early.
1745 	 */
1746 	BUG_ON(PageWriteback(page));
1747 	set_page_writeback(page);
1748 
1749 	do {
1750 		struct buffer_head *next = bh->b_this_page;
1751 		if (buffer_async_write(bh)) {
1752 			submit_bh(WRITE, bh);
1753 			nr_underway++;
1754 		}
1755 		bh = next;
1756 	} while (bh != head);
1757 	unlock_page(page);
1758 
1759 	err = 0;
1760 done:
1761 	if (nr_underway == 0) {
1762 		/*
1763 		 * The page was marked dirty, but the buffers were
1764 		 * clean.  Someone wrote them back by hand with
1765 		 * ll_rw_block/submit_bh.  A rare case.
1766 		 */
1767 		end_page_writeback(page);
1768 
1769 		/*
1770 		 * The page and buffer_heads can be released at any time from
1771 		 * here on.
1772 		 */
1773 	}
1774 	return err;
1775 
1776 recover:
1777 	/*
1778 	 * ENOSPC, or some other error.  We may already have added some
1779 	 * blocks to the file, so we need to write these out to avoid
1780 	 * exposing stale data.
1781 	 * The page is currently locked and not marked for writeback
1782 	 */
1783 	bh = head;
1784 	/* Recovery: lock and submit the mapped buffers */
1785 	do {
1786 		if (buffer_mapped(bh) && buffer_dirty(bh) &&
1787 		    !buffer_delay(bh)) {
1788 			lock_buffer(bh);
1789 			mark_buffer_async_write(bh);
1790 		} else {
1791 			/*
1792 			 * The buffer may have been set dirty during
1793 			 * attachment to a dirty page.
1794 			 */
1795 			clear_buffer_dirty(bh);
1796 		}
1797 	} while ((bh = bh->b_this_page) != head);
1798 	SetPageError(page);
1799 	BUG_ON(PageWriteback(page));
1800 	mapping_set_error(page->mapping, err);
1801 	set_page_writeback(page);
1802 	do {
1803 		struct buffer_head *next = bh->b_this_page;
1804 		if (buffer_async_write(bh)) {
1805 			clear_buffer_dirty(bh);
1806 			submit_bh(WRITE, bh);
1807 			nr_underway++;
1808 		}
1809 		bh = next;
1810 	} while (bh != head);
1811 	unlock_page(page);
1812 	goto done;
1813 }
1814 
1815 /*
1816  * If a page has any new buffers, zero them out here, and mark them uptodate
1817  * and dirty so they'll be written out (in order to prevent uninitialised
1818  * block data from leaking). And clear the new bit.
1819  */
1820 void page_zero_new_buffers(struct page *page, unsigned from, unsigned to)
1821 {
1822 	unsigned int block_start, block_end;
1823 	struct buffer_head *head, *bh;
1824 
1825 	BUG_ON(!PageLocked(page));
1826 	if (!page_has_buffers(page))
1827 		return;
1828 
1829 	bh = head = page_buffers(page);
1830 	block_start = 0;
1831 	do {
1832 		block_end = block_start + bh->b_size;
1833 
1834 		if (buffer_new(bh)) {
1835 			if (block_end > from && block_start < to) {
1836 				if (!PageUptodate(page)) {
1837 					unsigned start, size;
1838 
1839 					start = max(from, block_start);
1840 					size = min(to, block_end) - start;
1841 
1842 					zero_user(page, start, size);
1843 					set_buffer_uptodate(bh);
1844 				}
1845 
1846 				clear_buffer_new(bh);
1847 				mark_buffer_dirty(bh);
1848 			}
1849 		}
1850 
1851 		block_start = block_end;
1852 		bh = bh->b_this_page;
1853 	} while (bh != head);
1854 }
1855 EXPORT_SYMBOL(page_zero_new_buffers);
1856 
1857 static int __block_prepare_write(struct inode *inode, struct page *page,
1858 		unsigned from, unsigned to, get_block_t *get_block)
1859 {
1860 	unsigned block_start, block_end;
1861 	sector_t block;
1862 	int err = 0;
1863 	unsigned blocksize, bbits;
1864 	struct buffer_head *bh, *head, *wait[2], **wait_bh=wait;
1865 
1866 	BUG_ON(!PageLocked(page));
1867 	BUG_ON(from > PAGE_CACHE_SIZE);
1868 	BUG_ON(to > PAGE_CACHE_SIZE);
1869 	BUG_ON(from > to);
1870 
1871 	blocksize = 1 << inode->i_blkbits;
1872 	if (!page_has_buffers(page))
1873 		create_empty_buffers(page, blocksize, 0);
1874 	head = page_buffers(page);
1875 
1876 	bbits = inode->i_blkbits;
1877 	block = (sector_t)page->index << (PAGE_CACHE_SHIFT - bbits);
1878 
1879 	for(bh = head, block_start = 0; bh != head || !block_start;
1880 	    block++, block_start=block_end, bh = bh->b_this_page) {
1881 		block_end = block_start + blocksize;
1882 		if (block_end <= from || block_start >= to) {
1883 			if (PageUptodate(page)) {
1884 				if (!buffer_uptodate(bh))
1885 					set_buffer_uptodate(bh);
1886 			}
1887 			continue;
1888 		}
1889 		if (buffer_new(bh))
1890 			clear_buffer_new(bh);
1891 		if (!buffer_mapped(bh)) {
1892 			WARN_ON(bh->b_size != blocksize);
1893 			err = get_block(inode, block, bh, 1);
1894 			if (err)
1895 				break;
1896 			if (buffer_new(bh)) {
1897 				unmap_underlying_metadata(bh->b_bdev,
1898 							bh->b_blocknr);
1899 				if (PageUptodate(page)) {
1900 					clear_buffer_new(bh);
1901 					set_buffer_uptodate(bh);
1902 					mark_buffer_dirty(bh);
1903 					continue;
1904 				}
1905 				if (block_end > to || block_start < from)
1906 					zero_user_segments(page,
1907 						to, block_end,
1908 						block_start, from);
1909 				continue;
1910 			}
1911 		}
1912 		if (PageUptodate(page)) {
1913 			if (!buffer_uptodate(bh))
1914 				set_buffer_uptodate(bh);
1915 			continue;
1916 		}
1917 		if (!buffer_uptodate(bh) && !buffer_delay(bh) &&
1918 		    !buffer_unwritten(bh) &&
1919 		     (block_start < from || block_end > to)) {
1920 			ll_rw_block(READ, 1, &bh);
1921 			*wait_bh++=bh;
1922 		}
1923 	}
1924 	/*
1925 	 * If we issued read requests - let them complete.
1926 	 */
1927 	while(wait_bh > wait) {
1928 		wait_on_buffer(*--wait_bh);
1929 		if (!buffer_uptodate(*wait_bh))
1930 			err = -EIO;
1931 	}
1932 	if (unlikely(err))
1933 		page_zero_new_buffers(page, from, to);
1934 	return err;
1935 }
1936 
1937 static int __block_commit_write(struct inode *inode, struct page *page,
1938 		unsigned from, unsigned to)
1939 {
1940 	unsigned block_start, block_end;
1941 	int partial = 0;
1942 	unsigned blocksize;
1943 	struct buffer_head *bh, *head;
1944 
1945 	blocksize = 1 << inode->i_blkbits;
1946 
1947 	for(bh = head = page_buffers(page), block_start = 0;
1948 	    bh != head || !block_start;
1949 	    block_start=block_end, bh = bh->b_this_page) {
1950 		block_end = block_start + blocksize;
1951 		if (block_end <= from || block_start >= to) {
1952 			if (!buffer_uptodate(bh))
1953 				partial = 1;
1954 		} else {
1955 			set_buffer_uptodate(bh);
1956 			mark_buffer_dirty(bh);
1957 		}
1958 		clear_buffer_new(bh);
1959 	}
1960 
1961 	/*
1962 	 * If this is a partial write which happened to make all buffers
1963 	 * uptodate then we can optimize away a bogus readpage() for
1964 	 * the next read(). Here we 'discover' whether the page went
1965 	 * uptodate as a result of this (potentially partial) write.
1966 	 */
1967 	if (!partial)
1968 		SetPageUptodate(page);
1969 	return 0;
1970 }
1971 
1972 /*
1973  * block_write_begin takes care of the basic task of block allocation and
1974  * bringing partial write blocks uptodate first.
1975  *
1976  * If *pagep is not NULL, then block_write_begin uses the locked page
1977  * at *pagep rather than allocating its own. In this case, the page will
1978  * not be unlocked or deallocated on failure.
1979  */
1980 int block_write_begin(struct file *file, struct address_space *mapping,
1981 			loff_t pos, unsigned len, unsigned flags,
1982 			struct page **pagep, void **fsdata,
1983 			get_block_t *get_block)
1984 {
1985 	struct inode *inode = mapping->host;
1986 	int status = 0;
1987 	struct page *page;
1988 	pgoff_t index;
1989 	unsigned start, end;
1990 	int ownpage = 0;
1991 
1992 	index = pos >> PAGE_CACHE_SHIFT;
1993 	start = pos & (PAGE_CACHE_SIZE - 1);
1994 	end = start + len;
1995 
1996 	page = *pagep;
1997 	if (page == NULL) {
1998 		ownpage = 1;
1999 		page = grab_cache_page_write_begin(mapping, index, flags);
2000 		if (!page) {
2001 			status = -ENOMEM;
2002 			goto out;
2003 		}
2004 		*pagep = page;
2005 	} else
2006 		BUG_ON(!PageLocked(page));
2007 
2008 	status = __block_prepare_write(inode, page, start, end, get_block);
2009 	if (unlikely(status)) {
2010 		ClearPageUptodate(page);
2011 
2012 		if (ownpage) {
2013 			unlock_page(page);
2014 			page_cache_release(page);
2015 			*pagep = NULL;
2016 
2017 			/*
2018 			 * prepare_write() may have instantiated a few blocks
2019 			 * outside i_size.  Trim these off again. Don't need
2020 			 * i_size_read because we hold i_mutex.
2021 			 */
2022 			if (pos + len > inode->i_size)
2023 				vmtruncate(inode, inode->i_size);
2024 		}
2025 	}
2026 
2027 out:
2028 	return status;
2029 }
2030 EXPORT_SYMBOL(block_write_begin);
2031 
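/*
 * A minimal sketch of how a filesystem might use block_write_begin() as
 * its ->write_begin method.  "myfs_get_block" is a hypothetical
 * get_block_t supplied by the filesystem, declared here only so the
 * sketch is self-contained.
 */
extern int myfs_get_block(struct inode *inode, sector_t iblock,
			  struct buffer_head *bh_result, int create);

static int myfs_write_begin(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned flags,
			struct page **pagep, void **fsdata)
{
	/* *pagep == NULL asks block_write_begin to grab and lock the page */
	*pagep = NULL;
	return block_write_begin(file, mapping, pos, len, flags,
					pagep, fsdata, myfs_get_block);
}
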
2032 int block_write_end(struct file *file, struct address_space *mapping,
2033 			loff_t pos, unsigned len, unsigned copied,
2034 			struct page *page, void *fsdata)
2035 {
2036 	struct inode *inode = mapping->host;
2037 	unsigned start;
2038 
2039 	start = pos & (PAGE_CACHE_SIZE - 1);
2040 
2041 	if (unlikely(copied < len)) {
2042 		/*
2043 		 * The buffers that were written will now be uptodate, so we
2044 		 * don't have to worry about a readpage reading them and
2045 		 * overwriting a partial write. However if we have encountered
2046 		 * a short write and only partially written into a buffer, it
2047 		 * will not be marked uptodate, so a readpage might come in and
2048 		 * destroy our partial write.
2049 		 *
2050 		 * Do the simplest thing, and just treat any short write to a
2051 		 * non uptodate page as a zero-length write, and force the
2052 		 * caller to redo the whole thing.
2053 		 */
2054 		if (!PageUptodate(page))
2055 			copied = 0;
2056 
2057 		page_zero_new_buffers(page, start+copied, start+len);
2058 	}
2059 	flush_dcache_page(page);
2060 
2061 	/* This could be a short (even 0-length) commit */
2062 	__block_commit_write(inode, page, start, start+copied);
2063 
2064 	return copied;
2065 }
2066 EXPORT_SYMBOL(block_write_end);
2067 
2068 int generic_write_end(struct file *file, struct address_space *mapping,
2069 			loff_t pos, unsigned len, unsigned copied,
2070 			struct page *page, void *fsdata)
2071 {
2072 	struct inode *inode = mapping->host;
2073 	int i_size_changed = 0;
2074 
2075 	copied = block_write_end(file, mapping, pos, len, copied, page, fsdata);
2076 
2077 	/*
2078 	 * No need to use i_size_read() here, the i_size
2079 	 * cannot change under us because we hold i_mutex.
2080 	 *
2081 	 * But it's important to update i_size while still holding page lock:
2082 	 * page writeout could otherwise come in and zero beyond i_size.
2083 	 */
2084 	if (pos+copied > inode->i_size) {
2085 		i_size_write(inode, pos+copied);
2086 		i_size_changed = 1;
2087 	}
2088 
2089 	unlock_page(page);
2090 	page_cache_release(page);
2091 
2092 	/*
2093 	 * Don't mark the inode dirty under page lock. First, it unnecessarily
2094 	 * makes the holding time of page lock longer. Second, it forces lock
2095 	 * ordering of page lock and transaction start for journaling
2096 	 * filesystems.
2097 	 */
2098 	if (i_size_changed)
2099 		mark_inode_dirty(inode);
2100 
2101 	return copied;
2102 }
2103 EXPORT_SYMBOL(generic_write_end);
2104 
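/*
 * For many filesystems generic_write_end() can be used directly as the
 * ->write_end method.  A sketch of a thin, hypothetical wrapper for a
 * filesystem that wants to do private bookkeeping once the copied data
 * has been committed to the page:
 */
static int myfs_write_end(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned copied,
			struct page *page, void *fsdata)
{
	int ret;

	ret = generic_write_end(file, mapping, pos, len, copied, page, fsdata);
	/* fs-private accounting (preallocation hints, quotas, ...) would go here */
	return ret;
}
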
2105 /*
2106  * block_is_partially_uptodate checks whether buffers within a page are
2107  * uptodate or not.
2108  *
2109  * Returns true if all buffers which correspond to a file portion
2110  * we want to read are uptodate.
2111  */
2112 int block_is_partially_uptodate(struct page *page, read_descriptor_t *desc,
2113 					unsigned long from)
2114 {
2115 	struct inode *inode = page->mapping->host;
2116 	unsigned block_start, block_end, blocksize;
2117 	unsigned to;
2118 	struct buffer_head *bh, *head;
2119 	int ret = 1;
2120 
2121 	if (!page_has_buffers(page))
2122 		return 0;
2123 
2124 	blocksize = 1 << inode->i_blkbits;
2125 	to = min_t(unsigned, PAGE_CACHE_SIZE - from, desc->count);
2126 	to = from + to;
2127 	if (from < blocksize && to > PAGE_CACHE_SIZE - blocksize)
2128 		return 0;
2129 
2130 	head = page_buffers(page);
2131 	bh = head;
2132 	block_start = 0;
2133 	do {
2134 		block_end = block_start + blocksize;
2135 		if (block_end > from && block_start < to) {
2136 			if (!buffer_uptodate(bh)) {
2137 				ret = 0;
2138 				break;
2139 			}
2140 			if (block_end >= to)
2141 				break;
2142 		}
2143 		block_start = block_end;
2144 		bh = bh->b_this_page;
2145 	} while (bh != head);
2146 
2147 	return ret;
2148 }
2149 EXPORT_SYMBOL(block_is_partially_uptodate);
2150 
2151 /*
2152  * Generic "read page" function for block devices that have the normal
2153  * get_block functionality. This covers most block-device based filesystems.
2154  * Reads the page asynchronously --- the unlock_buffer() and
2155  * set/clear_buffer_uptodate() functions propagate buffer state into the
2156  * page struct once IO has completed.
2157  */
2158 int block_read_full_page(struct page *page, get_block_t *get_block)
2159 {
2160 	struct inode *inode = page->mapping->host;
2161 	sector_t iblock, lblock;
2162 	struct buffer_head *bh, *head, *arr[MAX_BUF_PER_PAGE];
2163 	unsigned int blocksize;
2164 	int nr, i;
2165 	int fully_mapped = 1;
2166 
2167 	BUG_ON(!PageLocked(page));
2168 	blocksize = 1 << inode->i_blkbits;
2169 	if (!page_has_buffers(page))
2170 		create_empty_buffers(page, blocksize, 0);
2171 	head = page_buffers(page);
2172 
2173 	iblock = (sector_t)page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
2174 	lblock = (i_size_read(inode)+blocksize-1) >> inode->i_blkbits;
2175 	bh = head;
2176 	nr = 0;
2177 	i = 0;
2178 
2179 	do {
2180 		if (buffer_uptodate(bh))
2181 			continue;
2182 
2183 		if (!buffer_mapped(bh)) {
2184 			int err = 0;
2185 
2186 			fully_mapped = 0;
2187 			if (iblock < lblock) {
2188 				WARN_ON(bh->b_size != blocksize);
2189 				err = get_block(inode, iblock, bh, 0);
2190 				if (err)
2191 					SetPageError(page);
2192 			}
2193 			if (!buffer_mapped(bh)) {
2194 				zero_user(page, i * blocksize, blocksize);
2195 				if (!err)
2196 					set_buffer_uptodate(bh);
2197 				continue;
2198 			}
2199 			/*
2200 			 * get_block() might have updated the buffer
2201 			 * synchronously
2202 			 */
2203 			if (buffer_uptodate(bh))
2204 				continue;
2205 		}
2206 		arr[nr++] = bh;
2207 	} while (i++, iblock++, (bh = bh->b_this_page) != head);
2208 
2209 	if (fully_mapped)
2210 		SetPageMappedToDisk(page);
2211 
2212 	if (!nr) {
2213 		/*
2214 		 * All buffers are uptodate - we can set the page uptodate
2215 		 * as well. But not if get_block() returned an error.
2216 		 */
2217 		if (!PageError(page))
2218 			SetPageUptodate(page);
2219 		unlock_page(page);
2220 		return 0;
2221 	}
2222 
2223 	/* Stage two: lock the buffers */
2224 	for (i = 0; i < nr; i++) {
2225 		bh = arr[i];
2226 		lock_buffer(bh);
2227 		mark_buffer_async_read(bh);
2228 	}
2229 
2230 	/*
2231 	 * Stage 3: start the IO.  Check for uptodateness
2232 	 * inside the buffer lock in case another process reading
2233 	 * the underlying blockdev brought it uptodate (the sct fix).
2234 	 */
2235 	for (i = 0; i < nr; i++) {
2236 		bh = arr[i];
2237 		if (buffer_uptodate(bh))
2238 			end_buffer_async_read(bh, 1);
2239 		else
2240 			submit_bh(READ, bh);
2241 	}
2242 	return 0;
2243 }
2244 
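/*
 * Sketch of the usual ->readpage wrapper around block_read_full_page():
 * the aops method only receives the file and page, so the filesystem
 * curries in its block-mapping callback here ("myfs_get_block" is the
 * hypothetical helper assumed in the earlier sketch).
 */
static int myfs_readpage(struct file *file, struct page *page)
{
	return block_read_full_page(page, myfs_get_block);
}
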
2245 /* utility function for filesystems that need to do work on expanding
2246  * truncates.  Uses filesystem pagecache writes to allow the filesystem to
2247  * deal with the hole.
2248  */
2249 int generic_cont_expand_simple(struct inode *inode, loff_t size)
2250 {
2251 	struct address_space *mapping = inode->i_mapping;
2252 	struct page *page;
2253 	void *fsdata;
2254 	unsigned long limit;
2255 	int err;
2256 
2257 	err = -EFBIG;
2258 	limit = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
2259 	if (limit != RLIM_INFINITY && size > (loff_t)limit) {
2260 		send_sig(SIGXFSZ, current, 0);
2261 		goto out;
2262 	}
2263 	if (size > inode->i_sb->s_maxbytes)
2264 		goto out;
2265 
2266 	err = pagecache_write_begin(NULL, mapping, size, 0,
2267 				AOP_FLAG_UNINTERRUPTIBLE|AOP_FLAG_CONT_EXPAND,
2268 				&page, &fsdata);
2269 	if (err)
2270 		goto out;
2271 
2272 	err = pagecache_write_end(NULL, mapping, size, 0, 0, page, fsdata);
2273 	BUG_ON(err > 0);
2274 
2275 out:
2276 	return err;
2277 }
2278 
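/*
 * Sketch of an expanding-truncate path built on generic_cont_expand_simple():
 * when a size-increasing ->setattr comes in, the helper issues a zero-length
 * pagecache write at the new size so the filesystem's ->write_begin can
 * allocate and zero everything up to the new EOF.  "myfs_grow" and
 * "newsize" are hypothetical.
 */
static int myfs_grow(struct inode *inode, loff_t newsize)
{
	if (newsize <= i_size_read(inode))
		return 0;
	return generic_cont_expand_simple(inode, newsize);
}
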
2279 static int cont_expand_zero(struct file *file, struct address_space *mapping,
2280 			    loff_t pos, loff_t *bytes)
2281 {
2282 	struct inode *inode = mapping->host;
2283 	unsigned blocksize = 1 << inode->i_blkbits;
2284 	struct page *page;
2285 	void *fsdata;
2286 	pgoff_t index, curidx;
2287 	loff_t curpos;
2288 	unsigned zerofrom, offset, len;
2289 	int err = 0;
2290 
2291 	index = pos >> PAGE_CACHE_SHIFT;
2292 	offset = pos & ~PAGE_CACHE_MASK;
2293 
2294 	while (index > (curidx = (curpos = *bytes)>>PAGE_CACHE_SHIFT)) {
2295 		zerofrom = curpos & ~PAGE_CACHE_MASK;
2296 		if (zerofrom & (blocksize-1)) {
2297 			*bytes |= (blocksize-1);
2298 			(*bytes)++;
2299 		}
2300 		len = PAGE_CACHE_SIZE - zerofrom;
2301 
2302 		err = pagecache_write_begin(file, mapping, curpos, len,
2303 						AOP_FLAG_UNINTERRUPTIBLE,
2304 						&page, &fsdata);
2305 		if (err)
2306 			goto out;
2307 		zero_user(page, zerofrom, len);
2308 		err = pagecache_write_end(file, mapping, curpos, len, len,
2309 						page, fsdata);
2310 		if (err < 0)
2311 			goto out;
2312 		BUG_ON(err != len);
2313 		err = 0;
2314 
2315 		balance_dirty_pages_ratelimited(mapping);
2316 	}
2317 
2318 	/* page covers the boundary, find the boundary offset */
2319 	if (index == curidx) {
2320 		zerofrom = curpos & ~PAGE_CACHE_MASK;
2321 		/* if we will expand the file, the last block will be filled */
2322 		if (offset <= zerofrom) {
2323 			goto out;
2324 		}
2325 		if (zerofrom & (blocksize-1)) {
2326 			*bytes |= (blocksize-1);
2327 			(*bytes)++;
2328 		}
2329 		len = offset - zerofrom;
2330 
2331 		err = pagecache_write_begin(file, mapping, curpos, len,
2332 						AOP_FLAG_UNINTERRUPTIBLE,
2333 						&page, &fsdata);
2334 		if (err)
2335 			goto out;
2336 		zero_user(page, zerofrom, len);
2337 		err = pagecache_write_end(file, mapping, curpos, len, len,
2338 						page, fsdata);
2339 		if (err < 0)
2340 			goto out;
2341 		BUG_ON(err != len);
2342 		err = 0;
2343 	}
2344 out:
2345 	return err;
2346 }
2347 
2348 /*
2349  * For moronic filesystems that do not allow holes in files.
2350  * We may have to extend the file.
2351  */
2352 int cont_write_begin(struct file *file, struct address_space *mapping,
2353 			loff_t pos, unsigned len, unsigned flags,
2354 			struct page **pagep, void **fsdata,
2355 			get_block_t *get_block, loff_t *bytes)
2356 {
2357 	struct inode *inode = mapping->host;
2358 	unsigned blocksize = 1 << inode->i_blkbits;
2359 	unsigned zerofrom;
2360 	int err;
2361 
2362 	err = cont_expand_zero(file, mapping, pos, bytes);
2363 	if (err)
2364 		goto out;
2365 
2366 	zerofrom = *bytes & ~PAGE_CACHE_MASK;
2367 	if (pos+len > *bytes && zerofrom & (blocksize-1)) {
2368 		*bytes |= (blocksize-1);
2369 		(*bytes)++;
2370 	}
2371 
2372 	*pagep = NULL;
2373 	err = block_write_begin(file, mapping, pos, len,
2374 				flags, pagep, fsdata, get_block);
2375 out:
2376 	return err;
2377 }
2378 
2379 int block_prepare_write(struct page *page, unsigned from, unsigned to,
2380 			get_block_t *get_block)
2381 {
2382 	struct inode *inode = page->mapping->host;
2383 	int err = __block_prepare_write(inode, page, from, to, get_block);
2384 	if (err)
2385 		ClearPageUptodate(page);
2386 	return err;
2387 }
2388 
2389 int block_commit_write(struct page *page, unsigned from, unsigned to)
2390 {
2391 	struct inode *inode = page->mapping->host;
2392 	__block_commit_write(inode, page, from, to);
2393 	return 0;
2394 }
2395 
2396 /*
2397  * block_page_mkwrite() is not allowed to change the file size as it gets
2398  * called from a page fault handler when a page is first dirtied. Hence we must
2399  * be careful to check for EOF conditions here. We set the page up correctly
2400  * for a written page which means we get ENOSPC checking when writing into
2401  * holes and correct delalloc and unwritten extent mapping on filesystems that
2402  * support these features.
2403  *
2404  * We are not allowed to take the i_mutex here so we have to play games to
2405  * protect against truncate races as the page could now be beyond EOF.  Because
2406  * vmtruncate() writes the inode size before removing pages, once we have the
2407  * page lock we can determine safely if the page is beyond EOF. If it is not
2408  * beyond EOF, then the page is guaranteed safe against truncation until we
2409  * unlock the page.
2410  */
2411 int
2412 block_page_mkwrite(struct vm_area_struct *vma, struct page *page,
2413 		   get_block_t get_block)
2414 {
2415 	struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
2416 	unsigned long end;
2417 	loff_t size;
2418 	int ret = -EINVAL;
2419 
2420 	lock_page(page);
2421 	size = i_size_read(inode);
2422 	if ((page->mapping != inode->i_mapping) ||
2423 	    (page_offset(page) > size)) {
2424 		/* page got truncated out from underneath us */
2425 		goto out_unlock;
2426 	}
2427 
2428 	/* page is wholly or partially inside EOF */
2429 	if (((page->index + 1) << PAGE_CACHE_SHIFT) > size)
2430 		end = size & ~PAGE_CACHE_MASK;
2431 	else
2432 		end = PAGE_CACHE_SIZE;
2433 
2434 	ret = block_prepare_write(page, 0, end, get_block);
2435 	if (!ret)
2436 		ret = block_commit_write(page, 0, end);
2437 
2438 out_unlock:
2439 	unlock_page(page);
2440 	return ret;
2441 }
2442 
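/*
 * Sketch of a fault-time write handler layered on block_page_mkwrite(),
 * assuming a ->page_mkwrite prototype that matches this helper's
 * (vma, page) arguments; "myfs_get_block" is again the hypothetical
 * block-mapping callback.
 */
static int myfs_page_mkwrite(struct vm_area_struct *vma, struct page *page)
{
	return block_page_mkwrite(vma, page, myfs_get_block);
}
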
2443 /*
2444  * nobh_write_begin()'s prereads are special: the buffer_heads are freed
2445  * immediately, while under the page lock.  So it needs a special end_io
2446  * handler which does not touch the bh after unlocking it.
2447  */
2448 static void end_buffer_read_nobh(struct buffer_head *bh, int uptodate)
2449 {
2450 	__end_buffer_read_notouch(bh, uptodate);
2451 }
2452 
2453 /*
2454  * Attach the singly-linked list of buffers created by nobh_write_begin, to
2455  * the page (converting it to circular linked list and taking care of page
2456  * dirty races).
2457  */
2458 static void attach_nobh_buffers(struct page *page, struct buffer_head *head)
2459 {
2460 	struct buffer_head *bh;
2461 
2462 	BUG_ON(!PageLocked(page));
2463 
2464 	spin_lock(&page->mapping->private_lock);
2465 	bh = head;
2466 	do {
2467 		if (PageDirty(page))
2468 			set_buffer_dirty(bh);
2469 		if (!bh->b_this_page)
2470 			bh->b_this_page = head;
2471 		bh = bh->b_this_page;
2472 	} while (bh != head);
2473 	attach_page_buffers(page, head);
2474 	spin_unlock(&page->mapping->private_lock);
2475 }
2476 
2477 /*
2478  * On entry, the page is fully not uptodate.
2479  * On exit the page is fully uptodate in the areas outside (from,to)
2480  */
2481 int nobh_write_begin(struct file *file, struct address_space *mapping,
2482 			loff_t pos, unsigned len, unsigned flags,
2483 			struct page **pagep, void **fsdata,
2484 			get_block_t *get_block)
2485 {
2486 	struct inode *inode = mapping->host;
2487 	const unsigned blkbits = inode->i_blkbits;
2488 	const unsigned blocksize = 1 << blkbits;
2489 	struct buffer_head *head, *bh;
2490 	struct page *page;
2491 	pgoff_t index;
2492 	unsigned from, to;
2493 	unsigned block_in_page;
2494 	unsigned block_start, block_end;
2495 	sector_t block_in_file;
2496 	int nr_reads = 0;
2497 	int ret = 0;
2498 	int is_mapped_to_disk = 1;
2499 
2500 	index = pos >> PAGE_CACHE_SHIFT;
2501 	from = pos & (PAGE_CACHE_SIZE - 1);
2502 	to = from + len;
2503 
2504 	page = grab_cache_page_write_begin(mapping, index, flags);
2505 	if (!page)
2506 		return -ENOMEM;
2507 	*pagep = page;
2508 	*fsdata = NULL;
2509 
2510 	if (page_has_buffers(page)) {
2511 		unlock_page(page);
2512 		page_cache_release(page);
2513 		*pagep = NULL;
2514 		return block_write_begin(file, mapping, pos, len, flags, pagep,
2515 					fsdata, get_block);
2516 	}
2517 
2518 	if (PageMappedToDisk(page))
2519 		return 0;
2520 
2521 	/*
2522 	 * Allocate buffers so that we can keep track of state, and potentially
2523 	 * attach them to the page if an error occurs. In the common case of
2524 	 * no error, they will just be freed again without ever being attached
2525 	 * to the page (which is all OK, because we're under the page lock).
2526 	 *
2527 	 * Be careful: the buffer linked list is a NULL terminated one, rather
2528 	 * than the circular one we're used to.
2529 	 */
2530 	head = alloc_page_buffers(page, blocksize, 0);
2531 	if (!head) {
2532 		ret = -ENOMEM;
2533 		goto out_release;
2534 	}
2535 
2536 	block_in_file = (sector_t)page->index << (PAGE_CACHE_SHIFT - blkbits);
2537 
2538 	/*
2539 	 * We loop across all blocks in the page, whether or not they are
2540 	 * part of the affected region.  This is so we can discover if the
2541 	 * page is fully mapped-to-disk.
2542 	 */
2543 	for (block_start = 0, block_in_page = 0, bh = head;
2544 		  block_start < PAGE_CACHE_SIZE;
2545 		  block_in_page++, block_start += blocksize, bh = bh->b_this_page) {
2546 		int create;
2547 
2548 		block_end = block_start + blocksize;
2549 		bh->b_state = 0;
2550 		create = 1;
2551 		if (block_start >= to)
2552 			create = 0;
2553 		ret = get_block(inode, block_in_file + block_in_page,
2554 					bh, create);
2555 		if (ret)
2556 			goto failed;
2557 		if (!buffer_mapped(bh))
2558 			is_mapped_to_disk = 0;
2559 		if (buffer_new(bh))
2560 			unmap_underlying_metadata(bh->b_bdev, bh->b_blocknr);
2561 		if (PageUptodate(page)) {
2562 			set_buffer_uptodate(bh);
2563 			continue;
2564 		}
2565 		if (buffer_new(bh) || !buffer_mapped(bh)) {
2566 			zero_user_segments(page, block_start, from,
2567 							to, block_end);
2568 			continue;
2569 		}
2570 		if (buffer_uptodate(bh))
2571 			continue;	/* reiserfs does this */
2572 		if (block_start < from || block_end > to) {
2573 			lock_buffer(bh);
2574 			bh->b_end_io = end_buffer_read_nobh;
2575 			submit_bh(READ, bh);
2576 			nr_reads++;
2577 		}
2578 	}
2579 
2580 	if (nr_reads) {
2581 		/*
2582 		 * The page is locked, so these buffers are protected from
2583 		 * any VM or truncate activity.  Hence we don't need to care
2584 		 * for the buffer_head refcounts.
2585 		 */
2586 		for (bh = head; bh; bh = bh->b_this_page) {
2587 			wait_on_buffer(bh);
2588 			if (!buffer_uptodate(bh))
2589 				ret = -EIO;
2590 		}
2591 		if (ret)
2592 			goto failed;
2593 	}
2594 
2595 	if (is_mapped_to_disk)
2596 		SetPageMappedToDisk(page);
2597 
2598 	*fsdata = head; /* to be released by nobh_write_end */
2599 
2600 	return 0;
2601 
2602 failed:
2603 	BUG_ON(!ret);
2604 	/*
2605 	 * Error recovery is a bit difficult. We need to zero out blocks that
2606 	 * were newly allocated, and dirty them to ensure they get written out.
2607 	 * Buffers need to be attached to the page at this point, otherwise
2608 	 * the handling of potential IO errors during writeout would be hard
2609 	 * (could try doing synchronous writeout, but what if that fails too?)
2610 	 */
2611 	attach_nobh_buffers(page, head);
2612 	page_zero_new_buffers(page, from, to);
2613 
2614 out_release:
2615 	unlock_page(page);
2616 	page_cache_release(page);
2617 	*pagep = NULL;
2618 
2619 	if (pos + len > inode->i_size)
2620 		vmtruncate(inode, inode->i_size);
2621 
2622 	return ret;
2623 }
2624 EXPORT_SYMBOL(nobh_write_begin);
2625 
2626 int nobh_write_end(struct file *file, struct address_space *mapping,
2627 			loff_t pos, unsigned len, unsigned copied,
2628 			struct page *page, void *fsdata)
2629 {
2630 	struct inode *inode = page->mapping->host;
2631 	struct buffer_head *head = fsdata;
2632 	struct buffer_head *bh;
2633 	BUG_ON(fsdata != NULL && page_has_buffers(page));
2634 
2635 	if (unlikely(copied < len) && !page_has_buffers(page))
2636 		attach_nobh_buffers(page, head);
2637 	if (page_has_buffers(page))
2638 		return generic_write_end(file, mapping, pos, len,
2639 					copied, page, fsdata);
2640 
2641 	SetPageUptodate(page);
2642 	set_page_dirty(page);
2643 	if (pos+copied > inode->i_size) {
2644 		i_size_write(inode, pos+copied);
2645 		mark_inode_dirty(inode);
2646 	}
2647 
2648 	unlock_page(page);
2649 	page_cache_release(page);
2650 
2651 	while (head) {
2652 		bh = head;
2653 		head = head->b_this_page;
2654 		free_buffer_head(bh);
2655 	}
2656 
2657 	return copied;
2658 }
2659 EXPORT_SYMBOL(nobh_write_end);
2660 
2661 /*
2662  * nobh_writepage() - based on block_write_full_page() except
2663  * that it tries to operate without attaching bufferheads to
2664  * the page.
2665  */
2666 int nobh_writepage(struct page *page, get_block_t *get_block,
2667 			struct writeback_control *wbc)
2668 {
2669 	struct inode * const inode = page->mapping->host;
2670 	loff_t i_size = i_size_read(inode);
2671 	const pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
2672 	unsigned offset;
2673 	int ret;
2674 
2675 	/* Is the page fully inside i_size? */
2676 	if (page->index < end_index)
2677 		goto out;
2678 
2679 	/* Is the page fully outside i_size? (truncate in progress) */
2680 	offset = i_size & (PAGE_CACHE_SIZE-1);
2681 	if (page->index >= end_index+1 || !offset) {
2682 		/*
2683 		 * The page may have dirty, unmapped buffers.  For example,
2684 		 * they may have been added in ext3_writepage().  Make them
2685 		 * freeable here, so the page does not leak.
2686 		 */
2687 #if 0
2688 		/* Not really sure about this  - do we need this ? */
2689 		if (page->mapping->a_ops->invalidatepage)
2690 			page->mapping->a_ops->invalidatepage(page, offset);
2691 #endif
2692 		unlock_page(page);
2693 		return 0; /* don't care */
2694 	}
2695 
2696 	/*
2697 	 * The page straddles i_size.  It must be zeroed out on each and every
2698 	 * writepage invocation because it may be mmapped.  "A file is mapped
2699 	 * in multiples of the page size.  For a file that is not a multiple of
2700 	 * the  page size, the remaining memory is zeroed when mapped, and
2701 	 * writes to that region are not written out to the file."
2702 	 */
2703 	zero_user_segment(page, offset, PAGE_CACHE_SIZE);
2704 out:
2705 	ret = mpage_writepage(page, get_block, wbc);
2706 	if (ret == -EAGAIN)
2707 		ret = __block_write_full_page(inode, page, get_block, wbc);
2708 	return ret;
2709 }
2710 EXPORT_SYMBOL(nobh_writepage);
2711 
2712 int nobh_truncate_page(struct address_space *mapping,
2713 			loff_t from, get_block_t *get_block)
2714 {
2715 	pgoff_t index = from >> PAGE_CACHE_SHIFT;
2716 	unsigned offset = from & (PAGE_CACHE_SIZE-1);
2717 	unsigned blocksize;
2718 	sector_t iblock;
2719 	unsigned length, pos;
2720 	struct inode *inode = mapping->host;
2721 	struct page *page;
2722 	struct buffer_head map_bh;
2723 	int err;
2724 
2725 	blocksize = 1 << inode->i_blkbits;
2726 	length = offset & (blocksize - 1);
2727 
2728 	/* Block boundary? Nothing to do */
2729 	if (!length)
2730 		return 0;
2731 
2732 	length = blocksize - length;
2733 	iblock = (sector_t)index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
2734 
2735 	page = grab_cache_page(mapping, index);
2736 	err = -ENOMEM;
2737 	if (!page)
2738 		goto out;
2739 
2740 	if (page_has_buffers(page)) {
2741 has_buffers:
2742 		unlock_page(page);
2743 		page_cache_release(page);
2744 		return block_truncate_page(mapping, from, get_block);
2745 	}
2746 
2747 	/* Find the buffer that contains "offset" */
2748 	pos = blocksize;
2749 	while (offset >= pos) {
2750 		iblock++;
2751 		pos += blocksize;
2752 	}
2753 
2754 	err = get_block(inode, iblock, &map_bh, 0);
2755 	if (err)
2756 		goto unlock;
2757 	/* unmapped? It's a hole - nothing to do */
2758 	if (!buffer_mapped(&map_bh))
2759 		goto unlock;
2760 
2761 	/* Ok, it's mapped. Make sure it's up-to-date */
2762 	if (!PageUptodate(page)) {
2763 		err = mapping->a_ops->readpage(NULL, page);
2764 		if (err) {
2765 			page_cache_release(page);
2766 			goto out;
2767 		}
2768 		lock_page(page);
2769 		if (!PageUptodate(page)) {
2770 			err = -EIO;
2771 			goto unlock;
2772 		}
2773 		if (page_has_buffers(page))
2774 			goto has_buffers;
2775 	}
2776 	zero_user(page, offset, length);
2777 	set_page_dirty(page);
2778 	err = 0;
2779 
2780 unlock:
2781 	unlock_page(page);
2782 	page_cache_release(page);
2783 out:
2784 	return err;
2785 }
2786 EXPORT_SYMBOL(nobh_truncate_page);
2787 
2788 int block_truncate_page(struct address_space *mapping,
2789 			loff_t from, get_block_t *get_block)
2790 {
2791 	pgoff_t index = from >> PAGE_CACHE_SHIFT;
2792 	unsigned offset = from & (PAGE_CACHE_SIZE-1);
2793 	unsigned blocksize;
2794 	sector_t iblock;
2795 	unsigned length, pos;
2796 	struct inode *inode = mapping->host;
2797 	struct page *page;
2798 	struct buffer_head *bh;
2799 	int err;
2800 
2801 	blocksize = 1 << inode->i_blkbits;
2802 	length = offset & (blocksize - 1);
2803 
2804 	/* Block boundary? Nothing to do */
2805 	if (!length)
2806 		return 0;
2807 
2808 	length = blocksize - length;
2809 	iblock = (sector_t)index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
2810 
2811 	page = grab_cache_page(mapping, index);
2812 	err = -ENOMEM;
2813 	if (!page)
2814 		goto out;
2815 
2816 	if (!page_has_buffers(page))
2817 		create_empty_buffers(page, blocksize, 0);
2818 
2819 	/* Find the buffer that contains "offset" */
2820 	bh = page_buffers(page);
2821 	pos = blocksize;
2822 	while (offset >= pos) {
2823 		bh = bh->b_this_page;
2824 		iblock++;
2825 		pos += blocksize;
2826 	}
2827 
2828 	err = 0;
2829 	if (!buffer_mapped(bh)) {
2830 		WARN_ON(bh->b_size != blocksize);
2831 		err = get_block(inode, iblock, bh, 0);
2832 		if (err)
2833 			goto unlock;
2834 		/* unmapped? It's a hole - nothing to do */
2835 		if (!buffer_mapped(bh))
2836 			goto unlock;
2837 	}
2838 
2839 	/* Ok, it's mapped. Make sure it's up-to-date */
2840 	if (PageUptodate(page))
2841 		set_buffer_uptodate(bh);
2842 
2843 	if (!buffer_uptodate(bh) && !buffer_delay(bh) && !buffer_unwritten(bh)) {
2844 		err = -EIO;
2845 		ll_rw_block(READ, 1, &bh);
2846 		wait_on_buffer(bh);
2847 		/* Uhhuh. Read error. Complain and punt. */
2848 		if (!buffer_uptodate(bh))
2849 			goto unlock;
2850 	}
2851 
2852 	zero_user(page, offset, length);
2853 	mark_buffer_dirty(bh);
2854 	err = 0;
2855 
2856 unlock:
2857 	unlock_page(page);
2858 	page_cache_release(page);
2859 out:
2860 	return err;
2861 }
2862 
2863 /*
2864  * The generic ->writepage function for buffer-backed address_spaces
2865  */
2866 int block_write_full_page(struct page *page, get_block_t *get_block,
2867 			struct writeback_control *wbc)
2868 {
2869 	struct inode * const inode = page->mapping->host;
2870 	loff_t i_size = i_size_read(inode);
2871 	const pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
2872 	unsigned offset;
2873 
2874 	/* Is the page fully inside i_size? */
2875 	if (page->index < end_index)
2876 		return __block_write_full_page(inode, page, get_block, wbc);
2877 
2878 	/* Is the page fully outside i_size? (truncate in progress) */
2879 	offset = i_size & (PAGE_CACHE_SIZE-1);
2880 	if (page->index >= end_index+1 || !offset) {
2881 		/*
2882 		 * The page may have dirty, unmapped buffers.  For example,
2883 		 * they may have been added in ext3_writepage().  Make them
2884 		 * freeable here, so the page does not leak.
2885 		 */
2886 		do_invalidatepage(page, 0);
2887 		unlock_page(page);
2888 		return 0; /* don't care */
2889 	}
2890 
2891 	/*
2892 	 * The page straddles i_size.  It must be zeroed out on each and every
2893  * writepage invocation because it may be mmapped.  "A file is mapped
2894 	 * in multiples of the page size.  For a file that is not a multiple of
2895 	 * the  page size, the remaining memory is zeroed when mapped, and
2896 	 * writes to that region are not written out to the file."
2897 	 */
2898 	zero_user_segment(page, offset, PAGE_CACHE_SIZE);
2899 	return __block_write_full_page(inode, page, get_block, wbc);
2900 }
2901 
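/*
 * Matching ->writepage sketch: block_write_full_page() needs the
 * get_block callback, which the aops prototype does not carry, so the
 * hypothetical wrapper curries it in.
 */
static int myfs_writepage(struct page *page, struct writeback_control *wbc)
{
	return block_write_full_page(page, myfs_get_block, wbc);
}
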
2902 sector_t generic_block_bmap(struct address_space *mapping, sector_t block,
2903 			    get_block_t *get_block)
2904 {
2905 	struct buffer_head tmp;
2906 	struct inode *inode = mapping->host;
2907 	tmp.b_state = 0;
2908 	tmp.b_blocknr = 0;
2909 	tmp.b_size = 1 << inode->i_blkbits;
2910 	get_block(inode, block, &tmp, 0);
2911 	return tmp.b_blocknr;
2912 }
2913 
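/*
 * Sketch of a ->bmap wrapper plus one plausible way the helpers in this
 * file could be composed into an address_space_operations table for a
 * simple buffer_head based filesystem.  All "myfs_*" names are the
 * hypothetical wrappers sketched above; the rest are exported helpers
 * from this file.
 */
static sector_t myfs_bmap(struct address_space *mapping, sector_t block)
{
	return generic_block_bmap(mapping, block, myfs_get_block);
}

static const struct address_space_operations myfs_aops = {
	.readpage		= myfs_readpage,
	.writepage		= myfs_writepage,
	.sync_page		= block_sync_page,
	.write_begin		= myfs_write_begin,
	.write_end		= generic_write_end,
	.bmap			= myfs_bmap,
	.is_partially_uptodate	= block_is_partially_uptodate,
};
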
2914 static void end_bio_bh_io_sync(struct bio *bio, int err)
2915 {
2916 	struct buffer_head *bh = bio->bi_private;
2917 
2918 	if (err == -EOPNOTSUPP) {
2919 		set_bit(BIO_EOPNOTSUPP, &bio->bi_flags);
2920 		set_bit(BH_Eopnotsupp, &bh->b_state);
2921 	}
2922 
2923 	if (unlikely (test_bit(BIO_QUIET,&bio->bi_flags)))
2924 		set_bit(BH_Quiet, &bh->b_state);
2925 
2926 	bh->b_end_io(bh, test_bit(BIO_UPTODATE, &bio->bi_flags));
2927 	bio_put(bio);
2928 }
2929 
2930 int submit_bh(int rw, struct buffer_head * bh)
2931 {
2932 	struct bio *bio;
2933 	int ret = 0;
2934 
2935 	BUG_ON(!buffer_locked(bh));
2936 	BUG_ON(!buffer_mapped(bh));
2937 	BUG_ON(!bh->b_end_io);
2938 
2939 	/*
2940 	 * Mask in barrier bit for a write (could be either a WRITE or a
2941 	 * WRITE_SYNC)
2942 	 */
2943 	if (buffer_ordered(bh) && (rw & WRITE))
2944 		rw |= WRITE_BARRIER;
2945 
2946 	/*
2947 	 * Only clear out a write error when rewriting
2948 	 */
2949 	if (test_set_buffer_req(bh) && (rw & WRITE))
2950 		clear_buffer_write_io_error(bh);
2951 
2952 	/*
2953 	 * from here on down, it's all bio -- do the initial mapping,
2954 	 * submit_bio -> generic_make_request may further map this bio around
2955 	 */
2956 	bio = bio_alloc(GFP_NOIO, 1);
2957 
2958 	bio->bi_sector = bh->b_blocknr * (bh->b_size >> 9);
2959 	bio->bi_bdev = bh->b_bdev;
2960 	bio->bi_io_vec[0].bv_page = bh->b_page;
2961 	bio->bi_io_vec[0].bv_len = bh->b_size;
2962 	bio->bi_io_vec[0].bv_offset = bh_offset(bh);
2963 
2964 	bio->bi_vcnt = 1;
2965 	bio->bi_idx = 0;
2966 	bio->bi_size = bh->b_size;
2967 
2968 	bio->bi_end_io = end_bio_bh_io_sync;
2969 	bio->bi_private = bh;
2970 
2971 	bio_get(bio);
2972 	submit_bio(rw, bio);
2973 
2974 	if (bio_flagged(bio, BIO_EOPNOTSUPP))
2975 		ret = -EOPNOTSUPP;
2976 
2977 	bio_put(bio);
2978 	return ret;
2979 }
2980 
2981 /**
2982  * ll_rw_block: low-level access to block devices (DEPRECATED)
2983  * @rw: whether to %READ or %WRITE or %SWRITE or maybe %READA (readahead)
2984  * @nr: number of &struct buffer_heads in the array
2985  * @bhs: array of pointers to &struct buffer_head
2986  *
2987  * ll_rw_block() takes an array of pointers to &struct buffer_heads, and
2988  * requests an I/O operation on them, either a %READ or a %WRITE.  The third
2989  * %SWRITE is like %WRITE only we make sure that the *current* data in buffers
2990  * are sent to disk. The fourth %READA option is described in the documentation
2991  * for generic_make_request() which ll_rw_block() calls.
2992  *
2993  * This function drops any buffer that it cannot get a lock on (with the
2994  * BH_Lock state bit) unless SWRITE is required, any buffer that appears to be
2995  * clean when doing a write request, and any buffer that appears to be
2996  * up-to-date when doing a read request.  Further it marks as clean the buffers that
2997  * are processed for writing (the buffer cache won't assume that they are
2998  * actually clean until the buffer gets unlocked).
2999  *
3000  * ll_rw_block sets b_end_io to a simple completion handler that marks
3001  * the buffer up-to-date (if appropriate), unlocks the buffer and wakes
3002  * any waiters.
3003  *
3004  * All of the buffers must be for the same device, and must also be a
3005  * multiple of the current approved size for the device.
3006  */
3007 void ll_rw_block(int rw, int nr, struct buffer_head *bhs[])
3008 {
3009 	int i;
3010 
3011 	for (i = 0; i < nr; i++) {
3012 		struct buffer_head *bh = bhs[i];
3013 
3014 		if (rw == SWRITE || rw == SWRITE_SYNC)
3015 			lock_buffer(bh);
3016 		else if (!trylock_buffer(bh))
3017 			continue;
3018 
3019 		if (rw == WRITE || rw == SWRITE || rw == SWRITE_SYNC) {
3020 			if (test_clear_buffer_dirty(bh)) {
3021 				bh->b_end_io = end_buffer_write_sync;
3022 				get_bh(bh);
3023 				if (rw == SWRITE_SYNC)
3024 					submit_bh(WRITE_SYNC, bh);
3025 				else
3026 					submit_bh(WRITE, bh);
3027 				continue;
3028 			}
3029 		} else {
3030 			if (!buffer_uptodate(bh)) {
3031 				bh->b_end_io = end_buffer_read_sync;
3032 				get_bh(bh);
3033 				submit_bh(rw, bh);
3034 				continue;
3035 			}
3036 		}
3037 		unlock_buffer(bh);
3038 	}
3039 }
3040 
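/*
 * Typical ll_rw_block() usage sketch: start reads for a whole batch of
 * buffers, then wait for each one and check the result.  The caller is
 * assumed to have looked the buffer_heads up already.
 */
static int myfs_read_bh_batch(struct buffer_head *bhs[], int nr)
{
	int i, err = 0;

	ll_rw_block(READ, nr, bhs);	/* already-uptodate buffers are skipped */
	for (i = 0; i < nr; i++) {
		wait_on_buffer(bhs[i]);
		if (!buffer_uptodate(bhs[i]))
			err = -EIO;
	}
	return err;
}
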
3041 /*
3042  * For a data-integrity writeout, we need to wait upon any in-progress I/O
3043  * and then start new I/O and then wait upon it.  The caller must have a ref on
3044  * the buffer_head.
3045  */
3046 int sync_dirty_buffer(struct buffer_head *bh)
3047 {
3048 	int ret = 0;
3049 
3050 	WARN_ON(atomic_read(&bh->b_count) < 1);
3051 	lock_buffer(bh);
3052 	if (test_clear_buffer_dirty(bh)) {
3053 		get_bh(bh);
3054 		bh->b_end_io = end_buffer_write_sync;
3055 		ret = submit_bh(WRITE_SYNC, bh);
3056 		wait_on_buffer(bh);
3057 		if (buffer_eopnotsupp(bh)) {
3058 			clear_buffer_eopnotsupp(bh);
3059 			ret = -EOPNOTSUPP;
3060 		}
3061 		if (!ret && !buffer_uptodate(bh))
3062 			ret = -EIO;
3063 	} else {
3064 		unlock_buffer(bh);
3065 	}
3066 	return ret;
3067 }
3068 
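/*
 * Data-integrity update sketch: read a metadata block with sb_bread(),
 * modify it in memory, then use sync_dirty_buffer() to write it out and
 * wait for the I/O before reporting success.  All parameters are
 * hypothetical inputs from the caller.
 */
static int myfs_update_block(struct super_block *sb, sector_t blocknr,
			unsigned offset, const void *data, unsigned len)
{
	struct buffer_head *bh;
	int err;

	bh = sb_bread(sb, blocknr);
	if (!bh)
		return -EIO;
	memcpy(bh->b_data + offset, data, len);
	mark_buffer_dirty(bh);
	err = sync_dirty_buffer(bh);	/* submits and waits for the write */
	brelse(bh);
	return err;
}
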
3069 /*
3070  * try_to_free_buffers() checks if all the buffers on this particular page
3071  * are unused, and releases them if so.
3072  *
3073  * Exclusion against try_to_free_buffers may be obtained by either
3074  * locking the page or by holding its mapping's private_lock.
3075  *
3076  * If the page is dirty but all the buffers are clean then we need to
3077  * be sure to mark the page clean as well.  This is because the page
3078  * may be against a block device, and a later reattachment of buffers
3079  * to a dirty page will set *all* buffers dirty.  Which would corrupt
3080  * filesystem data on the same device.
3081  *
3082  * The same applies to regular filesystem pages: if all the buffers are
3083  * clean then we set the page clean and proceed.  To do that, we require
3084  * total exclusion from __set_page_dirty_buffers().  That is obtained with
3085  * private_lock.
3086  *
3087  * try_to_free_buffers() is non-blocking.
3088  */
3089 static inline int buffer_busy(struct buffer_head *bh)
3090 {
3091 	return atomic_read(&bh->b_count) |
3092 		(bh->b_state & ((1 << BH_Dirty) | (1 << BH_Lock)));
3093 }
3094 
3095 static int
3096 drop_buffers(struct page *page, struct buffer_head **buffers_to_free)
3097 {
3098 	struct buffer_head *head = page_buffers(page);
3099 	struct buffer_head *bh;
3100 
3101 	bh = head;
3102 	do {
3103 		if (buffer_write_io_error(bh) && page->mapping)
3104 			set_bit(AS_EIO, &page->mapping->flags);
3105 		if (buffer_busy(bh))
3106 			goto failed;
3107 		bh = bh->b_this_page;
3108 	} while (bh != head);
3109 
3110 	do {
3111 		struct buffer_head *next = bh->b_this_page;
3112 
3113 		if (bh->b_assoc_map)
3114 			__remove_assoc_queue(bh);
3115 		bh = next;
3116 	} while (bh != head);
3117 	*buffers_to_free = head;
3118 	__clear_page_buffers(page);
3119 	return 1;
3120 failed:
3121 	return 0;
3122 }
3123 
3124 int try_to_free_buffers(struct page *page)
3125 {
3126 	struct address_space * const mapping = page->mapping;
3127 	struct buffer_head *buffers_to_free = NULL;
3128 	int ret = 0;
3129 
3130 	BUG_ON(!PageLocked(page));
3131 	if (PageWriteback(page))
3132 		return 0;
3133 
3134 	if (mapping == NULL) {		/* can this still happen? */
3135 		ret = drop_buffers(page, &buffers_to_free);
3136 		goto out;
3137 	}
3138 
3139 	spin_lock(&mapping->private_lock);
3140 	ret = drop_buffers(page, &buffers_to_free);
3141 
3142 	/*
3143 	 * If the filesystem writes its buffers by hand (eg ext3)
3144 	 * then we can have clean buffers against a dirty page.  We
3145 	 * clean the page here; otherwise the VM will never notice
3146 	 * that the filesystem did any IO at all.
3147 	 *
3148 	 * Also, during truncate, discard_buffer will have marked all
3149 	 * the page's buffers clean.  We discover that here and clean
3150 	 * the page also.
3151 	 *
3152 	 * private_lock must be held over this entire operation in order
3153 	 * to synchronise against __set_page_dirty_buffers and prevent the
3154 	 * dirty bit from being lost.
3155 	 */
3156 	if (ret)
3157 		cancel_dirty_page(page, PAGE_CACHE_SIZE);
3158 	spin_unlock(&mapping->private_lock);
3159 out:
3160 	if (buffers_to_free) {
3161 		struct buffer_head *bh = buffers_to_free;
3162 
3163 		do {
3164 			struct buffer_head *next = bh->b_this_page;
3165 			free_buffer_head(bh);
3166 			bh = next;
3167 		} while (bh != buffers_to_free);
3168 	}
3169 	return ret;
3170 }
3171 EXPORT_SYMBOL(try_to_free_buffers);
3172 
3173 void block_sync_page(struct page *page)
3174 {
3175 	struct address_space *mapping;
3176 
3177 	smp_mb();
3178 	mapping = page_mapping(page);
3179 	if (mapping)
3180 		blk_run_backing_dev(mapping->backing_dev_info, page);
3181 }
3182 
3183 /*
3184  * There are no bdflush tunables left.  But distributions are
3185  * still running obsolete flush daemons, so we terminate them here.
3186  *
3187  * Use of bdflush() is deprecated and will be removed in a future kernel.
3188  * The `pdflush' kernel threads fully replace bdflush daemons and this call.
3189  */
3190 asmlinkage long sys_bdflush(int func, long data)
3191 {
3192 	static int msg_count;
3193 
3194 	if (!capable(CAP_SYS_ADMIN))
3195 		return -EPERM;
3196 
3197 	if (msg_count < 5) {
3198 		msg_count++;
3199 		printk(KERN_INFO
3200 			"warning: process `%s' used the obsolete bdflush"
3201 			" system call\n", current->comm);
3202 		printk(KERN_INFO "Fix your initscripts?\n");
3203 	}
3204 
3205 	if (func == 1)
3206 		do_exit(0);
3207 	return 0;
3208 }
3209 
3210 /*
3211  * Buffer-head allocation
3212  */
3213 static struct kmem_cache *bh_cachep;
3214 
3215 /*
3216  * Once the number of bh's in the machine exceeds this level, we start
3217  * stripping them in writeback.
3218  */
3219 static int max_buffer_heads;
3220 
3221 int buffer_heads_over_limit;
3222 
3223 struct bh_accounting {
3224 	int nr;			/* Number of live bh's */
3225 	int ratelimit;		/* Limit cacheline bouncing */
3226 };
3227 
3228 static DEFINE_PER_CPU(struct bh_accounting, bh_accounting) = {0, 0};
3229 
3230 static void recalc_bh_state(void)
3231 {
3232 	int i;
3233 	int tot = 0;
3234 
3235 	if (__get_cpu_var(bh_accounting).ratelimit++ < 4096)
3236 		return;
3237 	__get_cpu_var(bh_accounting).ratelimit = 0;
3238 	for_each_online_cpu(i)
3239 		tot += per_cpu(bh_accounting, i).nr;
3240 	buffer_heads_over_limit = (tot > max_buffer_heads);
3241 }
3242 
3243 struct buffer_head *alloc_buffer_head(gfp_t gfp_flags)
3244 {
3245 	struct buffer_head *ret = kmem_cache_alloc(bh_cachep, gfp_flags);
3246 	if (ret) {
3247 		INIT_LIST_HEAD(&ret->b_assoc_buffers);
3248 		get_cpu_var(bh_accounting).nr++;
3249 		recalc_bh_state();
3250 		put_cpu_var(bh_accounting);
3251 	}
3252 	return ret;
3253 }
3254 EXPORT_SYMBOL(alloc_buffer_head);
3255 
3256 void free_buffer_head(struct buffer_head *bh)
3257 {
3258 	BUG_ON(!list_empty(&bh->b_assoc_buffers));
3259 	kmem_cache_free(bh_cachep, bh);
3260 	get_cpu_var(bh_accounting).nr--;
3261 	recalc_bh_state();
3262 	put_cpu_var(bh_accounting);
3263 }
3264 EXPORT_SYMBOL(free_buffer_head);
3265 
3266 static void buffer_exit_cpu(int cpu)
3267 {
3268 	int i;
3269 	struct bh_lru *b = &per_cpu(bh_lrus, cpu);
3270 
3271 	for (i = 0; i < BH_LRU_SIZE; i++) {
3272 		brelse(b->bhs[i]);
3273 		b->bhs[i] = NULL;
3274 	}
3275 	get_cpu_var(bh_accounting).nr += per_cpu(bh_accounting, cpu).nr;
3276 	per_cpu(bh_accounting, cpu).nr = 0;
3277 	put_cpu_var(bh_accounting);
3278 }
3279 
3280 static int buffer_cpu_notify(struct notifier_block *self,
3281 			      unsigned long action, void *hcpu)
3282 {
3283 	if (action == CPU_DEAD || action == CPU_DEAD_FROZEN)
3284 		buffer_exit_cpu((unsigned long)hcpu);
3285 	return NOTIFY_OK;
3286 }
3287 
3288 /**
3289  * bh_uptodate_or_lock - Test whether the buffer is uptodate
3290  * @bh: struct buffer_head
3291  *
3292  * Return true if the buffer is up-to-date and false,
3293  * with the buffer locked, if not.
3294  */
3295 int bh_uptodate_or_lock(struct buffer_head *bh)
3296 {
3297 	if (!buffer_uptodate(bh)) {
3298 		lock_buffer(bh);
3299 		if (!buffer_uptodate(bh))
3300 			return 0;
3301 		unlock_buffer(bh);
3302 	}
3303 	return 1;
3304 }
3305 EXPORT_SYMBOL(bh_uptodate_or_lock);
3306 
3307 /**
3308  * bh_submit_read - Submit a locked buffer for reading
3309  * @bh: struct buffer_head
3310  *
3311  * Returns zero on success and -EIO on error.
3312  */
3313 int bh_submit_read(struct buffer_head *bh)
3314 {
3315 	BUG_ON(!buffer_locked(bh));
3316 
3317 	if (buffer_uptodate(bh)) {
3318 		unlock_buffer(bh);
3319 		return 0;
3320 	}
3321 
3322 	get_bh(bh);
3323 	bh->b_end_io = end_buffer_read_sync;
3324 	submit_bh(READ, bh);
3325 	wait_on_buffer(bh);
3326 	if (buffer_uptodate(bh))
3327 		return 0;
3328 	return -EIO;
3329 }
3330 EXPORT_SYMBOL(bh_submit_read);
3331 
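/*
 * The two helpers above are meant to be used together; a sketch of
 * reading an already-mapped buffer only when it is not uptodate yet:
 */
static int myfs_read_bh_if_needed(struct buffer_head *bh)
{
	if (bh_uptodate_or_lock(bh))
		return 0;		/* nothing to do, buffer was uptodate */
	/* bh_uptodate_or_lock() returned with the buffer locked */
	return bh_submit_read(bh);	/* submits READ, waits, 0 or -EIO */
}
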
3332 static void
3333 init_buffer_head(void *data)
3334 {
3335 	struct buffer_head *bh = data;
3336 
3337 	memset(bh, 0, sizeof(*bh));
3338 	INIT_LIST_HEAD(&bh->b_assoc_buffers);
3339 }
3340 
3341 void __init buffer_init(void)
3342 {
3343 	int nrpages;
3344 
3345 	bh_cachep = kmem_cache_create("buffer_head",
3346 			sizeof(struct buffer_head), 0,
3347 				(SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|
3348 				SLAB_MEM_SPREAD),
3349 				init_buffer_head);
3350 
3351 	/*
3352 	 * Limit the bh occupancy to 10% of ZONE_NORMAL
3353 	 */
3354 	nrpages = (nr_free_buffer_pages() * 10) / 100;
3355 	max_buffer_heads = nrpages * (PAGE_SIZE / sizeof(struct buffer_head));
3356 	hotcpu_notifier(buffer_cpu_notify, 0);
3357 }
3358 
3359 EXPORT_SYMBOL(__bforget);
3360 EXPORT_SYMBOL(__brelse);
3361 EXPORT_SYMBOL(__wait_on_buffer);
3362 EXPORT_SYMBOL(block_commit_write);
3363 EXPORT_SYMBOL(block_prepare_write);
3364 EXPORT_SYMBOL(block_page_mkwrite);
3365 EXPORT_SYMBOL(block_read_full_page);
3366 EXPORT_SYMBOL(block_sync_page);
3367 EXPORT_SYMBOL(block_truncate_page);
3368 EXPORT_SYMBOL(block_write_full_page);
3369 EXPORT_SYMBOL(cont_write_begin);
3370 EXPORT_SYMBOL(end_buffer_read_sync);
3371 EXPORT_SYMBOL(end_buffer_write_sync);
3372 EXPORT_SYMBOL(file_fsync);
3373 EXPORT_SYMBOL(fsync_bdev);
3374 EXPORT_SYMBOL(generic_block_bmap);
3375 EXPORT_SYMBOL(generic_cont_expand_simple);
3376 EXPORT_SYMBOL(init_buffer);
3377 EXPORT_SYMBOL(invalidate_bdev);
3378 EXPORT_SYMBOL(ll_rw_block);
3379 EXPORT_SYMBOL(mark_buffer_dirty);
3380 EXPORT_SYMBOL(submit_bh);
3381 EXPORT_SYMBOL(sync_dirty_buffer);
3382 EXPORT_SYMBOL(unlock_buffer);
3383