xref: /linux-6.15/fs/sync.c (revision 6348be02)
1b2441318SGreg Kroah-Hartman // SPDX-License-Identifier: GPL-2.0
2f79e2abbSAndrew Morton /*
3f79e2abbSAndrew Morton  * High-level sync()-related operations
4f79e2abbSAndrew Morton  */
5f79e2abbSAndrew Morton 
670164eb6SChristoph Hellwig #include <linux/blkdev.h>
7f79e2abbSAndrew Morton #include <linux/kernel.h>
8f79e2abbSAndrew Morton #include <linux/file.h>
9f79e2abbSAndrew Morton #include <linux/fs.h>
105a0e3ad6STejun Heo #include <linux/slab.h>
11630d9c47SPaul Gortmaker #include <linux/export.h>
12b7ed78f5SSage Weil #include <linux/namei.h>
13914e2637SAl Viro #include <linux/sched.h>
14f79e2abbSAndrew Morton #include <linux/writeback.h>
15f79e2abbSAndrew Morton #include <linux/syscalls.h>
16f79e2abbSAndrew Morton #include <linux/linkage.h>
17f79e2abbSAndrew Morton #include <linux/pagemap.h>
18cf9a2ae8SDavid Howells #include <linux/quotaops.h>
195129a469SJörn Engel #include <linux/backing-dev.h>
205a3e5cb8SJan Kara #include "internal.h"
21f79e2abbSAndrew Morton 
22f79e2abbSAndrew Morton #define VALID_FLAGS (SYNC_FILE_RANGE_WAIT_BEFORE|SYNC_FILE_RANGE_WRITE| \
23f79e2abbSAndrew Morton 			SYNC_FILE_RANGE_WAIT_AFTER)
24f79e2abbSAndrew Morton 
25c15c54f5SJan Kara /*
26c15c54f5SJan Kara  * Write out and wait upon all dirty data associated with this
27c15c54f5SJan Kara  * superblock.  Filesystem data as well as the underlying block
28c15c54f5SJan Kara  * device.  Takes the superblock lock.
29c15c54f5SJan Kara  */
sync_filesystem(struct super_block * sb)3060b0680fSJan Kara int sync_filesystem(struct super_block *sb)
31c15c54f5SJan Kara {
325679897eSDarrick J. Wong 	int ret = 0;
33c15c54f5SJan Kara 
345af7926fSChristoph Hellwig 	/*
355af7926fSChristoph Hellwig 	 * We need to be protected against the filesystem going from
365af7926fSChristoph Hellwig 	 * r/o to r/w or vice versa.
375af7926fSChristoph Hellwig 	 */
385af7926fSChristoph Hellwig 	WARN_ON(!rwsem_is_locked(&sb->s_umount));
395af7926fSChristoph Hellwig 
405af7926fSChristoph Hellwig 	/*
415af7926fSChristoph Hellwig 	 * No point in syncing out anything if the filesystem is read-only.
425af7926fSChristoph Hellwig 	 */
43bc98a42cSDavid Howells 	if (sb_rdonly(sb))
445af7926fSChristoph Hellwig 		return 0;
455af7926fSChristoph Hellwig 
469a208ba5SChristoph Hellwig 	/*
479a208ba5SChristoph Hellwig 	 * Do the filesystem syncing work.  For simple filesystems
489a208ba5SChristoph Hellwig 	 * writeback_inodes_sb(sb) just dirties buffers with inodes so we have
4970164eb6SChristoph Hellwig 	 * to submit I/O for these buffers via sync_blockdev().  This also
509a208ba5SChristoph Hellwig 	 * speeds up the wait == 1 case since in that case write_inode()
519a208ba5SChristoph Hellwig 	 * methods call sync_dirty_buffer() and thus effectively write one block
529a208ba5SChristoph Hellwig 	 * at a time.
539a208ba5SChristoph Hellwig 	 */
549a208ba5SChristoph Hellwig 	writeback_inodes_sb(sb, WB_REASON_SYNC);
555679897eSDarrick J. Wong 	if (sb->s_op->sync_fs) {
565679897eSDarrick J. Wong 		ret = sb->s_op->sync_fs(sb, 0);
575679897eSDarrick J. Wong 		if (ret)
585679897eSDarrick J. Wong 			return ret;
595679897eSDarrick J. Wong 	}
6070164eb6SChristoph Hellwig 	ret = sync_blockdev_nowait(sb->s_bdev);
615679897eSDarrick J. Wong 	if (ret)
62c15c54f5SJan Kara 		return ret;
639a208ba5SChristoph Hellwig 
649a208ba5SChristoph Hellwig 	sync_inodes_sb(sb);
655679897eSDarrick J. Wong 	if (sb->s_op->sync_fs) {
665679897eSDarrick J. Wong 		ret = sb->s_op->sync_fs(sb, 1);
675679897eSDarrick J. Wong 		if (ret)
685679897eSDarrick J. Wong 			return ret;
695679897eSDarrick J. Wong 	}
7070164eb6SChristoph Hellwig 	return sync_blockdev(sb->s_bdev);
71c15c54f5SJan Kara }
7210096fb1SAnton Altaparmakov EXPORT_SYMBOL(sync_filesystem);
73c15c54f5SJan Kara 
/* iterate_supers() callback: write out and wait on @sb's inodes, r/w only. */
static void sync_inodes_one_sb(struct super_block *sb, void *arg)
{
	if (sb_rdonly(sb))
		return;
	sync_inodes_sb(sb);
}
79b3de6531SJan Kara 
sync_fs_one_sb(struct super_block * sb,void * arg)80b3de6531SJan Kara static void sync_fs_one_sb(struct super_block *sb, void *arg)
81b3de6531SJan Kara {
8232b1924bSKonstantin Khlebnikov 	if (!sb_rdonly(sb) && !(sb->s_iflags & SB_I_SKIP_SYNC) &&
8332b1924bSKonstantin Khlebnikov 	    sb->s_op->sync_fs)
84b3de6531SJan Kara 		sb->s_op->sync_fs(sb, *(int *)arg);
85b3de6531SJan Kara }
86b3de6531SJan Kara 
873beab0b4SZhang, Yanmin /*
884ea425b6SJan Kara  * Sync everything. We start by waking flusher threads so that most of
894ea425b6SJan Kara  * writeback runs on all devices in parallel. Then we sync all inodes reliably
904ea425b6SJan Kara  * which effectively also waits for all flusher threads to finish doing
914ea425b6SJan Kara  * writeback. At this point all data is on disk so metadata should be stable
924ea425b6SJan Kara  * and we tell filesystems to sync their metadata via ->sync_fs() calls.
934ea425b6SJan Kara  * Finally, we writeout all block devices because some filesystems (e.g. ext2)
944ea425b6SJan Kara  * just write metadata (such as inodes or bitmaps) to block device page cache
954ea425b6SJan Kara  * and do not sync it on their own in ->sync_fs().
963beab0b4SZhang, Yanmin  */
ksys_sync(void)9770f68ee8SDominik Brodowski void ksys_sync(void)
98cf9a2ae8SDavid Howells {
99b3de6531SJan Kara 	int nowait = 0, wait = 1;
100b3de6531SJan Kara 
1019ba4b2dfSJens Axboe 	wakeup_flusher_threads(WB_REASON_SYNC);
1020dc83bd3SJan Kara 	iterate_supers(sync_inodes_one_sb, NULL);
1034ea425b6SJan Kara 	iterate_supers(sync_fs_one_sb, &nowait);
104b3de6531SJan Kara 	iterate_supers(sync_fs_one_sb, &wait);
1051e03a36bSChristoph Hellwig 	sync_bdevs(false);
1061e03a36bSChristoph Hellwig 	sync_bdevs(true);
1075cee5815SJan Kara 	if (unlikely(laptop_mode))
1085cee5815SJan Kara 		laptop_sync_completion();
10970f68ee8SDominik Brodowski }
11070f68ee8SDominik Brodowski 
/* sync(2): always reports success. */
SYSCALL_DEFINE0(sync)
{
	ksys_sync();
	return 0;
}
116cf9a2ae8SDavid Howells 
do_sync_work(struct work_struct * work)117a2a9537aSJens Axboe static void do_sync_work(struct work_struct *work)
118a2a9537aSJens Axboe {
119b3de6531SJan Kara 	int nowait = 0;
120b3de6531SJan Kara 
1215cee5815SJan Kara 	/*
1225cee5815SJan Kara 	 * Sync twice to reduce the possibility we skipped some inodes / pages
1235cee5815SJan Kara 	 * because they were temporarily locked
1245cee5815SJan Kara 	 */
125b3de6531SJan Kara 	iterate_supers(sync_inodes_one_sb, &nowait);
126b3de6531SJan Kara 	iterate_supers(sync_fs_one_sb, &nowait);
1271e03a36bSChristoph Hellwig 	sync_bdevs(false);
128b3de6531SJan Kara 	iterate_supers(sync_inodes_one_sb, &nowait);
129b3de6531SJan Kara 	iterate_supers(sync_fs_one_sb, &nowait);
1301e03a36bSChristoph Hellwig 	sync_bdevs(false);
1315cee5815SJan Kara 	printk("Emergency Sync complete\n");
132a2a9537aSJens Axboe 	kfree(work);
133a2a9537aSJens Axboe }
134a2a9537aSJens Axboe 
emergency_sync(void)135cf9a2ae8SDavid Howells void emergency_sync(void)
136cf9a2ae8SDavid Howells {
137a2a9537aSJens Axboe 	struct work_struct *work;
138a2a9537aSJens Axboe 
139a2a9537aSJens Axboe 	work = kmalloc(sizeof(*work), GFP_ATOMIC);
140a2a9537aSJens Axboe 	if (work) {
141a2a9537aSJens Axboe 		INIT_WORK(work, do_sync_work);
142a2a9537aSJens Axboe 		schedule_work(work);
143a2a9537aSJens Axboe 	}
144cf9a2ae8SDavid Howells }
145cf9a2ae8SDavid Howells 
146b7ed78f5SSage Weil /*
147b7ed78f5SSage Weil  * sync a single super
148b7ed78f5SSage Weil  */
SYSCALL_DEFINE1(syncfs,int,fd)149b7ed78f5SSage Weil SYSCALL_DEFINE1(syncfs, int, fd)
150b7ed78f5SSage Weil {
151*6348be02SAl Viro 	CLASS(fd, f)(fd);
152b7ed78f5SSage Weil 	struct super_block *sb;
153735e4ae5SJeff Layton 	int ret, ret2;
154b7ed78f5SSage Weil 
155*6348be02SAl Viro 	if (fd_empty(f))
156b7ed78f5SSage Weil 		return -EBADF;
1571da91ea8SAl Viro 	sb = fd_file(f)->f_path.dentry->d_sb;
158b7ed78f5SSage Weil 
159b7ed78f5SSage Weil 	down_read(&sb->s_umount);
160b7ed78f5SSage Weil 	ret = sync_filesystem(sb);
161b7ed78f5SSage Weil 	up_read(&sb->s_umount);
162b7ed78f5SSage Weil 
1631da91ea8SAl Viro 	ret2 = errseq_check_and_advance(&sb->s_wb_err, &fd_file(f)->f_sb_err);
164735e4ae5SJeff Layton 
165735e4ae5SJeff Layton 	return ret ? ret : ret2;
166b7ed78f5SSage Weil }
167b7ed78f5SSage Weil 
1684c728ef5SChristoph Hellwig /**
169148f948bSJan Kara  * vfs_fsync_range - helper to sync a range of data & metadata to disk
1704c728ef5SChristoph Hellwig  * @file:		file to sync
171148f948bSJan Kara  * @start:		offset in bytes of the beginning of data range to sync
172148f948bSJan Kara  * @end:		offset in bytes of the end of data range (inclusive)
173148f948bSJan Kara  * @datasync:		perform only datasync
1744c728ef5SChristoph Hellwig  *
175148f948bSJan Kara  * Write back data in range @start..@end and metadata for @file to disk.  If
176148f948bSJan Kara  * @datasync is set only metadata needed to access modified file data is
177148f948bSJan Kara  * written.
1784c728ef5SChristoph Hellwig  */
vfs_fsync_range(struct file * file,loff_t start,loff_t end,int datasync)1798018ab05SChristoph Hellwig int vfs_fsync_range(struct file *file, loff_t start, loff_t end, int datasync)
180cf9a2ae8SDavid Howells {
1810ae45f63STheodore Ts'o 	struct inode *inode = file->f_mapping->host;
1820ae45f63STheodore Ts'o 
18372c2d531SAl Viro 	if (!file->f_op->fsync)
18402c24a82SJosef Bacik 		return -EINVAL;
1850d07e557SChristoph Hellwig 	if (!datasync && (inode->i_state & I_DIRTY_TIME))
1860ae45f63STheodore Ts'o 		mark_inode_dirty_sync(inode);
1870f41074aSJeff Layton 	return file->f_op->fsync(file, start, end, datasync);
188cf9a2ae8SDavid Howells }
189148f948bSJan Kara EXPORT_SYMBOL(vfs_fsync_range);
190148f948bSJan Kara 
191148f948bSJan Kara /**
192148f948bSJan Kara  * vfs_fsync - perform a fsync or fdatasync on a file
193148f948bSJan Kara  * @file:		file to sync
194148f948bSJan Kara  * @datasync:		only perform a fdatasync operation
195148f948bSJan Kara  *
196148f948bSJan Kara  * Write back data and metadata for @file to disk.  If @datasync is
197148f948bSJan Kara  * set only metadata needed to access modified file data is written.
198148f948bSJan Kara  */
vfs_fsync(struct file * file,int datasync)1998018ab05SChristoph Hellwig int vfs_fsync(struct file *file, int datasync)
200148f948bSJan Kara {
2018018ab05SChristoph Hellwig 	return vfs_fsync_range(file, 0, LLONG_MAX, datasync);
202148f948bSJan Kara }
2034c728ef5SChristoph Hellwig EXPORT_SYMBOL(vfs_fsync);
204cf9a2ae8SDavid Howells 
do_fsync(unsigned int fd,int datasync)2054c728ef5SChristoph Hellwig static int do_fsync(unsigned int fd, int datasync)
206cf9a2ae8SDavid Howells {
207*6348be02SAl Viro 	CLASS(fd, f)(fd);
208cf9a2ae8SDavid Howells 
209*6348be02SAl Viro 	if (fd_empty(f))
210*6348be02SAl Viro 		return -EBADF;
211*6348be02SAl Viro 
212*6348be02SAl Viro 	return vfs_fsync(fd_file(f), datasync);
213cf9a2ae8SDavid Howells }
214cf9a2ae8SDavid Howells 
/* fsync(2): sync data and all metadata. */
SYSCALL_DEFINE1(fsync, unsigned int, fd)
{
	return do_fsync(fd, 0);
}
219cf9a2ae8SDavid Howells 
/* fdatasync(2): sync data and only the metadata needed to read it back. */
SYSCALL_DEFINE1(fdatasync, unsigned int, fd)
{
	return do_fsync(fd, 1);
}
224cf9a2ae8SDavid Howells 
/*
 * Core of sync_file_range(2): sync the byte range
 * @offset..(@offset + @nbytes - 1) of @file according to @flags
 * (@nbytes == 0 means "out to EOF").
 */
int sync_file_range(struct file *file, loff_t offset, loff_t nbytes,
		    unsigned int flags)
{
	struct address_space *mapping;
	loff_t endbyte;			/* inclusive */
	umode_t i_mode;
	int ret;

	if (flags & ~VALID_FLAGS)
		return -EINVAL;

	endbyte = offset + nbytes;

	/* Reject negative offsets and arithmetic overflow of the range. */
	if ((s64)offset < 0)
		return -EINVAL;
	if ((s64)endbyte < 0)
		return -EINVAL;
	if (endbyte < offset)
		return -EINVAL;

	if (sizeof(pgoff_t) == 4) {
		if (offset >= (0x100000000ULL << PAGE_SHIFT)) {
			/*
			 * The range starts outside a 32 bit machine's
			 * pagecache addressing capabilities.  Let it "succeed"
			 */
			return 0;
		}
		if (endbyte >= (0x100000000ULL << PAGE_SHIFT)) {
			/*
			 * Out to EOF
			 */
			nbytes = 0;
		}
	}

	if (nbytes == 0)
		endbyte = LLONG_MAX;
	else
		endbyte--;		/* inclusive */

	i_mode = file_inode(file)->i_mode;
	if (!S_ISREG(i_mode) && !S_ISBLK(i_mode) && !S_ISDIR(i_mode) &&
	    !S_ISLNK(i_mode))
		return -ESPIPE;

	mapping = file->f_mapping;
	ret = 0;
	/* Optionally wait for any writeback already in flight. */
	if (flags & SYNC_FILE_RANGE_WAIT_BEFORE) {
		ret = file_fdatawait_range(file, offset, endbyte);
		if (ret < 0)
			return ret;
	}

	/* Start writeout of dirty pages in the range. */
	if (flags & SYNC_FILE_RANGE_WRITE) {
		int sync_mode = WB_SYNC_NONE;

		/* The full WRITE_AND_WAIT combo demands data integrity. */
		if ((flags & SYNC_FILE_RANGE_WRITE_AND_WAIT) ==
			     SYNC_FILE_RANGE_WRITE_AND_WAIT)
			sync_mode = WB_SYNC_ALL;

		ret = __filemap_fdatawrite_range(mapping, offset, endbyte,
						 sync_mode);
		if (ret < 0)
			return ret;
	}

	/* Optionally wait for the writeout we just started. */
	if (flags & SYNC_FILE_RANGE_WAIT_AFTER)
		ret = file_fdatawait_range(file, offset, endbyte);

	return ret;
}
30122f96b38SJens Axboe 
302cf9a2ae8SDavid Howells /*
303c553ea4fSAmir Goldstein  * ksys_sync_file_range() permits finely controlled syncing over a segment of
304f79e2abbSAndrew Morton  * a file in the range offset .. (offset+nbytes-1) inclusive.  If nbytes is
305c553ea4fSAmir Goldstein  * zero then ksys_sync_file_range() will operate from offset out to EOF.
306f79e2abbSAndrew Morton  *
307f79e2abbSAndrew Morton  * The flag bits are:
308f79e2abbSAndrew Morton  *
309f79e2abbSAndrew Morton  * SYNC_FILE_RANGE_WAIT_BEFORE: wait upon writeout of all pages in the range
310f79e2abbSAndrew Morton  * before performing the write.
311f79e2abbSAndrew Morton  *
312f79e2abbSAndrew Morton  * SYNC_FILE_RANGE_WRITE: initiate writeout of all those dirty pages in the
313cce77081SPavel Machek  * range which are not presently under writeback. Note that this may block for
314cce77081SPavel Machek  * significant periods due to exhaustion of disk request structures.
315f79e2abbSAndrew Morton  *
316f79e2abbSAndrew Morton  * SYNC_FILE_RANGE_WAIT_AFTER: wait upon writeout of all pages in the range
317f79e2abbSAndrew Morton  * after performing the write.
318f79e2abbSAndrew Morton  *
319f79e2abbSAndrew Morton  * Useful combinations of the flag bits are:
320f79e2abbSAndrew Morton  *
321f79e2abbSAndrew Morton  * SYNC_FILE_RANGE_WAIT_BEFORE|SYNC_FILE_RANGE_WRITE: ensures that all pages
322c553ea4fSAmir Goldstein  * in the range which were dirty on entry to ksys_sync_file_range() are placed
323f79e2abbSAndrew Morton  * under writeout.  This is a start-write-for-data-integrity operation.
324f79e2abbSAndrew Morton  *
325f79e2abbSAndrew Morton  * SYNC_FILE_RANGE_WRITE: start writeout of all dirty pages in the range which
326f79e2abbSAndrew Morton  * are not presently under writeout.  This is an asynchronous flush-to-disk
327f79e2abbSAndrew Morton  * operation.  Not suitable for data integrity operations.
328f79e2abbSAndrew Morton  *
329f79e2abbSAndrew Morton  * SYNC_FILE_RANGE_WAIT_BEFORE (or SYNC_FILE_RANGE_WAIT_AFTER): wait for
330f79e2abbSAndrew Morton  * completion of writeout of all pages in the range.  This will be used after an
331f79e2abbSAndrew Morton  * earlier SYNC_FILE_RANGE_WAIT_BEFORE|SYNC_FILE_RANGE_WRITE operation to wait
332f79e2abbSAndrew Morton  * for that operation to complete and to return the result.
333f79e2abbSAndrew Morton  *
334c553ea4fSAmir Goldstein  * SYNC_FILE_RANGE_WAIT_BEFORE|SYNC_FILE_RANGE_WRITE|SYNC_FILE_RANGE_WAIT_AFTER
335c553ea4fSAmir Goldstein  * (a.k.a. SYNC_FILE_RANGE_WRITE_AND_WAIT):
336f79e2abbSAndrew Morton  * a traditional sync() operation.  This is a write-for-data-integrity operation
337f79e2abbSAndrew Morton  * which will ensure that all pages in the range which were dirty on entry to
338c553ea4fSAmir Goldstein  * ksys_sync_file_range() are written to disk.  It should be noted that disk
339c553ea4fSAmir Goldstein  * caches are not flushed by this call, so there are no guarantees here that the
340c553ea4fSAmir Goldstein  * data will be available on disk after a crash.
341f79e2abbSAndrew Morton  *
342f79e2abbSAndrew Morton  *
343f79e2abbSAndrew Morton  * SYNC_FILE_RANGE_WAIT_BEFORE and SYNC_FILE_RANGE_WAIT_AFTER will detect any
344f79e2abbSAndrew Morton  * I/O errors or ENOSPC conditions and will return those to the caller, after
345f79e2abbSAndrew Morton  * clearing the EIO and ENOSPC flags in the address_space.
346f79e2abbSAndrew Morton  *
347f79e2abbSAndrew Morton  * It should be noted that none of these operations write out the file's
348f79e2abbSAndrew Morton  * metadata.  So unless the application is strictly performing overwrites of
349f79e2abbSAndrew Morton  * already-instantiated disk blocks, there are no guarantees here that the data
350f79e2abbSAndrew Morton  * will be available after a crash.
351f79e2abbSAndrew Morton  */
ksys_sync_file_range(int fd,loff_t offset,loff_t nbytes,unsigned int flags)352806cbae1SDominik Brodowski int ksys_sync_file_range(int fd, loff_t offset, loff_t nbytes,
353806cbae1SDominik Brodowski 			 unsigned int flags)
354f79e2abbSAndrew Morton {
355*6348be02SAl Viro 	CLASS(fd, f)(fd);
356f79e2abbSAndrew Morton 
357*6348be02SAl Viro 	if (fd_empty(f))
358*6348be02SAl Viro 		return -EBADF;
359f79e2abbSAndrew Morton 
360*6348be02SAl Viro 	return sync_file_range(fd_file(f), offset, nbytes, flags);
361f79e2abbSAndrew Morton }
362f79e2abbSAndrew Morton 
/* sync_file_range(2): thin wrapper around ksys_sync_file_range(). */
SYSCALL_DEFINE4(sync_file_range, int, fd, loff_t, offset, loff_t, nbytes,
				unsigned int, flags)
{
	return ksys_sync_file_range(fd, offset, nbytes, flags);
}
368806cbae1SDominik Brodowski 
#if defined(CONFIG_COMPAT) && defined(__ARCH_WANT_COMPAT_SYNC_FILE_RANGE)
/*
 * Compat entry point: 64-bit offset/nbytes arrive as register pairs and are
 * reassembled by the compat_arg_u64_* helpers.
 */
COMPAT_SYSCALL_DEFINE6(sync_file_range, int, fd, compat_arg_u64_dual(offset),
		       compat_arg_u64_dual(nbytes), unsigned int, flags)
{
	return ksys_sync_file_range(fd, compat_arg_u64_glue(offset),
				    compat_arg_u64_glue(nbytes), flags);
}
#endif
37759c10c52SGuo Ren 
378edd5cd4aSDavid Woodhouse /* It would be nice if people remember that not all the world's an i386
379edd5cd4aSDavid Woodhouse    when they introduce new system calls */
SYSCALL_DEFINE4(sync_file_range2,int,fd,unsigned int,flags,loff_t,offset,loff_t,nbytes)3804a0fd5bfSAl Viro SYSCALL_DEFINE4(sync_file_range2, int, fd, unsigned int, flags,
3814a0fd5bfSAl Viro 				 loff_t, offset, loff_t, nbytes)
382edd5cd4aSDavid Woodhouse {
383806cbae1SDominik Brodowski 	return ksys_sync_file_range(fd, offset, nbytes, flags);
384edd5cd4aSDavid Woodhouse }
385