zonefs: Always invalidate last cached page on append write
commit c1976bd8f2 upstream.

When a direct append write is executed, the append offset may correspond
to the last page of a sequential file inode which might have been cached
already by buffered reads, page faults with mmap-read or non-direct
readahead. To ensure that the on-disk and cached data is consistent for
such last cached page, make sure to always invalidate it in
zonefs_file_dio_append(). If the invalidation fails, return -EBUSY to
userspace to differentiate from IO errors.

This invalidation will always be a no-op when the FS block size (device
zone write granularity) is equal to the page size (e.g. 4K).

Reported-by: Hans Holmberg <Hans.Holmberg@wdc.com>
Fixes: 02ef12a663 ("zonefs: use REQ_OP_ZONE_APPEND for sync DIO")
Cc: stable@vger.kernel.org
Signed-off-by: Damien Le Moal <damien.lemoal@opensource.wdc.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Johannes Thumshirn <johannes.thumshirn@wdc.com>
Tested-by: Hans Holmberg <hans.holmberg@wdc.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent 4d35d375ef
commit d7c67be755
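The failure mode is easiest to see from userspace. Below is a minimal
reproducer sketch, not part of the commit: the mount point, file path and
the 512B-block/4K-page geometry are assumptions. A buffered read caches
the partial last page of a sequential file; a following O_DIRECT append
then lands in that same on-disk page, so without this patch the cached
copy goes stale.

#define _GNU_SOURCE	/* for O_DIRECT */
#include <fcntl.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	/* Assumed zonefs sequential file; adjust to your mount point. */
	const char *path = "/mnt/zonefs/seq/0";
	char rbuf[4096];
	void *wbuf;

	int bfd = open(path, O_RDONLY);		  /* buffered reads */
	int dfd = open(path, O_RDWR | O_DIRECT);  /* direct append */
	if (bfd < 0 || dfd < 0)
		return 1;

	/* Cache the page straddling i_size (block size < page size). */
	off_t eof = lseek(bfd, 0, SEEK_END);
	pread(bfd, rbuf, sizeof(rbuf), eof & ~(off_t)4095);

	/* 512B-aligned O_DIRECT append into that same last page. */
	if (posix_memalign(&wbuf, 4096, 512))
		return 1;
	memset(wbuf, 0x5a, 512);
	pwrite(dfd, wbuf, 512, eof);

	/* Without the fix, this buffered re-read can return stale data. */
	pread(bfd, rbuf, sizeof(rbuf), eof & ~(off_t)4095);

	close(bfd);
	close(dfd);
	free(wbuf);
	return 0;
}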
@@ -382,6 +382,7 @@ static ssize_t zonefs_file_dio_append(struct kiocb *iocb, struct iov_iter *from)
 	struct zonefs_zone *z = zonefs_inode_zone(inode);
 	struct block_device *bdev = inode->i_sb->s_bdev;
 	unsigned int max = bdev_max_zone_append_sectors(bdev);
+	pgoff_t start, end;
 	struct bio *bio;
 	ssize_t size;
 	int nr_pages;
@@ -390,6 +391,19 @@ static ssize_t zonefs_file_dio_append(struct kiocb *iocb, struct iov_iter *from)
 	max = ALIGN_DOWN(max << SECTOR_SHIFT, inode->i_sb->s_blocksize);
 	iov_iter_truncate(from, max);
 
+	/*
+	 * If the inode block size (zone write granularity) is smaller than the
+	 * page size, we may be appending data belonging to the last page of the
+	 * inode straddling inode->i_size, with that page already cached due to
+	 * a buffered read or readahead. So make sure to invalidate that page.
+	 * This will always be a no-op for the case where the block size is
+	 * equal to the page size.
+	 */
+	start = iocb->ki_pos >> PAGE_SHIFT;
+	end = (iocb->ki_pos + iov_iter_count(from) - 1) >> PAGE_SHIFT;
+	if (invalidate_inode_pages2_range(inode->i_mapping, start, end))
+		return -EBUSY;
+
 	nr_pages = iov_iter_npages(from, BIO_MAX_VECS);
 	if (!nr_pages)
 		return 0;
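For concreteness, here is the added range computation with illustrative
numbers, which are my assumption rather than anything in the patch: 4K
pages and a 512B zonefs block size, appending 512 bytes at
ki_pos == i_size == 4608.

#include <stdio.h>

/* Worked example of the invalidation range; the geometry (4K pages,
 * 512B block size) and offsets are illustrative assumptions.
 */
int main(void)
{
	unsigned long page_shift = 12;	/* 4K pages */
	unsigned long ki_pos = 4608;	/* i_size, inside page index 1 */
	unsigned long count = 512;	/* one block appended */

	unsigned long start = ki_pos >> page_shift;			/* 1 */
	unsigned long end = (ki_pos + count - 1) >> page_shift;		/* 1 */

	/* Page 1 straddles i_size and may already be cached by an earlier
	 * buffered read, so the patch invalidates pages [start, end]. With
	 * a 4K block size, ki_pos is always page aligned, no cached page
	 * can overlap the append, and the invalidation is a no-op.
	 */
	printf("invalidate page cache range %lu..%lu\n", start, end);
	return 0;
}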