Commit 75a452d31ba6 for the Linux kernel
commit 75a452d31ba697fc986609dd4905294e07687992
Merge: 87a367f1bffa 10d7c95af043
Author: Linus Torvalds <torvalds@linux-foundation.org>
Date: Tue Feb 17 15:37:06 2026 -0800
Merge tag 'ntfs3_for_7.0' of https://github.com/Paragon-Software-Group/linux-ntfs3
Pull ntfs3 updates from Konstantin Komarov:
"New code:
- improve readahead for bitmap initialization and large directory scans
- fsync files by syncing parent inodes
- drop preallocated clusters for sparse and compressed files
- zero-fill folios beyond i_valid in ntfs_read_folio()
- implement llseek SEEK_DATA/SEEK_HOLE by scanning data runs
- implement iomap-based file operations
- allow explicit boolean acl/prealloc mount options
- add fall-through between switch labels
- add delayed-allocation (delalloc) support
Fixes:
- check return value of indx_find to avoid infinite loop
- initialize new folios before use
- infinite loop in attr_load_runs_range on inconsistent metadata
- infinite loop triggered by zero-sized ATTR_LIST
- ntfs_mount_options leak in ntfs_fill_super()
- deadlock in ni_read_folio_cmpr
- circular locking dependency in run_unpack_ex
- prevent infinite loops caused by the next valid being the same
- restore NULL folio initialization in ntfs_writepages()
- slab-out-of-bounds read in DeleteIndexEntryRoot
Updates:
- allow readdir() to finish after directory mutations without rewinddir()
- handle attr_set_size() errors when truncating files
- make ntfs_writeback_ops static
- refactor duplicate kmemdup pattern in do_action()
- avoid calling run_get_entry() when run == NULL in ntfs_read_run_nb_ra()
Replaced:
- use wait_on_buffer() directly
- rename ni_readpage_cmpr to ni_read_folio_cmpr"
* tag 'ntfs3_for_7.0' of https://github.com/Paragon-Software-Group/linux-ntfs3: (26 commits)
fs/ntfs3: add delayed-allocation (delalloc) support
fs/ntfs3: avoid calling run_get_entry() when run == NULL in ntfs_read_run_nb_ra()
fs/ntfs3: add fall-through between switch labels
fs/ntfs3: allow explicit boolean acl/prealloc mount options
fs/ntfs3: Fix slab-out-of-bounds read in DeleteIndexEntryRoot
ntfs3: Restore NULL folio initialization in ntfs_writepages()
ntfs3: Refactor duplicate kmemdup pattern in do_action()
fs/ntfs3: prevent infinite loops caused by the next valid being the same
fs/ntfs3: make ntfs_writeback_ops static
ntfs3: fix circular locking dependency in run_unpack_ex
fs/ntfs3: implement iomap-based file operations
fs/ntfs3: fix deadlock in ni_read_folio_cmpr
fs/ntfs3: implement llseek SEEK_DATA/SEEK_HOLE by scanning data runs
fs/ntfs3: zero-fill folios beyond i_valid in ntfs_read_folio()
fs/ntfs3: handle attr_set_size() errors when truncating files
fs/ntfs3: drop preallocated clusters for sparse and compressed files
fs/ntfs3: fsync files by syncing parent inodes
fs/ntfs3: fix ntfs_mount_options leak in ntfs_fill_super()
fs/ntfs3: allow readdir() to finish after directory mutations without rewinddir()
fs/ntfs3: improve readahead for bitmap initialization and large directory scans
...
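The SEEK_DATA/SEEK_HOLE support listed above ("implement llseek SEEK_DATA/SEEK_HOLE by scanning data runs") resolves the offset by walking the file's decoded run list instead of touching the page cache. The sketch below is only a conceptual, self-contained illustration of that idea, not the ntfs3 code: the run structure, the SPARSE_RUN marker, the fixed cluster size, and the assumption that the run list is contiguous (holes stored as explicit sparse runs) are all simplifications made here.

/*
 * Conceptual sketch of llseek(SEEK_DATA/SEEK_HOLE) by scanning data runs.
 * Not the ntfs3 implementation: the types, the SPARSE_RUN marker and the
 * byte<->cluster conversion are simplified assumptions.
 */
#include <stddef.h>
#include <stdint.h>

#define CLUSTER_SHIFT 12          /* assume 4 KiB clusters */
#define SPARSE_RUN    UINT64_MAX  /* marker for an unallocated (hole) run */

struct run {                      /* hypothetical decoded run-list entry */
	uint64_t vcn;             /* first virtual cluster of the run */
	uint64_t len;             /* run length in clusters */
	uint64_t lcn;             /* on-disk cluster, or SPARSE_RUN */
};

/*
 * Return the first offset >= pos that starts data (seek_data != 0) or a
 * hole (seek_data == 0).  Assumes the caller already rejected pos >= i_size
 * and that runs[] is sorted and contiguous starting at vcn 0.
 */
static int64_t scan_runs(const struct run *runs, size_t nruns,
			 uint64_t i_size, uint64_t pos, int seek_data)
{
	for (size_t i = 0; i < nruns && pos < i_size; i++) {
		uint64_t start = runs[i].vcn << CLUSTER_SHIFT;
		uint64_t end = (runs[i].vcn + runs[i].len) << CLUSTER_SHIFT;
		int is_data = runs[i].lcn != SPARSE_RUN;

		if (pos >= end)
			continue;                /* run lies before pos */
		if (is_data == !!seek_data)
			return pos > start ? pos : start;
		pos = end;                       /* skip the wrong kind */
	}

	/* SEEK_HOLE always succeeds: there is an implicit hole at i_size. */
	if (!seek_data)
		return pos < i_size ? (int64_t)pos : (int64_t)i_size;
	return -1;               /* no more data: a real llseek gives -ENXIO */
}

A real implementation would additionally have to serialize against run-list changes (the run lock visible in the diff below) and clamp results to i_size; the scan-and-classify structure is the point of the sketch.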
diff --cc fs/ntfs3/dir.c
index 596f8c62f033,001773b4514b..4652a56ad105
--- a/fs/ntfs3/dir.c
+++ b/fs/ntfs3/dir.c
@@@ -499,13 -543,13 +543,13 @@@ static int ntfs_readdir(struct file *fi
}
out:
-
- __putname(name);
+ kfree(name);
put_indx_node(node);
- if (err == 1) {
+ if (!err) {
+ /* End of directory. */
+ ctx->pos = eod;
+ } else if (err == 1) {
/* 'ctx' is full. */
err = 0;
} else if (err == -ENOENT) {
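For context on the dir.c hunk above and the "allow readdir() to finish after directory mutations without rewinddir()" entry in the log: when iteration completes cleanly, ntfs_readdir() now parks ctx->pos at an end-of-directory position (eod), so a directory stream that saw concurrent mutations still terminates instead of looping or erroring. From userspace, the pattern this serves looks like the purely illustrative snippet below; POSIX leaves it unspecified whether entries added or removed after opendir() are returned, the point is only that the loop reaches end of directory without an intermediate rewinddir().

/*
 * Userspace illustration (not kernel code): delete entries while iterating
 * and let the same DIR stream run to end-of-directory without rewinddir().
 */
#include <dirent.h>
#include <string.h>
#include <unistd.h>

static int prune_tmp_files(const char *path)
{
	DIR *dir = opendir(path);
	struct dirent *de;

	if (!dir)
		return -1;

	/* Mutate the directory between readdir() calls... */
	while ((de = readdir(dir)) != NULL) {
		if (strstr(de->d_name, ".tmp"))
			unlinkat(dirfd(dir), de->d_name, 0);
	}

	/* ...and the stream still terminates cleanly. */
	closedir(dir);
	return 0;
}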
diff --cc fs/ntfs3/file.c
index 6cb4479072a6,79e4c7a78c26..ae8c47cac406
--- a/fs/ntfs3/file.c
+++ b/fs/ntfs3/file.c
@@@ -14,7 -14,7 +14,8 @@@
#include <linux/falloc.h>
#include <linux/fiemap.h>
#include <linux/fileattr.h>
+#include <linux/filelock.h>
+ #include <linux/iomap.h>
#include "debug.h"
#include "ntfs.h"
diff --cc fs/ntfs3/inode.c
index edfb973e4e82,aca774f1aed1..6e65066ebcc1
--- a/fs/ntfs3/inode.c
+++ b/fs/ntfs3/inode.c
@@@ -741,8 -694,8 +694,8 @@@ static int ntfs_read_folio(struct file
return err;
}
- /* Normal + sparse files. */
- return mpage_read_folio(folio, ntfs_get_block);
- iomap_read_folio(&ntfs_iomap_ops, &ctx);
++ iomap_read_folio(&ntfs_iomap_ops, &ctx, NULL);
+ return 0;
}
static void ntfs_readahead(struct readahead_control *rac)
@@@ -763,112 -718,257 +718,257 @@@
return;
}
- valid = ni->i_valid;
- pos = readahead_pos(rac);
- iomap_readahead(&ntfs_iomap_ops, &ctx);
++ iomap_readahead(&ntfs_iomap_ops, &ctx, NULL);
+ }
- if (valid < i_size_read(inode) && pos <= valid &&
- valid < pos + readahead_length(rac)) {
- /* Range cross 'valid'. Read it page by page. */
- return;
+ int ntfs_set_size(struct inode *inode, u64 new_size)
+ {
+ struct super_block *sb = inode->i_sb;
+ struct ntfs_sb_info *sbi = sb->s_fs_info;
+ struct ntfs_inode *ni = ntfs_i(inode);
+ int err;
+
+ /* Check for maximum file size. */
+ if (is_sparsed(ni) || is_compressed(ni)) {
+ if (new_size > sbi->maxbytes_sparse) {
+ return -EFBIG;
+ }
+ } else if (new_size > sbi->maxbytes) {
+ return -EFBIG;
}
- mpage_readahead(rac, ntfs_get_block);
- }
+ ni_lock(ni);
+ down_write(&ni->file.run_lock);
- static int ntfs_get_block_direct_IO_R(struct inode *inode, sector_t iblock,
- struct buffer_head *bh_result, int create)
- {
- return ntfs_get_block_vbo(inode, (u64)iblock << inode->i_blkbits,
- bh_result, create, GET_BLOCK_DIRECT_IO_R);
- }
+ err = attr_set_size(ni, ATTR_DATA, NULL, 0, &ni->file.run, new_size,
+ &ni->i_valid, true);
- static int ntfs_get_block_direct_IO_W(struct inode *inode, sector_t iblock,
- struct buffer_head *bh_result, int create)
- {
- return ntfs_get_block_vbo(inode, (u64)iblock << inode->i_blkbits,
- bh_result, create, GET_BLOCK_DIRECT_IO_W);
+ if (!err) {
+ i_size_write(inode, new_size);
+ mark_inode_dirty(inode);
+ }
+
+ up_write(&ni->file.run_lock);
+ ni_unlock(ni);
+
+ return err;
}
- static ssize_t ntfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
+ /*
+ * Special value to detect ntfs_writeback_range call
+ */
+ #define WB_NO_DA (struct iomap *)1
+ /*
+ * Function to get mapping vbo -> lbo.
+ * used with:
+ * - iomap_zero_range
+ * - iomap_truncate_page
+ * - iomap_dio_rw
+ * - iomap_file_buffered_write
+ * - iomap_bmap
+ * - iomap_fiemap
+ * - iomap_bio_read_folio
+ * - iomap_bio_readahead
+ */
+ static int ntfs_iomap_begin(struct inode *inode, loff_t offset, loff_t length,
+ unsigned int flags, struct iomap *iomap,
+ struct iomap *srcmap)
{
- struct file *file = iocb->ki_filp;
- struct address_space *mapping = file->f_mapping;
- struct inode *inode = mapping->host;
struct ntfs_inode *ni = ntfs_i(inode);
- loff_t vbo = iocb->ki_pos;
- loff_t end;
- int wr = iov_iter_rw(iter) & WRITE;
- size_t iter_count = iov_iter_count(iter);
- loff_t valid;
- ssize_t ret;
+ struct ntfs_sb_info *sbi = ni->mi.sbi;
+ u8 cluster_bits = sbi->cluster_bits;
+ CLST vcn = offset >> cluster_bits;
+ u32 off = offset & sbi->cluster_mask;
+ bool rw = flags & IOMAP_WRITE;
+ loff_t endbyte = offset + length;
+ void *res = NULL;
+ int err;
+ CLST lcn, clen, clen_max = 1;
+ bool new_clst = false;
+ bool no_da;
+ bool zero = false;
+ if (unlikely(ntfs3_forced_shutdown(sbi->sb)))
+ return -EIO;
- if (is_resident(ni)) {
- /* Switch to buffered write. */
- ret = 0;
- goto out;
+ if (flags & IOMAP_REPORT) {
+ if (offset > ntfs_get_maxbytes(ni)) {
+ /* called from fiemap/bmap. */
+ return -EINVAL;
+ }
+
+ if (offset >= inode->i_size) {
+ /* special code for report. */
+ return -ENOENT;
+ }
}
- if (is_compressed(ni)) {
- ret = 0;
- goto out;
+
+ if (IOMAP_ZERO == flags && (endbyte & sbi->cluster_mask)) {
+ rw = true;
+ } else if (rw) {
+ clen_max = bytes_to_cluster(sbi, endbyte) - vcn;
}
- ret = blockdev_direct_IO(iocb, inode, iter,
- wr ? ntfs_get_block_direct_IO_W :
- ntfs_get_block_direct_IO_R);
+ /*
+ * Force to allocate clusters if directIO(write) or writeback_range.
+ * NOTE: attr_data_get_block allocates clusters only for sparse file.
+ * Normal file allocates clusters in attr_set_size.
+ */
+ no_da = flags == (IOMAP_DIRECT | IOMAP_WRITE) || srcmap == WB_NO_DA;
- if (ret > 0)
- end = vbo + ret;
- else if (wr && ret == -EIOCBQUEUED)
- end = vbo + iter_count;
- else
- goto out;
+ err = attr_data_get_block(ni, vcn, clen_max, &lcn, &clen,
+ rw ? &new_clst : NULL, zero, &res, no_da);
- valid = ni->i_valid;
- if (wr) {
- if (end > valid && !S_ISBLK(inode->i_mode)) {
- ni->i_valid = end;
- mark_inode_dirty(inode);
+ if (err) {
+ return err;
+ }
+
+ if (lcn == EOF_LCN) {
+ /* request out of file. */
+ if (flags & IOMAP_REPORT) {
+ /* special code for report. */
+ return -ENOENT;
+ }
+
+ if (rw) {
+ /* should never be here. */
+ return -EINVAL;
}
- } else if (vbo < valid && valid < end) {
- /* Fix page. */
- iov_iter_revert(iter, end - valid);
- iov_iter_zero(end - valid, iter);
+ lcn = SPARSE_LCN;
}
- out:
- return ret;
+ iomap->flags = new_clst ? IOMAP_F_NEW : 0;
+
+ if (lcn == RESIDENT_LCN) {
+ if (offset >= clen) {
+ kfree(res);
+ if (flags & IOMAP_REPORT) {
+ /* special code for report. */
+ return -ENOENT;
+ }
+ return -EFAULT;
+ }
+
+ iomap->private = iomap->inline_data = res;
+ iomap->type = IOMAP_INLINE;
+ iomap->offset = 0;
+ iomap->length = clen; /* resident size in bytes. */
+ return 0;
+ }
+
+ if (!clen) {
+ /* broken file? */
+ return -EINVAL;
+ }
+
+ iomap->bdev = inode->i_sb->s_bdev;
+ iomap->offset = offset;
+ iomap->length = ((loff_t)clen << cluster_bits) - off;
+
+ if (lcn == COMPRESSED_LCN) {
+ /* should never be here. */
+ return -EOPNOTSUPP;
+ }
+
+ if (lcn == DELALLOC_LCN) {
+ iomap->type = IOMAP_DELALLOC;
+ iomap->addr = IOMAP_NULL_ADDR;
+ } else {
+
+ /* Translate clusters into bytes. */
+ iomap->addr = ((loff_t)lcn << cluster_bits) + off;
+ if (length && iomap->length > length)
+ iomap->length = length;
+ else
+ endbyte = offset + iomap->length;
+
+ if (lcn == SPARSE_LCN) {
+ iomap->addr = IOMAP_NULL_ADDR;
+ iomap->type = IOMAP_HOLE;
+ // if (IOMAP_ZERO == flags && !off) {
+ // iomap->length = (endbyte - offset) &
+ // sbi->cluster_mask_inv;
+ // }
+ } else if (endbyte <= ni->i_valid) {
+ iomap->type = IOMAP_MAPPED;
+ } else if (offset < ni->i_valid) {
+ iomap->type = IOMAP_MAPPED;
+ if (flags & IOMAP_REPORT)
+ iomap->length = ni->i_valid - offset;
+ } else if (rw || (flags & IOMAP_ZERO)) {
+ iomap->type = IOMAP_MAPPED;
+ } else {
+ iomap->type = IOMAP_UNWRITTEN;
+ }
+ }
+
+ if ((flags & IOMAP_ZERO) &&
+ (iomap->type == IOMAP_MAPPED || iomap->type == IOMAP_DELALLOC)) {
+ /* Avoid too large requests. */
+ u32 tail;
+ u32 off_a = offset & (PAGE_SIZE - 1);
+ if (off_a)
+ tail = PAGE_SIZE - off_a;
+ else
+ tail = PAGE_SIZE;
+
+ if (iomap->length > tail)
+ iomap->length = tail;
+ }
+
+ return 0;
}
- int ntfs_set_size(struct inode *inode, u64 new_size)
+ static int ntfs_iomap_end(struct inode *inode, loff_t pos, loff_t length,
+ ssize_t written, unsigned int flags,
+ struct iomap *iomap)
{
- struct super_block *sb = inode->i_sb;
- struct ntfs_sb_info *sbi = sb->s_fs_info;
+ int err = 0;
struct ntfs_inode *ni = ntfs_i(inode);
- int err;
+ loff_t endbyte = pos + written;
- /* Check for maximum file size. */
- if (is_sparsed(ni) || is_compressed(ni)) {
- if (new_size > sbi->maxbytes_sparse) {
- err = -EFBIG;
- goto out;
- }
- } else if (new_size > sbi->maxbytes) {
- err = -EFBIG;
- goto out;
- }
+ if ((flags & IOMAP_WRITE) || (flags & IOMAP_ZERO)) {
+ if (iomap->type == IOMAP_INLINE) {
+ u32 data_size;
+ struct ATTRIB *attr;
+ struct mft_inode *mi;
- ni_lock(ni);
- down_write(&ni->file.run_lock);
+ attr = ni_find_attr(ni, NULL, NULL, ATTR_DATA, NULL, 0,
+ NULL, &mi);
+ if (!attr || attr->non_res) {
+ err = -EINVAL;
+ goto out;
+ }
- err = attr_set_size(ni, ATTR_DATA, NULL, 0, &ni->file.run, new_size,
- &ni->i_valid, true, NULL);
+ data_size = le32_to_cpu(attr->res.data_size);
+ if (!(pos < data_size && endbyte <= data_size)) {
+ err = -EINVAL;
+ goto out;
+ }
- up_write(&ni->file.run_lock);
- ni_unlock(ni);
+ /* Update resident data. */
+ memcpy(resident_data(attr) + pos,
+ iomap_inline_data(iomap, pos), written);
+ mi->dirty = true;
+ ni->i_valid = data_size;
+ } else if (ni->i_valid < endbyte) {
+ ni->i_valid = endbyte;
+ mark_inode_dirty(inode);
+ }
+ }
- mark_inode_dirty(inode);
+ if ((flags & IOMAP_ZERO) &&
+ (iomap->type == IOMAP_MAPPED || iomap->type == IOMAP_DELALLOC)) {
+ /* Pair for code in ntfs_iomap_begin. */
+ balance_dirty_pages_ratelimited(inode->i_mapping);
+ cond_resched();
+ }
out:
+ if (iomap->type == IOMAP_INLINE) {
+ kfree(iomap->private);
+ iomap->private = NULL;
+ }
+
return err;
}
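A note on the mapping-type selection in ntfs_iomap_begin() above: it hinges on NTFS's valid data size (ni->i_valid), the high-water mark below which on-disk data has actually been initialized; bytes between i_valid and i_size must read back as zeroes. The helper below is only a condensed restatement of that branch for readability, not kernel code: the enum is a local stand-in for the kernel's IOMAP_MAPPED/IOMAP_UNWRITTEN types, and the hole, delalloc and resident cases handled earlier in the function are omitted.

/*
 * Condensed restatement of the i_valid branch in ntfs_iomap_begin() above.
 * MAP_MAPPED/MAP_UNWRITTEN are local stand-ins for the kernel's IOMAP_*
 * types so the sketch is self-contained.
 */
#include <stdbool.h>
#include <stdint.h>

enum map_type { MAP_MAPPED, MAP_UNWRITTEN };

static enum map_type classify_extent(uint64_t offset, uint64_t endbyte,
				     uint64_t i_valid, bool for_write)
{
	if (endbyte <= i_valid)
		return MAP_MAPPED;      /* fully initialized on disk */

	if (offset < i_valid) {
		/*
		 * Straddles i_valid; for IOMAP_REPORT the real code clamps
		 * the extent length to i_valid - offset.
		 */
		return MAP_MAPPED;
	}

	if (for_write)
		return MAP_MAPPED;      /* ntfs_iomap_end() raises i_valid */

	return MAP_UNWRITTEN;           /* reads see zeroes, no disk I/O */
}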