1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 *
4 * Copyright (C) 2019-2021 Paragon Software GmbH, All rights reserved.
5 *
6 */
7
8 #include <linux/buffer_head.h>
9 #include <linux/fs.h>
10 #include <linux/mpage.h>
11 #include <linux/namei.h>
12 #include <linux/nls.h>
13 #include <linux/uio.h>
14 #include <linux/writeback.h>
15
16 #include "debug.h"
17 #include "ntfs.h"
18 #include "ntfs_fs.h"
19
/*
 * ntfs_read_mft - Read an MFT record and parse its attributes into @inode.
 */
static struct inode *ntfs_read_mft(struct inode *inode,
				   const struct cpu_str *name,
				   const struct MFT_REF *ref)
26 {
27 int err = 0;
28 struct ntfs_inode *ni = ntfs_i(inode);
29 struct super_block *sb = inode->i_sb;
30 struct ntfs_sb_info *sbi = sb->s_fs_info;
31 mode_t mode = 0;
32 struct ATTR_STD_INFO5 *std5 = NULL;
33 struct ATTR_LIST_ENTRY *le;
34 struct ATTRIB *attr;
35 bool is_match = false;
36 bool is_root = false;
37 bool is_dir;
38 unsigned long ino = inode->i_ino;
39 u32 rp_fa = 0, asize, t32;
40 u16 roff, rsize, names = 0;
41 const struct ATTR_FILE_NAME *fname = NULL;
42 const struct INDEX_ROOT *root;
43 struct REPARSE_DATA_BUFFER rp; // 0x18 bytes
44 u64 t64;
45 struct MFT_REC *rec;
46 struct runs_tree *run;
47
48 inode->i_op = NULL;
	/* Set up 'uid' and 'gid'. */
50 inode->i_uid = sbi->options->fs_uid;
51 inode->i_gid = sbi->options->fs_gid;
52
53 err = mi_init(&ni->mi, sbi, ino);
54 if (err)
55 goto out;
56
57 if (!sbi->mft.ni && ino == MFT_REC_MFT && !sb->s_root) {
58 t64 = sbi->mft.lbo >> sbi->cluster_bits;
59 t32 = bytes_to_cluster(sbi, MFT_REC_VOL * sbi->record_size);
60 sbi->mft.ni = ni;
61 init_rwsem(&ni->file.run_lock);
62
63 if (!run_add_entry(&ni->file.run, 0, t64, t32, true)) {
64 err = -ENOMEM;
65 goto out;
66 }
67 }
68
69 err = mi_read(&ni->mi, ino == MFT_REC_MFT);
70
71 if (err)
72 goto out;
73
74 rec = ni->mi.mrec;
75
76 if (sbi->flags & NTFS_FLAGS_LOG_REPLAYING) {
77 ;
78 } else if (ref->seq != rec->seq) {
79 err = -EINVAL;
80 ntfs_err(sb, "MFT: r=%lx, expect seq=%x instead of %x!", ino,
81 le16_to_cpu(ref->seq), le16_to_cpu(rec->seq));
82 goto out;
83 } else if (!is_rec_inuse(rec)) {
84 err = -EINVAL;
85 ntfs_err(sb, "Inode r=%x is not in use!", (u32)ino);
86 goto out;
87 }
88
89 if (le32_to_cpu(rec->total) != sbi->record_size) {
90 /* Bad inode? */
91 err = -EINVAL;
92 goto out;
93 }
94
95 if (!is_rec_base(rec))
96 goto Ok;
97
98 /* Record should contain $I30 root. */
99 is_dir = rec->flags & RECORD_FLAG_DIR;
100
101 inode->i_generation = le16_to_cpu(rec->seq);
102
	/* Enumerate all attributes of the MFT record. */
104 le = NULL;
105 attr = NULL;
106
	/*
	 * To reduce indentation depth, use goto instead of
	 * while ((attr = ni_enum_attr_ex(ni, attr, &le, NULL)))
	 */
111 next_attr:
112 run = NULL;
113 err = -EINVAL;
114 attr = ni_enum_attr_ex(ni, attr, &le, NULL);
115 if (!attr)
116 goto end_enum;
117
118 if (le && le->vcn) {
		/*
		 * This is a non-primary attribute segment.
		 * Ignore it unless this is the MFT's $DATA attribute.
		 */
120 if (ino != MFT_REC_MFT || attr->type != ATTR_DATA)
121 goto next_attr;
122
123 run = &ni->file.run;
124 asize = le32_to_cpu(attr->size);
125 goto attr_unpack_run;
126 }
127
128 roff = attr->non_res ? 0 : le16_to_cpu(attr->res.data_off);
129 rsize = attr->non_res ? 0 : le32_to_cpu(attr->res.data_size);
130 asize = le32_to_cpu(attr->size);
131
132 if (le16_to_cpu(attr->name_off) + attr->name_len > asize)
133 goto out;
134
135 switch (attr->type) {
136 case ATTR_STD:
137 if (attr->non_res ||
138 asize < sizeof(struct ATTR_STD_INFO) + roff ||
139 rsize < sizeof(struct ATTR_STD_INFO))
140 goto out;
141
142 if (std5)
143 goto next_attr;
144
145 std5 = Add2Ptr(attr, roff);
146
147 #ifdef STATX_BTIME
148 nt2kernel(std5->cr_time, &ni->i_crtime);
149 #endif
150 nt2kernel(std5->a_time, &inode->i_atime);
151 nt2kernel(std5->c_time, &inode->i_ctime);
152 nt2kernel(std5->m_time, &inode->i_mtime);
153
154 ni->std_fa = std5->fa;
155
156 if (asize >= sizeof(struct ATTR_STD_INFO5) + roff &&
157 rsize >= sizeof(struct ATTR_STD_INFO5))
158 ni->std_security_id = std5->security_id;
159 goto next_attr;
160
161 case ATTR_LIST:
162 if (attr->name_len || le || ino == MFT_REC_LOG)
163 goto out;
164
165 err = ntfs_load_attr_list(ni, attr);
166 if (err)
167 goto out;
168
169 le = NULL;
170 attr = NULL;
171 goto next_attr;
172
173 case ATTR_NAME:
174 if (attr->non_res || asize < SIZEOF_ATTRIBUTE_FILENAME + roff ||
175 rsize < SIZEOF_ATTRIBUTE_FILENAME)
176 goto out;
177
178 fname = Add2Ptr(attr, roff);
179 if (fname->type == FILE_NAME_DOS)
180 goto next_attr;
181
182 names += 1;
183 if (name && name->len == fname->name_len &&
184 !ntfs_cmp_names_cpu(name, (struct le_str *)&fname->name_len,
185 NULL, false))
186 is_match = true;
187
188 goto next_attr;
189
190 case ATTR_DATA:
191 if (is_dir) {
192 /* Ignore data attribute in dir record. */
193 goto next_attr;
194 }
195
196 if (ino == MFT_REC_BADCLUST && !attr->non_res)
197 goto next_attr;
198
199 if (attr->name_len &&
200 ((ino != MFT_REC_BADCLUST || !attr->non_res ||
201 attr->name_len != ARRAY_SIZE(BAD_NAME) ||
202 memcmp(attr_name(attr), BAD_NAME, sizeof(BAD_NAME))) &&
203 (ino != MFT_REC_SECURE || !attr->non_res ||
204 attr->name_len != ARRAY_SIZE(SDS_NAME) ||
205 memcmp(attr_name(attr), SDS_NAME, sizeof(SDS_NAME))))) {
206 /* File contains stream attribute. Ignore it. */
207 goto next_attr;
208 }
209
210 if (is_attr_sparsed(attr))
211 ni->std_fa |= FILE_ATTRIBUTE_SPARSE_FILE;
212 else
213 ni->std_fa &= ~FILE_ATTRIBUTE_SPARSE_FILE;
214
215 if (is_attr_compressed(attr))
216 ni->std_fa |= FILE_ATTRIBUTE_COMPRESSED;
217 else
218 ni->std_fa &= ~FILE_ATTRIBUTE_COMPRESSED;
219
220 if (is_attr_encrypted(attr))
221 ni->std_fa |= FILE_ATTRIBUTE_ENCRYPTED;
222 else
223 ni->std_fa &= ~FILE_ATTRIBUTE_ENCRYPTED;
224
225 if (!attr->non_res) {
226 ni->i_valid = inode->i_size = rsize;
227 inode_set_bytes(inode, rsize);
228 }
229
230 mode = S_IFREG | (0777 & sbi->options->fs_fmask_inv);
231
232 if (!attr->non_res) {
233 ni->ni_flags |= NI_FLAG_RESIDENT;
234 goto next_attr;
235 }
236
237 inode_set_bytes(inode, attr_ondisk_size(attr));
238
239 ni->i_valid = le64_to_cpu(attr->nres.valid_size);
240 inode->i_size = le64_to_cpu(attr->nres.data_size);
241 if (!attr->nres.alloc_size)
242 goto next_attr;
243
244 run = ino == MFT_REC_BITMAP ? &sbi->used.bitmap.run
245 : &ni->file.run;
246 break;
247
248 case ATTR_ROOT:
249 if (attr->non_res)
250 goto out;
251
252 root = Add2Ptr(attr, roff);
253 is_root = true;
254
255 if (attr->name_len != ARRAY_SIZE(I30_NAME) ||
256 memcmp(attr_name(attr), I30_NAME, sizeof(I30_NAME)))
257 goto next_attr;
258
259 if (root->type != ATTR_NAME ||
260 root->rule != NTFS_COLLATION_TYPE_FILENAME)
261 goto out;
262
263 if (!is_dir)
264 goto next_attr;
265
266 ni->ni_flags |= NI_FLAG_DIR;
267
268 err = indx_init(&ni->dir, sbi, attr, INDEX_MUTEX_I30);
269 if (err)
270 goto out;
271
272 mode = sb->s_root
273 ? (S_IFDIR | (0777 & sbi->options->fs_dmask_inv))
274 : (S_IFDIR | 0777);
275 goto next_attr;
276
277 case ATTR_ALLOC:
278 if (!is_root || attr->name_len != ARRAY_SIZE(I30_NAME) ||
279 memcmp(attr_name(attr), I30_NAME, sizeof(I30_NAME)))
280 goto next_attr;
281
282 inode->i_size = le64_to_cpu(attr->nres.data_size);
283 ni->i_valid = le64_to_cpu(attr->nres.valid_size);
284 inode_set_bytes(inode, le64_to_cpu(attr->nres.alloc_size));
285
286 run = &ni->dir.alloc_run;
287 break;
288
289 case ATTR_BITMAP:
290 if (ino == MFT_REC_MFT) {
291 if (!attr->non_res)
292 goto out;
293 #ifndef CONFIG_NTFS3_64BIT_CLUSTER
294 /* 0x20000000 = 2^32 / 8 */
295 if (le64_to_cpu(attr->nres.alloc_size) >= 0x20000000)
296 goto out;
297 #endif
298 run = &sbi->mft.bitmap.run;
299 break;
300 } else if (is_dir && attr->name_len == ARRAY_SIZE(I30_NAME) &&
301 !memcmp(attr_name(attr), I30_NAME,
302 sizeof(I30_NAME)) &&
303 attr->non_res) {
304 run = &ni->dir.bitmap_run;
305 break;
306 }
307 goto next_attr;
308
309 case ATTR_REPARSE:
310 if (attr->name_len)
311 goto next_attr;
312
313 rp_fa = ni_parse_reparse(ni, attr, &rp);
314 switch (rp_fa) {
315 case REPARSE_LINK:
			/*
			 * Normal symlink.
			 * Assume one UTF-16 character maps to one UTF-8 byte.
			 */
320 inode->i_size = le16_to_cpu(rp.SymbolicLinkReparseBuffer
321 .PrintNameLength) /
322 sizeof(u16);
323
324 ni->i_valid = inode->i_size;
325
326 /* Clear directory bit. */
327 if (ni->ni_flags & NI_FLAG_DIR) {
328 indx_clear(&ni->dir);
329 memset(&ni->dir, 0, sizeof(ni->dir));
330 ni->ni_flags &= ~NI_FLAG_DIR;
331 } else {
332 run_close(&ni->file.run);
333 }
334 mode = S_IFLNK | 0777;
335 is_dir = false;
336 if (attr->non_res) {
337 run = &ni->file.run;
338 goto attr_unpack_run; // Double break.
339 }
340 break;
341
342 case REPARSE_COMPRESSED:
343 break;
344
345 case REPARSE_DEDUPLICATED:
346 break;
347 }
348 goto next_attr;
349
350 case ATTR_EA_INFO:
351 if (!attr->name_len &&
352 resident_data_ex(attr, sizeof(struct EA_INFO))) {
353 ni->ni_flags |= NI_FLAG_EA;
354 /*
355 * ntfs_get_wsl_perm updates inode->i_uid, inode->i_gid, inode->i_mode
356 */
357 inode->i_mode = mode;
358 ntfs_get_wsl_perm(inode);
359 mode = inode->i_mode;
360 }
361 goto next_attr;
362
363 default:
364 goto next_attr;
365 }
366
367 attr_unpack_run:
368 roff = le16_to_cpu(attr->nres.run_off);
369
370 if (roff > asize) {
371 err = -EINVAL;
372 goto out;
373 }
374
375 t64 = le64_to_cpu(attr->nres.svcn);
376
377 err = run_unpack_ex(run, sbi, ino, t64, le64_to_cpu(attr->nres.evcn),
378 t64, Add2Ptr(attr, roff), asize - roff);
379 if (err < 0)
380 goto out;
381 err = 0;
382 goto next_attr;
383
384 end_enum:
385
386 if (!std5)
387 goto out;
388
389 if (!is_match && name) {
390 /* Reuse rec as buffer for ascii name. */
391 err = -ENOENT;
392 goto out;
393 }
394
395 if (std5->fa & FILE_ATTRIBUTE_READONLY)
396 mode &= ~0222;
397
398 if (!names) {
399 err = -EINVAL;
400 goto out;
401 }
402
403 if (names != le16_to_cpu(rec->hard_links)) {
		/*
		 * Correct the minor error on the fly. Only the MFT record is
		 * marked dirty, not the VFS inode.
		 */
405 rec->hard_links = cpu_to_le16(names);
406 ni->mi.dirty = true;
407 }
408
409 set_nlink(inode, names);
410
411 if (S_ISDIR(mode)) {
412 ni->std_fa |= FILE_ATTRIBUTE_DIRECTORY;
413
		/*
		 * Dot and dot-dot should be included in the link count but
		 * were not included in the enumeration.
		 * Hard links to directories are normally disabled.
		 */
419 inode->i_op = &ntfs_dir_inode_operations;
420 inode->i_fop = &ntfs_dir_operations;
421 ni->i_valid = 0;
422 } else if (S_ISLNK(mode)) {
423 ni->std_fa &= ~FILE_ATTRIBUTE_DIRECTORY;
424 inode->i_op = &ntfs_link_inode_operations;
425 inode->i_fop = NULL;
426 inode_nohighmem(inode);
427 } else if (S_ISREG(mode)) {
428 ni->std_fa &= ~FILE_ATTRIBUTE_DIRECTORY;
429 inode->i_op = &ntfs_file_inode_operations;
430 inode->i_fop = &ntfs_file_operations;
431 inode->i_mapping->a_ops =
432 is_compressed(ni) ? &ntfs_aops_cmpr : &ntfs_aops;
433 if (ino != MFT_REC_MFT)
434 init_rwsem(&ni->file.run_lock);
435 } else if (S_ISCHR(mode) || S_ISBLK(mode) || S_ISFIFO(mode) ||
436 S_ISSOCK(mode)) {
437 inode->i_op = &ntfs_special_inode_operations;
438 init_special_inode(inode, mode, inode->i_rdev);
439 } else if (fname && fname->home.low == cpu_to_le32(MFT_REC_EXTEND) &&
440 fname->home.seq == cpu_to_le16(MFT_REC_EXTEND)) {
		/* Records in $Extend are neither regular files nor general directories. */
442 inode->i_op = &ntfs_file_inode_operations;
443 } else {
444 err = -EINVAL;
445 goto out;
446 }
447
448 if ((sbi->options->sys_immutable &&
449 (std5->fa & FILE_ATTRIBUTE_SYSTEM)) &&
450 !S_ISFIFO(mode) && !S_ISSOCK(mode) && !S_ISLNK(mode)) {
451 inode->i_flags |= S_IMMUTABLE;
452 } else {
453 inode->i_flags &= ~S_IMMUTABLE;
454 }
455
456 inode->i_mode = mode;
457 if (!(ni->ni_flags & NI_FLAG_EA)) {
458 /* If no xattr then no security (stored in xattr). */
459 inode->i_flags |= S_NOSEC;
460 }
461
462 Ok:
463 if (ino == MFT_REC_MFT && !sb->s_root)
464 sbi->mft.ni = NULL;
465
466 unlock_new_inode(inode);
467
468 return inode;
469
470 out:
471 if (ino == MFT_REC_MFT && !sb->s_root)
472 sbi->mft.ni = NULL;
473
474 iget_failed(inode);
475 return ERR_PTR(err);
476 }
477
478 /*
479 * ntfs_test_inode
480 *
481 * Return: 1 if match.
482 */
static int ntfs_test_inode(struct inode *inode, void *data)
484 {
485 struct MFT_REF *ref = data;
486
487 return ino_get(ref) == inode->i_ino;
488 }
489
static int ntfs_set_inode(struct inode *inode, void *data)
491 {
492 const struct MFT_REF *ref = data;
493
494 inode->i_ino = ino_get(ref);
495 return 0;
496 }
497
struct inode *ntfs_iget5(struct super_block *sb, const struct MFT_REF *ref,
			 const struct cpu_str *name)
500 {
501 struct inode *inode;
502
503 inode = iget5_locked(sb, ino_get(ref), ntfs_test_inode, ntfs_set_inode,
504 (void *)ref);
505 if (unlikely(!inode))
506 return ERR_PTR(-ENOMEM);
507
508 /* If this is a freshly allocated inode, need to read it now. */
509 if (inode->i_state & I_NEW)
510 inode = ntfs_read_mft(inode, name, ref);
511 else if (ref->seq != ntfs_i(inode)->mi.mrec->seq) {
512 /* Inode overlaps? */
513 _ntfs_bad_inode(inode);
514 }
515
516 return inode;
517 }
518
519 enum get_block_ctx {
520 GET_BLOCK_GENERAL = 0,
521 GET_BLOCK_WRITE_BEGIN = 1,
522 GET_BLOCK_DIRECT_IO_R = 2,
523 GET_BLOCK_DIRECT_IO_W = 3,
524 GET_BLOCK_BMAP = 4,
525 };
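/*
 * A note on the helper below (a summary of the code, not separate
 * documentation): ntfs_get_block_vbo() translates a virtual byte offset
 * @vbo of @inode into an on-disk location and describes it in @bh.
 * Resident files are served straight from the MFT record via
 * attr_data_read_resident(); for non-resident files the vcn->lcn mapping
 * comes from attr_data_get_block(). The @ctx argument distinguishes the
 * callers declared above: direct I/O writes defer the i_valid update to
 * ntfs_direct_IO(), while buffered writes grow ni->i_valid here.
 */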
526
static noinline int ntfs_get_block_vbo(struct inode *inode, u64 vbo,
				       struct buffer_head *bh, int create,
				       enum get_block_ctx ctx)
530 {
531 struct super_block *sb = inode->i_sb;
532 struct ntfs_sb_info *sbi = sb->s_fs_info;
533 struct ntfs_inode *ni = ntfs_i(inode);
534 struct page *page = bh->b_page;
535 u8 cluster_bits = sbi->cluster_bits;
536 u32 block_size = sb->s_blocksize;
537 u64 bytes, lbo, valid;
538 u32 off;
539 int err;
540 CLST vcn, lcn, len;
541 bool new;
542
543 /* Clear previous state. */
544 clear_buffer_new(bh);
545 clear_buffer_uptodate(bh);
546
547 /* Direct write uses 'create=0'. */
548 if (!create && vbo >= ni->i_valid) {
549 /* Out of valid. */
550 return 0;
551 }
552
553 if (vbo >= inode->i_size) {
554 /* Out of size. */
555 return 0;
556 }
557
558 if (is_resident(ni)) {
559 ni_lock(ni);
560 err = attr_data_read_resident(ni, page);
561 ni_unlock(ni);
562
563 if (!err)
564 set_buffer_uptodate(bh);
565 bh->b_size = block_size;
566 return err;
567 }
568
569 vcn = vbo >> cluster_bits;
570 off = vbo & sbi->cluster_mask;
571 new = false;
572
573 err = attr_data_get_block(ni, vcn, 1, &lcn, &len, create ? &new : NULL);
574 if (err)
575 goto out;
576
577 if (!len)
578 return 0;
579
580 bytes = ((u64)len << cluster_bits) - off;
581
582 if (lcn == SPARSE_LCN) {
583 if (!create) {
584 if (bh->b_size > bytes)
585 bh->b_size = bytes;
586 return 0;
587 }
588 WARN_ON(1);
589 }
590
591 if (new) {
592 set_buffer_new(bh);
593 if ((len << cluster_bits) > block_size)
594 ntfs_sparse_cluster(inode, page, vcn, len);
595 }
596
597 lbo = ((u64)lcn << cluster_bits) + off;
598
599 set_buffer_mapped(bh);
600 bh->b_bdev = sb->s_bdev;
601 bh->b_blocknr = lbo >> sb->s_blocksize_bits;
602
603 valid = ni->i_valid;
604
605 if (ctx == GET_BLOCK_DIRECT_IO_W) {
606 /* ntfs_direct_IO will update ni->i_valid. */
607 if (vbo >= valid)
608 set_buffer_new(bh);
609 } else if (create) {
610 /* Normal write. */
611 if (bytes > bh->b_size)
612 bytes = bh->b_size;
613
614 if (vbo >= valid)
615 set_buffer_new(bh);
616
617 if (vbo + bytes > valid) {
618 ni->i_valid = vbo + bytes;
619 mark_inode_dirty(inode);
620 }
621 } else if (vbo >= valid) {
		/*
		 * Read beyond valid data.
		 * Should never get here because it was already checked above.
		 */
624 clear_buffer_mapped(bh);
625 } else if (vbo + bytes <= valid) {
626 /* Normal read. */
627 } else if (vbo + block_size <= valid) {
628 /* Normal short read. */
629 bytes = block_size;
630 } else {
631 /*
632 * Read across valid size: vbo < valid && valid < vbo + block_size
633 */
634 bytes = block_size;
635
636 if (page) {
637 u32 voff = valid - vbo;
638
639 bh->b_size = block_size;
640 off = vbo & (PAGE_SIZE - 1);
641 set_bh_page(bh, page, off);
642 err = bh_read(bh, 0);
643 if (err < 0)
644 goto out;
645 zero_user_segment(page, off + voff, off + block_size);
646 }
647 }
648
649 if (bh->b_size > bytes)
650 bh->b_size = bytes;
651
652 #ifndef __LP64__
653 if (ctx == GET_BLOCK_DIRECT_IO_W || ctx == GET_BLOCK_DIRECT_IO_R) {
654 static_assert(sizeof(size_t) < sizeof(loff_t));
655 if (bytes > 0x40000000u)
656 bh->b_size = 0x40000000u;
657 }
658 #endif
659
660 return 0;
661
662 out:
663 return err;
664 }
665
int ntfs_get_block(struct inode *inode, sector_t vbn,
		   struct buffer_head *bh_result, int create)
668 {
669 return ntfs_get_block_vbo(inode, (u64)vbn << inode->i_blkbits,
670 bh_result, create, GET_BLOCK_GENERAL);
671 }
672
static int ntfs_get_block_bmap(struct inode *inode, sector_t vsn,
			       struct buffer_head *bh_result, int create)
675 {
676 return ntfs_get_block_vbo(inode,
677 (u64)vsn << inode->i_sb->s_blocksize_bits,
678 bh_result, create, GET_BLOCK_BMAP);
679 }
680
static sector_t ntfs_bmap(struct address_space *mapping, sector_t block)
682 {
683 return generic_block_bmap(mapping, block, ntfs_get_block_bmap);
684 }
685
static int ntfs_read_folio(struct file *file, struct folio *folio)
687 {
688 struct page *page = &folio->page;
689 int err;
690 struct address_space *mapping = page->mapping;
691 struct inode *inode = mapping->host;
692 struct ntfs_inode *ni = ntfs_i(inode);
693
694 if (is_resident(ni)) {
695 ni_lock(ni);
696 err = attr_data_read_resident(ni, page);
697 ni_unlock(ni);
698 if (err != E_NTFS_NONRESIDENT) {
699 unlock_page(page);
700 return err;
701 }
702 }
703
704 if (is_compressed(ni)) {
705 ni_lock(ni);
706 err = ni_readpage_cmpr(ni, page);
707 ni_unlock(ni);
708 return err;
709 }
710
711 /* Normal + sparse files. */
712 return mpage_read_folio(folio, ntfs_get_block);
713 }
714
static void ntfs_readahead(struct readahead_control *rac)
716 {
717 struct address_space *mapping = rac->mapping;
718 struct inode *inode = mapping->host;
719 struct ntfs_inode *ni = ntfs_i(inode);
720 u64 valid;
721 loff_t pos;
722
723 if (is_resident(ni)) {
724 /* No readahead for resident. */
725 return;
726 }
727
728 if (is_compressed(ni)) {
729 /* No readahead for compressed. */
730 return;
731 }
732
733 valid = ni->i_valid;
734 pos = readahead_pos(rac);
735
736 if (valid < i_size_read(inode) && pos <= valid &&
737 valid < pos + readahead_length(rac)) {
		/* Range crosses 'valid'. Read it page by page. */
739 return;
740 }
741
742 mpage_readahead(rac, ntfs_get_block);
743 }
744
static int ntfs_get_block_direct_IO_R(struct inode *inode, sector_t iblock,
				      struct buffer_head *bh_result, int create)
747 {
748 return ntfs_get_block_vbo(inode, (u64)iblock << inode->i_blkbits,
749 bh_result, create, GET_BLOCK_DIRECT_IO_R);
750 }
751
static int ntfs_get_block_direct_IO_W(struct inode *inode, sector_t iblock,
				      struct buffer_head *bh_result, int create)
754 {
755 return ntfs_get_block_vbo(inode, (u64)iblock << inode->i_blkbits,
756 bh_result, create, GET_BLOCK_DIRECT_IO_W);
757 }
758
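/*
 * Sketch of what follows (derived from reading this function, not from
 * separate documentation): ntfs_direct_IO() refuses direct I/O for
 * resident files by returning 0 so the caller falls back to the buffered
 * path. After blockdev_direct_IO() completes, a write extends ni->i_valid
 * up to the end of the transfer, and a read that crossed the old valid
 * size zeroes the tail of the user buffer with iov_iter_zero().
 */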
static ssize_t ntfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
760 {
761 struct file *file = iocb->ki_filp;
762 struct address_space *mapping = file->f_mapping;
763 struct inode *inode = mapping->host;
764 struct ntfs_inode *ni = ntfs_i(inode);
765 loff_t vbo = iocb->ki_pos;
766 loff_t end;
767 int wr = iov_iter_rw(iter) & WRITE;
768 size_t iter_count = iov_iter_count(iter);
769 loff_t valid;
770 ssize_t ret;
771
772 if (is_resident(ni)) {
773 /* Switch to buffered write. */
774 ret = 0;
775 goto out;
776 }
777
778 ret = blockdev_direct_IO(iocb, inode, iter,
779 wr ? ntfs_get_block_direct_IO_W
780 : ntfs_get_block_direct_IO_R);
781
782 if (ret > 0)
783 end = vbo + ret;
784 else if (wr && ret == -EIOCBQUEUED)
785 end = vbo + iter_count;
786 else
787 goto out;
788
789 valid = ni->i_valid;
790 if (wr) {
791 if (end > valid && !S_ISBLK(inode->i_mode)) {
792 ni->i_valid = end;
793 mark_inode_dirty(inode);
794 }
795 } else if (vbo < valid && valid < end) {
		/* Zero the tail of the read beyond the valid size. */
797 iov_iter_revert(iter, end - valid);
798 iov_iter_zero(end - valid, iter);
799 }
800
801 out:
802 return ret;
803 }
804
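/*
 * ntfs_set_size - Resize the unnamed $DATA attribute of @inode.
 *
 * A brief summary (inferred from the body below): the new size is first
 * checked against sbi->maxbytes / maxbytes_sparse, then attr_set_size()
 * does the actual work under ni_lock and the run_lock, and the VFS inode
 * is marked dirty.
 */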
int ntfs_set_size(struct inode *inode, u64 new_size)
806 {
807 struct super_block *sb = inode->i_sb;
808 struct ntfs_sb_info *sbi = sb->s_fs_info;
809 struct ntfs_inode *ni = ntfs_i(inode);
810 int err;
811
812 /* Check for maximum file size. */
813 if (is_sparsed(ni) || is_compressed(ni)) {
814 if (new_size > sbi->maxbytes_sparse) {
815 err = -EFBIG;
816 goto out;
817 }
818 } else if (new_size > sbi->maxbytes) {
819 err = -EFBIG;
820 goto out;
821 }
822
823 ni_lock(ni);
824 down_write(&ni->file.run_lock);
825
826 err = attr_set_size(ni, ATTR_DATA, NULL, 0, &ni->file.run, new_size,
827 &ni->i_valid, true, NULL);
828
829 up_write(&ni->file.run_lock);
830 ni_unlock(ni);
831
832 mark_inode_dirty(inode);
833
834 out:
835 return err;
836 }
837
static int ntfs_writepage(struct page *page, struct writeback_control *wbc)
839 {
840 struct address_space *mapping = page->mapping;
841 struct inode *inode = mapping->host;
842 struct ntfs_inode *ni = ntfs_i(inode);
843 int err;
844
845 if (is_resident(ni)) {
846 ni_lock(ni);
847 err = attr_data_write_resident(ni, page);
848 ni_unlock(ni);
849 if (err != E_NTFS_NONRESIDENT) {
850 unlock_page(page);
851 return err;
852 }
853 }
854
855 return block_write_full_page(page, ntfs_get_block, wbc);
856 }
857
static int ntfs_writepages(struct address_space *mapping,
			   struct writeback_control *wbc)
860 {
861 /* Redirect call to 'ntfs_writepage' for resident files. */
862 if (is_resident(ntfs_i(mapping->host)))
863 return generic_writepages(mapping, wbc);
864 return mpage_writepages(mapping, wbc, ntfs_get_block);
865 }
866
static int ntfs_get_block_write_begin(struct inode *inode, sector_t vbn,
				      struct buffer_head *bh_result, int create)
869 {
870 return ntfs_get_block_vbo(inode, (u64)vbn << inode->i_blkbits,
871 bh_result, create, GET_BLOCK_WRITE_BEGIN);
872 }
873
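/*
 * ntfs_write_begin - address_space_operations::write_begin.
 *
 * Rough flow (as implemented below): resident files first try
 * attr_data_read_resident() on a page grabbed with
 * grab_cache_page_write_begin(); if the attribute turns out to be
 * non-resident (E_NTFS_NONRESIDENT) we fall through to the generic
 * block_write_begin() path.
 */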
int ntfs_write_begin(struct file *file, struct address_space *mapping,
		     loff_t pos, u32 len, struct page **pagep, void **fsdata)
876 {
877 int err;
878 struct inode *inode = mapping->host;
879 struct ntfs_inode *ni = ntfs_i(inode);
880
881 *pagep = NULL;
882 if (is_resident(ni)) {
883 struct page *page = grab_cache_page_write_begin(
884 mapping, pos >> PAGE_SHIFT);
885
886 if (!page) {
887 err = -ENOMEM;
888 goto out;
889 }
890
891 ni_lock(ni);
892 err = attr_data_read_resident(ni, page);
893 ni_unlock(ni);
894
895 if (!err) {
896 *pagep = page;
897 goto out;
898 }
899 unlock_page(page);
900 put_page(page);
901
902 if (err != E_NTFS_NONRESIDENT)
903 goto out;
904 }
905
906 err = block_write_begin(mapping, pos, len, pagep,
907 ntfs_get_block_write_begin);
908
909 out:
910 return err;
911 }
912
913 /*
914 * ntfs_write_end - Address_space_operations::write_end.
915 */
int ntfs_write_end(struct file *file, struct address_space *mapping,
		   loff_t pos, u32 len, u32 copied, struct page *page,
		   void *fsdata)
919 {
920 struct inode *inode = mapping->host;
921 struct ntfs_inode *ni = ntfs_i(inode);
922 u64 valid = ni->i_valid;
923 bool dirty = false;
924 int err;
925
926 if (is_resident(ni)) {
927 ni_lock(ni);
928 err = attr_data_write_resident(ni, page);
929 ni_unlock(ni);
930 if (!err) {
931 dirty = true;
932 /* Clear any buffers in page. */
933 if (page_has_buffers(page)) {
934 struct buffer_head *head, *bh;
935
936 bh = head = page_buffers(page);
937 do {
938 clear_buffer_dirty(bh);
939 clear_buffer_mapped(bh);
940 set_buffer_uptodate(bh);
941 } while (head != (bh = bh->b_this_page));
942 }
943 SetPageUptodate(page);
944 err = copied;
945 }
946 unlock_page(page);
947 put_page(page);
948 } else {
949 err = generic_write_end(file, mapping, pos, len, copied, page,
950 fsdata);
951 }
952
953 if (err >= 0) {
954 if (!(ni->std_fa & FILE_ATTRIBUTE_ARCHIVE)) {
955 inode->i_ctime = inode->i_mtime = current_time(inode);
956 ni->std_fa |= FILE_ATTRIBUTE_ARCHIVE;
957 dirty = true;
958 }
959
960 if (valid != ni->i_valid) {
961 /* ni->i_valid is changed in ntfs_get_block_vbo. */
962 dirty = true;
963 }
964
965 if (dirty)
966 mark_inode_dirty(inode);
967 }
968
969 return err;
970 }
971
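/*
 * reset_log_file - Fill the $LogFile data with 0xff, page by page.
 *
 * A short description of the loop below: each page-sized chunk is set to
 * -1 (0xff bytes) through block_write_begin()/block_write_end(), with
 * balance_dirty_pages_ratelimited() called between iterations.
 */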
int reset_log_file(struct inode *inode)
973 {
974 int err;
975 loff_t pos = 0;
976 u32 log_size = inode->i_size;
977 struct address_space *mapping = inode->i_mapping;
978
979 for (;;) {
980 u32 len;
981 void *kaddr;
982 struct page *page;
983
984 len = pos + PAGE_SIZE > log_size ? (log_size - pos) : PAGE_SIZE;
985
986 err = block_write_begin(mapping, pos, len, &page,
987 ntfs_get_block_write_begin);
988 if (err)
989 goto out;
990
991 kaddr = kmap_atomic(page);
992 memset(kaddr, -1, len);
993 kunmap_atomic(kaddr);
994 flush_dcache_page(page);
995
996 err = block_write_end(NULL, mapping, pos, len, len, page, NULL);
997 if (err < 0)
998 goto out;
999 pos += len;
1000
1001 if (pos >= log_size)
1002 break;
1003 balance_dirty_pages_ratelimited(mapping);
1004 }
1005 out:
1006 mark_inode_dirty_sync(inode);
1007
1008 return err;
1009 }
1010
int ntfs3_write_inode(struct inode *inode, struct writeback_control *wbc)
1012 {
1013 return _ni_write_inode(inode, wbc->sync_mode == WB_SYNC_ALL);
1014 }
1015
int ntfs_sync_inode(struct inode *inode)
1017 {
1018 return _ni_write_inode(inode, 1);
1019 }
1020
1021 /*
1022 * writeback_inode - Helper function for ntfs_flush_inodes().
1023 *
1024 * This writes both the inode and the file data blocks, waiting
1025 * for in flight data blocks before the start of the call. It
1026 * does not wait for any io started during the call.
1027 */
static int writeback_inode(struct inode *inode)
1029 {
1030 int ret = sync_inode_metadata(inode, 0);
1031
1032 if (!ret)
1033 ret = filemap_fdatawrite(inode->i_mapping);
1034 return ret;
1035 }
1036
1037 /*
1038 * ntfs_flush_inodes
1039 *
1040 * Write data and metadata corresponding to i1 and i2. The io is
1041 * started but we do not wait for any of it to finish.
1042 *
1043 * filemap_flush() is used for the block device, so if there is a dirty
1044 * page for a block already in flight, we will not wait and start the
1045 * io over again.
1046 */
int ntfs_flush_inodes(struct super_block *sb, struct inode *i1,
		      struct inode *i2)
1049 {
1050 int ret = 0;
1051
1052 if (i1)
1053 ret = writeback_inode(i1);
1054 if (!ret && i2)
1055 ret = writeback_inode(i2);
1056 if (!ret)
1057 ret = sync_blockdev_nowait(sb->s_bdev);
1058 return ret;
1059 }
1060
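/*
 * inode_write_data - Copy @bytes of @data into the page cache of @inode.
 *
 * As the loop below shows, the data goes in one PAGE_SIZE chunk at a
 * time: each page is mapped with ntfs_map_page(), overwritten with
 * memcpy(), flushed and marked uptodate again.
 */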
int inode_write_data(struct inode *inode, const void *data, size_t bytes)
1062 {
1063 pgoff_t idx;
1064
1065 /* Write non resident data. */
1066 for (idx = 0; bytes; idx++) {
1067 size_t op = bytes > PAGE_SIZE ? PAGE_SIZE : bytes;
1068 struct page *page = ntfs_map_page(inode->i_mapping, idx);
1069
1070 if (IS_ERR(page))
1071 return PTR_ERR(page);
1072
1073 lock_page(page);
1074 WARN_ON(!PageUptodate(page));
1075 ClearPageUptodate(page);
1076
1077 memcpy(page_address(page), data, op);
1078
1079 flush_dcache_page(page);
1080 SetPageUptodate(page);
1081 unlock_page(page);
1082
1083 ntfs_unmap_page(page);
1084
1085 bytes -= op;
1086 data = Add2Ptr(data, PAGE_SIZE);
1087 }
1088 return 0;
1089 }
1090
1091 /*
1092 * ntfs_reparse_bytes
1093 *
1094 * Number of bytes for REPARSE_DATA_BUFFER(IO_REPARSE_TAG_SYMLINK)
1095 * for unicode string of @uni_len length.
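 *
 * For illustration only (assuming the symlink PathBuffer starts 0x14 (20)
 * bytes into struct REPARSE_DATA_BUFFER): a 5-character link name would
 * need sizeof(short) * (2 * 5 + 4) + 20 = 48 bytes.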
1096 */
static inline u32 ntfs_reparse_bytes(u32 uni_len)
1098 {
1099 /* Header + unicode string + decorated unicode string. */
1100 return sizeof(short) * (2 * uni_len + 4) +
1101 offsetof(struct REPARSE_DATA_BUFFER,
1102 SymbolicLinkReparseBuffer.PathBuffer);
1103 }
1104
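/*
 * A sketch of the buffer built below (based on the code itself): the link
 * name is converted to UTF-16 and stored twice in PathBuffer -- first as
 * the PrintName, then, shifted by four characters and prefixed with
 * "\??\", as the SubstituteName. Linux '/' separators are replaced with
 * Windows '\' before the second copy is made.
 */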
1105 static struct REPARSE_DATA_BUFFER *
ntfs_create_reparse_buffer(struct ntfs_sb_info *sbi, const char *symname,
			   u32 size, u16 *nsize)
1108 {
1109 int i, err;
1110 struct REPARSE_DATA_BUFFER *rp;
1111 __le16 *rp_name;
1112 typeof(rp->SymbolicLinkReparseBuffer) *rs;
1113
1114 rp = kzalloc(ntfs_reparse_bytes(2 * size + 2), GFP_NOFS);
1115 if (!rp)
1116 return ERR_PTR(-ENOMEM);
1117
1118 rs = &rp->SymbolicLinkReparseBuffer;
1119 rp_name = rs->PathBuffer;
1120
1121 /* Convert link name to UTF-16. */
1122 err = ntfs_nls_to_utf16(sbi, symname, size,
1123 (struct cpu_str *)(rp_name - 1), 2 * size,
1124 UTF16_LITTLE_ENDIAN);
1125 if (err < 0)
1126 goto out;
1127
	/* 'err' now holds the length of the symlink name in UTF-16 characters. */
1129 *nsize = ntfs_reparse_bytes(err);
1130
1131 if (*nsize > sbi->reparse.max_size) {
1132 err = -EFBIG;
1133 goto out;
1134 }
1135
1136 /* Translate Linux '/' into Windows '\'. */
1137 for (i = 0; i < err; i++) {
1138 if (rp_name[i] == cpu_to_le16('/'))
1139 rp_name[i] = cpu_to_le16('\\');
1140 }
1141
1142 rp->ReparseTag = IO_REPARSE_TAG_SYMLINK;
1143 rp->ReparseDataLength =
1144 cpu_to_le16(*nsize - offsetof(struct REPARSE_DATA_BUFFER,
1145 SymbolicLinkReparseBuffer));
1146
1147 /* PrintName + SubstituteName. */
1148 rs->SubstituteNameOffset = cpu_to_le16(sizeof(short) * err);
1149 rs->SubstituteNameLength = cpu_to_le16(sizeof(short) * err + 8);
1150 rs->PrintNameLength = rs->SubstituteNameOffset;
1151
1152 /*
1153 * TODO: Use relative path if possible to allow Windows to
1154 * parse this path.
1155 * 0-absolute path 1- relative path (SYMLINK_FLAG_RELATIVE).
1156 */
1157 rs->Flags = 0;
1158
1159 memmove(rp_name + err + 4, rp_name, sizeof(short) * err);
1160
1161 /* Decorate SubstituteName. */
1162 rp_name += err;
1163 rp_name[0] = cpu_to_le16('\\');
1164 rp_name[1] = cpu_to_le16('?');
1165 rp_name[2] = cpu_to_le16('?');
1166 rp_name[3] = cpu_to_le16('\\');
1167
1168 return rp;
1169 out:
1170 kfree(rp);
1171 return ERR_PTR(err);
1172 }
1173
struct inode *ntfs_create_inode(struct user_namespace *mnt_userns,
				struct inode *dir, struct dentry *dentry,
				const struct cpu_str *uni, umode_t mode,
				dev_t dev, const char *symname, u32 size,
				struct ntfs_fnd *fnd)
1179 {
1180 int err;
1181 struct super_block *sb = dir->i_sb;
1182 struct ntfs_sb_info *sbi = sb->s_fs_info;
1183 const struct qstr *name = &dentry->d_name;
1184 CLST ino = 0;
1185 struct ntfs_inode *dir_ni = ntfs_i(dir);
1186 struct ntfs_inode *ni = NULL;
1187 struct inode *inode = NULL;
1188 struct ATTRIB *attr;
1189 struct ATTR_STD_INFO5 *std5;
1190 struct ATTR_FILE_NAME *fname;
1191 struct MFT_REC *rec;
1192 u32 asize, dsize, sd_size;
1193 enum FILE_ATTRIBUTE fa;
1194 __le32 security_id = SECURITY_ID_INVALID;
1195 CLST vcn;
1196 const void *sd;
1197 u16 t16, nsize = 0, aid = 0;
1198 struct INDEX_ROOT *root, *dir_root;
1199 struct NTFS_DE *e, *new_de = NULL;
1200 struct REPARSE_DATA_BUFFER *rp = NULL;
1201 bool rp_inserted = false;
1202
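	/*
	 * Overview of what this function assembles (a reading of the code
	 * below, kept here as orientation): a new MFT record is filled with
	 * $STANDARD_INFORMATION, $FILE_NAME, optionally a security
	 * descriptor, then either an $INDEX_ROOT (directories) or a $DATA
	 * attribute (files, symlinks, special nodes), plus $REPARSE_POINT
	 * for symlinks. Finally the new name is inserted into the parent's
	 * $I30 index.
	 */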
1203 ni_lock_dir(dir_ni);
1204
1205 dir_root = indx_get_root(&dir_ni->dir, dir_ni, NULL, NULL);
1206 if (!dir_root) {
1207 err = -EINVAL;
1208 goto out1;
1209 }
1210
1211 if (S_ISDIR(mode)) {
1212 /* Use parent's directory attributes. */
1213 fa = dir_ni->std_fa | FILE_ATTRIBUTE_DIRECTORY |
1214 FILE_ATTRIBUTE_ARCHIVE;
1215 /*
1216 * By default child directory inherits parent attributes.
1217 * Root directory is hidden + system.
1218 * Make an exception for children in root.
1219 */
1220 if (dir->i_ino == MFT_REC_ROOT)
1221 fa &= ~(FILE_ATTRIBUTE_HIDDEN | FILE_ATTRIBUTE_SYSTEM);
1222 } else if (S_ISLNK(mode)) {
		/* Ideally a link should be the same type (file/dir) as its target. */
1224 fa = FILE_ATTRIBUTE_REPARSE_POINT;
1225
		/*
		 * Linux distinguishes dir/file/symlink and so on.
		 * In NTFS, symlinks are "dir + reparse" or "file + reparse".
		 * Ideally we would create:
		 *   dir + reparse if 'symname' points to a directory, or
		 *   file + reparse if 'symname' points to a file.
		 * Unfortunately kern_path hangs if symname contains 'dir'.
		 */
1235
1236 /*
1237 * struct path path;
1238 *
1239 * if (!kern_path(symname, LOOKUP_FOLLOW, &path)){
1240 * struct inode *target = d_inode(path.dentry);
1241 *
1242 * if (S_ISDIR(target->i_mode))
1243 * fa |= FILE_ATTRIBUTE_DIRECTORY;
1244 * // if ( target->i_sb == sb ){
1245 * // use relative path?
1246 * // }
1247 * path_put(&path);
1248 * }
1249 */
1250 } else if (S_ISREG(mode)) {
1251 if (sbi->options->sparse) {
			/* Sparse regular file, because of the 'sparse' mount option. */
1253 fa = FILE_ATTRIBUTE_SPARSE_FILE |
1254 FILE_ATTRIBUTE_ARCHIVE;
1255 } else if (dir_ni->std_fa & FILE_ATTRIBUTE_COMPRESSED) {
1256 /* Compressed regular file, if parent is compressed. */
1257 fa = FILE_ATTRIBUTE_COMPRESSED | FILE_ATTRIBUTE_ARCHIVE;
1258 } else {
1259 /* Regular file, default attributes. */
1260 fa = FILE_ATTRIBUTE_ARCHIVE;
1261 }
1262 } else {
1263 fa = FILE_ATTRIBUTE_ARCHIVE;
1264 }
1265
1266 if (!(mode & 0222))
1267 fa |= FILE_ATTRIBUTE_READONLY;
1268
1269 /* Allocate PATH_MAX bytes. */
1270 new_de = __getname();
1271 if (!new_de) {
1272 err = -ENOMEM;
1273 goto out1;
1274 }
1275
	/* Mark rw ntfs as dirty. It will be cleared at umount. */
1277 ntfs_set_state(sbi, NTFS_DIRTY_DIRTY);
1278
1279 /* Step 1: allocate and fill new mft record. */
1280 err = ntfs_look_free_mft(sbi, &ino, false, NULL, NULL);
1281 if (err)
1282 goto out2;
1283
1284 ni = ntfs_new_inode(sbi, ino, fa & FILE_ATTRIBUTE_DIRECTORY);
1285 if (IS_ERR(ni)) {
1286 err = PTR_ERR(ni);
1287 ni = NULL;
1288 goto out3;
1289 }
1290 inode = &ni->vfs_inode;
1291 inode_init_owner(mnt_userns, inode, dir, mode);
1292 mode = inode->i_mode;
1293
1294 inode->i_atime = inode->i_mtime = inode->i_ctime = ni->i_crtime =
1295 current_time(inode);
1296
1297 rec = ni->mi.mrec;
1298 rec->hard_links = cpu_to_le16(1);
1299 attr = Add2Ptr(rec, le16_to_cpu(rec->attr_off));
1300
1301 /* Get default security id. */
1302 sd = s_default_security;
1303 sd_size = sizeof(s_default_security);
1304
1305 if (is_ntfs3(sbi)) {
1306 security_id = dir_ni->std_security_id;
1307 if (le32_to_cpu(security_id) < SECURITY_ID_FIRST) {
1308 security_id = sbi->security.def_security_id;
1309
1310 if (security_id == SECURITY_ID_INVALID &&
1311 !ntfs_insert_security(sbi, sd, sd_size,
1312 &security_id, NULL))
1313 sbi->security.def_security_id = security_id;
1314 }
1315 }
1316
1317 /* Insert standard info. */
1318 std5 = Add2Ptr(attr, SIZEOF_RESIDENT);
1319
1320 if (security_id == SECURITY_ID_INVALID) {
1321 dsize = sizeof(struct ATTR_STD_INFO);
1322 } else {
1323 dsize = sizeof(struct ATTR_STD_INFO5);
1324 std5->security_id = security_id;
1325 ni->std_security_id = security_id;
1326 }
1327 asize = SIZEOF_RESIDENT + dsize;
1328
1329 attr->type = ATTR_STD;
1330 attr->size = cpu_to_le32(asize);
1331 attr->id = cpu_to_le16(aid++);
1332 attr->res.data_off = SIZEOF_RESIDENT_LE;
1333 attr->res.data_size = cpu_to_le32(dsize);
1334
1335 std5->cr_time = std5->m_time = std5->c_time = std5->a_time =
1336 kernel2nt(&inode->i_atime);
1337
1338 ni->std_fa = fa;
1339 std5->fa = fa;
1340
1341 attr = Add2Ptr(attr, asize);
1342
1343 /* Insert file name. */
1344 err = fill_name_de(sbi, new_de, name, uni);
1345 if (err)
1346 goto out4;
1347
1348 mi_get_ref(&ni->mi, &new_de->ref);
1349
1350 fname = (struct ATTR_FILE_NAME *)(new_de + 1);
1351 mi_get_ref(&dir_ni->mi, &fname->home);
1352 fname->dup.cr_time = fname->dup.m_time = fname->dup.c_time =
1353 fname->dup.a_time = std5->cr_time;
1354 fname->dup.alloc_size = fname->dup.data_size = 0;
1355 fname->dup.fa = std5->fa;
1356 fname->dup.ea_size = fname->dup.reparse = 0;
1357
1358 dsize = le16_to_cpu(new_de->key_size);
1359 asize = ALIGN(SIZEOF_RESIDENT + dsize, 8);
1360
1361 attr->type = ATTR_NAME;
1362 attr->size = cpu_to_le32(asize);
1363 attr->res.data_off = SIZEOF_RESIDENT_LE;
1364 attr->res.flags = RESIDENT_FLAG_INDEXED;
1365 attr->id = cpu_to_le16(aid++);
1366 attr->res.data_size = cpu_to_le32(dsize);
1367 memcpy(Add2Ptr(attr, SIZEOF_RESIDENT), fname, dsize);
1368
1369 attr = Add2Ptr(attr, asize);
1370
1371 if (security_id == SECURITY_ID_INVALID) {
1372 /* Insert security attribute. */
1373 asize = SIZEOF_RESIDENT + ALIGN(sd_size, 8);
1374
1375 attr->type = ATTR_SECURE;
1376 attr->size = cpu_to_le32(asize);
1377 attr->id = cpu_to_le16(aid++);
1378 attr->res.data_off = SIZEOF_RESIDENT_LE;
1379 attr->res.data_size = cpu_to_le32(sd_size);
1380 memcpy(Add2Ptr(attr, SIZEOF_RESIDENT), sd, sd_size);
1381
1382 attr = Add2Ptr(attr, asize);
1383 }
1384
1385 attr->id = cpu_to_le16(aid++);
1386 if (fa & FILE_ATTRIBUTE_DIRECTORY) {
1387 /*
1388 * Regular directory or symlink to directory.
1389 * Create root attribute.
1390 */
1391 dsize = sizeof(struct INDEX_ROOT) + sizeof(struct NTFS_DE);
1392 asize = sizeof(I30_NAME) + SIZEOF_RESIDENT + dsize;
1393
1394 attr->type = ATTR_ROOT;
1395 attr->size = cpu_to_le32(asize);
1396
1397 attr->name_len = ARRAY_SIZE(I30_NAME);
1398 attr->name_off = SIZEOF_RESIDENT_LE;
1399 attr->res.data_off =
1400 cpu_to_le16(sizeof(I30_NAME) + SIZEOF_RESIDENT);
1401 attr->res.data_size = cpu_to_le32(dsize);
1402 memcpy(Add2Ptr(attr, SIZEOF_RESIDENT), I30_NAME,
1403 sizeof(I30_NAME));
1404
1405 root = Add2Ptr(attr, sizeof(I30_NAME) + SIZEOF_RESIDENT);
1406 memcpy(root, dir_root, offsetof(struct INDEX_ROOT, ihdr));
1407 root->ihdr.de_off =
1408 cpu_to_le32(sizeof(struct INDEX_HDR)); // 0x10
1409 root->ihdr.used = cpu_to_le32(sizeof(struct INDEX_HDR) +
1410 sizeof(struct NTFS_DE));
1411 root->ihdr.total = root->ihdr.used;
1412
1413 e = Add2Ptr(root, sizeof(struct INDEX_ROOT));
1414 e->size = cpu_to_le16(sizeof(struct NTFS_DE));
1415 e->flags = NTFS_IE_LAST;
1416 } else if (S_ISLNK(mode)) {
1417 /*
1418 * Symlink to file.
1419 * Create empty resident data attribute.
1420 */
1421 asize = SIZEOF_RESIDENT;
1422
1423 /* Insert empty ATTR_DATA */
1424 attr->type = ATTR_DATA;
1425 attr->size = cpu_to_le32(SIZEOF_RESIDENT);
1426 attr->name_off = SIZEOF_RESIDENT_LE;
1427 attr->res.data_off = SIZEOF_RESIDENT_LE;
1428 } else if (S_ISREG(mode)) {
1429 /*
1430 * Regular file. Create empty non resident data attribute.
1431 */
1432 attr->type = ATTR_DATA;
1433 attr->non_res = 1;
1434 attr->nres.evcn = cpu_to_le64(-1ll);
1435 if (fa & FILE_ATTRIBUTE_SPARSE_FILE) {
1436 attr->size = cpu_to_le32(SIZEOF_NONRESIDENT_EX + 8);
1437 attr->name_off = SIZEOF_NONRESIDENT_EX_LE;
1438 attr->flags = ATTR_FLAG_SPARSED;
1439 asize = SIZEOF_NONRESIDENT_EX + 8;
1440 } else if (fa & FILE_ATTRIBUTE_COMPRESSED) {
1441 attr->size = cpu_to_le32(SIZEOF_NONRESIDENT_EX + 8);
1442 attr->name_off = SIZEOF_NONRESIDENT_EX_LE;
1443 attr->flags = ATTR_FLAG_COMPRESSED;
1444 attr->nres.c_unit = COMPRESSION_UNIT;
1445 asize = SIZEOF_NONRESIDENT_EX + 8;
1446 } else {
1447 attr->size = cpu_to_le32(SIZEOF_NONRESIDENT + 8);
1448 attr->name_off = SIZEOF_NONRESIDENT_LE;
1449 asize = SIZEOF_NONRESIDENT + 8;
1450 }
1451 attr->nres.run_off = attr->name_off;
1452 } else {
1453 /*
1454 * Node. Create empty resident data attribute.
1455 */
1456 attr->type = ATTR_DATA;
1457 attr->size = cpu_to_le32(SIZEOF_RESIDENT);
1458 attr->name_off = SIZEOF_RESIDENT_LE;
1459 if (fa & FILE_ATTRIBUTE_SPARSE_FILE)
1460 attr->flags = ATTR_FLAG_SPARSED;
1461 else if (fa & FILE_ATTRIBUTE_COMPRESSED)
1462 attr->flags = ATTR_FLAG_COMPRESSED;
1463 attr->res.data_off = SIZEOF_RESIDENT_LE;
1464 asize = SIZEOF_RESIDENT;
1465 ni->ni_flags |= NI_FLAG_RESIDENT;
1466 }
1467
1468 if (S_ISDIR(mode)) {
1469 ni->ni_flags |= NI_FLAG_DIR;
1470 err = indx_init(&ni->dir, sbi, attr, INDEX_MUTEX_I30);
1471 if (err)
1472 goto out4;
1473 } else if (S_ISLNK(mode)) {
1474 rp = ntfs_create_reparse_buffer(sbi, symname, size, &nsize);
1475
1476 if (IS_ERR(rp)) {
1477 err = PTR_ERR(rp);
1478 rp = NULL;
1479 goto out4;
1480 }
1481
1482 /*
1483 * Insert ATTR_REPARSE.
1484 */
1485 attr = Add2Ptr(attr, asize);
1486 attr->type = ATTR_REPARSE;
1487 attr->id = cpu_to_le16(aid++);
1488
1489 /* Resident or non resident? */
1490 asize = ALIGN(SIZEOF_RESIDENT + nsize, 8);
1491 t16 = PtrOffset(rec, attr);
1492
		/*
		 * The function 'ntfs_save_wsl_perm' called below requires 0x78 bytes.
		 * It is a good idea to keep extended attributes resident.
		 */
1497 if (asize + t16 + 0x78 + 8 > sbi->record_size) {
1498 CLST alen;
1499 CLST clst = bytes_to_cluster(sbi, nsize);
1500
			/* Bytes available for packed runs. */
1502 t16 = sbi->record_size - t16 - SIZEOF_NONRESIDENT;
1503
1504 attr->non_res = 1;
1505 attr->nres.evcn = cpu_to_le64(clst - 1);
1506 attr->name_off = SIZEOF_NONRESIDENT_LE;
1507 attr->nres.run_off = attr->name_off;
1508 attr->nres.data_size = cpu_to_le64(nsize);
1509 attr->nres.valid_size = attr->nres.data_size;
1510 attr->nres.alloc_size =
1511 cpu_to_le64(ntfs_up_cluster(sbi, nsize));
1512
1513 err = attr_allocate_clusters(sbi, &ni->file.run, 0, 0,
1514 clst, NULL, 0, &alen, 0,
1515 NULL);
1516 if (err)
1517 goto out5;
1518
1519 err = run_pack(&ni->file.run, 0, clst,
1520 Add2Ptr(attr, SIZEOF_NONRESIDENT), t16,
1521 &vcn);
1522 if (err < 0)
1523 goto out5;
1524
1525 if (vcn != clst) {
1526 err = -EINVAL;
1527 goto out5;
1528 }
1529
1530 asize = SIZEOF_NONRESIDENT + ALIGN(err, 8);
1531 } else {
1532 attr->res.data_off = SIZEOF_RESIDENT_LE;
1533 attr->res.data_size = cpu_to_le32(nsize);
1534 memcpy(Add2Ptr(attr, SIZEOF_RESIDENT), rp, nsize);
1535 nsize = 0;
1536 }
1537 /* Size of symlink equals the length of input string. */
1538 inode->i_size = size;
1539
1540 attr->size = cpu_to_le32(asize);
1541
1542 err = ntfs_insert_reparse(sbi, IO_REPARSE_TAG_SYMLINK,
1543 &new_de->ref);
1544 if (err)
1545 goto out5;
1546
1547 rp_inserted = true;
1548 }
1549
1550 attr = Add2Ptr(attr, asize);
1551 attr->type = ATTR_END;
1552
1553 rec->used = cpu_to_le32(PtrOffset(rec, attr) + 8);
1554 rec->next_attr_id = cpu_to_le16(aid);
1555
1556 /* Step 2: Add new name in index. */
1557 err = indx_insert_entry(&dir_ni->dir, dir_ni, new_de, sbi, fnd, 0);
1558 if (err)
1559 goto out6;
1560
1561 /* Unlock parent directory before ntfs_init_acl. */
1562 ni_unlock(dir_ni);
1563
1564 inode->i_generation = le16_to_cpu(rec->seq);
1565
1566 dir->i_mtime = dir->i_ctime = inode->i_atime;
1567
1568 if (S_ISDIR(mode)) {
1569 inode->i_op = &ntfs_dir_inode_operations;
1570 inode->i_fop = &ntfs_dir_operations;
1571 } else if (S_ISLNK(mode)) {
1572 inode->i_op = &ntfs_link_inode_operations;
1573 inode->i_fop = NULL;
1574 inode->i_mapping->a_ops = &ntfs_aops;
1575 inode->i_size = size;
1576 inode_nohighmem(inode);
1577 } else if (S_ISREG(mode)) {
1578 inode->i_op = &ntfs_file_inode_operations;
1579 inode->i_fop = &ntfs_file_operations;
1580 inode->i_mapping->a_ops =
1581 is_compressed(ni) ? &ntfs_aops_cmpr : &ntfs_aops;
1582 init_rwsem(&ni->file.run_lock);
1583 } else {
1584 inode->i_op = &ntfs_special_inode_operations;
1585 init_special_inode(inode, mode, dev);
1586 }
1587
1588 #ifdef CONFIG_NTFS3_FS_POSIX_ACL
1589 if (!S_ISLNK(mode) && (sb->s_flags & SB_POSIXACL)) {
1590 err = ntfs_init_acl(mnt_userns, inode, dir);
1591 if (err)
1592 goto out7;
1593 } else
1594 #endif
1595 {
1596 inode->i_flags |= S_NOSEC;
1597 }
1598
1599 /* Write non resident data. */
1600 if (nsize) {
1601 err = ntfs_sb_write_run(sbi, &ni->file.run, 0, rp, nsize, 0);
1602 if (err)
1603 goto out7;
1604 }
1605
1606 /*
1607 * Call 'd_instantiate' after inode->i_op is set
1608 * but before finish_open.
1609 */
1610 d_instantiate(dentry, inode);
1611
1612 ntfs_save_wsl_perm(inode);
1613 mark_inode_dirty(dir);
1614 mark_inode_dirty(inode);
1615
1616 /* Normal exit. */
1617 goto out2;
1618
1619 out7:
1620
1621 /* Undo 'indx_insert_entry'. */
1622 ni_lock_dir(dir_ni);
1623 indx_delete_entry(&dir_ni->dir, dir_ni, new_de + 1,
1624 le16_to_cpu(new_de->key_size), sbi);
1625 /* ni_unlock(dir_ni); will be called later. */
1626 out6:
1627 if (rp_inserted)
1628 ntfs_remove_reparse(sbi, IO_REPARSE_TAG_SYMLINK, &new_de->ref);
1629
1630 out5:
1631 if (S_ISDIR(mode) || run_is_empty(&ni->file.run))
1632 goto out4;
1633
1634 run_deallocate(sbi, &ni->file.run, false);
1635
1636 out4:
1637 clear_rec_inuse(rec);
1638 clear_nlink(inode);
1639 ni->mi.dirty = false;
1640 discard_new_inode(inode);
1641 out3:
1642 ntfs_mark_rec_free(sbi, ino, false);
1643
1644 out2:
1645 __putname(new_de);
1646 kfree(rp);
1647
1648 out1:
1649 if (err) {
1650 ni_unlock(dir_ni);
1651 return ERR_PTR(err);
1652 }
1653
1654 unlock_new_inode(inode);
1655
1656 return inode;
1657 }
1658
int ntfs_link_inode(struct inode *inode, struct dentry *dentry)
1660 {
1661 int err;
1662 struct ntfs_inode *ni = ntfs_i(inode);
1663 struct ntfs_sb_info *sbi = inode->i_sb->s_fs_info;
1664 struct NTFS_DE *de;
1665
1666 /* Allocate PATH_MAX bytes. */
1667 de = __getname();
1668 if (!de)
1669 return -ENOMEM;
1670
1671 /* Mark rw ntfs as dirty. It will be cleared at umount. */
1672 ntfs_set_state(sbi, NTFS_DIRTY_DIRTY);
1673
1674 /* Construct 'de'. */
1675 err = fill_name_de(sbi, de, &dentry->d_name, NULL);
1676 if (err)
1677 goto out;
1678
1679 err = ni_add_name(ntfs_i(d_inode(dentry->d_parent)), ni, de);
1680 out:
1681 __putname(de);
1682 return err;
1683 }
1684
1685 /*
1686 * ntfs_unlink_inode
1687 *
1688 * inode_operations::unlink
1689 * inode_operations::rmdir
1690 */
int ntfs_unlink_inode(struct inode *dir, const struct dentry *dentry)
1692 {
1693 int err;
1694 struct ntfs_sb_info *sbi = dir->i_sb->s_fs_info;
1695 struct inode *inode = d_inode(dentry);
1696 struct ntfs_inode *ni = ntfs_i(inode);
1697 struct ntfs_inode *dir_ni = ntfs_i(dir);
1698 struct NTFS_DE *de, *de2 = NULL;
1699 int undo_remove;
1700
1701 if (ntfs_is_meta_file(sbi, ni->mi.rno))
1702 return -EINVAL;
1703
1704 /* Allocate PATH_MAX bytes. */
1705 de = __getname();
1706 if (!de)
1707 return -ENOMEM;
1708
1709 ni_lock(ni);
1710
1711 if (S_ISDIR(inode->i_mode) && !dir_is_empty(inode)) {
1712 err = -ENOTEMPTY;
1713 goto out;
1714 }
1715
1716 err = fill_name_de(sbi, de, &dentry->d_name, NULL);
1717 if (err < 0)
1718 goto out;
1719
1720 undo_remove = 0;
1721 err = ni_remove_name(dir_ni, ni, de, &de2, &undo_remove);
1722
1723 if (!err) {
1724 drop_nlink(inode);
1725 dir->i_mtime = dir->i_ctime = current_time(dir);
1726 mark_inode_dirty(dir);
1727 inode->i_ctime = dir->i_ctime;
1728 if (inode->i_nlink)
1729 mark_inode_dirty(inode);
1730 } else if (!ni_remove_name_undo(dir_ni, ni, de, de2, undo_remove)) {
1731 _ntfs_bad_inode(inode);
1732 } else {
1733 if (ni_is_dirty(dir))
1734 mark_inode_dirty(dir);
1735 if (ni_is_dirty(inode))
1736 mark_inode_dirty(inode);
1737 }
1738
1739 out:
1740 ni_unlock(ni);
1741 __putname(de);
1742 return err;
1743 }
1744
void ntfs_evict_inode(struct inode *inode)
1746 {
1747 truncate_inode_pages_final(&inode->i_data);
1748
1749 if (inode->i_nlink)
1750 _ni_write_inode(inode, inode_needs_sync(inode));
1751
1752 invalidate_inode_buffers(inode);
1753 clear_inode(inode);
1754
1755 ni_clear(ntfs_i(inode));
1756 }
1757
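/*
 * ntfs_readlink_hlp - Translate the reparse point of @inode into a path.
 *
 * In short (summarizing the body below): the $REPARSE_POINT attribute is
 * read (resident, or via ntfs_read_run_nb when non-resident), the
 * PrintName is extracted for mount points and symlinks, converted from
 * UTF-16 with ntfs_utf16_to_nls(), and Windows '\' separators are turned
 * into '/'. Cloud (OneDrive) tags just return the string "OneDrive".
 */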
static noinline int ntfs_readlink_hlp(struct inode *inode, char *buffer,
				      int buflen)
1760 {
1761 int i, err = -EINVAL;
1762 struct ntfs_inode *ni = ntfs_i(inode);
1763 struct super_block *sb = inode->i_sb;
1764 struct ntfs_sb_info *sbi = sb->s_fs_info;
1765 u64 size;
1766 u16 ulen = 0;
1767 void *to_free = NULL;
1768 struct REPARSE_DATA_BUFFER *rp;
1769 const __le16 *uname;
1770 struct ATTRIB *attr;
1771
1772 /* Reparse data present. Try to parse it. */
1773 static_assert(!offsetof(struct REPARSE_DATA_BUFFER, ReparseTag));
1774 static_assert(sizeof(u32) == sizeof(rp->ReparseTag));
1775
1776 *buffer = 0;
1777
1778 attr = ni_find_attr(ni, NULL, NULL, ATTR_REPARSE, NULL, 0, NULL, NULL);
1779 if (!attr)
1780 goto out;
1781
1782 if (!attr->non_res) {
1783 rp = resident_data_ex(attr, sizeof(struct REPARSE_DATA_BUFFER));
1784 if (!rp)
1785 goto out;
1786 size = le32_to_cpu(attr->res.data_size);
1787 } else {
1788 size = le64_to_cpu(attr->nres.data_size);
1789 rp = NULL;
1790 }
1791
1792 if (size > sbi->reparse.max_size || size <= sizeof(u32))
1793 goto out;
1794
1795 if (!rp) {
1796 rp = kmalloc(size, GFP_NOFS);
1797 if (!rp) {
1798 err = -ENOMEM;
1799 goto out;
1800 }
1801 to_free = rp;
		/* Read into a temporary buffer. */
1803 err = ntfs_read_run_nb(sbi, &ni->file.run, 0, rp, size, NULL);
1804 if (err)
1805 goto out;
1806 }
1807
1808 /* Microsoft Tag. */
1809 switch (rp->ReparseTag) {
1810 case IO_REPARSE_TAG_MOUNT_POINT:
1811 /* Mount points and junctions. */
1812 /* Can we use 'Rp->MountPointReparseBuffer.PrintNameLength'? */
1813 if (size <= offsetof(struct REPARSE_DATA_BUFFER,
1814 MountPointReparseBuffer.PathBuffer))
1815 goto out;
1816 uname = Add2Ptr(rp,
1817 offsetof(struct REPARSE_DATA_BUFFER,
1818 MountPointReparseBuffer.PathBuffer) +
1819 le16_to_cpu(rp->MountPointReparseBuffer
1820 .PrintNameOffset));
1821 ulen = le16_to_cpu(rp->MountPointReparseBuffer.PrintNameLength);
1822 break;
1823
1824 case IO_REPARSE_TAG_SYMLINK:
1825 /* FolderSymbolicLink */
1826 /* Can we use 'Rp->SymbolicLinkReparseBuffer.PrintNameLength'? */
1827 if (size <= offsetof(struct REPARSE_DATA_BUFFER,
1828 SymbolicLinkReparseBuffer.PathBuffer))
1829 goto out;
1830 uname = Add2Ptr(
1831 rp, offsetof(struct REPARSE_DATA_BUFFER,
1832 SymbolicLinkReparseBuffer.PathBuffer) +
1833 le16_to_cpu(rp->SymbolicLinkReparseBuffer
1834 .PrintNameOffset));
1835 ulen = le16_to_cpu(
1836 rp->SymbolicLinkReparseBuffer.PrintNameLength);
1837 break;
1838
1839 case IO_REPARSE_TAG_CLOUD:
1840 case IO_REPARSE_TAG_CLOUD_1:
1841 case IO_REPARSE_TAG_CLOUD_2:
1842 case IO_REPARSE_TAG_CLOUD_3:
1843 case IO_REPARSE_TAG_CLOUD_4:
1844 case IO_REPARSE_TAG_CLOUD_5:
1845 case IO_REPARSE_TAG_CLOUD_6:
1846 case IO_REPARSE_TAG_CLOUD_7:
1847 case IO_REPARSE_TAG_CLOUD_8:
1848 case IO_REPARSE_TAG_CLOUD_9:
1849 case IO_REPARSE_TAG_CLOUD_A:
1850 case IO_REPARSE_TAG_CLOUD_B:
1851 case IO_REPARSE_TAG_CLOUD_C:
1852 case IO_REPARSE_TAG_CLOUD_D:
1853 case IO_REPARSE_TAG_CLOUD_E:
1854 case IO_REPARSE_TAG_CLOUD_F:
1855 err = sizeof("OneDrive") - 1;
1856 if (err > buflen)
1857 err = buflen;
1858 memcpy(buffer, "OneDrive", err);
1859 goto out;
1860
1861 default:
1862 if (IsReparseTagMicrosoft(rp->ReparseTag)) {
1863 /* Unknown Microsoft Tag. */
1864 goto out;
1865 }
1866 if (!IsReparseTagNameSurrogate(rp->ReparseTag) ||
1867 size <= sizeof(struct REPARSE_POINT)) {
1868 goto out;
1869 }
1870
		/* User (non-Microsoft) tag. */
1872 uname = Add2Ptr(rp, sizeof(struct REPARSE_POINT));
1873 ulen = le16_to_cpu(rp->ReparseDataLength) -
1874 sizeof(struct REPARSE_POINT);
1875 }
1876
	/* Convert ulen from bytes to UTF-16 characters. */
1878 ulen >>= 1;
1879
1880 /* Check that name is available. */
1881 if (!ulen || uname + ulen > (__le16 *)Add2Ptr(rp, size))
1882 goto out;
1883
1884 /* If name is already zero terminated then truncate it now. */
1885 if (!uname[ulen - 1])
1886 ulen -= 1;
1887
1888 err = ntfs_utf16_to_nls(sbi, uname, ulen, buffer, buflen);
1889
1890 if (err < 0)
1891 goto out;
1892
1893 /* Translate Windows '\' into Linux '/'. */
1894 for (i = 0; i < err; i++) {
1895 if (buffer[i] == '\\')
1896 buffer[i] = '/';
1897 }
1898
	/* Always NUL-terminate the result. */
1900 buffer[err] = 0;
1901 out:
1902 kfree(to_free);
1903 return err;
1904 }
1905
static const char *ntfs_get_link(struct dentry *de, struct inode *inode,
				 struct delayed_call *done)
1908 {
1909 int err;
1910 char *ret;
1911
1912 if (!de)
1913 return ERR_PTR(-ECHILD);
1914
1915 ret = kmalloc(PAGE_SIZE, GFP_NOFS);
1916 if (!ret)
1917 return ERR_PTR(-ENOMEM);
1918
1919 err = ntfs_readlink_hlp(inode, ret, PAGE_SIZE);
1920 if (err < 0) {
1921 kfree(ret);
1922 return ERR_PTR(err);
1923 }
1924
1925 set_delayed_call(done, kfree_link, ret);
1926
1927 return ret;
1928 }
1929
1930 // clang-format off
1931 const struct inode_operations ntfs_link_inode_operations = {
1932 .get_link = ntfs_get_link,
1933 .setattr = ntfs3_setattr,
1934 .listxattr = ntfs_listxattr,
1935 .permission = ntfs_permission,
1936 };
1937
1938 const struct address_space_operations ntfs_aops = {
1939 .read_folio = ntfs_read_folio,
1940 .readahead = ntfs_readahead,
1941 .writepage = ntfs_writepage,
1942 .writepages = ntfs_writepages,
1943 .write_begin = ntfs_write_begin,
1944 .write_end = ntfs_write_end,
1945 .direct_IO = ntfs_direct_IO,
1946 .bmap = ntfs_bmap,
1947 .dirty_folio = block_dirty_folio,
1948 .invalidate_folio = block_invalidate_folio,
1949 };
1950
1951 const struct address_space_operations ntfs_aops_cmpr = {
1952 .read_folio = ntfs_read_folio,
1953 .readahead = ntfs_readahead,
1954 };
1955 // clang-format on
1956