1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  *
4  * Copyright (C) 2019-2021 Paragon Software GmbH, All rights reserved.
5  *
6  * TODO: Merge attr_set_size/attr_data_get_block/attr_allocate_frame?
7  */
8 
9 #include <linux/fs.h>
10 #include <linux/slab.h>
11 #include <linux/kernel.h>
12 
13 #include "debug.h"
14 #include "ntfs.h"
15 #include "ntfs_fs.h"
16 
17 /*
18  * NTFS_MIN_LOG2_OF_CLUMP and NTFS_MAX_LOG2_OF_CLUMP may be defined
19  * externally to tune the preallocation algorithm.
20  */
21 #ifndef NTFS_MIN_LOG2_OF_CLUMP
22 #define NTFS_MIN_LOG2_OF_CLUMP 16
23 #endif
24 
25 #ifndef NTFS_MAX_LOG2_OF_CLUMP
26 #define NTFS_MAX_LOG2_OF_CLUMP 26
27 #endif
28 
29 // 16M
30 #define NTFS_CLUMP_MIN (1 << (NTFS_MIN_LOG2_OF_CLUMP + 8))
31 // 16G
32 #define NTFS_CLUMP_MAX (1ull << (NTFS_MAX_LOG2_OF_CLUMP + 8))
33 
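/*
 * get_pre_allocated - Estimate how many bytes to preallocate for @size.
 *
 * Rounds @size up to a clump boundary; the clump grows with the file size,
 * from 64K (files up to 16M) to 64M (files of 16G and above).
 */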
34 static inline u64 get_pre_allocated(u64 size)
35 {
36 	u32 clump;
37 	u8 align_shift;
38 	u64 ret;
39 
40 	if (size <= NTFS_CLUMP_MIN) {
41 		clump = 1 << NTFS_MIN_LOG2_OF_CLUMP;
42 		align_shift = NTFS_MIN_LOG2_OF_CLUMP;
43 	} else if (size >= NTFS_CLUMP_MAX) {
44 		clump = 1 << NTFS_MAX_LOG2_OF_CLUMP;
45 		align_shift = NTFS_MAX_LOG2_OF_CLUMP;
46 	} else {
47 		align_shift = NTFS_MIN_LOG2_OF_CLUMP - 1 +
48 			      __ffs(size >> (8 + NTFS_MIN_LOG2_OF_CLUMP));
49 		clump = 1u << align_shift;
50 	}
51 
52 	ret = ((size + clump - 1) >> align_shift) << align_shift;
53 
54 	return ret;
55 }
56 
57 /*
58  * attr_must_be_resident
59  *
60  * Return: True if attribute must be resident.
61  */
62 static inline bool attr_must_be_resident(struct ntfs_sb_info *sbi,
63 					 enum ATTR_TYPE type)
64 {
65 	const struct ATTR_DEF_ENTRY *de;
66 
67 	switch (type) {
68 	case ATTR_STD:
69 	case ATTR_NAME:
70 	case ATTR_ID:
71 	case ATTR_LABEL:
72 	case ATTR_VOL_INFO:
73 	case ATTR_ROOT:
74 	case ATTR_EA_INFO:
75 		return true;
76 	default:
77 		de = ntfs_query_def(sbi, type);
78 		if (de && (de->flags & NTFS_ATTR_MUST_BE_RESIDENT))
79 			return true;
80 		return false;
81 	}
82 }
83 
84 /*
85  * attr_load_runs - Load all runs stored in @attr.
86  */
87 static int attr_load_runs(struct ATTRIB *attr, struct ntfs_inode *ni,
88 			  struct runs_tree *run, const CLST *vcn)
89 {
90 	int err;
91 	CLST svcn = le64_to_cpu(attr->nres.svcn);
92 	CLST evcn = le64_to_cpu(attr->nres.evcn);
93 	u32 asize;
94 	u16 run_off;
95 
96 	if (svcn >= evcn + 1 || run_is_mapped_full(run, svcn, evcn))
97 		return 0;
98 
99 	if (vcn && (evcn < *vcn || *vcn < svcn))
100 		return -EINVAL;
101 
102 	asize = le32_to_cpu(attr->size);
103 	run_off = le16_to_cpu(attr->nres.run_off);
104 
105 	if (run_off > asize)
106 		return -EINVAL;
107 
108 	err = run_unpack_ex(run, ni->mi.sbi, ni->mi.rno, svcn, evcn,
109 			    vcn ? *vcn : svcn, Add2Ptr(attr, run_off),
110 			    asize - run_off);
111 	if (err < 0)
112 		return err;
113 
114 	return 0;
115 }
116 
117 /*
118  * run_deallocate_ex - Deallocate clusters in [vcn, vcn + len) of @run.
119  */
120 static int run_deallocate_ex(struct ntfs_sb_info *sbi, struct runs_tree *run,
121 			     CLST vcn, CLST len, CLST *done, bool trim)
122 {
123 	int err = 0;
124 	CLST vcn_next, vcn0 = vcn, lcn, clen, dn = 0;
125 	size_t idx;
126 
127 	if (!len)
128 		goto out;
129 
130 	if (!run_lookup_entry(run, vcn, &lcn, &clen, &idx)) {
131 failed:
132 		run_truncate(run, vcn0);
133 		err = -EINVAL;
134 		goto out;
135 	}
136 
137 	for (;;) {
138 		if (clen > len)
139 			clen = len;
140 
141 		if (!clen) {
142 			err = -EINVAL;
143 			goto out;
144 		}
145 
146 		if (lcn != SPARSE_LCN) {
147 			if (sbi) {
148 				/* Mark bitmap range [lcn, lcn + clen) as free and trim clusters. */
149 				mark_as_free_ex(sbi, lcn, clen, trim);
150 			}
151 			dn += clen;
152 		}
153 
154 		len -= clen;
155 		if (!len)
156 			break;
157 
158 		vcn_next = vcn + clen;
159 		if (!run_get_entry(run, ++idx, &vcn, &lcn, &clen) ||
160 		    vcn != vcn_next) {
161 			/* Save memory - don't load entire run. */
162 			goto failed;
163 		}
164 	}
165 
166 out:
167 	if (done)
168 		*done += dn;
169 
170 	return err;
171 }
172 
173 /*
174  * attr_allocate_clusters - Find free space, mark it as used and store in @run.
175  */
176 int attr_allocate_clusters(struct ntfs_sb_info *sbi, struct runs_tree *run,
177 			   CLST vcn, CLST lcn, CLST len, CLST *pre_alloc,
178 			   enum ALLOCATE_OPT opt, CLST *alen, const size_t fr,
179 			   CLST *new_lcn)
180 {
181 	int err;
182 	CLST flen, vcn0 = vcn, pre = pre_alloc ? *pre_alloc : 0;
183 	size_t cnt = run->count;
184 
185 	for (;;) {
186 		err = ntfs_look_for_free_space(sbi, lcn, len + pre, &lcn, &flen,
187 					       opt);
188 
189 		if (err == -ENOSPC && pre) {
190 			pre = 0;
191 			if (*pre_alloc)
192 				*pre_alloc = 0;
193 			continue;
194 		}
195 
196 		if (err)
197 			goto out;
198 
199 		if (new_lcn && vcn == vcn0)
200 			*new_lcn = lcn;
201 
202 		/* Add new fragment into run storage. */
203 		if (!run_add_entry(run, vcn, lcn, flen, opt == ALLOCATE_MFT)) {
204 			/* Undo last 'ntfs_look_for_free_space' */
205 			mark_as_free_ex(sbi, lcn, flen, false);
206 			err = -ENOMEM;
207 			goto out;
208 		}
209 
210 		vcn += flen;
211 
212 		if (flen >= len || opt == ALLOCATE_MFT ||
213 		    (fr && run->count - cnt >= fr)) {
214 			*alen = vcn - vcn0;
215 			return 0;
216 		}
217 
218 		len -= flen;
219 	}
220 
221 out:
222 	/* Undo 'ntfs_look_for_free_space' */
223 	if (vcn - vcn0) {
224 		run_deallocate_ex(sbi, run, vcn0, vcn - vcn0, NULL, false);
225 		run_truncate(run, vcn0);
226 	}
227 
228 	return err;
229 }
230 
231 /*
232  * attr_make_nonresident
233  *
234  * If @page is not NULL, it already contains resident data
235  * and is locked (called from ni_write_frame()).
236  */
237 int attr_make_nonresident(struct ntfs_inode *ni, struct ATTRIB *attr,
238 			  struct ATTR_LIST_ENTRY *le, struct mft_inode *mi,
239 			  u64 new_size, struct runs_tree *run,
240 			  struct ATTRIB **ins_attr, struct page *page)
241 {
242 	struct ntfs_sb_info *sbi;
243 	struct ATTRIB *attr_s;
244 	struct MFT_REC *rec;
245 	u32 used, asize, rsize, aoff, align;
246 	bool is_data;
247 	CLST len, alen;
248 	char *next;
249 	int err;
250 
251 	if (attr->non_res) {
252 		*ins_attr = attr;
253 		return 0;
254 	}
255 
256 	sbi = mi->sbi;
257 	rec = mi->mrec;
258 	attr_s = NULL;
259 	used = le32_to_cpu(rec->used);
260 	asize = le32_to_cpu(attr->size);
261 	next = Add2Ptr(attr, asize);
262 	aoff = PtrOffset(rec, attr);
263 	rsize = le32_to_cpu(attr->res.data_size);
264 	is_data = attr->type == ATTR_DATA && !attr->name_len;
265 
266 	align = sbi->cluster_size;
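	/* Compressed attributes are sized in frames of 2^COMPRESSION_UNIT clusters. */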
267 	if (is_attr_compressed(attr))
268 		align <<= COMPRESSION_UNIT;
269 	len = (rsize + align - 1) >> sbi->cluster_bits;
270 
271 	run_init(run);
272 
273 	/* Make a copy of original attribute. */
274 	attr_s = kmemdup(attr, asize, GFP_NOFS);
275 	if (!attr_s) {
276 		err = -ENOMEM;
277 		goto out;
278 	}
279 
280 	if (!len) {
281 		/* Empty resident -> Empty nonresident. */
282 		alen = 0;
283 	} else {
284 		const char *data = resident_data(attr);
285 
286 		err = attr_allocate_clusters(sbi, run, 0, 0, len, NULL,
287 					     ALLOCATE_DEF, &alen, 0, NULL);
288 		if (err)
289 			goto out1;
290 
291 		if (!rsize) {
292 			/* Empty resident -> Non-empty nonresident. */
293 		} else if (!is_data) {
294 			err = ntfs_sb_write_run(sbi, run, 0, data, rsize, 0);
295 			if (err)
296 				goto out2;
297 		} else if (!page) {
298 			char *kaddr;
299 
300 			page = grab_cache_page(ni->vfs_inode.i_mapping, 0);
301 			if (!page) {
302 				err = -ENOMEM;
303 				goto out2;
304 			}
305 			kaddr = kmap_atomic(page);
306 			memcpy(kaddr, data, rsize);
307 			memset(kaddr + rsize, 0, PAGE_SIZE - rsize);
308 			kunmap_atomic(kaddr);
309 			flush_dcache_page(page);
310 			SetPageUptodate(page);
311 			set_page_dirty(page);
312 			unlock_page(page);
313 			put_page(page);
314 		}
315 	}
316 
317 	/* Remove original attribute. */
318 	used -= asize;
319 	memmove(attr, Add2Ptr(attr, asize), used - aoff);
320 	rec->used = cpu_to_le32(used);
321 	mi->dirty = true;
322 	if (le)
323 		al_remove_le(ni, le);
324 
325 	err = ni_insert_nonresident(ni, attr_s->type, attr_name(attr_s),
326 				    attr_s->name_len, run, 0, alen,
327 				    attr_s->flags, &attr, NULL, NULL);
328 	if (err)
329 		goto out3;
330 
331 	kfree(attr_s);
332 	attr->nres.data_size = cpu_to_le64(rsize);
333 	attr->nres.valid_size = attr->nres.data_size;
334 
335 	*ins_attr = attr;
336 
337 	if (is_data)
338 		ni->ni_flags &= ~NI_FLAG_RESIDENT;
339 
340 	/* Resident attribute becomes non-resident. */
341 	return 0;
342 
343 out3:
344 	attr = Add2Ptr(rec, aoff);
345 	memmove(next, attr, used - aoff);
346 	memcpy(attr, attr_s, asize);
347 	rec->used = cpu_to_le32(used + asize);
348 	mi->dirty = true;
349 out2:
350 	/* Undo: do not trim newly allocated clusters. */
351 	run_deallocate(sbi, run, false);
352 	run_close(run);
353 out1:
354 	kfree(attr_s);
355 out:
356 	return err;
357 }
358 
359 /*
360  * attr_set_size_res - Helper for attr_set_size().
361  */
362 static int attr_set_size_res(struct ntfs_inode *ni, struct ATTRIB *attr,
363 			     struct ATTR_LIST_ENTRY *le, struct mft_inode *mi,
364 			     u64 new_size, struct runs_tree *run,
365 			     struct ATTRIB **ins_attr)
366 {
367 	struct ntfs_sb_info *sbi = mi->sbi;
368 	struct MFT_REC *rec = mi->mrec;
369 	u32 used = le32_to_cpu(rec->used);
370 	u32 asize = le32_to_cpu(attr->size);
371 	u32 aoff = PtrOffset(rec, attr);
372 	u32 rsize = le32_to_cpu(attr->res.data_size);
373 	u32 tail = used - aoff - asize;
374 	char *next = Add2Ptr(attr, asize);
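	/* Resident data is 8-byte aligned inside the record. */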
375 	s64 dsize = ALIGN(new_size, 8) - ALIGN(rsize, 8);
376 
377 	if (dsize < 0) {
378 		memmove(next + dsize, next, tail);
379 	} else if (dsize > 0) {
380 		if (used + dsize > sbi->max_bytes_per_attr)
381 			return attr_make_nonresident(ni, attr, le, mi, new_size,
382 						     run, ins_attr, NULL);
383 
384 		memmove(next + dsize, next, tail);
385 		memset(next, 0, dsize);
386 	}
387 
388 	if (new_size > rsize)
389 		memset(Add2Ptr(resident_data(attr), rsize), 0,
390 		       new_size - rsize);
391 
392 	rec->used = cpu_to_le32(used + dsize);
393 	attr->size = cpu_to_le32(asize + dsize);
394 	attr->res.data_size = cpu_to_le32(new_size);
395 	mi->dirty = true;
396 	*ins_attr = attr;
397 
398 	return 0;
399 }
400 
401 /*
402  * attr_set_size - Change the size of an attribute.
403  *
404  * Extend:
405  *   - Sparse/compressed: No allocated clusters.
406  *   - Normal: Append allocated and preallocated new clusters.
407  * Shrink:
408  *   - Don't deallocate if @keep_prealloc is set.
409  */
410 int attr_set_size(struct ntfs_inode *ni, enum ATTR_TYPE type,
411 		  const __le16 *name, u8 name_len, struct runs_tree *run,
412 		  u64 new_size, const u64 *new_valid, bool keep_prealloc,
413 		  struct ATTRIB **ret)
414 {
415 	int err = 0;
416 	struct ntfs_sb_info *sbi = ni->mi.sbi;
417 	u8 cluster_bits = sbi->cluster_bits;
418 	bool is_mft =
419 		ni->mi.rno == MFT_REC_MFT && type == ATTR_DATA && !name_len;
420 	u64 old_valid, old_size, old_alloc, new_alloc, new_alloc_tmp;
421 	struct ATTRIB *attr = NULL, *attr_b;
422 	struct ATTR_LIST_ENTRY *le, *le_b;
423 	struct mft_inode *mi, *mi_b;
424 	CLST alen, vcn, lcn, new_alen, old_alen, svcn, evcn;
425 	CLST next_svcn, pre_alloc = -1, done = 0;
426 	bool is_ext, is_bad = false;
427 	u32 align;
428 	struct MFT_REC *rec;
429 
430 again:
431 	alen = 0;
432 	le_b = NULL;
433 	attr_b = ni_find_attr(ni, NULL, &le_b, type, name, name_len, NULL,
434 			      &mi_b);
435 	if (!attr_b) {
436 		err = -ENOENT;
437 		goto bad_inode;
438 	}
439 
440 	if (!attr_b->non_res) {
441 		err = attr_set_size_res(ni, attr_b, le_b, mi_b, new_size, run,
442 					&attr_b);
443 		if (err)
444 			return err;
445 
446 		/* Return if file is still resident. */
447 		if (!attr_b->non_res)
448 			goto ok1;
449 
450 		/* Layout of records may be changed, so do a full search. */
451 		goto again;
452 	}
453 
454 	is_ext = is_attr_ext(attr_b);
455 	align = sbi->cluster_size;
456 	if (is_ext)
457 		align <<= attr_b->nres.c_unit;
458 
459 	old_valid = le64_to_cpu(attr_b->nres.valid_size);
460 	old_size = le64_to_cpu(attr_b->nres.data_size);
461 	old_alloc = le64_to_cpu(attr_b->nres.alloc_size);
462 
463 again_1:
464 	old_alen = old_alloc >> cluster_bits;
465 
466 	new_alloc = (new_size + align - 1) & ~(u64)(align - 1);
467 	new_alen = new_alloc >> cluster_bits;
468 
469 	if (keep_prealloc && new_size < old_size) {
470 		attr_b->nres.data_size = cpu_to_le64(new_size);
471 		mi_b->dirty = true;
472 		goto ok;
473 	}
474 
475 	vcn = old_alen - 1;
476 
477 	svcn = le64_to_cpu(attr_b->nres.svcn);
478 	evcn = le64_to_cpu(attr_b->nres.evcn);
479 
480 	if (svcn <= vcn && vcn <= evcn) {
481 		attr = attr_b;
482 		le = le_b;
483 		mi = mi_b;
484 	} else if (!le_b) {
485 		err = -EINVAL;
486 		goto bad_inode;
487 	} else {
488 		le = le_b;
489 		attr = ni_find_attr(ni, attr_b, &le, type, name, name_len, &vcn,
490 				    &mi);
491 		if (!attr) {
492 			err = -EINVAL;
493 			goto bad_inode;
494 		}
495 
496 next_le_1:
497 		svcn = le64_to_cpu(attr->nres.svcn);
498 		evcn = le64_to_cpu(attr->nres.evcn);
499 	}
500 	/*
501 	 * Here we have:
502 	 * attr,mi,le - last attribute segment (containing 'vcn').
503 	 * attr_b,mi_b,le_b - base (primary) attribute segment.
504 	 */
505 next_le:
506 	rec = mi->mrec;
507 	err = attr_load_runs(attr, ni, run, NULL);
508 	if (err)
509 		goto out;
510 
511 	if (new_size > old_size) {
512 		CLST to_allocate;
513 		size_t free;
514 
515 		if (new_alloc <= old_alloc) {
516 			attr_b->nres.data_size = cpu_to_le64(new_size);
517 			mi_b->dirty = true;
518 			goto ok;
519 		}
520 
521 		/*
522 		 * Add clusters. In simple case we have to:
523 		 *  - allocate space (vcn, lcn, len)
524 		 *  - update packed run in 'mi'
525 		 *  - update attr->nres.evcn
526 		 *  - update attr_b->nres.data_size/attr_b->nres.alloc_size
527 		 */
528 		to_allocate = new_alen - old_alen;
529 add_alloc_in_same_attr_seg:
530 		lcn = 0;
531 		if (is_mft) {
532 			/* MFT allocates clusters from MFT zone. */
533 			pre_alloc = 0;
534 		} else if (is_ext) {
535 			/* No preallocation for sparse/compressed. */
536 			pre_alloc = 0;
537 		} else if (pre_alloc == -1) {
538 			pre_alloc = 0;
539 			if (type == ATTR_DATA && !name_len &&
540 			    sbi->options->prealloc) {
541 				pre_alloc =
542 					bytes_to_cluster(
543 						sbi,
544 						get_pre_allocated(new_size)) -
545 					new_alen;
546 			}
547 
548 			/* Get the last LCN to allocate from. */
549 			if (old_alen &&
550 			    !run_lookup_entry(run, vcn, &lcn, NULL, NULL)) {
551 				lcn = SPARSE_LCN;
552 			}
553 
554 			if (lcn == SPARSE_LCN)
555 				lcn = 0;
556 			else if (lcn)
557 				lcn += 1;
558 
559 			free = wnd_zeroes(&sbi->used.bitmap);
560 			if (to_allocate > free) {
561 				err = -ENOSPC;
562 				goto out;
563 			}
564 
565 			if (pre_alloc && to_allocate + pre_alloc > free)
566 				pre_alloc = 0;
567 		}
568 
569 		vcn = old_alen;
570 
571 		if (is_ext) {
572 			if (!run_add_entry(run, vcn, SPARSE_LCN, to_allocate,
573 					   false)) {
574 				err = -ENOMEM;
575 				goto out;
576 			}
577 			alen = to_allocate;
578 		} else {
579 			/* ~3 bytes per fragment. */
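			/* Limit fragments so the packed run still fits in this MFT record. */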
580 			err = attr_allocate_clusters(
581 				sbi, run, vcn, lcn, to_allocate, &pre_alloc,
582 				is_mft ? ALLOCATE_MFT : 0, &alen,
583 				is_mft ? 0
584 				       : (sbi->record_size -
585 					  le32_to_cpu(rec->used) + 8) /
586 							 3 +
587 						 1,
588 				NULL);
589 			if (err)
590 				goto out;
591 		}
592 
593 		done += alen;
594 		vcn += alen;
595 		if (to_allocate > alen)
596 			to_allocate -= alen;
597 		else
598 			to_allocate = 0;
599 
600 pack_runs:
601 		err = mi_pack_runs(mi, attr, run, vcn - svcn);
602 		if (err)
603 			goto undo_1;
604 
605 		next_svcn = le64_to_cpu(attr->nres.evcn) + 1;
606 		new_alloc_tmp = (u64)next_svcn << cluster_bits;
607 		attr_b->nres.alloc_size = cpu_to_le64(new_alloc_tmp);
608 		mi_b->dirty = true;
609 
610 		if (next_svcn >= vcn && !to_allocate) {
611 			/* Normal way. Update attribute and exit. */
612 			attr_b->nres.data_size = cpu_to_le64(new_size);
613 			goto ok;
614 		}
615 
616 		/* Keep at least two MFT records to avoid a recursive loop. */
617 		if (is_mft && next_svcn == vcn &&
618 		    ((u64)done << sbi->cluster_bits) >= 2 * sbi->record_size) {
619 			new_size = new_alloc_tmp;
620 			attr_b->nres.data_size = attr_b->nres.alloc_size;
621 			goto ok;
622 		}
623 
624 		if (le32_to_cpu(rec->used) < sbi->record_size) {
625 			old_alen = next_svcn;
626 			evcn = old_alen - 1;
627 			goto add_alloc_in_same_attr_seg;
628 		}
629 
630 		attr_b->nres.data_size = attr_b->nres.alloc_size;
631 		if (new_alloc_tmp < old_valid)
632 			attr_b->nres.valid_size = attr_b->nres.data_size;
633 
634 		if (type == ATTR_LIST) {
635 			err = ni_expand_list(ni);
636 			if (err)
637 				goto undo_2;
638 			if (next_svcn < vcn)
639 				goto pack_runs;
640 
641 			/* Layout of records is changed. */
642 			goto again;
643 		}
644 
645 		if (!ni->attr_list.size) {
646 			err = ni_create_attr_list(ni);
647 			/* In case of error layout of records is not changed. */
648 			if (err)
649 				goto undo_2;
650 			/* Layout of records is changed. */
651 		}
652 
653 		if (next_svcn >= vcn) {
654 			/* This is MFT data, repeat. */
655 			goto again;
656 		}
657 
658 		/* Insert new attribute segment. */
659 		err = ni_insert_nonresident(ni, type, name, name_len, run,
660 					    next_svcn, vcn - next_svcn,
661 					    attr_b->flags, &attr, &mi, NULL);
662 
663 		/*
664 		 * Layout of records may be changed.
665 		 * Find base attribute to update.
666 		 */
667 		le_b = NULL;
668 		attr_b = ni_find_attr(ni, NULL, &le_b, type, name, name_len,
669 				      NULL, &mi_b);
670 		if (!attr_b) {
671 			err = -EINVAL;
672 			goto bad_inode;
673 		}
674 
675 		if (err) {
676 			/* ni_insert_nonresident failed. */
677 			attr = NULL;
678 			goto undo_2;
679 		}
680 
681 		if (!is_mft)
682 			run_truncate_head(run, evcn + 1);
683 
684 		svcn = le64_to_cpu(attr->nres.svcn);
685 		evcn = le64_to_cpu(attr->nres.evcn);
686 
687 		/*
688 		 * Attribute is in a consistent state.
689 		 * Save this point to restore to if the next steps fail.
690 		 */
691 		old_valid = old_size = old_alloc = (u64)vcn << cluster_bits;
692 		attr_b->nres.valid_size = attr_b->nres.data_size =
693 			attr_b->nres.alloc_size = cpu_to_le64(old_size);
694 		mi_b->dirty = true;
695 		goto again_1;
696 	}
697 
698 	if (new_size != old_size ||
699 	    (new_alloc != old_alloc && !keep_prealloc)) {
700 		/*
701 		 * Truncate clusters. In simple case we have to:
702 		 *  - update packed run in 'mi'
703 		 *  - update attr->nres.evcn
704 		 *  - update attr_b->nres.data_size/attr_b->nres.alloc_size
705 		 *  - mark and trim clusters as free (vcn, lcn, len)
706 		 */
707 		CLST dlen = 0;
708 
709 		vcn = max(svcn, new_alen);
710 		new_alloc_tmp = (u64)vcn << cluster_bits;
711 
712 		if (vcn > svcn) {
713 			err = mi_pack_runs(mi, attr, run, vcn - svcn);
714 			if (err)
715 				goto out;
716 		} else if (le && le->vcn) {
717 			u16 le_sz = le16_to_cpu(le->size);
718 
719 			/*
720 			 * NOTE: List entries for one attribute are always
721 			 * the same size. We deal with the last entry (vcn==0),
722 			 * and it is not the first in the entries array
723 			 * (the list entry for the std attribute is always first).
724 			 * So it is safe to step back.
725 			 */
726 			mi_remove_attr(NULL, mi, attr);
727 
728 			if (!al_remove_le(ni, le)) {
729 				err = -EINVAL;
730 				goto bad_inode;
731 			}
732 
733 			le = (struct ATTR_LIST_ENTRY *)((u8 *)le - le_sz);
734 		} else {
735 			attr->nres.evcn = cpu_to_le64((u64)vcn - 1);
736 			mi->dirty = true;
737 		}
738 
739 		attr_b->nres.alloc_size = cpu_to_le64(new_alloc_tmp);
740 
741 		if (vcn == new_alen) {
742 			attr_b->nres.data_size = cpu_to_le64(new_size);
743 			if (new_size < old_valid)
744 				attr_b->nres.valid_size =
745 					attr_b->nres.data_size;
746 		} else {
747 			if (new_alloc_tmp <=
748 			    le64_to_cpu(attr_b->nres.data_size))
749 				attr_b->nres.data_size =
750 					attr_b->nres.alloc_size;
751 			if (new_alloc_tmp <
752 			    le64_to_cpu(attr_b->nres.valid_size))
753 				attr_b->nres.valid_size =
754 					attr_b->nres.alloc_size;
755 		}
756 		mi_b->dirty = true;
757 
758 		err = run_deallocate_ex(sbi, run, vcn, evcn - vcn + 1, &dlen,
759 					true);
760 		if (err)
761 			goto out;
762 
763 		if (is_ext) {
764 			/* dlen - really deallocated clusters. */
765 			le64_sub_cpu(&attr_b->nres.total_size,
766 				     ((u64)dlen << cluster_bits));
767 		}
768 
769 		run_truncate(run, vcn);
770 
771 		if (new_alloc_tmp <= new_alloc)
772 			goto ok;
773 
774 		old_size = new_alloc_tmp;
775 		vcn = svcn - 1;
776 
777 		if (le == le_b) {
778 			attr = attr_b;
779 			mi = mi_b;
780 			evcn = svcn - 1;
781 			svcn = 0;
782 			goto next_le;
783 		}
784 
785 		if (le->type != type || le->name_len != name_len ||
786 		    memcmp(le_name(le), name, name_len * sizeof(short))) {
787 			err = -EINVAL;
788 			goto bad_inode;
789 		}
790 
791 		err = ni_load_mi(ni, le, &mi);
792 		if (err)
793 			goto out;
794 
795 		attr = mi_find_attr(mi, NULL, type, name, name_len, &le->id);
796 		if (!attr) {
797 			err = -EINVAL;
798 			goto bad_inode;
799 		}
800 		goto next_le_1;
801 	}
802 
803 ok:
804 	if (new_valid) {
805 		__le64 valid = cpu_to_le64(min(*new_valid, new_size));
806 
807 		if (attr_b->nres.valid_size != valid) {
808 			attr_b->nres.valid_size = valid;
809 			mi_b->dirty = true;
810 		}
811 	}
812 
813 ok1:
814 	if (ret)
815 		*ret = attr_b;
816 
817 	/* Update inode_set_bytes. */
818 	if (((type == ATTR_DATA && !name_len) ||
819 	     (type == ATTR_ALLOC && name == I30_NAME))) {
820 		bool dirty = false;
821 
822 		if (ni->vfs_inode.i_size != new_size) {
823 			ni->vfs_inode.i_size = new_size;
824 			dirty = true;
825 		}
826 
827 		if (attr_b->non_res) {
828 			new_alloc = le64_to_cpu(attr_b->nres.alloc_size);
829 			if (inode_get_bytes(&ni->vfs_inode) != new_alloc) {
830 				inode_set_bytes(&ni->vfs_inode, new_alloc);
831 				dirty = true;
832 			}
833 		}
834 
835 		if (dirty) {
836 			ni->ni_flags |= NI_FLAG_UPDATE_PARENT;
837 			mark_inode_dirty(&ni->vfs_inode);
838 		}
839 	}
840 
841 	return 0;
842 
843 undo_2:
844 	vcn -= alen;
845 	attr_b->nres.data_size = cpu_to_le64(old_size);
846 	attr_b->nres.valid_size = cpu_to_le64(old_valid);
847 	attr_b->nres.alloc_size = cpu_to_le64(old_alloc);
848 
849 	/* Restore 'attr' and 'mi'. */
850 	if (attr)
851 		goto restore_run;
852 
853 	if (le64_to_cpu(attr_b->nres.svcn) <= svcn &&
854 	    svcn <= le64_to_cpu(attr_b->nres.evcn)) {
855 		attr = attr_b;
856 		le = le_b;
857 		mi = mi_b;
858 	} else if (!le_b) {
859 		err = -EINVAL;
860 		goto bad_inode;
861 	} else {
862 		le = le_b;
863 		attr = ni_find_attr(ni, attr_b, &le, type, name, name_len,
864 				    &svcn, &mi);
865 		if (!attr)
866 			goto bad_inode;
867 	}
868 
869 restore_run:
870 	if (mi_pack_runs(mi, attr, run, evcn - svcn + 1))
871 		is_bad = true;
872 
873 undo_1:
874 	run_deallocate_ex(sbi, run, vcn, alen, NULL, false);
875 
876 	run_truncate(run, vcn);
877 out:
878 	if (is_bad) {
879 bad_inode:
880 		_ntfs_bad_inode(&ni->vfs_inode);
881 	}
882 	return err;
883 }
884 
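/*
 * attr_data_get_block - Map cluster @vcn of the unnamed data attribute.
 *
 * On success *lcn/*len describe the fragment containing @vcn. If @new is
 * not NULL, clusters are allocated for sparse regions and *new reports
 * whether an allocation took place.
 */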
885 int attr_data_get_block(struct ntfs_inode *ni, CLST vcn, CLST clen, CLST *lcn,
886 			CLST *len, bool *new)
887 {
888 	int err = 0;
889 	struct runs_tree *run = &ni->file.run;
890 	struct ntfs_sb_info *sbi;
891 	u8 cluster_bits;
892 	struct ATTRIB *attr = NULL, *attr_b;
893 	struct ATTR_LIST_ENTRY *le, *le_b;
894 	struct mft_inode *mi, *mi_b;
895 	CLST hint, svcn, to_alloc, evcn1, next_svcn, asize, end;
896 	u64 total_size;
897 	u32 clst_per_frame;
898 	bool ok;
899 
900 	if (new)
901 		*new = false;
902 
903 	down_read(&ni->file.run_lock);
904 	ok = run_lookup_entry(run, vcn, lcn, len, NULL);
905 	up_read(&ni->file.run_lock);
906 
907 	if (ok && (*lcn != SPARSE_LCN || !new)) {
908 		/* Normal way. */
909 		return 0;
910 	}
911 
912 	if (!clen)
913 		clen = 1;
914 
915 	if (ok && clen > *len)
916 		clen = *len;
917 
918 	sbi = ni->mi.sbi;
919 	cluster_bits = sbi->cluster_bits;
920 
921 	ni_lock(ni);
922 	down_write(&ni->file.run_lock);
923 
924 	le_b = NULL;
925 	attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL, 0, NULL, &mi_b);
926 	if (!attr_b) {
927 		err = -ENOENT;
928 		goto out;
929 	}
930 
931 	if (!attr_b->non_res) {
932 		*lcn = RESIDENT_LCN;
933 		*len = 1;
934 		goto out;
935 	}
936 
937 	asize = le64_to_cpu(attr_b->nres.alloc_size) >> cluster_bits;
938 	if (vcn >= asize) {
939 		err = -EINVAL;
940 		goto out;
941 	}
942 
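	/* Round the allocation up to a whole compression frame. */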
943 	clst_per_frame = 1u << attr_b->nres.c_unit;
944 	to_alloc = (clen + clst_per_frame - 1) & ~(clst_per_frame - 1);
945 
946 	if (vcn + to_alloc > asize)
947 		to_alloc = asize - vcn;
948 
949 	svcn = le64_to_cpu(attr_b->nres.svcn);
950 	evcn1 = le64_to_cpu(attr_b->nres.evcn) + 1;
951 
952 	attr = attr_b;
953 	le = le_b;
954 	mi = mi_b;
955 
956 	if (le_b && (vcn < svcn || evcn1 <= vcn)) {
957 		attr = ni_find_attr(ni, attr_b, &le, ATTR_DATA, NULL, 0, &vcn,
958 				    &mi);
959 		if (!attr) {
960 			err = -EINVAL;
961 			goto out;
962 		}
963 		svcn = le64_to_cpu(attr->nres.svcn);
964 		evcn1 = le64_to_cpu(attr->nres.evcn) + 1;
965 	}
966 
967 	err = attr_load_runs(attr, ni, run, NULL);
968 	if (err)
969 		goto out;
970 
971 	if (!ok) {
972 		ok = run_lookup_entry(run, vcn, lcn, len, NULL);
973 		if (ok && (*lcn != SPARSE_LCN || !new)) {
974 			/* Normal way. */
975 			err = 0;
976 			goto ok;
977 		}
978 
979 		if (!ok && !new) {
980 			*len = 0;
981 			err = 0;
982 			goto ok;
983 		}
984 
985 		if (ok && clen > *len) {
986 			clen = *len;
987 			to_alloc = (clen + clst_per_frame - 1) &
988 				   ~(clst_per_frame - 1);
989 		}
990 	}
991 
992 	if (!is_attr_ext(attr_b)) {
993 		err = -EINVAL;
994 		goto out;
995 	}
996 
997 	/* Get the last LCN to allocate from. */
998 	hint = 0;
999 
1000 	if (vcn > evcn1) {
1001 		if (!run_add_entry(run, evcn1, SPARSE_LCN, vcn - evcn1,
1002 				   false)) {
1003 			err = -ENOMEM;
1004 			goto out;
1005 		}
1006 	} else if (vcn && !run_lookup_entry(run, vcn - 1, &hint, NULL, NULL)) {
1007 		hint = -1;
1008 	}
1009 
1010 	err = attr_allocate_clusters(
1011 		sbi, run, vcn, hint + 1, to_alloc, NULL, 0, len,
1012 		(sbi->record_size - le32_to_cpu(mi->mrec->used) + 8) / 3 + 1,
1013 		lcn);
1014 	if (err)
1015 		goto out;
1016 	*new = true;
1017 
1018 	end = vcn + *len;
1019 
1020 	total_size = le64_to_cpu(attr_b->nres.total_size) +
1021 		     ((u64)*len << cluster_bits);
1022 
1023 repack:
1024 	err = mi_pack_runs(mi, attr, run, max(end, evcn1) - svcn);
1025 	if (err)
1026 		goto out;
1027 
1028 	attr_b->nres.total_size = cpu_to_le64(total_size);
1029 	inode_set_bytes(&ni->vfs_inode, total_size);
1030 	ni->ni_flags |= NI_FLAG_UPDATE_PARENT;
1031 
1032 	mi_b->dirty = true;
1033 	mark_inode_dirty(&ni->vfs_inode);
1034 
1035 	/* Stored [vcn : next_svcn) from [vcn : end). */
1036 	next_svcn = le64_to_cpu(attr->nres.evcn) + 1;
1037 
1038 	if (end <= evcn1) {
1039 		if (next_svcn == evcn1) {
1040 			/* Normal way. Update attribute and exit. */
1041 			goto ok;
1042 		}
1043 		/* Add new segment [next_svcn : evcn1). */
1044 		if (!ni->attr_list.size) {
1045 			err = ni_create_attr_list(ni);
1046 			if (err)
1047 				goto out;
1048 			/* Layout of records is changed. */
1049 			le_b = NULL;
1050 			attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL,
1051 					      0, NULL, &mi_b);
1052 			if (!attr_b) {
1053 				err = -ENOENT;
1054 				goto out;
1055 			}
1056 
1057 			attr = attr_b;
1058 			le = le_b;
1059 			mi = mi_b;
1060 			goto repack;
1061 		}
1062 	}
1063 
1064 	svcn = evcn1;
1065 
1066 	/* Estimate next attribute. */
1067 	attr = ni_find_attr(ni, attr, &le, ATTR_DATA, NULL, 0, &svcn, &mi);
1068 
1069 	if (attr) {
1070 		CLST alloc = bytes_to_cluster(
1071 			sbi, le64_to_cpu(attr_b->nres.alloc_size));
1072 		CLST evcn = le64_to_cpu(attr->nres.evcn);
1073 
1074 		if (end < next_svcn)
1075 			end = next_svcn;
1076 		while (end > evcn) {
1077 			/* Remove segment [svcn : evcn). */
1078 			mi_remove_attr(NULL, mi, attr);
1079 
1080 			if (!al_remove_le(ni, le)) {
1081 				err = -EINVAL;
1082 				goto out;
1083 			}
1084 
1085 			if (evcn + 1 >= alloc) {
1086 				/* Last attribute segment. */
1087 				evcn1 = evcn + 1;
1088 				goto ins_ext;
1089 			}
1090 
1091 			if (ni_load_mi(ni, le, &mi)) {
1092 				attr = NULL;
1093 				goto out;
1094 			}
1095 
1096 			attr = mi_find_attr(mi, NULL, ATTR_DATA, NULL, 0,
1097 					    &le->id);
1098 			if (!attr) {
1099 				err = -EINVAL;
1100 				goto out;
1101 			}
1102 			svcn = le64_to_cpu(attr->nres.svcn);
1103 			evcn = le64_to_cpu(attr->nres.evcn);
1104 		}
1105 
1106 		if (end < svcn)
1107 			end = svcn;
1108 
1109 		err = attr_load_runs(attr, ni, run, &end);
1110 		if (err)
1111 			goto out;
1112 
1113 		evcn1 = evcn + 1;
1114 		attr->nres.svcn = cpu_to_le64(next_svcn);
1115 		err = mi_pack_runs(mi, attr, run, evcn1 - next_svcn);
1116 		if (err)
1117 			goto out;
1118 
1119 		le->vcn = cpu_to_le64(next_svcn);
1120 		ni->attr_list.dirty = true;
1121 		mi->dirty = true;
1122 
1123 		next_svcn = le64_to_cpu(attr->nres.evcn) + 1;
1124 	}
1125 ins_ext:
1126 	if (evcn1 > next_svcn) {
1127 		err = ni_insert_nonresident(ni, ATTR_DATA, NULL, 0, run,
1128 					    next_svcn, evcn1 - next_svcn,
1129 					    attr_b->flags, &attr, &mi, NULL);
1130 		if (err)
1131 			goto out;
1132 	}
1133 ok:
1134 	run_truncate_around(run, vcn);
1135 out:
1136 	up_write(&ni->file.run_lock);
1137 	ni_unlock(ni);
1138 
1139 	return err;
1140 }
1141 
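/*
 * attr_data_read_resident - Copy resident data into @page.
 *
 * Returns E_NTFS_NONRESIDENT if the data attribute is non-resident.
 */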
1142 int attr_data_read_resident(struct ntfs_inode *ni, struct page *page)
1143 {
1144 	u64 vbo;
1145 	struct ATTRIB *attr;
1146 	u32 data_size;
1147 
1148 	attr = ni_find_attr(ni, NULL, NULL, ATTR_DATA, NULL, 0, NULL, NULL);
1149 	if (!attr)
1150 		return -EINVAL;
1151 
1152 	if (attr->non_res)
1153 		return E_NTFS_NONRESIDENT;
1154 
1155 	vbo = page->index << PAGE_SHIFT;
1156 	data_size = le32_to_cpu(attr->res.data_size);
1157 	if (vbo < data_size) {
1158 		const char *data = resident_data(attr);
1159 		char *kaddr = kmap_atomic(page);
1160 		u32 use = data_size - vbo;
1161 
1162 		if (use > PAGE_SIZE)
1163 			use = PAGE_SIZE;
1164 
1165 		memcpy(kaddr, data + vbo, use);
1166 		memset(kaddr + use, 0, PAGE_SIZE - use);
1167 		kunmap_atomic(kaddr);
1168 		flush_dcache_page(page);
1169 		SetPageUptodate(page);
1170 	} else if (!PageUptodate(page)) {
1171 		zero_user_segment(page, 0, PAGE_SIZE);
1172 		SetPageUptodate(page);
1173 	}
1174 
1175 	return 0;
1176 }
1177 
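/*
 * attr_data_write_resident - Copy @page back into the resident data attribute.
 *
 * Returns E_NTFS_NONRESIDENT if the data attribute is non-resident.
 */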
1178 int attr_data_write_resident(struct ntfs_inode *ni, struct page *page)
1179 {
1180 	u64 vbo;
1181 	struct mft_inode *mi;
1182 	struct ATTRIB *attr;
1183 	u32 data_size;
1184 
1185 	attr = ni_find_attr(ni, NULL, NULL, ATTR_DATA, NULL, 0, NULL, &mi);
1186 	if (!attr)
1187 		return -EINVAL;
1188 
1189 	if (attr->non_res) {
1190 		/* Return special error code to check this case. */
1191 		return E_NTFS_NONRESIDENT;
1192 	}
1193 
1194 	vbo = page->index << PAGE_SHIFT;
1195 	data_size = le32_to_cpu(attr->res.data_size);
1196 	if (vbo < data_size) {
1197 		char *data = resident_data(attr);
1198 		char *kaddr = kmap_atomic(page);
1199 		u32 use = data_size - vbo;
1200 
1201 		if (use > PAGE_SIZE)
1202 			use = PAGE_SIZE;
1203 		memcpy(data + vbo, kaddr, use);
1204 		kunmap_atomic(kaddr);
1205 		mi->dirty = true;
1206 	}
1207 	ni->i_valid = data_size;
1208 
1209 	return 0;
1210 }
1211 
1212 /*
1213  * attr_load_runs_vcn - Load the runs containing @vcn.
1214  */
1215 int attr_load_runs_vcn(struct ntfs_inode *ni, enum ATTR_TYPE type,
1216 		       const __le16 *name, u8 name_len, struct runs_tree *run,
1217 		       CLST vcn)
1218 {
1219 	struct ATTRIB *attr;
1220 	int err;
1221 	CLST svcn, evcn;
1222 	u16 ro;
1223 
1224 	if (!ni) {
1225 		/* Is record corrupted? */
1226 		return -ENOENT;
1227 	}
1228 
1229 	attr = ni_find_attr(ni, NULL, NULL, type, name, name_len, &vcn, NULL);
1230 	if (!attr) {
1231 		/* Is record corrupted? */
1232 		return -ENOENT;
1233 	}
1234 
1235 	svcn = le64_to_cpu(attr->nres.svcn);
1236 	evcn = le64_to_cpu(attr->nres.evcn);
1237 
1238 	if (evcn < vcn || vcn < svcn) {
1239 		/* Is record corrupted? */
1240 		return -EINVAL;
1241 	}
1242 
1243 	ro = le16_to_cpu(attr->nres.run_off);
1244 
1245 	if (ro > le32_to_cpu(attr->size))
1246 		return -EINVAL;
1247 
1248 	err = run_unpack_ex(run, ni->mi.sbi, ni->mi.rno, svcn, evcn, svcn,
1249 			    Add2Ptr(attr, ro), le32_to_cpu(attr->size) - ro);
1250 	if (err < 0)
1251 		return err;
1252 	return 0;
1253 }
1254 
1255 /*
1256  * attr_load_runs_range - Load runs for the given range [from, to).
1257  */
1258 int attr_load_runs_range(struct ntfs_inode *ni, enum ATTR_TYPE type,
1259 			 const __le16 *name, u8 name_len, struct runs_tree *run,
1260 			 u64 from, u64 to)
1261 {
1262 	struct ntfs_sb_info *sbi = ni->mi.sbi;
1263 	u8 cluster_bits = sbi->cluster_bits;
1264 	CLST vcn;
1265 	CLST vcn_last = (to - 1) >> cluster_bits;
1266 	CLST lcn, clen;
1267 	int err;
1268 
1269 	for (vcn = from >> cluster_bits; vcn <= vcn_last; vcn += clen) {
1270 		if (!run_lookup_entry(run, vcn, &lcn, &clen, NULL)) {
1271 			err = attr_load_runs_vcn(ni, type, name, name_len, run,
1272 						 vcn);
1273 			if (err)
1274 				return err;
1275 			clen = 0; /* Next run_lookup_entry(vcn) must succeed. */
1276 		}
1277 	}
1278 
1279 	return 0;
1280 }
1281 
1282 #ifdef CONFIG_NTFS3_LZX_XPRESS
1283 /*
1284  * attr_wof_frame_info
1285  *
1286  * Read the header of an Xpress/LZX-compressed file to get info about a frame.
1287  */
1288 int attr_wof_frame_info(struct ntfs_inode *ni, struct ATTRIB *attr,
1289 			struct runs_tree *run, u64 frame, u64 frames,
1290 			u8 frame_bits, u32 *ondisk_size, u64 *vbo_data)
1291 {
1292 	struct ntfs_sb_info *sbi = ni->mi.sbi;
1293 	u64 vbo[2], off[2], wof_size;
1294 	u32 voff;
1295 	u8 bytes_per_off;
1296 	char *addr;
1297 	struct page *page;
1298 	int i, err;
1299 	__le32 *off32;
1300 	__le64 *off64;
1301 
1302 	if (ni->vfs_inode.i_size < 0x100000000ull) {
1303 		/* File starts with array of 32 bit offsets. */
1304 		bytes_per_off = sizeof(__le32);
1305 		vbo[1] = frame << 2;
1306 		*vbo_data = frames << 2;
1307 	} else {
1308 		/* File starts with array of 64 bit offsets. */
1309 		bytes_per_off = sizeof(__le64);
1310 		vbo[1] = frame << 3;
1311 		*vbo_data = frames << 3;
1312 	}
1313 
1314 	/*
1315 	 * Read 4/8 bytes at [vbo - 4(8)] == offset where compressed frame starts.
1316 	 * Read 4/8 bytes at [vbo] == offset where compressed frame ends.
1317 	 */
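	/*
	 * I.e. with 32-bit offsets, frame @frame spans
	 * [off32[frame - 1], off32[frame]) relative to the end of the
	 * offset table; the first frame starts at offset 0.
	 */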
1318 	if (!attr->non_res) {
1319 		if (vbo[1] + bytes_per_off > le32_to_cpu(attr->res.data_size)) {
1320 			ntfs_inode_err(&ni->vfs_inode, "is corrupted");
1321 			return -EINVAL;
1322 		}
1323 		addr = resident_data(attr);
1324 
1325 		if (bytes_per_off == sizeof(__le32)) {
1326 			off32 = Add2Ptr(addr, vbo[1]);
1327 			off[0] = vbo[1] ? le32_to_cpu(off32[-1]) : 0;
1328 			off[1] = le32_to_cpu(off32[0]);
1329 		} else {
1330 			off64 = Add2Ptr(addr, vbo[1]);
1331 			off[0] = vbo[1] ? le64_to_cpu(off64[-1]) : 0;
1332 			off[1] = le64_to_cpu(off64[0]);
1333 		}
1334 
1335 		*vbo_data += off[0];
1336 		*ondisk_size = off[1] - off[0];
1337 		return 0;
1338 	}
1339 
1340 	wof_size = le64_to_cpu(attr->nres.data_size);
1341 	down_write(&ni->file.run_lock);
1342 	page = ni->file.offs_page;
1343 	if (!page) {
1344 		page = alloc_page(GFP_KERNEL);
1345 		if (!page) {
1346 			err = -ENOMEM;
1347 			goto out;
1348 		}
1349 		page->index = -1;
1350 		ni->file.offs_page = page;
1351 	}
1352 	lock_page(page);
1353 	addr = page_address(page);
1354 
1355 	if (vbo[1]) {
1356 		voff = vbo[1] & (PAGE_SIZE - 1);
1357 		vbo[0] = vbo[1] - bytes_per_off;
1358 		i = 0;
1359 	} else {
1360 		voff = 0;
1361 		vbo[0] = 0;
1362 		off[0] = 0;
1363 		i = 1;
1364 	}
1365 
1366 	do {
1367 		pgoff_t index = vbo[i] >> PAGE_SHIFT;
1368 
1369 		if (index != page->index) {
1370 			u64 from = vbo[i] & ~(u64)(PAGE_SIZE - 1);
1371 			u64 to = min(from + PAGE_SIZE, wof_size);
1372 
1373 			err = attr_load_runs_range(ni, ATTR_DATA, WOF_NAME,
1374 						   ARRAY_SIZE(WOF_NAME), run,
1375 						   from, to);
1376 			if (err)
1377 				goto out1;
1378 
1379 			err = ntfs_bio_pages(sbi, run, &page, 1, from,
1380 					     to - from, REQ_OP_READ);
1381 			if (err) {
1382 				page->index = -1;
1383 				goto out1;
1384 			}
1385 			page->index = index;
1386 		}
1387 
1388 		if (i) {
1389 			if (bytes_per_off == sizeof(__le32)) {
1390 				off32 = Add2Ptr(addr, voff);
1391 				off[1] = le32_to_cpu(*off32);
1392 			} else {
1393 				off64 = Add2Ptr(addr, voff);
1394 				off[1] = le64_to_cpu(*off64);
1395 			}
1396 		} else if (!voff) {
1397 			if (bytes_per_off == sizeof(__le32)) {
1398 				off32 = Add2Ptr(addr, PAGE_SIZE - sizeof(u32));
1399 				off[0] = le32_to_cpu(*off32);
1400 			} else {
1401 				off64 = Add2Ptr(addr, PAGE_SIZE - sizeof(u64));
1402 				off[0] = le64_to_cpu(*off64);
1403 			}
1404 		} else {
1405 			/* Two values in one page. */
1406 			if (bytes_per_off == sizeof(__le32)) {
1407 				off32 = Add2Ptr(addr, voff);
1408 				off[0] = le32_to_cpu(off32[-1]);
1409 				off[1] = le32_to_cpu(off32[0]);
1410 			} else {
1411 				off64 = Add2Ptr(addr, voff);
1412 				off[0] = le64_to_cpu(off64[-1]);
1413 				off[1] = le64_to_cpu(off64[0]);
1414 			}
1415 			break;
1416 		}
1417 	} while (++i < 2);
1418 
1419 	*vbo_data += off[0];
1420 	*ondisk_size = off[1] - off[0];
1421 
1422 out1:
1423 	unlock_page(page);
1424 out:
1425 	up_write(&ni->file.run_lock);
1426 	return err;
1427 }
1428 #endif
1429 
1430 /*
1431  * attr_is_frame_compressed - Detect whether the frame is compressed.
1432  */
1433 int attr_is_frame_compressed(struct ntfs_inode *ni, struct ATTRIB *attr,
1434 			     CLST frame, CLST *clst_data)
1435 {
1436 	int err;
1437 	u32 clst_frame;
1438 	CLST clen, lcn, vcn, alen, slen, vcn_next;
1439 	size_t idx;
1440 	struct runs_tree *run;
1441 
1442 	*clst_data = 0;
1443 
1444 	if (!is_attr_compressed(attr))
1445 		return 0;
1446 
1447 	if (!attr->non_res)
1448 		return 0;
1449 
1450 	clst_frame = 1u << attr->nres.c_unit;
1451 	vcn = frame * clst_frame;
1452 	run = &ni->file.run;
1453 
1454 	if (!run_lookup_entry(run, vcn, &lcn, &clen, &idx)) {
1455 		err = attr_load_runs_vcn(ni, attr->type, attr_name(attr),
1456 					 attr->name_len, run, vcn);
1457 		if (err)
1458 			return err;
1459 
1460 		if (!run_lookup_entry(run, vcn, &lcn, &clen, &idx))
1461 			return -EINVAL;
1462 	}
1463 
1464 	if (lcn == SPARSE_LCN) {
1465 		/* Sparse frame. */
1466 		return 0;
1467 	}
1468 
1469 	if (clen >= clst_frame) {
1470 		/*
1471 		 * The frame is not compressed because
1472 		 * it does not contain any sparse clusters.
1473 		 */
1474 		*clst_data = clst_frame;
1475 		return 0;
1476 	}
1477 
1478 	alen = bytes_to_cluster(ni->mi.sbi, le64_to_cpu(attr->nres.alloc_size));
1479 	slen = 0;
1480 	*clst_data = clen;
1481 
1482 	/*
1483 	 * The frame is compressed if *clst_data + slen >= clst_frame.
1484 	 * Check next fragments.
1485 	 */
1486 	while ((vcn += clen) < alen) {
1487 		vcn_next = vcn;
1488 
1489 		if (!run_get_entry(run, ++idx, &vcn, &lcn, &clen) ||
1490 		    vcn_next != vcn) {
1491 			err = attr_load_runs_vcn(ni, attr->type,
1492 						 attr_name(attr),
1493 						 attr->name_len, run, vcn_next);
1494 			if (err)
1495 				return err;
1496 			vcn = vcn_next;
1497 
1498 			if (!run_lookup_entry(run, vcn, &lcn, &clen, &idx))
1499 				return -EINVAL;
1500 		}
1501 
1502 		if (lcn == SPARSE_LCN) {
1503 			slen += clen;
1504 		} else {
1505 			if (slen) {
1506 				/*
1507 				 * Data clusters + sparse clusters
1508 				 * do not add up to a full frame.
1509 				 */
1510 				return -EINVAL;
1511 			}
1512 			*clst_data += clen;
1513 		}
1514 
1515 		if (*clst_data + slen >= clst_frame) {
1516 			if (!slen) {
1517 				/*
1518 				 * There are no sparse clusters in this frame,
1519 				 * so it is not compressed.
1520 				 */
1521 				*clst_data = clst_frame;
1522 			} else {
1523 				/* Frame is compressed. */
1524 			}
1525 			break;
1526 		}
1527 	}
1528 
1529 	return 0;
1530 }
1531 
1532 /*
1533  * attr_allocate_frame - Allocate/free clusters for @frame.
1534  *
1535  * Assumed: down_write(&ni->file.run_lock);
1536  */
1537 int attr_allocate_frame(struct ntfs_inode *ni, CLST frame, size_t compr_size,
1538 			u64 new_valid)
1539 {
1540 	int err = 0;
1541 	struct runs_tree *run = &ni->file.run;
1542 	struct ntfs_sb_info *sbi = ni->mi.sbi;
1543 	struct ATTRIB *attr = NULL, *attr_b;
1544 	struct ATTR_LIST_ENTRY *le, *le_b;
1545 	struct mft_inode *mi, *mi_b;
1546 	CLST svcn, evcn1, next_svcn, lcn, len;
1547 	CLST vcn, end, clst_data;
1548 	u64 total_size, valid_size, data_size;
1549 
1550 	le_b = NULL;
1551 	attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL, 0, NULL, &mi_b);
1552 	if (!attr_b)
1553 		return -ENOENT;
1554 
1555 	if (!is_attr_ext(attr_b))
1556 		return -EINVAL;
1557 
1558 	vcn = frame << NTFS_LZNT_CUNIT;
1559 	total_size = le64_to_cpu(attr_b->nres.total_size);
1560 
1561 	svcn = le64_to_cpu(attr_b->nres.svcn);
1562 	evcn1 = le64_to_cpu(attr_b->nres.evcn) + 1;
1563 	data_size = le64_to_cpu(attr_b->nres.data_size);
1564 
1565 	if (svcn <= vcn && vcn < evcn1) {
1566 		attr = attr_b;
1567 		le = le_b;
1568 		mi = mi_b;
1569 	} else if (!le_b) {
1570 		err = -EINVAL;
1571 		goto out;
1572 	} else {
1573 		le = le_b;
1574 		attr = ni_find_attr(ni, attr_b, &le, ATTR_DATA, NULL, 0, &vcn,
1575 				    &mi);
1576 		if (!attr) {
1577 			err = -EINVAL;
1578 			goto out;
1579 		}
1580 		svcn = le64_to_cpu(attr->nres.svcn);
1581 		evcn1 = le64_to_cpu(attr->nres.evcn) + 1;
1582 	}
1583 
1584 	err = attr_load_runs(attr, ni, run, NULL);
1585 	if (err)
1586 		goto out;
1587 
1588 	err = attr_is_frame_compressed(ni, attr_b, frame, &clst_data);
1589 	if (err)
1590 		goto out;
1591 
1592 	total_size -= (u64)clst_data << sbi->cluster_bits;
1593 
1594 	len = bytes_to_cluster(sbi, compr_size);
1595 
1596 	if (len == clst_data)
1597 		goto out;
1598 
1599 	if (len < clst_data) {
1600 		err = run_deallocate_ex(sbi, run, vcn + len, clst_data - len,
1601 					NULL, true);
1602 		if (err)
1603 			goto out;
1604 
1605 		if (!run_add_entry(run, vcn + len, SPARSE_LCN, clst_data - len,
1606 				   false)) {
1607 			err = -ENOMEM;
1608 			goto out;
1609 		}
1610 		end = vcn + clst_data;
1611 		/* Run contains updated range [vcn + len : end). */
1612 	} else {
1613 		CLST alen, hint = 0;
1614 		/* Get the last LCN to allocate from. */
1615 		if (vcn + clst_data &&
1616 		    !run_lookup_entry(run, vcn + clst_data - 1, &hint, NULL,
1617 				      NULL)) {
1618 			hint = -1;
1619 		}
1620 
1621 		err = attr_allocate_clusters(sbi, run, vcn + clst_data,
1622 					     hint + 1, len - clst_data, NULL, 0,
1623 					     &alen, 0, &lcn);
1624 		if (err)
1625 			goto out;
1626 
1627 		end = vcn + len;
1628 		/* Run contains updated range [vcn + clst_data : end). */
1629 	}
1630 
1631 	total_size += (u64)len << sbi->cluster_bits;
1632 
1633 repack:
1634 	err = mi_pack_runs(mi, attr, run, max(end, evcn1) - svcn);
1635 	if (err)
1636 		goto out;
1637 
1638 	attr_b->nres.total_size = cpu_to_le64(total_size);
1639 	inode_set_bytes(&ni->vfs_inode, total_size);
1640 
1641 	mi_b->dirty = true;
1642 	mark_inode_dirty(&ni->vfs_inode);
1643 
1644 	/* Stored [vcn : next_svcn) from [vcn : end). */
1645 	next_svcn = le64_to_cpu(attr->nres.evcn) + 1;
1646 
1647 	if (end <= evcn1) {
1648 		if (next_svcn == evcn1) {
1649 			/* Normal way. Update attribute and exit. */
1650 			goto ok;
1651 		}
1652 		/* Add new segment [next_svcn : evcn1). */
1653 		if (!ni->attr_list.size) {
1654 			err = ni_create_attr_list(ni);
1655 			if (err)
1656 				goto out;
1657 			/* Layout of records is changed. */
1658 			le_b = NULL;
1659 			attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL,
1660 					      0, NULL, &mi_b);
1661 			if (!attr_b) {
1662 				err = -ENOENT;
1663 				goto out;
1664 			}
1665 
1666 			attr = attr_b;
1667 			le = le_b;
1668 			mi = mi_b;
1669 			goto repack;
1670 		}
1671 	}
1672 
1673 	svcn = evcn1;
1674 
1675 	/* Estimate next attribute. */
1676 	attr = ni_find_attr(ni, attr, &le, ATTR_DATA, NULL, 0, &svcn, &mi);
1677 
1678 	if (attr) {
1679 		CLST alloc = bytes_to_cluster(
1680 			sbi, le64_to_cpu(attr_b->nres.alloc_size));
1681 		CLST evcn = le64_to_cpu(attr->nres.evcn);
1682 
1683 		if (end < next_svcn)
1684 			end = next_svcn;
1685 		while (end > evcn) {
1686 			/* Remove segment [svcn : evcn). */
1687 			mi_remove_attr(NULL, mi, attr);
1688 
1689 			if (!al_remove_le(ni, le)) {
1690 				err = -EINVAL;
1691 				goto out;
1692 			}
1693 
1694 			if (evcn + 1 >= alloc) {
1695 				/* Last attribute segment. */
1696 				evcn1 = evcn + 1;
1697 				goto ins_ext;
1698 			}
1699 
1700 			if (ni_load_mi(ni, le, &mi)) {
1701 				attr = NULL;
1702 				goto out;
1703 			}
1704 
1705 			attr = mi_find_attr(mi, NULL, ATTR_DATA, NULL, 0,
1706 					    &le->id);
1707 			if (!attr) {
1708 				err = -EINVAL;
1709 				goto out;
1710 			}
1711 			svcn = le64_to_cpu(attr->nres.svcn);
1712 			evcn = le64_to_cpu(attr->nres.evcn);
1713 		}
1714 
1715 		if (end < svcn)
1716 			end = svcn;
1717 
1718 		err = attr_load_runs(attr, ni, run, &end);
1719 		if (err)
1720 			goto out;
1721 
1722 		evcn1 = evcn + 1;
1723 		attr->nres.svcn = cpu_to_le64(next_svcn);
1724 		err = mi_pack_runs(mi, attr, run, evcn1 - next_svcn);
1725 		if (err)
1726 			goto out;
1727 
1728 		le->vcn = cpu_to_le64(next_svcn);
1729 		ni->attr_list.dirty = true;
1730 		mi->dirty = true;
1731 
1732 		next_svcn = le64_to_cpu(attr->nres.evcn) + 1;
1733 	}
1734 ins_ext:
1735 	if (evcn1 > next_svcn) {
1736 		err = ni_insert_nonresident(ni, ATTR_DATA, NULL, 0, run,
1737 					    next_svcn, evcn1 - next_svcn,
1738 					    attr_b->flags, &attr, &mi, NULL);
1739 		if (err)
1740 			goto out;
1741 	}
1742 ok:
1743 	run_truncate_around(run, vcn);
1744 out:
1745 	if (new_valid > data_size)
1746 		new_valid = data_size;
1747 
1748 	valid_size = le64_to_cpu(attr_b->nres.valid_size);
1749 	if (new_valid != valid_size) {
1750 		attr_b->nres.valid_size = cpu_to_le64(new_valid);
1751 		mi_b->dirty = true;
1752 	}
1753 
1754 	return err;
1755 }
1756 
1757 /*
1758  * attr_collapse_range - Collapse a range in the file.
1759  */
1760 int attr_collapse_range(struct ntfs_inode *ni, u64 vbo, u64 bytes)
1761 {
1762 	int err = 0;
1763 	struct runs_tree *run = &ni->file.run;
1764 	struct ntfs_sb_info *sbi = ni->mi.sbi;
1765 	struct ATTRIB *attr = NULL, *attr_b;
1766 	struct ATTR_LIST_ENTRY *le, *le_b;
1767 	struct mft_inode *mi, *mi_b;
1768 	CLST svcn, evcn1, len, dealloc, alen;
1769 	CLST vcn, end;
1770 	u64 valid_size, data_size, alloc_size, total_size;
1771 	u32 mask;
1772 	__le16 a_flags;
1773 
1774 	if (!bytes)
1775 		return 0;
1776 
1777 	le_b = NULL;
1778 	attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL, 0, NULL, &mi_b);
1779 	if (!attr_b)
1780 		return -ENOENT;
1781 
1782 	if (!attr_b->non_res) {
1783 		/* Attribute is resident. Nothing to do? */
1784 		return 0;
1785 	}
1786 
1787 	data_size = le64_to_cpu(attr_b->nres.data_size);
1788 	alloc_size = le64_to_cpu(attr_b->nres.alloc_size);
1789 	a_flags = attr_b->flags;
1790 
1791 	if (is_attr_ext(attr_b)) {
1792 		total_size = le64_to_cpu(attr_b->nres.total_size);
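		/* E.g. 4K clusters and c_unit == 4 give a 64K frame (mask == 0xffff). */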
1793 		mask = (sbi->cluster_size << attr_b->nres.c_unit) - 1;
1794 	} else {
1795 		total_size = alloc_size;
1796 		mask = sbi->cluster_mask;
1797 	}
1798 
1799 	if ((vbo & mask) || (bytes & mask)) {
1800 		/* Only cluster-aligned ranges can be collapsed. */
1801 		return -EINVAL;
1802 	}
1803 
1804 	if (vbo > data_size)
1805 		return -EINVAL;
1806 
1807 	down_write(&ni->file.run_lock);
1808 
1809 	if (vbo + bytes >= data_size) {
1810 		u64 new_valid = min(ni->i_valid, vbo);
1811 
1812 		/* Simply truncate the file at 'vbo'. */
1813 		truncate_setsize(&ni->vfs_inode, vbo);
1814 		err = attr_set_size(ni, ATTR_DATA, NULL, 0, &ni->file.run, vbo,
1815 				    &new_valid, true, NULL);
1816 
1817 		if (!err && new_valid < ni->i_valid)
1818 			ni->i_valid = new_valid;
1819 
1820 		goto out;
1821 	}
1822 
1823 	/*
1824 	 * Enumerate all attribute segments and collapse.
1825 	 */
1826 	alen = alloc_size >> sbi->cluster_bits;
1827 	vcn = vbo >> sbi->cluster_bits;
1828 	len = bytes >> sbi->cluster_bits;
1829 	end = vcn + len;
1830 	dealloc = 0;
1831 
1832 	svcn = le64_to_cpu(attr_b->nres.svcn);
1833 	evcn1 = le64_to_cpu(attr_b->nres.evcn) + 1;
1834 
1835 	if (svcn <= vcn && vcn < evcn1) {
1836 		attr = attr_b;
1837 		le = le_b;
1838 		mi = mi_b;
1839 	} else if (!le_b) {
1840 		err = -EINVAL;
1841 		goto out;
1842 	} else {
1843 		le = le_b;
1844 		attr = ni_find_attr(ni, attr_b, &le, ATTR_DATA, NULL, 0, &vcn,
1845 				    &mi);
1846 		if (!attr) {
1847 			err = -EINVAL;
1848 			goto out;
1849 		}
1850 
1851 		svcn = le64_to_cpu(attr->nres.svcn);
1852 		evcn1 = le64_to_cpu(attr->nres.evcn) + 1;
1853 	}
1854 
1855 	for (;;) {
1856 		if (svcn >= end) {
1857 			/* Shift VCNs down by 'len'. */
1858 			attr->nres.svcn = cpu_to_le64(svcn - len);
1859 			attr->nres.evcn = cpu_to_le64(evcn1 - 1 - len);
1860 			if (le) {
1861 				le->vcn = attr->nres.svcn;
1862 				ni->attr_list.dirty = true;
1863 			}
1864 			mi->dirty = true;
1865 		} else if (svcn < vcn || end < evcn1) {
1866 			CLST vcn1, eat, next_svcn;
1867 
1868 			/* Collapse a part of this attribute segment. */
1869 			err = attr_load_runs(attr, ni, run, &svcn);
1870 			if (err)
1871 				goto out;
1872 			vcn1 = max(vcn, svcn);
1873 			eat = min(end, evcn1) - vcn1;
1874 
1875 			err = run_deallocate_ex(sbi, run, vcn1, eat, &dealloc,
1876 						true);
1877 			if (err)
1878 				goto out;
1879 
1880 			if (!run_collapse_range(run, vcn1, eat)) {
1881 				err = -ENOMEM;
1882 				goto out;
1883 			}
1884 
1885 			if (svcn >= vcn) {
1886 				/* Shift VCN */
1887 				attr->nres.svcn = cpu_to_le64(vcn);
1888 				if (le) {
1889 					le->vcn = attr->nres.svcn;
1890 					ni->attr_list.dirty = true;
1891 				}
1892 			}
1893 
1894 			err = mi_pack_runs(mi, attr, run, evcn1 - svcn - eat);
1895 			if (err)
1896 				goto out;
1897 
1898 			next_svcn = le64_to_cpu(attr->nres.evcn) + 1;
1899 			if (next_svcn + eat < evcn1) {
1900 				err = ni_insert_nonresident(
1901 					ni, ATTR_DATA, NULL, 0, run, next_svcn,
1902 					evcn1 - eat - next_svcn, a_flags, &attr,
1903 					&mi, &le);
1904 				if (err)
1905 					goto out;
1906 
1907 				/* Layout of records may be changed. */
1908 				attr_b = NULL;
1909 			}
1910 
1911 			/* Free all allocated memory. */
1912 			run_truncate(run, 0);
1913 		} else {
1914 			u16 le_sz;
1915 			u16 roff = le16_to_cpu(attr->nres.run_off);
1916 
1917 			if (roff > le32_to_cpu(attr->size)) {
1918 				err = -EINVAL;
1919 				goto out;
1920 			}
1921 
1922 			run_unpack_ex(RUN_DEALLOCATE, sbi, ni->mi.rno, svcn,
1923 				      evcn1 - 1, svcn, Add2Ptr(attr, roff),
1924 				      le32_to_cpu(attr->size) - roff);
1925 
1926 			/* Delete this attribute segment. */
1927 			mi_remove_attr(NULL, mi, attr);
1928 			if (!le)
1929 				break;
1930 
1931 			le_sz = le16_to_cpu(le->size);
1932 			if (!al_remove_le(ni, le)) {
1933 				err = -EINVAL;
1934 				goto out;
1935 			}
1936 
1937 			if (evcn1 >= alen)
1938 				break;
1939 
1940 			if (!svcn) {
1941 				/* Load next record that contains this attribute. */
1942 				if (ni_load_mi(ni, le, &mi)) {
1943 					err = -EINVAL;
1944 					goto out;
1945 				}
1946 
1947 				/* Look for required attribute. */
1948 				attr = mi_find_attr(mi, NULL, ATTR_DATA, NULL,
1949 						    0, &le->id);
1950 				if (!attr) {
1951 					err = -EINVAL;
1952 					goto out;
1953 				}
1954 				goto next_attr;
1955 			}
1956 			le = (struct ATTR_LIST_ENTRY *)((u8 *)le - le_sz);
1957 		}
1958 
1959 		if (evcn1 >= alen)
1960 			break;
1961 
1962 		attr = ni_enum_attr_ex(ni, attr, &le, &mi);
1963 		if (!attr) {
1964 			err = -EINVAL;
1965 			goto out;
1966 		}
1967 
1968 next_attr:
1969 		svcn = le64_to_cpu(attr->nres.svcn);
1970 		evcn1 = le64_to_cpu(attr->nres.evcn) + 1;
1971 	}
1972 
1973 	if (!attr_b) {
1974 		le_b = NULL;
1975 		attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL, 0, NULL,
1976 				      &mi_b);
1977 		if (!attr_b) {
1978 			err = -ENOENT;
1979 			goto out;
1980 		}
1981 	}
1982 
1983 	data_size -= bytes;
1984 	valid_size = ni->i_valid;
1985 	if (vbo + bytes <= valid_size)
1986 		valid_size -= bytes;
1987 	else if (vbo < valid_size)
1988 		valid_size = vbo;
1989 
1990 	attr_b->nres.alloc_size = cpu_to_le64(alloc_size - bytes);
1991 	attr_b->nres.data_size = cpu_to_le64(data_size);
1992 	attr_b->nres.valid_size = cpu_to_le64(min(valid_size, data_size));
1993 	total_size -= (u64)dealloc << sbi->cluster_bits;
1994 	if (is_attr_ext(attr_b))
1995 		attr_b->nres.total_size = cpu_to_le64(total_size);
1996 	mi_b->dirty = true;
1997 
1998 	/* Update inode size. */
1999 	ni->i_valid = valid_size;
2000 	ni->vfs_inode.i_size = data_size;
2001 	inode_set_bytes(&ni->vfs_inode, total_size);
2002 	ni->ni_flags |= NI_FLAG_UPDATE_PARENT;
2003 	mark_inode_dirty(&ni->vfs_inode);
2004 
2005 out:
2006 	up_write(&ni->file.run_lock);
2007 	if (err)
2008 		_ntfs_bad_inode(&ni->vfs_inode);
2009 
2010 	return err;
2011 }
2012 
2013 /*
2014  * attr_punch_hole
2015  *
2016  * Not for normal files (only sparse or compressed attributes).
2017  */
2018 int attr_punch_hole(struct ntfs_inode *ni, u64 vbo, u64 bytes, u32 *frame_size)
2019 {
2020 	int err = 0;
2021 	struct runs_tree *run = &ni->file.run;
2022 	struct ntfs_sb_info *sbi = ni->mi.sbi;
2023 	struct ATTRIB *attr = NULL, *attr_b;
2024 	struct ATTR_LIST_ENTRY *le, *le_b;
2025 	struct mft_inode *mi, *mi_b;
2026 	CLST svcn, evcn1, vcn, len, end, alen, hole, next_svcn;
2027 	u64 total_size, alloc_size;
2028 	u32 mask;
2029 	__le16 a_flags;
2030 	struct runs_tree run2;
2031 
2032 	if (!bytes)
2033 		return 0;
2034 
2035 	le_b = NULL;
2036 	attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL, 0, NULL, &mi_b);
2037 	if (!attr_b)
2038 		return -ENOENT;
2039 
2040 	if (!attr_b->non_res) {
2041 		u32 data_size = le32_to_cpu(attr_b->res.data_size);
2042 		u32 from, to;
2043 
2044 		if (vbo > data_size)
2045 			return 0;
2046 
2047 		from = vbo;
2048 		to = min_t(u64, vbo + bytes, data_size);
2049 		memset(Add2Ptr(resident_data(attr_b), from), 0, to - from);
2050 		return 0;
2051 	}
2052 
2053 	if (!is_attr_ext(attr_b))
2054 		return -EOPNOTSUPP;
2055 
2056 	alloc_size = le64_to_cpu(attr_b->nres.alloc_size);
2057 	total_size = le64_to_cpu(attr_b->nres.total_size);
2058 
2059 	if (vbo >= alloc_size) {
2060 		/* NOTE: It is allowed. */
2061 		return 0;
2062 	}
2063 
2064 	mask = (sbi->cluster_size << attr_b->nres.c_unit) - 1;
2065 
2066 	bytes += vbo;
2067 	if (bytes > alloc_size)
2068 		bytes = alloc_size;
2069 	bytes -= vbo;
2070 
2071 	if ((vbo & mask) || (bytes & mask)) {
2072 		/* We have to zero one or more ranges. */
2073 		if (!frame_size) {
2074 			/* Caller insists range is aligned. */
2075 			return -EINVAL;
2076 		}
2077 		*frame_size = mask + 1;
2078 		return E_NTFS_NOTALIGNED;
2079 	}

	down_write(&ni->file.run_lock);
	run_init(&run2);
	run_truncate(run, 0);

	/*
	 * Enumerate all attribute segments and punch hole where necessary.
	 */
	alen = alloc_size >> sbi->cluster_bits;
	vcn = vbo >> sbi->cluster_bits;
	len = bytes >> sbi->cluster_bits;
	end = vcn + len;
	hole = 0;
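	/*
	 * For every segment overlapping [vcn, end): first count the
	 * clusters that would be freed without touching anything, then
	 * clone the run for undo, replace the range with a sparse entry,
	 * repack the segment, and only then really deallocate using the
	 * cloned (original) mapping.
	 */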

	svcn = le64_to_cpu(attr_b->nres.svcn);
	evcn1 = le64_to_cpu(attr_b->nres.evcn) + 1;
	a_flags = attr_b->flags;

	if (svcn <= vcn && vcn < evcn1) {
		attr = attr_b;
		le = le_b;
		mi = mi_b;
	} else if (!le_b) {
		err = -EINVAL;
		goto bad_inode;
	} else {
		le = le_b;
		attr = ni_find_attr(ni, attr_b, &le, ATTR_DATA, NULL, 0, &vcn,
				    &mi);
		if (!attr) {
			err = -EINVAL;
			goto bad_inode;
		}

		svcn = le64_to_cpu(attr->nres.svcn);
		evcn1 = le64_to_cpu(attr->nres.evcn) + 1;
	}

	while (svcn < end) {
		CLST vcn1, zero, hole2 = hole;

		err = attr_load_runs(attr, ni, run, &svcn);
		if (err)
			goto done;
		vcn1 = max(vcn, svcn);
		zero = min(end, evcn1) - vcn1;

		/*
		 * Check the range [vcn1, vcn1 + zero).
		 * Calculate how many clusters there are.
		 * Don't do any destructive actions.
		 */
		err = run_deallocate_ex(NULL, run, vcn1, zero, &hole2, false);
		if (err)
			goto done;

		/* Check if the required range is already a hole. */
		if (hole2 == hole)
			goto next_attr;

		/* Make a clone of run to undo. */
		err = run_clone(run, &run2);
		if (err)
			goto done;

		/* Make the range [vcn1, vcn1 + zero) a hole (sparse). */
		if (!run_add_entry(run, vcn1, SPARSE_LCN, zero, false)) {
			err = -ENOMEM;
			goto done;
		}
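		/*
		 * The sparse entry just added keeps these VCNs in the run
		 * tree but maps them to no physical clusters (SPARSE_LCN),
		 * so reads of the range will return zeros.
		 */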

		/* Update run in attribute segment. */
		err = mi_pack_runs(mi, attr, run, evcn1 - svcn);
		if (err)
			goto done;
		next_svcn = le64_to_cpu(attr->nres.evcn) + 1;
		if (next_svcn < evcn1) {
			/* Insert a new attribute segment. */
			err = ni_insert_nonresident(ni, ATTR_DATA, NULL, 0, run,
						    next_svcn,
						    evcn1 - next_svcn, a_flags,
						    &attr, &mi, &le);
			if (err)
				goto undo_punch;

			/* Layout of records may have changed. */
			attr_b = NULL;
		}

		/* Real deallocation. Should not fail. */
		run_deallocate_ex(sbi, &run2, vcn1, zero, &hole, true);
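
		/*
		 * run2 still holds the pre-hole mapping, so the clusters are
		 * freed from known-good data; with trim requested, freed
		 * clusters may also be discarded on the device.
		 */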

next_attr:
		/* Free all allocated memory. */
		run_truncate(run, 0);

		if (evcn1 >= alen)
			break;

		/* Get the next attribute segment. */
		attr = ni_enum_attr_ex(ni, attr, &le, &mi);
		if (!attr) {
			err = -EINVAL;
			goto bad_inode;
		}

		svcn = le64_to_cpu(attr->nres.svcn);
		evcn1 = le64_to_cpu(attr->nres.evcn) + 1;
	}

done:
	if (!hole)
		goto out;

	if (!attr_b) {
		attr_b = ni_find_attr(ni, NULL, NULL, ATTR_DATA, NULL, 0, NULL,
				      &mi_b);
		if (!attr_b) {
			err = -EINVAL;
			goto bad_inode;
		}
	}

	total_size -= (u64)hole << sbi->cluster_bits;
	attr_b->nres.total_size = cpu_to_le64(total_size);
	mi_b->dirty = true;

	/* Update inode size. */
	inode_set_bytes(&ni->vfs_inode, total_size);
	ni->ni_flags |= NI_FLAG_UPDATE_PARENT;
	mark_inode_dirty(&ni->vfs_inode);

out:
	run_close(&run2);
	up_write(&ni->file.run_lock);
	return err;

bad_inode:
	_ntfs_bad_inode(&ni->vfs_inode);
	goto out;

undo_punch:
	/*
	 * Restore the packed runs.
	 * 'mi_pack_runs' should not fail here because we restore the
	 * original run.
	 */
	if (mi_pack_runs(mi, attr, &run2, evcn1 - svcn))
		goto bad_inode;

	goto done;
}
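
/*
 * Example caller (a sketch only; not code from this driver): a
 * fallocate-style path that zeroes the unaligned edges itself and then
 * punches the aligned middle.  zero_user_range() is a stand-in for
 * whatever page-cache zeroing the caller actually uses.
 *
 *	u32 frame_size;
 *	int err = attr_punch_hole(ni, vbo, bytes, &frame_size);
 *
 *	if (err == E_NTFS_NOTALIGNED) {
 *		u64 mask = frame_size - 1;
 *		u64 vbo_a = (vbo + mask) & ~mask;
 *		u64 end_a = (vbo + bytes) & ~mask;
 *
 *		zero_user_range(ni, vbo, min(vbo_a, vbo + bytes));
 *		zero_user_range(ni, max(end_a, vbo), vbo + bytes);
 *		if (end_a > vbo_a)
 *			err = attr_punch_hole(ni, vbo_a, end_a - vbo_a, NULL);
 *	}
 */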

/*
 * attr_insert_range - Insert a range (hole) into a file.
 * Not for normal files (sparse/compressed attributes only).
 */
int attr_insert_range(struct ntfs_inode *ni, u64 vbo, u64 bytes)
{
	int err = 0;
	struct runs_tree *run = &ni->file.run;
	struct ntfs_sb_info *sbi = ni->mi.sbi;
	struct ATTRIB *attr = NULL, *attr_b;
	struct ATTR_LIST_ENTRY *le, *le_b;
	struct mft_inode *mi, *mi_b;
	CLST vcn, svcn, evcn1, len, next_svcn;
	u64 data_size, alloc_size;
	u32 mask;
	__le16 a_flags;

	if (!bytes)
		return 0;

	le_b = NULL;
	attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL, 0, NULL, &mi_b);
	if (!attr_b)
		return -ENOENT;

	if (!is_attr_ext(attr_b)) {
		/* The caller has already checked this. See fallocate. */
		return -EOPNOTSUPP;
	}

	if (!attr_b->non_res) {
		data_size = le32_to_cpu(attr_b->res.data_size);
		alloc_size = data_size;
		mask = sbi->cluster_mask; /* cluster_size - 1 */
	} else {
		data_size = le64_to_cpu(attr_b->nres.data_size);
		alloc_size = le64_to_cpu(attr_b->nres.alloc_size);
		mask = (sbi->cluster_size << attr_b->nres.c_unit) - 1;
	}
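
	/*
	 * While the attribute is resident there are no allocation frames
	 * yet, so the insert granularity is a single cluster; once it is
	 * non-resident and sparse/compressed, it is the whole frame.
	 */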

	if (vbo > data_size) {
		/* Inserting a range beyond the end of file is not allowed. */
		return -EINVAL;
	}

	if ((vbo & mask) || (bytes & mask)) {
		/* Only frame-aligned ranges can be inserted. */
		return -EINVAL;
	}

	/*
	 * valid_size <= data_size <= alloc_size.
	 * Check that the new alloc_size will not exceed the maximum.
	 */
	if (bytes > sbi->maxbytes_sparse - alloc_size)
		return -EFBIG;

	vcn = vbo >> sbi->cluster_bits;
	len = bytes >> sbi->cluster_bits;
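
	/*
	 * E.g. with 4K clusters (cluster_bits == 12), vbo == 0x100000 and
	 * bytes == 0x10000 give vcn == 0x100 and len == 0x10.
	 */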

	down_write(&ni->file.run_lock);

	if (!attr_b->non_res) {
		err = attr_set_size(ni, ATTR_DATA, NULL, 0, run,
				    data_size + bytes, NULL, false, NULL);

		le_b = NULL;
		attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL, 0, NULL,
				      &mi_b);
		if (!attr_b) {
			err = -EINVAL;
			goto bad_inode;
		}

		if (err)
			goto out;

		if (!attr_b->non_res) {
			/* Still resident. */
			char *data = Add2Ptr(attr_b,
					     le16_to_cpu(attr_b->res.data_off));

			/* Shift the old tail up and zero the gap. */
			memmove(data + vbo + bytes, data + vbo,
				data_size - vbo);
			memset(data + vbo, 0, bytes);
			goto done;
		}

		/* The resident attribute has become nonresident. */
		data_size = le64_to_cpu(attr_b->nres.data_size);
		alloc_size = le64_to_cpu(attr_b->nres.alloc_size);
	}

	/*
	 * Enumerate all attribute segments and shift start vcn.
	 */
	a_flags = attr_b->flags;
	svcn = le64_to_cpu(attr_b->nres.svcn);
	evcn1 = le64_to_cpu(attr_b->nres.evcn) + 1;

	if (svcn <= vcn && vcn < evcn1) {
		attr = attr_b;
		le = le_b;
		mi = mi_b;
	} else if (!le_b) {
		err = -EINVAL;
		goto bad_inode;
	} else {
		le = le_b;
		attr = ni_find_attr(ni, attr_b, &le, ATTR_DATA, NULL, 0, &vcn,
				    &mi);
		if (!attr) {
			err = -EINVAL;
			goto bad_inode;
		}

		svcn = le64_to_cpu(attr->nres.svcn);
		evcn1 = le64_to_cpu(attr->nres.evcn) + 1;
	}

	run_truncate(run, 0); /* clear cached values. */
	err = attr_load_runs(attr, ni, run, NULL);
	if (err)
		goto out;

	if (!run_insert_range(run, vcn, len)) {
		err = -ENOMEM;
		goto out;
	}
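
	/*
	 * run_insert_range() shifts every run at or above vcn up by len
	 * clusters; the inserted [vcn, vcn + len) range becomes a hole.
	 */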

	/* Try to pack in current record as much as possible. */
	err = mi_pack_runs(mi, attr, run, evcn1 + len - svcn);
	if (err)
		goto out;

	next_svcn = le64_to_cpu(attr->nres.evcn) + 1;

	while ((attr = ni_enum_attr_ex(ni, attr, &le, &mi)) &&
	       attr->type == ATTR_DATA && !attr->name_len) {
		le64_add_cpu(&attr->nres.svcn, len);
		le64_add_cpu(&attr->nres.evcn, len);
		if (le) {
			le->vcn = attr->nres.svcn;
			ni->attr_list.dirty = true;
		}
		mi->dirty = true;
	}
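
	/*
	 * All remaining unnamed ATTR_DATA segments describe VCNs behind the
	 * insert point, so their [svcn, evcn] windows, and the matching
	 * attribute list entries, are simply shifted up by len.
	 */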

	if (next_svcn < evcn1 + len) {
		err = ni_insert_nonresident(ni, ATTR_DATA, NULL, 0, run,
					    next_svcn, evcn1 + len - next_svcn,
					    a_flags, NULL, NULL, NULL);

		le_b = NULL;
		attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL, 0, NULL,
				      &mi_b);
		if (!attr_b) {
			err = -EINVAL;
			goto bad_inode;
		}

		if (err) {
			/* ni_insert_nonresident failed. Try to undo. */
			goto undo_insert_range;
		}
	}

	/*
	 * Update the primary attribute segment.
	 */
	if (vbo <= ni->i_valid)
		ni->i_valid += bytes;

	attr_b->nres.data_size = cpu_to_le64(data_size + bytes);
	attr_b->nres.alloc_size = cpu_to_le64(alloc_size + bytes);

	/* ni->i_valid may temporarily differ from the on-disk valid_size. */
	if (ni->i_valid > data_size + bytes)
		attr_b->nres.valid_size = attr_b->nres.data_size;
	else
		attr_b->nres.valid_size = cpu_to_le64(ni->i_valid);
	mi_b->dirty = true;

done:
	ni->vfs_inode.i_size += bytes;
	ni->ni_flags |= NI_FLAG_UPDATE_PARENT;
	mark_inode_dirty(&ni->vfs_inode);

out:
	run_truncate(run, 0); /* clear cached values. */

	up_write(&ni->file.run_lock);

	return err;

bad_inode:
	_ntfs_bad_inode(&ni->vfs_inode);
	goto out;

undo_insert_range:
	svcn = le64_to_cpu(attr_b->nres.svcn);
	evcn1 = le64_to_cpu(attr_b->nres.evcn) + 1;

	if (svcn <= vcn && vcn < evcn1) {
		attr = attr_b;
		le = le_b;
		mi = mi_b;
	} else if (!le_b) {
		goto bad_inode;
	} else {
		le = le_b;
		attr = ni_find_attr(ni, attr_b, &le, ATTR_DATA, NULL, 0, &vcn,
				    &mi);
		if (!attr)
			goto bad_inode;

		svcn = le64_to_cpu(attr->nres.svcn);
		evcn1 = le64_to_cpu(attr->nres.evcn) + 1;
	}

	if (attr_load_runs(attr, ni, run, NULL))
		goto bad_inode;

	if (!run_collapse_range(run, vcn, len))
		goto bad_inode;

	if (mi_pack_runs(mi, attr, run, evcn1 + len - svcn))
		goto bad_inode;

	while ((attr = ni_enum_attr_ex(ni, attr, &le, &mi)) &&
	       attr->type == ATTR_DATA && !attr->name_len) {
		le64_sub_cpu(&attr->nres.svcn, len);
		le64_sub_cpu(&attr->nres.evcn, len);
		if (le) {
			le->vcn = attr->nres.svcn;
			ni->attr_list.dirty = true;
		}
		mi->dirty = true;
	}

	goto out;
}
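
/*
 * Example caller (a sketch; the exact wrapper is not part of this file):
 * an FALLOC_FL_INSERT_RANGE handler would typically drop cached pages at
 * and beyond the insert point before shifting the on-disk mapping:
 *
 *	truncate_pagecache(&ni->vfs_inode, vbo);
 *	err = attr_insert_range(ni, vbo, bytes);
 */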