1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * attrib.c - NTFS attribute operations.  Part of the Linux-NTFS project.
4  *
5  * Copyright (c) 2001-2012 Anton Altaparmakov and Tuxera Inc.
6  * Copyright (c) 2002 Richard Russon
7  */
8 
9 #include <linux/buffer_head.h>
10 #include <linux/sched.h>
11 #include <linux/slab.h>
12 #include <linux/swap.h>
13 #include <linux/writeback.h>
14 
15 #include "attrib.h"
16 #include "debug.h"
17 #include "layout.h"
18 #include "lcnalloc.h"
19 #include "malloc.h"
20 #include "mft.h"
21 #include "ntfs.h"
22 #include "types.h"
23 
24 /**
25  * ntfs_map_runlist_nolock - map (a part of) a runlist of an ntfs inode
26  * @ni:		ntfs inode for which to map (part of) a runlist
27  * @vcn:	map runlist part containing this vcn
28  * @ctx:	active attribute search context if present or NULL if not
29  *
30  * Map the part of a runlist containing the @vcn of the ntfs inode @ni.
31  *
32  * If @ctx is specified, it is an active search context of @ni and its base mft
33  * record.  This is needed when ntfs_map_runlist_nolock() encounters unmapped
34  * runlist fragments and allows their mapping.  If you do not have the mft
35  * record mapped, you can specify @ctx as NULL and ntfs_map_runlist_nolock()
36  * will perform the necessary mapping and unmapping.
37  *
38  * Note, ntfs_map_runlist_nolock() saves the state of @ctx on entry and
39  * restores it before returning.  Thus, @ctx will be left pointing to the same
40  * attribute on return as on entry.  However, the actual pointers in @ctx may
41  * point to different memory locations on return, so you must remember to reset
42  * any cached pointers from the @ctx, i.e. after the call to
43  * ntfs_map_runlist_nolock(), you will probably want to do:
44  *	m = ctx->mrec;
45  *	a = ctx->attr;
46  * Assuming you cache ctx->attr in a variable @a of type ATTR_RECORD * and that
47  * you cache ctx->mrec in a variable @m of type MFT_RECORD *.
48  *
49  * Return 0 on success and -errno on error.  There is one special error code
50  * which is not an error as such.  This is -ENOENT.  It means that @vcn is out
51  * of bounds of the runlist.
52  *
53  * Note the runlist can be NULL after this function returns if @vcn is zero and
54  * the attribute has zero allocated size, i.e. there simply is no runlist.
55  *
56  * WARNING: If @ctx is supplied, regardless of whether success or failure is
57  *	    returned, you need to check IS_ERR(@ctx->mrec) and if 'true' the @ctx
58  *	    is no longer valid, i.e. you need to either call
59  *	    ntfs_attr_reinit_search_ctx() or ntfs_attr_put_search_ctx() on it.
60  *	    In that case PTR_ERR(@ctx->mrec) will give you the error code for
61  *	    why the mapping of the old inode failed.
62  *
63  * Locking: - The runlist described by @ni must be locked for writing on entry
64  *	      and is locked on return.  Note the runlist will be modified.
65  *	    - If @ctx is NULL, the base mft record of @ni must not be mapped on
66  *	      entry and it will be left unmapped on return.
67  *	    - If @ctx is not NULL, the base mft record must be mapped on entry
68  *	      and it will be left mapped on return.
69  */
70 int ntfs_map_runlist_nolock(ntfs_inode *ni, VCN vcn, ntfs_attr_search_ctx *ctx)
71 {
72 	VCN end_vcn;
73 	unsigned long flags;
74 	ntfs_inode *base_ni;
75 	MFT_RECORD *m;
76 	ATTR_RECORD *a;
77 	runlist_element *rl;
78 	struct page *put_this_page = NULL;
79 	int err = 0;
80 	bool ctx_is_temporary, ctx_needs_reset;
81 	ntfs_attr_search_ctx old_ctx = { NULL, };
82 
83 	ntfs_debug("Mapping runlist part containing vcn 0x%llx.",
84 			(unsigned long long)vcn);
85 	if (!NInoAttr(ni))
86 		base_ni = ni;
87 	else
88 		base_ni = ni->ext.base_ntfs_ino;
89 	if (!ctx) {
90 		ctx_is_temporary = ctx_needs_reset = true;
91 		m = map_mft_record(base_ni);
92 		if (IS_ERR(m))
93 			return PTR_ERR(m);
94 		ctx = ntfs_attr_get_search_ctx(base_ni, m);
95 		if (unlikely(!ctx)) {
96 			err = -ENOMEM;
97 			goto err_out;
98 		}
99 	} else {
100 		VCN allocated_size_vcn;
101 
102 		BUG_ON(IS_ERR(ctx->mrec));
103 		a = ctx->attr;
104 		BUG_ON(!a->non_resident);
105 		ctx_is_temporary = false;
106 		end_vcn = sle64_to_cpu(a->data.non_resident.highest_vcn);
107 		read_lock_irqsave(&ni->size_lock, flags);
108 		allocated_size_vcn = ni->allocated_size >>
109 				ni->vol->cluster_size_bits;
110 		read_unlock_irqrestore(&ni->size_lock, flags);
111 		if (!a->data.non_resident.lowest_vcn && end_vcn <= 0)
112 			end_vcn = allocated_size_vcn - 1;
113 		/*
114 		 * If we already have the attribute extent containing @vcn in
115 		 * @ctx, no need to look it up again.  We slightly cheat in
116 		 * that if vcn exceeds the allocated size, we will refuse to
117 		 * map the runlist below, so there is definitely no need to get
118 		 * the right attribute extent.
119 		 */
120 		if (vcn >= allocated_size_vcn || (a->type == ni->type &&
121 				a->name_length == ni->name_len &&
122 				!memcmp((u8*)a + le16_to_cpu(a->name_offset),
123 				ni->name, ni->name_len) &&
124 				sle64_to_cpu(a->data.non_resident.lowest_vcn)
125 				<= vcn && end_vcn >= vcn))
126 			ctx_needs_reset = false;
127 		else {
128 			/* Save the old search context. */
129 			old_ctx = *ctx;
130 			/*
131 			 * If the currently mapped (extent) inode is not the
132 			 * base inode we will unmap it when we reinitialize the
133 			 * search context which means we need to get a
134 			 * reference to the page containing the mapped mft
135 			 * record so we do not accidentally drop changes to the
136 			 * mft record when it has not been marked dirty yet.
137 			 */
138 			if (old_ctx.base_ntfs_ino && old_ctx.ntfs_ino !=
139 					old_ctx.base_ntfs_ino) {
140 				put_this_page = old_ctx.ntfs_ino->page;
141 				get_page(put_this_page);
142 			}
143 			/*
144 			 * Reinitialize the search context so we can lookup the
145 			 * needed attribute extent.
146 			 */
147 			ntfs_attr_reinit_search_ctx(ctx);
148 			ctx_needs_reset = true;
149 		}
150 	}
151 	if (ctx_needs_reset) {
152 		err = ntfs_attr_lookup(ni->type, ni->name, ni->name_len,
153 				CASE_SENSITIVE, vcn, NULL, 0, ctx);
154 		if (unlikely(err)) {
155 			if (err == -ENOENT)
156 				err = -EIO;
157 			goto err_out;
158 		}
159 		BUG_ON(!ctx->attr->non_resident);
160 	}
161 	a = ctx->attr;
162 	/*
163 	 * Only decompress the mapping pairs if @vcn is inside it.  Otherwise
164 	 * we get into problems when we try to map an out of bounds vcn because
165 	 * we then try to map the already mapped runlist fragment and
166 	 * ntfs_mapping_pairs_decompress() fails.
167 	 */
168 	end_vcn = sle64_to_cpu(a->data.non_resident.highest_vcn) + 1;
169 	if (unlikely(vcn && vcn >= end_vcn)) {
170 		err = -ENOENT;
171 		goto err_out;
172 	}
173 	rl = ntfs_mapping_pairs_decompress(ni->vol, a, ni->runlist.rl);
174 	if (IS_ERR(rl))
175 		err = PTR_ERR(rl);
176 	else
177 		ni->runlist.rl = rl;
178 err_out:
179 	if (ctx_is_temporary) {
180 		if (likely(ctx))
181 			ntfs_attr_put_search_ctx(ctx);
182 		unmap_mft_record(base_ni);
183 	} else if (ctx_needs_reset) {
184 		/*
185 		 * If there is no attribute list, restoring the search context
186 		 * is accomplished simply by copying the saved context back over
187 		 * the caller supplied context.  If there is an attribute list,
188 		 * things are more complicated as we need to deal with mapping
189 		 * of mft records and resulting potential changes in pointers.
190 		 */
191 		if (NInoAttrList(base_ni)) {
192 			/*
193 			 * If the currently mapped (extent) inode is not the
194 			 * one we had before, we need to unmap it and map the
195 			 * old one.
196 			 */
197 			if (ctx->ntfs_ino != old_ctx.ntfs_ino) {
198 				/*
199 				 * If the currently mapped inode is not the
200 				 * base inode, unmap it.
201 				 */
202 				if (ctx->base_ntfs_ino && ctx->ntfs_ino !=
203 						ctx->base_ntfs_ino) {
204 					unmap_extent_mft_record(ctx->ntfs_ino);
205 					ctx->mrec = ctx->base_mrec;
206 					BUG_ON(!ctx->mrec);
207 				}
208 				/*
209 				 * If the old mapped inode is not the base
210 				 * inode, map it.
211 				 */
212 				if (old_ctx.base_ntfs_ino &&
213 						old_ctx.ntfs_ino !=
214 						old_ctx.base_ntfs_ino) {
215 retry_map:
216 					ctx->mrec = map_mft_record(
217 							old_ctx.ntfs_ino);
218 					/*
219 					 * Something bad has happened.  If out
220 					 * of memory, retry until it succeeds.
221 					 * Any other errors are fatal and we
222 					 * return the error code in ctx->mrec.
223 					 * Let the caller deal with it...  We
224 					 * just need to fudge things so the
225 					 * caller can reinit and/or put the
226 					 * search context safely.
227 					 */
228 					if (IS_ERR(ctx->mrec)) {
229 						if (PTR_ERR(ctx->mrec) ==
230 								-ENOMEM) {
231 							schedule();
232 							goto retry_map;
233 						} else
234 							old_ctx.ntfs_ino =
235 								old_ctx.
236 								base_ntfs_ino;
237 					}
238 				}
239 			}
240 			/* Update the changed pointers in the saved context. */
241 			if (ctx->mrec != old_ctx.mrec) {
242 				if (!IS_ERR(ctx->mrec))
243 					old_ctx.attr = (ATTR_RECORD*)(
244 							(u8*)ctx->mrec +
245 							((u8*)old_ctx.attr -
246 							(u8*)old_ctx.mrec));
247 				old_ctx.mrec = ctx->mrec;
248 			}
249 		}
250 		/* Restore the search context to the saved one. */
251 		*ctx = old_ctx;
252 		/*
253 		 * We drop the reference on the page we took earlier.  In the
254 		 * case that IS_ERR(ctx->mrec) is true this means we might lose
255 		 * some changes to the mft record that had been made between
256 		 * the last time it was marked dirty/written out and now.  This
257 		 * at this stage is not a problem as the mapping error is fatal
258 		 * enough that the mft record cannot be written out anyway and
259 		 * the caller is very likely to shutdown the whole inode
260 		 * immediately and mark the volume dirty for chkdsk to pick up
261 		 * the pieces anyway.
262 		 */
263 		if (put_this_page)
264 			put_page(put_this_page);
265 	}
266 	return err;
267 }
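
/*
 * Illustrative sketch, not part of the original source: a caller that holds
 * the runlist lock for writing and passes an active search context might use
 * ntfs_map_runlist_nolock() as follows.  The local variables err, m, and a
 * are assumptions made purely for this example.
 *
 *	err = ntfs_map_runlist_nolock(ni, vcn, ctx);
 *	if (IS_ERR(ctx->mrec)) {
 *		err = PTR_ERR(ctx->mrec);
 *		ntfs_attr_reinit_search_ctx(ctx);
 *		return err;
 *	}
 *	if (err && err != -ENOENT)
 *		return err;
 *	m = ctx->mrec;
 *	a = ctx->attr;
 *
 * The final two assignments refresh any cached pointers because the mft
 * record and attribute may have moved in memory even though @ctx still
 * describes the same attribute.
 */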
268 
269 /**
270  * ntfs_map_runlist - map (a part of) a runlist of an ntfs inode
271  * @ni:		ntfs inode for which to map (part of) a runlist
272  * @vcn:	map runlist part containing this vcn
273  *
274  * Map the part of a runlist containing the @vcn of the ntfs inode @ni.
275  *
276  * Return 0 on success and -errno on error.  There is one special error code
277  * which is not an error as such.  This is -ENOENT.  It means that @vcn is out
278  * of bounds of the runlist.
279  *
280  * Locking: - The runlist must be unlocked on entry and is unlocked on return.
281  *	    - This function takes the runlist lock for writing and may modify
282  *	      the runlist.
283  */
284 int ntfs_map_runlist(ntfs_inode *ni, VCN vcn)
285 {
286 	int err = 0;
287 
288 	down_write(&ni->runlist.lock);
289 	/* Make sure someone else didn't do the work while we were sleeping. */
290 	if (likely(ntfs_rl_vcn_to_lcn(ni->runlist.rl, vcn) <=
291 			LCN_RL_NOT_MAPPED))
292 		err = ntfs_map_runlist_nolock(ni, vcn, NULL);
293 	up_write(&ni->runlist.lock);
294 	return err;
295 }
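
/*
 * Illustrative sketch, an assumption for this example rather than code from
 * the original source: a reader typically performs the vcn to lcn conversion
 * first and only calls ntfs_map_runlist() when the runlist fragment turns out
 * not to be mapped yet.
 *
 *	down_read(&ni->runlist.lock);
 *	lcn = ntfs_rl_vcn_to_lcn(ni->runlist.rl, vcn);
 *	up_read(&ni->runlist.lock);
 *	if (lcn == LCN_RL_NOT_MAPPED) {
 *		err = ntfs_map_runlist(ni, vcn);
 *		if (!err) {
 *			down_read(&ni->runlist.lock);
 *			lcn = ntfs_rl_vcn_to_lcn(ni->runlist.rl, vcn);
 *			up_read(&ni->runlist.lock);
 *		}
 *	}
 */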
296 
297 /**
298  * ntfs_attr_vcn_to_lcn_nolock - convert a vcn into a lcn given an ntfs inode
299  * @ni:			ntfs inode of the attribute whose runlist to search
300  * @vcn:		vcn to convert
301  * @write_locked:	true if the runlist is locked for writing
302  *
303  * Find the virtual cluster number @vcn in the runlist of the ntfs attribute
304  * described by the ntfs inode @ni and return the corresponding logical cluster
305  * number (lcn).
306  *
307  * If the @vcn is not mapped yet, the attempt is made to map the attribute
308  * extent containing the @vcn and the vcn to lcn conversion is retried.
309  *
310  * If @write_locked is true the caller has locked the runlist for writing and
311  * if false for reading.
312  *
313  * Since lcns must be >= 0, we use negative return codes with special meaning:
314  *
315  * Return code	Meaning / Description
316  * ==========================================
317  *  LCN_HOLE	Hole / not allocated on disk.
318  *  LCN_ENOENT	There is no such vcn in the runlist, i.e. @vcn is out of bounds.
319  *  LCN_ENOMEM	Not enough memory to map runlist.
320  *  LCN_EIO	Critical error (runlist/file is corrupt, i/o error, etc).
321  *
322  * Locking: - The runlist must be locked on entry and is left locked on return.
323  *	    - If @write_locked is 'false', i.e. the runlist is locked for reading,
324  *	      the lock may be dropped inside the function so you cannot rely on
325  *	      the runlist still being the same when this function returns.
326  */
327 LCN ntfs_attr_vcn_to_lcn_nolock(ntfs_inode *ni, const VCN vcn,
328 		const bool write_locked)
329 {
330 	LCN lcn;
331 	unsigned long flags;
332 	bool is_retry = false;
333 
334 	BUG_ON(!ni);
335 	ntfs_debug("Entering for i_ino 0x%lx, vcn 0x%llx, %s_locked.",
336 			ni->mft_no, (unsigned long long)vcn,
337 			write_locked ? "write" : "read");
338 	BUG_ON(!NInoNonResident(ni));
339 	BUG_ON(vcn < 0);
340 	if (!ni->runlist.rl) {
341 		read_lock_irqsave(&ni->size_lock, flags);
342 		if (!ni->allocated_size) {
343 			read_unlock_irqrestore(&ni->size_lock, flags);
344 			return LCN_ENOENT;
345 		}
346 		read_unlock_irqrestore(&ni->size_lock, flags);
347 	}
348 retry_remap:
349 	/* Convert vcn to lcn.  If that fails map the runlist and retry once. */
350 	lcn = ntfs_rl_vcn_to_lcn(ni->runlist.rl, vcn);
351 	if (likely(lcn >= LCN_HOLE)) {
352 		ntfs_debug("Done, lcn 0x%llx.", (long long)lcn);
353 		return lcn;
354 	}
355 	if (lcn != LCN_RL_NOT_MAPPED) {
356 		if (lcn != LCN_ENOENT)
357 			lcn = LCN_EIO;
358 	} else if (!is_retry) {
359 		int err;
360 
361 		if (!write_locked) {
362 			up_read(&ni->runlist.lock);
363 			down_write(&ni->runlist.lock);
364 			if (unlikely(ntfs_rl_vcn_to_lcn(ni->runlist.rl, vcn) !=
365 					LCN_RL_NOT_MAPPED)) {
366 				up_write(&ni->runlist.lock);
367 				down_read(&ni->runlist.lock);
368 				goto retry_remap;
369 			}
370 		}
371 		err = ntfs_map_runlist_nolock(ni, vcn, NULL);
372 		if (!write_locked) {
373 			up_write(&ni->runlist.lock);
374 			down_read(&ni->runlist.lock);
375 		}
376 		if (likely(!err)) {
377 			is_retry = true;
378 			goto retry_remap;
379 		}
380 		if (err == -ENOENT)
381 			lcn = LCN_ENOENT;
382 		else if (err == -ENOMEM)
383 			lcn = LCN_ENOMEM;
384 		else
385 			lcn = LCN_EIO;
386 	}
387 	if (lcn != LCN_ENOENT)
388 		ntfs_error(ni->vol->sb, "Failed with error code %lli.",
389 				(long long)lcn);
390 	return lcn;
391 }
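
/*
 * Illustrative sketch, an assumption for this example only: converting a vcn
 * with the runlist held for reading and acting on the special return codes
 * listed above.
 *
 *	down_read(&ni->runlist.lock);
 *	lcn = ntfs_attr_vcn_to_lcn_nolock(ni, vcn, false);
 *	up_read(&ni->runlist.lock);
 *
 * On return, lcn >= 0 is the allocated cluster backing @vcn, LCN_HOLE means a
 * sparse run (return zeroes on read, allocate on write), LCN_ENOENT means
 * @vcn lies beyond the end of the attribute, and LCN_ENOMEM or LCN_EIO are
 * real errors that should be propagated.  Because @write_locked is false
 * here, the lock may have been dropped and reacquired inside the call.
 */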
392 
393 /**
394  * ntfs_attr_find_vcn_nolock - find a vcn in the runlist of an ntfs inode
395  * @ni:		ntfs inode describing the runlist to search
396  * @vcn:	vcn to find
397  * @ctx:	active attribute search context if present or NULL if not
398  *
399  * Find the virtual cluster number @vcn in the runlist described by the ntfs
400  * inode @ni and return the address of the runlist element containing the @vcn.
401  *
402  * If the @vcn is not mapped yet, the attempt is made to map the attribute
403  * extent containing the @vcn and the vcn to lcn conversion is retried.
404  *
405  * If @ctx is specified, it is an active search context of @ni and its base mft
406  * record.  This is needed when ntfs_attr_find_vcn_nolock() encounters unmapped
407  * runlist fragments and allows their mapping.  If you do not have the mft
408  * record mapped, you can specify @ctx as NULL and ntfs_attr_find_vcn_nolock()
409  * will perform the necessary mapping and unmapping.
410  *
411  * Note, ntfs_attr_find_vcn_nolock() saves the state of @ctx on entry and
412  * restores it before returning.  Thus, @ctx will be left pointing to the same
413  * attribute on return as on entry.  However, the actual pointers in @ctx may
414  * point to different memory locations on return, so you must remember to reset
415  * any cached pointers from the @ctx, i.e. after the call to
416  * ntfs_attr_find_vcn_nolock(), you will probably want to do:
417  *	m = ctx->mrec;
418  *	a = ctx->attr;
419  * Assuming you cache ctx->attr in a variable @a of type ATTR_RECORD * and that
420  * you cache ctx->mrec in a variable @m of type MFT_RECORD *.
421  * Note you need to distinguish between the lcn of the returned runlist element
422  * being >= 0 and LCN_HOLE.  In the latter case you have to return zeroes on
423  * read and allocate clusters on write.
424  *
425  * Return the runlist element containing the @vcn on success and
426  * ERR_PTR(-errno) on error.  You need to test the return value with IS_ERR()
427  * to decide if the return is success or failure and PTR_ERR() to get to the
428  * error code if IS_ERR() is true.
429  *
430  * The possible error return codes are:
431  *	-ENOENT - No such vcn in the runlist, i.e. @vcn is out of bounds.
432  *	-ENOMEM - Not enough memory to map runlist.
433  *	-EIO	- Critical error (runlist/file is corrupt, i/o error, etc).
434  *
435  * WARNING: If @ctx is supplied, regardless of whether success or failure is
436  *	    returned, you need to check IS_ERR(@ctx->mrec) and if 'true' the @ctx
437  *	    is no longer valid, i.e. you need to either call
438  *	    ntfs_attr_reinit_search_ctx() or ntfs_attr_put_search_ctx() on it.
439  *	    In that case PTR_ERR(@ctx->mrec) will give you the error code for
440  *	    why the mapping of the old inode failed.
441  *
442  * Locking: - The runlist described by @ni must be locked for writing on entry
443  *	      and is locked on return.  Note the runlist may be modified when
444  *	      needed runlist fragments need to be mapped.
445  *	    - If @ctx is NULL, the base mft record of @ni must not be mapped on
446  *	      entry and it will be left unmapped on return.
447  *	    - If @ctx is not NULL, the base mft record must be mapped on entry
448  *	      and it will be left mapped on return.
449  */
450 runlist_element *ntfs_attr_find_vcn_nolock(ntfs_inode *ni, const VCN vcn,
451 		ntfs_attr_search_ctx *ctx)
452 {
453 	unsigned long flags;
454 	runlist_element *rl;
455 	int err = 0;
456 	bool is_retry = false;
457 
458 	BUG_ON(!ni);
459 	ntfs_debug("Entering for i_ino 0x%lx, vcn 0x%llx, with%s ctx.",
460 			ni->mft_no, (unsigned long long)vcn, ctx ? "" : "out");
461 	BUG_ON(!NInoNonResident(ni));
462 	BUG_ON(vcn < 0);
463 	if (!ni->runlist.rl) {
464 		read_lock_irqsave(&ni->size_lock, flags);
465 		if (!ni->allocated_size) {
466 			read_unlock_irqrestore(&ni->size_lock, flags);
467 			return ERR_PTR(-ENOENT);
468 		}
469 		read_unlock_irqrestore(&ni->size_lock, flags);
470 	}
471 retry_remap:
472 	rl = ni->runlist.rl;
473 	if (likely(rl && vcn >= rl[0].vcn)) {
474 		while (likely(rl->length)) {
475 			if (unlikely(vcn < rl[1].vcn)) {
476 				if (likely(rl->lcn >= LCN_HOLE)) {
477 					ntfs_debug("Done.");
478 					return rl;
479 				}
480 				break;
481 			}
482 			rl++;
483 		}
484 		if (likely(rl->lcn != LCN_RL_NOT_MAPPED)) {
485 			if (likely(rl->lcn == LCN_ENOENT))
486 				err = -ENOENT;
487 			else
488 				err = -EIO;
489 		}
490 	}
491 	if (!err && !is_retry) {
492 		/*
493 		 * If the search context is invalid we cannot map the unmapped
494 		 * region.
495 		 */
496 		if (IS_ERR(ctx->mrec))
497 			err = PTR_ERR(ctx->mrec);
498 		else {
499 			/*
500 			 * The @vcn is in an unmapped region, map the runlist
501 			 * and retry.
502 			 */
503 			err = ntfs_map_runlist_nolock(ni, vcn, ctx);
504 			if (likely(!err)) {
505 				is_retry = true;
506 				goto retry_remap;
507 			}
508 		}
509 		if (err == -EINVAL)
510 			err = -EIO;
511 	} else if (!err)
512 		err = -EIO;
513 	if (err != -ENOENT)
514 		ntfs_error(ni->vol->sb, "Failed with error code %i.", err);
515 	return ERR_PTR(err);
516 }
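
/*
 * Illustrative sketch, an assumption for this example only: consuming the
 * runlist element returned by ntfs_attr_find_vcn_nolock() and distinguishing
 * holes from allocated runs as described above.
 *
 *	rl = ntfs_attr_find_vcn_nolock(ni, vcn, ctx);
 *	if (IS_ERR(rl))
 *		return PTR_ERR(rl);
 *	if (rl->lcn == LCN_HOLE) {
 *		handle_sparse_run();
 *	} else {
 *		lcn = rl->lcn + (vcn - rl->vcn);
 *		handle_allocated_cluster(lcn);
 *	}
 *
 * The helpers handle_sparse_run() and handle_allocated_cluster() are
 * placeholders invented for this example; the offset arithmetic simply maps
 * @vcn into the run that was found.
 */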
517 
518 /**
519  * ntfs_attr_find - find (next) attribute in mft record
520  * @type:	attribute type to find
521  * @name:	attribute name to find (optional, i.e. NULL means don't care)
522  * @name_len:	attribute name length (only needed if @name present)
523  * @ic:		IGNORE_CASE or CASE_SENSITIVE (ignored if @name not present)
524  * @val:	attribute value to find (optional, resident attributes only)
525  * @val_len:	attribute value length
526  * @ctx:	search context with mft record and attribute to search from
527  *
528  * You should not need to call this function directly.  Use ntfs_attr_lookup()
529  * instead.
530  *
531  * ntfs_attr_find() takes a search context @ctx as parameter and searches the
532  * mft record specified by @ctx->mrec, beginning at @ctx->attr, for an
533  * attribute of @type, optionally @name and @val.
534  *
535  * If the attribute is found, ntfs_attr_find() returns 0 and @ctx->attr will
536  * point to the found attribute.
537  *
538  * If the attribute is not found, ntfs_attr_find() returns -ENOENT and
539  * @ctx->attr will point to the attribute before which the attribute being
540  * searched for would need to be inserted if such an action were to be desired.
541  *
542  * On actual error, ntfs_attr_find() returns -EIO.  In this case @ctx->attr is
543  * undefined and in particular do not rely on it not changing.
544  *
545  * If @ctx->is_first is 'true', the search begins with @ctx->attr itself.  If it
546  * is 'false', the search begins after @ctx->attr.
547  *
548  * If @ic is IGNORE_CASE, the @name comparison is not case sensitive and
549  * @ctx->ntfs_ino must be set to the ntfs inode to which the mft record
550  * @ctx->mrec belongs.  This is so we can get at the ntfs volume and hence at
551  * the upcase table.  If @ic is CASE_SENSITIVE, the comparison is case
552  * sensitive.  When @name is present, @name_len is the @name length in Unicode
553  * characters.
554  *
555  * If @name is not present (NULL), we assume that the unnamed attribute is
556  * being searched for.
557  *
558  * Finally, the resident attribute value @val is looked for, if present.  If
559  * @val is not present (NULL), @val_len is ignored.
560  *
561  * ntfs_attr_find() only searches the specified mft record and it ignores the
562  * presence of an attribute list attribute (unless it is the one being searched
563  * for, obviously).  If you need to take attribute lists into consideration,
564  * use ntfs_attr_lookup() instead (see below).  This also means that you cannot
565  * use ntfs_attr_find() to search for extent records of non-resident
566  * attributes, as extents with lowest_vcn != 0 are usually described by the
567  * attribute list attribute only. - Note that it is possible that the first
568  * extent is only in the attribute list while the last extent is in the base
569  * mft record, so do not rely on being able to find the first extent in the
570  * base mft record.
571  *
572  * Warning: Never use @val when looking for attribute types which can be
573  *	    non-resident as this most likely will result in a crash!
574  */
575 static int ntfs_attr_find(const ATTR_TYPE type, const ntfschar *name,
576 		const u32 name_len, const IGNORE_CASE_BOOL ic,
577 		const u8 *val, const u32 val_len, ntfs_attr_search_ctx *ctx)
578 {
579 	ATTR_RECORD *a;
580 	ntfs_volume *vol = ctx->ntfs_ino->vol;
581 	ntfschar *upcase = vol->upcase;
582 	u32 upcase_len = vol->upcase_len;
583 
584 	/*
585 	 * Iterate over the attributes in the mft record, starting at @ctx->attr
586 	 * if @ctx->is_first is 'true', or at the attribute following it otherwise.
587 	 */
588 	if (ctx->is_first) {
589 		a = ctx->attr;
590 		ctx->is_first = false;
591 	} else
592 		a = (ATTR_RECORD*)((u8*)ctx->attr +
593 				le32_to_cpu(ctx->attr->length));
594 	for (;;	a = (ATTR_RECORD*)((u8*)a + le32_to_cpu(a->length))) {
595 		u8 *mrec_end = (u8 *)ctx->mrec +
596 		               le32_to_cpu(ctx->mrec->bytes_allocated);
597 		u8 *name_end;
598 
599 		/* Check that the ATTR_RECORD does not point before the mft record. */
600 		if ((u8 *)a < (u8 *)ctx->mrec)
601 			break;
602 
603 		/* check whether Attribute Record Header is within bounds */
604 		if ((u8 *)a > mrec_end ||
605 		    (u8 *)a + sizeof(ATTR_RECORD) > mrec_end)
606 			break;
607 
608 		/* check whether ATTR_RECORD's name is within bounds */
609 		name_end = (u8 *)a + le16_to_cpu(a->name_offset) +
610 			   a->name_length * sizeof(ntfschar);
611 		if (name_end > mrec_end)
612 			break;
613 
614 		ctx->attr = a;
615 		if (unlikely(le32_to_cpu(a->type) > le32_to_cpu(type) ||
616 				a->type == AT_END))
617 			return -ENOENT;
618 		if (unlikely(!a->length))
619 			break;
620 
621 		/* check whether ATTR_RECORD's length wraps around */
622 		if ((u8 *)a + le32_to_cpu(a->length) < (u8 *)a)
623 			break;
624 		/* check whether ATTR_RECORD's length is within bounds */
625 		if ((u8 *)a + le32_to_cpu(a->length) > mrec_end)
626 			break;
627 
628 		if (a->type != type)
629 			continue;
630 		/*
631 		 * If @name is present, compare the two names.  If @name is
632 		 * missing, assume we want an unnamed attribute.
633 		 */
634 		if (!name) {
635 			/* The search failed if the found attribute is named. */
636 			if (a->name_length)
637 				return -ENOENT;
638 		} else if (!ntfs_are_names_equal(name, name_len,
639 			    (ntfschar*)((u8*)a + le16_to_cpu(a->name_offset)),
640 			    a->name_length, ic, upcase, upcase_len)) {
641 			register int rc;
642 
643 			rc = ntfs_collate_names(name, name_len,
644 					(ntfschar*)((u8*)a +
645 					le16_to_cpu(a->name_offset)),
646 					a->name_length, 1, IGNORE_CASE,
647 					upcase, upcase_len);
648 			/*
649 			 * If @name collates before a->name, there is no
650 			 * matching attribute.
651 			 */
652 			if (rc == -1)
653 				return -ENOENT;
654 			/* If the strings are not equal, continue search. */
655 			if (rc)
656 				continue;
657 			rc = ntfs_collate_names(name, name_len,
658 					(ntfschar*)((u8*)a +
659 					le16_to_cpu(a->name_offset)),
660 					a->name_length, 1, CASE_SENSITIVE,
661 					upcase, upcase_len);
662 			if (rc == -1)
663 				return -ENOENT;
664 			if (rc)
665 				continue;
666 		}
667 		/*
668 		 * The names match or @name not present and attribute is
669 		 * unnamed.  If no @val specified, we have found the attribute
670 		 * and are done.
671 		 */
672 		if (!val)
673 			return 0;
674 		/* @val is present; compare values. */
675 		else {
676 			register int rc;
677 
678 			rc = memcmp(val, (u8*)a + le16_to_cpu(
679 					a->data.resident.value_offset),
680 					min_t(u32, val_len, le32_to_cpu(
681 					a->data.resident.value_length)));
682 			/*
683 			 * If @val collates before the current attribute's
684 			 * value, there is no matching attribute.
685 			 */
686 			if (!rc) {
687 				register u32 avl;
688 
689 				avl = le32_to_cpu(
690 						a->data.resident.value_length);
691 				if (val_len == avl)
692 					return 0;
693 				if (val_len < avl)
694 					return -ENOENT;
695 			} else if (rc < 0)
696 				return -ENOENT;
697 		}
698 	}
699 	ntfs_error(vol->sb, "Inode is corrupt.  Run chkdsk.");
700 	NVolSetErrors(vol);
701 	return -EIO;
702 }
703 
704 /**
705  * load_attribute_list - load an attribute list into memory
706  * @vol:		ntfs volume from which to read
707  * @runlist:		runlist of the attribute list
708  * @al_start:		destination buffer
709  * @size:		size of the destination buffer in bytes
710  * @initialized_size:	initialized size of the attribute list
711  *
712  * Walk the runlist @runlist and load all clusters from it, copying them into
713  * the linear buffer @al_start.  The maximum number of bytes copied is @size
714  * bytes.  Note, @size does not need to be a multiple of the cluster size.  If
715  * @initialized_size is less than @size, the region in @al_start between
716  * @initialized_size and @size will be zeroed and not read from disk.
717  *
718  * Return 0 on success or -errno on error.
719  */
720 int load_attribute_list(ntfs_volume *vol, runlist *runlist, u8 *al_start,
721 		const s64 size, const s64 initialized_size)
722 {
723 	LCN lcn;
724 	u8 *al = al_start;
725 	u8 *al_end = al + initialized_size;
726 	runlist_element *rl;
727 	struct buffer_head *bh;
728 	struct super_block *sb;
729 	unsigned long block_size;
730 	unsigned long block, max_block;
731 	int err = 0;
732 	unsigned char block_size_bits;
733 
734 	ntfs_debug("Entering.");
735 	if (!vol || !runlist || !al || size <= 0 || initialized_size < 0 ||
736 			initialized_size > size)
737 		return -EINVAL;
738 	if (!initialized_size) {
739 		memset(al, 0, size);
740 		return 0;
741 	}
742 	sb = vol->sb;
743 	block_size = sb->s_blocksize;
744 	block_size_bits = sb->s_blocksize_bits;
745 	down_read(&runlist->lock);
746 	rl = runlist->rl;
747 	if (!rl) {
748 		ntfs_error(sb, "Cannot read attribute list since runlist is "
749 				"missing.");
750 		goto err_out;
751 	}
752 	/* Read all clusters specified by the runlist one run at a time. */
753 	while (rl->length) {
754 		lcn = ntfs_rl_vcn_to_lcn(rl, rl->vcn);
755 		ntfs_debug("Reading vcn = 0x%llx, lcn = 0x%llx.",
756 				(unsigned long long)rl->vcn,
757 				(unsigned long long)lcn);
758 		/* The attribute list cannot be sparse. */
759 		if (lcn < 0) {
760 			ntfs_error(sb, "ntfs_rl_vcn_to_lcn() failed.  Cannot "
761 					"read attribute list.");
762 			goto err_out;
763 		}
764 		block = lcn << vol->cluster_size_bits >> block_size_bits;
765 		/* Read the run from device in chunks of block_size bytes. */
766 		max_block = block + (rl->length << vol->cluster_size_bits >>
767 				block_size_bits);
768 		ntfs_debug("max_block = 0x%lx.", max_block);
769 		do {
770 			ntfs_debug("Reading block = 0x%lx.", block);
771 			bh = sb_bread(sb, block);
772 			if (!bh) {
773 				ntfs_error(sb, "sb_bread() failed. Cannot "
774 						"read attribute list.");
775 				goto err_out;
776 			}
777 			if (al + block_size >= al_end)
778 				goto do_final;
779 			memcpy(al, bh->b_data, block_size);
780 			brelse(bh);
781 			al += block_size;
782 		} while (++block < max_block);
783 		rl++;
784 	}
785 	if (initialized_size < size) {
786 initialize:
787 		memset(al_start + initialized_size, 0, size - initialized_size);
788 	}
789 done:
790 	up_read(&runlist->lock);
791 	return err;
792 do_final:
793 	if (al < al_end) {
794 		/*
795 		 * Partial block.
796 		 *
797 		 * Note: The attribute list can be smaller than its allocation
798 		 * by multiple clusters.  This has been encountered by at least
799 		 * two people running Windows XP, thus we cannot do any
800 		 * truncation sanity checking here. (AIA)
801 		 */
802 		memcpy(al, bh->b_data, al_end - al);
803 		brelse(bh);
804 		if (initialized_size < size)
805 			goto initialize;
806 		goto done;
807 	}
808 	brelse(bh);
809 	/* Real overflow! */
810 	ntfs_error(sb, "Attribute list buffer overflow. Read attribute list "
811 			"is truncated.");
812 err_out:
813 	err = -EIO;
814 	goto done;
815 }
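
/*
 * Illustrative sketch, an assumption for this example only: loading the
 * attribute list of a base inode into a freshly allocated buffer, using the
 * sizes taken from the $ATTRIBUTE_LIST attribute record.
 *
 *	ni->attr_list = ntfs_malloc_nofs(ni->attr_list_size);
 *	if (!ni->attr_list)
 *		return -ENOMEM;
 *	err = load_attribute_list(vol, &ni->attr_list_rl, ni->attr_list,
 *			ni->attr_list_size, init_size);
 *	if (err) {
 *		ntfs_free(ni->attr_list);
 *		return err;
 *	}
 *
 * Here init_size stands for the initialized size of the attribute list and is
 * a placeholder for this example.
 */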
816 
817 /**
818  * ntfs_external_attr_find - find an attribute in the attribute list of an inode
819  * @type:	attribute type to find
820  * @name:	attribute name to find (optional, i.e. NULL means don't care)
821  * @name_len:	attribute name length (only needed if @name present)
822  * @ic:		IGNORE_CASE or CASE_SENSITIVE (ignored if @name not present)
823  * @lowest_vcn:	lowest vcn to find (optional, non-resident attributes only)
824  * @val:	attribute value to find (optional, resident attributes only)
825  * @val_len:	attribute value length
826  * @ctx:	search context with mft record and attribute to search from
827  *
828  * You should not need to call this function directly.  Use ntfs_attr_lookup()
829  * instead.
830  *
831  * Find an attribute by searching the attribute list for the corresponding
832  * attribute list entry.  Having found the entry, map the mft record if the
833  * attribute is in a different mft record/inode, ntfs_attr_find() the attribute
834  * in there and return it.
835  *
836  * On first search @ctx->ntfs_ino must be the base mft record and @ctx must
837  * have been obtained from a call to ntfs_attr_get_search_ctx().  On subsequent
838  * calls @ctx->ntfs_ino can be any extent inode, too (@ctx->base_ntfs_ino is
839  * then the base inode).
840  *
841  * After finishing with the attribute/mft record you need to call
842  * ntfs_attr_put_search_ctx() to cleanup the search context (unmapping any
843  * mapped inodes, etc).
844  *
845  * If the attribute is found, ntfs_external_attr_find() returns 0 and
846  * @ctx->attr will point to the found attribute.  @ctx->mrec will point to the
847  * mft record in which @ctx->attr is located and @ctx->al_entry will point to
848  * the attribute list entry for the attribute.
849  *
850  * If the attribute is not found, ntfs_external_attr_find() returns -ENOENT and
851  * @ctx->attr will point to the attribute in the base mft record before which
852  * the attribute being searched for would need to be inserted if such an action
853  * were to be desired.  @ctx->mrec will point to the mft record in which
854  * @ctx->attr is located and @ctx->al_entry will point to the attribute list
855  * entry of the attribute before which the attribute being searched for would
856  * need to be inserted if such an action were to be desired.
857  *
858  * Thus to insert the not found attribute, one wants to add the attribute to
859  * @ctx->mrec (the base mft record) and if there is not enough space, the
860  * attribute should be placed in a newly allocated extent mft record.  The
861  * attribute list entry for the inserted attribute should be inserted in the
862  * attribute list attribute at @ctx->al_entry.
863  *
864  * On actual error, ntfs_external_attr_find() returns -EIO.  In this case
865  * @ctx->attr is undefined and in particular do not rely on it not changing.
866  */
867 static int ntfs_external_attr_find(const ATTR_TYPE type,
868 		const ntfschar *name, const u32 name_len,
869 		const IGNORE_CASE_BOOL ic, const VCN lowest_vcn,
870 		const u8 *val, const u32 val_len, ntfs_attr_search_ctx *ctx)
871 {
872 	ntfs_inode *base_ni, *ni;
873 	ntfs_volume *vol;
874 	ATTR_LIST_ENTRY *al_entry, *next_al_entry;
875 	u8 *al_start, *al_end;
876 	ATTR_RECORD *a;
877 	ntfschar *al_name;
878 	u32 al_name_len;
879 	int err = 0;
880 	static const char *es = " Unmount and run chkdsk.";
881 
882 	ni = ctx->ntfs_ino;
883 	base_ni = ctx->base_ntfs_ino;
884 	ntfs_debug("Entering for inode 0x%lx, type 0x%x.", ni->mft_no, type);
885 	if (!base_ni) {
886 		/* First call happens with the base mft record. */
887 		base_ni = ctx->base_ntfs_ino = ctx->ntfs_ino;
888 		ctx->base_mrec = ctx->mrec;
889 	}
890 	if (ni == base_ni)
891 		ctx->base_attr = ctx->attr;
892 	if (type == AT_END)
893 		goto not_found;
894 	vol = base_ni->vol;
895 	al_start = base_ni->attr_list;
896 	al_end = al_start + base_ni->attr_list_size;
897 	if (!ctx->al_entry)
898 		ctx->al_entry = (ATTR_LIST_ENTRY*)al_start;
899 	/*
900 	 * Iterate over the attribute list entries, starting at @ctx->al_entry if
901 	 * @ctx->is_first is 'true', or at the entry following it otherwise.
902 	 */
903 	if (ctx->is_first) {
904 		al_entry = ctx->al_entry;
905 		ctx->is_first = false;
906 	} else
907 		al_entry = (ATTR_LIST_ENTRY*)((u8*)ctx->al_entry +
908 				le16_to_cpu(ctx->al_entry->length));
909 	for (;; al_entry = next_al_entry) {
910 		/* Out of bounds check. */
911 		if ((u8*)al_entry < base_ni->attr_list ||
912 				(u8*)al_entry > al_end)
913 			break;	/* Inode is corrupt. */
914 		ctx->al_entry = al_entry;
915 		/* Catch the end of the attribute list. */
916 		if ((u8*)al_entry == al_end)
917 			goto not_found;
918 		if (!al_entry->length)
919 			break;
920 		if ((u8*)al_entry + 6 > al_end || (u8*)al_entry +
921 				le16_to_cpu(al_entry->length) > al_end)
922 			break;
923 		next_al_entry = (ATTR_LIST_ENTRY*)((u8*)al_entry +
924 				le16_to_cpu(al_entry->length));
925 		if (le32_to_cpu(al_entry->type) > le32_to_cpu(type))
926 			goto not_found;
927 		if (type != al_entry->type)
928 			continue;
929 		/*
930 		 * If @name is present, compare the two names.  If @name is
931 		 * missing, assume we want an unnamed attribute.
932 		 */
933 		al_name_len = al_entry->name_length;
934 		al_name = (ntfschar*)((u8*)al_entry + al_entry->name_offset);
935 		if (!name) {
936 			if (al_name_len)
937 				goto not_found;
938 		} else if (!ntfs_are_names_equal(al_name, al_name_len, name,
939 				name_len, ic, vol->upcase, vol->upcase_len)) {
940 			register int rc;
941 
942 			rc = ntfs_collate_names(name, name_len, al_name,
943 					al_name_len, 1, IGNORE_CASE,
944 					vol->upcase, vol->upcase_len);
945 			/*
946 			 * If @name collates before al_name, there is no
947 			 * matching attribute.
948 			 */
949 			if (rc == -1)
950 				goto not_found;
951 			/* If the strings are not equal, continue search. */
952 			if (rc)
953 				continue;
954 			/*
955 			 * FIXME: Reverse engineering showed 0, IGNORE_CASE but
956 			 * that is inconsistent with ntfs_attr_find().  The
957 			 * subsequent rc checks were also different.  Perhaps I
958 			 * made a mistake in one of the two.  Need to recheck
959 			 * which is correct or at least see what is going on...
960 			 * (AIA)
961 			 */
962 			rc = ntfs_collate_names(name, name_len, al_name,
963 					al_name_len, 1, CASE_SENSITIVE,
964 					vol->upcase, vol->upcase_len);
965 			if (rc == -1)
966 				goto not_found;
967 			if (rc)
968 				continue;
969 		}
970 		/*
971 		 * The names match or @name not present and attribute is
972 		 * unnamed.  Now check @lowest_vcn.  Continue search if the
973 		 * next attribute list entry still fits @lowest_vcn.  Otherwise
974 		 * we have reached the right one or the search has failed.
975 		 */
976 		if (lowest_vcn && (u8*)next_al_entry >= al_start	    &&
977 				(u8*)next_al_entry + 6 < al_end		    &&
978 				(u8*)next_al_entry + le16_to_cpu(
979 					next_al_entry->length) <= al_end    &&
980 				sle64_to_cpu(next_al_entry->lowest_vcn) <=
981 					lowest_vcn			    &&
982 				next_al_entry->type == al_entry->type	    &&
983 				next_al_entry->name_length == al_name_len   &&
984 				ntfs_are_names_equal((ntfschar*)((u8*)
985 					next_al_entry +
986 					next_al_entry->name_offset),
987 					next_al_entry->name_length,
988 					al_name, al_name_len, CASE_SENSITIVE,
989 					vol->upcase, vol->upcase_len))
990 			continue;
991 		if (MREF_LE(al_entry->mft_reference) == ni->mft_no) {
992 			if (MSEQNO_LE(al_entry->mft_reference) != ni->seq_no) {
993 				ntfs_error(vol->sb, "Found stale mft "
994 						"reference in attribute list "
995 						"of base inode 0x%lx.%s",
996 						base_ni->mft_no, es);
997 				err = -EIO;
998 				break;
999 			}
1000 		} else { /* Mft references do not match. */
1001 			/* If there is a mapped record unmap it first. */
1002 			if (ni != base_ni)
1003 				unmap_extent_mft_record(ni);
1004 			/* Do we want the base record back? */
1005 			if (MREF_LE(al_entry->mft_reference) ==
1006 					base_ni->mft_no) {
1007 				ni = ctx->ntfs_ino = base_ni;
1008 				ctx->mrec = ctx->base_mrec;
1009 			} else {
1010 				/* We want an extent record. */
1011 				ctx->mrec = map_extent_mft_record(base_ni,
1012 						le64_to_cpu(
1013 						al_entry->mft_reference), &ni);
1014 				if (IS_ERR(ctx->mrec)) {
1015 					ntfs_error(vol->sb, "Failed to map "
1016 							"extent mft record "
1017 							"0x%lx of base inode "
1018 							"0x%lx.%s",
1019 							MREF_LE(al_entry->
1020 							mft_reference),
1021 							base_ni->mft_no, es);
1022 					err = PTR_ERR(ctx->mrec);
1023 					if (err == -ENOENT)
1024 						err = -EIO;
1025 					/* Cause @ctx to be sanitized below. */
1026 					ni = NULL;
1027 					break;
1028 				}
1029 				ctx->ntfs_ino = ni;
1030 			}
1031 			ctx->attr = (ATTR_RECORD*)((u8*)ctx->mrec +
1032 					le16_to_cpu(ctx->mrec->attrs_offset));
1033 		}
1034 		/*
1035 		 * ctx->ntfs_ino, ctx->mrec, and ctx->attr now point to the
1036 		 * mft record containing the attribute represented by the
1037 		 * current al_entry.
1038 		 */
1039 		/*
1040 		 * We could call into ntfs_attr_find() to find the right
1041 		 * attribute in this mft record but this would be less
1042 		 * efficient and not quite accurate as ntfs_attr_find() ignores,
1043 		 * for example, the attribute instance numbers, which become
1044 		 * important when one plays with attribute lists.  Also,
1045 		 * because a proper match has been found in the attribute list
1046 		 * entry above, the comparison can now be optimized.  So it is
1047 		 * worth re-implementing a simplified ntfs_attr_find() here.
1048 		 */
1049 		a = ctx->attr;
1050 		/*
1051 		 * Use a manual loop so we can still use break and continue
1052 		 * with the same meanings as above.
1053 		 */
1054 do_next_attr_loop:
1055 		if ((u8*)a < (u8*)ctx->mrec || (u8*)a > (u8*)ctx->mrec +
1056 				le32_to_cpu(ctx->mrec->bytes_allocated))
1057 			break;
1058 		if (a->type == AT_END)
1059 			break;
1060 		if (!a->length)
1061 			break;
1062 		if (al_entry->instance != a->instance)
1063 			goto do_next_attr;
1064 		/*
1065 		 * If the type and/or the name are mismatched between the
1066 		 * attribute list entry and the attribute record, there is
1067 		 * corruption so we break and return error EIO.
1068 		 */
1069 		if (al_entry->type != a->type)
1070 			break;
1071 		if (!ntfs_are_names_equal((ntfschar*)((u8*)a +
1072 				le16_to_cpu(a->name_offset)), a->name_length,
1073 				al_name, al_name_len, CASE_SENSITIVE,
1074 				vol->upcase, vol->upcase_len))
1075 			break;
1076 		ctx->attr = a;
1077 		/*
1078 		 * If no @val specified or @val specified and it matches, we
1079 		 * have found it!
1080 		 */
1081 		if (!val || (!a->non_resident && le32_to_cpu(
1082 				a->data.resident.value_length) == val_len &&
1083 				!memcmp((u8*)a +
1084 				le16_to_cpu(a->data.resident.value_offset),
1085 				val, val_len))) {
1086 			ntfs_debug("Done, found.");
1087 			return 0;
1088 		}
1089 do_next_attr:
1090 		/* Proceed to the next attribute in the current mft record. */
1091 		a = (ATTR_RECORD*)((u8*)a + le32_to_cpu(a->length));
1092 		goto do_next_attr_loop;
1093 	}
1094 	if (!err) {
1095 		ntfs_error(vol->sb, "Base inode 0x%lx contains corrupt "
1096 				"attribute list attribute.%s", base_ni->mft_no,
1097 				es);
1098 		err = -EIO;
1099 	}
1100 	if (ni != base_ni) {
1101 		if (ni)
1102 			unmap_extent_mft_record(ni);
1103 		ctx->ntfs_ino = base_ni;
1104 		ctx->mrec = ctx->base_mrec;
1105 		ctx->attr = ctx->base_attr;
1106 	}
1107 	if (err != -ENOMEM)
1108 		NVolSetErrors(vol);
1109 	return err;
1110 not_found:
1111 	/*
1112 	 * If we were looking for AT_END, we reset the search context @ctx and
1113 	 * use ntfs_attr_find() to seek to the end of the base mft record.
1114 	 */
1115 	if (type == AT_END) {
1116 		ntfs_attr_reinit_search_ctx(ctx);
1117 		return ntfs_attr_find(AT_END, name, name_len, ic, val, val_len,
1118 				ctx);
1119 	}
1120 	/*
1121 	 * The attribute was not found.  Before we return, we want to ensure
1122 	 * @ctx->mrec and @ctx->attr indicate the position at which the
1123 	 * attribute should be inserted in the base mft record.  Since we also
1124 	 * want to preserve @ctx->al_entry we cannot reinitialize the search
1125 	 * context using ntfs_attr_reinit_search_ctx() as this would set
1126 	 * @ctx->al_entry to NULL.  Thus we do the necessary bits manually (see
1127 	 * ntfs_attr_init_search_ctx() below).  Note, we _only_ preserve
1128 	 * @ctx->al_entry as the remaining fields (base_*) are identical to
1129 	 * their non base_ counterparts and we cannot set @ctx->base_attr
1130 	 * correctly yet as we do not know what @ctx->attr will be set to by
1131 	 * the call to ntfs_attr_find() below.
1132 	 */
1133 	if (ni != base_ni)
1134 		unmap_extent_mft_record(ni);
1135 	ctx->mrec = ctx->base_mrec;
1136 	ctx->attr = (ATTR_RECORD*)((u8*)ctx->mrec +
1137 			le16_to_cpu(ctx->mrec->attrs_offset));
1138 	ctx->is_first = true;
1139 	ctx->ntfs_ino = base_ni;
1140 	ctx->base_ntfs_ino = NULL;
1141 	ctx->base_mrec = NULL;
1142 	ctx->base_attr = NULL;
1143 	/*
1144 	 * In case there are multiple matches in the base mft record, we need to
1145 	 * keep enumerating until we get an attribute not found response (or
1146 	 * another error), otherwise we would keep returning the same attribute
1147 	 * over and over again and all programs using us for enumeration would
1148 	 * lock up in a tight loop.
1149 	 */
1150 	do {
1151 		err = ntfs_attr_find(type, name, name_len, ic, val, val_len,
1152 				ctx);
1153 	} while (!err);
1154 	ntfs_debug("Done, not found.");
1155 	return err;
1156 }
1157 
1158 /**
1159  * ntfs_attr_lookup - find an attribute in an ntfs inode
1160  * @type:	attribute type to find
1161  * @name:	attribute name to find (optional, i.e. NULL means don't care)
1162  * @name_len:	attribute name length (only needed if @name present)
1163  * @ic:		IGNORE_CASE or CASE_SENSITIVE (ignored if @name not present)
1164  * @lowest_vcn:	lowest vcn to find (optional, non-resident attributes only)
1165  * @val:	attribute value to find (optional, resident attributes only)
1166  * @val_len:	attribute value length
1167  * @ctx:	search context with mft record and attribute to search from
1168  *
1169  * Find an attribute in an ntfs inode.  On first search @ctx->ntfs_ino must
1170  * be the base mft record and @ctx must have been obtained from a call to
1171  * ntfs_attr_get_search_ctx().
1172  *
1173  * This function transparently handles attribute lists and @ctx is used to
1174  * continue searches where they were left off.
1175  *
1176  * After finishing with the attribute/mft record you need to call
1177  * ntfs_attr_put_search_ctx() to cleanup the search context (unmapping any
1178  * mapped inodes, etc).
1179  *
1180  * Return 0 if the search was successful and -errno if not.
1181  *
1182  * When 0, @ctx->attr is the found attribute and it is in mft record
1183  * @ctx->mrec.  If an attribute list attribute is present, @ctx->al_entry is
1184  * the attribute list entry of the found attribute.
1185  *
1186  * When -ENOENT, @ctx->attr is the attribute which collates just after the
1187  * attribute being searched for, i.e. if one wants to add the attribute to the
1188  * mft record this is the correct place to insert it into.  If an attribute
1189  * list attribute is present, @ctx->al_entry is the attribute list entry which
1190  * collates just after the attribute list entry of the attribute being searched
1191  * for, i.e. if one wants to add the attribute to the mft record this is the
1192  * correct place to insert its attribute list entry into.
1193  *
1194  * When -errno != -ENOENT, an error occurred during the lookup.  @ctx->attr is
1195  * then undefined and in particular you should not rely on it not changing.
1196  */
1197 int ntfs_attr_lookup(const ATTR_TYPE type, const ntfschar *name,
1198 		const u32 name_len, const IGNORE_CASE_BOOL ic,
1199 		const VCN lowest_vcn, const u8 *val, const u32 val_len,
1200 		ntfs_attr_search_ctx *ctx)
1201 {
1202 	ntfs_inode *base_ni;
1203 
1204 	ntfs_debug("Entering.");
1205 	BUG_ON(IS_ERR(ctx->mrec));
1206 	if (ctx->base_ntfs_ino)
1207 		base_ni = ctx->base_ntfs_ino;
1208 	else
1209 		base_ni = ctx->ntfs_ino;
1210 	/* Sanity check, just for debugging really. */
1211 	BUG_ON(!base_ni);
1212 	if (!NInoAttrList(base_ni) || type == AT_ATTRIBUTE_LIST)
1213 		return ntfs_attr_find(type, name, name_len, ic, val, val_len,
1214 				ctx);
1215 	return ntfs_external_attr_find(type, name, name_len, ic, lowest_vcn,
1216 			val, val_len, ctx);
1217 }
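
/*
 * Illustrative sketch, an assumption for this example only: looking up the
 * unnamed $DATA attribute of an inode whose base mft record is not yet
 * mapped.
 *
 *	m = map_mft_record(base_ni);
 *	if (IS_ERR(m))
 *		return PTR_ERR(m);
 *	ctx = ntfs_attr_get_search_ctx(base_ni, m);
 *	if (!ctx) {
 *		unmap_mft_record(base_ni);
 *		return -ENOMEM;
 *	}
 *	err = ntfs_attr_lookup(AT_DATA, NULL, 0, CASE_SENSITIVE, 0, NULL, 0,
 *			ctx);
 *	if (!err) {
 *		a = ctx->attr;
 *		use_attribute(a);
 *	}
 *	ntfs_attr_put_search_ctx(ctx);
 *	unmap_mft_record(base_ni);
 *
 * Any use of ctx->attr must happen before the search context is put and the
 * mft record unmapped; use_attribute() is a placeholder invented for this
 * example.
 */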
1218 
1219 /**
1220  * ntfs_attr_init_search_ctx - initialize an attribute search context
1221  * @ctx:	attribute search context to initialize
1222  * @ni:		ntfs inode with which to initialize the search context
1223  * @mrec:	mft record with which to initialize the search context
1224  *
1225  * Initialize the attribute search context @ctx with @ni and @mrec.
1226  */
1227 static inline void ntfs_attr_init_search_ctx(ntfs_attr_search_ctx *ctx,
1228 		ntfs_inode *ni, MFT_RECORD *mrec)
1229 {
1230 	*ctx = (ntfs_attr_search_ctx) {
1231 		.mrec = mrec,
1232 		/* Sanity checks are performed elsewhere. */
1233 		.attr = (ATTR_RECORD*)((u8*)mrec +
1234 				le16_to_cpu(mrec->attrs_offset)),
1235 		.is_first = true,
1236 		.ntfs_ino = ni,
1237 	};
1238 }
1239 
1240 /**
1241  * ntfs_attr_reinit_search_ctx - reinitialize an attribute search context
1242  * @ctx:	attribute search context to reinitialize
1243  *
1244  * Reinitialize the attribute search context @ctx, unmapping an associated
1245  * extent mft record if present, and initialize the search context again.
1246  *
1247  * This is used when a search for a new attribute is being started to reset
1248  * the search context to the beginning.
1249  */
1250 void ntfs_attr_reinit_search_ctx(ntfs_attr_search_ctx *ctx)
1251 {
1252 	if (likely(!ctx->base_ntfs_ino)) {
1253 		/* No attribute list. */
1254 		ctx->is_first = true;
1255 		/* Sanity checks are performed elsewhere. */
1256 		ctx->attr = (ATTR_RECORD*)((u8*)ctx->mrec +
1257 				le16_to_cpu(ctx->mrec->attrs_offset));
1258 		/*
1259 		 * This needs resetting due to ntfs_external_attr_find() which
1260 		 * can leave it set despite having zeroed ctx->base_ntfs_ino.
1261 		 */
1262 		ctx->al_entry = NULL;
1263 		return;
1264 	} /* Attribute list. */
1265 	if (ctx->ntfs_ino != ctx->base_ntfs_ino)
1266 		unmap_extent_mft_record(ctx->ntfs_ino);
1267 	ntfs_attr_init_search_ctx(ctx, ctx->base_ntfs_ino, ctx->base_mrec);
1268 	return;
1269 }
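
/*
 * Illustrative sketch, an assumption for this example only: performing two
 * unrelated lookups with the same search context by reinitializing it in
 * between.
 *
 *	err = ntfs_attr_lookup(AT_STANDARD_INFORMATION, NULL, 0,
 *			CASE_SENSITIVE, 0, NULL, 0, ctx);
 *	if (err)
 *		goto err_out;
 *	process_standard_information(ctx->attr);
 *	ntfs_attr_reinit_search_ctx(ctx);
 *	err = ntfs_attr_lookup(AT_DATA, NULL, 0, CASE_SENSITIVE, 0, NULL, 0,
 *			ctx);
 *
 * Without the reinitialization the second lookup would continue from where
 * the first one stopped instead of starting from the beginning of the mft
 * record.  process_standard_information() is a placeholder invented for this
 * example.
 */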
1270 
1271 /**
1272  * ntfs_attr_get_search_ctx - allocate/initialize a new attribute search context
1273  * @ni:		ntfs inode with which to initialize the search context
1274  * @mrec:	mft record with which to initialize the search context
1275  *
1276  * Allocate a new attribute search context, initialize it with @ni and @mrec,
1277  * and return it. Return NULL if allocation failed.
1278  */
1279 ntfs_attr_search_ctx *ntfs_attr_get_search_ctx(ntfs_inode *ni, MFT_RECORD *mrec)
1280 {
1281 	ntfs_attr_search_ctx *ctx;
1282 
1283 	ctx = kmem_cache_alloc(ntfs_attr_ctx_cache, GFP_NOFS);
1284 	if (ctx)
1285 		ntfs_attr_init_search_ctx(ctx, ni, mrec);
1286 	return ctx;
1287 }
1288 
1289 /**
1290  * ntfs_attr_put_search_ctx - release an attribute search context
1291  * @ctx:	attribute search context to free
1292  *
1293  * Release the attribute search context @ctx, unmapping an associated extent
1294  * mft record if present.
1295  */
1296 void ntfs_attr_put_search_ctx(ntfs_attr_search_ctx *ctx)
1297 {
1298 	if (ctx->base_ntfs_ino && ctx->ntfs_ino != ctx->base_ntfs_ino)
1299 		unmap_extent_mft_record(ctx->ntfs_ino);
1300 	kmem_cache_free(ntfs_attr_ctx_cache, ctx);
1301 	return;
1302 }
1303 
1304 #ifdef NTFS_RW
1305 
1306 /**
1307  * ntfs_attr_find_in_attrdef - find an attribute in the $AttrDef system file
1308  * @vol:	ntfs volume to which the attribute belongs
1309  * @type:	attribute type which to find
1310  *
1311  * Search for the attribute definition record corresponding to the attribute
1312  * @type in the $AttrDef system file.
1313  *
1314  * Return the attribute type definition record if found and NULL if not found.
1315  */
1316 static ATTR_DEF *ntfs_attr_find_in_attrdef(const ntfs_volume *vol,
1317 		const ATTR_TYPE type)
1318 {
1319 	ATTR_DEF *ad;
1320 
1321 	BUG_ON(!vol->attrdef);
1322 	BUG_ON(!type);
1323 	for (ad = vol->attrdef; (u8*)ad - (u8*)vol->attrdef <
1324 			vol->attrdef_size && ad->type; ++ad) {
1325 		/* We have not found it yet, carry on searching. */
1326 		if (likely(le32_to_cpu(ad->type) < le32_to_cpu(type)))
1327 			continue;
1328 		/* We found the attribute; return it. */
1329 		if (likely(ad->type == type))
1330 			return ad;
1331 		/* We have gone too far already.  No point in continuing. */
1332 		break;
1333 	}
1334 	/* Attribute not found. */
1335 	ntfs_debug("Attribute type 0x%x not found in $AttrDef.",
1336 			le32_to_cpu(type));
1337 	return NULL;
1338 }
1339 
1340 /**
1341  * ntfs_attr_size_bounds_check - check a size of an attribute type for validity
1342  * @vol:	ntfs volume to which the attribute belongs
1343  * @type:	attribute type which to check
1344  * @size:	size which to check
1345  *
1346  * Check whether the @size in bytes is valid for an attribute of @type on the
1347  * ntfs volume @vol.  This information is obtained from $AttrDef system file.
1348  *
1349  * Return 0 if valid, -ERANGE if not valid, or -ENOENT if the attribute is not
1350  * listed in $AttrDef.
1351  */
1352 int ntfs_attr_size_bounds_check(const ntfs_volume *vol, const ATTR_TYPE type,
1353 		const s64 size)
1354 {
1355 	ATTR_DEF *ad;
1356 
1357 	BUG_ON(size < 0);
1358 	/*
1359 	 * $ATTRIBUTE_LIST has a maximum size of 256kiB, but this is not
1360 	 * listed in $AttrDef.
1361 	 */
1362 	if (unlikely(type == AT_ATTRIBUTE_LIST && size > 256 * 1024))
1363 		return -ERANGE;
1364 	/* Get the $AttrDef entry for the attribute @type. */
1365 	ad = ntfs_attr_find_in_attrdef(vol, type);
1366 	if (unlikely(!ad))
1367 		return -ENOENT;
1368 	/* Do the bounds check. */
1369 	if (((sle64_to_cpu(ad->min_size) > 0) &&
1370 			size < sle64_to_cpu(ad->min_size)) ||
1371 			((sle64_to_cpu(ad->max_size) > 0) && size >
1372 			sle64_to_cpu(ad->max_size)))
1373 		return -ERANGE;
1374 	return 0;
1375 }
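
/*
 * Illustrative sketch, an assumption for this example only: validating a
 * proposed attribute size before attempting to resize the attribute.
 *
 *	err = ntfs_attr_size_bounds_check(vol, ni->type, new_size);
 *	if (err == -ERANGE)
 *		return -EFBIG;
 *	if (err == -ENOENT)
 *		return -EIO;
 *
 * Mapping -ERANGE to -EFBIG and -ENOENT to -EIO is one plausible policy for a
 * caller growing a file; the important point is that both cases are rejected
 * before any resizing work is done.
 */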
1376 
1377 /**
1378  * ntfs_attr_can_be_non_resident - check if an attribute can be non-resident
1379  * @vol:	ntfs volume to which the attribute belongs
1380  * @type:	attribute type which to check
1381  *
1382  * Check whether the attribute of @type on the ntfs volume @vol is allowed to
1383  * be non-resident.  This information is obtained from $AttrDef system file.
1384  *
1385  * Return 0 if the attribute is allowed to be non-resident, -EPERM if not, and
1386  * -ENOENT if the attribute is not listed in $AttrDef.
1387  */
1388 int ntfs_attr_can_be_non_resident(const ntfs_volume *vol, const ATTR_TYPE type)
1389 {
1390 	ATTR_DEF *ad;
1391 
1392 	/* Find the attribute definition record in $AttrDef. */
1393 	ad = ntfs_attr_find_in_attrdef(vol, type);
1394 	if (unlikely(!ad))
1395 		return -ENOENT;
1396 	/* Check the flags and return the result. */
1397 	if (ad->flags & ATTR_DEF_RESIDENT)
1398 		return -EPERM;
1399 	return 0;
1400 }
1401 
1402 /**
1403  * ntfs_attr_can_be_resident - check if an attribute can be resident
1404  * @vol:	ntfs volume to which the attribute belongs
1405  * @type:	attribute type which to check
1406  *
1407  * Check whether the attribute of @type on the ntfs volume @vol is allowed to
1408  * be resident.  This information is derived from our ntfs knowledge and may
1409  * not be completely accurate, especially when user defined attributes are
1410  * present.  Basically we allow everything to be resident except for index
1411  * allocation and $EA attributes.
1412  *
1413  * Return 0 if the attribute is allowed to be resident and -EPERM if not.
1414  *
1415  * Warning: In the system file $MFT the attribute $Bitmap must be non-resident
1416  *	    otherwise windows will not boot (blue screen of death)!  We cannot
1417  *	    check for this here as we do not know which inode's $Bitmap is
1418  *	    being asked about so the caller needs to special case this.
1419  */
1420 int ntfs_attr_can_be_resident(const ntfs_volume *vol, const ATTR_TYPE type)
1421 {
1422 	if (type == AT_INDEX_ALLOCATION)
1423 		return -EPERM;
1424 	return 0;
1425 }
1426 
1427 /**
1428  * ntfs_attr_record_resize - resize an attribute record
1429  * @m:		mft record containing attribute record
1430  * @a:		attribute record to resize
1431  * @new_size:	new size in bytes to which to resize the attribute record @a
1432  *
1433  * Resize the attribute record @a, i.e. the resident part of the attribute, in
1434  * the mft record @m to @new_size bytes.
1435  *
1436  * Return 0 on success and -errno on error.  The following error codes are
1437  * defined:
1438  *	-ENOSPC	- Not enough space in the mft record @m to perform the resize.
1439  *
1440  * Note: On error, no modifications have been performed whatsoever.
1441  *
1442  * Warning: If you make a record smaller without having copied all the data you
1443  *	    are interested in, the data may be overwritten.
1444  */
1445 int ntfs_attr_record_resize(MFT_RECORD *m, ATTR_RECORD *a, u32 new_size)
1446 {
1447 	ntfs_debug("Entering for new_size %u.", new_size);
1448 	/* Align to 8 bytes if it is not already done. */
1449 	if (new_size & 7)
1450 		new_size = (new_size + 7) & ~7;
1451 	/* If the actual attribute length has changed, move things around. */
1452 	if (new_size != le32_to_cpu(a->length)) {
1453 		u32 new_muse = le32_to_cpu(m->bytes_in_use) -
1454 				le32_to_cpu(a->length) + new_size;
1455 		/* Not enough space in this mft record. */
1456 		if (new_muse > le32_to_cpu(m->bytes_allocated))
1457 			return -ENOSPC;
1458 		/* Move attributes following @a to their new location. */
1459 		memmove((u8*)a + new_size, (u8*)a + le32_to_cpu(a->length),
1460 				le32_to_cpu(m->bytes_in_use) - ((u8*)a -
1461 				(u8*)m) - le32_to_cpu(a->length));
1462 		/* Adjust @m to reflect the change in used space. */
1463 		m->bytes_in_use = cpu_to_le32(new_muse);
1464 		/* Adjust @a to reflect the new size. */
1465 		if (new_size >= offsetof(ATTR_REC, length) + sizeof(a->length))
1466 			a->length = cpu_to_le32(new_size);
1467 	}
1468 	return 0;
1469 }
1470 
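/*
 * Example (illustrative sketch, hypothetical helper): growing an attribute
 * record inside an already mapped and locked mft record.  Note that
 * ntfs_attr_record_resize() rounds the requested size up to an 8-byte
 * multiple itself, so the caller passes the raw byte count.
 */
#if 0
static int example_grow_record(MFT_RECORD *m, ATTR_RECORD *a)
{
	int err;

	/* Ask for 64 more bytes; following attributes are moved as needed. */
	err = ntfs_attr_record_resize(m, a, le32_to_cpu(a->length) + 64);
	if (err == -ENOSPC)
		ntfs_debug("Mft record is full, need to make space first.");
	return err;
}
#endif
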
1471 /**
1472  * ntfs_resident_attr_value_resize - resize the value of a resident attribute
1473  * @m:		mft record containing attribute record
1474  * @a:		attribute record whose value to resize
1475  * @new_size:	new size in bytes to which to resize the attribute value of @a
1476  *
1477  * Resize the value of the attribute @a in the mft record @m to @new_size bytes.
1478  * If the value is made bigger, the newly allocated space is cleared.
1479  *
1480  * Return 0 on success and -errno on error.  The following error codes are
1481  * defined:
1482  *	-ENOSPC	- Not enough space in the mft record @m to perform the resize.
1483  *
1484  * Note: On error, no modifications have been performed whatsoever.
1485  *
1486  * Warning: If you make a record smaller without having copied all the data you
1487  *	    are interested in, the data may be overwritten.
1488  */
1489 int ntfs_resident_attr_value_resize(MFT_RECORD *m, ATTR_RECORD *a,
1490 		const u32 new_size)
1491 {
1492 	u32 old_size;
1493 
1494 	/* Resize the resident part of the attribute record. */
1495 	if (ntfs_attr_record_resize(m, a,
1496 			le16_to_cpu(a->data.resident.value_offset) + new_size))
1497 		return -ENOSPC;
1498 	/*
1499 	 * The resize succeeded!  If we made the attribute value bigger, clear
1500 	 * the area between the old size and @new_size.
1501 	 */
1502 	old_size = le32_to_cpu(a->data.resident.value_length);
1503 	if (new_size > old_size)
1504 		memset((u8*)a + le16_to_cpu(a->data.resident.value_offset) +
1505 				old_size, 0, new_size - old_size);
1506 	/* Finally update the length of the attribute value. */
1507 	a->data.resident.value_length = cpu_to_le32(new_size);
1508 	return 0;
1509 }
1510 
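/*
 * Example (illustrative sketch, hypothetical helper): growing the value of a
 * resident attribute.  The newly exposed bytes are zeroed by
 * ntfs_resident_attr_value_resize(), so the caller only copies in its own
 * data afterwards.  @m and @a are assumed to come from a prior lookup.
 */
#if 0
static int example_grow_resident_value(MFT_RECORD *m, ATTR_RECORD *a,
		const u8 *data, const u32 len)
{
	int err;

	err = ntfs_resident_attr_value_resize(m, a, len);
	if (err)	/* Only -ENOSPC is possible here. */
		return err;
	/* The value area now holds @len bytes; fill it in. */
	memcpy((u8*)a + le16_to_cpu(a->data.resident.value_offset), data, len);
	return 0;
}
#endif
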
1511 /**
1512  * ntfs_attr_make_non_resident - convert a resident to a non-resident attribute
1513  * @ni:		ntfs inode describing the attribute to convert
1514  * @data_size:	size of the resident data to copy to the non-resident attribute
1515  *
1516  * Convert the resident ntfs attribute described by the ntfs inode @ni to a
1517  * non-resident one.
1518  *
1519  * @data_size must be equal to the attribute value size.  This is needed since
1520  * we need to know the size before we can map the mft record and our callers
1521  * always know it.  The reason we cannot simply read the size from the vfs
1522  * inode i_size is that this is not necessarily uptodate.  This happens when
1523  * ntfs_attr_make_non_resident() is called in the ->truncate call path(s).
1524  *
1525  * Return 0 on success and -errno on error.  The following error return codes
1526  * are defined:
1527  *	-EPERM	- The attribute is not allowed to be non-resident.
1528  *	-ENOMEM	- Not enough memory.
1529  *	-ENOSPC	- Not enough disk space.
1530  *	-EINVAL	- Attribute not defined on the volume.
1531  *	-EIO	- I/O error or other error.
1532  * Note that -ENOSPC is also returned in the case that there is not enough
1533  * space in the mft record to do the conversion.  This can happen when the mft
1534  * record is already very full.  The caller is responsible for trying to make
1535  * space in the mft record and trying again.  FIXME: Do we need a separate
1536  * error return code for this kind of -ENOSPC or is it always worth trying
1537  * again in case the attribute may then fit in a resident state so no need to
1538  * make it non-resident at all?  Ho-hum...  (AIA)
1539  *
1540  * NOTE to self: No changes in the attribute list are required to move from
1541  *		 a resident to a non-resident attribute.
1542  *
1543  * Locking: - The caller must hold i_mutex on the inode.
1544  */
1545 int ntfs_attr_make_non_resident(ntfs_inode *ni, const u32 data_size)
1546 {
1547 	s64 new_size;
1548 	struct inode *vi = VFS_I(ni);
1549 	ntfs_volume *vol = ni->vol;
1550 	ntfs_inode *base_ni;
1551 	MFT_RECORD *m;
1552 	ATTR_RECORD *a;
1553 	ntfs_attr_search_ctx *ctx;
1554 	struct page *page;
1555 	runlist_element *rl;
1556 	u8 *kaddr;
1557 	unsigned long flags;
1558 	int mp_size, mp_ofs, name_ofs, arec_size, err, err2;
1559 	u32 attr_size;
1560 	u8 old_res_attr_flags;
1561 
1562 	/* Check that the attribute is allowed to be non-resident. */
1563 	err = ntfs_attr_can_be_non_resident(vol, ni->type);
1564 	if (unlikely(err)) {
1565 		if (err == -EPERM)
1566 			ntfs_debug("Attribute is not allowed to be "
1567 					"non-resident.");
1568 		else
1569 			ntfs_debug("Attribute not defined on the NTFS "
1570 					"volume!");
1571 		return err;
1572 	}
1573 	/*
1574 	 * FIXME: Compressed and encrypted attributes are not supported when
1575 	 * writing and we should never have gotten here for them.
1576 	 */
1577 	BUG_ON(NInoCompressed(ni));
1578 	BUG_ON(NInoEncrypted(ni));
1579 	/*
1580 	 * The size needs to be aligned to a cluster boundary for allocation
1581 	 * purposes.
1582 	 */
1583 	new_size = (data_size + vol->cluster_size - 1) &
1584 			~(vol->cluster_size - 1);
1585 	if (new_size > 0) {
1586 		/*
1587 		 * Will need the page later and since the page lock nests
1588 		 * outside all ntfs locks, we need to get the page now.
1589 		 */
1590 		page = find_or_create_page(vi->i_mapping, 0,
1591 				mapping_gfp_mask(vi->i_mapping));
1592 		if (unlikely(!page))
1593 			return -ENOMEM;
1594 		/* Start by allocating clusters to hold the attribute value. */
1595 		rl = ntfs_cluster_alloc(vol, 0, new_size >>
1596 				vol->cluster_size_bits, -1, DATA_ZONE, true);
1597 		if (IS_ERR(rl)) {
1598 			err = PTR_ERR(rl);
1599 			ntfs_debug("Failed to allocate cluster%s, error code "
1600 					"%i.", (new_size >>
1601 					vol->cluster_size_bits) > 1 ? "s" : "",
1602 					err);
1603 			goto page_err_out;
1604 		}
1605 	} else {
1606 		rl = NULL;
1607 		page = NULL;
1608 	}
1609 	/* Determine the size of the mapping pairs array. */
1610 	mp_size = ntfs_get_size_for_mapping_pairs(vol, rl, 0, -1);
1611 	if (unlikely(mp_size < 0)) {
1612 		err = mp_size;
1613 		ntfs_debug("Failed to get size for mapping pairs array, error "
1614 				"code %i.", err);
1615 		goto rl_err_out;
1616 	}
1617 	down_write(&ni->runlist.lock);
1618 	if (!NInoAttr(ni))
1619 		base_ni = ni;
1620 	else
1621 		base_ni = ni->ext.base_ntfs_ino;
1622 	m = map_mft_record(base_ni);
1623 	if (IS_ERR(m)) {
1624 		err = PTR_ERR(m);
1625 		m = NULL;
1626 		ctx = NULL;
1627 		goto err_out;
1628 	}
1629 	ctx = ntfs_attr_get_search_ctx(base_ni, m);
1630 	if (unlikely(!ctx)) {
1631 		err = -ENOMEM;
1632 		goto err_out;
1633 	}
1634 	err = ntfs_attr_lookup(ni->type, ni->name, ni->name_len,
1635 			CASE_SENSITIVE, 0, NULL, 0, ctx);
1636 	if (unlikely(err)) {
1637 		if (err == -ENOENT)
1638 			err = -EIO;
1639 		goto err_out;
1640 	}
1641 	m = ctx->mrec;
1642 	a = ctx->attr;
1643 	BUG_ON(NInoNonResident(ni));
1644 	BUG_ON(a->non_resident);
1645 	/*
1646 	 * Calculate new offsets for the name and the mapping pairs array.
1647 	 */
1648 	if (NInoSparse(ni) || NInoCompressed(ni))
1649 		name_ofs = (offsetof(ATTR_REC,
1650 				data.non_resident.compressed_size) +
1651 				sizeof(a->data.non_resident.compressed_size) +
1652 				7) & ~7;
1653 	else
1654 		name_ofs = (offsetof(ATTR_REC,
1655 				data.non_resident.compressed_size) + 7) & ~7;
1656 	mp_ofs = (name_ofs + a->name_length * sizeof(ntfschar) + 7) & ~7;
1657 	/*
1658 	 * Determine the size of the resident part of the now non-resident
1659 	 * attribute record.
1660 	 */
1661 	arec_size = (mp_ofs + mp_size + 7) & ~7;
1662 	/*
1663 	 * If the page is not uptodate bring it uptodate by copying from the
1664 	 * attribute value.
1665 	 */
1666 	attr_size = le32_to_cpu(a->data.resident.value_length);
1667 	BUG_ON(attr_size != data_size);
1668 	if (page && !PageUptodate(page)) {
1669 		kaddr = kmap_atomic(page);
1670 		memcpy(kaddr, (u8*)a +
1671 				le16_to_cpu(a->data.resident.value_offset),
1672 				attr_size);
1673 		memset(kaddr + attr_size, 0, PAGE_SIZE - attr_size);
1674 		kunmap_atomic(kaddr);
1675 		flush_dcache_page(page);
1676 		SetPageUptodate(page);
1677 	}
1678 	/* Backup the attribute flag. */
1679 	old_res_attr_flags = a->data.resident.flags;
1680 	/* Resize the resident part of the attribute record. */
1681 	err = ntfs_attr_record_resize(m, a, arec_size);
1682 	if (unlikely(err))
1683 		goto err_out;
1684 	/*
1685 	 * Convert the resident part of the attribute record to describe a
1686 	 * non-resident attribute.
1687 	 */
1688 	a->non_resident = 1;
1689 	/* Move the attribute name if it exists and update the offset. */
1690 	if (a->name_length)
1691 		memmove((u8*)a + name_ofs, (u8*)a + le16_to_cpu(a->name_offset),
1692 				a->name_length * sizeof(ntfschar));
1693 	a->name_offset = cpu_to_le16(name_ofs);
1694 	/* Setup the fields specific to non-resident attributes. */
1695 	a->data.non_resident.lowest_vcn = 0;
1696 	a->data.non_resident.highest_vcn = cpu_to_sle64((new_size - 1) >>
1697 			vol->cluster_size_bits);
1698 	a->data.non_resident.mapping_pairs_offset = cpu_to_le16(mp_ofs);
1699 	memset(&a->data.non_resident.reserved, 0,
1700 			sizeof(a->data.non_resident.reserved));
1701 	a->data.non_resident.allocated_size = cpu_to_sle64(new_size);
1702 	a->data.non_resident.data_size =
1703 			a->data.non_resident.initialized_size =
1704 			cpu_to_sle64(attr_size);
1705 	if (NInoSparse(ni) || NInoCompressed(ni)) {
1706 		a->data.non_resident.compression_unit = 0;
1707 		if (NInoCompressed(ni) || vol->major_ver < 3)
1708 			a->data.non_resident.compression_unit = 4;
1709 		a->data.non_resident.compressed_size =
1710 				a->data.non_resident.allocated_size;
1711 	} else
1712 		a->data.non_resident.compression_unit = 0;
1713 	/* Generate the mapping pairs array into the attribute record. */
1714 	err = ntfs_mapping_pairs_build(vol, (u8*)a + mp_ofs,
1715 			arec_size - mp_ofs, rl, 0, -1, NULL);
1716 	if (unlikely(err)) {
1717 		ntfs_debug("Failed to build mapping pairs, error code %i.",
1718 				err);
1719 		goto undo_err_out;
1720 	}
1721 	/* Setup the in-memory attribute structure to be non-resident. */
1722 	ni->runlist.rl = rl;
1723 	write_lock_irqsave(&ni->size_lock, flags);
1724 	ni->allocated_size = new_size;
1725 	if (NInoSparse(ni) || NInoCompressed(ni)) {
1726 		ni->itype.compressed.size = ni->allocated_size;
1727 		if (a->data.non_resident.compression_unit) {
1728 			ni->itype.compressed.block_size = 1U << (a->data.
1729 					non_resident.compression_unit +
1730 					vol->cluster_size_bits);
1731 			ni->itype.compressed.block_size_bits =
1732 					ffs(ni->itype.compressed.block_size) -
1733 					1;
1734 			ni->itype.compressed.block_clusters = 1U <<
1735 					a->data.non_resident.compression_unit;
1736 		} else {
1737 			ni->itype.compressed.block_size = 0;
1738 			ni->itype.compressed.block_size_bits = 0;
1739 			ni->itype.compressed.block_clusters = 0;
1740 		}
1741 		vi->i_blocks = ni->itype.compressed.size >> 9;
1742 	} else
1743 		vi->i_blocks = ni->allocated_size >> 9;
1744 	write_unlock_irqrestore(&ni->size_lock, flags);
1745 	/*
1746 	 * This needs to be last since the address space operations ->read_folio
1747 	 * and ->writepage can run concurrently with us as they are not
1748 	 * serialized on i_mutex.  Note, we are not allowed to fail once we flip
1749 	 * this switch, which is another reason to do this last.
1750 	 */
1751 	NInoSetNonResident(ni);
1752 	/* Mark the mft record dirty, so it gets written back. */
1753 	flush_dcache_mft_record_page(ctx->ntfs_ino);
1754 	mark_mft_record_dirty(ctx->ntfs_ino);
1755 	ntfs_attr_put_search_ctx(ctx);
1756 	unmap_mft_record(base_ni);
1757 	up_write(&ni->runlist.lock);
1758 	if (page) {
1759 		set_page_dirty(page);
1760 		unlock_page(page);
1761 		put_page(page);
1762 	}
1763 	ntfs_debug("Done.");
1764 	return 0;
1765 undo_err_out:
1766 	/* Convert the attribute back into a resident attribute. */
1767 	a->non_resident = 0;
1768 	/* Move the attribute name if it exists and update the offset. */
1769 	name_ofs = (offsetof(ATTR_RECORD, data.resident.reserved) +
1770 			sizeof(a->data.resident.reserved) + 7) & ~7;
1771 	if (a->name_length)
1772 		memmove((u8*)a + name_ofs, (u8*)a + le16_to_cpu(a->name_offset),
1773 				a->name_length * sizeof(ntfschar));
1774 	mp_ofs = (name_ofs + a->name_length * sizeof(ntfschar) + 7) & ~7;
1775 	a->name_offset = cpu_to_le16(name_ofs);
1776 	arec_size = (mp_ofs + attr_size + 7) & ~7;
1777 	/* Resize the resident part of the attribute record. */
1778 	err2 = ntfs_attr_record_resize(m, a, arec_size);
1779 	if (unlikely(err2)) {
1780 		/*
1781 		 * This cannot happen (well if memory corruption is at work it
1782 		 * could happen in theory), but deal with it as well as we can.
1783 		 * If the old size is too small, truncate the attribute,
1784 		 * otherwise simply give it a larger allocated size.
1785 		 * FIXME: Should check whether chkdsk complains when the
1786 		 * allocated size is much bigger than the resident value size.
1787 		 */
1788 		arec_size = le32_to_cpu(a->length);
1789 		if ((mp_ofs + attr_size) > arec_size) {
1790 			err2 = attr_size;
1791 			attr_size = arec_size - mp_ofs;
1792 			ntfs_error(vol->sb, "Failed to undo partial resident "
1793 					"to non-resident attribute "
1794 					"conversion.  Truncating inode 0x%lx, "
1795 					"attribute type 0x%x from %i bytes to "
1796 					"%i bytes to maintain metadata "
1797 					"consistency.  THIS MEANS YOU ARE "
1798 					"LOSING %i BYTES DATA FROM THIS %s.",
1799 					vi->i_ino,
1800 					(unsigned)le32_to_cpu(ni->type),
1801 					err2, attr_size, err2 - attr_size,
1802 					((ni->type == AT_DATA) &&
1803 					!ni->name_len) ? "FILE": "ATTRIBUTE");
1804 			write_lock_irqsave(&ni->size_lock, flags);
1805 			ni->initialized_size = attr_size;
1806 			i_size_write(vi, attr_size);
1807 			write_unlock_irqrestore(&ni->size_lock, flags);
1808 		}
1809 	}
1810 	/* Setup the fields specific to resident attributes. */
1811 	a->data.resident.value_length = cpu_to_le32(attr_size);
1812 	a->data.resident.value_offset = cpu_to_le16(mp_ofs);
1813 	a->data.resident.flags = old_res_attr_flags;
1814 	memset(&a->data.resident.reserved, 0,
1815 			sizeof(a->data.resident.reserved));
1816 	/* Copy the data from the page back to the attribute value. */
1817 	if (page) {
1818 		kaddr = kmap_atomic(page);
1819 		memcpy((u8*)a + mp_ofs, kaddr, attr_size);
1820 		kunmap_atomic(kaddr);
1821 	}
1822 	/* Setup the allocated size in the ntfs inode in case it changed. */
1823 	write_lock_irqsave(&ni->size_lock, flags);
1824 	ni->allocated_size = arec_size - mp_ofs;
1825 	write_unlock_irqrestore(&ni->size_lock, flags);
1826 	/* Mark the mft record dirty, so it gets written back. */
1827 	flush_dcache_mft_record_page(ctx->ntfs_ino);
1828 	mark_mft_record_dirty(ctx->ntfs_ino);
1829 err_out:
1830 	if (ctx)
1831 		ntfs_attr_put_search_ctx(ctx);
1832 	if (m)
1833 		unmap_mft_record(base_ni);
1834 	ni->runlist.rl = NULL;
1835 	up_write(&ni->runlist.lock);
1836 rl_err_out:
1837 	if (rl) {
1838 		if (ntfs_cluster_free_from_rl(vol, rl) < 0) {
1839 			ntfs_error(vol->sb, "Failed to release allocated "
1840 					"cluster(s) in error code path.  Run "
1841 					"chkdsk to recover the lost "
1842 					"cluster(s).");
1843 			NVolSetErrors(vol);
1844 		}
1845 		ntfs_free(rl);
1846 page_err_out:
1847 		unlock_page(page);
1848 		put_page(page);
1849 	}
1850 	if (err == -EINVAL)
1851 		err = -EIO;
1852 	return err;
1853 }
1854 
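/*
 * Example (illustrative sketch, hypothetical caller): the retry pattern the
 * comment above describes and which ntfs_attr_extend_allocation() below
 * implements.  An -ENOSPC return means the mft record itself was too full,
 * which the caller may try to remedy before giving up.  @attr_len must be
 * the current attribute value size, as required by the function.
 */
#if 0
static int example_force_non_resident(ntfs_inode *ni, const u32 attr_len)
{
	int err;

	err = ntfs_attr_make_non_resident(ni, attr_len);
	if (err == -EPERM || err == -ENOSPC) {
		/*
		 * Either this attribute type must stay resident or the mft
		 * record is too full.  A real caller would try making other
		 * attributes non-resident or moving them to extent mft
		 * records before failing the write.
		 */
	}
	return err;
}
#endif
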
1855 /**
1856  * ntfs_attr_extend_allocation - extend the allocated space of an attribute
1857  * @ni:			ntfs inode of the attribute whose allocation to extend
1858  * @new_alloc_size:	new size in bytes to which to extend the allocation
1859  * @new_data_size:	new size in bytes to which to extend the data
1860  * @data_start:		beginning of region which is required to be non-sparse
1861  *
1862  * Extend the allocated space of an attribute described by the ntfs inode @ni
1863  * to @new_alloc_size bytes.  If @data_start is -1, the whole extension may be
1864  * implemented as a hole in the file (as long as both the volume and the ntfs
1865  * inode @ni have sparse support enabled).  If @data_start is >= 0, then the
1866  * region between the old allocated size and @data_start - 1 may be made sparse
1867  * but the region between @data_start and @new_alloc_size must be backed by
1868  * actual clusters.
1869  *
1870  * If @new_data_size is -1, it is ignored.  If it is >= 0, then the data size
1871  * of the attribute is extended to @new_data_size.  Note that the i_size of the
1872  * vfs inode is not updated.  Only the data size in the base attribute record
1873  * is updated.  The caller has to update i_size separately if this is required.
1874  * WARNING: It is a BUG() for @new_data_size to be smaller than the old data
1875  * size as well as for @new_data_size to be greater than @new_alloc_size.
1876  *
1877  * For resident attributes this involves resizing the attribute record and if
1878  * necessary moving it and/or other attributes into extent mft records and/or
1879  * converting the attribute to a non-resident attribute which in turn involves
1880  * extending the allocation of a non-resident attribute as described below.
1881  *
1882  * For non-resident attributes this involves allocating clusters in the data
1883  * zone on the volume (except for regions that are being made sparse) and
1884  * extending the run list to describe the allocated clusters as well as
1885  * updating the mapping pairs array of the attribute.  This in turn involves
1886  * resizing the attribute record and if necessary moving it and/or other
1887  * attributes into extent mft records and/or splitting the attribute record
1888  * into multiple extent attribute records.
1889  *
1890  * Also, the attribute list attribute is updated if present and in some of the
1891  * above cases (the ones where extent mft records/attributes come into play),
1892  * an attribute list attribute is created if not already present.
1893  *
1894  * Return the new allocated size on success and -errno on error.  In the case
1895  * that an error is encountered but a partial extension at least up to
1896  * @data_start (if present) is possible, the allocation is partially extended
1897  * and this is returned.  This means the caller must check the returned size to
1898  * determine if the extension was partial.  If @data_start is -1 then partial
1899  * allocations are not performed.
1900  *
1901  * WARNING: Do not call ntfs_attr_extend_allocation() for $MFT/$DATA.
1902  *
1903  * Locking: This function takes the runlist lock of @ni for writing as well as
1904  * locking the mft record of the base ntfs inode.  These locks are maintained
1905  * throughout execution of the function.  These locks are required so that the
1906  * attribute can be resized safely and so that it can for example be converted
1907  * from resident to non-resident safely.
1908  *
1909  * TODO: At present attribute list attribute handling is not implemented.
1910  *
1911  * TODO: At present it is not safe to call this function for anything other
1912  * than the $DATA attribute(s) of an uncompressed and unencrypted file.
1913  */
1914 s64 ntfs_attr_extend_allocation(ntfs_inode *ni, s64 new_alloc_size,
1915 		const s64 new_data_size, const s64 data_start)
1916 {
1917 	VCN vcn;
1918 	s64 ll, allocated_size, start = data_start;
1919 	struct inode *vi = VFS_I(ni);
1920 	ntfs_volume *vol = ni->vol;
1921 	ntfs_inode *base_ni;
1922 	MFT_RECORD *m;
1923 	ATTR_RECORD *a;
1924 	ntfs_attr_search_ctx *ctx;
1925 	runlist_element *rl, *rl2;
1926 	unsigned long flags;
1927 	int err, mp_size;
1928 	u32 attr_len = 0; /* Silence stupid gcc warning. */
1929 	bool mp_rebuilt;
1930 
1931 #ifdef DEBUG
1932 	read_lock_irqsave(&ni->size_lock, flags);
1933 	allocated_size = ni->allocated_size;
1934 	read_unlock_irqrestore(&ni->size_lock, flags);
1935 	ntfs_debug("Entering for i_ino 0x%lx, attribute type 0x%x, "
1936 			"old_allocated_size 0x%llx, "
1937 			"new_allocated_size 0x%llx, new_data_size 0x%llx, "
1938 			"data_start 0x%llx.", vi->i_ino,
1939 			(unsigned)le32_to_cpu(ni->type),
1940 			(unsigned long long)allocated_size,
1941 			(unsigned long long)new_alloc_size,
1942 			(unsigned long long)new_data_size,
1943 			(unsigned long long)start);
1944 #endif
1945 retry_extend:
1946 	/*
1947 	 * For non-resident attributes, @start and @new_size need to be aligned
1948 	 * to cluster boundaries for allocation purposes.
1949 	 */
1950 	if (NInoNonResident(ni)) {
1951 		if (start > 0)
1952 			start &= ~(s64)vol->cluster_size_mask;
1953 		new_alloc_size = (new_alloc_size + vol->cluster_size - 1) &
1954 				~(s64)vol->cluster_size_mask;
1955 	}
1956 	BUG_ON(new_data_size >= 0 && new_data_size > new_alloc_size);
1957 	/* Check if new size is allowed in $AttrDef. */
1958 	err = ntfs_attr_size_bounds_check(vol, ni->type, new_alloc_size);
1959 	if (unlikely(err)) {
1960 		/* Only emit errors when the write will fail completely. */
1961 		read_lock_irqsave(&ni->size_lock, flags);
1962 		allocated_size = ni->allocated_size;
1963 		read_unlock_irqrestore(&ni->size_lock, flags);
1964 		if (start < 0 || start >= allocated_size) {
1965 			if (err == -ERANGE) {
1966 				ntfs_error(vol->sb, "Cannot extend allocation "
1967 						"of inode 0x%lx, attribute "
1968 						"type 0x%x, because the new "
1969 						"allocation would exceed the "
1970 						"maximum allowed size for "
1971 						"this attribute type.",
1972 						vi->i_ino, (unsigned)
1973 						le32_to_cpu(ni->type));
1974 			} else {
1975 				ntfs_error(vol->sb, "Cannot extend allocation "
1976 						"of inode 0x%lx, attribute "
1977 						"type 0x%x, because this "
1978 						"attribute type is not "
1979 						"defined on the NTFS volume.  "
1980 						"Possible corruption!  You "
1981 						"should run chkdsk!",
1982 						vi->i_ino, (unsigned)
1983 						le32_to_cpu(ni->type));
1984 			}
1985 		}
1986 		/* Translate error code to be POSIX conformant for write(2). */
1987 		if (err == -ERANGE)
1988 			err = -EFBIG;
1989 		else
1990 			err = -EIO;
1991 		return err;
1992 	}
1993 	if (!NInoAttr(ni))
1994 		base_ni = ni;
1995 	else
1996 		base_ni = ni->ext.base_ntfs_ino;
1997 	/*
1998 	 * We will be modifying both the runlist (if non-resident) and the mft
1999 	 * record so lock them both down.
2000 	 */
2001 	down_write(&ni->runlist.lock);
2002 	m = map_mft_record(base_ni);
2003 	if (IS_ERR(m)) {
2004 		err = PTR_ERR(m);
2005 		m = NULL;
2006 		ctx = NULL;
2007 		goto err_out;
2008 	}
2009 	ctx = ntfs_attr_get_search_ctx(base_ni, m);
2010 	if (unlikely(!ctx)) {
2011 		err = -ENOMEM;
2012 		goto err_out;
2013 	}
2014 	read_lock_irqsave(&ni->size_lock, flags);
2015 	allocated_size = ni->allocated_size;
2016 	read_unlock_irqrestore(&ni->size_lock, flags);
2017 	/*
2018 	 * If non-resident, seek to the last extent.  If resident, there is
2019 	 * only one extent, so seek to that.
2020 	 */
2021 	vcn = NInoNonResident(ni) ? allocated_size >> vol->cluster_size_bits :
2022 			0;
2023 	/*
2024 	 * Abort if someone did the work whilst we waited for the locks.  If we
2025 	 * just converted the attribute from resident to non-resident it is
2026 	 * likely that exactly this has happened already.  We cannot quite
2027 	 * abort if we need to update the data size.
2028 	 */
2029 	if (unlikely(new_alloc_size <= allocated_size)) {
2030 		ntfs_debug("Allocated size already exceeds requested size.");
2031 		new_alloc_size = allocated_size;
2032 		if (new_data_size < 0)
2033 			goto done;
2034 		/*
2035 		 * We want the first attribute extent so that we can update the
2036 		 * data size.
2037 		 */
2038 		vcn = 0;
2039 	}
2040 	err = ntfs_attr_lookup(ni->type, ni->name, ni->name_len,
2041 			CASE_SENSITIVE, vcn, NULL, 0, ctx);
2042 	if (unlikely(err)) {
2043 		if (err == -ENOENT)
2044 			err = -EIO;
2045 		goto err_out;
2046 	}
2047 	m = ctx->mrec;
2048 	a = ctx->attr;
2049 	/* Use goto to reduce indentation. */
2050 	if (a->non_resident)
2051 		goto do_non_resident_extend;
2052 	BUG_ON(NInoNonResident(ni));
2053 	/* The total length of the attribute value. */
2054 	attr_len = le32_to_cpu(a->data.resident.value_length);
2055 	/*
2056 	 * Extend the attribute record to be able to store the new attribute
2057 	 * size.  ntfs_attr_record_resize() will not do anything if the size is
2058 	 * not changing.
2059 	 */
2060 	if (new_alloc_size < vol->mft_record_size &&
2061 			!ntfs_attr_record_resize(m, a,
2062 			le16_to_cpu(a->data.resident.value_offset) +
2063 			new_alloc_size)) {
2064 		/* The resize succeeded! */
2065 		write_lock_irqsave(&ni->size_lock, flags);
2066 		ni->allocated_size = le32_to_cpu(a->length) -
2067 				le16_to_cpu(a->data.resident.value_offset);
2068 		write_unlock_irqrestore(&ni->size_lock, flags);
2069 		if (new_data_size >= 0) {
2070 			BUG_ON(new_data_size < attr_len);
2071 			a->data.resident.value_length =
2072 					cpu_to_le32((u32)new_data_size);
2073 		}
2074 		goto flush_done;
2075 	}
2076 	/*
2077 	 * We have to drop all the locks so we can call
2078 	 * ntfs_attr_make_non_resident().  This could be optimised by try-
2079 	 * locking the first page cache page and only if that fails dropping
2080 	 * the locks, locking the page, and redoing all the locking and
2081 	 * lookups.  While this would be a huge optimisation, it is not worth
2082 	 * it as this is definitely a slow code path.
2083 	 */
2084 	ntfs_attr_put_search_ctx(ctx);
2085 	unmap_mft_record(base_ni);
2086 	up_write(&ni->runlist.lock);
2087 	/*
2088 	 * Not enough space in the mft record, try to make the attribute
2089 	 * non-resident and if successful restart the extension process.
2090 	 */
2091 	err = ntfs_attr_make_non_resident(ni, attr_len);
2092 	if (likely(!err))
2093 		goto retry_extend;
2094 	/*
2095 	 * Could not make non-resident.  If this is due to this not being
2096 	 * permitted for this attribute type or there not being enough space,
2097 	 * try to make other attributes non-resident.  Otherwise fail.
2098 	 */
2099 	if (unlikely(err != -EPERM && err != -ENOSPC)) {
2100 		/* Only emit errors when the write will fail completely. */
2101 		read_lock_irqsave(&ni->size_lock, flags);
2102 		allocated_size = ni->allocated_size;
2103 		read_unlock_irqrestore(&ni->size_lock, flags);
2104 		if (start < 0 || start >= allocated_size)
2105 			ntfs_error(vol->sb, "Cannot extend allocation of "
2106 					"inode 0x%lx, attribute type 0x%x, "
2107 					"because the conversion from resident "
2108 					"to non-resident attribute failed "
2109 					"with error code %i.", vi->i_ino,
2110 					(unsigned)le32_to_cpu(ni->type), err);
2111 		if (err != -ENOMEM)
2112 			err = -EIO;
2113 		goto conv_err_out;
2114 	}
2115 	/* TODO: Not implemented from here, abort. */
2116 	read_lock_irqsave(&ni->size_lock, flags);
2117 	allocated_size = ni->allocated_size;
2118 	read_unlock_irqrestore(&ni->size_lock, flags);
2119 	if (start < 0 || start >= allocated_size) {
2120 		if (err == -ENOSPC)
2121 			ntfs_error(vol->sb, "Not enough space in the mft "
2122 					"record/on disk for the non-resident "
2123 					"attribute value.  This case is not "
2124 					"implemented yet.");
2125 		else /* if (err == -EPERM) */
2126 			ntfs_error(vol->sb, "This attribute type may not be "
2127 					"non-resident.  This case is not "
2128 					"implemented yet.");
2129 	}
2130 	err = -EOPNOTSUPP;
2131 	goto conv_err_out;
2132 #if 0
2133 	// TODO: Attempt to make other attributes non-resident.
2134 	if (!err)
2135 		goto do_resident_extend;
2136 	/*
2137 	 * Both the attribute list attribute and the standard information
2138 	 * attribute must remain in the base inode.  Thus, if this is one of
2139 	 * these attributes, we have to try to move other attributes out into
2140 	 * extent mft records instead.
2141 	 */
2142 	if (ni->type == AT_ATTRIBUTE_LIST ||
2143 			ni->type == AT_STANDARD_INFORMATION) {
2144 		// TODO: Attempt to move other attributes into extent mft
2145 		// records.
2146 		err = -EOPNOTSUPP;
2147 		if (!err)
2148 			goto do_resident_extend;
2149 		goto err_out;
2150 	}
2151 	// TODO: Attempt to move this attribute to an extent mft record, but
2152 	// only if it is not already the only attribute in an mft record in
2153 	// which case there would be nothing to gain.
2154 	err = -EOPNOTSUPP;
2155 	if (!err)
2156 		goto do_resident_extend;
2157 	/* There is nothing we can do to make enough space. )-: */
2158 	goto err_out;
2159 #endif
2160 do_non_resident_extend:
2161 	BUG_ON(!NInoNonResident(ni));
2162 	if (new_alloc_size == allocated_size) {
2163 		BUG_ON(vcn);
2164 		goto alloc_done;
2165 	}
2166 	/*
2167 	 * If the data starts after the end of the old allocation, this is a
2168 	 * $DATA attribute and sparse attributes are enabled on the volume and
2169 	 * for this inode, then create a sparse region between the old
2170 	 * allocated size and the start of the data.  Otherwise simply proceed
2171 	 * with filling the whole space between the old allocated size and the
2172 	 * new allocated size with clusters.
2173 	 */
2174 	if ((start >= 0 && start <= allocated_size) || ni->type != AT_DATA ||
2175 			!NVolSparseEnabled(vol) || NInoSparseDisabled(ni))
2176 		goto skip_sparse;
2177 	// TODO: This is not implemented yet.  We just fill in with real
2178 	// clusters for now...
2179 	ntfs_debug("Inserting holes is not implemented yet.  Falling back to
2180 			"allocating real clusters instead.");
2181 skip_sparse:
2182 	rl = ni->runlist.rl;
2183 	if (likely(rl)) {
2184 		/* Seek to the end of the runlist. */
2185 		while (rl->length)
2186 			rl++;
2187 	}
2188 	/* If this attribute extent is not mapped, map it now. */
2189 	if (unlikely(!rl || rl->lcn == LCN_RL_NOT_MAPPED ||
2190 			(rl->lcn == LCN_ENOENT && rl > ni->runlist.rl &&
2191 			(rl-1)->lcn == LCN_RL_NOT_MAPPED))) {
2192 		if (!rl && !allocated_size)
2193 			goto first_alloc;
2194 		rl = ntfs_mapping_pairs_decompress(vol, a, ni->runlist.rl);
2195 		if (IS_ERR(rl)) {
2196 			err = PTR_ERR(rl);
2197 			if (start < 0 || start >= allocated_size)
2198 				ntfs_error(vol->sb, "Cannot extend allocation "
2199 						"of inode 0x%lx, attribute "
2200 						"type 0x%x, because the "
2201 						"mapping of a runlist "
2202 						"fragment failed with error "
2203 						"code %i.", vi->i_ino,
2204 						(unsigned)le32_to_cpu(ni->type),
2205 						err);
2206 			if (err != -ENOMEM)
2207 				err = -EIO;
2208 			goto err_out;
2209 		}
2210 		ni->runlist.rl = rl;
2211 		/* Seek to the end of the runlist. */
2212 		while (rl->length)
2213 			rl++;
2214 	}
2215 	/*
2216 	 * We now know the runlist of the last extent is mapped and @rl is at
2217 	 * the end of the runlist.  We want to begin allocating clusters
2218 	 * starting at the last allocated cluster to reduce fragmentation.  If
2219 	 * there are no valid LCNs in the attribute we let the cluster
2220 	 * allocator choose the starting cluster.
2221 	 */
2222 	/* If the last LCN is a hole or similar, seek back to the last real LCN. */
2223 	while (rl->lcn < 0 && rl > ni->runlist.rl)
2224 		rl--;
2225 first_alloc:
2226 	// FIXME: Need to implement partial allocations so at least part of the
2227 	// write can be performed when start >= 0.  (Needed for POSIX write(2)
2228 	// conformance.)
2229 	rl2 = ntfs_cluster_alloc(vol, allocated_size >> vol->cluster_size_bits,
2230 			(new_alloc_size - allocated_size) >>
2231 			vol->cluster_size_bits, (rl && (rl->lcn >= 0)) ?
2232 			rl->lcn + rl->length : -1, DATA_ZONE, true);
2233 	if (IS_ERR(rl2)) {
2234 		err = PTR_ERR(rl2);
2235 		if (start < 0 || start >= allocated_size)
2236 			ntfs_error(vol->sb, "Cannot extend allocation of "
2237 					"inode 0x%lx, attribute type 0x%x, "
2238 					"because the allocation of clusters "
2239 					"failed with error code %i.", vi->i_ino,
2240 					(unsigned)le32_to_cpu(ni->type), err);
2241 		if (err != -ENOMEM && err != -ENOSPC)
2242 			err = -EIO;
2243 		goto err_out;
2244 	}
2245 	rl = ntfs_runlists_merge(ni->runlist.rl, rl2);
2246 	if (IS_ERR(rl)) {
2247 		err = PTR_ERR(rl);
2248 		if (start < 0 || start >= allocated_size)
2249 			ntfs_error(vol->sb, "Cannot extend allocation of "
2250 					"inode 0x%lx, attribute type 0x%x, "
2251 					"because the runlist merge failed "
2252 					"with error code %i.", vi->i_ino,
2253 					(unsigned)le32_to_cpu(ni->type), err);
2254 		if (err != -ENOMEM)
2255 			err = -EIO;
2256 		if (ntfs_cluster_free_from_rl(vol, rl2)) {
2257 			ntfs_error(vol->sb, "Failed to release allocated "
2258 					"cluster(s) in error code path.  Run "
2259 					"chkdsk to recover the lost "
2260 					"cluster(s).");
2261 			NVolSetErrors(vol);
2262 		}
2263 		ntfs_free(rl2);
2264 		goto err_out;
2265 	}
2266 	ni->runlist.rl = rl;
2267 	ntfs_debug("Allocated 0x%llx clusters.", (long long)(new_alloc_size -
2268 			allocated_size) >> vol->cluster_size_bits);
2269 	/* Find the runlist element with which the attribute extent starts. */
2270 	ll = sle64_to_cpu(a->data.non_resident.lowest_vcn);
2271 	rl2 = ntfs_rl_find_vcn_nolock(rl, ll);
2272 	BUG_ON(!rl2);
2273 	BUG_ON(!rl2->length);
2274 	BUG_ON(rl2->lcn < LCN_HOLE);
2275 	mp_rebuilt = false;
2276 	/* Get the size for the new mapping pairs array for this extent. */
2277 	mp_size = ntfs_get_size_for_mapping_pairs(vol, rl2, ll, -1);
2278 	if (unlikely(mp_size <= 0)) {
2279 		err = mp_size;
2280 		if (start < 0 || start >= allocated_size)
2281 			ntfs_error(vol->sb, "Cannot extend allocation of "
2282 					"inode 0x%lx, attribute type 0x%x, "
2283 					"because determining the size for the "
2284 					"mapping pairs failed with error code "
2285 					"%i.", vi->i_ino,
2286 					(unsigned)le32_to_cpu(ni->type), err);
2287 		err = -EIO;
2288 		goto undo_alloc;
2289 	}
2290 	/* Extend the attribute record to fit the bigger mapping pairs array. */
2291 	attr_len = le32_to_cpu(a->length);
2292 	err = ntfs_attr_record_resize(m, a, mp_size +
2293 			le16_to_cpu(a->data.non_resident.mapping_pairs_offset));
2294 	if (unlikely(err)) {
2295 		BUG_ON(err != -ENOSPC);
2296 		// TODO: Deal with this by moving this extent to a new mft
2297 		// record or by starting a new extent in a new mft record,
2298 		// possibly by extending this extent partially and filling it
2299 		// and creating a new extent for the remainder, or by making
2300 		// other attributes non-resident and/or by moving other
2301 		// attributes out of this mft record.
2302 		if (start < 0 || start >= allocated_size)
2303 			ntfs_error(vol->sb, "Not enough space in the mft "
2304 					"record for the extended attribute "
2305 					"record.  This case is not "
2306 					"implemented yet.");
2307 		err = -EOPNOTSUPP;
2308 		goto undo_alloc;
2309 	}
2310 	mp_rebuilt = true;
2311 	/* Generate the mapping pairs array directly into the attr record. */
2312 	err = ntfs_mapping_pairs_build(vol, (u8*)a +
2313 			le16_to_cpu(a->data.non_resident.mapping_pairs_offset),
2314 			mp_size, rl2, ll, -1, NULL);
2315 	if (unlikely(err)) {
2316 		if (start < 0 || start >= allocated_size)
2317 			ntfs_error(vol->sb, "Cannot extend allocation of "
2318 					"inode 0x%lx, attribute type 0x%x, "
2319 					"because building the mapping pairs "
2320 					"failed with error code %i.", vi->i_ino,
2321 					(unsigned)le32_to_cpu(ni->type), err);
2322 		err = -EIO;
2323 		goto undo_alloc;
2324 	}
2325 	/* Update the highest_vcn. */
2326 	a->data.non_resident.highest_vcn = cpu_to_sle64((new_alloc_size >>
2327 			vol->cluster_size_bits) - 1);
2328 	/*
2329 	 * We now have extended the allocated size of the attribute.  Reflect
2330 	 * this in the ntfs_inode structure and the attribute record.
2331 	 */
2332 	if (a->data.non_resident.lowest_vcn) {
2333 		/*
2334 		 * We are not in the first attribute extent, switch to it, but
2335 		 * first ensure the changes will make it to disk later.
2336 		 */
2337 		flush_dcache_mft_record_page(ctx->ntfs_ino);
2338 		mark_mft_record_dirty(ctx->ntfs_ino);
2339 		ntfs_attr_reinit_search_ctx(ctx);
2340 		err = ntfs_attr_lookup(ni->type, ni->name, ni->name_len,
2341 				CASE_SENSITIVE, 0, NULL, 0, ctx);
2342 		if (unlikely(err))
2343 			goto restore_undo_alloc;
2344 		/* @m is not used any more so no need to set it. */
2345 		a = ctx->attr;
2346 	}
2347 	write_lock_irqsave(&ni->size_lock, flags);
2348 	ni->allocated_size = new_alloc_size;
2349 	a->data.non_resident.allocated_size = cpu_to_sle64(new_alloc_size);
2350 	/*
2351 	 * FIXME: This would fail if @ni is a directory, $MFT, or an index,
2352 	 * since those can have sparse/compressed set.  For example a directory
2353 	 * can be set compressed even though it is not compressed itself and in
2354 	 * that case the bit means that files are to be created compressed in
2355 	 * the directory...  At present this is ok as this code is only called
2356 	 * for regular files, and only for their $DATA attribute(s).
2357 	 * FIXME: The calculation is wrong if we created a hole above.  For now
2358 	 * it does not matter as we never create holes.
2359 	 */
2360 	if (NInoSparse(ni) || NInoCompressed(ni)) {
2361 		ni->itype.compressed.size += new_alloc_size - allocated_size;
2362 		a->data.non_resident.compressed_size =
2363 				cpu_to_sle64(ni->itype.compressed.size);
2364 		vi->i_blocks = ni->itype.compressed.size >> 9;
2365 	} else
2366 		vi->i_blocks = new_alloc_size >> 9;
2367 	write_unlock_irqrestore(&ni->size_lock, flags);
2368 alloc_done:
2369 	if (new_data_size >= 0) {
2370 		BUG_ON(new_data_size <
2371 				sle64_to_cpu(a->data.non_resident.data_size));
2372 		a->data.non_resident.data_size = cpu_to_sle64(new_data_size);
2373 	}
2374 flush_done:
2375 	/* Ensure the changes make it to disk. */
2376 	flush_dcache_mft_record_page(ctx->ntfs_ino);
2377 	mark_mft_record_dirty(ctx->ntfs_ino);
2378 done:
2379 	ntfs_attr_put_search_ctx(ctx);
2380 	unmap_mft_record(base_ni);
2381 	up_write(&ni->runlist.lock);
2382 	ntfs_debug("Done, new_allocated_size 0x%llx.",
2383 			(unsigned long long)new_alloc_size);
2384 	return new_alloc_size;
2385 restore_undo_alloc:
2386 	if (start < 0 || start >= allocated_size)
2387 		ntfs_error(vol->sb, "Cannot complete extension of allocation "
2388 				"of inode 0x%lx, attribute type 0x%x, because "
2389 				"lookup of first attribute extent failed with "
2390 				"error code %i.", vi->i_ino,
2391 				(unsigned)le32_to_cpu(ni->type), err);
2392 	if (err == -ENOENT)
2393 		err = -EIO;
2394 	ntfs_attr_reinit_search_ctx(ctx);
2395 	if (ntfs_attr_lookup(ni->type, ni->name, ni->name_len, CASE_SENSITIVE,
2396 			allocated_size >> vol->cluster_size_bits, NULL, 0,
2397 			ctx)) {
2398 		ntfs_error(vol->sb, "Failed to find last attribute extent of "
2399 				"attribute in error code path.  Run chkdsk to "
2400 				"recover.");
2401 		write_lock_irqsave(&ni->size_lock, flags);
2402 		ni->allocated_size = new_alloc_size;
2403 		/*
2404 		 * FIXME: This would fail if @ni is a directory...  See above.
2405 		 * FIXME: The calculation is wrong if we created a hole above.
2406 		 * For now it does not matter as we never create holes.
2407 		 */
2408 		if (NInoSparse(ni) || NInoCompressed(ni)) {
2409 			ni->itype.compressed.size += new_alloc_size -
2410 					allocated_size;
2411 			vi->i_blocks = ni->itype.compressed.size >> 9;
2412 		} else
2413 			vi->i_blocks = new_alloc_size >> 9;
2414 		write_unlock_irqrestore(&ni->size_lock, flags);
2415 		ntfs_attr_put_search_ctx(ctx);
2416 		unmap_mft_record(base_ni);
2417 		up_write(&ni->runlist.lock);
2418 		/*
2419 		 * The only thing that is now wrong is the allocated size of the
2420 		 * base attribute extent which chkdsk should be able to fix.
2421 		 */
2422 		NVolSetErrors(vol);
2423 		return err;
2424 	}
2425 	ctx->attr->data.non_resident.highest_vcn = cpu_to_sle64(
2426 			(allocated_size >> vol->cluster_size_bits) - 1);
2427 undo_alloc:
2428 	ll = allocated_size >> vol->cluster_size_bits;
2429 	if (ntfs_cluster_free(ni, ll, -1, ctx) < 0) {
2430 		ntfs_error(vol->sb, "Failed to release allocated cluster(s) "
2431 				"in error code path.  Run chkdsk to recover "
2432 				"the lost cluster(s).");
2433 		NVolSetErrors(vol);
2434 	}
2435 	m = ctx->mrec;
2436 	a = ctx->attr;
2437 	/*
2438 	 * If the runlist truncation fails and/or the search context is no
2439 	 * longer valid, we cannot resize the attribute record or build the
2440 	 * mapping pairs array thus we mark the inode bad so that no access to
2441 	 * the freed clusters can happen.
2442 	 */
2443 	if (ntfs_rl_truncate_nolock(vol, &ni->runlist, ll) || IS_ERR(m)) {
2444 		ntfs_error(vol->sb, "Failed to %s in error code path.  Run "
2445 				"chkdsk to recover.", IS_ERR(m) ?
2446 				"restore attribute search context" :
2447 				"truncate attribute runlist");
2448 		NVolSetErrors(vol);
2449 	} else if (mp_rebuilt) {
2450 		if (ntfs_attr_record_resize(m, a, attr_len)) {
2451 			ntfs_error(vol->sb, "Failed to restore attribute "
2452 					"record in error code path.  Run "
2453 					"chkdsk to recover.");
2454 			NVolSetErrors(vol);
2455 		} else /* if (success) */ {
2456 			if (ntfs_mapping_pairs_build(vol, (u8*)a + le16_to_cpu(
2457 					a->data.non_resident.
2458 					mapping_pairs_offset), attr_len -
2459 					le16_to_cpu(a->data.non_resident.
2460 					mapping_pairs_offset), rl2, ll, -1,
2461 					NULL)) {
2462 				ntfs_error(vol->sb, "Failed to restore "
2463 						"mapping pairs array in error "
2464 						"code path.  Run chkdsk to "
2465 						"recover.");
2466 				NVolSetErrors(vol);
2467 			}
2468 			flush_dcache_mft_record_page(ctx->ntfs_ino);
2469 			mark_mft_record_dirty(ctx->ntfs_ino);
2470 		}
2471 	}
2472 err_out:
2473 	if (ctx)
2474 		ntfs_attr_put_search_ctx(ctx);
2475 	if (m)
2476 		unmap_mft_record(base_ni);
2477 	up_write(&ni->runlist.lock);
2478 conv_err_out:
2479 	ntfs_debug("Failed.  Returning error code %i.", err);
2480 	return err;
2481 }
2482 
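/*
 * Example (illustrative sketch, hypothetical write helper): extending the
 * allocation ahead of a write and honouring the return convention above,
 * where a positive return smaller than the requested size signals a partial
 * extension reaching at least @pos.
 */
#if 0
static int example_extend_for_write(ntfs_inode *ni, const s64 pos,
		const s64 count)
{
	s64 new_alloc;

	/*
	 * Ask for allocation up to @pos + @count, leave the data size alone
	 * (-1) and require real clusters from @pos onwards so that at least
	 * part of the write can proceed on a partial extension.
	 */
	new_alloc = ntfs_attr_extend_allocation(ni, pos + count, -1, pos);
	if (new_alloc < 0)
		return (int)new_alloc;	/* e.g. -EFBIG, -EIO, or -ENOSPC. */
	if (new_alloc < pos + count)
		ntfs_debug("Allocation only extended to 0x%llx.",
				(unsigned long long)new_alloc);
	return 0;
}
#endif
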
2483 /**
2484  * ntfs_attr_set - fill (a part of) an attribute with a byte
2485  * @ni:		ntfs inode describing the attribute to fill
2486  * @ofs:	offset inside the attribute at which to start to fill
2487  * @cnt:	number of bytes to fill
2488  * @val:	the unsigned 8-bit value with which to fill the attribute
2489  *
2490  * Fill @cnt bytes of the attribute described by the ntfs inode @ni starting at
2491  * byte offset @ofs inside the attribute with the constant byte @val.
2492  *
2493  * This function is effectively like memset() applied to an ntfs attribute.
2494  * Note this function actually only operates on the page cache pages belonging
2495  * to the ntfs attribute and it marks them dirty after doing the memset().
2496  * Thus it relies on the vm dirty page write code paths to cause the modified
2497  * pages to be written to the mft record/disk.
2498  *
2499  * Return 0 on success and -errno on error.  An error code of -ESPIPE means
2500  * that @ofs + @cnt reaches beyond the end of the attribute and no write was
2501  * performed.
2502  */
2503 int ntfs_attr_set(ntfs_inode *ni, const s64 ofs, const s64 cnt, const u8 val)
2504 {
2505 	ntfs_volume *vol = ni->vol;
2506 	struct address_space *mapping;
2507 	struct page *page;
2508 	u8 *kaddr;
2509 	pgoff_t idx, end;
2510 	unsigned start_ofs, end_ofs, size;
2511 
2512 	ntfs_debug("Entering for ofs 0x%llx, cnt 0x%llx, val 0x%hx.",
2513 			(long long)ofs, (long long)cnt, val);
2514 	BUG_ON(ofs < 0);
2515 	BUG_ON(cnt < 0);
2516 	if (!cnt)
2517 		goto done;
2518 	/*
2519 	 * FIXME: Compressed and encrypted attributes are not supported when
2520 	 * writing and we should never have gotten here for them.
2521 	 */
2522 	BUG_ON(NInoCompressed(ni));
2523 	BUG_ON(NInoEncrypted(ni));
2524 	mapping = VFS_I(ni)->i_mapping;
2525 	/* Work out the starting index and page offset. */
2526 	idx = ofs >> PAGE_SHIFT;
2527 	start_ofs = ofs & ~PAGE_MASK;
2528 	/* Work out the ending index and page offset. */
2529 	end = ofs + cnt;
2530 	end_ofs = end & ~PAGE_MASK;
2531 	/* If the end is outside the inode size return -ESPIPE. */
2532 	if (unlikely(end > i_size_read(VFS_I(ni)))) {
2533 		ntfs_error(vol->sb, "Request exceeds end of attribute.");
2534 		return -ESPIPE;
2535 	}
2536 	end >>= PAGE_SHIFT;
2537 	/* If there is a first partial page, need to do it the slow way. */
2538 	if (start_ofs) {
2539 		page = read_mapping_page(mapping, idx, NULL);
2540 		if (IS_ERR(page)) {
2541 			ntfs_error(vol->sb, "Failed to read first partial "
2542 					"page (error, index 0x%lx).", idx);
2543 			return PTR_ERR(page);
2544 		}
2545 		/*
2546 		 * If the last page is the same as the first page, need to
2547 		 * limit the write to the end offset.
2548 		 */
2549 		size = PAGE_SIZE;
2550 		if (idx == end)
2551 			size = end_ofs;
2552 		kaddr = kmap_atomic(page);
2553 		memset(kaddr + start_ofs, val, size - start_ofs);
2554 		flush_dcache_page(page);
2555 		kunmap_atomic(kaddr);
2556 		set_page_dirty(page);
2557 		put_page(page);
2558 		balance_dirty_pages_ratelimited(mapping);
2559 		cond_resched();
2560 		if (idx == end)
2561 			goto done;
2562 		idx++;
2563 	}
2564 	/* Do the whole pages the fast way. */
2565 	for (; idx < end; idx++) {
2566 		/* Find or create the current page.  (The page is locked.) */
2567 		page = grab_cache_page(mapping, idx);
2568 		if (unlikely(!page)) {
2569 			ntfs_error(vol->sb, "Insufficient memory to grab "
2570 					"page (index 0x%lx).", idx);
2571 			return -ENOMEM;
2572 		}
2573 		kaddr = kmap_atomic(page);
2574 		memset(kaddr, val, PAGE_SIZE);
2575 		flush_dcache_page(page);
2576 		kunmap_atomic(kaddr);
2577 		/*
2578 		 * If the page has buffers, mark them uptodate since buffer
2579 		 * state and not page state is definitive in 2.6 kernels.
2580 		 */
2581 		if (page_has_buffers(page)) {
2582 			struct buffer_head *bh, *head;
2583 
2584 			bh = head = page_buffers(page);
2585 			do {
2586 				set_buffer_uptodate(bh);
2587 			} while ((bh = bh->b_this_page) != head);
2588 		}
2589 		/* Now that buffers are uptodate, set the page uptodate, too. */
2590 		SetPageUptodate(page);
2591 		/*
2592 		 * Set the page and all its buffers dirty and mark the inode
2593 		 * dirty, too.  The VM will write the page later on.
2594 		 */
2595 		set_page_dirty(page);
2596 		/* Finally unlock and release the page. */
2597 		unlock_page(page);
2598 		put_page(page);
2599 		balance_dirty_pages_ratelimited(mapping);
2600 		cond_resched();
2601 	}
2602 	/* If there is a last partial page, need to do it the slow way. */
2603 	if (end_ofs) {
2604 		page = read_mapping_page(mapping, idx, NULL);
2605 		if (IS_ERR(page)) {
2606 			ntfs_error(vol->sb, "Failed to read last partial page "
2607 					"(error, index 0x%lx).", idx);
2608 			return PTR_ERR(page);
2609 		}
2610 		kaddr = kmap_atomic(page);
2611 		memset(kaddr, val, end_ofs);
2612 		flush_dcache_page(page);
2613 		kunmap_atomic(kaddr);
2614 		set_page_dirty(page);
2615 		put_page(page);
2616 		balance_dirty_pages_ratelimited(mapping);
2617 		cond_resched();
2618 	}
2619 done:
2620 	ntfs_debug("Done.");
2621 	return 0;
2622 }
2623 
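/*
 * Example (illustrative sketch, hypothetical helper): zero filling a range
 * of an attribute through the page cache with ntfs_attr_set(), e.g. when
 * clearing newly allocated space.  The range must lie within i_size or the
 * call fails with -ESPIPE as described above.
 */
#if 0
static int example_zero_range(ntfs_inode *ni, const s64 ofs, const s64 len)
{
	int err;

	/* Fill @len bytes at byte offset @ofs with zeroes. */
	err = ntfs_attr_set(ni, ofs, len, 0);
	if (err == -ESPIPE)
		ntfs_debug("Range 0x%llx + 0x%llx reaches beyond i_size.",
				(unsigned long long)ofs,
				(unsigned long long)len);
	return err;
}
#endif
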
2624 #endif /* NTFS_RW */
2625