/*
 * inode.c
 *
 * PURPOSE
 *  Inode handling routines for the OSTA-UDF(tm) filesystem.
 *
 * CONTACTS
 *  E-mail regarding any portion of the Linux UDF file system should be
 *  directed to the development team mailing list (run by majordomo):
 *    linux_udf@hpesjro.fc.hp.com
 *
 * COPYRIGHT
 *  This file is distributed under the terms of the GNU General Public
 *  License (GPL). Copies of the GPL can be obtained from:
 *    ftp://prep.ai.mit.edu/pub/gnu/GPL
 *  Each contributing author retains all rights to their own work.
 *
 *  (C) 1998 Dave Boynton
 *  (C) 1998-2001 Ben Fennema
 *  (C) 1999-2000 Stelias Computing Inc
 *
 * HISTORY
 *
 *  10/04/98 dgb  Added rudimentary directory functions
 *  10/07/98      Fully working udf_block_map! It works!
 *  11/25/98      bmap altered to better support extents
 *  12/06/98 blf  partition support in udf_iget, udf_block_map and udf_read_inode
 *  12/12/98      rewrote udf_block_map to handle next extents and descs across
 *                block boundaries (which is not actually allowed)
 *  12/20/98      added support for strategy 4096
 *  03/07/99      rewrote udf_block_map (again)
 *                New funcs, inode_bmap, udf_next_aext
 *  04/19/99      Support for writing device EA's for major/minor #
 */

#include "udfdecl.h"
#include <linux/locks.h>
#include <linux/mm.h>
#include <linux/smp_lock.h>
#include <linux/module.h>

#include "udf_i.h"
#include "udf_sb.h"

MODULE_AUTHOR("Ben Fennema");
MODULE_DESCRIPTION("Universal Disk Format Filesystem");
MODULE_LICENSE("GPL");

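/*
 * EXTENT_MERGE_SIZE bounds the scratch table (laarr[]) used by the block
 * mapping code below: a single mapping operation works on at most five
 * long_ad entries at once (the previous extent, up to three pieces of a
 * split extent, and the following extent) before they are merged and
 * written back to disc.
 */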
#define EXTENT_MERGE_SIZE 5

static mode_t udf_convert_permissions(struct fileEntry *);
static int udf_update_inode(struct inode *, int);
static void udf_fill_inode(struct inode *, struct buffer_head *);
static struct buffer_head *inode_getblk(struct inode *, long, int *, long *, int *);
static void udf_split_extents(struct inode *, int *, int, int,
	long_ad [EXTENT_MERGE_SIZE], int *);
static void udf_prealloc_extents(struct inode *, int, int,
	long_ad [EXTENT_MERGE_SIZE], int *);
static void udf_merge_extents(struct inode *,
	long_ad [EXTENT_MERGE_SIZE], int *);
static void udf_update_extents(struct inode *,
	long_ad [EXTENT_MERGE_SIZE], int, int,
	lb_addr, uint32_t, struct buffer_head **);
static int udf_get_block(struct inode *, long, struct buffer_head *, int);

/*
 * udf_put_inode
 *
 * PURPOSE
 *	Discard any preallocated blocks still attached to the inode.
 *
 * DESCRIPTION
 *	This routine is called whenever the kernel no longer needs the inode.
 *
 * HISTORY
 *	July 1, 1997 - Andrew E. Mileski
 *	Written, tested, and released.
 *
 *  Called at each iput()
 */
void udf_put_inode(struct inode * inode)
81 {
82 	if (!(inode->i_sb->s_flags & MS_RDONLY))
83 	{
84 		lock_kernel();
85 		udf_discard_prealloc(inode);
86 		unlock_kernel();
87 	}
88 }
89 
/*
 * udf_delete_inode
 *
 * PURPOSE
 *	Clean-up before the specified inode is destroyed.
 *
 * DESCRIPTION
 *	This routine is called when the kernel destroys an inode structure,
 *	i.e. when iput() finds i_count == 0.
 *
 * HISTORY
 *	July 1, 1997 - Andrew E. Mileski
 *	Written, tested, and released.
 *
 *  Called at the last iput() if i_nlink is zero.
 */
void udf_delete_inode(struct inode * inode)
107 {
108 	lock_kernel();
109 
110 	if (is_bad_inode(inode))
111 		goto no_delete;
112 
113 	inode->i_size = 0;
114 	udf_truncate(inode);
115 	udf_update_inode(inode, IS_SYNC(inode));
116 	udf_free_inode(inode);
117 
118 	unlock_kernel();
119 	return;
120 no_delete:
121 	unlock_kernel();
122 	clear_inode(inode);
123 }
124 
void udf_discard_prealloc(struct inode * inode)
126 {
127 	if (inode->i_size && inode->i_size != UDF_I_LENEXTENTS(inode) &&
128 		UDF_I_ALLOCTYPE(inode) != ICBTAG_FLAG_AD_IN_ICB)
129 	{
130 		udf_truncate_extents(inode);
131 	}
132 }
133 
static int udf_writepage(struct page *page)
135 {
136 	return block_write_full_page(page, udf_get_block);
137 }
138 
static int udf_readpage(struct file *file, struct page *page)
140 {
141 	return block_read_full_page(page, udf_get_block);
142 }
143 
static int udf_prepare_write(struct file *file, struct page *page, unsigned from, unsigned to)
145 {
146 	return block_prepare_write(page, from, to, udf_get_block);
147 }
148 
static int udf_bmap(struct address_space *mapping, long block)
150 {
151 	return generic_block_bmap(mapping,block,udf_get_block);
152 }
153 
154 struct address_space_operations udf_aops = {
155 	readpage:		udf_readpage,
156 	writepage:		udf_writepage,
157 	sync_page:		block_sync_page,
158 	prepare_write:		udf_prepare_write,
159 	commit_write:		generic_commit_write,
160 	bmap:			udf_bmap,
161 };
162 
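/*
 * udf_expand_file_adinicb
 *
 * Convert a file whose data lives inside the ICB (ICBTAG_FLAG_AD_IN_ICB)
 * to one using regular short/long allocation descriptors: copy the in-ICB
 * payload into page 0 of the page cache, clear the descriptor area, switch
 * the allocation type, and write the page out through the normal
 * address_space operations.
 */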
void udf_expand_file_adinicb(struct inode * inode, int newsize, int * err)
164 {
165 	struct buffer_head *bh = NULL;
166 	struct page *page;
167 	char *kaddr;
168 	int block;
169 
170 	/* from now on we have normal address_space methods */
171 	inode->i_data.a_ops = &udf_aops;
172 
173 	if (!UDF_I_LENALLOC(inode))
174 	{
175 		if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_USE_SHORT_AD))
176 			UDF_I_ALLOCTYPE(inode) = ICBTAG_FLAG_AD_SHORT;
177 		else
178 			UDF_I_ALLOCTYPE(inode) = ICBTAG_FLAG_AD_LONG;
179 		mark_inode_dirty(inode);
180 		return;
181 	}
182 
183 	block = udf_get_lb_pblock(inode->i_sb, UDF_I_LOCATION(inode), 0);
184 	bh = udf_tread(inode->i_sb, block);
185 	if (!bh)
186 		return;
187 	page = grab_cache_page(inode->i_mapping, 0);
188 	if (!PageLocked(page))
189 		PAGE_BUG(page);
190 	if (!Page_Uptodate(page))
191 	{
192 		kaddr = kmap(page);
193 		memset(kaddr + UDF_I_LENALLOC(inode), 0x00,
194 			PAGE_CACHE_SIZE - UDF_I_LENALLOC(inode));
195 		memcpy(kaddr, bh->b_data + udf_file_entry_alloc_offset(inode),
196 			UDF_I_LENALLOC(inode));
197 		flush_dcache_page(page);
198 		SetPageUptodate(page);
199 		kunmap(page);
200 	}
201 	memset(bh->b_data + udf_file_entry_alloc_offset(inode),
202 		0, UDF_I_LENALLOC(inode));
203 	UDF_I_LENALLOC(inode) = 0;
204 	if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_USE_SHORT_AD))
205 		UDF_I_ALLOCTYPE(inode) = ICBTAG_FLAG_AD_SHORT;
206 	else
207 		UDF_I_ALLOCTYPE(inode) = ICBTAG_FLAG_AD_LONG;
208 	mark_buffer_dirty_inode(bh, inode);
209 	udf_release_data(bh);
210 
211 	inode->i_data.a_ops->writepage(page);
212 	page_cache_release(page);
213 
214 	mark_inode_dirty(inode);
215 	inode->i_version ++;
216 }
217 
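/*
 * udf_expand_dir_adinicb
 *
 * Directory counterpart of the routine above: allocate a new block, copy
 * every fileIdentDesc out of the ICB into it, clear the in-ICB area and
 * record the new block as the directory's single extent.  Returns the
 * buffer_head of the new directory block, or NULL on failure.
 */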
struct buffer_head * udf_expand_dir_adinicb(struct inode *inode, int *block, int *err)
219 {
220 	int newblock;
221 	struct buffer_head *sbh = NULL, *dbh = NULL;
222 	lb_addr bloc, eloc;
223 	uint32_t elen, extoffset;
224 
225 	struct udf_fileident_bh sfibh, dfibh;
226 	loff_t f_pos = udf_ext0_offset(inode) >> 2;
227 	int size = (udf_ext0_offset(inode) + inode->i_size) >> 2;
228 	struct fileIdentDesc cfi, *sfi, *dfi;
229 
230 	if (!inode->i_size)
231 	{
232 		if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_USE_SHORT_AD))
233 			UDF_I_ALLOCTYPE(inode) = ICBTAG_FLAG_AD_SHORT;
234 		else
235 			UDF_I_ALLOCTYPE(inode) = ICBTAG_FLAG_AD_LONG;
236 		mark_inode_dirty(inode);
237 		return NULL;
238 	}
239 
240 	/* alloc block, and copy data to it */
241 	*block = udf_new_block(inode->i_sb, inode,
242 		UDF_I_LOCATION(inode).partitionReferenceNum,
243 		UDF_I_LOCATION(inode).logicalBlockNum, err);
244 
245 	if (!(*block))
246 		return NULL;
247 	newblock = udf_get_pblock(inode->i_sb, *block,
248 		UDF_I_LOCATION(inode).partitionReferenceNum, 0);
249 	if (!newblock)
250 		return NULL;
251 	sbh = udf_tread(inode->i_sb, inode->i_ino);
252 	if (!sbh)
253 		return NULL;
254 	dbh = udf_tgetblk(inode->i_sb, newblock);
255 	if (!dbh)
256 		return NULL;
257 	lock_buffer(dbh);
258 	memset(dbh->b_data, 0x00, inode->i_sb->s_blocksize);
259 	mark_buffer_uptodate(dbh, 1);
260 	unlock_buffer(dbh);
261 	mark_buffer_dirty_inode(dbh, inode);
262 
263 	sfibh.soffset = sfibh.eoffset = (f_pos & ((inode->i_sb->s_blocksize - 1) >> 2)) << 2;
264 	sfibh.sbh = sfibh.ebh = sbh;
265 	dfibh.soffset = dfibh.eoffset = 0;
266 	dfibh.sbh = dfibh.ebh = dbh;
267 	while ( (f_pos < size) )
268 	{
269 		sfi = udf_fileident_read(inode, &f_pos, &sfibh, &cfi, NULL, NULL, NULL, NULL, NULL, NULL);
270 		if (!sfi)
271 		{
272 			udf_release_data(sbh);
273 			udf_release_data(dbh);
274 			return NULL;
275 		}
276 		sfi->descTag.tagLocation = *block;
277 		dfibh.soffset = dfibh.eoffset;
278 		dfibh.eoffset += (sfibh.eoffset - sfibh.soffset);
279 		dfi = (struct fileIdentDesc *)(dbh->b_data + dfibh.soffset);
280 		if (udf_write_fi(inode, sfi, dfi, &dfibh, sfi->impUse,
281 			sfi->fileIdent + sfi->lengthOfImpUse))
282 		{
283 			udf_release_data(sbh);
284 			udf_release_data(dbh);
285 			return NULL;
286 		}
287 	}
288 	mark_buffer_dirty_inode(dbh, inode);
289 
290 	memset(sbh->b_data + udf_file_entry_alloc_offset(inode),
291 		0, UDF_I_LENALLOC(inode));
292 
293 	UDF_I_LENALLOC(inode) = 0;
294 	if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_USE_SHORT_AD))
295 		UDF_I_ALLOCTYPE(inode) = ICBTAG_FLAG_AD_SHORT;
296 	else
297 		UDF_I_ALLOCTYPE(inode) = ICBTAG_FLAG_AD_LONG;
298 	bloc = UDF_I_LOCATION(inode);
299 	eloc.logicalBlockNum = *block;
300 	eloc.partitionReferenceNum = UDF_I_LOCATION(inode).partitionReferenceNum;
301 	elen = inode->i_size;
302 	UDF_I_LENEXTENTS(inode) = elen;
303 	extoffset = udf_file_entry_alloc_offset(inode);
304 	udf_add_aext(inode, &bloc, &extoffset, eloc, elen, &sbh, 0);
305 	/* UniqueID stuff */
306 
307 	mark_buffer_dirty(sbh);
308 	udf_release_data(sbh);
309 	mark_inode_dirty(inode);
310 	inode->i_version ++;
311 	return dbh;
312 }
313 
static int udf_get_block(struct inode *inode, long block, struct buffer_head *bh_result, int create)
315 {
316 	int err, new;
317 	struct buffer_head *bh;
318 	unsigned long phys;
319 
320 	if (!create)
321 	{
322 		phys = udf_block_map(inode, block);
323 		if (phys)
324 		{
325 			bh_result->b_dev = inode->i_dev;
326 			bh_result->b_blocknr = phys;
327 			bh_result->b_state |= (1UL << BH_Mapped);
328 		}
329 		return 0;
330 	}
331 
332 	err = -EIO;
333 	new = 0;
334 	bh = NULL;
335 
336 	lock_kernel();
337 
338 	if (block < 0)
339 		goto abort_negative;
340 
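	/*
	 * Sequential-write heuristic: if this request is for the block right
	 * after the one we allocated last, bump both the expected next block
	 * and the allocation goal so the data stays contiguous on disc.
	 */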
341 	if (block == UDF_I_NEXT_ALLOC_BLOCK(inode) + 1)
342 	{
343 		UDF_I_NEXT_ALLOC_BLOCK(inode) ++;
344 		UDF_I_NEXT_ALLOC_GOAL(inode) ++;
345 	}
346 
347 	err = 0;
348 
349 	bh = inode_getblk(inode, block, &err, &phys, &new);
350 	if (bh)
351 		BUG();
352 	if (err)
353 		goto abort;
354 	if (!phys)
355 		BUG();
356 
357 	bh_result->b_dev = inode->i_dev;
358 	bh_result->b_blocknr = phys;
359 	bh_result->b_state |= (1UL << BH_Mapped);
360 	if (new)
361 		bh_result->b_state |= (1UL << BH_New);
362 abort:
363 	unlock_kernel();
364 	return err;
365 
366 abort_negative:
367 	udf_warning(inode->i_sb, "udf_get_block", "block < 0");
368 	goto abort;
369 }
370 
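/*
 * udf_getblk
 *
 * Wrapper around udf_get_block(): a throw-away buffer_head receives the
 * mapping, the real buffer is then obtained with sb_getblk(), and freshly
 * allocated blocks are zeroed before being handed back.
 */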
struct buffer_head * udf_getblk(struct inode * inode, long block,
372 	int create, int * err)
373 {
374 	struct buffer_head dummy;
375 
376 	dummy.b_state = 0;
377 	dummy.b_blocknr = -1000;
378 	*err = udf_get_block(inode, block, &dummy, create);
379 	if (!*err && buffer_mapped(&dummy))
380 	{
381 		struct buffer_head *bh;
382 		bh = sb_getblk(inode->i_sb, dummy.b_blocknr);
383 		if (buffer_new(&dummy))
384 		{
385 			lock_buffer(bh);
386 			memset(bh->b_data, 0x00, inode->i_sb->s_blocksize);
387 			mark_buffer_uptodate(bh, 1);
388 			unlock_buffer(bh);
389 			mark_buffer_dirty_inode(bh, inode);
390 		}
391 		return bh;
392 	}
393 	return NULL;
394 }
395 
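/*
 * inode_getblk
 *
 * Map logical block 'block' of the inode to a physical block, allocating
 * one if necessary.  The allocation descriptors are walked until the extent
 * covering the request is found; a recorded extent is returned as-is, while
 * an unrecorded one is split around the requested block, backed by a newly
 * allocated block, optionally extended with preallocation, merged with its
 * neighbours and written back.  On success *phys holds the physical block.
 */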
static struct buffer_head * inode_getblk(struct inode * inode, long block,
397 	int *err, long *phys, int *new)
398 {
399 	struct buffer_head *pbh = NULL, *cbh = NULL, *nbh = NULL, *result = NULL;
400 	long_ad laarr[EXTENT_MERGE_SIZE];
401 	uint32_t pextoffset = 0, cextoffset = 0, nextoffset = 0;
402 	int count = 0, startnum = 0, endnum = 0;
403 	uint32_t elen = 0;
404 	lb_addr eloc, pbloc, cbloc, nbloc;
405 	int c = 1;
406 	uint64_t lbcount = 0, b_off = 0;
407 	uint32_t newblocknum, newblock, offset = 0;
408 	int8_t etype;
409 	int goal = 0, pgoal = UDF_I_LOCATION(inode).logicalBlockNum;
410 	char lastblock = 0;
411 
412 	pextoffset = cextoffset = nextoffset = udf_file_entry_alloc_offset(inode);
413 	b_off = (uint64_t)block << inode->i_sb->s_blocksize_bits;
414 	pbloc = cbloc = nbloc = UDF_I_LOCATION(inode);
415 
416 	/* find the extent which contains the block we are looking for.
417        alternate between laarr[0] and laarr[1] for locations of the
418        current extent, and the previous extent */
419 	do
420 	{
421 		if (pbh != cbh)
422 		{
423 			udf_release_data(pbh);
424 			atomic_inc(&cbh->b_count);
425 			pbh = cbh;
426 		}
427 		if (cbh != nbh)
428 		{
429 			udf_release_data(cbh);
430 			atomic_inc(&nbh->b_count);
431 			cbh = nbh;
432 		}
433 
434 		lbcount += elen;
435 
436 		pbloc = cbloc;
437 		cbloc = nbloc;
438 
439 		pextoffset = cextoffset;
440 		cextoffset = nextoffset;
441 
442 		if ((etype = udf_next_aext(inode, &nbloc, &nextoffset, &eloc, &elen, &nbh, 1)) == -1)
443 			break;
444 
445 		c = !c;
446 
447 		laarr[c].extLength = (etype << 30) | elen;
448 		laarr[c].extLocation = eloc;
449 
450 		if (etype != (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30))
451 			pgoal = eloc.logicalBlockNum +
452 				((elen + inode->i_sb->s_blocksize - 1) >>
453 				inode->i_sb->s_blocksize_bits);
454 
455 		count ++;
456 	} while (lbcount + elen <= b_off);
457 
458 	b_off -= lbcount;
459 	offset = b_off >> inode->i_sb->s_blocksize_bits;
460 
461 	/* if the extent is allocated and recorded, return the block
462        if the extent is not a multiple of the blocksize, round up */
463 
464 	if (etype == (EXT_RECORDED_ALLOCATED >> 30))
465 	{
466 		if (elen & (inode->i_sb->s_blocksize - 1))
467 		{
468 			elen = EXT_RECORDED_ALLOCATED |
469 				((elen + inode->i_sb->s_blocksize - 1) &
470 				~(inode->i_sb->s_blocksize - 1));
471 			etype = udf_write_aext(inode, nbloc, &cextoffset, eloc, elen, nbh, 1);
472 		}
473 		udf_release_data(pbh);
474 		udf_release_data(cbh);
475 		udf_release_data(nbh);
476 		newblock = udf_get_lb_pblock(inode->i_sb, eloc, offset);
477 		*phys = newblock;
478 		return NULL;
479 	}
480 
481 	if (etype == -1)
482 	{
483 		endnum = startnum = ((count > 1) ? 1 : count);
484 		if (laarr[c].extLength & (inode->i_sb->s_blocksize - 1))
485 		{
486 			laarr[c].extLength =
487 				(laarr[c].extLength & UDF_EXTENT_FLAG_MASK) |
488 				(((laarr[c].extLength & UDF_EXTENT_LENGTH_MASK) +
489 					inode->i_sb->s_blocksize - 1) &
490 				~(inode->i_sb->s_blocksize - 1));
491 			UDF_I_LENEXTENTS(inode) =
492 				(UDF_I_LENEXTENTS(inode) + inode->i_sb->s_blocksize - 1) &
493 					~(inode->i_sb->s_blocksize - 1);
494 		}
495 		c = !c;
496 		laarr[c].extLength = EXT_NOT_RECORDED_NOT_ALLOCATED |
497 			((offset + 1) << inode->i_sb->s_blocksize_bits);
498 		memset(&laarr[c].extLocation, 0x00, sizeof(lb_addr));
499 		count ++;
500 		endnum ++;
501 		lastblock = 1;
502 	}
503 	else
504 		endnum = startnum = ((count > 2) ? 2 : count);
505 
506 	/* if the current extent is in position 0, swap it with the previous */
507 	if (!c && count != 1)
508 	{
509 		laarr[2] = laarr[0];
510 		laarr[0] = laarr[1];
511 		laarr[1] = laarr[2];
512 		c = 1;
513 	}
514 
	/* if the current block is located in an extent, read the next extent */
516 	if (etype != -1)
517 	{
518 		if ((etype = udf_next_aext(inode, &nbloc, &nextoffset, &eloc, &elen, &nbh, 0)) != -1)
519 		{
520 			laarr[c+1].extLength = (etype << 30) | elen;
521 			laarr[c+1].extLocation = eloc;
522 			count ++;
523 			startnum ++;
524 			endnum ++;
525 		}
526 		else
527 			lastblock = 1;
528 	}
529 	udf_release_data(nbh);
530 	if (!pbh)
531 		pbh = cbh;
532 	else
533 		udf_release_data(cbh);
534 
535 	/* if the current extent is not recorded but allocated, get the
536 		block in the extent corresponding to the requested block */
537 	if ((laarr[c].extLength >> 30) == (EXT_NOT_RECORDED_ALLOCATED >> 30))
538 		newblocknum = laarr[c].extLocation.logicalBlockNum + offset;
539 	else /* otherwise, allocate a new block */
540 	{
541 		if (UDF_I_NEXT_ALLOC_BLOCK(inode) == block)
542 			goal = UDF_I_NEXT_ALLOC_GOAL(inode);
543 
544 		if (!goal)
545 		{
546 			if (!(goal = pgoal))
547 				goal = UDF_I_LOCATION(inode).logicalBlockNum + 1;
548 		}
549 
550 		if (!(newblocknum = udf_new_block(inode->i_sb, inode,
551 			UDF_I_LOCATION(inode).partitionReferenceNum, goal, err)))
552 		{
553 			udf_release_data(pbh);
554 			*err = -ENOSPC;
555 			return NULL;
556 		}
557 		UDF_I_LENEXTENTS(inode) += inode->i_sb->s_blocksize;
558 	}
559 
	/* if the extent the requested block is located in contains multiple blocks,
       split the extent into at most three extents. blocks prior to requested
       block, requested block, and blocks after requested block */
563 	udf_split_extents(inode, &c, offset, newblocknum, laarr, &endnum);
564 
565 #ifdef UDF_PREALLOCATE
566 	/* preallocate blocks */
567 	udf_prealloc_extents(inode, c, lastblock, laarr, &endnum);
568 #endif
569 
570 	/* merge any continuous blocks in laarr */
571 	udf_merge_extents(inode, laarr, &endnum);
572 
573 	/* write back the new extents, inserting new extents if the new number
574        of extents is greater than the old number, and deleting extents if
575        the new number of extents is less than the old number */
576 	udf_update_extents(inode, laarr, startnum, endnum, pbloc, pextoffset, &pbh);
577 
578 	udf_release_data(pbh);
579 
580 	if (!(newblock = udf_get_pblock(inode->i_sb, newblocknum,
581 		UDF_I_LOCATION(inode).partitionReferenceNum, 0)))
582 	{
583 		return NULL;
584 	}
585 	*phys = newblock;
586 	*err = 0;
587 	*new = 1;
588 	UDF_I_NEXT_ALLOC_BLOCK(inode) = block;
589 	UDF_I_NEXT_ALLOC_GOAL(inode) = newblocknum;
590 	inode->i_ctime = CURRENT_TIME;
591 	UDF_I_UCTIME(inode) = CURRENT_UTIME;
592 
593 	if (IS_SYNC(inode))
594 		udf_sync_inode(inode);
595 	else
596 		mark_inode_dirty(inode);
597 	return result;
598 }
599 
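/*
 * udf_split_extents
 *
 * Make the requested block its own one-block EXT_RECORDED_ALLOCATED extent.
 * Note the extLength encoding: the top two bits carry the extent type and
 * the low 30 bits the length in bytes, so an unrecorded extent covering the
 * target splits into up to three entries (blocks before the target, the
 * target block itself, and blocks after it).
 */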
static void udf_split_extents(struct inode *inode, int *c, int offset, int newblocknum,
601 	long_ad laarr[EXTENT_MERGE_SIZE], int *endnum)
602 {
603 	if ((laarr[*c].extLength >> 30) == (EXT_NOT_RECORDED_ALLOCATED >> 30) ||
604 		(laarr[*c].extLength >> 30) == (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30))
605 	{
606 		int curr = *c;
607 		int blen = ((laarr[curr].extLength & UDF_EXTENT_LENGTH_MASK) +
608 			inode->i_sb->s_blocksize - 1) >> inode->i_sb->s_blocksize_bits;
609 		int type = laarr[curr].extLength & ~UDF_EXTENT_LENGTH_MASK;
610 
611 		if (blen == 1)
612 			;
613 		else if (!offset || blen == offset + 1)
614 		{
615 			laarr[curr+2] = laarr[curr+1];
616 			laarr[curr+1] = laarr[curr];
617 		}
618 		else
619 		{
620 			laarr[curr+3] = laarr[curr+1];
621 			laarr[curr+2] = laarr[curr+1] = laarr[curr];
622 		}
623 
624 		if (offset)
625 		{
626 			if ((type >> 30) == (EXT_NOT_RECORDED_ALLOCATED >> 30))
627 			{
628 				udf_free_blocks(inode->i_sb, inode, laarr[curr].extLocation, 0, offset);
629 				laarr[curr].extLength = EXT_NOT_RECORDED_NOT_ALLOCATED |
630 					(offset << inode->i_sb->s_blocksize_bits);
631 				laarr[curr].extLocation.logicalBlockNum = 0;
632 				laarr[curr].extLocation.partitionReferenceNum = 0;
633 			}
634 			else
635 				laarr[curr].extLength = type |
636 					(offset << inode->i_sb->s_blocksize_bits);
637 			curr ++;
638 			(*c) ++;
639 			(*endnum) ++;
640 		}
641 
642 		laarr[curr].extLocation.logicalBlockNum = newblocknum;
643 		if ((type >> 30) == (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30))
644 			laarr[curr].extLocation.partitionReferenceNum =
645 				UDF_I_LOCATION(inode).partitionReferenceNum;
646 		laarr[curr].extLength = EXT_RECORDED_ALLOCATED |
647 			inode->i_sb->s_blocksize;
648 		curr ++;
649 
650 		if (blen != offset + 1)
651 		{
652 			if ((type >> 30) == (EXT_NOT_RECORDED_ALLOCATED >> 30))
653 				laarr[curr].extLocation.logicalBlockNum += (offset + 1);
654 			laarr[curr].extLength = type |
655 				((blen - (offset + 1)) << inode->i_sb->s_blocksize_bits);
656 			curr ++;
657 			(*endnum) ++;
658 		}
659 	}
660 }
661 
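/*
 * udf_prealloc_extents
 *
 * Try to reserve up to UDF_DEFAULT_PREALLOC_BLOCKS blocks immediately after
 * the extent just recorded, either by growing an existing
 * not-recorded-but-allocated extent or by inserting a new one, so that
 * later sequential writes do not need to allocate again.
 */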
static void udf_prealloc_extents(struct inode *inode, int c, int lastblock,
663 	 long_ad laarr[EXTENT_MERGE_SIZE], int *endnum)
664 {
665 	int start, length = 0, currlength = 0, i;
666 
667 	if (*endnum >= (c+1))
668 	{
669 		if (!lastblock)
670 			return;
671 		else
672 			start = c;
673 	}
674 	else
675 	{
676 		if ((laarr[c+1].extLength >> 30) == (EXT_NOT_RECORDED_ALLOCATED >> 30))
677 		{
678 			start = c+1;
679 			length = currlength = (((laarr[c+1].extLength & UDF_EXTENT_LENGTH_MASK) +
680 				inode->i_sb->s_blocksize - 1) >> inode->i_sb->s_blocksize_bits);
681 		}
682 		else
683 			start = c;
684 	}
685 
686 	for (i=start+1; i<=*endnum; i++)
687 	{
688 		if (i == *endnum)
689 		{
690 			if (lastblock)
691 				length += UDF_DEFAULT_PREALLOC_BLOCKS;
692 		}
693 		else if ((laarr[i].extLength >> 30) == (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30))
694 			length += (((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
695 				inode->i_sb->s_blocksize - 1) >> inode->i_sb->s_blocksize_bits);
696 		else
697 			break;
698 	}
699 
700 	if (length)
701 	{
702 		int next = laarr[start].extLocation.logicalBlockNum +
703 			(((laarr[start].extLength & UDF_EXTENT_LENGTH_MASK) +
704 			inode->i_sb->s_blocksize - 1) >> inode->i_sb->s_blocksize_bits);
705 		int numalloc = udf_prealloc_blocks(inode->i_sb, inode,
706 			laarr[start].extLocation.partitionReferenceNum,
707 			next, (UDF_DEFAULT_PREALLOC_BLOCKS > length ? length :
708 				UDF_DEFAULT_PREALLOC_BLOCKS) - currlength);
709 
710 		if (numalloc)
711 		{
712 			if (start == (c+1))
713 				laarr[start].extLength +=
714 					(numalloc << inode->i_sb->s_blocksize_bits);
715 			else
716 			{
717 				memmove(&laarr[c+2], &laarr[c+1],
718 					sizeof(long_ad) * (*endnum - (c+1)));
719 				(*endnum) ++;
720 				laarr[c+1].extLocation.logicalBlockNum = next;
721 				laarr[c+1].extLocation.partitionReferenceNum =
722 					laarr[c].extLocation.partitionReferenceNum;
723 				laarr[c+1].extLength = EXT_NOT_RECORDED_ALLOCATED |
724 					(numalloc << inode->i_sb->s_blocksize_bits);
725 				start = c+1;
726 			}
727 
728 			for (i=start+1; numalloc && i<*endnum; i++)
729 			{
730 				int elen = ((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
731 					inode->i_sb->s_blocksize - 1) >> inode->i_sb->s_blocksize_bits;
732 
733 				if (elen > numalloc)
734 				{
735 					laarr[c].extLength -=
736 						(numalloc << inode->i_sb->s_blocksize_bits);
737 					numalloc = 0;
738 				}
739 				else
740 				{
741 					numalloc -= elen;
742 					if (*endnum > (i+1))
743 						memmove(&laarr[i], &laarr[i+1],
744 							sizeof(long_ad) * (*endnum - (i+1)));
745 					i --;
746 					(*endnum) --;
747 				}
748 			}
749 			UDF_I_LENEXTENTS(inode) += numalloc << inode->i_sb->s_blocksize_bits;
750 		}
751 	}
752 }
753 
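/*
 * udf_merge_extents
 *
 * Coalesce adjacent laarr entries that have the same extent type and are
 * physically contiguous, while making sure no single descriptor exceeds
 * the 30-bit extent length limit.
 */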
static void udf_merge_extents(struct inode *inode,
755 	 long_ad laarr[EXTENT_MERGE_SIZE], int *endnum)
756 {
757 	int i;
758 
759 	for (i=0; i<(*endnum-1); i++)
760 	{
761 		if ((laarr[i].extLength >> 30) == (laarr[i+1].extLength >> 30))
762 		{
763 			if (((laarr[i].extLength >> 30) == (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30)) ||
764 				((laarr[i+1].extLocation.logicalBlockNum - laarr[i].extLocation.logicalBlockNum) ==
765 				(((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
766 				inode->i_sb->s_blocksize - 1) >> inode->i_sb->s_blocksize_bits)))
767 			{
768 				if (((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
769 					(laarr[i+1].extLength & UDF_EXTENT_LENGTH_MASK) +
770 					inode->i_sb->s_blocksize - 1) & ~UDF_EXTENT_LENGTH_MASK)
771 				{
772 					laarr[i+1].extLength = (laarr[i+1].extLength -
773 						(laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
774 						UDF_EXTENT_LENGTH_MASK) & ~(inode->i_sb->s_blocksize-1);
775 					laarr[i].extLength = (UDF_EXTENT_LENGTH_MASK + 1) -
776 						inode->i_sb->s_blocksize;
777 					laarr[i+1].extLocation.logicalBlockNum =
778 						laarr[i].extLocation.logicalBlockNum +
779 						((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) >>
780 							inode->i_sb->s_blocksize_bits);
781 				}
782 				else
783 				{
784 					laarr[i].extLength = laarr[i+1].extLength +
785 						(((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
786 						inode->i_sb->s_blocksize - 1) & ~(inode->i_sb->s_blocksize-1));
787 					if (*endnum > (i+2))
788 						memmove(&laarr[i+1], &laarr[i+2],
789 							sizeof(long_ad) * (*endnum - (i+2)));
790 					i --;
791 					(*endnum) --;
792 				}
793 			}
794 		}
795 	}
796 }
797 
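/*
 * udf_update_extents
 *
 * Write the laarr table back to the on-disc allocation descriptors:
 * delete descriptors if the table shrank, insert new ones if it grew, and
 * overwrite the remaining entries in place.
 */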
static void udf_update_extents(struct inode *inode,
799 	long_ad laarr[EXTENT_MERGE_SIZE], int startnum, int endnum,
800 	lb_addr pbloc, uint32_t pextoffset, struct buffer_head **pbh)
801 {
802 	int start = 0, i;
803 	lb_addr tmploc;
804 	uint32_t tmplen;
805 
806 	if (startnum > endnum)
807 	{
808 		for (i=0; i<(startnum-endnum); i++)
809 		{
810 			udf_delete_aext(inode, pbloc, pextoffset, laarr[i].extLocation,
811 				laarr[i].extLength, *pbh);
812 		}
813 	}
814 	else if (startnum < endnum)
815 	{
816 		for (i=0; i<(endnum-startnum); i++)
817 		{
818 			udf_insert_aext(inode, pbloc, pextoffset, laarr[i].extLocation,
819 				laarr[i].extLength, *pbh);
820 			udf_next_aext(inode, &pbloc, &pextoffset, &laarr[i].extLocation,
821 				&laarr[i].extLength, pbh, 1);
822 			start ++;
823 		}
824 	}
825 
826 	for (i=start; i<endnum; i++)
827 	{
828 		udf_next_aext(inode, &pbloc, &pextoffset, &tmploc, &tmplen, pbh, 0);
829 		udf_write_aext(inode, pbloc, &pextoffset, laarr[i].extLocation,
830 			laarr[i].extLength, *pbh, 1);
831 	}
832 }
833 
struct buffer_head * udf_bread(struct inode * inode, int block,
835 	int create, int * err)
836 {
837 	struct buffer_head * bh = NULL;
838 
839 	bh = udf_getblk(inode, block, create, err);
840 	if (!bh)
841 		return NULL;
842 
843 	if (buffer_uptodate(bh))
844 		return bh;
845 	ll_rw_block(READ, 1, &bh);
846 	wait_on_buffer(bh);
847 	if (buffer_uptodate(bh))
848 		return bh;
849 	brelse(bh);
850 	*err = -EIO;
851 	return NULL;
852 }
853 
void udf_truncate(struct inode * inode)
855 {
856 	int offset;
857 	struct buffer_head *bh;
858 	int err;
859 
860 	if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
861 			S_ISLNK(inode->i_mode)))
862 		return;
863 	if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
864 		return;
865 
866 	if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_IN_ICB)
867 	{
868 		if (inode->i_sb->s_blocksize < (udf_file_entry_alloc_offset(inode) +
869 			inode->i_size))
870 		{
871 			udf_expand_file_adinicb(inode, inode->i_size, &err);
872 			if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_IN_ICB)
873 			{
874 				inode->i_size = UDF_I_LENALLOC(inode);
875 				return;
876 			}
877 			else
878 				udf_truncate_extents(inode);
879 		}
880 		else
881 		{
882 			offset = (inode->i_size & (inode->i_sb->s_blocksize - 1)) +
883 				udf_file_entry_alloc_offset(inode);
884 
885 			if ((bh = udf_tread(inode->i_sb,
886 				udf_get_lb_pblock(inode->i_sb, UDF_I_LOCATION(inode), 0))))
887 			{
888 				memset(bh->b_data + offset, 0x00, inode->i_sb->s_blocksize - offset);
889 				mark_buffer_dirty(bh);
890 				udf_release_data(bh);
891 			}
892 			UDF_I_LENALLOC(inode) = inode->i_size;
893 		}
894 	}
895 	else
896 	{
897 		block_truncate_page(inode->i_mapping, inode->i_size, udf_get_block);
898 		udf_truncate_extents(inode);
899 	}
900 
901 	inode->i_mtime = inode->i_ctime = CURRENT_TIME;
902 	UDF_I_UMTIME(inode) = UDF_I_UCTIME(inode) = CURRENT_UTIME;
903 	if (IS_SYNC(inode))
904 		udf_sync_inode (inode);
905 	else
906 		mark_inode_dirty(inode);
907 }
908 
/*
 * udf_read_inode
 *
 * PURPOSE
 *	Read an inode.
 *
 * DESCRIPTION
 *	This routine is called by iget() [which is called by udf_iget()]
 *	(clean_inode() will have been called first)
 *	when an inode is first read into memory.
 *
 * HISTORY
 *	July 1, 1997 - Andrew E. Mileski
 *	Written, tested, and released.
 *
 * 12/19/98 dgb  Updated to fix size problems.
 */

void
udf_read_inode(struct inode *inode)
929 {
930 	memset(&UDF_I_LOCATION(inode), 0xFF, sizeof(lb_addr));
931 }
932 
void
__udf_read_inode(struct inode *inode)
935 {
936 	struct buffer_head *bh = NULL;
937 	struct fileEntry *fe;
938 	uint16_t ident;
939 
	/*
	 * Set defaults, but the inode is still incomplete!
	 * Note: get_new_inode() sets the following on a new inode:
	 *      i_sb = sb
	 *      i_dev = sb->s_dev;
	 *      i_ino = ino
	 *      i_flags = sb->s_flags
	 *      i_state = 0
	 * clean_inode(): zero fills and sets
	 *      i_count = 1
	 *      i_nlink = 1
	 *      i_op = NULL;
	 */
953 
954 	inode->i_blksize = PAGE_SIZE;
955 
956 	bh = udf_read_ptagged(inode->i_sb, UDF_I_LOCATION(inode), 0, &ident);
957 
958 	if (!bh)
959 	{
960 		printk(KERN_ERR "udf: udf_read_inode(ino %ld) failed !bh\n",
961 			inode->i_ino);
962 		make_bad_inode(inode);
963 		return;
964 	}
965 
966 	if (ident != TAG_IDENT_FE && ident != TAG_IDENT_EFE &&
967 		ident != TAG_IDENT_USE)
968 	{
969 		printk(KERN_ERR "udf: udf_read_inode(ino %ld) failed ident=%d\n",
970 			inode->i_ino, ident);
971 		udf_release_data(bh);
972 		make_bad_inode(inode);
973 		return;
974 	}
975 
976 	fe = (struct fileEntry *)bh->b_data;
977 
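	/*
	 * Strategy 4096 ICBs are chained: the entry read above may be followed
	 * by an indirect entry whose indirectICB points at the live file
	 * entry, so follow that link and restart the read from there.
	 */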
978 	if (le16_to_cpu(fe->icbTag.strategyType) == 4096)
979 	{
980 		struct buffer_head *ibh = NULL, *nbh = NULL;
981 		struct indirectEntry *ie;
982 
983 		ibh = udf_read_ptagged(inode->i_sb, UDF_I_LOCATION(inode), 1, &ident);
984 		if (ident == TAG_IDENT_IE)
985 		{
986 			if (ibh)
987 			{
988 				lb_addr loc;
989 				ie = (struct indirectEntry *)ibh->b_data;
990 
991 				loc = lelb_to_cpu(ie->indirectICB.extLocation);
992 
993 				if (ie->indirectICB.extLength &&
994 					(nbh = udf_read_ptagged(inode->i_sb, loc, 0, &ident)))
995 				{
996 					if (ident == TAG_IDENT_FE ||
997 						ident == TAG_IDENT_EFE)
998 					{
999 						memcpy(&UDF_I_LOCATION(inode), &loc, sizeof(lb_addr));
1000 						udf_release_data(bh);
1001 						udf_release_data(ibh);
1002 						udf_release_data(nbh);
1003 						__udf_read_inode(inode);
1004 						return;
1005 					}
1006 					else
1007 					{
1008 						udf_release_data(nbh);
1009 						udf_release_data(ibh);
1010 					}
1011 				}
1012 				else
1013 					udf_release_data(ibh);
1014 			}
1015 		}
1016 		else
1017 			udf_release_data(ibh);
1018 	}
1019 	else if (le16_to_cpu(fe->icbTag.strategyType) != 4)
1020 	{
1021 		printk(KERN_ERR "udf: unsupported strategy type: %d\n",
1022 			le16_to_cpu(fe->icbTag.strategyType));
1023 		udf_release_data(bh);
1024 		make_bad_inode(inode);
1025 		return;
1026 	}
1027 	udf_fill_inode(inode, bh);
1028 	udf_release_data(bh);
1029 }
1030 
static void udf_fill_inode(struct inode *inode, struct buffer_head *bh)
1032 {
1033 	struct fileEntry *fe;
1034 	struct extendedFileEntry *efe;
1035 	time_t convtime;
1036 	long convtime_usec;
1037 	int offset, alen;
1038 
1039 	inode->i_version = ++event;
1040 	UDF_I_NEW_INODE(inode) = 0;
1041 
1042 	fe = (struct fileEntry *)bh->b_data;
1043 	efe = (struct extendedFileEntry *)bh->b_data;
1044 
1045 	if (le16_to_cpu(fe->icbTag.strategyType) == 4)
1046 		UDF_I_STRAT4096(inode) = 0;
1047 	else /* if (le16_to_cpu(fe->icbTag.strategyType) == 4096) */
1048 		UDF_I_STRAT4096(inode) = 1;
1049 
1050 	UDF_I_ALLOCTYPE(inode) = le16_to_cpu(fe->icbTag.flags) & ICBTAG_FLAG_AD_MASK;
1051 	if (le16_to_cpu(fe->descTag.tagIdent) == TAG_IDENT_EFE)
1052 		UDF_I_EXTENDED_FE(inode) = 1;
1053 	else if (le16_to_cpu(fe->descTag.tagIdent) == TAG_IDENT_FE)
1054 		UDF_I_EXTENDED_FE(inode) = 0;
1055 	else if (le16_to_cpu(fe->descTag.tagIdent) == TAG_IDENT_USE)
1056 	{
1057 		UDF_I_LENALLOC(inode) =
1058 			le32_to_cpu(
1059 				((struct unallocSpaceEntry *)bh->b_data)->lengthAllocDescs);
1060 		return;
1061 	}
1062 
1063 	inode->i_uid = le32_to_cpu(fe->uid);
1064 	if ( inode->i_uid == -1 ) inode->i_uid = UDF_SB(inode->i_sb)->s_uid;
1065 
1066 	inode->i_gid = le32_to_cpu(fe->gid);
1067 	if ( inode->i_gid == -1 ) inode->i_gid = UDF_SB(inode->i_sb)->s_gid;
1068 
1069 	inode->i_nlink = le16_to_cpu(fe->fileLinkCount);
1070 	if (!inode->i_nlink)
1071 		inode->i_nlink = 1;
1072 
1073 	inode->i_size = le64_to_cpu(fe->informationLength);
1074 	UDF_I_LENEXTENTS(inode) = inode->i_size;
1075 
1076 	inode->i_mode = udf_convert_permissions(fe);
1077 	inode->i_mode &= ~UDF_SB(inode->i_sb)->s_umask;
1078 
1079 	UDF_I_NEXT_ALLOC_BLOCK(inode) = 0;
1080 	UDF_I_NEXT_ALLOC_GOAL(inode) = 0;
1081 
1082 	if (UDF_I_EXTENDED_FE(inode) == 0)
1083 	{
1084 		inode->i_blocks = le64_to_cpu(fe->logicalBlocksRecorded) <<
1085 			(inode->i_sb->s_blocksize_bits - 9);
1086 
1087 		if ( udf_stamp_to_time(&convtime, &convtime_usec,
1088 			lets_to_cpu(fe->accessTime)) )
1089 		{
1090 			inode->i_atime = convtime;
1091 		}
1092 		else
1093 		{
1094 			inode->i_atime = UDF_SB_RECORDTIME(inode->i_sb);
1095 		}
1096 
1097 		if ( udf_stamp_to_time(&convtime, &convtime_usec,
1098 			lets_to_cpu(fe->modificationTime)) )
1099 		{
1100 			inode->i_mtime = convtime;
1101 			UDF_I_UMTIME(inode) = convtime_usec;
1102 		}
1103 		else
1104 		{
1105 			inode->i_mtime = UDF_SB_RECORDTIME(inode->i_sb);
1106 			UDF_I_UMTIME(inode) = 0;
1107 		}
1108 
1109 		if ( udf_stamp_to_time(&convtime, &convtime_usec,
1110 			lets_to_cpu(fe->attrTime)) )
1111 		{
1112 			inode->i_ctime = convtime;
1113 			UDF_I_UCTIME(inode) = convtime_usec;
1114 		}
1115 		else
1116 		{
1117 			inode->i_ctime = UDF_SB_RECORDTIME(inode->i_sb);
1118 			UDF_I_UCTIME(inode) = 0;
1119 		}
1120 
1121 		UDF_I_UNIQUE(inode) = le64_to_cpu(fe->uniqueID);
1122 		UDF_I_LENEATTR(inode) = le32_to_cpu(fe->lengthExtendedAttr);
1123 		UDF_I_LENALLOC(inode) = le32_to_cpu(fe->lengthAllocDescs);
1124 		offset = sizeof(struct fileEntry) + UDF_I_LENEATTR(inode);
1125 		alen = offset + UDF_I_LENALLOC(inode);
1126 	}
1127 	else
1128 	{
1129 		inode->i_blocks = le64_to_cpu(efe->logicalBlocksRecorded) <<
1130 			(inode->i_sb->s_blocksize_bits - 9);
1131 
1132 		if ( udf_stamp_to_time(&convtime, &convtime_usec,
1133 			lets_to_cpu(efe->accessTime)) )
1134 		{
1135 			inode->i_atime = convtime;
1136 		}
1137 		else
1138 		{
1139 			inode->i_atime = UDF_SB_RECORDTIME(inode->i_sb);
1140 		}
1141 
1142 		if ( udf_stamp_to_time(&convtime, &convtime_usec,
1143 			lets_to_cpu(efe->modificationTime)) )
1144 		{
1145 			inode->i_mtime = convtime;
1146 			UDF_I_UMTIME(inode) = convtime_usec;
1147 		}
1148 		else
1149 		{
1150 			inode->i_mtime = UDF_SB_RECORDTIME(inode->i_sb);
1151 			UDF_I_UMTIME(inode) = 0;
1152 		}
1153 
1154 		if ( udf_stamp_to_time(&convtime, &convtime_usec,
1155 			lets_to_cpu(efe->createTime)) )
1156 		{
1157 			UDF_I_CRTIME(inode) = convtime;
1158 			UDF_I_UCRTIME(inode) = convtime_usec;
1159 		}
1160 		else
1161 		{
1162 			UDF_I_CRTIME(inode) = UDF_SB_RECORDTIME(inode->i_sb);
1163 			UDF_I_UCRTIME(inode) = 0;
1164 		}
1165 
1166 		if ( udf_stamp_to_time(&convtime, &convtime_usec,
1167 			lets_to_cpu(efe->attrTime)) )
1168 		{
1169 			inode->i_ctime = convtime;
1170 			UDF_I_UCTIME(inode) = convtime_usec;
1171 		}
1172 		else
1173 		{
1174 			inode->i_ctime = UDF_SB_RECORDTIME(inode->i_sb);
1175 			UDF_I_UCTIME(inode) = 0;
1176 		}
1177 
1178 		UDF_I_UNIQUE(inode) = le64_to_cpu(efe->uniqueID);
1179 		UDF_I_LENEATTR(inode) = le32_to_cpu(efe->lengthExtendedAttr);
1180 		UDF_I_LENALLOC(inode) = le32_to_cpu(efe->lengthAllocDescs);
1181 		offset = sizeof(struct extendedFileEntry) + UDF_I_LENEATTR(inode);
1182 		alen = offset + UDF_I_LENALLOC(inode);
1183 	}
1184 
1185 	switch (fe->icbTag.fileType)
1186 	{
1187 		case ICBTAG_FILE_TYPE_DIRECTORY:
1188 		{
1189 			inode->i_op = &udf_dir_inode_operations;
1190 			inode->i_fop = &udf_dir_operations;
1191 			inode->i_mode |= S_IFDIR;
1192 			inode->i_nlink ++;
1193 			break;
1194 		}
1195 		case ICBTAG_FILE_TYPE_REALTIME:
1196 		case ICBTAG_FILE_TYPE_REGULAR:
1197 		case ICBTAG_FILE_TYPE_UNDEF:
1198 		{
1199 			if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_IN_ICB)
1200 				inode->i_data.a_ops = &udf_adinicb_aops;
1201 			else
1202 				inode->i_data.a_ops = &udf_aops;
1203 			inode->i_op = &udf_file_inode_operations;
1204 			inode->i_fop = &udf_file_operations;
1205 			inode->i_mode |= S_IFREG;
1206 			break;
1207 		}
1208 		case ICBTAG_FILE_TYPE_BLOCK:
1209 		{
1210 			inode->i_mode |= S_IFBLK;
1211 			break;
1212 		}
1213 		case ICBTAG_FILE_TYPE_CHAR:
1214 		{
1215 			inode->i_mode |= S_IFCHR;
1216 			break;
1217 		}
1218 		case ICBTAG_FILE_TYPE_FIFO:
1219 		{
1220 			init_special_inode(inode, inode->i_mode | S_IFIFO, 0);
1221 			break;
1222 		}
1223 		case ICBTAG_FILE_TYPE_SYMLINK:
1224 		{
1225 			inode->i_data.a_ops = &udf_symlink_aops;
1226 			inode->i_op = &page_symlink_inode_operations;
1227 			inode->i_mode = S_IFLNK|S_IRWXUGO;
1228 			break;
1229 		}
1230 		default:
1231 		{
1232 			printk(KERN_ERR "udf: udf_fill_inode(ino %ld) failed unknown file type=%d\n",
1233 				inode->i_ino, fe->icbTag.fileType);
1234 			make_bad_inode(inode);
1235 			return;
1236 		}
1237 	}
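	/* block and character devices keep their major/minor numbers in a
	   Device Specification extended attribute (EA type 12, subtype 1) */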
1238 	if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode))
1239 	{
1240 		struct buffer_head *tbh = NULL;
1241 		struct deviceSpec *dsea =
1242 			(struct deviceSpec *)
1243 				udf_get_extendedattr(inode, 12, 1, &tbh);
1244 
1245 		if (dsea)
1246 		{
1247 			init_special_inode(inode, inode->i_mode,
1248 				((le32_to_cpu(dsea->majorDeviceIdent)) << 8) |
1249 				(le32_to_cpu(dsea->minorDeviceIdent) & 0xFF));
1250 			/* Developer ID ??? */
1251 			udf_release_data(tbh);
1252 		}
1253 		else
1254 		{
1255 			make_bad_inode(inode);
1256 		}
1257 	}
1258 }
1259 
static mode_t
udf_convert_permissions(struct fileEntry *fe)
1262 {
1263 	mode_t mode;
1264 	uint32_t permissions;
1265 	uint32_t flags;
1266 
1267 	permissions = le32_to_cpu(fe->permissions);
1268 	flags = le16_to_cpu(fe->icbTag.flags);
1269 
1270 	mode =	(( permissions      ) & S_IRWXO) |
1271 		(( permissions >> 2 ) & S_IRWXG) |
1272 		(( permissions >> 4 ) & S_IRWXU) |
1273 		(( flags & ICBTAG_FLAG_SETUID) ? S_ISUID : 0) |
1274 		(( flags & ICBTAG_FLAG_SETGID) ? S_ISGID : 0) |
1275 		(( flags & ICBTAG_FLAG_STICKY) ? S_ISVTX : 0);
1276 
1277 	return mode;
1278 }
1279 
/*
 * udf_write_inode
 *
 * PURPOSE
 *	Write out the specified inode.
 *
 * DESCRIPTION
 *	This routine is called whenever an inode is synced.
 *	It takes the big kernel lock and writes the on-disc file entry
 *	via udf_update_inode().
 *
 * HISTORY
 *	July 1, 1997 - Andrew E. Mileski
 *	Written, tested, and released.
 */

void udf_write_inode(struct inode * inode, int sync)
1296 {
1297 	lock_kernel();
1298 	udf_update_inode(inode, sync);
1299 	unlock_kernel();
1300 }
1301 
int udf_sync_inode(struct inode * inode)
1303 {
1304 	return udf_update_inode(inode, 1);
1305 }
1306 
static int
udf_update_inode(struct inode *inode, int do_sync)
1309 {
1310 	struct buffer_head *bh = NULL;
1311 	struct fileEntry *fe;
1312 	struct extendedFileEntry *efe;
1313 	uint32_t udfperms;
1314 	uint16_t icbflags;
1315 	uint16_t crclen;
1316 	int i;
1317 	timestamp cpu_time;
1318 	int err = 0;
1319 
1320 	bh = udf_tread(inode->i_sb,
1321 		udf_get_lb_pblock(inode->i_sb, UDF_I_LOCATION(inode), 0));
1322 
1323 	if (!bh)
1324 	{
1325 		udf_debug("bread failure\n");
1326 		return -EIO;
1327 	}
1328 	fe = (struct fileEntry *)bh->b_data;
1329 	efe = (struct extendedFileEntry *)bh->b_data;
1330 	if (UDF_I_NEW_INODE(inode) == 1)
1331 	{
1332 		if (UDF_I_EXTENDED_FE(inode) == 0)
1333 			memset(bh->b_data, 0x00, sizeof(struct fileEntry));
1334 		else
1335 			memset(bh->b_data, 0x00, sizeof(struct extendedFileEntry));
1336 		memset(bh->b_data + udf_file_entry_alloc_offset(inode) +
1337 			UDF_I_LENALLOC(inode), 0x0, inode->i_sb->s_blocksize -
1338 			udf_file_entry_alloc_offset(inode) - UDF_I_LENALLOC(inode));
1339 		UDF_I_NEW_INODE(inode) = 0;
1340 	}
1341 
1342 	if (le16_to_cpu(fe->descTag.tagIdent) == TAG_IDENT_USE)
1343 	{
1344 		struct unallocSpaceEntry *use =
1345 			(struct unallocSpaceEntry *)bh->b_data;
1346 
1347 		use->lengthAllocDescs = cpu_to_le32(UDF_I_LENALLOC(inode));
1348 		crclen = sizeof(struct unallocSpaceEntry) + UDF_I_LENALLOC(inode) -
1349 			sizeof(tag);
1350 		use->descTag.tagLocation = cpu_to_le32(UDF_I_LOCATION(inode).logicalBlockNum);
1351 		use->descTag.descCRCLength = cpu_to_le16(crclen);
1352 		use->descTag.descCRC = cpu_to_le16(udf_crc((char *)use + sizeof(tag), crclen, 0));
1353 
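		/* the tag checksum is the byte-wise sum of the 16-byte descriptor
		   tag, skipping byte 4, which holds the checksum itself */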
1354 		use->descTag.tagChecksum = 0;
1355 		for (i=0; i<16; i++)
1356 			if (i != 4)
1357 				use->descTag.tagChecksum += ((uint8_t *)&(use->descTag))[i];
1358 
1359 		mark_buffer_dirty(bh);
1360 		udf_release_data(bh);
1361 		return err;
1362 	}
1363 
1364 	if (inode->i_uid != UDF_SB(inode->i_sb)->s_uid)
1365 		fe->uid = cpu_to_le32(inode->i_uid);
1366 	else
1367 		fe->uid = cpu_to_le32(-1);
1368 
1369 	if (inode->i_gid != UDF_SB(inode->i_sb)->s_gid)
1370 		fe->gid = cpu_to_le32(inode->i_gid);
1371 	else
		fe->gid = cpu_to_le32(-1);
1373 
1374 	udfperms =	((inode->i_mode & S_IRWXO)     ) |
1375 			((inode->i_mode & S_IRWXG) << 2) |
1376 			((inode->i_mode & S_IRWXU) << 4);
1377 
1378 	udfperms |=	(le32_to_cpu(fe->permissions) &
1379 			(FE_PERM_O_DELETE | FE_PERM_O_CHATTR |
1380 			 FE_PERM_G_DELETE | FE_PERM_G_CHATTR |
1381 			 FE_PERM_U_DELETE | FE_PERM_U_CHATTR));
1382 	fe->permissions = cpu_to_le32(udfperms);
1383 
1384 	if (S_ISDIR(inode->i_mode))
1385 		fe->fileLinkCount = cpu_to_le16(inode->i_nlink - 1);
1386 	else
1387 		fe->fileLinkCount = cpu_to_le16(inode->i_nlink);
1388 
1389 	fe->informationLength = cpu_to_le64(inode->i_size);
1390 
1391 	if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode))
1392 	{
1393 		regid *eid;
1394 		struct buffer_head *tbh = NULL;
1395 		struct deviceSpec *dsea =
1396 			(struct deviceSpec *)
1397 				udf_get_extendedattr(inode, 12, 1, &tbh);
1398 
1399 		if (!dsea)
1400 		{
1401 			dsea = (struct deviceSpec *)
1402 				udf_add_extendedattr(inode,
1403 					sizeof(struct deviceSpec) +
1404 					sizeof(regid), 12, 0x3, &tbh);
1405 			dsea->attrType = 12;
1406 			dsea->attrSubtype = 1;
1407 			dsea->attrLength = sizeof(struct deviceSpec) +
1408 				sizeof(regid);
1409 			dsea->impUseLength = sizeof(regid);
1410 		}
1411 		eid = (regid *)dsea->impUse;
1412 		memset(eid, 0, sizeof(regid));
1413 		strcpy(eid->ident, UDF_ID_DEVELOPER);
1414 		eid->identSuffix[0] = UDF_OS_CLASS_UNIX;
1415 		eid->identSuffix[1] = UDF_OS_ID_LINUX;
1416 		dsea->majorDeviceIdent = kdev_t_to_nr(inode->i_rdev) >> 8;
1417 		dsea->minorDeviceIdent = kdev_t_to_nr(inode->i_rdev) & 0xFF;
1418 		mark_buffer_dirty_inode(tbh, inode);
1419 		udf_release_data(tbh);
1420 	}
1421 
1422 	if (UDF_I_EXTENDED_FE(inode) == 0)
1423 	{
1424 		fe->logicalBlocksRecorded = cpu_to_le64(
1425 			(inode->i_blocks + (1 << (inode->i_sb->s_blocksize_bits - 9)) - 1) >>
1426 			(inode->i_sb->s_blocksize_bits - 9));
1427 
1428 		if (udf_time_to_stamp(&cpu_time, inode->i_atime, 0))
1429 			fe->accessTime = cpu_to_lets(cpu_time);
1430 		if (udf_time_to_stamp(&cpu_time, inode->i_mtime, UDF_I_UMTIME(inode)))
1431 			fe->modificationTime = cpu_to_lets(cpu_time);
1432 		if (udf_time_to_stamp(&cpu_time, inode->i_ctime, UDF_I_UCTIME(inode)))
1433 			fe->attrTime = cpu_to_lets(cpu_time);
1434 		memset(&(fe->impIdent), 0, sizeof(regid));
1435 		strcpy(fe->impIdent.ident, UDF_ID_DEVELOPER);
1436 		fe->impIdent.identSuffix[0] = UDF_OS_CLASS_UNIX;
1437 		fe->impIdent.identSuffix[1] = UDF_OS_ID_LINUX;
1438 		fe->uniqueID = cpu_to_le64(UDF_I_UNIQUE(inode));
1439 		fe->lengthExtendedAttr = cpu_to_le32(UDF_I_LENEATTR(inode));
1440 		fe->lengthAllocDescs = cpu_to_le32(UDF_I_LENALLOC(inode));
		fe->descTag.tagIdent = cpu_to_le16(TAG_IDENT_FE);
1442 		crclen = sizeof(struct fileEntry);
1443 	}
1444 	else
1445 	{
1446 		efe->objectSize = cpu_to_le64(inode->i_size);
1447 		efe->logicalBlocksRecorded = cpu_to_le64(
1448 			(inode->i_blocks + (1 << (inode->i_sb->s_blocksize_bits - 9)) - 1) >>
1449 			(inode->i_sb->s_blocksize_bits - 9));
1450 
1451 		if (UDF_I_CRTIME(inode) >= inode->i_atime)
1452 		{
1453 			UDF_I_CRTIME(inode) = inode->i_atime;
1454 			UDF_I_UCRTIME(inode) = 0;
1455 		}
1456 		if (UDF_I_CRTIME(inode) > inode->i_mtime ||
1457 			(UDF_I_CRTIME(inode) == inode->i_mtime &&
1458 			 UDF_I_UCRTIME(inode) > UDF_I_UMTIME(inode)))
1459 		{
1460 			UDF_I_CRTIME(inode) = inode->i_mtime;
1461 			UDF_I_UCRTIME(inode) = UDF_I_UMTIME(inode);
1462 		}
1463 		if (UDF_I_CRTIME(inode) > inode->i_ctime ||
1464 			(UDF_I_CRTIME(inode) == inode->i_ctime &&
1465 			 UDF_I_UCRTIME(inode) > UDF_I_UCTIME(inode)))
1466 		{
1467 			UDF_I_CRTIME(inode) = inode->i_ctime;
1468 			UDF_I_UCRTIME(inode) = UDF_I_UCTIME(inode);
1469 		}
1470 
1471 		if (udf_time_to_stamp(&cpu_time, inode->i_atime, 0))
1472 			efe->accessTime = cpu_to_lets(cpu_time);
1473 		if (udf_time_to_stamp(&cpu_time, inode->i_mtime, UDF_I_UMTIME(inode)))
1474 			efe->modificationTime = cpu_to_lets(cpu_time);
1475 		if (udf_time_to_stamp(&cpu_time, UDF_I_CRTIME(inode), UDF_I_UCRTIME(inode)))
1476 			efe->createTime = cpu_to_lets(cpu_time);
1477 		if (udf_time_to_stamp(&cpu_time, inode->i_ctime, UDF_I_UCTIME(inode)))
1478 			efe->attrTime = cpu_to_lets(cpu_time);
1479 
1480 		memset(&(efe->impIdent), 0, sizeof(regid));
1481 		strcpy(efe->impIdent.ident, UDF_ID_DEVELOPER);
1482 		efe->impIdent.identSuffix[0] = UDF_OS_CLASS_UNIX;
1483 		efe->impIdent.identSuffix[1] = UDF_OS_ID_LINUX;
1484 		efe->uniqueID = cpu_to_le64(UDF_I_UNIQUE(inode));
1485 		efe->lengthExtendedAttr = cpu_to_le32(UDF_I_LENEATTR(inode));
1486 		efe->lengthAllocDescs = cpu_to_le32(UDF_I_LENALLOC(inode));
		efe->descTag.tagIdent = cpu_to_le16(TAG_IDENT_EFE);
1488 		crclen = sizeof(struct extendedFileEntry);
1489 	}
1490 	if (UDF_I_STRAT4096(inode))
1491 	{
1492 		fe->icbTag.strategyType = cpu_to_le16(4096);
1493 		fe->icbTag.strategyParameter = cpu_to_le16(1);
1494 		fe->icbTag.numEntries = cpu_to_le16(2);
1495 	}
1496 	else
1497 	{
1498 		fe->icbTag.strategyType = cpu_to_le16(4);
1499 		fe->icbTag.numEntries = cpu_to_le16(1);
1500 	}
1501 
1502 	if (S_ISDIR(inode->i_mode))
1503 		fe->icbTag.fileType = ICBTAG_FILE_TYPE_DIRECTORY;
1504 	else if (S_ISREG(inode->i_mode))
1505 		fe->icbTag.fileType = ICBTAG_FILE_TYPE_REGULAR;
1506 	else if (S_ISLNK(inode->i_mode))
1507 		fe->icbTag.fileType = ICBTAG_FILE_TYPE_SYMLINK;
1508 	else if (S_ISBLK(inode->i_mode))
1509 		fe->icbTag.fileType = ICBTAG_FILE_TYPE_BLOCK;
1510 	else if (S_ISCHR(inode->i_mode))
1511 		fe->icbTag.fileType = ICBTAG_FILE_TYPE_CHAR;
1512 	else if (S_ISFIFO(inode->i_mode))
1513 		fe->icbTag.fileType = ICBTAG_FILE_TYPE_FIFO;
1514 
1515 	icbflags =	UDF_I_ALLOCTYPE(inode) |
1516 			((inode->i_mode & S_ISUID) ? ICBTAG_FLAG_SETUID : 0) |
1517 			((inode->i_mode & S_ISGID) ? ICBTAG_FLAG_SETGID : 0) |
1518 			((inode->i_mode & S_ISVTX) ? ICBTAG_FLAG_STICKY : 0) |
1519 			(le16_to_cpu(fe->icbTag.flags) &
1520 				~(ICBTAG_FLAG_AD_MASK | ICBTAG_FLAG_SETUID |
1521 				ICBTAG_FLAG_SETGID | ICBTAG_FLAG_STICKY));
1522 
1523 	fe->icbTag.flags = cpu_to_le16(icbflags);
1524 	if (UDF_SB_UDFREV(inode->i_sb) >= 0x0200)
1525 		fe->descTag.descVersion = cpu_to_le16(3);
1526 	else
1527 		fe->descTag.descVersion = cpu_to_le16(2);
1528 	fe->descTag.tagSerialNum = cpu_to_le16(UDF_SB_SERIALNUM(inode->i_sb));
1529 	fe->descTag.tagLocation = cpu_to_le32(UDF_I_LOCATION(inode).logicalBlockNum);
1530 	crclen += UDF_I_LENEATTR(inode) + UDF_I_LENALLOC(inode) - sizeof(tag);
1531 	fe->descTag.descCRCLength = cpu_to_le16(crclen);
1532 	fe->descTag.descCRC = cpu_to_le16(udf_crc((char *)fe + sizeof(tag), crclen, 0));
1533 
1534 	fe->descTag.tagChecksum = 0;
1535 	for (i=0; i<16; i++)
1536 		if (i != 4)
1537 			fe->descTag.tagChecksum += ((uint8_t *)&(fe->descTag))[i];
1538 
1539 	/* write the data blocks */
1540 	mark_buffer_dirty(bh);
1541 	if (do_sync)
1542 	{
1543 		ll_rw_block(WRITE, 1, &bh);
1544 		wait_on_buffer(bh);
1545 		if (buffer_req(bh) && !buffer_uptodate(bh))
1546 		{
1547 			printk("IO error syncing udf inode [%s:%08lx]\n",
1548 				bdevname(inode->i_dev), inode->i_ino);
1549 			err = -EIO;
1550 		}
1551 	}
1552 	udf_release_data(bh);
1553 	return err;
1554 }
1555 
/*
 * udf_iget
 *
 * PURPOSE
 *	Get an inode.
 *
 * DESCRIPTION
 *	This routine replaces iget() and read_inode().
 *
 * HISTORY
 *	October 3, 1997 - Andrew E. Mileski
 *	Written, tested, and released.
 *
 * 12/19/98 dgb  Added semaphore and changed to be a wrapper of iget
 */
struct inode *
udf_iget(struct super_block *sb, lb_addr ino)
1573 {
1574 	struct inode *inode;
1575 	unsigned long block;
1576 
1577 	block = udf_get_lb_pblock(sb, ino, 0);
1578 
1579 	/* Get the inode */
1580 
1581 	inode = iget(sb, block);
1582 		/* calls udf_read_inode() ! */
1583 
1584 	if (!inode)
1585 	{
1586 		printk(KERN_ERR "udf: iget() failed\n");
1587 		return NULL;
1588 	}
1589 	else if (is_bad_inode(inode))
1590 	{
1591 		iput(inode);
1592 		return NULL;
1593 	}
1594 	else if (UDF_I_LOCATION(inode).logicalBlockNum == 0xFFFFFFFF &&
1595 		UDF_I_LOCATION(inode).partitionReferenceNum == 0xFFFF)
1596 	{
1597 		memcpy(&UDF_I_LOCATION(inode), &ino, sizeof(lb_addr));
1598 		__udf_read_inode(inode);
1599 		if (is_bad_inode(inode))
1600 		{
1601 			iput(inode);
1602 			return NULL;
1603 		}
1604 	}
1605 
1606 	if ( ino.logicalBlockNum >= UDF_SB_PARTLEN(sb, ino.partitionReferenceNum) )
1607 	{
1608 		udf_debug("block=%d, partition=%d out of range\n",
1609 			ino.logicalBlockNum, ino.partitionReferenceNum);
1610 		make_bad_inode(inode);
1611 		iput(inode);
1612 		return NULL;
1613  	}
1614 
1615 	return inode;
1616 }
1617 
int8_t udf_add_aext(struct inode *inode, lb_addr *bloc, int *extoffset,
1619 	lb_addr eloc, uint32_t elen, struct buffer_head **bh, int inc)
1620 {
1621 	int adsize;
1622 	short_ad *sad = NULL;
1623 	long_ad *lad = NULL;
1624 	struct allocExtDesc *aed;
1625 	int8_t etype;
1626 
1627 	if (!(*bh))
1628 	{
1629 		if (!(*bh = udf_tread(inode->i_sb,
1630 			udf_get_lb_pblock(inode->i_sb, *bloc, 0))))
1631 		{
1632 			udf_debug("reading block %d failed!\n",
1633 				udf_get_lb_pblock(inode->i_sb, *bloc, 0));
1634 			return -1;
1635 		}
1636 	}
1637 
1638 	if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_SHORT)
1639 		adsize = sizeof(short_ad);
1640 	else if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_LONG)
1641 		adsize = sizeof(long_ad);
1642 	else
1643 		return -1;
1644 
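	/*
	 * Not enough room in the current descriptor block for this entry plus
	 * a continuation pointer: allocate a fresh block, set it up as an
	 * allocation extent descriptor (AED), and chain to it with an
	 * EXT_NEXT_EXTENT_ALLOCDECS descriptor.
	 */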
1645 	if (*extoffset + (2 * adsize) > inode->i_sb->s_blocksize)
1646 	{
1647 		char *sptr, *dptr;
1648 		struct buffer_head *nbh;
1649 		int err, loffset;
1650 		lb_addr obloc = *bloc;
1651 
1652 		if (!(bloc->logicalBlockNum = udf_new_block(inode->i_sb, inode,
1653 			obloc.partitionReferenceNum, obloc.logicalBlockNum, &err)))
1654 		{
1655 			return -1;
1656 		}
1657 		if (!(nbh = udf_tgetblk(inode->i_sb, udf_get_lb_pblock(inode->i_sb,
1658 			*bloc, 0))))
1659 		{
1660 			return -1;
1661 		}
1662 		lock_buffer(nbh);
1663 		memset(nbh->b_data, 0x00, inode->i_sb->s_blocksize);
1664 		mark_buffer_uptodate(nbh, 1);
1665 		unlock_buffer(nbh);
1666 		mark_buffer_dirty_inode(nbh, inode);
1667 
1668 		aed = (struct allocExtDesc *)(nbh->b_data);
1669 		if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT))
1670 			aed->previousAllocExtLocation = cpu_to_le32(obloc.logicalBlockNum);
1671 		if (*extoffset + adsize > inode->i_sb->s_blocksize)
1672 		{
1673 			loffset = *extoffset;
1674 			aed->lengthAllocDescs = cpu_to_le32(adsize);
1675 			sptr = (*bh)->b_data + *extoffset - adsize;
1676 			dptr = nbh->b_data + sizeof(struct allocExtDesc);
1677 			memcpy(dptr, sptr, adsize);
1678 			*extoffset = sizeof(struct allocExtDesc) + adsize;
1679 		}
1680 		else
1681 		{
1682 			loffset = *extoffset + adsize;
1683 			aed->lengthAllocDescs = cpu_to_le32(0);
1684 			sptr = (*bh)->b_data + *extoffset;
1685 			*extoffset = sizeof(struct allocExtDesc);
1686 
1687 			if (memcmp(&UDF_I_LOCATION(inode), &obloc, sizeof(lb_addr)))
1688 			{
1689 				aed = (struct allocExtDesc *)(*bh)->b_data;
1690 				aed->lengthAllocDescs =
1691 					cpu_to_le32(le32_to_cpu(aed->lengthAllocDescs) + adsize);
1692 			}
1693 			else
1694 			{
1695 				UDF_I_LENALLOC(inode) += adsize;
1696 				mark_inode_dirty(inode);
1697 			}
1698 		}
1699 		if (UDF_SB_UDFREV(inode->i_sb) >= 0x0200)
1700 			udf_new_tag(nbh->b_data, TAG_IDENT_AED, 3, 1,
1701 				bloc->logicalBlockNum, sizeof(tag));
1702 		else
1703 			udf_new_tag(nbh->b_data, TAG_IDENT_AED, 2, 1,
1704 				bloc->logicalBlockNum, sizeof(tag));
1705 		switch (UDF_I_ALLOCTYPE(inode))
1706 		{
1707 			case ICBTAG_FLAG_AD_SHORT:
1708 			{
1709 				sad = (short_ad *)sptr;
1710 				sad->extLength = cpu_to_le32(
1711 					EXT_NEXT_EXTENT_ALLOCDECS |
1712 					inode->i_sb->s_blocksize);
1713 				sad->extPosition = cpu_to_le32(bloc->logicalBlockNum);
1714 				break;
1715 			}
1716 			case ICBTAG_FLAG_AD_LONG:
1717 			{
1718 				lad = (long_ad *)sptr;
1719 				lad->extLength = cpu_to_le32(
1720 					EXT_NEXT_EXTENT_ALLOCDECS |
1721 					inode->i_sb->s_blocksize);
1722 				lad->extLocation = cpu_to_lelb(*bloc);
1723 				memset(lad->impUse, 0x00, sizeof(lad->impUse));
1724 				break;
1725 			}
1726 		}
1727 		if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) || UDF_SB_UDFREV(inode->i_sb) >= 0x0201)
1728 			udf_update_tag((*bh)->b_data, loffset);
1729 		else
1730 			udf_update_tag((*bh)->b_data, sizeof(struct allocExtDesc));
1731 		mark_buffer_dirty_inode(*bh, inode);
1732 		udf_release_data(*bh);
1733 		*bh = nbh;
1734 	}
1735 
1736 	etype = udf_write_aext(inode, *bloc, extoffset, eloc, elen, *bh, inc);
1737 
1738 	if (!memcmp(&UDF_I_LOCATION(inode), bloc, sizeof(lb_addr)))
1739 	{
1740 		UDF_I_LENALLOC(inode) += adsize;
1741 		mark_inode_dirty(inode);
1742 	}
1743 	else
1744 	{
1745 		aed = (struct allocExtDesc *)(*bh)->b_data;
1746 		aed->lengthAllocDescs =
1747 			cpu_to_le32(le32_to_cpu(aed->lengthAllocDescs) + adsize);
1748 		if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) || UDF_SB_UDFREV(inode->i_sb) >= 0x0201)
1749 			udf_update_tag((*bh)->b_data, *extoffset + (inc ? 0 : adsize));
1750 		else
1751 			udf_update_tag((*bh)->b_data, sizeof(struct allocExtDesc));
1752 		mark_buffer_dirty_inode(*bh, inode);
1753 	}
1754 
1755 	return etype;
1756 }
1757 
int8_t udf_write_aext(struct inode *inode, lb_addr bloc, int *extoffset,
1759     lb_addr eloc, uint32_t elen, struct buffer_head *bh, int inc)
1760 {
1761 	int adsize;
1762 	short_ad *sad = NULL;
1763 	long_ad *lad = NULL;
1764 
1765 	if (!(bh))
1766 	{
1767 		if (!(bh = udf_tread(inode->i_sb,
1768 			udf_get_lb_pblock(inode->i_sb, bloc, 0))))
1769 		{
1770 			udf_debug("reading block %d failed!\n",
1771 				udf_get_lb_pblock(inode->i_sb, bloc, 0));
1772 			return -1;
1773 		}
1774 	}
1775 	else
1776 		atomic_inc(&bh->b_count);
1777 
1778 	if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_SHORT)
1779 		adsize = sizeof(short_ad);
1780 	else if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_LONG)
1781 		adsize = sizeof(long_ad);
1782 	else
1783 		return -1;
1784 
1785 	switch (UDF_I_ALLOCTYPE(inode))
1786 	{
1787 		case ICBTAG_FLAG_AD_SHORT:
1788 		{
1789 			sad = (short_ad *)((bh)->b_data + *extoffset);
1790 			sad->extLength = cpu_to_le32(elen);
1791 			sad->extPosition = cpu_to_le32(eloc.logicalBlockNum);
1792 			break;
1793 		}
1794 		case ICBTAG_FLAG_AD_LONG:
1795 		{
1796 			lad = (long_ad *)((bh)->b_data + *extoffset);
1797 			lad->extLength = cpu_to_le32(elen);
1798 			lad->extLocation = cpu_to_lelb(eloc);
1799 			memset(lad->impUse, 0x00, sizeof(lad->impUse));
1800 			break;
1801 		}
1802 	}
1803 
1804 	if (memcmp(&UDF_I_LOCATION(inode), &bloc, sizeof(lb_addr)))
1805 	{
1806 		if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) || UDF_SB_UDFREV(inode->i_sb) >= 0x0201)
1807 		{
1808 			struct allocExtDesc *aed = (struct allocExtDesc *)(bh)->b_data;
1809 			udf_update_tag((bh)->b_data,
1810 				le32_to_cpu(aed->lengthAllocDescs) + sizeof(struct allocExtDesc));
1811 		}
1812 		mark_buffer_dirty_inode(bh, inode);
1813 	}
1814 	else
1815 	{
1816 		mark_inode_dirty(inode);
1817 		mark_buffer_dirty(bh);
1818 	}
1819 
1820 	if (inc)
1821 		*extoffset += adsize;
1822 	udf_release_data(bh);
1823 	return (elen >> 30);
1824 }
1825 
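/*
 * udf_next_aext
 *
 * PURPOSE
 *	Return the next allocation extent of the inode and advance the
 *	traversal position.
 *
 * DESCRIPTION
 *	Reads the block at *bloc if needed, determines where the allocation
 *	descriptors start from the descriptor tag (file entry, unallocated
 *	space entry or allocation extent descriptor), and decodes the
 *	descriptor at *extoffset. A descriptor of type
 *	EXT_NEXT_EXTENT_ALLOCDECS is followed transparently by recursing into
 *	the next allocation extent block. Returns the extent type, or -1 on
 *	error or when an empty (zero-length) extent is found, in which case
 *	*extoffset is backed up to point at that descriptor again.
 */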
int8_t udf_next_aext(struct inode *inode, lb_addr *bloc, int *extoffset,
	lb_addr *eloc, uint32_t *elen, struct buffer_head **bh, int inc)
1828 {
1829 	uint16_t tagIdent;
1830 	int pos, alen;
1831 	int8_t etype;
1832 
1833 	if (!(*bh))
1834 	{
1835 		if (!(*bh = udf_tread(inode->i_sb,
1836 			udf_get_lb_pblock(inode->i_sb, *bloc, 0))))
1837 		{
1838 			udf_debug("reading block %d failed!\n",
1839 				udf_get_lb_pblock(inode->i_sb, *bloc, 0));
1840 			return -1;
1841 		}
1842 	}
1843 
1844 	tagIdent = le16_to_cpu(((tag *)(*bh)->b_data)->tagIdent);
1845 
1846 	if (!memcmp(&UDF_I_LOCATION(inode), bloc, sizeof(lb_addr)))
1847 	{
1848 		if (tagIdent == TAG_IDENT_FE || tagIdent == TAG_IDENT_EFE ||
1849 			UDF_I_NEW_INODE(inode))
1850 		{
1851 			pos = udf_file_entry_alloc_offset(inode);
1852 			alen = UDF_I_LENALLOC(inode) + pos;
1853 		}
1854 		else if (tagIdent == TAG_IDENT_USE)
1855 		{
1856 			pos = sizeof(struct unallocSpaceEntry);
1857 			alen = UDF_I_LENALLOC(inode) + pos;
1858 		}
1859 		else
1860 			return -1;
1861 	}
1862 	else if (tagIdent == TAG_IDENT_AED)
1863 	{
1864 		struct allocExtDesc *aed = (struct allocExtDesc *)(*bh)->b_data;
1865 
1866 		pos = sizeof(struct allocExtDesc);
1867 		alen = le32_to_cpu(aed->lengthAllocDescs) + pos;
1868 	}
1869 	else
1870 		return -1;
1871 
1872 	if (!(*extoffset))
1873 		*extoffset = pos;
1874 
1875 	switch (UDF_I_ALLOCTYPE(inode))
1876 	{
1877 		case ICBTAG_FLAG_AD_SHORT:
1878 		{
1879 			short_ad *sad;
1880 
1881 			if (!(sad = udf_get_fileshortad((*bh)->b_data, alen, extoffset, inc)))
1882 				return -1;
1883 
1884 			if ((etype = le32_to_cpu(sad->extLength) >> 30) == (EXT_NEXT_EXTENT_ALLOCDECS >> 30))
1885 			{
1886 				bloc->logicalBlockNum = le32_to_cpu(sad->extPosition);
1887 				*extoffset = 0;
1888 				udf_release_data(*bh);
1889 				*bh = NULL;
1890 				return udf_next_aext(inode, bloc, extoffset, eloc, elen, bh, inc);
1891 			}
1892 			else
1893 			{
1894 				eloc->logicalBlockNum = le32_to_cpu(sad->extPosition);
1895 				eloc->partitionReferenceNum = UDF_I_LOCATION(inode).partitionReferenceNum;
1896 				*elen = le32_to_cpu(sad->extLength) & UDF_EXTENT_LENGTH_MASK;
1897 			}
1898 			break;
1899 		}
1900 		case ICBTAG_FLAG_AD_LONG:
1901 		{
1902 			long_ad *lad;
1903 
1904 			if (!(lad = udf_get_filelongad((*bh)->b_data, alen, extoffset, inc)))
1905 				return -1;
1906 
1907 			if ((etype = le32_to_cpu(lad->extLength) >> 30) == (EXT_NEXT_EXTENT_ALLOCDECS >> 30))
1908 			{
1909 				*bloc = lelb_to_cpu(lad->extLocation);
1910 				*extoffset = 0;
1911 				udf_release_data(*bh);
1912 				*bh = NULL;
1913 				return udf_next_aext(inode, bloc, extoffset, eloc, elen, bh, inc);
1914 			}
1915 			else
1916 			{
1917 				*eloc = lelb_to_cpu(lad->extLocation);
1918 				*elen = le32_to_cpu(lad->extLength) & UDF_EXTENT_LENGTH_MASK;
1919 			}
1920 			break;
1921 		}
1922 		case ICBTAG_FLAG_AD_IN_ICB:
1923 		{
1924 			if (UDF_I_LENALLOC(inode) == 0)
1925 				return -1;
1926 			etype = (EXT_RECORDED_ALLOCATED >> 30);
1927 			*eloc = UDF_I_LOCATION(inode);
1928 			*elen = UDF_I_LENALLOC(inode);
1929 			break;
1930 		}
1931 		default:
1932 		{
1933 			udf_debug("alloc_type = %d unsupported\n", UDF_I_ALLOCTYPE(inode));
1934 			return -1;
1935 		}
1936 	}
1937 	if (*elen)
1938 		return etype;
1939 
1940 	udf_debug("Empty Extent, inode=%ld, alloctype=%d, eloc=%d, elen=%d, etype=%d, extoffset=%d\n",
1941 		inode->i_ino, UDF_I_ALLOCTYPE(inode), eloc->logicalBlockNum, *elen, etype, *extoffset);
1942 	if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_SHORT)
1943 		*extoffset -= sizeof(short_ad);
1944 	else if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_LONG)
1945 		*extoffset -= sizeof(long_ad);
1946 	return -1;
1947 }
1948 
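/*
 * udf_current_aext
 *
 * PURPOSE
 *	Return the allocation extent at the current position without
 *	following EXT_NEXT_EXTENT_ALLOCDECS chaining.
 *
 * DESCRIPTION
 *	Like udf_next_aext(), but the descriptor is decoded as-is: pointers
 *	to further allocation extent blocks are not followed. Returns the
 *	extent type, or -1 on error or on an empty extent.
 */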
int8_t udf_current_aext(struct inode *inode, lb_addr *bloc, int *extoffset,
	lb_addr *eloc, uint32_t *elen, struct buffer_head **bh, int inc)
1951 {
1952 	int pos, alen;
1953 	int8_t etype;
1954 
1955 	if (!(*bh))
1956 	{
1957 		if (!(*bh = udf_tread(inode->i_sb,
1958 			udf_get_lb_pblock(inode->i_sb, *bloc, 0))))
1959 		{
1960 			udf_debug("reading block %d failed!\n",
1961 				udf_get_lb_pblock(inode->i_sb, *bloc, 0));
1962 			return -1;
1963 		}
1964 	}
1965 
1966 	if (!memcmp(&UDF_I_LOCATION(inode), bloc, sizeof(lb_addr)))
1967 	{
1968 		if (!(UDF_I_EXTENDED_FE(inode)))
1969 			pos = sizeof(struct fileEntry) + UDF_I_LENEATTR(inode);
1970 		else
1971 			pos = sizeof(struct extendedFileEntry) + UDF_I_LENEATTR(inode);
1972 		alen = UDF_I_LENALLOC(inode) + pos;
1973 	}
1974 	else
1975 	{
1976 		struct allocExtDesc *aed = (struct allocExtDesc *)(*bh)->b_data;
1977 
1978 		pos = sizeof(struct allocExtDesc);
1979 		alen = le32_to_cpu(aed->lengthAllocDescs) + pos;
1980 	}
1981 
1982 	if (!(*extoffset))
1983 		*extoffset = pos;
1984 
1985 	switch (UDF_I_ALLOCTYPE(inode))
1986 	{
1987 		case ICBTAG_FLAG_AD_SHORT:
1988 		{
1989 			short_ad *sad;
1990 
1991 			if (!(sad = udf_get_fileshortad((*bh)->b_data, alen, extoffset, inc)))
1992 				return -1;
1993 
1994 			etype = le32_to_cpu(sad->extLength) >> 30;
1995 			eloc->logicalBlockNum = le32_to_cpu(sad->extPosition);
1996 			eloc->partitionReferenceNum = UDF_I_LOCATION(inode).partitionReferenceNum;
1997 			*elen = le32_to_cpu(sad->extLength) & UDF_EXTENT_LENGTH_MASK;
1998 			break;
1999 		}
2000 		case ICBTAG_FLAG_AD_LONG:
2001 		{
2002 			long_ad *lad;
2003 
2004 			if (!(lad = udf_get_filelongad((*bh)->b_data, alen, extoffset, inc)))
2005 				return -1;
2006 
2007 			etype = le32_to_cpu(lad->extLength) >> 30;
2008 			*eloc = lelb_to_cpu(lad->extLocation);
2009 			*elen = le32_to_cpu(lad->extLength) & UDF_EXTENT_LENGTH_MASK;
2010 			break;
2011 		}
2012 		default:
2013 		{
2014 			udf_debug("alloc_type = %d unsupported\n", UDF_I_ALLOCTYPE(inode));
2015 			return -1;
2016 		}
2017 	}
2018 	if (*elen)
2019 		return etype;
2020 
2021 	udf_debug("Empty Extent!\n");
2022 	if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_SHORT)
2023 		*extoffset -= sizeof(short_ad);
2024 	else if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_LONG)
2025 		*extoffset -= sizeof(long_ad);
2026 	return -1;
2027 }
2028 
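/*
 * udf_insert_aext
 *
 * PURPOSE
 *	Insert a new extent in front of the extent at the given position.
 *
 * DESCRIPTION
 *	Walks the remaining extents, writing the new extent over the current
 *	slot and carrying the displaced extent forward, so every following
 *	extent is shifted by one descriptor; the last displaced extent is
 *	appended with udf_add_aext(). Returns the type of the last extent
 *	written, or -1 if the initial descriptor block cannot be read.
 */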
int8_t udf_insert_aext(struct inode *inode, lb_addr bloc, int extoffset,
	lb_addr neloc, uint32_t nelen, struct buffer_head *bh)
2031 {
2032 	lb_addr oeloc;
2033 	uint32_t oelen;
2034 	int8_t etype;
2035 
2036 	if (!bh)
2037 	{
2038 		if (!(bh = udf_tread(inode->i_sb,
2039 			udf_get_lb_pblock(inode->i_sb, bloc, 0))))
2040 		{
2041 			udf_debug("reading block %d failed!\n",
2042 				udf_get_lb_pblock(inode->i_sb, bloc, 0));
2043 			return -1;
2044 		}
2045 	}
2046 	else
2047 		atomic_inc(&bh->b_count);
2048 
2049 	while ((etype = udf_next_aext(inode, &bloc, &extoffset, &oeloc, &oelen, &bh, 0)) != -1)
2050 	{
2051 		udf_write_aext(inode, bloc, &extoffset, neloc, nelen, bh, 1);
2052 
2053 		neloc = oeloc;
2054 		nelen = (etype << 30) | oelen;
2055 	}
2056 	udf_add_aext(inode, &bloc, &extoffset, neloc, nelen, &bh, 1);
2057 	udf_release_data(bh);
2058 	return (nelen >> 30);
2059 }
2060 
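/*
 * udf_delete_aext
 *
 * PURPOSE
 *	Remove the extent at the given position from the inode's extent list.
 *
 * DESCRIPTION
 *	Shifts every following extent back by one descriptor, clears the
 *	now-unused trailing slot(s) and shrinks the allocation descriptor
 *	length in the inode or allocation extent descriptor. A trailing
 *	allocation extent block that becomes unnecessary is freed.
 *	Returns 0 on success and -1 on failure.
 */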
int8_t udf_delete_aext(struct inode *inode, lb_addr nbloc, int nextoffset,
	lb_addr eloc, uint32_t elen, struct buffer_head *nbh)
2063 {
2064 	struct buffer_head *obh;
2065 	lb_addr obloc;
2066 	int oextoffset, adsize;
2067 	int8_t etype;
2068 	struct allocExtDesc *aed;
2069 
2070 	if (!(nbh))
2071 	{
2072 		if (!(nbh = udf_tread(inode->i_sb,
2073 			udf_get_lb_pblock(inode->i_sb, nbloc, 0))))
2074 		{
2075 			udf_debug("reading block %d failed!\n",
2076 				udf_get_lb_pblock(inode->i_sb, nbloc, 0));
2077 			return -1;
2078 		}
2079 	}
	else
		atomic_inc(&nbh->b_count);
	/* obh below aliases nbh, so take a second reference; both references
	 * are dropped by the udf_release_data() calls at the end */
	atomic_inc(&nbh->b_count);
2083 
2084 	if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_SHORT)
2085 		adsize = sizeof(short_ad);
2086 	else if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_LONG)
2087 		adsize = sizeof(long_ad);
2088 	else
2089 		adsize = 0;
2090 
2091 	obh = nbh;
2092 	obloc = nbloc;
2093 	oextoffset = nextoffset;
2094 
	if (udf_next_aext(inode, &nbloc, &nextoffset, &eloc, &elen, &nbh, 1) == -1)
	{
		/* drop the buffer references taken above before bailing out */
		udf_release_data(obh);
		if (nbh)
			udf_release_data(nbh);
		return -1;
	}
2097 
2098 	while ((etype = udf_next_aext(inode, &nbloc, &nextoffset, &eloc, &elen, &nbh, 1)) != -1)
2099 	{
2100 		udf_write_aext(inode, obloc, &oextoffset, eloc, (etype << 30) | elen, obh, 1);
2101 		if (memcmp(&nbloc, &obloc, sizeof(lb_addr)))
2102 		{
2103 			obloc = nbloc;
2104 			udf_release_data(obh);
2105 			atomic_inc(&nbh->b_count);
2106 			obh = nbh;
2107 			oextoffset = nextoffset - adsize;
2108 		}
2109 	}
2110 	memset(&eloc, 0x00, sizeof(lb_addr));
2111 	elen = 0;
2112 
2113 	if (memcmp(&nbloc, &obloc, sizeof(lb_addr)))
2114 	{
2115 		udf_free_blocks(inode->i_sb, inode, nbloc, 0, 1);
2116 		udf_write_aext(inode, obloc, &oextoffset, eloc, elen, obh, 1);
2117 		udf_write_aext(inode, obloc, &oextoffset, eloc, elen, obh, 1);
2118 		if (!memcmp(&UDF_I_LOCATION(inode), &obloc, sizeof(lb_addr)))
2119 		{
2120 			UDF_I_LENALLOC(inode) -= (adsize * 2);
2121 			mark_inode_dirty(inode);
2122 		}
2123 		else
2124 		{
2125 			aed = (struct allocExtDesc *)(obh)->b_data;
2126 			aed->lengthAllocDescs =
2127 				cpu_to_le32(le32_to_cpu(aed->lengthAllocDescs) - (2*adsize));
2128 			if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) || UDF_SB_UDFREV(inode->i_sb) >= 0x0201)
2129 				udf_update_tag((obh)->b_data, oextoffset - (2*adsize));
2130 			else
2131 				udf_update_tag((obh)->b_data, sizeof(struct allocExtDesc));
2132 			mark_buffer_dirty_inode(obh, inode);
2133 		}
2134 	}
2135 	else
2136 	{
2137 		udf_write_aext(inode, obloc, &oextoffset, eloc, elen, obh, 1);
2138 		if (!memcmp(&UDF_I_LOCATION(inode), &obloc, sizeof(lb_addr)))
2139 		{
2140 			UDF_I_LENALLOC(inode) -= adsize;
2141 			mark_inode_dirty(inode);
2142 		}
2143 		else
2144 		{
2145 			aed = (struct allocExtDesc *)(obh)->b_data;
2146 			aed->lengthAllocDescs =
2147 				cpu_to_le32(le32_to_cpu(aed->lengthAllocDescs) - adsize);
2148 			if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) || UDF_SB_UDFREV(inode->i_sb) >= 0x0201)
2149 				udf_update_tag((obh)->b_data, oextoffset - adsize);
2150 			else
2151 				udf_update_tag((obh)->b_data, sizeof(struct allocExtDesc));
2152 			mark_buffer_dirty_inode(obh, inode);
2153 		}
2154 	}
2155 
2156 	udf_release_data(nbh);
2157 	udf_release_data(obh);
2158 	return (elen >> 30);
2159 }
2160 
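/*
 * inode_bmap
 *
 * PURPOSE
 *	Map a file block number to the extent that contains it.
 *
 * DESCRIPTION
 *	Walks the inode's extents with udf_next_aext(), accumulating extent
 *	lengths until the byte offset of the requested block is covered.
 *	On success the extent location, length and traversal position are
 *	returned through the pointer arguments, *offset is set to the byte
 *	offset of the block within the extent, and the extent type is
 *	returned. Returns -1 if the block lies beyond the last extent.
 */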
int8_t inode_bmap(struct inode *inode, int block, lb_addr *bloc, uint32_t *extoffset,
	lb_addr *eloc, uint32_t *elen, uint32_t *offset, struct buffer_head **bh)
2163 {
	uint64_t lbcount = 0, bcount;
	int8_t etype;

	if (!inode)
	{
		printk(KERN_ERR "udf: inode_bmap: NULL inode\n");
		return -1;
	}
	if (block < 0)
	{
		printk(KERN_ERR "udf: inode_bmap: block < 0\n");
		return -1;
	}

	/* compute the byte offset only after the argument checks above */
	bcount = (uint64_t)block << inode->i_sb->s_blocksize_bits;
2177 
2178 	*extoffset = 0;
2179 	*elen = 0;
2180 	*bloc = UDF_I_LOCATION(inode);
2181 
2182 	do
2183 	{
2184 		if ((etype = udf_next_aext(inode, bloc, extoffset, eloc, elen, bh, 1)) == -1)
2185 		{
2186 			*offset = bcount - lbcount;
2187 			UDF_I_LENEXTENTS(inode) = lbcount;
2188 			return -1;
2189 		}
2190 		lbcount += *elen;
2191 	} while (lbcount <= bcount);
2192 
2193 	*offset = bcount + *elen - lbcount;
2194 
2195 	return etype;
2196 }
2197 
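/*
 * udf_block_map
 *
 * PURPOSE
 *	Return the physical block number backing the given file block.
 *
 * DESCRIPTION
 *	Uses inode_bmap() to find the extent covering the block and, if the
 *	extent is recorded and allocated, converts the extent location to a
 *	physical block number; returns 0 if the block is not mapped. The
 *	lookup is done under the big kernel lock, and the result is passed
 *	through udf_fixed_to_variable() when the UDF_FLAG_VARCONV mount flag
 *	is set.
 */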
long udf_block_map(struct inode *inode, long block)
2199 {
2200 	lb_addr eloc, bloc;
2201 	uint32_t offset, extoffset, elen;
2202 	struct buffer_head *bh = NULL;
2203 	int ret;
2204 
2205 	lock_kernel();
2206 
2207 	if (inode_bmap(inode, block, &bloc, &extoffset, &eloc, &elen, &offset, &bh) == (EXT_RECORDED_ALLOCATED >> 30))
2208 		ret = udf_get_lb_pblock(inode->i_sb, eloc, offset >> inode->i_sb->s_blocksize_bits);
2209 	else
2210 		ret = 0;
2211 
2212 	unlock_kernel();
2213 
2214 	if (bh)
2215 		udf_release_data(bh);
2216 
2217 	if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_VARCONV))
2218 		return udf_fixed_to_variable(ret);
2219 	else
2220 		return ret;
2221 }
2222