/* -*- linux-c -*- ------------------------------------------------------- *
 *
 *   Copyright 2001 H. Peter Anvin - All Rights Reserved
 *
 *   This program is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU General Public License as published by
 *   the Free Software Foundation, Inc., 675 Mass Ave, Cambridge MA 02139,
 *   USA; either version 2 of the License, or (at your option) any later
 *   version; incorporated herein by reference.
 *
 * ----------------------------------------------------------------------- */

/*
 * linux/fs/isofs/compress.c
 *
 * Transparent decompression of files on an iso9660 filesystem
 */

#include <linux/config.h>
#include <linux/module.h>

#include <linux/stat.h>
#include <linux/sched.h>
#include <linux/iso_fs.h>
#include <linux/kernel.h>
#include <linux/major.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/locks.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/cdrom.h>
#include <linux/init.h>
#include <linux/nls.h>
#include <linux/ctype.h>
#include <linux/smp_lock.h>
#include <linux/blkdev.h>
#include <linux/vmalloc.h>
#include <linux/zlib.h>

#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/semaphore.h>

#include "zisofs.h"

/* This should probably be global. */
static char zisofs_sink_page[PAGE_CACHE_SIZE];
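/* Decompressed output for pages we could not grab (or that lie beyond
   EOF) is directed into the sink page and simply discarded. */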

/*
 * This holds the zlib workspace allocation and the semaphore that
 * serializes its use; allocating the workspace once at init time
 * avoids failures at block-decompression time.
 */
static void *zisofs_zlib_workspace;
static struct semaphore zisofs_zlib_semaphore;

/*
 * When decompressing, we typically obtain more than one page
 * per reference.  We inject the additional pages into the page
 * cache as a form of readahead.
 */
static int zisofs_readpage(struct file *file, struct page *page)
{
	struct inode *inode = file->f_dentry->d_inode;
	struct address_space *mapping = inode->i_mapping;
	unsigned int maxpage, xpage, fpage, blockindex;
	unsigned long offset;
	unsigned long blockptr, blockendptr, cstart, cend, csize;
	struct buffer_head *bh, *ptrbh[2];
	unsigned long bufsize = ISOFS_BUFFER_SIZE(inode);
	unsigned int bufshift = ISOFS_BUFFER_BITS(inode);
	unsigned long bufmask  = bufsize - 1;
	int err = -EIO;
	int i;
	unsigned int header_size = inode->u.isofs_i.i_format_parm[0];
	unsigned int zisofs_block_shift = inode->u.isofs_i.i_format_parm[1];
	/* unsigned long zisofs_block_size = 1UL << zisofs_block_shift; */
	unsigned int zisofs_block_page_shift = zisofs_block_shift-PAGE_CACHE_SHIFT;
	unsigned long zisofs_block_pages = 1UL << zisofs_block_page_shift;
	unsigned long zisofs_block_page_mask = zisofs_block_pages-1;
	struct page *pages[zisofs_block_pages];
	unsigned long index = page->index;
	int indexblocks;

	/* We have already been given one page; this is the one
	   we must complete. */
	xpage = index & zisofs_block_page_mask;
	pages[xpage] = page;

	/* The remaining pages need to be allocated and inserted */
	offset = index & ~zisofs_block_page_mask;
	blockindex = offset >> zisofs_block_page_shift;
	maxpage = (inode->i_size + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
	maxpage = min(zisofs_block_pages, maxpage-offset);
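	/* Don't try to fill pages past the end of the file. */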

	for ( i = 0 ; i < maxpage ; i++, offset++ ) {
		if ( i != xpage ) {
			pages[i] = grab_cache_page_nowait(mapping, offset);
		}
		page = pages[i];
		if ( page ) {
			ClearPageError(page);
			kmap(page);
		}
	}

	/* This is the last page filled, plus one; used in case of abort. */
	fpage = 0;

	/* Find the pointer to this specific chunk */
	/* Note: we're not using isonum_731() here because the data is known to be aligned */
	/* Note: header_size is in 32-bit words (4 bytes) */
	blockptr = (header_size + blockindex) << 2;
	blockendptr = blockptr + 4;
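	/* Table entries blockindex and blockindex+1 are consecutive
	   little-endian 32-bit offsets which bracket the compressed data
	   for this zisofs block, so its length is cend - cstart below.
	   The two entries may straddle a device block boundary, in which
	   case two buffers are needed to read them both. */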

	indexblocks = ((blockptr^blockendptr) >> bufshift) ? 2 : 1;
	ptrbh[0] = ptrbh[1] = 0;

	if ( isofs_get_blocks(inode, blockptr >> bufshift, ptrbh, indexblocks) != indexblocks ) {
		if ( ptrbh[0] ) brelse(ptrbh[0]);
		printk(KERN_DEBUG "zisofs: Null buffer on reading block table, inode = %lu, block = %lu\n",
		       inode->i_ino, blockptr >> bufshift);
		goto eio;
	}
	ll_rw_block(READ, indexblocks, ptrbh);

	bh = ptrbh[0];
	if ( !bh || (wait_on_buffer(bh), !buffer_uptodate(bh)) ) {
		printk(KERN_DEBUG "zisofs: Failed to read block table, inode = %lu, block = %lu\n",
		       inode->i_ino, blockptr >> bufshift);
		if ( ptrbh[1] )
			brelse(ptrbh[1]);
		goto eio;
	}
	cstart = le32_to_cpu(*(u32 *)(bh->b_data + (blockptr & bufmask)));

	if ( indexblocks == 2 ) {
		/* We just crossed a block boundary.  Switch to the next block */
		brelse(bh);
		bh = ptrbh[1];
		if ( !bh || (wait_on_buffer(bh), !buffer_uptodate(bh)) ) {
			printk(KERN_DEBUG "zisofs: Failed to read block table, inode = %lu, block = %lu\n",
			       inode->i_ino, blockendptr >> bufshift);
			goto eio;
		}
	}
	cend = le32_to_cpu(*(u32 *)(bh->b_data + (blockendptr & bufmask)));
	brelse(bh);

	if (cstart > cend)
		goto eio;

	csize = cend-cstart;

	if (csize > deflateBound(1UL << zisofs_block_shift))
		goto eio;

	/* Now pages[] contains an array of pages, any of which can be NULL,
	   and we hold the lock on each non-NULL entry.  We should now read
	   the data and release the pages.  If a page is NULL, the
	   decompressed data for that particular page is discarded. */

	if ( csize == 0 ) {
		/* This data block is empty. */

		for ( fpage = 0 ; fpage < maxpage ; fpage++ ) {
			if ( (page = pages[fpage]) != NULL ) {
				memset(page_address(page), 0, PAGE_CACHE_SIZE);

				flush_dcache_page(page);
				SetPageUptodate(page);
				kunmap(page);
				UnlockPage(page);
				if ( fpage == xpage )
					err = 0; /* The critical page */
				else
					page_cache_release(page);
			}
		}
	} else {
		/* This data block is compressed. */
		z_stream stream;
		int bail = 0, left_out = -1;
		int zerr;
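		/* Number of device blocks spanned by the compressed data,
		   allowing for cstart's offset within its first block. */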
		int needblocks = (csize + (cstart & bufmask) + bufmask) >> bufshift;
		int haveblocks;
		struct buffer_head *bhs[needblocks+1];
		struct buffer_head **bhptr;

		/* Because zlib is not thread-safe, do all the I/O at the top. */

		blockptr = cstart >> bufshift;
		memset(bhs, 0, (needblocks+1)*sizeof(struct buffer_head *));
		haveblocks = isofs_get_blocks(inode, blockptr, bhs, needblocks);
		ll_rw_block(READ, haveblocks, bhs);
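		/* The reads for every compressed-data block have now been
		   submitted; we wait for each buffer as we consume it. */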

		bhptr = &bhs[0];
		bh = *bhptr++;

		/* First block is special since it may be fractional.
		   We also wait for it before grabbing the zlib
		   semaphore; odds are that the subsequent blocks will
		   arrive in short order, so we don't hold the zlib
		   semaphore longer than necessary. */

		if ( !bh || (wait_on_buffer(bh), !buffer_uptodate(bh)) ) {
			printk(KERN_DEBUG "zisofs: Hit null buffer, fpage = %d, xpage = %d, csize = %ld\n",
			       fpage, xpage, csize);
			goto b_eio;
		}
		stream.next_in  = bh->b_data + (cstart & bufmask);
		stream.avail_in = min(bufsize-(cstart & bufmask), csize);
		csize -= stream.avail_in;

		stream.workspace = zisofs_zlib_workspace;
		down(&zisofs_zlib_semaphore);

		zerr = zlib_inflateInit(&stream);
		if ( zerr != Z_OK ) {
			if ( err && zerr == Z_MEM_ERROR )
				err = -ENOMEM;
			printk(KERN_DEBUG "zisofs: zlib_inflateInit returned %d\n",
			       zerr);
			goto z_eio;
		}

		while ( !bail && fpage < maxpage ) {
			page = pages[fpage];
			if ( page )
				stream.next_out = page_address(page);
			else
				stream.next_out = (void *)&zisofs_sink_page;
			stream.avail_out = PAGE_CACHE_SIZE;

			while ( stream.avail_out ) {
				int ao, ai;
				if ( stream.avail_in == 0 && left_out ) {
					if ( !csize ) {
						printk(KERN_WARNING "zisofs: ZF read beyond end of input\n");
						bail = 1;
						break;
					} else {
						bh = *bhptr++;
						if ( !bh ||
						     (wait_on_buffer(bh), !buffer_uptodate(bh)) ) {
							/* Reached an EIO */
							printk(KERN_DEBUG "zisofs: Hit null buffer, fpage = %d, xpage = %d, csize = %ld\n",
							       fpage, xpage, csize);

							bail = 1;
							break;
						}
						stream.next_in = bh->b_data;
						stream.avail_in = min(csize,bufsize);
						csize -= stream.avail_in;
					}
				}
				ao = stream.avail_out;  ai = stream.avail_in;
				zerr = zlib_inflate(&stream, Z_SYNC_FLUSH);
				left_out = stream.avail_out;
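				/* Z_BUF_ERROR with the input exhausted just
				   means zlib wants more data; loop back and
				   feed it the next block. */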
				if ( zerr == Z_BUF_ERROR && stream.avail_in == 0 )
					continue;
				if ( zerr != Z_OK ) {
					/* EOF, error, or trying to read beyond end of input */
					if ( err && zerr == Z_MEM_ERROR )
						err = -ENOMEM;
					if ( zerr != Z_STREAM_END )
						printk(KERN_DEBUG "zisofs: zlib_inflate returned %d, inode = %lu, index = %lu, fpage = %d, xpage = %d, avail_in = %d, avail_out = %d, ai = %d, ao = %d\n",
						       zerr, inode->i_ino, index,
						       fpage, xpage,
						       stream.avail_in, stream.avail_out,
						       ai, ao);
					bail = 1;
					break;
				}
			}

			if ( stream.avail_out && zerr == Z_STREAM_END ) {
				/* Fractional page written before EOF.  This may
				   be the last page in the file. */
				memset(stream.next_out, 0, stream.avail_out);
				stream.avail_out = 0;
			}

			if ( !stream.avail_out ) {
				/* This page completed */
				if ( page ) {
					flush_dcache_page(page);
					SetPageUptodate(page);
					kunmap(page);
					UnlockPage(page);
					if ( fpage == xpage )
						err = 0; /* The critical page */
					else
						page_cache_release(page);
				}
				fpage++;
			}
		}
		zlib_inflateEnd(&stream);

	z_eio:
		up(&zisofs_zlib_semaphore);

	b_eio:
		for ( i = 0 ; i < haveblocks ; i++ ) {
			if ( bhs[i] )
				brelse(bhs[i]);
		}
	}

eio:

	/* Release any residual pages, do not SetPageUptodate */
	while ( fpage < maxpage ) {
		page = pages[fpage];
		if ( page ) {
			flush_dcache_page(page);
			if ( fpage == xpage )
				SetPageError(page);
			kunmap(page);
			UnlockPage(page);
			if ( fpage != xpage )
				page_cache_release(page);
		}
		fpage++;
	}

	/* At this point, err contains 0 or -EIO depending on the "critical" page */
	return err;
}

struct address_space_operations zisofs_aops = {
	readpage: zisofs_readpage,
	/* No sync_page operation supported? */
	/* No bmap operation supported */
};

static int initialized = 0;

int __init zisofs_init(void)
{
	if ( initialized ) {
		printk("zisofs_init: called more than once\n");
		return 0;
	}

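	/* One shared inflate workspace, sized as zlib requires; its use is
	   serialized by the semaphore so we never allocate at read time. */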
	zisofs_zlib_workspace = vmalloc(zlib_inflate_workspacesize());
	if ( !zisofs_zlib_workspace )
		return -ENOMEM;
	init_MUTEX(&zisofs_zlib_semaphore);

	initialized = 1;
	return 0;
}

void __exit zisofs_cleanup(void)
{
	if ( !initialized ) {
		printk("zisofs_cleanup: called without initialization\n");
		return;
	}

	vfree(zisofs_zlib_workspace);
	initialized = 0;
}