// SPDX-License-Identifier: GPL-2.0
/*
 * Ioctl to enable verity on a file
 *
 * Copyright 2019 Google LLC
 */

#include "fsverity_private.h"

#include <crypto/hash.h>
#include <linux/mount.h>
#include <linux/sched/signal.h>
#include <linux/uaccess.h>

struct block_buffer {
	u32 filled;
	bool is_root_hash;
	u8 *data;
};

/* Hash a block, writing the result to the next level's pending block buffer. */
static int hash_one_block(struct inode *inode,
			  const struct merkle_tree_params *params,
			  struct block_buffer *cur)
{
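	/* The buffer for the next tree level immediately follows @cur. */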
	struct block_buffer *next = cur + 1;
	int err;

	/*
	 * Safety check to prevent a buffer overflow in case of a filesystem bug
	 * that allows the file size to change despite deny_write_access(), or a
	 * bug in the Merkle tree logic itself
	 */
	if (WARN_ON_ONCE(next->is_root_hash && next->filled != 0))
		return -EINVAL;

	/* Zero-pad the block if it's shorter than the block size. */
	memset(&cur->data[cur->filled], 0, params->block_size - cur->filled);

	err = fsverity_hash_block(params, inode, cur->data,
				  &next->data[next->filled]);
	if (err)
		return err;
	next->filled += params->digest_size;
	cur->filled = 0;
	return 0;
}

static int write_merkle_tree_block(struct inode *inode, const u8 *buf,
				   unsigned long index,
				   const struct merkle_tree_params *params)
{
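	/* Convert the block index into a byte position within the Merkle tree. */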
	u64 pos = (u64)index << params->log_blocksize;
	int err;

	err = inode->i_sb->s_vop->write_merkle_tree_block(inode, buf, pos,
							  params->block_size);
	if (err)
		fsverity_err(inode, "Error %d writing Merkle tree block %lu",
			     err, index);
	return err;
}

/*
 * Build the Merkle tree for the given file using the given parameters, and
 * return the root hash in @root_hash.
 *
 * The tree is written to a filesystem-specific location as determined by the
 * ->write_merkle_tree_block() method.  However, the blocks that comprise the
 * tree are the same for all filesystems.
 */
static int build_merkle_tree(struct file *filp,
			     const struct merkle_tree_params *params,
			     u8 *root_hash)
{
	struct inode *inode = file_inode(filp);
	const u64 data_size = inode->i_size;
	const int num_levels = params->num_levels;
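	/*
	 * One extra slot before the tree levels (the data block, accessed as
	 * buffers[-1]) and one after them (the root hash).
	 */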
	struct block_buffer _buffers[1 + FS_VERITY_MAX_LEVELS + 1] = {};
	struct block_buffer *buffers = &_buffers[1];
	unsigned long level_offset[FS_VERITY_MAX_LEVELS];
	int level;
	u64 offset;
	int err;

	if (data_size == 0) {
		/* Empty file is a special case; root hash is all 0's */
		memset(root_hash, 0, params->digest_size);
		return 0;
	}

	/*
	 * Allocate the block buffers.  Buffer "-1" is for data blocks.
	 * Buffers 0 <= level < num_levels are for the actual tree levels.
	 * Buffer 'num_levels' is for the root hash.
	 */
	for (level = -1; level < num_levels; level++) {
		buffers[level].data = kzalloc(params->block_size, GFP_KERNEL);
		if (!buffers[level].data) {
			err = -ENOMEM;
			goto out;
		}
	}
	buffers[num_levels].data = root_hash;
	buffers[num_levels].is_root_hash = true;

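	/*
	 * level_offset[level] is the index of the next tree block to write at
	 * each level, starting from the precomputed per-level start offsets.
	 */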
	BUILD_BUG_ON(sizeof(level_offset) != sizeof(params->level_start));
	memcpy(level_offset, params->level_start, sizeof(level_offset));

	/* Hash each data block, also hashing the tree blocks as they fill up */
	for (offset = 0; offset < data_size; offset += params->block_size) {
		ssize_t bytes_read;
		loff_t pos = offset;

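		/* The last data block may be shorter than the block size. */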
		buffers[-1].filled = min_t(u64, params->block_size,
					   data_size - offset);
		bytes_read = __kernel_read(filp, buffers[-1].data,
					   buffers[-1].filled, &pos);
		if (bytes_read < 0) {
			err = bytes_read;
			fsverity_err(inode, "Error %d reading file data", err);
			goto out;
		}
		if (bytes_read != buffers[-1].filled) {
			err = -EINVAL;
			fsverity_err(inode, "Short read of file data");
			goto out;
		}
		err = hash_one_block(inode, params, &buffers[-1]);
		if (err)
			goto out;
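		/*
		 * The new data hash may have filled the pending level-0 block;
		 * if so, hash that block into level 1 and write it out, and so
		 * on up the tree.
		 */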
		for (level = 0; level < num_levels; level++) {
			if (buffers[level].filled + params->digest_size <=
			    params->block_size) {
				/* Next block at @level isn't full yet */
				break;
			}
			/* Next block at @level is full */

			err = hash_one_block(inode, params, &buffers[level]);
			if (err)
				goto out;
			err = write_merkle_tree_block(inode,
						      buffers[level].data,
						      level_offset[level],
						      params);
			if (err)
				goto out;
			level_offset[level]++;
		}
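		/*
		 * Building the tree can take a long time on large files, so
		 * allow it to be interrupted by fatal signals and give other
		 * tasks a chance to run.
		 */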
		if (fatal_signal_pending(current)) {
			err = -EINTR;
			goto out;
		}
		cond_resched();
	}
	/* Finish all nonempty pending tree blocks. */
	for (level = 0; level < num_levels; level++) {
		if (buffers[level].filled != 0) {
			err = hash_one_block(inode, params, &buffers[level]);
			if (err)
				goto out;
			err = write_merkle_tree_block(inode,
						      buffers[level].data,
						      level_offset[level],
						      params);
			if (err)
				goto out;
		}
	}
	/* The root hash was filled by the last call to hash_one_block(). */
	if (WARN_ON_ONCE(buffers[num_levels].filled != params->digest_size)) {
		err = -EINVAL;
		goto out;
	}
	err = 0;
out:
	for (level = -1; level < num_levels; level++)
		kfree(buffers[level].data);
	return err;
}

static int enable_verity(struct file *filp,
			 const struct fsverity_enable_arg *arg)
{
	struct inode *inode = file_inode(filp);
	const struct fsverity_operations *vops = inode->i_sb->s_vop;
	struct merkle_tree_params params = { };
	struct fsverity_descriptor *desc;
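	/* The descriptor is variable-length due to the builtin signature. */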
	size_t desc_size = struct_size(desc, signature, arg->sig_size);
	struct fsverity_info *vi;
	int err;

	/* Start initializing the fsverity_descriptor */
	desc = kzalloc(desc_size, GFP_KERNEL);
	if (!desc)
		return -ENOMEM;
	desc->version = 1;
	desc->hash_algorithm = arg->hash_algorithm;
	desc->log_blocksize = ilog2(arg->block_size);

	/* Get the salt if the user provided one */
	if (arg->salt_size &&
	    copy_from_user(desc->salt, u64_to_user_ptr(arg->salt_ptr),
			   arg->salt_size)) {
		err = -EFAULT;
		goto out;
	}
	desc->salt_size = arg->salt_size;

	/* Get the builtin signature if the user provided one */
	if (arg->sig_size &&
	    copy_from_user(desc->signature, u64_to_user_ptr(arg->sig_ptr),
			   arg->sig_size)) {
		err = -EFAULT;
		goto out;
	}
	desc->sig_size = cpu_to_le32(arg->sig_size);

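	/* Multi-byte descriptor fields are stored in little-endian order. */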
	desc->data_size = cpu_to_le64(inode->i_size);

	/* Prepare the Merkle tree parameters */
	err = fsverity_init_merkle_tree_params(&params, inode,
					       arg->hash_algorithm,
					       desc->log_blocksize,
					       desc->salt, desc->salt_size);
	if (err)
		goto out;

	/*
	 * Start enabling verity on this file, serialized by the inode lock.
	 * Fail if verity is already enabled or is already being enabled.
	 */
	inode_lock(inode);
	if (IS_VERITY(inode))
		err = -EEXIST;
	else
		err = vops->begin_enable_verity(filp);
	inode_unlock(inode);
	if (err)
		goto out;

	/*
	 * Build the Merkle tree.  Don't hold the inode lock during this, since
	 * on huge files this may take a very long time and we don't want to
	 * force unrelated syscalls like chown() to block forever.  We don't
	 * need the inode lock here because deny_write_access() already prevents
	 * the file from being written to or truncated, and we still serialize
	 * ->begin_enable_verity() and ->end_enable_verity() using the inode
	 * lock and only allow one process to be here at a time on a given file.
	 */
	BUILD_BUG_ON(sizeof(desc->root_hash) < FS_VERITY_MAX_DIGEST_SIZE);
	err = build_merkle_tree(filp, &params, desc->root_hash);
	if (err) {
		fsverity_err(inode, "Error %d building Merkle tree", err);
		goto rollback;
	}

	/*
	 * Create the fsverity_info.  Don't bother trying to save work by
	 * reusing the merkle_tree_params from above.  Instead, just create the
	 * fsverity_info from the fsverity_descriptor as if it were just loaded
	 * from disk.  This is simpler, and it serves as an extra check that the
	 * metadata we're writing is valid before actually enabling verity.
	 */
	vi = fsverity_create_info(inode, desc);
	if (IS_ERR(vi)) {
		err = PTR_ERR(vi);
		goto rollback;
	}

	/*
	 * Tell the filesystem to finish enabling verity on the file.
	 * Serialized with ->begin_enable_verity() by the inode lock.
	 */
	inode_lock(inode);
	err = vops->end_enable_verity(filp, desc, desc_size, params.tree_size);
	inode_unlock(inode);
	if (err) {
		fsverity_err(inode, "%ps() failed with err %d",
			     vops->end_enable_verity, err);
		fsverity_free_info(vi);
	} else if (WARN_ON_ONCE(!IS_VERITY(inode))) {
		err = -EINVAL;
		fsverity_free_info(vi);
	} else {
		/* Successfully enabled verity */

		/*
		 * Readers can start using ->i_verity_info immediately, so it
		 * can't be rolled back once set.  So don't set it until just
		 * after the filesystem has successfully enabled verity.
		 */
		fsverity_set_info(inode, vi);
	}
out:
	kfree(params.hashstate);
	kfree(desc);
	return err;

rollback:
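	/*
	 * Enabling verity failed; give the filesystem a chance to undo
	 * ->begin_enable_verity().  A NULL descriptor signals the failure
	 * case to ->end_enable_verity().
	 */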
	inode_lock(inode);
	(void)vops->end_enable_verity(filp, NULL, 0, params.tree_size);
	inode_unlock(inode);
	goto out;
}

/**
 * fsverity_ioctl_enable() - enable verity on a file
 * @filp: file to enable verity on
 * @uarg: user pointer to fsverity_enable_arg
 *
 * Enable fs-verity on a file.  See the "FS_IOC_ENABLE_VERITY" section of
 * Documentation/filesystems/fsverity.rst for the documentation.
 *
 * Return: 0 on success, -errno on failure
 */
int fsverity_ioctl_enable(struct file *filp, const void __user *uarg)
{
	struct inode *inode = file_inode(filp);
	struct fsverity_enable_arg arg;
	int err;

	if (copy_from_user(&arg, uarg, sizeof(arg)))
		return -EFAULT;

	if (arg.version != 1)
		return -EINVAL;

	if (arg.__reserved1 ||
	    memchr_inv(arg.__reserved2, 0, sizeof(arg.__reserved2)))
		return -EINVAL;

	if (!is_power_of_2(arg.block_size))
		return -EINVAL;

	if (arg.salt_size > sizeof_field(struct fsverity_descriptor, salt))
		return -EMSGSIZE;

	if (arg.sig_size > FS_VERITY_MAX_SIGNATURE_SIZE)
		return -EMSGSIZE;

	/*
	 * Require a regular file with write access.  But the actual fd must
	 * still be readonly so that we can lock out all writers.  This is
	 * needed to guarantee that no writable fds exist to the file once it
	 * has verity enabled, and to stabilize the data being hashed.
	 */

	err = file_permission(filp, MAY_WRITE);
	if (err)
		return err;
	/*
	 * __kernel_read() is used while building the Merkle tree.  So, we can't
	 * allow file descriptors that were opened for ioctl access only, using
	 * the special nonstandard access mode 3.  O_RDONLY only, please!
	 */
	if (!(filp->f_mode & FMODE_READ))
		return -EBADF;

	if (IS_APPEND(inode))
		return -EPERM;

	if (S_ISDIR(inode->i_mode))
		return -EISDIR;

	if (!S_ISREG(inode->i_mode))
		return -EINVAL;

	err = mnt_want_write_file(filp);
	if (err) /* -EROFS */
		return err;

	err = deny_write_access(filp);
	if (err) /* -ETXTBSY */
		goto out_drop_write;

	err = enable_verity(filp, &arg);

	/*
	 * We no longer drop the inode's pagecache after enabling verity.  This
	 * used to be done to try to avoid a race condition where pages could be
	 * evicted after being used in the Merkle tree construction, then
	 * re-instantiated by a concurrent read.  Such pages are unverified, and
	 * the backing storage could have filled them with different content, so
	 * they shouldn't be used to fulfill reads once verity is enabled.
	 *
	 * But, dropping the pagecache has a big performance impact, and it
	 * doesn't fully solve the race condition anyway.  So for those reasons,
	 * and also because this race condition isn't very important relatively
	 * speaking (especially for small-ish files, where the chance of a page
	 * being used, evicted, *and* re-instantiated all while enabling verity
	 * is quite small), we no longer drop the inode's pagecache.
	 */

	/*
	 * allow_write_access() is needed to pair with deny_write_access().
	 * Regardless, the filesystem won't allow writing to verity files.
	 */
	allow_write_access(filp);
out_drop_write:
	mnt_drop_write_file(filp);
	return err;
}
EXPORT_SYMBOL_GPL(fsverity_ioctl_enable);